code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package runtime
import (
"fmt"
"strings"
log "github.com/sirupsen/logrus"
)
// StmtType indicates the type of statement.
type StmtType string

// Supported statement types.
const (
	StmtTypeAssert StmtType = "assert"
	StmtTypeAssign StmtType = "assign"
)

// Stmt is the common interface implemented by all statements.
type Stmt interface {
	// Type reports which kind of statement this is.
	Type() StmtType
	// Replace substitutes key with val inside the statement; used to
	// resolve map and array index values before printing.
	Replace(key, val string)
}

// AssignStmtType is the assignment operator used by an assign statement.
type AssignStmtType string

// Supported assign statement operators.
const (
	AssignStmtTypeDefine AssignStmtType = ":="
	// NOTE(review): "STmt" casing looks like a typo, but the name is
	// exported, so renaming it would break callers.
	AssignSTmtTypeAssign AssignStmtType = "="
)
// Type reports that this is an assign statement.
// (Previous comment said "assert stmt" — copy/paste error.)
func (a *AssignStmt) Type() StmtType {
	return StmtTypeAssign
}

// Replace substitutes key with val in the right-hand side; used to resolve
// map and array index values before printing.
func (a *AssignStmt) Replace(key, val string) {
	// strings.ReplaceAll is already a no-op when key is absent, so the
	// previous strings.Contains pre-check was redundant.
	a.RightHand = strings.ReplaceAll(a.RightHand, key, val)
}
// AssertStmtType identifies a testify assertion method by name.
type AssertStmtType string

// Supported assert statement types. The string value is the testify
// method name that gets printed verbatim.
const (
	AssertStmtTypeEqualValues AssertStmtType = "EqualValues"
	AssertStmtTypeNil         AssertStmtType = "Nil"
	AssertStmtTypeNoError     AssertStmtType = "NoError"
	AssertStmtTypeError       AssertStmtType = "Error"
	AssertStmtTypeFalse       AssertStmtType = "False"
	AssertStmtTypeTrue        AssertStmtType = "True"
)

// AssertStmt is an assert statement: an assertion method plus its
// expected and (for two-argument assertions) actual value expressions.
type AssertStmt struct {
	AssertStmtType AssertStmtType
	Expected       string
	Value          string
}

// Type reports that this is an assert statement.
func (a *AssertStmt) Type() StmtType {
	return StmtTypeAssert
}

// Replace is intentionally a no-op for assert statements: only assign
// right-hand sides carry the placeholders that need resolving.
func (a *AssertStmt) Replace(key, val string) {
}
// StmtPrinter renders statements as Go source text. It also implements
// fmt.Stringer to identify the printer itself.
type StmtPrinter interface {
	fmt.Stringer
	// PrintStmt returns the source representation of stmt.
	PrintStmt(stmt Stmt) string
}

// TestifySuitePrinter prints statements for a testify suite, prefixing
// assertions with the suite receiver (e.g. "s.EqualValues(...)").
type TestifySuitePrinter struct {
	// Receiver is the variable name the assertion methods are called on.
	Receiver string
}

// NewTestifySuitePrinter returns a StmtPrinter that prints assertions on
// the given suite receiver.
func NewTestifySuitePrinter(receiver string) StmtPrinter {
	return &TestifySuitePrinter{
		Receiver: receiver,
	}
}
// PrintStmt renders a statement using the printer appropriate for its
// concrete type. Unknown statement types are logged and rendered as "".
func (t *TestifySuitePrinter) PrintStmt(stmt Stmt) string {
	switch tp := stmt.(type) {
	case *AssertStmt:
		return t.PrintAssertStmt(tp)
	case *AssignStmt:
		return t.PrintAssignStmt(tp)
	default:
		// Include the dynamic type so the offending statement can be
		// traced; the bare Warningf carried no information.
		log.Warningf("unexpected stmt type %T", stmt)
		return ""
	}
}
// PrintAssertStmt prints an assert statement for a testcase in a testify
// suite, e.g. "s.EqualValues(expected, actual)".
func (t *TestifySuitePrinter) PrintAssertStmt(astmt *AssertStmt) string {
	switch astmt.AssertStmtType {
	case AssertStmtTypeEqualValues:
		// Two-argument assertion: expected and actual.
		return fmt.Sprintf("%s.%s(%s,%s)", t.Receiver, astmt.AssertStmtType, astmt.Expected, astmt.Value)
	case AssertStmtTypeNil,
		AssertStmtTypeNoError,
		AssertStmtTypeError,
		AssertStmtTypeFalse,
		AssertStmtTypeTrue:
		// Single-argument assertions.
		return fmt.Sprintf("%s.%s(%s)", t.Receiver, astmt.AssertStmtType, astmt.Expected)
	default:
		// Name the unknown assertion in the log; the FIXME comment is still
		// emitted so the generated file stays reviewable.
		log.Warningf("unexpected assert stmt type %q", astmt.AssertStmtType)
		return fmt.Sprintf("// FIXME: unknown assertion %s.%s(%s,%s)", t.Receiver, astmt.AssertStmtType, astmt.Expected, astmt.Value)
	}
}
// PrintAssignStmt prints an assign statement, e.g. "x := y" or "x = y".
func (t *TestifySuitePrinter) PrintAssignStmt(astmt *AssignStmt) string {
	switch astmt.AssignStmtType {
	case AssignSTmtTypeAssign,
		AssignStmtTypeDefine:
		return fmt.Sprintf("%s %s %s", astmt.LeftHand, astmt.AssignStmtType, astmt.RightHand)
	default:
		// Bug fix: the previous message said "unexpected assert stmt type",
		// copied from the assert printer. Also name the offending operator.
		log.Warningf("unexpected assign stmt type %q", astmt.AssignStmtType)
		return fmt.Sprintf("// FIXME: unknown assign %s %s %s", astmt.LeftHand, astmt.AssignStmtType, astmt.RightHand)
	}
}
func (t *TestifySuitePrinter) String() string {
return "testify suite printer"
} | internal/runtime/runtime_assertions.go | 0.540681 | 0.45744 | runtime_assertions.go | starcoder |
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
"github.com/vorteil/direktiv/ent/namespace"
)
// Namespace is the model entity for the Namespace schema.
// NOTE(review): this looks like ent-generated code — prefer regenerating
// over hand-editing.
type Namespace struct {
	config `json:"-"`
	// ID of the ent.
	ID string `json:"id,omitempty"`
	// Created holds the value of the "created" field.
	Created time.Time `json:"created,omitempty"`
	// Key holds the value of the "key" field.
	Key []byte `json:"key,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the NamespaceQuery when eager-loading is set.
	Edges NamespaceEdges `json:"edges"`
}

// NamespaceEdges holds the relations/edges for other nodes in the graph.
type NamespaceEdges struct {
	// Workflows holds the value of the workflows edge.
	Workflows []*Workflow `json:"workflows,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the workflows edge.
	loadedTypes [1]bool
}

// WorkflowsOrErr returns the Workflows value or an error if the edge
// was not loaded in eager-loading.
func (e NamespaceEdges) WorkflowsOrErr() ([]*Workflow, error) {
	if e.loadedTypes[0] {
		return e.Workflows, nil
	}
	return nil, &NotLoadedError{edge: "workflows"}
}
// scanValues returns the scan destinations (one pointer per column) for
// reading a Namespace row from sql.Rows. Unknown columns are an error.
func (*Namespace) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i := range columns {
		switch columns[i] {
		case namespace.FieldKey:
			// Raw bytes column.
			values[i] = &[]byte{}
		case namespace.FieldID:
			values[i] = &sql.NullString{}
		case namespace.FieldCreated:
			values[i] = &sql.NullTime{}
		default:
			return nil, fmt.Errorf("unexpected column %q for type Namespace", columns[i])
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after
// scanning) to the Namespace fields. columns and values must correspond
// positionally; extra values are tolerated, missing ones are an error.
func (n *Namespace) assignValues(columns []string, values []interface{}) error {
	// The inner m/n here deliberately shadow the receiver within the if
	// statement's scope only.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case namespace.FieldID:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field id", values[i])
			} else if value.Valid {
				n.ID = value.String
			}
		case namespace.FieldCreated:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created", values[i])
			} else if value.Valid {
				n.Created = value.Time
			}
		case namespace.FieldKey:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field key", values[i])
			} else if value != nil {
				n.Key = *value
			}
		}
	}
	return nil
}
// QueryWorkflows queries the "workflows" edge of the Namespace entity.
func (n *Namespace) QueryWorkflows() *WorkflowQuery {
	return (&NamespaceClient{config: n.config}).QueryWorkflows(n)
}

// Update returns a builder for updating this Namespace.
// Note that you need to call Namespace.Unwrap() before calling this method if this Namespace
// was returned from a transaction, and the transaction was committed or rolled back.
func (n *Namespace) Update() *NamespaceUpdateOne {
	return (&NamespaceClient{config: n.config}).UpdateOne(n)
}

// Unwrap unwraps the Namespace entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity did not come from a transaction.
func (n *Namespace) Unwrap() *Namespace {
	tx, ok := n.config.driver.(*txDriver)
	if !ok {
		panic("ent: Namespace is not a transactional entity")
	}
	n.config.driver = tx.drv
	return n
}
// String implements fmt.Stringer, rendering the entity as
// "Namespace(id=…, created=…, key=…)".
func (n *Namespace) String() string {
	var b strings.Builder
	b.WriteString("Namespace(")
	fmt.Fprintf(&b, "id=%v", n.ID)
	b.WriteString(", created=")
	b.WriteString(n.Created.Format(time.ANSIC))
	b.WriteString(", key=")
	fmt.Fprintf(&b, "%v", n.Key)
	b.WriteString(")")
	return b.String()
}
// Namespaces is a parsable slice of Namespace.
type Namespaces []*Namespace
func (n Namespaces) config(cfg config) {
for _i := range n {
n[_i].config = cfg
}
} | ent/namespace.go | 0.643217 | 0.438785 | namespace.go | starcoder |
package shapes
import (
. "github.com/gabz57/goledmatrix/canvas"
. "github.com/gabz57/goledmatrix/components"
)
// Circle is a drawable circle component. Its pixels (optional fill plus
// contour) are computed once at construction time and replayed on Draw.
type Circle struct {
	*Graphic
	center Point // circle center, relative to the graphic's offset
	radius int
	fill   bool
	pixels []Pixel // precomputed pixels in canvas coordinates
}

// NewCircle builds a circle centered at center with the given radius,
// optionally filled with the graphic layout's background color.
func NewCircle(graphic *Graphic, center Point, radius int, fill bool) *Circle {
	c := Circle{
		Graphic: graphic,
		center:  center,
		radius:  radius,
		fill:    fill,
	}
	// Pixels are precomputed here; the circle is static afterwards.
	c.pixels = c.buildPixels(center, radius, fill)
	return &c
}
// buildPixels precomputes the pixel list for the circle: an optional filled
// interior in the layout background color, followed by the contour in the
// foreground color. Coordinates are translated by the graphic's computed
// offset.
func (c *Circle) buildPixels(centerPoint Point, radius int, fill bool) []Pixel {
	center := c.ComputedOffset().Add(centerPoint)
	var pixels []Pixel
	if fill {
		c.fillCircle(&pixels, radius, center)
	}
	c.contourCircle(&pixels, radius, center)
	return pixels
}
// fillCircle appends the interior pixels of the circle (background color) by
// scanning one quadrant and mirroring each point into the other three.
// NOTE(review): points on the axes (x==0 or y==0) are appended more than
// once by the mirroring; harmless overdraw, but the slice contains
// duplicates.
func (c *Circle) fillCircle(pixels *[]Pixel, radius int, center Point) {
	bgColorColor := c.Layout().BackgroundColor()
	radiusSqr := radius * radius
	for x := 0; x <= radius; x++ {
		for y := 0; y <= radius; y++ {
			// Inside test: x² + y² ≤ r².
			if x*x+y*y <= radiusSqr {
				*pixels = append(*pixels,
					Pixel{
						X: center.X + x,
						Y: center.Y + y,
						C: bgColorColor,
					},
					Pixel{
						X: center.X + x,
						Y: center.Y - y,
						C: bgColorColor,
					},
					Pixel{
						X: center.X - x,
						Y: center.Y + y,
						C: bgColorColor,
					},
					Pixel{
						X: center.X - x,
						Y: center.Y - y,
						C: bgColorColor,
					},
				)
			}
		}
	}
}
// contourCircle appends the circle outline pixels (foreground color) using
// an integer midpoint-style circle algorithm: it walks one octant
// (y from 0 up to x) and mirrors each point into the remaining seven.
func (c *Circle) contourCircle(pixels *[]Pixel, radius int, center Point) {
	fgColor := c.Layout().Color()
	var x = radius
	var y = 0
	// radiusError tracks the accumulated integer error deciding when to
	// step x inward.
	var radiusError = 1 - x
	for y <= x {
		// Eight-way symmetry: (±x, ±y) and (±y, ±x) around the center.
		*pixels = append(*pixels,
			Pixel{
				X: center.X + x,
				Y: center.Y + y,
				C: fgColor,
			},
			Pixel{
				X: center.X + x,
				Y: center.Y - y,
				C: fgColor,
			},
			Pixel{
				X: center.X - x,
				Y: center.Y + y,
				C: fgColor,
			},
			Pixel{
				X: center.X - x,
				Y: center.Y - y,
				C: fgColor,
			},
			Pixel{
				X: center.X + y,
				Y: center.Y + x,
				C: fgColor,
			},
			Pixel{
				X: center.X + y,
				Y: center.Y - x,
				C: fgColor,
			},
			Pixel{
				X: center.X - y,
				Y: center.Y + x,
				C: fgColor,
			},
			Pixel{
				X: center.X - y,
				Y: center.Y - x,
				C: fgColor,
			})
		y++
		if radiusError < 0 {
			radiusError += 2*y + 1
		} else {
			x--
			radiusError += 2 * (y - x + 1)
		}
	}
}
func (c *Circle) Draw(canvas Canvas) error {
for _, pixel := range c.pixels {
canvas.Set(pixel.X, pixel.Y, pixel.C)
}
return nil
} | components/shapes/circle.go | 0.723016 | 0.419113 | circle.go | starcoder |
package d10
import (
"math"
"sort"
"strings"
"github.com/jzimbel/adventofcode-go/solutions"
)
const (
	// width and height of my puzzle input, used for some slight optimizations
	// (initial capacities of the grid and gcd cache).
	width  = 24
	height = 24
)

// epsilon is the smallest float64 increment above 1; set in init and used
// for approximate angle comparison when sorting radial points.
var epsilon float64

// point is an integer grid coordinate.
type point struct {
	x int
	y int
}

// grid is the set of occupied (asteroid) positions.
type grid map[point]struct{}

// gcdCache stores memoized gcd results keyed by the sorted operand pair.
var gcdCache map[[2]int]int
// gcd returns the greatest common denominator of two ints.
// Results are memoized for a slight performance bump; the cache key is the
// operand pair in sorted order since gcd is symmetric.
func gcd(a, b int) (n int) {
	var key [2]int
	if a < b {
		key = [...]int{a, b}
	} else {
		key = [...]int{b, a}
	}
	var ok bool
	if n, ok = gcdCache[key]; !ok {
		// Euclid's algorithm, recursive form.
		if b != 0 {
			n = gcd(b, a%b)
		} else {
			n = a
		}
		gcdCache[key] = n
	}
	return
}
// axisDistances returns (separately) the absolute x and y distances between
// two points. Computed with integer arithmetic directly — the previous
// int→float64→math.Abs→int round-trip was unnecessary.
func axisDistances(p1, p2 *point) (xDist int, yDist int) {
	xDist, yDist = p1.x-p2.x, p1.y-p2.y
	if xDist < 0 {
		xDist = -xDist
	}
	if yDist < 0 {
		yDist = -yDist
	}
	return
}
// isBlocked reports whether any asteroid in g lies strictly between p1 and
// p2 on the line segment joining them. Dividing the axis deltas by their
// gcd yields the smallest integer step along that line; every intermediate
// lattice point is then checked for occupancy.
func isBlocked(p1, p2 *point, g grid) (blocked bool) {
	denom := gcd(axisDistances(p1, p2))
	xStepSize, yStepSize := (p2.x-p1.x)/denom, (p2.y-p1.y)/denom
	// i starts at 1 and stops before denom so the endpoints are excluded.
	for i := 1; i < denom; i++ {
		if _, ok := g[point{p1.x + i*xStepSize, p1.y + i*yStepSize}]; ok {
			blocked = true
			break
		}
	}
	return
}
// part1 finds the asteroid with direct line of sight to the most other
// asteroids, returning that count and its position. O(n²) pairs, each with
// a line-of-sight walk.
func part1(g grid) (maxVisibleCount int, optimalPoint point) {
	for p1 := range g {
		var visibleCount int
		for p2 := range g {
			if p1 == p2 {
				continue
			}
			if !isBlocked(&p1, &p2, g) {
				visibleCount++
			}
		}
		if visibleCount > maxVisibleCount {
			maxVisibleCount = visibleCount
			optimalPoint = p1
		}
	}
	return
}
// rPoint is a point in polar (radial) coordinates relative to the laser
// station, keeping its original Cartesian position alongside.
type rPoint struct {
	r    float64
	θ    float64 // angle from "up", clockwise; #codegolfing
	orig point
}

// rPoints is an ordered list of radial (or polar) coordinates.
// Polar axis (where θ = 0) is up.
type rPoints []rPoint

func (rg rPoints) Len() int {
	return len(rg)
}

// Less orders by angle first; points at (approximately) the same angle are
// ordered nearest-first, matching vaporization order.
func (rg rPoints) Less(i, j int) bool {
	if math.Abs(rg[i].θ-rg[j].θ) < epsilon {
		return rg[i].r < rg[j].r
	}
	return rg[i].θ < rg[j].θ
}

func (rg rPoints) Swap(i, j int) {
	rg[i], rg[j] = rg[j], rg[i]
}
// dist returns the Euclidean distance between two points.
// math.Hypot computes sqrt(a²+b²) directly (and robustly); the previous
// Abs + Pow(…, 2) round-trip was redundant since squaring discards sign.
func dist(p1, p2 *point) float64 {
	return math.Hypot(float64(p1.x-p2.x), float64(p1.y-p2.y))
}
// clockwiseAngleFromUp calculates the angle from -y in radians of the ray from p1 to p2, moving clockwise.
// Atan2 normally takes arguments as (y,x), but we reverse them and negate x in order to
// have polar θ = 0 be Cartesian (0,-1) and increasing θ move in a clockwise direction.
// Atan2 also produces values in range [-π, π], but we want them to be [0, 2π],
// so when it would normally produce a negative, we use 2π + atan2Result.
func clockwiseAngleFromUp(p1, p2 *point) (θ float64) {
	// Rotate/reflect the frame so that "up" maps onto the polar axis.
	newX, newY := -(p2.y - p1.y), p2.x-p1.x
	θ = math.Atan2(float64(newY), float64(newX))
	if θ < 0 {
		θ = 2*math.Pi + θ
	}
	return
}
// part2 simulates the rotating laser at optimalPoint: on each full sweep it
// vaporizes every asteroid that is currently unblocked (in clockwise angle
// order, nearest first at equal angles), removes them from the grid, and
// repeats. Returns x*100+y of the 200th asteroid vaporized.
// Note: part2 mutates g.
func part2(g grid, optimalPoint point) int {
	// record angle and distance from center of each asteroid in a sorted slice of struct {rad float64; dist float64}
	rp := make(rPoints, 0, len(g))
	for p := range g {
		if p == optimalPoint {
			continue
		}
		rp = append(rp, rPoint{r: dist(&optimalPoint, &p), θ: clockwiseAngleFromUp(&optimalPoint, &p), orig: p})
	}
	sort.Sort(rp)
	var vaporizedCount int
	for len(rp) > 0 {
		// Survivors of this sweep (still blocked by a nearer asteroid).
		nextRp := make(rPoints, 0, len(rp))
		// Asteroids vaporized this sweep; deleted from g only after the
		// sweep so blocking checks stay consistent within it.
		remove := make([]*point, 0, len(rp))
		for i := range rp {
			if isBlocked(&optimalPoint, &rp[i].orig, g) {
				nextRp = append(nextRp, rp[i])
			} else {
				vaporizedCount++
				if vaporizedCount == 200 {
					return rp[i].orig.x*100 + rp[i].orig.y
				}
				remove = append(remove, &rp[i].orig)
			}
		}
		for i := range remove {
			delete(g, *remove[i])
		}
		rp = nextRp
	}
	// unreachable as long as there are at least 200 asteroids on the grid
	return 0
}
// Solve provides the day 10 puzzle solution. The input is a newline-separated
// map where '#' marks an asteroid; it is parsed into a sparse grid keyed by
// coordinate.
func Solve(input string) (*solutions.Solution, error) {
	g := make(grid, width*height)
	rows := strings.Split(input, "\n")
	for y := range rows {
		for x := range rows[y] {
			if rows[y][x] == '#' {
				g[point{x, y}] = struct{}{}
			}
		}
	}
	// part2 reuses (and mutates) the grid after part1 picked the station.
	maxVisibleCount, optimalPoint := part1(g)
	return &solutions.Solution{Part1: maxVisibleCount, Part2: part2(g, optimalPoint)}, nil
}

// init seeds epsilon (machine epsilon for float64) and the gcd cache.
func init() {
	epsilon = math.Nextafter(1, 2) - 1
	gcdCache = make(map[[2]int]int, width*height)
}
package types
import (
"github.com/PapaCharlie/go-restli/codegen/utils"
. "github.com/dave/jennifer/jen"
)
// AddEquals generates two methods on the named type: an EqualsInterface
// method that type-asserts an interface{} argument to *typeName and
// delegates, and an Equals(*typeName) method whose nil-guarded body is
// produced by the supplied callback f. Returns the generated Equals method
// statement for further chaining.
func AddEquals(def *Statement, receiver, typeName string, f func(other Code, def *Group)) *Statement {
	other := Id("other")
	otherInterface := Id("otherInterface")
	rightHandType := Op("*").Id(typeName)
	// func (r T) EqualsInterface(otherInterface interface{}) bool
	utils.AddFuncOnReceiver(def, receiver, typeName, utils.EqualsInterface).
		Params(Add(otherInterface).Interface()).Bool().
		BlockFunc(func(def *Group) {
			ok := Id("ok")
			def.List(other, ok).Op(":=").Add(otherInterface).Assert(rightHandType)
			def.If(Op("!").Add(ok)).Block(Return(False())).Line()
			def.Return(Id(receiver).Dot(utils.Equals).Call(other))
		}).Line().Line()
	// func (r T) Equals(other *T) bool — nil on either side is unequal.
	return utils.AddFuncOnReceiver(def, receiver, typeName, utils.Equals).
		Params(Add(other).Add(rightHandType)).Bool().
		BlockFunc(func(def *Group) {
			def.If(Id(receiver).Op("==").Nil().Op("||").Add(other).Op("==").Nil()).Block(Return(False())).Line()
			f(other, def)
		}).Line().Line()
}
// GenerateEquals emits the Equals/EqualsInterface methods for a record:
// field-by-field equality over the record's sorted fields, returning true
// only if every comparison passes.
func (r *Record) GenerateEquals() Code {
	return AddEquals(Empty(), r.Receiver(), r.Name, func(other Code, def *Group) {
		for _, f := range r.SortedFields() {
			left, right := r.fieldAccessor(f), fieldAccessor(other, f)
			def.Add(equals(f.Type, f.IsOptionalOrDefault(), left, right)).Line()
		}
		def.Return(True())
	})
}
// equals generates the statements that compare two values of RestliType t
// (left vs right), emitting `return false` on the first mismatch.
// isPointer selects an extra nil-guard wrapper around the comparison.
func equals(t RestliType, isPointer bool, left, right Code) Code {
	// allocateNewRight hoists the right-hand expression into a local `ref`
	// variable when the type is a typeref, so it can be addressed/reused.
	allocateNewRight := func(def *Group, t RestliType, right Code) Code {
		if t.Typeref() != nil {
			ref := Id("ref")
			def.Add(ref).Op(":=").Add(right)
			return ref
		} else {
			return right
		}
	}
	// check emits the type-specific comparison for already-dereferenced
	// operands.
	check := func(left, right Code) Code {
		def := Empty()
		switch {
		case t.Primitive != nil:
			if t.Primitive.IsBytes() {
				// Byte slices need bytes.Equal, not ==.
				def.If(Op("!").Qual("bytes", "Equal").Call(left, right)).Block(Return(False()))
			} else {
				def.If(Add(left).Op("!=").Add(right)).Block(Return(False()))
			}
		case t.Reference != nil:
			// Referenced types expose Equals(*T); take the address when the
			// right-hand side is not already a pointer.
			if !isPointer {
				right = Op("&").Add(right)
			}
			def.If(Op("!").Add(left).Dot(utils.Equals).Call(right)).Block(Return(False()))
		case t.Array != nil:
			// Length check first, then element-wise recursion.
			def.If(Len(left).Op("!=").Len(right)).Block(Return(False())).Line()
			index, item := tempIteratorVariableNames(t)
			def.For().List(index, item).Op(":=").Range().Add(left).BlockFunc(func(def *Group) {
				def.Add(equals(*t.Array, t.Array.ShouldReference(), item,
					allocateNewRight(def, *t.Array, Parens(right).Index(index))))
			})
		case t.Map != nil:
			// Length check first, then per-key recursion (missing keys show
			// up as zero values and fail the element comparison).
			def.If(Len(left).Op("!=").Len(right)).Block(Return(False())).Line()
			key, value := tempIteratorVariableNames(t)
			def.For().List(key, value).Op(":=").Range().Add(left).BlockFunc(func(def *Group) {
				def.Add(equals(*t.Map, t.Map.ShouldReference(), value,
					allocateNewRight(def, *t.Map, Parens(right).Index(key))))
			})
		}
		return def
	}
	if isPointer {
		// Pointer semantics: identical pointers are equal; exactly one nil
		// is unequal; otherwise compare the pointed-to values.
		return If(Add(left).Op("!=").Add(right)).BlockFunc(func(def *Group) {
			def.If(Add(left).Op("==").Nil().Op("||").Add(right).Op("==").Nil()).Block(Return(False()))
			if t.Reference == nil {
				left, right = Op("*").Add(left), Op("*").Add(right)
			}
			def.Add(check(left, right))
		})
	} else {
		return check(left, right)
	}
}
package golem
import (
"fmt"
)
//////////// revisions to delta functions

// Delta_Ends returns the relative change from the first to the last element
// of f, i.e. (last-first)/first; ZeroDiv maps a zero denominator to ±1.
func Delta_Ends(f []float64) float64 {
	x := f[len(f)-1] - f[0]
	return ZeroDiv(x, f[0], float64(1), float64(-1))
}

// Delta_Mean2End returns the relative change from the mean of f to its last
// element, i.e. (last-mean)/mean; ZeroDiv maps a zero mean to ±1.
func Delta_Mean2End(f []float64) float64 {
	mean := MeanIterable(f)
	x := f[len(f)-1] - mean
	return ZeroDiv(x, mean, float64(1), float64(-1))
}

// Delta_Mean2Start returns the relative change from the first element of f
// to its mean, i.e. (mean-first)/mean; ZeroDiv maps a zero mean to ±1.
func Delta_Mean2Start(f []float64) float64 {
	mean := MeanIterable(f)
	x := mean - f[0]
	return ZeroDiv(x, mean, float64(1), float64(-1))
}
/*
Delta_Linear returns the mean of the stepwise relative changes between
consecutive elements of f. A zero denominator at any step contributes ±1
("0-div -> 100% delta"); note the sentinel order here (-1 for positive,
+1 for negative) is the reverse of the other Delta_* functions — TODO
confirm whether that is intentional.
*/
func Delta_Linear(f []float64) float64 {
	j := len(f) - 1
	deltaSum := float64(0)
	for i := 0; i < j; i++ {
		deltaSum += ZeroDiv(f[i+1]-f[i], f[i], float64(-1), float64(1))
	}
	return deltaSum / float64(j)
}
// ZeroDiv divides delta by reference. When reference is zero the quotient
// is undefined, so a caller-supplied sentinel is returned instead: undefNeg
// for a negative delta, undefPos otherwise (including delta == 0).
func ZeroDiv(delta float64, reference float64, undefPos float64, undefNeg float64) float64 {
	if reference != 0 {
		return delta / reference
	}
	if delta < 0 {
		return undefNeg
	}
	return undefPos
}
// ZeroDivStandard divides delta by reference, yielding zero instead of a
// division-by-zero when reference is zero.
func ZeroDivStandard(delta float64, reference float64) float64 {
	if reference == 0 {
		return reference
	}
	return delta / reference
}
// DeltaOpOnSequence dispatches to one of the Delta_* functions by name.
// Valid opType values: "ends", "mean-to-end", "mean-to-start", "linear".
// Panics on a slice shorter than 2 or an unknown opType.
// TODO: error-check — return an error instead of panicking.
func DeltaOpOnSequence(f []float64, opType string) float64 {
	if len(f) < 2 {
		panic("slice must be at least size 2")
	}
	switch opType {
	case "ends":
		return Delta_Ends(f)
	case "mean-to-end":
		return Delta_Mean2End(f)
	case "mean-to-start":
		return Delta_Mean2Start(f)
	case "linear":
		return Delta_Linear(f)
	default:
		panic(fmt.Sprintf("invalid op type %s", opType))
	}
}
/// TODO: code "ends"

// DeltaOpOnDiscreteSequence returns the mode (most frequent value) of a
// discrete sequence, via the max-by-value key of its frequency map.
// Tie-breaking is delegated to MinAndMaxKeyByValueOfMapStringToFloat64 —
// presumably map-iteration dependent; verify if determinism matters.
func DeltaOpOnDiscreteSequence(s []string) string {
	output := make(map[string]float64, 0)
	for _, s_ := range s {
		output[s_]++
	}
	_, max := MinAndMaxKeyByValueOfMapStringToFloat64(output)
	return max
}
/*
DeltaOpOnSetSequence compares the first and last elements of s as sets
(each element is a delimited string, split on DEFAULT_DELIMITER) and
returns:
  - |set_difference(s[0], s[-1])| : sub (elements removed)
  - |set_difference(s[-1], s[0])| : add (elements added)

Panics if s has fewer than 2 elements.
*/
func DeltaOpOnSetSequence(s []string) (int, int) {
	if len(s) < 2 {
		panic("cannot perform delta op. on set seq. less than length 2")
	}
	s0, _ := DefaultStringToStringSlice(s[0], DEFAULT_DELIMITER)
	s1, _ := DefaultStringToStringSlice(s[len(s)-1], DEFAULT_DELIMITER)
	return len(StringSliceDifference(s0, s1)), len(StringSliceDifference(s1, s0))
}
package data_parser
import "math"
// Normalize rewrites an absolute SVG path (segments) so that only the
// commands M, L, C and Z remain: H/V become L; S/T/Q become C (quadratics
// are degree-elevated to cubics); A is approximated by cubic curves.
//
// State tracked across segments: (cx, cy) current point, (subx, suby)
// current subpath start (for Z), (lcx, lcy) last control point (for the
// S/T reflection rules, which only apply after a C/S resp. Q/T command).
func Normalize(segments []Segment) (out []Segment) {
	lastType := ""
	cx := float64(0)
	cy := float64(0)
	subx := float64(0)
	suby := float64(0)
	lcx := float64(0)
	lcy := float64(0)
	for _, s := range segments {
		switch s.Key {
		case "M":
			// Move: starts a new subpath.
			out = append(out, Segment{
				Key:  "M",
				Data: s.Data,
			})
			cx = s.Data[0]
			cy = s.Data[1]
			subx = s.Data[0]
			suby = s.Data[1]
		case "C":
			// Cubic curve: passes through unchanged; remember the second
			// control point for a following S.
			out = append(out, Segment{
				Key:  "C",
				Data: s.Data,
			})
			cx = s.Data[4]
			cy = s.Data[5]
			lcx = s.Data[2]
			lcy = s.Data[3]
		case "L":
			out = append(out, Segment{
				Key:  "L",
				Data: s.Data,
			})
			cx = s.Data[0]
			cy = s.Data[1]
		case "H":
			// Horizontal line → L at (x, current y).
			cx = s.Data[0]
			out = append(out, Segment{
				Key:  "L",
				Data: []float64{cx, cy},
			})
		case "V":
			// Vertical line → L at (current x, y).
			cy = s.Data[0]
			out = append(out, Segment{
				Key:  "L",
				Data: []float64{cx, cy},
			})
		case "S":
			// Smooth cubic: first control point is the reflection of the
			// previous C/S control point, else the current point.
			cx1 := cx
			cy1 := cy
			if lastType == "C" || lastType == "S" {
				cx1 = cx + (cx - lcx)
				cy1 = cy + (cy - lcy)
			}
			newData := []float64{cx1, cy1}
			newData = append(newData, s.Data...)
			out = append(out, Segment{
				Key:  "C",
				Data: newData,
			})
			lcx = s.Data[0]
			lcy = s.Data[1]
			cx = s.Data[2]
			cy = s.Data[3]
		case "T":
			// Smooth quadratic: reflect the previous Q/T control point,
			// then degree-elevate the quadratic to a cubic.
			x := s.Data[0]
			y := s.Data[1]
			x1 := cx
			y1 := cy
			if lastType == "Q" || lastType == "T" {
				x1 = cx + (cx - lcx)
				y1 = cy + (cy - lcy)
			}
			cx1 := cx + 2*(x1-cx)/3
			cy1 := cy + 2*(y1-cy)/3
			cx2 := x + 2*(x1-x)/3
			cy2 := y + 2*(y1-y)/3
			out = append(out, Segment{
				Key:  "C",
				Data: []float64{cx1, cy1, cx2, cy2, x, y},
			})
			lcx = x1
			lcy = y1
			cx = x
			cy = y
		case "Q":
			// Quadratic → cubic: control points at 2/3 of the way from the
			// endpoints towards the quadratic control point.
			x1 := s.Data[0]
			y1 := s.Data[1]
			x := s.Data[2]
			y := s.Data[3]
			cx1 := cx + 2*(x1-cx)/3
			cy1 := cy + 2*(y1-cy)/3
			cx2 := x + 2*(x1-x)/3
			cy2 := y + 2*(y1-y)/3
			out = append(out, Segment{
				Key:  "C",
				Data: []float64{cx1, cy1, cx2, cy2, x, y},
			})
			lcx = x1
			lcy = y1
			cx = x
			cy = y
		case "A":
			// Elliptical arc → cubic approximation. Per the SVG spec,
			// zero radii degenerate to a straight line; a zero-length arc
			// is dropped entirely.
			r1 := math.Abs(s.Data[0])
			r2 := math.Abs(s.Data[1])
			angle := s.Data[2]
			largeArcFlag := s.Data[3]
			sweepFlag := s.Data[4]
			x := s.Data[5]
			y := s.Data[6]
			if r1 == 0 || r2 == 0 {
				out = append(out, Segment{
					Key:  "C",
					Data: []float64{cx, cy, x, y, x, y},
				})
				cx = x
				cy = y
			} else if cx != x || cy != y {
				for _, curve := range arcToCubicCurves(cx, cy, x, y, r1, r2, angle, largeArcFlag, sweepFlag) {
					out = append(out, Segment{Key: "C", Data: curve})
				}
				cx = x
				cy = y
			}
		case "Z":
			// Close path: current point returns to the subpath start.
			out = append(out, Segment{
				Key:  "Z",
				Data: []float64{},
			})
			cx = subx
			cy = suby
		}
		lastType = s.Key
	}
	return
}
func degToRad(degrees float64) float64 {
return math.Pi * degrees / 180
}
func rotate(x float64, y float64, angleRad float64) (resX float64, resY float64) {
resX = x*math.Cos(angleRad) - y*math.Sin(angleRad)
resY = x*math.Sin(angleRad) + y*math.Cos(angleRad)
return
}
// arcToCubicCurves approximates an SVG elliptical arc from (x1,y1) to
// (x2,y2) with radii r1/r2, x-axis rotation angle (degrees) and the
// large-arc/sweep flags, as a list of cubic Bézier curves. Each returned
// slice is [c1x, c1y, c2x, c2y, x, y] — the two control points and the
// endpoint (the start point is implicit).
//
// The recursive variadic parameters (f1, f2, cx, cy) carry the start/end
// angles and ellipse center when the function splits arcs wider than 120°
// into smaller pieces; callers pass no recursive arguments.
func arcToCubicCurves(x1, y1, x2, y2, r1, r2, angle, largeArcFlag, sweepFlag float64, recursive ...float64) [][]float64 {
	angleRad := degToRad(angle)
	var params [][]float64
	params = [][]float64{}
	f1 := float64(0)
	f2 := float64(0)
	cx := float64(0)
	cy := float64(0)
	if len(recursive) > 0 {
		// Recursive call: center and angle range are already known.
		f1 = recursive[0]
		f2 = recursive[1]
		cx = recursive[2]
		cy = recursive[3]
	} else {
		// Rotate into the ellipse's axis-aligned frame.
		x1, y1 = rotate(x1, y1, -angle)
		x2, y2 = rotate(x2, y2, -angle)
		x := (x1 - x2) / 2
		y := (y1 - y2) / 2
		// Scale the radii up if they are too small to span the endpoints
		// (SVG "out of range radii" correction).
		h := (x*x)/(r1*r1) + (y*y)/(r2*r2)
		if h > 1 {
			h = math.Sqrt(h)
			r1 = h * r1
			r2 = h * r2
		}
		sign := float64(1)
		if largeArcFlag == sweepFlag {
			sign = -1
		}
		// Solve for the ellipse center and the start/end angles f1/f2.
		r1Pow := r1 * r1
		r2Pow := r2 * r2
		left := r1Pow*r2Pow - r1Pow*y*y - r2Pow*x*x
		right := r1Pow*y*y + r2Pow*x*x
		k := sign * math.Sqrt(math.Abs(left/right))
		cx = k*r1*y/r2 + (x1+x2)/2
		cy = k*-r2*x/r1 + (y1+y2)/2
		f1 = math.Asin((y1 - cy) / r2)
		f2 = math.Asin((y2 - cy) / r2)
		if x1 < cx {
			f1 = math.Pi - f1
		}
		if x2 < cx {
			f2 = math.Pi - f2
		}
		if f1 < 0 {
			f1 = math.Pi*2 - f1
		}
		if f2 < 0 {
			f2 = math.Pi*2 - f2
		}
		// Make the angle range agree with the sweep direction.
		if sweepFlag != 0 && f1 > f2 {
			f1 = f1 - math.Pi*2
		}
		if sweepFlag == 0 && f2 > f1 {
			f2 = f2 - math.Pi*2
		}
	}
	df := f2 - f1
	if math.Abs(df) > (math.Pi * 120 / 180) {
		// Arc spans more than 120°: peel off a 120° piece and recurse for
		// the remainder (one cubic per ≤120° slice keeps the error small).
		f2old := f2
		x2old := x2
		y2old := y2
		if sweepFlag != 0 && f2 > f1 {
			f2 = f1 + (math.Pi * 120 / 180) // step +120°
		} else {
			f2 = f1 + (-math.Pi * 120 / 180) // step -120°
		}
		x2 = cx + r1*math.Cos(f2)
		y2 = cy + r2*math.Sin(f2)
		params = arcToCubicCurves(x2, y2, x2old, y2old, r1, r2, angle, 0, sweepFlag, f2, f2old, cx, cy)
	}
	df = f2 - f1
	// Standard arc→cubic control-point construction for the ≤120° piece.
	c1 := math.Cos(f1)
	s1 := math.Sin(f1)
	c2 := math.Cos(f2)
	s2 := math.Sin(f2)
	t := math.Tan(df / 4)
	hx := 4 / 3 * r1 * t
	hy := 4 / 3 * r2 * t
	m1 := []float64{x1, y1}
	m2 := []float64{x1 + hx*s1, y1 - hy*c1}
	m3 := []float64{x2 + hx*s2, y2 - hy*c2}
	m4 := []float64{x2, y2}
	// Reflect the first control point about the start point.
	m2[0] = 2*m1[0] - m2[0]
	m2[1] = 2*m1[1] - m2[1]
	res := [][]float64{m2, m3, m4}
	if len(recursive) > 0 {
		// Inner recursion returns points still in the rotated frame.
		return append(res, params...)
	}
	params = append(res, params...)
	// Rotate every control/end point back into the original frame.
	var curves [][]float64
	curves = [][]float64{}
	for i := 0; i < len(params); i += 3 {
		r11, r12 := rotate(params[i][0], params[i][1], angleRad)
		r21, r22 := rotate(params[i+1][0], params[i+1][1], angleRad)
		r31, r32 := rotate(params[i+2][0], params[i+2][1], angleRad)
		curves = append(curves, []float64{r11, r12, r21, r22, r31, r32})
	}
	return curves
}
// Driver for ADAM-4000 series I/O Modules from Advantech
package adam4000
import (
"bufio"
"encoding/hex"
"fmt"
"strconv"
"strings"
"time"
)
// NewADAM4000 wires up a module handle on the given module address and
// buffered reader/writer (typically a serial port) and starts the
// background reader goroutine.
func NewADAM4000(addr byte, rc *bufio.Reader, wc *bufio.Writer) *ADAM4000 {
	var a ADAM4000
	a.address = addr
	a.rc = rc
	a.wc = wc
	// Eight analog input channels.
	a.Value = make([]float64, 8)
	a.Retries = 2
	//Modules usually reply within 50ms. Logest command seems to be read analog channels. (90-110ms)
	a.Timeout = 250 * time.Millisecond
	go a.startReader()
	return &a
}

// GetName reads the module name ("$AAM" command), caches it on the handle
// and returns it with protocol framing stripped.
func (a *ADAM4000) GetName() (string, error) {
	resp, err := a.comResF("$%02XM\r", a.address)
	if err != nil {
		return "", err
	}
	// Skip the 3-byte response header, drop trailing CR/spaces.
	a.Name = strings.Trim(string(resp[3:]), "\r ")
	return a.Name, nil
}
// GetAllValue reads all eight analog input channels in one transaction
// ("#AA" command) and stores the results in a.Value, which is also
// returned.
//
// Two response layouts are handled: engineering units (eight fixed-width
// 7-character decimal fields, 57 bytes total) and raw 4-hex-digit values.
// Fixes over the previous version: parse errors are reported instead of
// being overwritten by later assignments, a short hex response no longer
// panics on slicing, and the stray debug print of the payload length is
// removed. The unrolled per-channel parsing is collapsed into loops.
func (a *ADAM4000) GetAllValue() ([]float64, error) {
	resp, err := a.comResF("#%02X\r", a.address)
	if err != nil {
		return nil, err
	}
	values := string(resp[1:])
	if len(values) == 57 {
		// Engineering-unit format: channel i occupies values[i*7 : i*7+7].
		for i := 0; i < 8; i++ {
			v, perr := strconv.ParseFloat(values[i*7:(i+1)*7], 64)
			if perr != nil {
				return nil, fmt.Errorf("adam4000: parsing channel %d: %v", i, perr)
			}
			a.Value[i] = v
		}
		return a.Value, nil
	}
	// Hexadecimal format: channel i occupies values[i*4 : i*4+4].
	if len(values) < 32 {
		return nil, fmt.Errorf("adam4000: short response (%d bytes)", len(values))
	}
	for i := 0; i < 8; i++ {
		raw, perr := strconv.ParseInt(values[i*4:(i+1)*4], 16, 64)
		if perr != nil {
			return nil, fmt.Errorf("adam4000: parsing channel %d: %v", i, perr)
		}
		a.Value[i] = float64(raw)
	}
	return a.Value, nil
}
// GetChannelValue reads a single analog input channel n ("#AAN" command),
// stores it in a.Value[n] and returns it. A 7-character response is parsed
// as an engineering-unit decimal; anything else as a 4-hex-digit raw value.
// The hex parse error, previously discarded with "_", is now propagated.
func (a *ADAM4000) GetChannelValue(n int) (float64, error) {
	resp, err := a.comResF("#%02X%d\r", a.address, n)
	if err != nil {
		return float64(0), err
	}
	values := string(resp[1:])
	if len(values) == 7 {
		a.Value[n], err = strconv.ParseFloat(values[0:7], 64)
	} else {
		raw, perr := strconv.ParseInt(values[0:4], 16, 64)
		if perr != nil {
			return float64(0), perr
		}
		a.Value[n] = float64(raw)
	}
	return a.Value[n], err
}
// GetVersion reads the firmware version ("$AAF" command), caches it on the
// handle and returns it with protocol framing stripped.
func (a *ADAM4000) GetVersion() (string, error) {
	resp, err := a.comResF("$%02XF\r", a.address)
	if err != nil {
		return "", err
	}
	a.Version = strings.Trim(string(resp[3:]), "\r ")
	return a.Version, nil
}

// SetChannelRange configures the input range code of one channel
// ("$AA7CiRrr" command). The response payload is ignored.
func (a *ADAM4000) SetChannelRange(channel int, rangec InputRangeCode) error {
	_, err := a.comResF("$%02X7C%dR%02X\r", a.address, channel, byte(rangec))
	if err != nil {
		return err
	}
	return nil
}

// GetChannelRange reads the input range code of one channel
// ("$AA8Ci" command). The hex decode error is ignored; a malformed
// response yields a zero range code.
func (a *ADAM4000) GetChannelRange(channel int) (InputRangeCode, error) {
	resp, err := a.comResF("$%02X8C%d\r", a.address, channel)
	if err != nil {
		return 0, err
	}
	rangec := make([]byte, 1)
	hex.Decode(rangec, resp[6:8])
	return InputRangeCode(rangec[0]), nil
}

// SyncronizeRead is not implemented yet (name misspelling is exported, so
// it cannot be fixed without breaking callers).
func (a *ADAM4000) SyncronizeRead() error {
	//Stub
	return nil
}

// SyncronizedValue is not implemented yet.
func (a *ADAM4000) SyncronizedValue() ([]float64, error) {
	//Stub
	return nil, nil
}
// GetConfig reads the module status ("$AA2" command) and unpacks it into
// the handle fields: configured address, input range, baud rate and the
// data-format byte (bit 7 = integration time, bit 6 = checksum enabled,
// low bits = data format). Removes the stray debug print of the raw
// config byte that the previous version left in.
func (a *ADAM4000) GetConfig() error {
	resp, err := a.comResF("$%02X2\r", a.address)
	if err != nil {
		return err
	}
	// Response layout: "!AATTCCFF" — two hex chars each for address,
	// type (range) code, baud code and data format.
	addr := make([]byte, 1)
	typecode := make([]byte, 1)
	baud := make([]byte, 1)
	data := make([]byte, 1)
	hex.Decode(addr, resp[1:3])
	hex.Decode(typecode, resp[3:5])
	hex.Decode(baud, resp[5:7])
	hex.Decode(data, resp[7:9])
	a.Address = addr[0]
	a.InputRange = InputRangeCode(typecode[0])
	a.BaudRate = BaudRateCode(baud[0])
	a.Integration_time = data[0]&byte(1<<7) > 0
	a.Checksum = data[0]&byte(1<<6) > 0
	// NOTE(review): masking with 2 (0b10) keeps only bit 1; if the data
	// format is a two-bit field this should probably be 0x03 — confirm
	// against the ADAM-4000 manual before changing.
	a.DataFormat = DataFormatCode(data[0] & byte(2))
	if a.Address != a.address {
		fmt.Printf("Warning: Configured address (%d) differs from connected address (%d), in init mode?\n", a.Address, a.address)
	}
	return nil
}
// SetConfig writes the handle's configuration to the module ("%AANNTTCCFF"
// command): target address, input range, baud rate, and a packed data byte
// (bit 7 = integration time, bit 6 = checksum, low bits = data format).
func (a *ADAM4000) SetConfig() error {
	data := byte(a.DataFormat)
	if a.Integration_time {
		data |= byte(1 << 7)
	}
	if a.Checksum {
		data |= byte(1 << 6)
	}
	_, err := a.comResF("%%%02X%02X%02X%02X%02X\r", a.address, a.Address, byte(a.InputRange), byte(a.BaudRate), data)
	return err
}
package challenge16
import (
"crypto/aes"
"net/url"
"strings"
"github.com/esturcke/cryptopals-golang/bytes"
"github.com/esturcke/cryptopals-golang/crypt"
)
/*Solve challenge 16
CBC bitflipping attacks
See https://cryptopals.com/sets/2/challenges/16
Generate a random AES key.
Combine your padding code and CBC code to write two functions.
The first function should take an arbitrary input string, prepend the string:
"comment1=cooking%20MCs;userdata="
.. and append the string:
";comment2=%20like%20a%20pound%20of%20bacon"
The function should quote out the ";" and "=" characters.
The function should then pad out the input to the 16-byte AES block length and encrypt it under the random AES key.
The second function should decrypt the string and look for the characters ";admin=true;" (or, equivalently, decrypt, split the string on ";", convert each resulting string into 2-tuples, and look for the "admin" tuple).
Return true or false based on whether the string exists.
If you've written the first function properly, it should not be possible to provide user input to it that will generate the string the second function is looking for. We'll have to break the crypto to do that.
Instead, modify the ciphertext (without knowledge of the AES key) to accomplish this.
You're relying on the fact that in CBC mode, a 1-bit error in a ciphertext block:
* Completely scrambles the block the error occurs in
* Produces the identical 1-bit error(/edit) in the next ciphertext block.
*/
// Solve performs the CBC bitflipping attack. The fixed 32-byte prefix fills
// exactly two blocks, so the 32 zero bytes of user data occupy ciphertext
// blocks at offsets 32..47 and 48..63. XORing ct[32:48] with the target
// bytes makes the *following* plaintext block decrypt to ";admin=true"
// (the "<--->" filler absorbs the block that gets scrambled by the edit).
// Returns the value of the injected "admin" parameter.
func Solve() string {
	ct := encryptUserData(make([]byte, 32))
	block := bytes.Xor(ct[32:48], []byte("<--->;admin=true"))
	for i := range block {
		ct[32+i] = block[i]
	}
	return getAdmin(ct)
}
// Random AES key and IV, fixed for the process lifetime so that Solve's
// ciphertext round-trips through getAdmin. (The aes.NewCipher error is
// discarded; a 16-byte key cannot fail here.)
var block, _ = aes.NewCipher(bytes.Random(16))
var iv = bytes.Random(16)

// encryptUserData wraps the user data in the cookie-style prefix/suffix,
// percent-quoting ";" and "=" so parameters cannot be injected directly,
// then CBC-encrypts the PKCS#7-padded plaintext under the fixed key/IV.
func encryptUserData(data []byte) []byte {
	return crypt.EncryptCbc(block, bytes.PadPkcs7([]byte(
		"comment1=cooking%20MCs;userdata="+
			strings.Replace(strings.Replace(string(data), ";", "%3B", -1), "=", "%3D", -1)+
			";comment2=%20like%20a%20pound%20of%20bacon"), block.BlockSize()),
		iv)
}
// getAdmin decrypts ct, strips the PKCS#7 padding, parses the plaintext as
// a query string and returns the first "admin" value. Panics on a
// malformed query or a missing "admin" key — acceptable for a challenge
// harness, not production code.
func getAdmin(ct []byte) string {
	pt := bytes.StripPkcs7(crypt.DecryptCbc(block, ct, iv))
	m, err := url.ParseQuery(string(pt))
	if err != nil {
		panic(err)
	}
	return m["admin"][0]
}
package script
// asSmallInt returns the passed opcode, which must be true according to
// isSmallInt(), as an integer (OP_0 → 0, OP_1..OP_16 → 1..16).
func asSmallInt(op byte) int {
	if op == OP_0 {
		return 0
	}
	return int(op - (OP_1 - 1))
}

// isSmallInt returns whether or not the opcode is considered a small
// integer, which is an OP_0, or OP_1 through OP_16.
// Simplified from the if-true-else-false form to a direct boolean return.
func isSmallInt(op byte) bool {
	return op == OP_0 || (op >= OP_1 && op <= OP_16)
}
// isPubkey returns true if the script passed is a pay-to-pubkey transaction,
// false otherwise.
//
// NOTE(review): this operates on a compact "script type" template in which
// byte 0 appears to be the pubkey push length (33 compressed / 65
// uncompressed) and byte 1 the trailing opcode — confirm against the producer
// of scriptType before relying on this.
func isPubkey(scriptType []byte) bool {
	// Valid pubkeys are either 33 or 65 bytes.
	return len(scriptType) == 2 &&
		(scriptType[0] == 33 || scriptType[0] == 65) &&
		scriptType[1] == OP_CHECKSIG
}
// isPayToScriptHash returns true if the script passed is a pay-to-script-hash
// (P2SH) output template — OP_HASH160 OP_DATA_20 OP_EQUAL, i.e. it pays to
// hash160(script) — false otherwise.
func isPayToScriptHash(scriptType []byte) bool {
	return len(scriptType) == 3 &&
		scriptType[0] == OP_HASH160 &&
		scriptType[1] == OP_DATA_20 &&
		scriptType[2] == OP_EQUAL
}
// isPubkeyHash reports whether the script template is a pay-to-pubkey-hash
// output: OP_DUP OP_HASH160 OP_DATA_20 OP_EQUALVERIFY OP_CHECKSIG.
func isPubkeyHash(scriptType []byte) bool {
	if len(scriptType) < 5 {
		return false
	}
	return scriptType[0] == OP_DUP &&
		scriptType[1] == OP_HASH160 &&
		scriptType[2] == OP_DATA_20 &&
		scriptType[3] == OP_EQUALVERIFY &&
		scriptType[4] == OP_CHECKSIG
}
// isMultiSig returns true if the passed script is a multisig transaction, false
// otherwise.
//
// Expected compact template: OP_m <pubkey>... OP_n OP_CHECKMULTISIG, where each
// pubkey entry is represented by its push length (33 or 65).
func isMultiSig(scriptType []byte) bool {
	// The absolute minimum is 1 pubkey:
	// OP_0/OP_1-16 <pubkey> OP_1 OP_CHECKMULTISIG
	l := len(scriptType)
	if l < 4 {
		return false
	}
	// First opcode is the required-signature count m.
	if !isSmallInt(scriptType[0]) {
		return false
	}
	// Second-to-last opcode is the pubkey count n.
	if !isSmallInt(scriptType[l-2]) {
		return false
	}
	if scriptType[l-1] != OP_CHECKMULTISIG {
		return false
	}
	// Verify the number of pubkeys specified matches the actual number
	// of pubkeys provided: everything between the leading m and trailing
	// n/OP_CHECKMULTISIG must be exactly n entries.
	if l-2-1 != asSmallInt(scriptType[l-2]) {
		return false
	}
	for _, pop := range scriptType[1 : l-2] {
		// Valid pubkeys are either 33 or 65 bytes.
		if pop != 33 && pop != 65 {
			return false
		}
	}
	return true
}
// IsOpreturn reports whether the script is a data-carrier output: either a
// bare OP_RETURN or the post-Genesis OP_FALSE OP_RETURN form.
func IsOpreturn(scriptType []byte) bool {
	switch {
	case len(scriptType) > 0 && scriptType[0] == OP_RETURN:
		return true
	case len(scriptType) > 1 && scriptType[0] == OP_FALSE && scriptType[1] == OP_RETURN:
		return true
	}
	return false
}
// IsLockingScriptOnlyEqual reports whether pkScript consists of exactly one
// pushed datum followed by OP_EQUAL. An empty script is treated as a match.
func IsLockingScriptOnlyEqual(pkScript []byte) bool {
	// test locking script
	// "0b 3c4b616e7965323032303e 87"
	length := len(pkScript)
	if length == 0 {
		return true
	}
	if pkScript[length-1] != OP_EQUAL {
		return false
	}
	// cnt is the pushed data length; cntsize is the byte size of its
	// varint length prefix.
	cnt, cntsize := SafeDecodeVarIntForScript(pkScript)
	if length == int(cnt+cntsize+1) {
		return true
	}
	return false
}

// GetLockingScriptPushDropPosition returns the position just past an initial
// "<push data> OP_DROP" prefix of pkScript; ok reports whether the script
// actually starts with such a prefix.
func GetLockingScriptPushDropPosition(pkScript []byte) (pc int, ok bool) {
	// test locking script
	// "0b 3c4b616e7965323032303e 75"
	length := len(pkScript)
	if length == 0 {
		return 0, false
	}
	// First opcode must be a data push; opcodes above OP_16 are rejected.
	// NOTE(review): this assumes all push opcodes are <= OP_16 in this
	// encoding — confirm against the script producer.
	if pkScript[0] > OP_16 {
		return 0, false
	}
	cnt, cntsize := SafeDecodeVarIntForScript(pkScript)
	pc = int(cnt + cntsize)
	if length < pc+1 {
		return 0, false
	}
	if pkScript[pc] != OP_DROP {
		return 0, false
	}
	return pc + 1, true
}
package p1157
import "sort"
// Pair couples a candidate value (first) with its occurrence count (second).
type Pair struct {
	first int
	second int
}

// Pairs implements sort.Interface, ordering pairs by descending count.
type Pairs []Pair

// Len returns the number of pairs.
func (this Pairs) Len() int {
	return len(this)
}

// Less orders by count descending, so the most frequent value sorts first.
func (this Pairs) Less(i, j int) bool {
	return this[i].second > this[j].second
}

// Swap exchanges two pairs.
func (this Pairs) Swap(i, j int) {
	this[i], this[j] = this[j], this[i]
}

// Node is a segment-tree node holding the top frequency candidates for its
// range, sorted by descending count.
type Node struct {
	nums Pairs
}

// MajorityChecker answers majority-element queries (LeetCode 1157) with a
// segment tree whose nodes keep only the most frequent candidate values.
type MajorityChecker struct {
	tree []*Node
	size int
}
// merge combines the candidate lists of two segment-tree nodes by summing the
// counts of shared values and keeping at most the top 5 candidates by count.
// Either argument may be nil, in which case the other node is returned as-is.
func merge(a, b *Node) *Node {
	if a == nil && b == nil {
		return nil
	}
	if a == nil {
		return b
	}
	if b == nil {
		return a
	}
	// Sum counts across both candidate lists.
	cnt := make(map[int]int)
	for _, p := range a.nums {
		cnt[p.first] = p.second
	}
	for _, p := range b.nums {
		cnt[p.first] += p.second
	}
	nums := make(Pairs, 0, len(cnt))
	for k, v := range cnt {
		nums = append(nums, Pair{k, v})
	}
	sort.Sort(nums)
	// Keep only the top 5 candidates. The original tested len > 3 and then
	// resliced to nums[:5]; for a 4-element list that re-slice stays within
	// capacity and silently *extends* the list with a zero-value Pair — a
	// bug fixed by truncating only when there really are more than 5.
	if len(nums) > 5 {
		nums = nums[:5]
	}
	return &Node{nums}
}
// Constructor builds a MajorityChecker over arr: a segment tree where each
// node stores the top frequency candidates of its range.
func Constructor(arr []int) MajorityChecker {
	n := len(arr)
	tree := make([]*Node, 4*n)
	var loop func(i int, left int, right int)
	loop = func(i int, left int, right int) {
		// Leaf: a single element with count 1.
		if left == right {
			node := new(Node)
			node.nums = Pairs(make([]Pair, 1))
			node.nums[0] = Pair{arr[left], 1}
			tree[i] = node
			return
		}
		mid := (left + right) / 2
		loop(2*i+1, left, mid)
		loop(2*i+2, mid+1, right)
		// Internal node: merge the children's candidate lists.
		tree[i] = merge(tree[2*i+1], tree[2*i+2])
	}
	loop(0, 0, n-1)
	return MajorityChecker{tree, n}
}

// Query returns the element occurring at least threshold times in
// arr[left..right], or -1 when no stored candidate reaches the threshold.
func (this *MajorityChecker) Query(left int, right int, threshold int) int {
	tree := this.tree
	var loop func(i int, start int, end int) *Node
	loop = func(i int, start int, end int) *Node {
		// No overlap between the node range and the query range.
		if end < left || right < start {
			return nil
		}
		// Node range fully contained in the query range.
		if left <= start && end <= right {
			return tree[i]
		}
		mid := (start + end) / 2
		a := loop(2*i+1, start, mid)
		b := loop(2*i+2, mid+1, end)
		return merge(a, b)
	}
	res := loop(0, 0, this.size-1)
	if res == nil {
		return -1
	}
	// Candidates are sorted by count descending, so the best one is first.
	theNum := res.nums[0]
	if theNum.second >= threshold {
		return theNum.first
	}
	return -1
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
/**
* Your MajorityChecker object will be instantiated and called as such:
* obj := Constructor(arr);
* param_1 := obj.Query(left,right,threshold);
 */
package dackbox
import (
clusterDackBox "github.com/stackrox/rox/central/cluster/dackbox"
cveDackBox "github.com/stackrox/rox/central/cve/dackbox"
deploymentDackBox "github.com/stackrox/rox/central/deployment/dackbox"
imageDackBox "github.com/stackrox/rox/central/image/dackbox"
componentDackBox "github.com/stackrox/rox/central/imagecomponent/dackbox"
nsDackBox "github.com/stackrox/rox/central/namespace/dackbox"
"github.com/stackrox/rox/pkg/dackbox/keys/transformation"
)
// Precomposed key transformations between dackbox buckets. Each value maps an
// ID of one entity kind to the IDs of a related kind by walking the graph
// forward (parent -> child) or backward (child -> parent), deduplicating
// where a fan-out can produce repeats.
var (
	// ClusterToCVETransformation uses a graph context to transform a cluster ID into a cve ID.
	ClusterToCVETransformation = transformation.AddPrefix(clusterDackBox.Bucket).
		ThenMapToMany(transformation.ForwardFromContext(nsDackBox.Bucket)).
		ThenMapEachToMany(transformation.ForwardFromContext(deploymentDackBox.Bucket)).
		ThenMapEachToMany(transformation.ForwardFromContext(imageDackBox.Bucket)).
		Then(transformation.Dedupe()).
		ThenMapEachToMany(transformation.ForwardFromContext(componentDackBox.Bucket)).
		Then(transformation.Dedupe()).
		ThenMapEachToMany(transformation.ForwardFromContext(cveDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(cveDackBox.Bucket)).
		Then(transformation.Dedupe())

	// ClusterToClusterCVETransformation uses a graph context to transform a cluster ID into a cluster cve ID.
	ClusterToClusterCVETransformation = transformation.AddPrefix(clusterDackBox.Bucket).
		ThenMapToMany(transformation.ForwardFromContext(cveDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(cveDackBox.Bucket))

	// DeploymentToImageComponentTransformation uses a graph context to transform a deployment ID into a component ID.
	DeploymentToImageComponentTransformation = transformation.AddPrefix(deploymentDackBox.Bucket).
		ThenMapToMany(transformation.ForwardFromContext(imageDackBox.Bucket)).
		ThenMapEachToMany(transformation.ForwardFromContext(componentDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(componentDackBox.Bucket)).
		Then(transformation.Dedupe())

	// DeploymentToCVETransformation uses a graph context to transform a deployment ID into a cve ID.
	DeploymentToCVETransformation = transformation.AddPrefix(deploymentDackBox.Bucket).
		ThenMapToMany(transformation.ForwardFromContext(imageDackBox.Bucket)).
		ThenMapEachToMany(transformation.ForwardFromContext(componentDackBox.Bucket)).
		Then(transformation.Dedupe()).
		ThenMapEachToMany(transformation.ForwardFromContext(cveDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(cveDackBox.Bucket)).
		Then(transformation.Dedupe())

	// ImageToDeploymentTransformation uses a graph context to transform a image ID into a Deployment ID.
	ImageToDeploymentTransformation = transformation.AddPrefix(imageDackBox.Bucket).
		ThenMapToMany(transformation.BackwardFromContext(deploymentDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(deploymentDackBox.Bucket))

	// ImageToImageComponentTransformation transforms an image id to a set of image component ids.
	ImageToImageComponentTransformation = transformation.AddPrefix(imageDackBox.Bucket).
		ThenMapToMany(transformation.ForwardFromContext(componentDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(componentDackBox.Bucket))

	// ImageToCVETransformation uses a graph context to transform a image ID into a cve ID.
	ImageToCVETransformation = transformation.AddPrefix(imageDackBox.Bucket).
		ThenMapToMany(transformation.ForwardFromContext(componentDackBox.Bucket)).
		ThenMapEachToMany(transformation.ForwardFromContext(cveDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(cveDackBox.Bucket)).
		Then(transformation.Dedupe())

	// ComponentToDeploymentTransformation uses a graph context to transform a component ID into a Deployment ID.
	ComponentToDeploymentTransformation = transformation.AddPrefix(componentDackBox.Bucket).
		ThenMapToMany(transformation.BackwardFromContext(imageDackBox.Bucket)).
		ThenMapEachToMany(transformation.BackwardFromContext(deploymentDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(deploymentDackBox.Bucket)).
		Then(transformation.Dedupe())

	// ComponentToImageTransformation uses a graph context to transform a component ID into an image ID.
	ComponentToImageTransformation = transformation.AddPrefix(componentDackBox.Bucket).
		ThenMapToMany(transformation.BackwardFromContext(imageDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(imageDackBox.Bucket))

	// ComponentToCVETransformation uses a graph context to transform a component ID into a cve ID.
	ComponentToCVETransformation = transformation.AddPrefix(componentDackBox.Bucket).
		ThenMapToMany(transformation.ForwardFromContext(cveDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(cveDackBox.Bucket))

	// CVEToComponentTransformation uses a graph context to transform a cve ID into a component ID.
	CVEToComponentTransformation = transformation.AddPrefix(cveDackBox.Bucket).
		ThenMapToMany(transformation.BackwardFromContext(componentDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(componentDackBox.Bucket))

	// CVEToImageTransformation uses a graph context to transform a cve ID into an image ID.
	CVEToImageTransformation = transformation.AddPrefix(cveDackBox.Bucket).
		ThenMapToMany(transformation.BackwardFromContext(componentDackBox.Bucket)).
		ThenMapEachToMany(transformation.BackwardFromContext(imageDackBox.Bucket)).
		Then(transformation.Dedupe()).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(imageDackBox.Bucket))

	// CVEToDeploymentTransformation uses a graph context to transform a cve ID into a deployment ID.
	CVEToDeploymentTransformation = transformation.AddPrefix(cveDackBox.Bucket).
		ThenMapToMany(transformation.BackwardFromContext(componentDackBox.Bucket)).
		ThenMapEachToMany(transformation.BackwardFromContext(imageDackBox.Bucket)).
		Then(transformation.Dedupe()).
		ThenMapEachToMany(transformation.BackwardFromContext(deploymentDackBox.Bucket)).
		ThenMapEachToOne(transformation.StripPrefixUnchecked(deploymentDackBox.Bucket)).
		Then(transformation.Dedupe())

	// ComponentCVEEdgeToCVETransformation transforms a component:cve edge ID into a cve ID.
	ComponentCVEEdgeToCVETransformation = transformation.Split([]byte{':'}).
		Then(transformation.AtIndex(1)).
		ThenMapEachToOne(transformation.Decode())

	// ComponentCVEEdgeToComponentTransformation transforms a component:cve edge ID into a component ID.
	ComponentCVEEdgeToComponentTransformation = transformation.Split([]byte{':'}).
		Then(transformation.AtIndex(0)).
		ThenMapEachToOne(transformation.Decode())

	// ClusterCVEEdgeToCVETransformation transforms a cluster:cve ID into a cve ID.
	ClusterCVEEdgeToCVETransformation = transformation.Split([]byte{':'}).
		Then(transformation.AtIndex(1)).
		ThenMapEachToOne(transformation.Decode())

	// ImageComponentEdgeToImageTransformation transforms a image:component ID into an image ID.
	ImageComponentEdgeToImageTransformation = transformation.Split([]byte{':'}).
		Then(transformation.AtIndex(0)).
		ThenMapEachToOne(transformation.Decode())

	// ImageComponentEdgeToComponentTransformation transforms a image:component ID into a component ID.
	ImageComponentEdgeToComponentTransformation = transformation.Split([]byte{':'}).
		Then(transformation.AtIndex(1)).
		ThenMapEachToOne(transformation.Decode())

	// ImageComponentEdgeToDeploymentTransformation transforms an image:component edge id into a set of deployment ids.
	ImageComponentEdgeToDeploymentTransformation = ImageComponentEdgeToImageTransformation.
		ThenMapEachToMany(ImageToDeploymentTransformation)

	// ImageComponentEdgeToCVETransformation transforms an image:component edge id into a set of cve ids.
	ImageComponentEdgeToCVETransformation = ImageComponentEdgeToComponentTransformation.
		ThenMapEachToMany(ComponentToCVETransformation)

	// ComponentCVEEdgeToDeploymentTransformation transforms a component:cve edge id into a deployment id.
	ComponentCVEEdgeToDeploymentTransformation = ComponentCVEEdgeToComponentTransformation.
		ThenMapEachToMany(ComponentToDeploymentTransformation)

	// ComponentCVEEdgeToImageTransformation transforms a component:cve edge id into an image id.
	ComponentCVEEdgeToImageTransformation = ComponentCVEEdgeToComponentTransformation.
		ThenMapEachToMany(ComponentToImageTransformation)
)
//go:generate genny -pkg=column -in=column_generate.go -out=column_numbers.go gen "number=float32,float64,int,int16,int32,int64,uint,uint16,uint32,uint64"
package column
import (
"fmt"
"reflect"
"sync"
"github.com/kelindar/bitmap"
"github.com/kelindar/column/commit"
)
// columnType represents a type of a column. Values other than typeGeneric are
// single bits so a column can advertise several capabilities at once.
type columnType uint8

const (
	typeGeneric = columnType(0)      // Generic column, every column should support this
	typeNumeric = columnType(1 << 0) // Numeric column supporting float64, int64 or uint64
	typeTextual = columnType(1 << 1) // Textual column supporting strings
)
// typeOf resolves all supported types of the column by probing which optional
// interfaces the implementation satisfies.
func typeOf(column Column) (typ columnType) {
	if _, isNumeric := column.(Numeric); isNumeric {
		typ |= typeNumeric
	}
	if _, isTextual := column.(Textual); isTextual {
		typ |= typeTextual
	}
	return typ
}
// --------------------------- Contracts ----------------------------

// Column represents a column implementation.
type Column interface {
	Grow(idx uint32)                      // ensure capacity to hold index idx
	Apply(*commit.Reader)                 // apply a batch of updates
	Value(idx uint32) (interface{}, bool) // read the value at idx
	Contains(idx uint32) bool             // whether idx holds a value
	Index() *bitmap.Bitmap                // fill bitmap of occupied indices
	Snapshot(chunk commit.Chunk, dst *commit.Buffer) // serialize a chunk into dst
}

// Numeric represents a column that stores numbers.
type Numeric interface {
	Column
	LoadFloat64(uint32) (float64, bool)
	LoadUint64(uint32) (uint64, bool)
	LoadInt64(uint32) (int64, bool)
	FilterFloat64(uint32, bitmap.Bitmap, func(v float64) bool)
	FilterUint64(uint32, bitmap.Bitmap, func(v uint64) bool)
	FilterInt64(uint32, bitmap.Bitmap, func(v int64) bool)
}

// Textual represents a column that stores strings.
type Textual interface {
	Column
	LoadString(uint32) (string, bool)
	FilterString(uint32, bitmap.Bitmap, func(v string) bool)
}

// --------------------------- Constructors ----------------------------

// Various column constructor functions for a specific types. These exported
// aliases keep the unexported (partly generated) constructors stable as API.
var (
	ForString  = makeStrings
	ForFloat32 = makeFloat32s
	ForFloat64 = makeFloat64s
	ForInt     = makeInts
	ForInt16   = makeInt16s
	ForInt32   = makeInt32s
	ForInt64   = makeInt64s
	ForUint    = makeUints
	ForUint16  = makeUint16s
	ForUint32  = makeUint32s
	ForUint64  = makeUint64s
	ForBool    = makeBools
	ForEnum    = makeEnum
	ForKey     = makeKey
)
// ForKind creates a new column instance for a specified reflect.Kind.
// It returns an error for kinds that have no column implementation.
func ForKind(kind reflect.Kind) (Column, error) {
	switch kind {
	case reflect.Float32:
		return makeFloat32s(), nil
	case reflect.Float64:
		return makeFloat64s(), nil
	case reflect.Int:
		return makeInts(), nil
	case reflect.Int16:
		return makeInt16s(), nil
	case reflect.Int32:
		return makeInt32s(), nil
	case reflect.Int64:
		return makeInt64s(), nil
	case reflect.Uint:
		return makeUints(), nil
	case reflect.Uint16:
		return makeUint16s(), nil
	case reflect.Uint32:
		return makeUint32s(), nil
	case reflect.Uint64:
		return makeUint64s(), nil
	case reflect.Bool:
		return makeBools(), nil
	case reflect.String:
		return makeStrings(), nil
	default:
		return nil, fmt.Errorf("column: unsupported column kind (%v)", kind)
	}
}
// --------------------------- Column ----------------------------

// column represents a column wrapper that synchronizes operations.
type column struct {
	Column
	lock sync.RWMutex // The lock to protect the entire column
	kind columnType   // The type of the column
	name string       // The name of the column
}

// columnFor creates a synchronized column for a column implementation.
func columnFor(name string, v Column) *column {
	return &column{
		kind:   typeOf(v),
		name:   name,
		Column: v,
	}
}

// IsIndex returns whether the column is an index.
func (c *column) IsIndex() bool {
	_, ok := c.Column.(*columnIndex)
	return ok
}

// IsNumeric checks whether the column type supports numerical operations.
func (c *column) IsNumeric() bool {
	return (c.kind & typeNumeric) == typeNumeric
}

// IsTextual checks whether the column type supports string operations.
func (c *column) IsTextual() bool {
	return (c.kind & typeTextual) == typeTextual
}
// Grow grows the size of the column; an exclusive lock is held because the
// underlying storage may be reallocated.
func (c *column) Grow(idx uint32) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.Column.Grow(idx)
}

// Apply performs a series of operations on a column.
// NOTE(review): only the read lock is taken here — presumably Apply never
// reallocates storage (Grow does that under the write lock); confirm before
// changing either locking scheme.
func (c *column) Apply(r *commit.Reader) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	r.Rewind()
	c.Column.Apply(r)
}

// Snapshot takes a snapshot of a column into buffer, skipping index columns
// (they are derived and can be rebuilt). Returns whether a snapshot was taken.
func (c *column) Snapshot(chunk commit.Chunk, buffer *commit.Buffer) bool {
	if c.IsIndex() {
		return false
	}
	buffer.Reset(c.name)
	c.Column.Snapshot(chunk, buffer)
	return true
}
// Value retrieves a value at a specified index, delegating to the wrapped
// implementation. The original copied the results through named returns for
// no effect; a direct return is equivalent and clearer.
func (c *column) Value(idx uint32) (interface{}, bool) {
	return c.Column.Value(idx)
}
// --------------------------- booleans ----------------------------

// columnBool represents a boolean column backed by a single bitmap: a set bit
// means "value present and true".
type columnBool struct {
	data bitmap.Bitmap
}

// makeBools creates a new boolean column.
func makeBools() Column {
	return &columnBool{
		data: make(bitmap.Bitmap, 0, 4),
	}
}

// Grow grows the size of the column until we have enough to store idx.
func (c *columnBool) Grow(idx uint32) {
	c.data.Grow(idx)
}

// Apply applies a set of operations to the column.
func (c *columnBool) Apply(r *commit.Reader) {
	for r.Next() {
		// Bit mask for the offset's position within its 64-bit word.
		v := uint64(1) << (r.Offset & 0x3f)
		switch r.Type {
		case commit.PutTrue:
			c.data[r.Offset>>6] |= v // set the bit; Offset>>6 is the word index
		case commit.PutFalse: // also "delete"
			c.data[r.Offset>>6] &^= v // clear the bit
		}
	}
}

// Value retrieves a value at a specified index. For booleans "present" and
// "true" coincide, so both results are the same bit.
func (c *columnBool) Value(idx uint32) (interface{}, bool) {
	value := c.data.Contains(idx)
	return value, value
}

// Contains checks whether the column has a value at a specified index.
func (c *columnBool) Contains(idx uint32) bool {
	return c.data.Contains(idx)
}

// Index returns the fill list for the column.
func (c *columnBool) Index() *bitmap.Bitmap {
	return &c.data
}

// Snapshot writes the entire column into the specified destination buffer.
func (c *columnBool) Snapshot(chunk commit.Chunk, dst *commit.Buffer) {
	dst.PutBitmap(commit.PutTrue, chunk, c.data)
}
// boolReader represents a read-only accessor for boolean values.
type boolReader struct {
	cursor *uint32
	reader Column
}

// Get loads the value at the current transaction cursor. For boolean columns
// presence and truth coincide, so Contains is the lookup.
func (s boolReader) Get() bool {
	return s.reader.Contains(*s.cursor)
}

// boolReaderFor creates a new reader bound to the transaction's cursor;
// panics when the column does not exist.
func boolReaderFor(txn *Txn, columnName string) boolReader {
	column, ok := txn.columnAt(columnName)
	if !ok {
		panic(fmt.Errorf("column: column '%s' does not exist", columnName))
	}
	return boolReader{
		cursor: &txn.cursor,
		reader: column.Column,
	}
}

// boolWriter represents a read-write accessor for boolean values.
type boolWriter struct {
	boolReader
	writer *commit.Buffer
}

// Set queues a write of value at the current transaction cursor.
func (s boolWriter) Set(value bool) {
	s.writer.PutBool(*s.cursor, value)
}

// Bool returns a boolean column accessor for the named column.
func (txn *Txn) Bool(columnName string) boolWriter {
	return boolWriter{
		boolReader: boolReaderFor(txn, columnName),
		writer:     txn.bufferFor(columnName),
	}
}
// --------------------------- Accessor ----------------------------

// anyReader represents a read-only accessor for any value.
type anyReader struct {
	cursor *uint32
	reader Column
}

// Get loads the value at the current transaction cursor.
func (s anyReader) Get() (interface{}, bool) {
	return s.reader.Value(*s.cursor)
}

// anyReaderFor creates a new any reader bound to the transaction's cursor;
// panics when the column does not exist.
func anyReaderFor(txn *Txn, columnName string) anyReader {
	column, ok := txn.columnAt(columnName)
	if !ok {
		panic(fmt.Errorf("column: column '%s' does not exist", columnName))
	}
	return anyReader{
		cursor: &txn.cursor,
		reader: column.Column,
	}
}

// anyWriter represents a read-write accessor for any column type.
type anyWriter struct {
	anyReader
	writer *commit.Buffer
}

// Set queues a write of value at the current transaction cursor.
func (s anyWriter) Set(value interface{}) {
	s.writer.PutAny(commit.Put, *s.cursor, value)
}

// Any returns an untyped column accessor for the named column.
func (txn *Txn) Any(columnName string) anyWriter {
	return anyWriter{
		anyReader: anyReaderFor(txn, columnName),
		writer:    txn.bufferFor(columnName),
	}
}
// --------------------------- funcs ----------------------------
// resize calculates the new required capacity and a new index
func resize(capacity int, v uint32) int {
	const threshold = 256

	// Small indices: round v up to the next power of two strictly greater
	// than v, using bit smearing.
	if v < threshold {
		n := v
		n |= n >> 1
		n |= n >> 2
		n |= n >> 4
		n |= n >> 8
		n |= n >> 16
		return int(n + 1)
	}

	// Large indices: grow geometrically (~25% plus a constant) until the
	// capacity covers index v.
	if capacity < threshold {
		capacity = threshold
	}
	for capacity > 0 && capacity < int(v+1) {
		capacity += (capacity + 3*threshold) / 4
	}
	return capacity
}
package main
import (
"log"
"math"
"os"
"strconv"
"github.com/TomasCruz/projecteuler"
)
/*
Problem 62; Cubic permutations
The cube, 41063625 (345^3), can be permuted to produce two other cubes: 56623104 (384^3) and 66430125 (405^3).
In fact, 41063625 is the smallest cube which has exactly three permutations of its digits which are also cube.
Find the smallest cube for which exactly five permutations of its digits are cube.
*/
// main reads an optional permutation-count argument (default 5) and runs the
// timed solver.
func main() {
	var cubicPermutationsCount int
	if len(os.Args) > 1 {
		cubicPermutationsCount64, err := strconv.ParseInt(os.Args[1], 10, 64)
		if err != nil {
			log.Fatal("bad argument")
		}
		cubicPermutationsCount = int(cubicPermutationsCount64)
	} else {
		cubicPermutationsCount = 5
	}
	projecteuler.Timed(calc, cubicPermutationsCount)
}

// calc finds the smallest cube having exactly cubicPermutationsCount digit
// permutations that are also cubes, returned as a decimal string. It scans
// cubes by digit count so all permutation groups are complete per iteration.
func calc(args ...interface{}) (result string, err error) {
	// 10^(n-1) <= n digit cube < 10^n
	// n-1 <= log(x) < n
	// 10^[(n-1)/3] <= x < 10^[n/3]
	cubicPermutationsCount := args[0].(int)
	weights := fillWeights()
	for digitCount := 3; ; digitCount++ {
		// Cube roots bounding all cubes with exactly digitCount digits.
		start := math.Floor(math.Pow(10, float64(digitCount-1)/3.0))
		end := math.Floor(math.Pow(10, float64(digitCount)/3.0))
		// masks groups cubes by their digit-multiset signature;
		// maskMin remembers the smallest cube per signature.
		masks := make(map[int]map[int64]struct{})
		maskMin := make(map[int]int64)
		for i := start; i < end; i++ {
			cube := int64(i) * int64(i) * int64(i)
			currMask := getMask(cube, weights)
			if _, ok := masks[currMask]; !ok {
				masks[currMask] = make(map[int64]struct{})
				maskMin[currMask] = cube
			}
			masks[currMask][cube] = struct{}{}
		}
		// Pick the smallest representative among groups of the right size.
		res := int64(math.MaxInt64)
		for k, v := range masks {
			if len(v) == cubicPermutationsCount && maskMin[k] < res {
				res = maskMin[k]
			}
		}
		if res != int64(math.MaxInt64) {
			result = strconv.FormatInt(res, 10)
			return
		}
	}
}
func getMask(cube int64, weights []int) int {
bi, _ := projecteuler.MakeBigInt(strconv.FormatInt(cube, 10))
digits := []byte(bi.String())
mask := 0
for i := 0; i < len(digits); i++ {
mask += weights[digits[i]-'0']
}
return mask
}
// fillWeights builds the per-digit weight table: weights[d] = 8^d, so each
// digit contributes to a disjoint 3-bit field of the mask.
func fillWeights() []int {
	weights := make([]int, 10)
	w := 0x1
	for i := range weights {
		weights[i] = w
		w *= 8
	}
	return weights
}
package tonacity
// Welcome to music theory, where nothing is unanimously agreed upon, there are multiple equivalent ways of saying the same thing, and the distance between two notes is a second.
// Evidence:
// - What physical frequency is a particular note?
// - Which C is middle C?
// - A𝄫 == G == F𝄪
// - B♭ == A♯
// - C♭ == B
// - In C Major the distance between C (note I in the scale), and F (note IV in the scale) is "a fourth"
// In this library presentation has been kept completely separate to the data structures. This is because the name of something typically depends entirely on context.
const (
	// WholeStepValue The numerical value of a whole step.
	WholeStepValue = 2
	// HalfStepValue The numerical value of a half step.
	HalfStepValue = WholeStepValue / 2
	// OctaveValue The numerical value of an entire octave (12 half steps).
	OctaveValue = HalfStepValue * 12
)

// HalfSteps The type for specifying a number of half steps; signed, so a
// negative value means a downward interval.
type HalfSteps int8

// Singer Make that object SING. Something which can, given a valid starting pitch, produce an indefinite sequence of notes. For example, the G Major key signature will produce,
// given a starting pitch of B3, the pitch classes B3, C4, D4, E4, F♯4, G4, A4, B4, C5, D5, etc.
// Not all starting pitches may be valid, for example if the receiver is the key of G Major then a starting pitch of E♭, which is not in the key, is not valid.
type Singer interface {
	// Sing Generates the next note. Bool will be false if there are no more notes.
	Sing() (pitch Pitch, more bool)
}

// PitchClassProducer Something which contains a set of pitch classes, where order and starting point are irrelevant. For example, the G Major key signature will produce
// the pitch classes G, A, B, C, D, E, F♯.
type PitchClassProducer interface {
	// Generates a slice of unique pitch classes, returned in ascending order (to be useful to the caller, not because the order necessarily has a meaning).
	ProducePitchClasses() []*PitchClass
}

// Transposer Something which can be transposed to alter its tone by a number of half steps. For example: transposing the scale of G Major
// by (plus) three half steps will yield the scale of B♭ Major.
type Transposer interface {
	Transpose(halfSteps HalfSteps)
}
// PatternRepeatingSinger A singer that can produce pitches indefinitely. Unless the pattern of steps purposefully loops back, i.e. the sum of all is zero, then
// this singer does NOT loop, i.e., given infinite time it will either tend to a pitch of negative or positive infinity hertz.
type PatternRepeatingSinger struct {
	pattern   Pattern
	nextPitch Pitch
	offset    int
}

// Sing keeps producing the next pitch in the sequence according to its
// underlying pattern of half-step intervals; more is always true.
//
// Fixed: the receiver must be a pointer. The original value receiver mutated
// a copy, so nextPitch and offset never advanced and every call returned the
// same pitch forever.
func (singer *PatternRepeatingSinger) Sing() (pitch Pitch, more bool) {
	pitch = singer.nextPitch
	// Advance to the next pitch and wrap the pattern offset.
	singer.nextPitch.Transpose(singer.pattern.At(singer.offset))
	singer.offset = (singer.offset + 1) % singer.pattern.Length()
	// Can always produce more notes
	more = true
	return
}
// KeySignature holds the set of pitch classes that make up a key.
type KeySignature struct {
	pitches []*PitchClass
}

// TimeSignature holds a note count per bar and the note value being counted.
type TimeSignature struct {
	noteCount int
	noteValue int
}

// Bar is a single measure governed by a time signature.
type Bar struct {
	time TimeSignature
}

// Note represents a single note's duration.
type Note struct {
	// The value of this note is one over this value
	value int
}

// Stave is a sequence of bars.
type Stave struct {
	bars []Bar
}

// Clef is a placeholder anchored by firstNote.
// NOTE(review): the semantics of firstNote (MIDI number? diatonic index?) are
// not shown here — confirm before documenting further.
type Clef struct {
	firstNote int
}
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stats
import (
"math"
"github.com/stockparfait/errors"
"github.com/stockparfait/stockparfait/db"
)
// Timeseries stores numeric values along with timestamps. The timestamps are
// always sorted in ascending order.
type Timeseries struct {
	dates []db.Date
	data  []float64
}

// NewTimeseries creates a new empty Timeseries.
func NewTimeseries() *Timeseries {
	return &Timeseries{}
}

// Dates of the Timeseries. The internal slice is returned without copying;
// callers must not mutate it.
func (t *Timeseries) Dates() []db.Date { return t.dates }

// Data of the Timeseries. The internal slice is returned without copying;
// callers must not mutate it.
func (t *Timeseries) Data() []float64 { return t.data }
// Init assigns values to the Timeseries. The dates are expected to be sorted in
// ascending order (not checked). It returns self for inline declarations, and
// panics if the arguments don't have the same length.
//
// The slices are aliased, not copied; use Copy when the caller retains
// ownership of dates/data.
func (t *Timeseries) Init(dates []db.Date, data []float64) *Timeseries {
	if len(dates) != len(data) {
		panic(errors.Reason("len(dates) [%d] != len(data) [%d]",
			len(dates), len(data)))
	}
	t.dates = dates
	t.data = data
	return t
}
// Copy clones both input slices before assigning them via Init, so the caller
// keeps exclusive ownership of its arguments. Otherwise identical to Init
// (including the panic on a length mismatch).
func (t *Timeseries) Copy(dates []db.Date, data []float64) *Timeseries {
	return t.Init(
		append([]db.Date(nil), dates...),
		append([]float64(nil), data...),
	)
}
// Check that Timeseries is consistent: the lengths of dates and data are the
// same and the dates are ordered in ascending order.
func (t *Timeseries) Check() error {
	if len(t.dates) != len(t.data) {
		return errors.Reason("len(dates) [%d] != len(data) [%d]",
			len(t.dates), len(t.data))
	}
	for i, d := range t.dates {
		if i == 0 {
			continue
		}
		// Dates must be strictly increasing; duplicates are invalid too.
		if !t.dates[i-1].Before(d) {
			return errors.Reason("dates[%d] = %s >= dates[%d] = %s",
				i-1, t.dates[i-1].String(), i, d.String())
		}
	}
	return nil
}

// rangeSlice returns slice indices for dates to extract an inclusive interval
// between start and end timestamps. The result is a half-open index range
// [s, e); an empty interval is reported as (0, 0).
func rangeSlice(dates []db.Date, start, end db.Date) (s, e int) {
	if start.After(end) {
		return 0, 0
	}
	s = len(dates)
	e = len(dates)
	var startSet, endSet bool
	for i, d := range dates {
		// s is the first index with dates[i] >= start.
		if !startSet && !start.After(d) {
			s = i
			startSet = true
		}
		// e is the first index with dates[i] > end.
		if !endSet && end.Before(d) {
			e = i
			endSet = true
		}
		if startSet && endSet {
			break
		}
	}
	if s >= e {
		return 0, 0
	}
	return
}

// Range extracts the sub-series from the inclusive time interval. It may return
// an empty Timeseries, but never nil. When the interval spans the entire
// series, the receiver itself is returned.
func (t *Timeseries) Range(start, end db.Date) *Timeseries {
	s, e := rangeSlice(t.dates, start, end)
	if s == 0 && e == len(t.dates) {
		return t
	}
	return NewTimeseries().Init(t.dates[s:e], t.data[s:e])
}

// Shift the timeseries in time. A positive shift moves the values into the
// future, negative - into the past. The values outside of the date range are
// dropped. It may return an empty Timeseries, but never nil.
func (t *Timeseries) Shift(shift int) *Timeseries {
	if shift == 0 {
		return t
	}
	absShift := shift
	if absShift < 0 {
		absShift = -shift
	}
	l := len(t.dates)
	if absShift >= l {
		return NewTimeseries()
	}
	// Pair values with later (shift > 0) or earlier (shift < 0) dates,
	// truncating whatever falls off either end of the series.
	if shift > 0 {
		return NewTimeseries().Init(t.dates[shift:], t.data[:l-shift])
	}
	return NewTimeseries().Init(t.dates[:l+shift], t.data[-shift:])
}
// DeltaParams are configuration parameters for computing delta time series. By
// default, d[t] = x[t] - x[t-1].
type DeltaParams struct {
	Relative   bool // d[t] = (x[t] - x[t-1]) / x[t-1]
	Log        bool // use log(x[t]) instead of x[t]
	Normalized bool // normalize deltas so mean=0 and MAD=1
}

// Deltas computes the deltas from the timeseries. Each delta is stamped with
// the later of its two dates. Returns an error when normalization is requested
// but the deltas have zero MAD.
func (t *Timeseries) Deltas(params DeltaParams) (*Timeseries, error) {
	data := t.Data()
	if params.Log {
		// Work on log-values in a fresh slice; the original series is
		// not mutated.
		data = make([]float64, len(t.Data()))
		for i, d := range t.Data() {
			data[i] = math.Log(d)
		}
	}
	deltas := []float64{}
	dates := []db.Date{}
	for i := range data {
		if i == 0 {
			continue
		}
		d := data[i] - data[i-1]
		if params.Relative {
			// Skip points with a zero base to avoid dividing by zero;
			// the corresponding date is dropped from the result.
			if data[i-1] == 0.0 {
				continue
			}
			d = d / data[i-1]
		}
		deltas = append(deltas, d)
		dates = append(dates, t.Dates()[i])
	}
	if params.Normalized {
		s := NewSample().Init(deltas)
		mean := s.Mean()
		mad := s.MAD()
		if mad == 0.0 {
			return nil, errors.Reason("MAD(deltas)=0")
		}
		for i, d := range deltas {
			deltas[i] = (d - mean) / mad
		}
	}
	return NewTimeseries().Init(dates, deltas), nil
}
// PriceField is an enum type indicating which PriceRow field to use.
type PriceField uint8

const (
	PriceUnadjusted PriceField = iota
	PriceSplitAdjusted
	PriceFullyAdjusted
	PriceDollarVolume
)

// FromPrices initializes the Timeseries from PriceRow slice, selecting the
// field named by f for each row. It panics on an unsupported PriceField.
func (t *Timeseries) FromPrices(prices []db.PriceRow, f PriceField) *Timeseries {
	dates := make([]db.Date, len(prices))
	data := make([]float64, len(prices))
	for i, p := range prices {
		dates[i] = p.Date
		switch f {
		case PriceUnadjusted:
			data[i] = float64(p.CloseUnadjusted())
		case PriceSplitAdjusted:
			data[i] = float64(p.CloseSplitAdjusted)
		case PriceFullyAdjusted:
			data[i] = float64(p.CloseFullyAdjusted)
		case PriceDollarVolume:
			data[i] = float64(p.DollarVolume)
		default:
			panic(errors.Reason("unsupported PriceField: %d", f))
		}
	}
	return t.Init(dates, data)
}
package main
import (
"fmt"
"io/ioutil"
"strings"
)
// parse reads the named file and returns its contents split into lines,
// each as its own byte slice. A trailing newline yields a final empty
// line, matching strings.Split semantics. Panics if the file cannot be
// read.
func parse(filename string) [][]byte {
	raw, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	lines := strings.Split(string(raw), "\n")
	result := make([][]byte, 0, len(lines))
	for _, l := range lines {
		result = append(result, []byte(l))
	}
	return result
}
// position is a 2-D grid coordinate: x is the column, y is the row
// (y grows downward, matching the maze's slice-of-rows layout).
type position struct {
	x, y int
}

// String renders the position as "(x, y)".
func (p position) String() string {
	return fmt.Sprintf("(%v, %v)", p.x, p.y)
}
// velocity is a per-step displacement in grid cells.
type velocity position

// Move returns the position displaced by v.
func (p position) Move(v velocity) position {
	return position{
		x: p.x + v.x,
		y: p.y + v.y,
	}
}

// IsZero reports whether v has no movement on either axis.
func (v velocity) IsZero() bool {
	if v.x != 0 {
		return false
	}
	return v.y == 0
}
// findStart locates the maze entry point: the first non-space cell in
// the top row. Panics if the top row contains only spaces.
func findStart(maze [][]byte) position {
	for x, c := range maze[0] {
		if c != ' ' {
			return position{x: x, y: 0}
		}
	}
	panic("nothing in the top row")
}
// canMoveHorizontal reports whether a walker at (x, y) can continue
// horizontally in direction dx (-1 or +1). Horizontal path cells
// ('-', '+') are passable, spaces and the row edge block, and any other
// cell (a letter, or a '|' crossing) is passable only if the cell beyond
// it also is.
func canMoveHorizontal(maze [][]byte, x, y, dx int) bool {
	nx := x + dx
	if nx < 0 || nx >= len(maze[y]) {
		// Off the edge of the row.
		return false
	}
	switch maze[y][nx] {
	case '-', '+':
		// Definitely passable.
		return true
	case ' ':
		// Definitely blocked.
		return false
	default:
		// '|' or a letter: only passable if we can continue past it.
		return canMoveHorizontal(maze, nx, y, dx)
	}
}
// canMoveVertical reports whether a walker at (x, y) can continue
// vertically in direction dy (-1 or +1). Vertical path cells ('|', '+')
// are passable, spaces and the column edge block, and any other cell
// (a letter, or a '-' crossing) is passable only if the cell beyond it
// also is.
func canMoveVertical(maze [][]byte, x, y, dy int) bool {
	ny := y + dy
	if ny < 0 || ny >= len(maze) {
		// Off the top or bottom of the maze.
		return false
	}
	switch maze[ny][x] {
	case '|', '+':
		// Definitely passable.
		return true
	case ' ':
		// Definitely blocked.
		return false
	default:
		// '-' or a letter: only passable if we can continue past it.
		return canMoveVertical(maze, x, ny, dy)
	}
}
// newDirection picks the turn to take at a '+' cell: perpendicular to
// the current velocity, checking the negative direction (left/up) before
// the positive one (right/down). Panics if neither side is passable.
func newDirection(maze [][]byte, cur position, vel velocity) velocity {
	if vel.x == 0 {
		// Moving vertically; turn left or right.
		for _, dx := range []int{-1, 1} {
			if canMoveHorizontal(maze, cur.x, cur.y, dx) {
				return velocity{x: dx, y: 0}
			}
		}
	} else {
		// Moving horizontally; turn up or down.
		for _, dy := range []int{-1, 1} {
			if canMoveVertical(maze, cur.x, cur.y, dy) {
				return velocity{x: 0, y: dy}
			}
		}
	}
	panic(fmt.Sprintf("dead end at %v!", cur))
}
// iLikeToMoveItMoveIt walks the maze from the top-row entry point until
// the path runs off into a space. It returns the final position, the
// letters collected in visit order, and the number of steps taken (one
// per path cell visited, including the starting cell).
func iLikeToMoveItMoveIt(maze [][]byte) (position, []byte, int) {
	cur := findStart(maze)
	// The path always enters from the top, heading downward.
	vel := velocity{x: 0, y: 1}
	chars := []byte{}
	steps := 0
	for {
		c := maze[cur.y][cur.x]
		switch c {
		case ' ':
			// Walked off the path: done. Note steps is NOT incremented
			// for the space cell itself.
			return cur, chars, steps
		case '|':
			fallthrough
		case '-':
			// Keep going in the direction we were going.
		case '+':
			// Make a turn.
			vel = newDirection(maze, cur, vel)
		default:
			// Collect the letter and keep moving.
			chars = append(chars, c)
		}
		cur = cur.Move(vel)
		steps++
	}
}
// main runs the maze walk over the puzzle input and prints the final
// position, the letters collected along the path, and the step count.
func main() {
	// Swap in the sample maze for a quick sanity check:
	// maze := parse("test.txt")
	maze := parse("input.txt")
	cur, chars, steps := iLikeToMoveItMoveIt(maze)
	fmt.Println(cur)           // final position where the path ends
	fmt.Println(string(chars)) // letters collected along the path
	fmt.Println(steps)         // number of steps taken
}
package csg
import (
"fmt"
"io"
)
// NewTriangle creates a new triangular polygon from the three vertices
// a, b, c, lying on the given plane.
func NewTriangle(a, b, c *Vertex, plane *Plane) *Polygon {
	vertices := []*Vertex{a, b, c}
	return &Polygon{Vertices: vertices, Plane: plane}
}
// Polygon is a 3 dimensional polygon with 3 or more vertices
type Polygon struct {
	Vertices []*Vertex // ordered boundary vertices
	Plane    *Plane    // plane containing the vertices; its normal defines the facing
}
// NewPolygonFromVertices creates a new polygon from a set of vertices.
// At least three vertices are required; the polygon's plane is derived
// from the positions of the first three.
func NewPolygonFromVertices(vertices []*Vertex) *Polygon {
	return &Polygon{Vertices: vertices, Plane: NewPlaneFromPoints(vertices[0].Position, vertices[1].Position, vertices[2].Position)}
}
// triangulate will (poorly) triangulate the vertex loop on plane.
// Counts of 3-6 vertices use hand-picked fans; anything larger peels one
// triangle off the front, recurses on the remaining vertices, and closes
// the fan back to vertex 0. Fewer than 3 vertices yields an empty slice.
func triangulate(vertices []*Vertex, plane *Plane) []*Polygon {
	t := make([]*Polygon, 0)
	l := len(vertices)
	if l == 3 {
		// Already a triangle.
		t = append(t, NewTriangle(vertices[0], vertices[1], vertices[2], plane))
	} else if l == 4 {
		// Quad: split along the 0-2 diagonal.
		t = append(t, NewTriangle(vertices[0], vertices[1], vertices[2], plane))
		t = append(t, NewTriangle(vertices[0], vertices[2], vertices[3], plane))
	} else if l == 5 {
		t = append(t, NewTriangle(vertices[0], vertices[1], vertices[2], plane))
		t = append(t, NewTriangle(vertices[0], vertices[2], vertices[4], plane))
		t = append(t, NewTriangle(vertices[2], vertices[3], vertices[4], plane))
	} else if l == 6 {
		t = append(t, NewTriangle(vertices[0], vertices[1], vertices[2], plane))
		t = append(t, NewTriangle(vertices[2], vertices[3], vertices[4], plane))
		t = append(t, NewTriangle(vertices[5], vertices[2], vertices[4], plane))
		t = append(t, NewTriangle(vertices[0], vertices[2], vertices[5], plane))
	} else if l > 6 {
		// Peel off (0,1,2), triangulate the remainder, then close to 0.
		t = append(t, NewTriangle(vertices[0], vertices[1], vertices[2], plane))
		t = append(t, triangulate(vertices[2:], plane)...)
		t = append(t, NewTriangle(vertices[0], vertices[2], vertices[l-1], plane))
	}
	return t
}
// Triangles returns a triangulation of this polygon: a slice of triangle
// polygons that share the polygon's plane.
func (p *Polygon) Triangles() []*Polygon {
	tris := triangulate(p.Vertices, p.Plane)
	return tris
}
// IsTriangle reports whether this polygon has exactly three vertices.
func (p *Polygon) IsTriangle() bool {
	n := len(p.Vertices)
	return n == 3
}
// Clone returns a deep copy of this polygon: each vertex is cloned, and
// a fresh plane is rebuilt from the first three cloned positions via
// NewPolygonFromVertices.
func (p *Polygon) Clone() *Polygon {
	// Pre-size to avoid repeated slice growth while copying vertices.
	vs := make([]*Vertex, 0, len(p.Vertices))
	for _, cp := range p.Vertices {
		vs = append(vs, cp.Clone())
	}
	return NewPolygonFromVertices(vs)
}
// Flip flips the normal of this polygon by reversing the vertex order
// and flipping the normal on the associated plane.
func (p *Polygon) Flip() {
	// Two-pointer in-place reversal of the vertex slice.
	for i, j := 0, len(p.Vertices)-1; i < j; i, j = i+1, j-1 {
		p.Vertices[i], p.Vertices[j] = p.Vertices[j], p.Vertices[i]
	}
	p.Plane.Flip()
}
// MarshalToASCIISTL will write this polygon out as ASCII STL: a
// "facet normal" line, an "outer loop" block with one line per vertex,
// then "endloop"/"endfacet". The caller is responsible for the enclosing
// "solid"/"endsolid" lines.
// NOTE(review): write errors from Fprintf are silently ignored here.
func (p *Polygon) MarshalToASCIISTL(out io.Writer) {
	fmt.Fprintf(out, "facet normal %f %f %f\n", p.Plane.Normal.X, p.Plane.Normal.Y, p.Plane.Normal.Z)
	fmt.Fprintf(out, "\touter loop\n")
	// Per-vertex output is delegated to the position's own STL marshaller.
	for _, v := range p.Vertices {
		v.Position.MarshalToASCIISTL(out)
	}
	fmt.Fprintf(out, "\tendloop\n")
	fmt.Fprintf(out, "endfacet\n")
}
package codec
import (
"time"
"github.com/juju/errors"
"github.com/pingcap/tidb/mysql"
)
// Type-tag bytes written before each encoded value so Decode can tell
// what follows. These values are part of the persisted encoding; do not
// reorder existing entries.
const (
	nilFlag byte = iota
	bytesFlag
	compactBytesFlag
	intFlag
	uintFlag
	floatFlag
	decimalFlag
	durationFlag
)
// encode appends each value in vals to b, each prefixed with a one-byte
// type flag. When comparable is true, string/[]byte values use the
// ordering-preserving EncodeBytes form; otherwise the more compact
// EncodeCompactBytes form. Returns the extended buffer, or an error for
// an unsupported value type.
func encode(b []byte, vals []interface{}, comparable bool) ([]byte, error) {
	for _, val := range vals {
		switch v := val.(type) {
		case bool:
			// Booleans are stored as the integers 1/0.
			b = append(b, intFlag)
			if v {
				b = EncodeInt(b, 1)
			} else {
				b = EncodeInt(b, 0)
			}
		case int:
			b = append(b, intFlag)
			b = EncodeInt(b, int64(v))
		case int8:
			b = append(b, intFlag)
			b = EncodeInt(b, int64(v))
		case int16:
			b = append(b, intFlag)
			b = EncodeInt(b, int64(v))
		case int32:
			b = append(b, intFlag)
			b = EncodeInt(b, int64(v))
		case int64:
			// v is already int64; no conversion needed.
			b = append(b, intFlag)
			b = EncodeInt(b, v)
		case uint:
			b = append(b, uintFlag)
			b = EncodeUint(b, uint64(v))
		case uint8:
			b = append(b, uintFlag)
			b = EncodeUint(b, uint64(v))
		case uint16:
			b = append(b, uintFlag)
			b = EncodeUint(b, uint64(v))
		case uint32:
			b = append(b, uintFlag)
			b = EncodeUint(b, uint64(v))
		case uint64:
			// v is already uint64; no conversion needed.
			b = append(b, uintFlag)
			b = EncodeUint(b, v)
		case float32:
			b = append(b, floatFlag)
			b = EncodeFloat(b, float64(v))
		case float64:
			// v is already float64; no conversion needed.
			b = append(b, floatFlag)
			b = EncodeFloat(b, v)
		case string:
			b = encodeBytes(b, []byte(v), comparable)
		case []byte:
			b = encodeBytes(b, v, comparable)
		case mysql.Time:
			// Times are stored via their string representation.
			b = encodeBytes(b, []byte(v.String()), comparable)
		case mysql.Duration:
			// duration may have negative value, so we cannot use String to encode directly.
			b = append(b, durationFlag)
			b = EncodeInt(b, int64(v.Duration))
		case mysql.Decimal:
			b = append(b, decimalFlag)
			b = EncodeDecimal(b, v)
		case mysql.Hex:
			b = append(b, intFlag)
			b = EncodeInt(b, int64(v.ToNumber()))
		case mysql.Bit:
			b = append(b, uintFlag)
			b = EncodeUint(b, uint64(v.ToNumber()))
		case mysql.Enum:
			b = append(b, uintFlag)
			b = EncodeUint(b, uint64(v.ToNumber()))
		case mysql.Set:
			b = append(b, uintFlag)
			b = EncodeUint(b, uint64(v.ToNumber()))
		case nil:
			b = append(b, nilFlag)
		default:
			return nil, errors.Errorf("unsupported encode type %T", val)
		}
	}
	return b, nil
}
func encodeBytes(b []byte, v []byte, comparable bool) []byte {
if comparable {
b = append(b, bytesFlag)
b = EncodeBytes(b, v)
} else {
b = append(b, compactBytesFlag)
b = EncodeCompactBytes(b, v)
}
return b
}
// EncodeKey appends the encoded values to byte slice b, returns the appended
// slice. It guarantees the encoded value is in ascending order for comparison.
func EncodeKey(b []byte, v ...interface{}) ([]byte, error) {
	const memComparable = true
	return encode(b, v, memComparable)
}
// EncodeValue appends the encoded values to byte slice b, returning the appended
// slice. It does not guarantee the order for comparison.
func EncodeValue(b []byte, v ...interface{}) ([]byte, error) {
	const memComparable = false
	return encode(b, v, memComparable)
}
// Decode decodes values from a byte slice generated with EncodeKey or EncodeValue
// before. It returns an error when the input is empty, when a flag byte
// is unknown, or when any value fails to decode; decoding stops at the
// first error.
func Decode(b []byte) ([]interface{}, error) {
	if len(b) < 1 {
		return nil, errors.New("invalid encoded key")
	}
	var (
		flag   byte
		err    error
		v      interface{}
		values = make([]interface{}, 0)
	)
	for len(b) > 0 {
		// Each element starts with its type flag; the per-type decoder
		// consumes the payload and returns the remaining bytes.
		flag = b[0]
		b = b[1:]
		switch flag {
		case intFlag:
			b, v, err = DecodeInt(b)
		case uintFlag:
			b, v, err = DecodeUint(b)
		case floatFlag:
			b, v, err = DecodeFloat(b)
		case bytesFlag:
			b, v, err = DecodeBytes(b)
		case compactBytesFlag:
			b, v, err = DecodeCompactBytes(b)
		case decimalFlag:
			b, v, err = DecodeDecimal(b)
		case durationFlag:
			// Durations were stored as their int64 nanosecond count.
			var r int64
			b, r, err = DecodeInt(b)
			if err == nil {
				// use max fsp, let outer to do round manually.
				v = mysql.Duration{Duration: time.Duration(r), Fsp: mysql.MaxFsp}
			}
		case nilFlag:
			v = nil
		default:
			return nil, errors.Errorf("invalid encoded key flag %v", flag)
		}
		if err != nil {
			return nil, errors.Trace(err)
		}
		values = append(values, v)
	}
	return values, nil
}
package iterator
import (
"sync/atomic"
"github.com/apache/arrow/go/arrow"
"github.com/apache/arrow/go/arrow/array"
"github.com/go-bullseye/bullseye/internal/debug"
)
// Int64ChunkIterator is an iterator for reading an Arrow Column value by value.
type Int64ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Int64 // cache the chunks on this iterator
	length int64          // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int          // current chunk
	currentChunk *array.Int64 // current chunk; nil until the first Next
}

// NewInt64ChunkIterator creates a new Int64ChunkIterator for reading an Arrow Column.
func NewInt64ChunkIterator(col *array.Column) *Int64ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Int64, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Int64)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Int64ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Int64ChunkIterator) Chunk() *array.Int64 { return cr.currentChunk }

// ChunkValues returns the underlying []int64 chunk values.
// Keep in mind the []int64 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Int64ChunkIterator) ChunkValues() []int64 { return cr.Chunk().Int64Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Int64ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Int64ChunkIterator
func (cr *Int64ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Int64ChunkIterator
func (cr *Int64ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Uint64ChunkIterator is an iterator for reading an Arrow Column value by value.
type Uint64ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Uint64 // cache the chunks on this iterator
	length int64           // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int           // current chunk
	currentChunk *array.Uint64 // current chunk; nil until the first Next
}

// NewUint64ChunkIterator creates a new Uint64ChunkIterator for reading an Arrow Column.
func NewUint64ChunkIterator(col *array.Column) *Uint64ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Uint64, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Uint64)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Uint64ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Uint64ChunkIterator) Chunk() *array.Uint64 { return cr.currentChunk }

// ChunkValues returns the underlying []uint64 chunk values.
// Keep in mind the []uint64 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Uint64ChunkIterator) ChunkValues() []uint64 { return cr.Chunk().Uint64Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Uint64ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Uint64ChunkIterator
func (cr *Uint64ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Uint64ChunkIterator
func (cr *Uint64ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Float64ChunkIterator is an iterator for reading an Arrow Column value by value.
type Float64ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Float64 // cache the chunks on this iterator
	length int64            // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int            // current chunk
	currentChunk *array.Float64 // current chunk; nil until the first Next
}

// NewFloat64ChunkIterator creates a new Float64ChunkIterator for reading an Arrow Column.
func NewFloat64ChunkIterator(col *array.Column) *Float64ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Float64, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Float64)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Float64ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Float64ChunkIterator) Chunk() *array.Float64 { return cr.currentChunk }

// ChunkValues returns the underlying []float64 chunk values.
// Keep in mind the []float64 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Float64ChunkIterator) ChunkValues() []float64 { return cr.Chunk().Float64Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Float64ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Float64ChunkIterator
func (cr *Float64ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Float64ChunkIterator
func (cr *Float64ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Int32ChunkIterator is an iterator for reading an Arrow Column value by value.
type Int32ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Int32 // cache the chunks on this iterator
	length int64          // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int          // current chunk
	currentChunk *array.Int32 // current chunk; nil until the first Next
}

// NewInt32ChunkIterator creates a new Int32ChunkIterator for reading an Arrow Column.
func NewInt32ChunkIterator(col *array.Column) *Int32ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Int32, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Int32)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Int32ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Int32ChunkIterator) Chunk() *array.Int32 { return cr.currentChunk }

// ChunkValues returns the underlying []int32 chunk values.
// Keep in mind the []int32 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Int32ChunkIterator) ChunkValues() []int32 { return cr.Chunk().Int32Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Int32ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Int32ChunkIterator
func (cr *Int32ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Int32ChunkIterator
func (cr *Int32ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Uint32ChunkIterator is an iterator for reading an Arrow Column value by value.
type Uint32ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Uint32 // cache the chunks on this iterator
	length int64           // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int           // current chunk
	currentChunk *array.Uint32 // current chunk; nil until the first Next
}

// NewUint32ChunkIterator creates a new Uint32ChunkIterator for reading an Arrow Column.
func NewUint32ChunkIterator(col *array.Column) *Uint32ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Uint32, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Uint32)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Uint32ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Uint32ChunkIterator) Chunk() *array.Uint32 { return cr.currentChunk }

// ChunkValues returns the underlying []uint32 chunk values.
// Keep in mind the []uint32 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Uint32ChunkIterator) ChunkValues() []uint32 { return cr.Chunk().Uint32Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Uint32ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Uint32ChunkIterator
func (cr *Uint32ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Uint32ChunkIterator
func (cr *Uint32ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Float32ChunkIterator is an iterator for reading an Arrow Column value by value.
type Float32ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Float32 // cache the chunks on this iterator
	length int64            // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int            // current chunk
	currentChunk *array.Float32 // current chunk; nil until the first Next
}

// NewFloat32ChunkIterator creates a new Float32ChunkIterator for reading an Arrow Column.
func NewFloat32ChunkIterator(col *array.Column) *Float32ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Float32, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Float32)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Float32ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Float32ChunkIterator) Chunk() *array.Float32 { return cr.currentChunk }

// ChunkValues returns the underlying []float32 chunk values.
// Keep in mind the []float32 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Float32ChunkIterator) ChunkValues() []float32 { return cr.Chunk().Float32Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Float32ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Float32ChunkIterator
func (cr *Float32ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Float32ChunkIterator
func (cr *Float32ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Int16ChunkIterator is an iterator for reading an Arrow Column value by value.
type Int16ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Int16 // cache the chunks on this iterator
	length int64          // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int          // current chunk
	currentChunk *array.Int16 // current chunk; nil until the first Next
}

// NewInt16ChunkIterator creates a new Int16ChunkIterator for reading an Arrow Column.
func NewInt16ChunkIterator(col *array.Column) *Int16ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Int16, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Int16)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Int16ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Int16ChunkIterator) Chunk() *array.Int16 { return cr.currentChunk }

// ChunkValues returns the underlying []int16 chunk values.
// Keep in mind the []int16 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Int16ChunkIterator) ChunkValues() []int16 { return cr.Chunk().Int16Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Int16ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Int16ChunkIterator
func (cr *Int16ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Int16ChunkIterator
func (cr *Int16ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Uint16ChunkIterator is an iterator for reading an Arrow Column value by value.
type Uint16ChunkIterator struct {
	refCount int64 // atomic reference count; starts at 1, resources freed at 0
	col      *array.Column
	// Things Chunked maintains. We're going to maintain it ourselves.
	chunks []*array.Uint16 // cache the chunks on this iterator
	length int64           // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
	nulls  int64
	dtype  arrow.DataType
	// Things we need to maintain for the iterator
	currentIndex int           // current chunk
	currentChunk *array.Uint16 // current chunk; nil until the first Next
}

// NewUint16ChunkIterator creates a new Uint16ChunkIterator for reading an Arrow Column.
func NewUint16ChunkIterator(col *array.Column) *Uint16ChunkIterator {
	col.Retain()
	// Chunked is not using the correct type to keep track of length so we have to recalculate it.
	columnChunks := col.Data().Chunks()
	chunks := make([]*array.Uint16, len(columnChunks))
	var length int64
	var nulls int64
	for i, chunk := range columnChunks {
		// Keep our own refs to chunks
		chunks[i] = chunk.(*array.Uint16)
		// Retain the chunk
		chunks[i].Retain()
		// Keep our own counters instead of Chunked's
		length += int64(chunk.Len())
		nulls += int64(chunk.NullN())
	}
	return &Uint16ChunkIterator{
		refCount:     1,
		col:          col,
		chunks:       chunks,
		length:       length,
		nulls:        nulls,
		dtype:        col.DataType(),
		currentIndex: 0,
		currentChunk: nil,
	}
}

// Chunk will return the current chunk that the iterator is on.
// It is nil until Next has been called at least once.
func (cr *Uint16ChunkIterator) Chunk() *array.Uint16 { return cr.currentChunk }

// ChunkValues returns the underlying []uint16 chunk values.
// Keep in mind the []uint16 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Uint16ChunkIterator) ChunkValues() []uint16 { return cr.Chunk().Uint16Values() }

// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Uint16ChunkIterator) Next() bool {
	if cr.currentIndex >= len(cr.chunks) {
		return false
	}
	// Drop the reference taken on the previously current chunk.
	if cr.currentChunk != nil {
		cr.currentChunk.Release()
	}
	cr.currentChunk = cr.chunks[cr.currentIndex]
	cr.currentChunk.Retain()
	cr.currentIndex++
	return true
}

// Retain keeps a reference to the Uint16ChunkIterator
func (cr *Uint16ChunkIterator) Retain() {
	atomic.AddInt64(&cr.refCount, 1)
}

// Release removes a reference to the Uint16ChunkIterator
func (cr *Uint16ChunkIterator) Release() {
	debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
	ref := atomic.AddInt64(&cr.refCount, -1)
	if ref == 0 {
		// Last reference dropped: release everything we retained and
		// nil the fields so accidental reuse fails fast.
		cr.col.Release()
		for i := range cr.chunks {
			cr.chunks[i].Release()
		}
		if cr.currentChunk != nil {
			cr.currentChunk.Release()
			cr.currentChunk = nil
		}
		cr.col = nil
		cr.chunks = nil
		cr.dtype = nil
	}
}
// Int8ChunkIterator is an iterator for reading an Arrow Column value by value.
type Int8ChunkIterator struct {
refCount int64
col *array.Column
// Things Chunked maintains. We're going to maintain it ourselves.
chunks []*array.Int8 // cache the chunks on this iterator
length int64 // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
nulls int64
dtype arrow.DataType
// Things we need to maintain for the iterator
currentIndex int // current chunk
currentChunk *array.Int8 // current chunk
}
// NewInt8ChunkIterator creates a new Int8ChunkIterator for reading an Arrow Column.
func NewInt8ChunkIterator(col *array.Column) *Int8ChunkIterator {
// Hold a reference to the column for the iterator's lifetime; dropped in Release.
col.Retain()
// Chunked is not using the correct type to keep track of length so we have to recalculate it.
columnChunks := col.Data().Chunks()
chunks := make([]*array.Int8, len(columnChunks))
var length int64
var nulls int64
for i, chunk := range columnChunks {
// Keep our own refs to chunks
chunks[i] = chunk.(*array.Int8)
// Retain the chunk
chunks[i].Retain()
// Keep our own counters instead of Chunked's
length += int64(chunk.Len())
nulls += int64(chunk.NullN())
}
return &Int8ChunkIterator{
refCount: 1,
col: col,
chunks: chunks,
length: length,
nulls: nulls,
dtype: col.DataType(),
currentIndex: 0,
currentChunk: nil,
}
}
// Chunk will return the current chunk that the iterator is on.
func (cr *Int8ChunkIterator) Chunk() *array.Int8 { return cr.currentChunk }
// ChunkValues returns the underlying []int8 chunk values.
// Keep in mind the []int8 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Int8ChunkIterator) ChunkValues() []int8 { return cr.Chunk().Int8Values() }
// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Int8ChunkIterator) Next() bool {
if cr.currentIndex >= len(cr.chunks) {
return false
}
// Drop the extra reference taken when the previous chunk became current.
if cr.currentChunk != nil {
cr.currentChunk.Release()
}
cr.currentChunk = cr.chunks[cr.currentIndex]
// Take an extra reference while this chunk is current.
cr.currentChunk.Retain()
cr.currentIndex++
return true
}
// Retain keeps a reference to the Int8ChunkIterator
func (cr *Int8ChunkIterator) Retain() {
atomic.AddInt64(&cr.refCount, 1)
}
// Release removes a reference to the Int8ChunkIterator
func (cr *Int8ChunkIterator) Release() {
debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
ref := atomic.AddInt64(&cr.refCount, -1)
if ref == 0 {
cr.col.Release()
// Balance the per-chunk Retain taken in the constructor.
for i := range cr.chunks {
cr.chunks[i].Release()
}
if cr.currentChunk != nil {
cr.currentChunk.Release()
cr.currentChunk = nil
}
// Nil out pointers so accidental use after release fails fast.
cr.col = nil
cr.chunks = nil
cr.dtype = nil
}
}
// Uint8ChunkIterator is an iterator for reading an Arrow Column value by value.
type Uint8ChunkIterator struct {
refCount int64
col *array.Column
// Things Chunked maintains. We're going to maintain it ourselves.
chunks []*array.Uint8 // cache the chunks on this iterator
length int64 // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
nulls int64
dtype arrow.DataType
// Things we need to maintain for the iterator
currentIndex int // current chunk
currentChunk *array.Uint8 // current chunk
}
// NewUint8ChunkIterator creates a new Uint8ChunkIterator for reading an Arrow Column.
func NewUint8ChunkIterator(col *array.Column) *Uint8ChunkIterator {
// Hold a reference to the column for the iterator's lifetime; dropped in Release.
col.Retain()
// Chunked is not using the correct type to keep track of length so we have to recalculate it.
columnChunks := col.Data().Chunks()
chunks := make([]*array.Uint8, len(columnChunks))
var length int64
var nulls int64
for i, chunk := range columnChunks {
// Keep our own refs to chunks
chunks[i] = chunk.(*array.Uint8)
// Retain the chunk
chunks[i].Retain()
// Keep our own counters instead of Chunked's
length += int64(chunk.Len())
nulls += int64(chunk.NullN())
}
return &Uint8ChunkIterator{
refCount: 1,
col: col,
chunks: chunks,
length: length,
nulls: nulls,
dtype: col.DataType(),
currentIndex: 0,
currentChunk: nil,
}
}
// Chunk will return the current chunk that the iterator is on.
func (cr *Uint8ChunkIterator) Chunk() *array.Uint8 { return cr.currentChunk }
// ChunkValues returns the underlying []uint8 chunk values.
// Keep in mind the []uint8 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Uint8ChunkIterator) ChunkValues() []uint8 { return cr.Chunk().Uint8Values() }
// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Uint8ChunkIterator) Next() bool {
if cr.currentIndex >= len(cr.chunks) {
return false
}
// Drop the extra reference taken when the previous chunk became current.
if cr.currentChunk != nil {
cr.currentChunk.Release()
}
cr.currentChunk = cr.chunks[cr.currentIndex]
// Take an extra reference while this chunk is current.
cr.currentChunk.Retain()
cr.currentIndex++
return true
}
// Retain keeps a reference to the Uint8ChunkIterator
func (cr *Uint8ChunkIterator) Retain() {
atomic.AddInt64(&cr.refCount, 1)
}
// Release removes a reference to the Uint8ChunkIterator
func (cr *Uint8ChunkIterator) Release() {
debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
ref := atomic.AddInt64(&cr.refCount, -1)
if ref == 0 {
cr.col.Release()
// Balance the per-chunk Retain taken in the constructor.
for i := range cr.chunks {
cr.chunks[i].Release()
}
if cr.currentChunk != nil {
cr.currentChunk.Release()
cr.currentChunk = nil
}
// Nil out pointers so accidental use after release fails fast.
cr.col = nil
cr.chunks = nil
cr.dtype = nil
}
}
// TimestampChunkIterator is an iterator for reading an Arrow Column value by value.
type TimestampChunkIterator struct {
refCount int64
col *array.Column
// Things Chunked maintains. We're going to maintain it ourselves.
chunks []*array.Timestamp // cache the chunks on this iterator
length int64 // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
nulls int64
dtype arrow.DataType
// Things we need to maintain for the iterator
currentIndex int // current chunk
currentChunk *array.Timestamp // current chunk
}
// NewTimestampChunkIterator creates a new TimestampChunkIterator for reading an Arrow Column.
func NewTimestampChunkIterator(col *array.Column) *TimestampChunkIterator {
// Hold a reference to the column for the iterator's lifetime; dropped in Release.
col.Retain()
// Chunked is not using the correct type to keep track of length so we have to recalculate it.
columnChunks := col.Data().Chunks()
chunks := make([]*array.Timestamp, len(columnChunks))
var length int64
var nulls int64
for i, chunk := range columnChunks {
// Keep our own refs to chunks
chunks[i] = chunk.(*array.Timestamp)
// Retain the chunk
chunks[i].Retain()
// Keep our own counters instead of Chunked's
length += int64(chunk.Len())
nulls += int64(chunk.NullN())
}
return &TimestampChunkIterator{
refCount: 1,
col: col,
chunks: chunks,
length: length,
nulls: nulls,
dtype: col.DataType(),
currentIndex: 0,
currentChunk: nil,
}
}
// Chunk will return the current chunk that the iterator is on.
func (cr *TimestampChunkIterator) Chunk() *array.Timestamp { return cr.currentChunk }
// ChunkValues returns the underlying []arrow.Timestamp chunk values.
// Keep in mind the []arrow.Timestamp type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *TimestampChunkIterator) ChunkValues() []arrow.Timestamp { return cr.Chunk().TimestampValues() }
// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *TimestampChunkIterator) Next() bool {
if cr.currentIndex >= len(cr.chunks) {
return false
}
// Drop the extra reference taken when the previous chunk became current.
if cr.currentChunk != nil {
cr.currentChunk.Release()
}
cr.currentChunk = cr.chunks[cr.currentIndex]
// Take an extra reference while this chunk is current.
cr.currentChunk.Retain()
cr.currentIndex++
return true
}
// Retain keeps a reference to the TimestampChunkIterator
func (cr *TimestampChunkIterator) Retain() {
atomic.AddInt64(&cr.refCount, 1)
}
// Release removes a reference to the TimestampChunkIterator
func (cr *TimestampChunkIterator) Release() {
debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
ref := atomic.AddInt64(&cr.refCount, -1)
if ref == 0 {
cr.col.Release()
// Balance the per-chunk Retain taken in the constructor.
for i := range cr.chunks {
cr.chunks[i].Release()
}
if cr.currentChunk != nil {
cr.currentChunk.Release()
cr.currentChunk = nil
}
// Nil out pointers so accidental use after release fails fast.
cr.col = nil
cr.chunks = nil
cr.dtype = nil
}
}
// Time32ChunkIterator is an iterator for reading an Arrow Column value by value.
type Time32ChunkIterator struct {
refCount int64
col *array.Column
// Things Chunked maintains. We're going to maintain it ourselves.
chunks []*array.Time32 // cache the chunks on this iterator
length int64 // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
nulls int64
dtype arrow.DataType
// Things we need to maintain for the iterator
currentIndex int // current chunk
currentChunk *array.Time32 // current chunk
}
// NewTime32ChunkIterator creates a new Time32ChunkIterator for reading an Arrow Column.
func NewTime32ChunkIterator(col *array.Column) *Time32ChunkIterator {
// Hold a reference to the column for the iterator's lifetime; dropped in Release.
col.Retain()
// Chunked is not using the correct type to keep track of length so we have to recalculate it.
columnChunks := col.Data().Chunks()
chunks := make([]*array.Time32, len(columnChunks))
var length int64
var nulls int64
for i, chunk := range columnChunks {
// Keep our own refs to chunks
chunks[i] = chunk.(*array.Time32)
// Retain the chunk
chunks[i].Retain()
// Keep our own counters instead of Chunked's
length += int64(chunk.Len())
nulls += int64(chunk.NullN())
}
return &Time32ChunkIterator{
refCount: 1,
col: col,
chunks: chunks,
length: length,
nulls: nulls,
dtype: col.DataType(),
currentIndex: 0,
currentChunk: nil,
}
}
// Chunk will return the current chunk that the iterator is on.
func (cr *Time32ChunkIterator) Chunk() *array.Time32 { return cr.currentChunk }
// ChunkValues returns the underlying []arrow.Time32 chunk values.
// Keep in mind the []arrow.Time32 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Time32ChunkIterator) ChunkValues() []arrow.Time32 { return cr.Chunk().Time32Values() }
// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Time32ChunkIterator) Next() bool {
if cr.currentIndex >= len(cr.chunks) {
return false
}
// Drop the extra reference taken when the previous chunk became current.
if cr.currentChunk != nil {
cr.currentChunk.Release()
}
cr.currentChunk = cr.chunks[cr.currentIndex]
// Take an extra reference while this chunk is current.
cr.currentChunk.Retain()
cr.currentIndex++
return true
}
// Retain keeps a reference to the Time32ChunkIterator
func (cr *Time32ChunkIterator) Retain() {
atomic.AddInt64(&cr.refCount, 1)
}
// Release removes a reference to the Time32ChunkIterator
func (cr *Time32ChunkIterator) Release() {
debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
ref := atomic.AddInt64(&cr.refCount, -1)
if ref == 0 {
cr.col.Release()
// Balance the per-chunk Retain taken in the constructor.
for i := range cr.chunks {
cr.chunks[i].Release()
}
if cr.currentChunk != nil {
cr.currentChunk.Release()
cr.currentChunk = nil
}
// Nil out pointers so accidental use after release fails fast.
cr.col = nil
cr.chunks = nil
cr.dtype = nil
}
}
// Time64ChunkIterator is an iterator for reading an Arrow Column value by value.
type Time64ChunkIterator struct {
refCount int64
col *array.Column
// Things Chunked maintains. We're going to maintain it ourselves.
chunks []*array.Time64 // cache the chunks on this iterator
length int64 // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
nulls int64
dtype arrow.DataType
// Things we need to maintain for the iterator
currentIndex int // current chunk
currentChunk *array.Time64 // current chunk
}
// NewTime64ChunkIterator creates a new Time64ChunkIterator for reading an Arrow Column.
func NewTime64ChunkIterator(col *array.Column) *Time64ChunkIterator {
// Hold a reference to the column for the iterator's lifetime; dropped in Release.
col.Retain()
// Chunked is not using the correct type to keep track of length so we have to recalculate it.
columnChunks := col.Data().Chunks()
chunks := make([]*array.Time64, len(columnChunks))
var length int64
var nulls int64
for i, chunk := range columnChunks {
// Keep our own refs to chunks
chunks[i] = chunk.(*array.Time64)
// Retain the chunk
chunks[i].Retain()
// Keep our own counters instead of Chunked's
length += int64(chunk.Len())
nulls += int64(chunk.NullN())
}
return &Time64ChunkIterator{
refCount: 1,
col: col,
chunks: chunks,
length: length,
nulls: nulls,
dtype: col.DataType(),
currentIndex: 0,
currentChunk: nil,
}
}
// Chunk will return the current chunk that the iterator is on.
func (cr *Time64ChunkIterator) Chunk() *array.Time64 { return cr.currentChunk }
// ChunkValues returns the underlying []arrow.Time64 chunk values.
// Keep in mind the []arrow.Time64 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Time64ChunkIterator) ChunkValues() []arrow.Time64 { return cr.Chunk().Time64Values() }
// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Time64ChunkIterator) Next() bool {
if cr.currentIndex >= len(cr.chunks) {
return false
}
// Drop the extra reference taken when the previous chunk became current.
if cr.currentChunk != nil {
cr.currentChunk.Release()
}
cr.currentChunk = cr.chunks[cr.currentIndex]
// Take an extra reference while this chunk is current.
cr.currentChunk.Retain()
cr.currentIndex++
return true
}
// Retain keeps a reference to the Time64ChunkIterator
func (cr *Time64ChunkIterator) Retain() {
atomic.AddInt64(&cr.refCount, 1)
}
// Release removes a reference to the Time64ChunkIterator
func (cr *Time64ChunkIterator) Release() {
debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
ref := atomic.AddInt64(&cr.refCount, -1)
if ref == 0 {
cr.col.Release()
// Balance the per-chunk Retain taken in the constructor.
for i := range cr.chunks {
cr.chunks[i].Release()
}
if cr.currentChunk != nil {
cr.currentChunk.Release()
cr.currentChunk = nil
}
// Nil out pointers so accidental use after release fails fast.
cr.col = nil
cr.chunks = nil
cr.dtype = nil
}
}
// Date32ChunkIterator is an iterator for reading an Arrow Column value by value.
type Date32ChunkIterator struct {
refCount int64
col *array.Column
// Things Chunked maintains. We're going to maintain it ourselves.
chunks []*array.Date32 // cache the chunks on this iterator
length int64 // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
nulls int64
dtype arrow.DataType
// Things we need to maintain for the iterator
currentIndex int // current chunk
currentChunk *array.Date32 // current chunk
}
// NewDate32ChunkIterator creates a new Date32ChunkIterator for reading an Arrow Column.
func NewDate32ChunkIterator(col *array.Column) *Date32ChunkIterator {
// Hold a reference to the column for the iterator's lifetime; dropped in Release.
col.Retain()
// Chunked is not using the correct type to keep track of length so we have to recalculate it.
columnChunks := col.Data().Chunks()
chunks := make([]*array.Date32, len(columnChunks))
var length int64
var nulls int64
for i, chunk := range columnChunks {
// Keep our own refs to chunks
chunks[i] = chunk.(*array.Date32)
// Retain the chunk
chunks[i].Retain()
// Keep our own counters instead of Chunked's
length += int64(chunk.Len())
nulls += int64(chunk.NullN())
}
return &Date32ChunkIterator{
refCount: 1,
col: col,
chunks: chunks,
length: length,
nulls: nulls,
dtype: col.DataType(),
currentIndex: 0,
currentChunk: nil,
}
}
// Chunk will return the current chunk that the iterator is on.
func (cr *Date32ChunkIterator) Chunk() *array.Date32 { return cr.currentChunk }
// ChunkValues returns the underlying []arrow.Date32 chunk values.
// Keep in mind the []arrow.Date32 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Date32ChunkIterator) ChunkValues() []arrow.Date32 { return cr.Chunk().Date32Values() }
// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Date32ChunkIterator) Next() bool {
if cr.currentIndex >= len(cr.chunks) {
return false
}
// Drop the extra reference taken when the previous chunk became current.
if cr.currentChunk != nil {
cr.currentChunk.Release()
}
cr.currentChunk = cr.chunks[cr.currentIndex]
// Take an extra reference while this chunk is current.
cr.currentChunk.Retain()
cr.currentIndex++
return true
}
// Retain keeps a reference to the Date32ChunkIterator
func (cr *Date32ChunkIterator) Retain() {
atomic.AddInt64(&cr.refCount, 1)
}
// Release removes a reference to the Date32ChunkIterator
func (cr *Date32ChunkIterator) Release() {
debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
ref := atomic.AddInt64(&cr.refCount, -1)
if ref == 0 {
cr.col.Release()
// Balance the per-chunk Retain taken in the constructor.
for i := range cr.chunks {
cr.chunks[i].Release()
}
if cr.currentChunk != nil {
cr.currentChunk.Release()
cr.currentChunk = nil
}
// Nil out pointers so accidental use after release fails fast.
cr.col = nil
cr.chunks = nil
cr.dtype = nil
}
}
// Date64ChunkIterator is an iterator for reading an Arrow Column value by value.
type Date64ChunkIterator struct {
refCount int64
col *array.Column
// Things Chunked maintains. We're going to maintain it ourselves.
chunks []*array.Date64 // cache the chunks on this iterator
length int64 // this isn't set right on Chunked so we won't rely on it there. Instead we keep the correct value here.
nulls int64
dtype arrow.DataType
// Things we need to maintain for the iterator
currentIndex int // current chunk
currentChunk *array.Date64 // current chunk
}
// NewDate64ChunkIterator creates a new Date64ChunkIterator for reading an Arrow Column.
func NewDate64ChunkIterator(col *array.Column) *Date64ChunkIterator {
// Hold a reference to the column for the iterator's lifetime; dropped in Release.
col.Retain()
// Chunked is not using the correct type to keep track of length so we have to recalculate it.
columnChunks := col.Data().Chunks()
chunks := make([]*array.Date64, len(columnChunks))
var length int64
var nulls int64
for i, chunk := range columnChunks {
// Keep our own refs to chunks
chunks[i] = chunk.(*array.Date64)
// Retain the chunk
chunks[i].Retain()
// Keep our own counters instead of Chunked's
length += int64(chunk.Len())
nulls += int64(chunk.NullN())
}
return &Date64ChunkIterator{
refCount: 1,
col: col,
chunks: chunks,
length: length,
nulls: nulls,
dtype: col.DataType(),
currentIndex: 0,
currentChunk: nil,
}
}
// Chunk will return the current chunk that the iterator is on.
func (cr *Date64ChunkIterator) Chunk() *array.Date64 { return cr.currentChunk }
// ChunkValues returns the underlying []arrow.Date64 chunk values.
// Keep in mind the []arrow.Date64 type might not be able
// to account for nil values. You must check for those explicitly via the chunk.
func (cr *Date64ChunkIterator) ChunkValues() []arrow.Date64 { return cr.Chunk().Date64Values() }
// Next moves the iterator to the next chunk. This will return false
// when there are no more chunks.
func (cr *Date64ChunkIterator) Next() bool {
if cr.currentIndex >= len(cr.chunks) {
return false
}
// Drop the extra reference taken when the previous chunk became current.
if cr.currentChunk != nil {
cr.currentChunk.Release()
}
cr.currentChunk = cr.chunks[cr.currentIndex]
// Take an extra reference while this chunk is current.
cr.currentChunk.Retain()
cr.currentIndex++
return true
}
// Retain keeps a reference to the Date64ChunkIterator
func (cr *Date64ChunkIterator) Retain() {
atomic.AddInt64(&cr.refCount, 1)
}
// Release removes a reference to the Date64ChunkIterator
func (cr *Date64ChunkIterator) Release() {
debug.Assert(atomic.LoadInt64(&cr.refCount) > 0, "too many releases")
ref := atomic.AddInt64(&cr.refCount, -1)
if ref == 0 {
cr.col.Release()
// Balance the per-chunk Retain taken in the constructor.
for i := range cr.chunks {
cr.chunks[i].Release()
}
if cr.currentChunk != nil {
cr.currentChunk.Release()
cr.currentChunk = nil
}
// Nil out pointers so accidental use after release fails fast.
cr.col = nil
cr.chunks = nil
cr.dtype = nil
}
} | iterator/chunkiterator.gen.go | 0.688887 | 0.415907 | chunkiterator.gen.go | starcoder |
package simdjson
import (
"errors"
"fmt"
"math"
)
// Array represents a JSON array.
// There are methods that allow to get full arrays if the value type is the same.
// Otherwise an iterator can be retrieved.
type Array struct {
// tape is the parsed tape this array is a view into.
tape ParsedJson
// off is the tape offset of the first element of the array.
off int
}
// Iter returns an iterator positioned at the start of the array.
// This can be used for parsing mixed content arrays: the first value is
// ready after a call to Advance, and calling past the last element
// reports TypeNone.
func (a *Array) Iter() Iter {
	return Iter{tape: a.tape, off: a.off}
}
// FirstType reports the type of the first element of the array.
// TypeNone is returned when the array is empty.
func (a *Array) FirstType() Type {
	it := a.Iter()
	return it.PeekNext()
}
// MarshalJSON serializes every remaining element of the array,
// implementing json.Marshaler via MarshalJSONBuffer with a nil buffer.
func (a *Array) MarshalJSON() ([]byte, error) {
	var dst []byte
	return a.MarshalJSONBuffer(dst)
}
// MarshalJSONBuffer serializes every element of the array, appending the
// JSON to dst (which may be nil) and returning the extended buffer.
// Supplying a reusable buffer reduces allocations.
func (a *Array) MarshalJSONBuffer(dst []byte) ([]byte, error) {
	dst = append(dst, '[')
	iter := a.Iter()
	var child Iter
	for {
		typ, err := iter.AdvanceIter(&child)
		if err != nil {
			return nil, err
		}
		if typ == TypeNone {
			break
		}
		if dst, err = child.MarshalJSONBuffer(dst); err != nil {
			return nil, err
		}
		// Stop before emitting a comma when the closing tag is next.
		if iter.PeekNextTag() == TagArrayEnd {
			break
		}
		dst = append(dst, ',')
	}
	if iter.PeekNextTag() != TagArrayEnd {
		return nil, errors.New("expected TagArrayEnd as final tag in array")
	}
	return append(dst, ']'), nil
}
// Interface converts the array into a []interface{}.
// See Iter.Interface() for a reference on value types.
func (a *Array) Interface() ([]interface{}, error) {
	// Size the result assuming one tape entry per element.
	capHint := (len(a.tape.Tape) - a.off - 1) / 2
	if capHint < 0 {
		capHint = 0
	}
	out := make([]interface{}, 0, capHint)
	it := a.Iter()
	for it.Advance() != TypeNone {
		v, err := it.Interface()
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, nil
}
// AsFloat returns the array values as float64.
// Signed and unsigned integer elements are converted to float64
// automatically; any other element type yields an error.
func (a *Array) AsFloat() ([]float64, error) {
	// Size the result assuming one tape entry per element.
	capHint := (len(a.tape.Tape) - a.off - 1) / 2
	if capHint < 0 {
		capHint = 0
	}
	out := make([]float64, 0, capHint)
	for {
		tag := Tag(a.tape.Tape[a.off] >> 56)
		a.off++
		switch tag {
		case TagFloat:
			if len(a.tape.Tape) <= a.off {
				return nil, errors.New("corrupt input: expected float, but no more values")
			}
			out = append(out, math.Float64frombits(a.tape.Tape[a.off]))
		case TagInteger:
			if len(a.tape.Tape) <= a.off {
				return nil, errors.New("corrupt input: expected integer, but no more values")
			}
			out = append(out, float64(int64(a.tape.Tape[a.off])))
		case TagUint:
			if len(a.tape.Tape) <= a.off {
				return nil, errors.New("corrupt input: expected integer, but no more values")
			}
			out = append(out, float64(a.tape.Tape[a.off]))
		case TagArrayEnd:
			// End of array reached; the value slot is not consumed.
			return out, nil
		default:
			return nil, fmt.Errorf("unable to convert type %v to float", tag)
		}
		a.off++
	}
}
// AsInteger returns the array values as int64.
// Float and unsigned integer elements are converted automatically;
// values outside the int64 range yield an error.
func (a *Array) AsInteger() ([]int64, error) {
	// Estimate length: assume one tape entry per element.
	lenEst := (len(a.tape.Tape) - a.off - 1) / 2
	if lenEst < 0 {
		lenEst = 0
	}
	dst := make([]int64, 0, lenEst)
readArray:
	for {
		tag := Tag(a.tape.Tape[a.off] >> 56)
		a.off++
		switch tag {
		case TagFloat:
			if len(a.tape.Tape) <= a.off {
				return nil, errors.New("corrupt input: expected float, but no more values")
			}
			val := math.Float64frombits(a.tape.Tape[a.off])
			if val > math.MaxInt64 {
				return nil, errors.New("float value overflows int64")
			}
			if val < math.MinInt64 {
				return nil, errors.New("float value underflows int64")
			}
			dst = append(dst, int64(val))
		case TagInteger:
			if len(a.tape.Tape) <= a.off {
				return nil, errors.New("corrupt input: expected integer, but no more values")
			}
			dst = append(dst, int64(a.tape.Tape[a.off]))
		case TagUint:
			if len(a.tape.Tape) <= a.off {
				return nil, errors.New("corrupt input: expected integer, but no more values")
			}
			val := a.tape.Tape[a.off]
			if val > math.MaxInt64 {
				return nil, errors.New("unsigned integer value overflows int64")
			}
			// FIX: was `dst = append(dst)`, which silently dropped every
			// in-range unsigned value from the result.
			dst = append(dst, int64(val))
		case TagArrayEnd:
			break readArray
		default:
			return nil, fmt.Errorf("unable to convert type %v to integer", tag)
		}
		a.off++
	}
	return dst, nil
}
// AsString returns the array values as a slice of strings.
// No conversion is done.
func (a *Array) AsString() ([]string, error) {
// Estimate length
lenEst := len(a.tape.Tape) - a.off - 1
if lenEst < 0 {
lenEst = 0
}
dst := make([]string, 0, lenEst)
i := a.Iter()
var elem Iter
for {
t, err := i.AdvanceIter(&elem)
if err != nil {
return nil, err
}
switch t {
case TypeNone:
// End of the array; all elements consumed.
return dst, nil
case TypeString:
s, err := elem.String()
if err != nil {
return nil, err
}
dst = append(dst, s)
default:
// Any non-string element aborts the conversion.
return nil, fmt.Errorf("element in array is not string, but %v", t)
}
}
} | parsed_array.go | 0.760295 | 0.459501 | parsed_array.go | starcoder |
package metadata
import (
"context"
"strconv"
"time"
"github.com/Netflix/p2plab/errdefs"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)
// Scenario is a stored benchmark blueprint: its definition describes the
// objects to distribute and the queries/actions to run.
type Scenario struct {
// ID uniquely identifies the scenario.
ID string
// Definition holds the objects, seed and benchmark stages.
Definition ScenarioDefinition
// Labels are user-managed tags used for grouping and selection.
Labels []string
// CreatedAt/UpdatedAt are stamped in UTC by Create/Update operations.
CreatedAt, UpdatedAt time.Time
}
// ScenarioDefinition defines a scenario.
type ScenarioDefinition struct {
// Objects maps a name to the definition of data distributed during the
// benchmark.
Objects map[string]ObjectDefinition `json:"objects,omitempty"`
// Seed map a query to an action. Queries are executed in parallel to seed
// a cluster with initial data before running the benchmark.
Seed map[string]string `json:"seed,omitempty"`
// Benchmark maps a query to an action. Queries are executed in parallel
// during the benchmark and metrics are collected during this stage.
Benchmark map[string]string `json:"benchmark,omitempty"`
}
// ObjectDefinition define a type of data that will be distributed during the
// benchmark. The definition also specify options on how the data is converted
// into IPFS datastructures.
type ObjectDefinition struct {
// Type specifies what type is the source of the data and how the data is
// retrieved. Types must be one of the following: ["oci-image"].
Type string `json:"type"`
// Source locates the data to retrieve; its interpretation depends on Type.
Source string `json:"source"`
// Layout specify how the DAG is shaped and constructed over the IPLD blocks.
Layout string `json:"layout"`
// Chunker specify which chunking algorithm to use to chunk the data into IPLD
// blocks.
Chunker string `json:"chunker"`
// RawLeaves indicates whether leaf nodes are left as raw blocks.
// NOTE(review): presumably follows IPFS unixfs raw-leaves semantics — confirm.
RawLeaves bool `json:"rawLeaves"`
// HashFunc names the hash function used when building blocks.
HashFunc string `json:"hashFunc"`
// MaxLinks bounds the number of links per DAG node.
MaxLinks int `json:"maxLinks"`
}
// ObjectType is the type of data retrieved.
// Its values correspond to the ObjectDefinition.Type field.
type ObjectType string
var (
// ObjectContainerImage indicates that the object is an OCI image.
ObjectContainerImage ObjectType = "oci-image"
)
// GetScenario looks up a scenario by id.
// A wrapped errdefs.ErrNotFound is returned when it does not exist.
func (m *db) GetScenario(ctx context.Context, id string) (Scenario, error) {
	var scenario Scenario
	if err := m.View(ctx, func(tx *bolt.Tx) error {
		bkt := getScenariosBucket(tx)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "scenario %q", id)
		}
		sbkt := bkt.Bucket([]byte(id))
		if sbkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "scenario %q", id)
		}
		scenario.ID = id
		// Wrapf returns nil when readScenario succeeds.
		return errors.Wrapf(readScenario(sbkt, &scenario), "scenario %q", id)
	}); err != nil {
		return Scenario{}, err
	}
	return scenario, nil
}
// ListScenarios returns every stored scenario.
// A nil slice is returned when the scenarios bucket does not exist yet.
func (m *db) ListScenarios(ctx context.Context) ([]Scenario, error) {
	var scenarios []Scenario
	if err := m.View(ctx, func(tx *bolt.Tx) error {
		bkt := getScenariosBucket(tx)
		if bkt == nil {
			return nil
		}
		return bkt.ForEach(func(k, v []byte) error {
			scenario := Scenario{ID: string(k)}
			if err := readScenario(bkt.Bucket(k), &scenario); err != nil {
				return err
			}
			scenarios = append(scenarios, scenario)
			return nil
		})
	}); err != nil {
		return nil, err
	}
	return scenarios, nil
}
// CreateScenario persists a new scenario, stamping CreatedAt and UpdatedAt
// with the same UTC timestamp. A wrapped errdefs.ErrAlreadyExists is
// returned when a scenario with the same id already exists.
func (m *db) CreateScenario(ctx context.Context, scenario Scenario) (Scenario, error) {
	err := m.Update(ctx, func(tx *bolt.Tx) error {
		bkt, err := createScenariosBucket(tx)
		if err != nil {
			return err
		}
		sbkt, err := bkt.CreateBucket([]byte(scenario.ID))
		if err != nil {
			if err != bolt.ErrBucketExists {
				return err
			}
			return errors.Wrapf(errdefs.ErrAlreadyExists, "scenario %q", scenario.ID)
		}
		scenario.CreatedAt = time.Now().UTC()
		scenario.UpdatedAt = scenario.CreatedAt
		return writeScenario(sbkt, &scenario)
	})
	if err != nil {
		return Scenario{}, err
	}
	// FIX: previously returned the stale `err` variable (always nil here);
	// return an explicit nil for clarity and consistency with
	// GetScenario/UpdateScenario.
	return scenario, nil
}
// UpdateScenario overwrites an existing scenario and refreshes its
// UpdatedAt timestamp. The scenario must carry a non-empty id; a wrapped
// errdefs.ErrNotFound is returned when it does not exist.
func (m *db) UpdateScenario(ctx context.Context, scenario Scenario) (Scenario, error) {
	if scenario.ID == "" {
		return Scenario{}, errors.Wrapf(errdefs.ErrInvalidArgument, "scenario id required for update")
	}
	if err := m.Update(ctx, func(tx *bolt.Tx) error {
		bkt, err := createScenariosBucket(tx)
		if err != nil {
			return err
		}
		sbkt := bkt.Bucket([]byte(scenario.ID))
		if sbkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "scenario %q", scenario.ID)
		}
		scenario.UpdatedAt = time.Now().UTC()
		return writeScenario(sbkt, &scenario)
	}); err != nil {
		return Scenario{}, err
	}
	return scenario, nil
}
// LabelScenarios applies label additions and removals to the given
// scenario ids and returns the updated scenarios.
func (m *db) LabelScenarios(ctx context.Context, ids, adds, removes []string) ([]Scenario, error) {
	var scenarios []Scenario
	err := m.Update(ctx, func(tx *bolt.Tx) error {
		bkt, err := createScenariosBucket(tx)
		if err != nil {
			return err
		}
		// batchUpdateLabels computes the final label set per id and hands it
		// to the callback together with that scenario's bucket.
		return batchUpdateLabels(bkt, ids, adds, removes, func(ibkt *bolt.Bucket, id string, labels []string) error {
			scenario := Scenario{ID: id}
			if err := readScenario(ibkt, &scenario); err != nil {
				return err
			}
			scenario.Labels = labels
			scenario.UpdatedAt = time.Now().UTC()
			if err := writeScenario(ibkt, &scenario); err != nil {
				return err
			}
			scenarios = append(scenarios, scenario)
			return nil
		})
	})
	if err != nil {
		return nil, err
	}
	return scenarios, nil
}
// DeleteScenarios removes the scenarios with the given ids inside a single
// transaction. A wrapped errdefs.ErrNotFound is returned for the first id
// that does not exist; a missing scenarios bucket is treated as a no-op.
func (m *db) DeleteScenarios(ctx context.Context, ids ...string) error {
	return m.Update(ctx, func(tx *bolt.Tx) error {
		bkt := getScenariosBucket(tx)
		if bkt == nil {
			return nil
		}
		for _, id := range ids {
			switch err := bkt.DeleteBucket([]byte(id)); err {
			case nil:
			case bolt.ErrBucketNotFound:
				return errors.Wrapf(errdefs.ErrNotFound, "scenario %q", id)
			default:
				return err
			}
		}
		return nil
	})
}
// readScenario populates scenario from the contents of its bucket:
// timestamps, definition, labels and the stored id.
func readScenario(bkt *bolt.Bucket, scenario *Scenario) error {
	if err := ReadTimestamps(bkt, &scenario.CreatedAt, &scenario.UpdatedAt); err != nil {
		return err
	}
	var err error
	if scenario.Definition, err = readScenarioDefinition(bkt); err != nil {
		return err
	}
	if scenario.Labels, err = readLabels(bkt); err != nil {
		return err
	}
	return bkt.ForEach(func(k, v []byte) error {
		if v == nil {
			// Nested bucket, not a key/value pair.
			return nil
		}
		if string(k) == string(bucketKeyID) {
			scenario.ID = string(v)
		}
		return nil
	})
}
// readScenarioDefinition decodes the definition sub-bucket.
// The zero value is returned (without error) when the bucket is absent.
func readScenarioDefinition(bkt *bolt.Bucket) (ScenarioDefinition, error) {
	var sdef ScenarioDefinition
	dbkt := bkt.Bucket(bucketKeyDefinition)
	if dbkt == nil {
		return sdef, nil
	}
	var err error
	if sdef.Objects, err = readObjects(dbkt); err != nil {
		return sdef, err
	}
	if sdef.Seed, err = readMap(dbkt, bucketKeySeed); err != nil {
		return sdef, err
	}
	if sdef.Benchmark, err = readMap(dbkt, bucketKeyBenchmark); err != nil {
		return sdef, err
	}
	return sdef, nil
}
// writeScenario persists scenario into its bucket: timestamps, definition,
// labels and the id.
func writeScenario(bkt *bolt.Bucket, scenario *Scenario) error {
	if err := WriteTimestamps(bkt, scenario.CreatedAt, scenario.UpdatedAt); err != nil {
		return err
	}
	if err := writeScenarioDefinition(bkt, scenario.Definition); err != nil {
		return err
	}
	if err := writeLabels(bkt, scenario.Labels); err != nil {
		return err
	}
	return bkt.Put(bucketKeyID, []byte(scenario.ID))
}
// writeScenarioDefinition persists sdef under the definition sub-bucket,
// replacing any previous contents so stale keys don't linger.
func writeScenarioDefinition(bkt *bolt.Bucket, sdef ScenarioDefinition) error {
	// Use the shared RecreateBucket helper instead of the previous manual
	// delete-then-create, for consistency with writeObjects.
	// NOTE(review): assumes RecreateBucket deletes an existing bucket before
	// creating a fresh one, matching the manual sequence it replaces —
	// confirm against its implementation.
	dbkt, err := RecreateBucket(bkt, bucketKeyDefinition)
	if err != nil {
		return err
	}
	if err := writeObjects(dbkt, sdef.Objects); err != nil {
		return err
	}
	if err := writeMap(dbkt, bucketKeySeed, sdef.Seed); err != nil {
		return err
	}
	return writeMap(dbkt, bucketKeyBenchmark, sdef.Benchmark)
}
// readObjects decodes the objects sub-bucket into a map keyed by object
// name. A nil map is returned (without error) when the bucket is absent.
func readObjects(bkt *bolt.Bucket) (map[string]ObjectDefinition, error) {
	obkt := bkt.Bucket(bucketKeyObjects)
	if obkt == nil {
		return nil, nil
	}
	objects := map[string]ObjectDefinition{}
	err := obkt.ForEach(func(name, v []byte) error {
		nbkt := obkt.Bucket(name)
		if nbkt == nil {
			// Skip plain key/value pairs; objects are stored as buckets.
			return nil
		}
		var object ObjectDefinition
		if err := nbkt.ForEach(func(k, v []byte) error {
			switch string(k) {
			case string(bucketKeyType):
				object.Type = string(v)
			case string(bucketKeySource):
				object.Source = string(v)
			case string(bucketKeyLayout):
				object.Layout = string(v)
			case string(bucketKeyChunker):
				object.Chunker = string(v)
			case string(bucketKeyRawLeaves):
				// Malformed values silently decode to the zero value.
				object.RawLeaves, _ = strconv.ParseBool(string(v))
			case string(bucketKeyHashFunc):
				object.HashFunc = string(v)
			case string(bucketKeyMaxLinks):
				object.MaxLinks, _ = strconv.Atoi(string(v))
			}
			return nil
		}); err != nil {
			return err
		}
		objects[string(name)] = object
		return nil
	})
	if err != nil {
		return nil, err
	}
	return objects, nil
}
func writeObjects(bkt *bolt.Bucket, objects map[string]ObjectDefinition) error {
obkt, err := RecreateBucket(bkt, bucketKeyObjects)
if err != nil {
return err
}
for name, object := range objects {
nbkt, err := obkt.CreateBucket([]byte(name))
if err != nil {
return err
}
for _, f := range []field{
{bucketKeyType, []byte(object.Type)},
{bucketKeySource, []byte(object.Source)},
{bucketKeyLayout, []byte(object.Layout)},
{bucketKeyChunker, []byte(object.Chunker)},
{bucketKeyRawLeaves, []byte(strconv.FormatBool(object.RawLeaves))},
{bucketKeyHashFunc, []byte(object.HashFunc)},
{bucketKeyMaxLinks, []byte(strconv.Itoa(object.MaxLinks))},
} {
err = nbkt.Put(f.key, f.value)
if err != nil {
return err
}
}
}
return nil
} | metadata/scenario.go | 0.652574 | 0.436142 | scenario.go | starcoder |
package output
import (
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/batch"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/output/writer"
"github.com/Jeffail/benthos/v3/lib/types"
sess "github.com/Jeffail/benthos/v3/lib/util/aws/session"
"github.com/Jeffail/benthos/v3/lib/util/http/auth"
"github.com/Jeffail/benthos/v3/lib/util/retries"
"github.com/Jeffail/benthos/v3/lib/util/tls"
)
//------------------------------------------------------------------------------
// init registers the elasticsearch output type together with its
// documentation and field specification in the global Constructors table.
func init() {
	Constructors[TypeElasticsearch] = TypeSpec{
		constructor: fromSimpleConstructor(NewElasticsearch),
		Summary: `
Publishes messages into an Elasticsearch index. If the index does not exist then
it is created with a dynamic mapping.`,
		Description: `
Both the ` + "`id` and `index`" + ` fields can be dynamically set using function
interpolations described [here](/docs/configuration/interpolation#bloblang-queries). When
sending batched messages these interpolations are performed per message part.
### AWS
It's possible to enable AWS connectivity with this output using the ` + "`aws`" + `
fields. However, you may need to set ` + "`sniff` and `healthcheck`" + ` to
false for connections to succeed.`,
		Async:   true,
		Batches: true,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("urls", "A list of URLs to connect to. If an item of the list contains commas it will be expanded into multiple URLs.", []string{"http://localhost:9200"}).Array(),
			docs.FieldCommon("index", "The index to place messages.").IsInterpolated(),
			docs.FieldAdvanced("action", "The action to take on the document.").IsInterpolated().HasOptions("index", "update", "delete"),
			docs.FieldAdvanced("pipeline", "An optional pipeline id to preprocess incoming documents.").IsInterpolated(),
			docs.FieldCommon("id", "The ID for indexed messages. Interpolation should be used in order to create a unique ID for each message.").IsInterpolated(),
			docs.FieldCommon("type", "The document type."),
			docs.FieldAdvanced("routing", "The routing key to use for the document.").IsInterpolated(),
			docs.FieldAdvanced("sniff", "Prompts Benthos to sniff for brokers to connect to when establishing a connection."),
			docs.FieldAdvanced("healthcheck", "Whether to enable healthchecks."),
			docs.FieldAdvanced("timeout", "The maximum time to wait before abandoning a request (and trying again)."),
			tls.FieldSpec(),
			docs.FieldCommon("max_in_flight", "The maximum number of messages to have in flight at a given time. Increase this to improve throughput."),
		}.Merge(retries.FieldSpecs()).Add(
			auth.BasicAuthFieldSpec(),
			batch.FieldSpec(),
			docs.FieldAdvanced("aws", "Enables and customises connectivity to Amazon Elastic Service.").WithChildren(
				docs.FieldSpecs{
					docs.FieldCommon("enabled", "Whether to connect to Amazon Elastic Service."),
				}.Merge(sess.FieldSpecs())...,
			),
			docs.FieldAdvanced("gzip_compression", "Enable gzip compression on the request side."),
		),
		Categories: []Category{
			CategoryServices,
		},
	}
}
//------------------------------------------------------------------------------
// NewElasticsearch creates a new Elasticsearch output type, wrapping the
// underlying writer in a batcher configured from the output config.
func NewElasticsearch(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	elasticWriter, err := writer.NewElasticsearch(conf.Elasticsearch, log, stats)
	if err != nil {
		return nil, err
	}
	// A single in-flight message uses the plain synchronous writer;
	// anything more gets the asynchronous writer.
	var w Type
	switch {
	case conf.Elasticsearch.MaxInFlight == 1:
		w, err = NewWriter(TypeElasticsearch, elasticWriter, log, stats)
	default:
		w, err = NewAsyncWriter(TypeElasticsearch, conf.Elasticsearch.MaxInFlight, elasticWriter, log, stats)
	}
	if err != nil {
		return w, err
	}
	return NewBatcherFromConfig(conf.Elasticsearch.Batching, w, mgr, log, stats)
}
//------------------------------------------------------------------------------
package math
import (
"time"
"math"
"math/rand"
"strconv"
)
// Abs returns the absolute value of number.
func Abs(number float64) float64 {
	result := math.Abs(number)
	return result
}
// Rand returns a pseudo-random integer in the inclusive range [min, max].
// Range: [0, 2147483647]
//
// If min > max the bounds are swapped, and max is capped at math.MaxInt32.
// NOTE(review): a fresh source is seeded from the wall clock on every call,
// so rapid successive calls within one clock tick can return identical
// values — consider a single shared seeded source; confirm before changing.
func Rand(min, max int) int {
	if min > max {
		// Swap the bounds so min <= max.
		min, max = max, min
	}
	// Cap the upper bound at the largest int32 value.
	if int31 := 1<<31 - 1; max > int31 {
		max = int31
	}
	if min == max {
		return min
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return r.Intn(max + 1 - min) + min
}
// Round rounds value to the given number of decimal places, with halves
// rounded away from zero (standard rounding).
//
// The previous implementation truncated after adding 0.5, which rounded
// negative values toward zero (e.g. Round(-1.6, 0) returned -1 instead of
// -2); math.Round handles both signs correctly.
func Round(value float64, precision int) float64 {
	p := math.Pow10(precision)
	return math.Round(value*p) / p
}
// Floor returns the greatest integer value less than or equal to value.
func Floor(value float64) float64 {
	floored := math.Floor(value)
	return floored
}
// Ceil returns the least integer value greater than or equal to value.
func Ceil(value float64) float64 {
	ceiled := math.Ceil(value)
	return ceiled
}
// Pi returns the mathematical constant π.
func Pi() float64 {
	const pi = math.Pi
	return pi
}
// Max returns the largest of the given numbers. With no arguments it
// returns 0; with a single argument it returns that argument.
func Max(nums ...float64) float64 {
	switch len(nums) {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	largest := nums[0]
	for _, n := range nums[1:] {
		largest = math.Max(largest, n)
	}
	return largest
}
// Min returns the smallest of the given numbers. With no arguments it
// returns 0; with a single argument it returns that argument.
func Min(nums ...float64) float64 {
	switch len(nums) {
	case 0:
		return 0
	case 1:
		return nums[0]
	}
	smallest := nums[0]
	for _, n := range nums[1:] {
		smallest = math.Min(smallest, n)
	}
	return smallest
}
// IsNan reports whether val is an IEEE 754 "not-a-number" value.
func IsNan(val float64) bool {
	nan := math.IsNaN(val)
	return nan
}
// BaseConvert converts number, interpreted in base frombase, to its string
// representation in base tobase. It returns "" when number cannot be
// parsed in the source base.
// Example: BaseConvert("12312", 8, 16)
func BaseConvert(number string, frombase, tobase int) string {
	parsed, err := strconv.ParseInt(number, frombase, 0)
	if err != nil {
		return ""
	}
	return strconv.FormatInt(parsed, tobase)
}
// Decbin returns the binary string representation of the decimal number.
func Decbin(number int64) string {
	const base = 2
	return strconv.FormatInt(number, base)
}
// Bindec parses a binary string into its decimal value; malformed input
// yields 0.
func Bindec(str string) int64 {
	value, _ := strconv.ParseInt(str, 2, 0)
	return value
}
// Decoct returns the octal string representation of the decimal number.
func Decoct(number int64) string {
	const base = 8
	return strconv.FormatInt(number, base)
}
// Octdec parses an octal string into its decimal value; malformed input
// yields 0.
func Octdec(str string) int64 {
	value, _ := strconv.ParseInt(str, 8, 0)
	return value
}
// Dechex returns the hexadecimal string representation of the decimal
// number (lowercase digits).
func Dechex(number int64) string {
	const base = 16
	return strconv.FormatInt(number, base)
}
// Hexdec parses a hexadecimal string into its decimal value; malformed
// input yields 0.
func Hexdec(str string) int64 {
	value, _ := strconv.ParseInt(str, 16, 0)
	return value
}
package cocoa
import "sync"
// SegmentCount is the number of lock-striped segments. It must be a power
// of two so that (hash & mask) selects a segment index uniformly.
const SegmentCount = 64

// SegmentHashMap is a hash map sharded into SegmentCount independently
// locked segments to reduce lock contention under concurrent access.
type SegmentHashMap struct {
	// table length must be a power of 2
	table []*Segment
	// mask must be 2^n - 1, for example: 0x00000000000000ff
	mask int
}
// newSegmentHashMap builds a SegmentHashMap with SegmentCount empty,
// independently locked segments.
func newSegmentHashMap() *SegmentHashMap {
	segments := make([]*Segment, SegmentCount, SegmentCount)
	for i := range segments {
		segments[i] = &Segment{
			data: make(map[string]*Node),
			mux:  sync.RWMutex{},
		}
	}
	return &SegmentHashMap{
		table: segments,
		mask:  SegmentCount - 1,
	}
}
// hash folds the digest of key into an int used for segment selection by
// XOR-ing the high and low 32 bits. Empty keys hash to 0.
func (m *SegmentHashMap) hash(key []byte) int {
	if len(key) == 0 {
		return 0
	}
	h := hash(key)
	return int(h ^ h>>32)
}
// getSegment selects the segment responsible for the given hash value via
// the power-of-two mask.
func (m *SegmentHashMap) getSegment(hash int) *Segment {
	return m.table[hash&m.mask]
}
// Get returns the node stored under key and whether it exists. A nil or
// empty key is never present.
func (m *SegmentHashMap) Get(key []byte) (value *Node, existed bool) {
	if len(key) == 0 {
		return nil, false
	}
	seg := m.getSegment(m.hash(key))
	return seg.Get(key)
}
// Remove deletes key from the map and returns the node previously stored
// under it, if any.
func (m *SegmentHashMap) Remove(key []byte) (prior *Node) {
	if len(key) == 0 {
		return nil
	}
	seg := m.getSegment(m.hash(key))
	return seg.Remove(key)
}
// Contains reports whether key is present in the map. A nil or empty key
// is never present.
func (m *SegmentHashMap) Contains(key []byte) (ok bool) {
	if len(key) == 0 {
		return false
	}
	seg := m.getSegment(m.hash(key))
	return seg.Contains(key)
}
// Len returns the total number of entries across all segments. The count
// is not a consistent snapshot under concurrent mutation.
func (m *SegmentHashMap) Len() int {
	total := 0
	for _, seg := range m.table {
		total += seg.Len()
	}
	return total
}
// Segment is one lock stripe of a SegmentHashMap: a plain map guarded by
// its own read/write mutex.
type Segment struct {
	data map[string]*Node
	mux sync.RWMutex
}
// Get returns the node stored under key and whether it is present.
// It panics on a nil key. NOTE(review): Remove and Contains perform no
// such nil check — confirm whether the asymmetry is intentional.
func (s *Segment) Get(key []byte) (value *Node, existed bool) {
	if key == nil {
		panic("key is nil")
	}
	s.mux.RLock()
	defer s.mux.RUnlock()
	value, existed = s.data[*bytesToString(key)]
	return
}
// Remove deletes key from the segment and returns the node previously
// stored under it (nil when absent).
func (s *Segment) Remove(key []byte) (prior *Node) {
	s.mux.Lock()
	defer s.mux.Unlock()
	// Convert once instead of re-deriving the string key for both the
	// lookup and the delete.
	k := *bytesToString(key)
	priorNode, existed := s.data[k]
	if existed {
		delete(s.data, k)
	}
	return priorNode
}
// Contains reports whether key is present in the segment.
func (s *Segment) Contains(key []byte) (ok bool) {
	s.mux.RLock()
	defer s.mux.RUnlock()
	_, found := s.data[*bytesToString(key)]
	return found
}
func (s *Segment) Len() int {
s.mux.RLock()
defer s.mux.RUnlock()
return len(s.data)
} | map.go | 0.570571 | 0.405566 | map.go | starcoder |
package core
import (
"github.com/btcsuite/btcutil/base58"
"nebulas-p2p/crypto/hash"
"nebulas-p2p/util/byteutils"
)
// AddressType address type
type AddressType byte

// Known address types; the value doubles as the on-wire type byte.
const (
	AccountAddress AddressType = 0x57 + iota
	ContractAddress
)

// Fixed bytes used in the address layout and text form.
const (
	Padding byte = 0x19
	NebulasFaith = 'n'
)

const (
	// AddressPaddingLength the length of headpadding in byte
	AddressPaddingLength = 1
	// AddressPaddingIndex the index of headpadding bytes
	AddressPaddingIndex = 0
	// AddressTypeLength the length of address type in byte
	AddressTypeLength = 1
	// AddressTypeIndex the index of address type bytes
	AddressTypeIndex = 1
	// AddressDataLength the length of data of address in byte.
	AddressDataLength = 20
	// AddressChecksumLength the checksum of address in byte.
	AddressChecksumLength = 4
	// AddressLength the length of address in byte.
	AddressLength = AddressPaddingLength + AddressTypeLength + AddressDataLength + AddressChecksumLength
	// AddressDataEnd the end of the address data
	AddressDataEnd = 22
	// AddressBase58Length length of base58(Address.address)
	AddressBase58Length = 35
	// PublicKeyDataLength length of public key
	PublicKeyDataLength = 65
)
// Address design of nebulas address
/*
[Account Address]
Similar to Bitcoin and Ethereum, Nebulas also adopts elliptic curve algorithm as its basic encryption algorithm for Nebulas accounts.
The address is derived from **public key**, which is in turn derived from the **private key** that encrypted with user's **passphrase**.
Also we have the checksum design aiming to prevent a user from sending _Nas_ to a wrong user account accidentally due to entry of several incorrect characters.
The specific calculation formula is as follows:
	Content = ripemd160( sha3_256( Public Key ) )
	CheckSum = sha3_256( 0x19 + 0x57 + Content )[0:4]
	Address = base58( 0x19 + 0x57 + Content + CheckSum )
0x57 is a one-byte "type code" for account address, 0x19 is a one-byte fixed "padding"
The ripemd160 digest of SHA3-256 digest of a public key serve as the major component of an address,
for which another SHA3-256 digest should be conducted and the first 4 bytes should be used as a checksum. For example:
The final address of Nebulas Wallet should be: n1TV3sU6jyzR4rJ1D7jCAmtVGSntJagXZHC
[Smart Contract Address]
Calculating contract address differs slightly from account, passphrase of contract sender is not required but address & nonce.
For more information, please check (https://github.com/nebulasio/wiki/blob/master/tutorials/%5BEnglish%5D%20Nebulas%20101%20-%2003%20Smart%20Contracts%20JavaScript.md) and [rpc.sendTransaction](https://github.com/nebulasio/wiki/blob/master/rpc.md#sendtransaction).
Calculation formula is as follows:
	Content = ripemd160( sha3_256( tx.from, tx.nonce ) )
	CheckSum = sha3_256( 0x19 + 0x58 + Content )[0:4]
	Address = base58( 0x19 + 0x58 + Content + CheckSum )
0x58 is a one-byte "type code" for smart contract address, 0x19 is a one-byte fixed "padding"
[TODO]
In addition to standard address with 50 characters, we also support extended address in order to ensure the security of transfers conducted by users.
The traditional bank transfer design is used for reference:
In the process of a bank transfer, bank card number of the remittee should be verified, in addition to which the remitter must enter the name of the remittee.
The transfer can be correctly processed only when the bank card number and the name match each other.
The generating algorithm for extended address is described as follows:
	ExtData = Utf8Bytes({Nickname or any string})
	ExtHash = sha3_256(Data + ExtData)[0:2]
	ExtAddress = Account Address + Hex(ExtHash)
An extended address is generated through addition of 2-byte extended verification to the end of a standard address and contains a total of 54 characters.
Addition of extended information allows the addition of another element verification to the Nebulas Wallet APP. For example:
The standard address of Alice’s wallet is 0xdf4d22611412132d3e9bd322f82e2940674ec1bc03b20e40, and the extended address after addition of the nickname "alice" should be 0xdf4d22611412132d3e9bd322f82e2940674ec1bc03b20e40e345.
Alice tells Bob the extended address 0xdf4d22611412132d3e9bd322f82e2940674ec1bc03b20e40e345 and her nickname alice.
Bob enters 0xdf4d22611412132d3e9bd322f82e2940674ec1bc03b20e40e345 and alice in the Wallet App.
The Wallet App verifies the consistency between the wallet address and the nickname in order to avoid the circumstance that Bob enters the account number of another user by mistake.
*/
type Address struct {
	address byteutils.Hash
}
// ContractTxFrom is the raw sender-address bytes used when deriving a
// contract address.
type ContractTxFrom []byte

// ContractTxNonce is the raw transaction-nonce bytes used when deriving a
// contract address.
type ContractTxNonce []byte
// Bytes returns the raw address bytes.
func (a *Address) Bytes() []byte {
	raw := a.address
	return raw
}
// String returns the base58 text form of the address.
func (a *Address) String() string {
	encoded := base58.Encode(a.address)
	return encoded
}
// Equals compares two Address values. It returns true when both denote
// the same address bytes; two nil addresses are considered equal.
func (a *Address) Equals(b *Address) bool {
	switch {
	case a == nil:
		return b == nil
	case b == nil:
		return false
	default:
		return a.address.Equals(b.address)
	}
}
// Type returns the address type byte (AccountAddress or ContractAddress).
// It assumes a well-formed address of at least AddressTypeIndex+1 bytes.
func (a *Address) Type() AddressType {
	return AddressType(a.address[AddressTypeIndex])
}
// newAddress creates a new Address of the given type from the hash of args.
//
// Resulting buffer layout:
//   [0]      padding byte (0x19)
//   [1]      type byte (0x57 account / 0x58 contract)
//   [2:22]   ripemd160(sha3-256(args...))
//   [22:26]  checksum: first 4 bytes of sha3-256 over the preceding bytes
func newAddress(t AddressType, args ...[]byte) (*Address, error) {
	if len(args) == 0 {
		return nil, ErrInvalidArgument
	}
	// Only the two known address types are accepted.
	switch t {
	case AccountAddress, ContractAddress:
	default:
		return nil, ErrInvalidArgument
	}
	buffer := make([]byte, AddressLength)
	buffer[AddressPaddingIndex] = Padding
	buffer[AddressTypeIndex] = byte(t)
	sha := hash.Sha3256(args...)
	content := hash.Ripemd160(sha)
	copy(buffer[AddressTypeIndex+1:AddressDataEnd], content)
	cs := checkSum(buffer[:AddressDataEnd])
	copy(buffer[AddressDataEnd:], cs)
	return &Address{address: buffer}, nil
}
// NewAddressFromPublicKey derives an account address from an uncompressed
// public key, which must be exactly PublicKeyDataLength (65) bytes.
func NewAddressFromPublicKey(s []byte) (*Address, error) {
	if len(s) == PublicKeyDataLength {
		return newAddress(AccountAddress, s)
	}
	return nil, ErrInvalidArgument
}
// NewContractAddressFromData derives a contract address from the sender
// address and transaction nonce; both must be non-empty.
func NewContractAddressFromData(from ContractTxFrom, nonce ContractTxNonce) (*Address, error) {
	if len(from) > 0 && len(nonce) > 0 {
		return newAddress(ContractAddress, from, nonce)
	}
	return nil, ErrInvalidArgument
}
// AddressParse decodes the base58 text form of an address, checking its
// length and leading 'n' prefix before byte-level validation.
func AddressParse(s string) (*Address, error) {
	if len(s) != AddressBase58Length || s[0] != NebulasFaith {
		return nil, ErrInvalidAddressFormat
	}
	decoded := base58.Decode(s)
	return AddressParseFromBytes(decoded)
}
// AddressParseFromBytes parses an address from its raw byte form,
// validating length, padding byte, type byte, and trailing checksum.
func AddressParseFromBytes(b []byte) (*Address, error) {
	if len(b) != AddressLength || b[AddressPaddingIndex] != Padding {
		return nil, ErrInvalidAddressFormat
	}
	switch AddressType(b[AddressTypeIndex]) {
	case AccountAddress, ContractAddress:
	default:
		return nil, ErrInvalidAddressType
	}
	// The stored checksum must match one recomputed over the preceding bytes.
	if !byteutils.Equal(checkSum(b[:AddressDataEnd]), b[AddressDataEnd:]) {
		return nil, ErrInvalidAddressChecksum
	}
	return &Address{address: b}, nil
}
func checkSum(data []byte) []byte {
return hash.Sha3256(data)[:AddressChecksumLength]
} | core/address.go | 0.689933 | 0.518424 | address.go | starcoder |
package encoding
// Generic converters for multibyte character sets.

// An mbcsTrie contains the data to convert from the character set to Unicode.
// If a character would be encoded as "\x01\x02\x03", its unicode value would be found at t.children[1].children[2].children[3].rune
// children either is nil or has 256 elements (one per possible byte).
type mbcsTrie struct {
	// For leaf nodes, the Unicode character that is represented. The zero
	// value marks "no character ends here".
	char rune
	// For non-leaf nodes, the trie to decode the remainder of the character.
	children []mbcsTrie
}
// A MBCSTable holds the data to convert to and from Unicode: a byte trie
// for decoding and a rune-to-bytes map for encoding.
type MBCSTable struct {
	toUnicode mbcsTrie
	fromUnicode map[rune]string
}
// AddCharacter adds a character to the table. c is its Unicode code point
// and bytes contains its encoded form in the character set. The decoding
// trie is extended one level per encoded byte, and the reverse mapping is
// recorded for encoding.
func (table *MBCSTable) AddCharacter(c rune, bytes string) {
	if table.fromUnicode == nil {
		table.fromUnicode = make(map[rune]string)
	}
	table.fromUnicode[c] = bytes
	node := &table.toUnicode
	for i := 0; i < len(bytes); i++ {
		if node.children == nil {
			node.children = make([]mbcsTrie, 256)
		}
		node = &node.children[bytes[i]]
	}
	node.char = c
}
// Decoder returns a function that decodes one character from p by walking
// the trie byte by byte. It reports NO_ROOM when p is empty or ends
// mid-character, INVALID_CHAR (yielding U+FFFD) on an undecodable
// sequence, and otherwise the decoded rune plus the bytes consumed.
func (table *MBCSTable) Decoder() Decoder {
	return func(p []byte) (c rune, size int, status Status) {
		if len(p) == 0 {
			status = NO_ROOM
			return
		}
		// NUL passes through directly; the trie uses char==0 as "no char".
		if p[0] == 0 {
			return 0, 1, SUCCESS
		}
		trie := &table.toUnicode
		for trie.char == 0 {
			if trie.children == nil {
				// Dead end: no character has this byte prefix.
				return 0xfffd, 1, INVALID_CHAR
			}
			if len(p) < size+1 {
				// Input ends in the middle of a multibyte sequence.
				return 0, 0, NO_ROOM
			}
			trie = &trie.children[p[size]]
			size++
		}
		c = trie.char
		status = SUCCESS
		return
	}
}
func (table *MBCSTable) Encoder() Encoder {
return func(p []byte, c rune) (size int, status Status) {
bytes := table.fromUnicode[c]
if bytes == "" {
if len(p) > 0 {
p[0] = '?'
return 1, INVALID_CHAR
} else {
return 0, NO_ROOM
}
}
if len(p) < len(bytes) {
return 0, NO_ROOM
}
return copy(p, bytes), SUCCESS
}
} | mbcs.go | 0.654343 | 0.501587 | mbcs.go | starcoder |
package iconv
import (
"fmt"
"google.golang.org/protobuf/reflect/protoreflect"
"math"
"reflect"
"strconv"
)
// IsNil reports whether v is nil, including the case of a non-nil
// interface wrapping a typed nil (pointer, map, slice, chan, func,
// interface, or unsafe pointer).
func IsNil(v interface{}) (isNil bool) {
	if v == nil {
		return true
	}
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Chan,
		reflect.Func,
		reflect.Map,
		reflect.UnsafePointer,
		reflect.Interface,
		reflect.Slice,
		reflect.Ptr:
		return rv.IsNil()
	default:
		return false
	}
}
// ToString converts v to a string, returning def when v is nil or a nil
// pointer. Known scalar types and their pointer forms are formatted with
// strconv; fmt.Stringer and error are honored; anything else falls back
// to fmt.Sprintf("%v", v).
func ToString(v interface{}, def string) string {
	if v == nil {
		return def
	}
	switch v := v.(type) {
	case string:
		return v
	case []byte:
		return string(v)
	case bool:
		return strconv.FormatBool(v)
	case int:
		return strconv.FormatInt(int64(v), 10)
	case int8:
		return strconv.FormatInt(int64(v), 10)
	case int16:
		return strconv.FormatInt(int64(v), 10)
	case int32:
		return strconv.FormatInt(int64(v), 10)
	case int64:
		return strconv.FormatInt(int64(v), 10)
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint8:
		return strconv.FormatUint(uint64(v), 10)
	case uint16:
		return strconv.FormatUint(uint64(v), 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(uint64(v), 10)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case float64:
		return strconv.FormatFloat(float64(v), 'f', -1, 64)
	case complex64:
		return strconv.FormatComplex(complex128(v), 'f', -1, 64)
	case complex128:
		return strconv.FormatComplex(complex128(v), 'f', -1, 128)
	// Pointer forms: a nil pointer yields def, otherwise the pointee is
	// formatted exactly like the corresponding value case above.
	case *string:
		if v == nil {
			return def
		}
		return *v
	case *bool:
		if v == nil {
			return def
		}
		return strconv.FormatBool(*v)
	case *int:
		if v == nil {
			return def
		}
		return strconv.FormatInt(int64(*v), 10)
	case *int8:
		if v == nil {
			return def
		}
		return strconv.FormatInt(int64(*v), 10)
	case *int16:
		if v == nil {
			return def
		}
		return strconv.FormatInt(int64(*v), 10)
	case *int32:
		if v == nil {
			return def
		}
		return strconv.FormatInt(int64(*v), 10)
	case *int64:
		if v == nil {
			return def
		}
		return strconv.FormatInt(int64(*v), 10)
	case *uint:
		if v == nil {
			return def
		}
		return strconv.FormatUint(uint64(*v), 10)
	case *uint8:
		if v == nil {
			return def
		}
		return strconv.FormatUint(uint64(*v), 10)
	case *uint16:
		if v == nil {
			return def
		}
		return strconv.FormatUint(uint64(*v), 10)
	case *uint32:
		if v == nil {
			return def
		}
		return strconv.FormatUint(uint64(*v), 10)
	case *uint64:
		if v == nil {
			return def
		}
		return strconv.FormatUint(uint64(*v), 10)
	case *float32:
		if v == nil {
			return def
		}
		return strconv.FormatFloat(float64(*v), 'f', -1, 32)
	case *float64:
		if v == nil {
			return def
		}
		return strconv.FormatFloat(float64(*v), 'f', -1, 64)
	case *complex64:
		if v == nil {
			return def
		}
		return strconv.FormatComplex(complex128(*v), 'f', -1, 64)
	case *complex128:
		if v == nil {
			return def
		}
		return strconv.FormatComplex(complex128(*v), 'f', -1, 128)
	case fmt.Stringer:
		if v == nil {
			return def
		}
		return v.String()
	case error:
		if v == nil {
			return def
		}
		return v.Error()
	}
	return fmt.Sprintf("%v", v)
}
// ToInt64 converts v to int64, returning def when conversion fails.
func ToInt64(v interface{}, def int64) int64 {
	parsed, ok := ParseInt64(v)
	if !ok {
		return def
	}
	return parsed
}
// ToUInt64 converts v to uint64, returning def when conversion fails.
func ToUInt64(v interface{}, def uint64) uint64 {
	parsed, ok := ParseUInt64(v)
	if !ok {
		return def
	}
	return parsed
}
// I2Int is implemented by values that can report themselves as an int.
type I2Int interface{ ToInt() int }

// ProtoEnumNumber matches protobuf enum values, which expose their
// numeric value via Number().
type ProtoEnumNumber interface {
	Number() protoreflect.EnumNumber
}
// ParseInt64 attempts to convert v to int64 and reports whether the
// conversion succeeded. Signed/unsigned scalars and their pointer forms,
// decimal strings, I2Int, and ProtoEnumNumber are handled directly;
// other integer-kinded types fall through to reflection. Unsigned values
// above math.MaxInt64 fail.
func ParseInt64(v interface{}) (int64, bool) {
	if v == nil {
		return 0, false
	}
	{
		switch v := v.(type) {
		case int8:
			return int64(v), true
		case int16:
			return int64(v), true
		case int32:
			return int64(v), true
		case int64:
			return int64(v), true
		case int:
			return int64(v), true
		case uint8:
			return int64(v), true
		case uint16:
			return int64(v), true
		case uint32:
			return int64(v), true
		case uint64:
			if v <= math.MaxInt64 {
				return int64(v), true
			}
		case uint:
			if v <= math.MaxInt64 {
				return int64(v), true
			}
		case string:
			if vv, err := strconv.ParseInt(v, 10, 64); err == nil {
				return vv, true
			}
		case *int8:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *int16:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *int32:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *int64:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *int:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *uint8:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *uint16:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *uint32:
			if v == nil {
				return 0, false
			}
			return int64(*v), true
		case *uint64:
			if v == nil {
				return 0, false
			}
			if (*v) <= math.MaxInt64 {
				return int64(*v), true
			}
		case *uint:
			if v == nil {
				return 0, false
			}
			if (*v) <= math.MaxInt64 {
				return int64(*v), true
			}
		case *string:
			if v == nil {
				return 0, false
			}
			if vv, err := strconv.ParseInt(*v, 10, 64); err == nil {
				return vv, true
			}
		// NOTE: interface{}((*int64)(nil))!=nil
		// See: TestInterfaceNil
		case I2Int:
			if v == nil {
				return 0, false
			}
			return int64(v.ToInt()), true
		case ProtoEnumNumber:
			if v == nil {
				return 0, false
			}
			return int64(v.Number()), true
		}
	}
	// Reflection fallback for named types whose kind is an integer.
	val := reflect.Indirect(reflect.ValueOf(v))
	switch val.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return val.Int(), true
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		if vv := val.Uint(); vv <= math.MaxInt64 {
			return int64(vv), true
		}
	}
	return 0, false
}
func ParseUInt64(v interface{}) (uint64, bool) {
if v == nil {
return 0, false
}
{
switch v := v.(type) {
case int8:
if v < 0 {
return 0, false
}
return uint64(v), true
case int16:
if v < 0 {
return 0, false
}
return uint64(v), true
case int32:
if v < 0 {
return 0, false
}
return uint64(v), true
case int64:
if v < 0 {
return 0, false
}
return uint64(v), true
case int:
if v < 0 {
return 0, false
}
return uint64(v), true
case uint8:
return uint64(v), true
case uint16:
return uint64(v), true
case uint32:
return uint64(v), true
case uint64:
return uint64(v), true
case uint:
return uint64(v), true
case string:
if vv, err := strconv.ParseUint(v, 10, 64); err == nil {
return vv, true
}
case *int8:
if v == nil || *v < 0 {
return 0, false
}
return uint64(*v), true
case *int16:
if v == nil || *v < 0 {
return 0, false
}
return uint64(*v), true
case *int32:
if v == nil || *v < 0 {
return 0, false
}
return uint64(*v), true
case *int64:
if v == nil || *v < 0 {
return 0, false
}
return uint64(*v), true
case *int:
if v == nil || *v < 0 {
return 0, false
}
return uint64(*v), true
case *uint8:
if v == nil {
return 0, false
}
return uint64(*v), true
case *uint16:
if v == nil {
return 0, false
}
return uint64(*v), true
case *uint32:
if v == nil {
return 0, false
}
return uint64(*v), true
case *uint64:
if v == nil {
return 0, false
}
return uint64(*v), true
case *uint:
if v == nil {
return 0, false
}
return uint64(*v), true
case *string:
if v == nil {
return 0, false
}
if vv, err := strconv.ParseUint(*v, 10, 64); err == nil {
return vv, true
}
// NOTE: interface{}((*int64)(nil))!=nil
// See: TestInterfaceNil
case I2Int:
if v == nil {
return 0, false
}
return uint64(v.ToInt()), true
case ProtoEnumNumber:
if v == nil {
return 0, false
}
return uint64(v.Number()), true
}
}
val := reflect.Indirect(reflect.ValueOf(v))
switch val.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if vv := val.Int(); vv > 0 {
return uint64(vv), true
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return val.Uint(), true
}
return 0, false
} | iconv/iconv.go | 0.53437 | 0.400691 | iconv.go | starcoder |
package behaviors
//--------------------
// IMPORTS
//--------------------
import (
"time"
"github.com/tideland/gocells/cells"
)
//--------------------
// CONSTANTS
//--------------------
const (
	// TopicRate is the topic under which the rate behavior emits its
	// Rate payload for detected matching events.
	TopicRate = "rate"
)
//--------------------
// RATE BEHAVIOR
//--------------------
// RateCriterion is used by the rate behavior and has to return true if
// the passed event matches a criterion for rate measuring.
type RateCriterion func(event cells.Event) (bool, error)
// Rate describes the rate of events matching the given criterion. It
// contains the matching time, the duration from the last match to this
// one, and the highest, lowest, and average duration between matches.
type Rate struct {
	Time time.Time
	Duration time.Duration
	High time.Duration
	Low time.Duration
	Average time.Duration
}
// rateBehavior calculates the average rate of events matching a criterion
// over a sliding window of the most recent matches.
type rateBehavior struct {
	cell cells.Cell
	matches RateCriterion
	// count is the maximum number of durations retained in the window.
	count int
	// last is the timestamp of the previous matching event.
	last time.Time
	durations []time.Duration
}
// NewRateBehavior creates an event rate measuring behavior. Each time the
// criterion function returns true for a received event the duration
// between this and the last one is calculated and emitted together with
// the timestamp, plus a moving average, lowest, and highest duration over
// the last count matches. A "reset!" topic clears the stored values.
func NewRateBehavior(matches RateCriterion, count int) cells.Behavior {
	b := rateBehavior{
		matches:   matches,
		count:     count,
		last:      time.Now(),
		durations: []time.Duration{},
	}
	return &b
}
// Init implements the cells.Behavior interface and remembers the cell for
// later event emission.
func (b *rateBehavior) Init(c cells.Cell) error {
	b.cell = c
	return nil
}
// Terminate implements the cells.Behavior interface; there is nothing to
// clean up.
func (b *rateBehavior) Terminate() error {
	return nil
}
// ProcessEvent implements the cells.Behavior interface. A reset topic
// clears the collected durations; any other event is tested against the
// criterion and, on a match, a Rate payload with the current, highest,
// lowest, and average duration over the retained window is emitted.
func (b *rateBehavior) ProcessEvent(event cells.Event) error {
	switch event.Topic() {
	case cells.TopicReset:
		b.last = time.Now()
		b.durations = []time.Duration{}
	default:
		ok, err := b.matches(event)
		if err != nil {
			return err
		}
		if ok {
			current := event.Timestamp()
			duration := current.Sub(b.last)
			b.last = current
			b.durations = append(b.durations, duration)
			// Keep only the most recent count durations.
			if len(b.durations) > b.count {
				b.durations = b.durations[1:]
			}
			total := 0 * time.Nanosecond
			// 0x7FFFFFFFFFFFFFFF is math.MaxInt64: start at the largest
			// possible duration so any real value replaces it.
			low := 0x7FFFFFFFFFFFFFFF * time.Nanosecond
			high := 0 * time.Nanosecond
			for _, d := range b.durations {
				total += d
				if d < low {
					low = d
				}
				if d > high {
					high = d
				}
			}
			avg := total / time.Duration(len(b.durations))
			return b.cell.EmitNew(TopicRate, Rate{
				Time:     current,
				Duration: duration,
				High:     high,
				Low:      low,
				Average:  avg,
			})
		}
	}
	return nil
}
// Recover implements the cells.Behavior interface by resetting the
// measurement state after a panic.
func (b *rateBehavior) Recover(err interface{}) error {
	b.last = time.Now()
	b.durations = []time.Duration{}
	return nil
}
// EOF
package regex
import (
"fmt"
"io"
"netbsd.org/pkglint/histogram"
"regexp"
"time"
)
// Pattern is the textual form of a regular expression handled by Registry.
type Pattern string

// Registry caches compiled regular expressions and, optionally, profiling
// histograms of match counts and matching time.
type Registry struct {
	res map[Pattern]*regexp.Regexp
	rematch *histogram.Histogram
	renomatch *histogram.Histogram
	retime *histogram.Histogram
	profiling bool
}
// NewRegistry returns a Registry with an empty pattern cache and
// profiling disabled.
func NewRegistry() Registry {
	return Registry{
		res: make(map[Pattern]*regexp.Regexp),
	}
}
// Profiling enables profiling by allocating the histograms; repeated
// calls are no-ops.
func (r *Registry) Profiling() {
	if r.profiling {
		return
	}
	r.rematch = histogram.New()
	r.renomatch = histogram.New()
	r.retime = histogram.New()
	r.profiling = true
}
// Compile returns the compiled form of re, caching the result in the
// registry. It panics on an invalid pattern (via regexp.MustCompile).
// NOTE(review): the cache map is read and written without synchronization,
// so a Registry must not be shared between goroutines — confirm callers.
func (r *Registry) Compile(re Pattern) *regexp.Regexp {
	cre := r.res[re]
	if cre == nil {
		cre = regexp.MustCompile(string(re))
		r.res[re] = cre
	}
	return cre
}
// Consider defining an alternative CompileX method that implements the
// /x modifier to allow whitespace in the regular expression.
// This makes the regular expressions more readable.

// Match applies re to s and returns the submatches (nil when there is no
// match). With profiling enabled it also records per-pattern match counts
// and the time spent matching.
func (r *Registry) Match(s string, re Pattern) []string {
	if !r.profiling {
		return r.Compile(re).FindStringSubmatch(s)
	}
	before := time.Now()
	immediatelyBefore := time.Now()
	m := r.Compile(re).FindStringSubmatch(s)
	after := time.Now()
	// delay estimates the cost of reading the clock itself so it can be
	// subtracted from the measured matching time.
	delay := immediatelyBefore.UnixNano() - before.UnixNano()
	timeTaken := after.UnixNano() - immediatelyBefore.UnixNano() - delay
	r.retime.Add(string(re), int(timeTaken))
	if m != nil {
		r.rematch.Add(string(re), 1)
	} else {
		r.renomatch.Add(string(re), 1)
	}
	return m
}
// Matches reports whether s matches re, updating the profiling
// histograms when profiling is enabled.
func (r *Registry) Matches(s string, re Pattern) bool {
	matched := r.Compile(re).MatchString(s)
	if r.profiling {
		name := string(re)
		if matched {
			r.rematch.Add(name, 1)
		} else {
			r.renomatch.Add(name, 1)
		}
	}
	return matched
}
// Match1 matches s against re, which must have exactly one capture
// group, and returns that group's text on success.
func (r *Registry) Match1(s string, re Pattern) (matched bool, m1 string) {
	m := r.matchn(s, re, 1)
	if m == nil {
		return false, ""
	}
	return true, m[1]
}
// Match2 matches s against re, which must have exactly two capture
// groups, and returns their texts on success.
func (r *Registry) Match2(s string, re Pattern) (matched bool, m1, m2 string) {
	m := r.matchn(s, re, 2)
	if m == nil {
		return false, "", ""
	}
	return true, m[1], m[2]
}
// Match3 matches s against re, which must have exactly three capture
// groups, and returns their texts on success.
func (r *Registry) Match3(s string, re Pattern) (matched bool, m1, m2, m3 string) {
	m := r.matchn(s, re, 3)
	if m == nil {
		return false, "", "", ""
	}
	return true, m[1], m[2], m[3]
}
// ReplaceFirst replaces the first occurrence of re in s with replacement.
// It returns the submatch texts of the replaced occurrence (nil when
// there was no match) together with the resulting string.
func (r *Registry) ReplaceFirst(s string, re Pattern, replacement string) ([]string, string) {
	if m := r.Compile(re).FindStringSubmatchIndex(s); m != nil {
		replaced := s[:m[0]] + replacement + s[m[1]:]
		// m holds start/end index pairs; rebuild the submatch strings,
		// mapping absent groups (index -1) to "".
		mm := make([]string, len(m)/2)
		for i := 0; i < len(m); i += 2 {
			if m[i] < 0 {
				mm[i/2] = ""
			} else {
				mm[i/2] = s[m[i]:m[i+1]]
			}
		}
		return mm, replaced
	}
	return nil, s
}
// PrintStats writes the top profiling histogram entries to out; it is a
// no-op when profiling is disabled.
func (r *Registry) PrintStats(out io.Writer) {
	if !r.profiling {
		return
	}
	r.rematch.PrintStats(out, "rematch", 10)
	r.renomatch.PrintStats(out, "renomatch", 10)
	r.retime.PrintStats(out, "retime", 10)
}
func (r *Registry) matchn(s string, re Pattern, n int) []string {
if m := r.Match(s, re); m != nil {
if len(m) != 1+n {
panic(fmt.Sprintf("expected match%d, got match%d for %q", len(m)-1, n, re))
}
return m
}
return nil
} | regex/regex.go | 0.519765 | 0.425009 | regex.go | starcoder |
package simulation
import "github.com/pointlesssoft/godevs/pkg/modeling"
// AbstractSimulator is the DEVS abstract-simulator protocol: lifecycle hooks
// (Initialize/Exit), the simulation cycle (TA, Collect, Transition, Clear),
// and accessors for the attached model, the last/next event times, and the
// shared simulation Clock.
type AbstractSimulator interface {
	Initialize() // performs all the required operations before starting a simulation.
	Exit()       // performs all the required operations to exit after a simulation.
	TA() float64 // time advance function; time remaining until the next internal event.
	Collect()    // collection (output) function, invoked before Transition.
	Transition() // transition function; applies state changes for the current cycle.
	Clear()      // clears transient state after a simulation cycle.
	GetModel() modeling.Component // returns the Component attached to the simulator.
	GetTL() float64   // returns simulator's last simulation time.
	SetTL(tl float64) // sets simulator's last simulation time.
	GetTN() float64   // returns simulator's next timeout.
	SetTN(tn float64) // sets simulator's next timeout.
	GetClock() Clock  // returns abstract simulator's simulation Clock.
}
// NewAbstractSimulator builds an AbstractSimulator bound to the given clock,
// with both the last-event time and the next timeout starting at zero.
func NewAbstractSimulator(clock Clock) AbstractSimulator {
	return &abstractSimulator{clock: clock, tL: 0, tN: 0}
}
// abstractSimulator is the base implementation backing AbstractSimulator.
// Only the time bookkeeping and clock accessors are concrete here; the
// simulation-cycle methods below panic and must be provided by embedders.
type abstractSimulator struct {
	clock Clock   // Simulation Clock shared by the whole simulation.
	tL    float64 // Time of last event
	tN    float64 // Time of next event
}
// Initialize performs all the required operations before starting a simulation.
// Abstract: the base implementation panics and must be overridden.
func (a *abstractSimulator) Initialize() {
	panic("implement me")
}

// Exit performs all the required operations to exit after a simulation.
// Abstract: the base implementation panics and must be overridden.
func (a *abstractSimulator) Exit() {
	panic("implement me")
}

// TA is the time advance function. It returns the elapsed time for the next
// timeout. Abstract: the base implementation panics and must be overridden.
func (a *abstractSimulator) TA() float64 {
	panic("implement me")
}

// Collect is the collection function, invoked on imminent simulators before
// the transition function. Abstract: the base implementation panics.
func (a *abstractSimulator) Collect() {
	panic("implement me")
}

// Transition is the transition function. It deals with internal and external
// transitions. Abstract: the base implementation panics.
func (a *abstractSimulator) Transition() {
	panic("implement me")
}

// Clear performs all the required operations to clear the simulation state.
// Abstract: the base implementation panics.
func (a *abstractSimulator) Clear() {
	panic("implement me")
}

// GetModel returns simulator's DEVS component. Abstract: the base
// implementation panics because it holds no model of its own.
func (a *abstractSimulator) GetModel() modeling.Component {
	panic("implement me")
}
// GetTL returns the time of the last processed event.
func (a *abstractSimulator) GetTL() float64 {
	return a.tL
}

// SetTL records the time of the last processed event.
func (a *abstractSimulator) SetTL(tL float64) {
	a.tL = tL
}

// GetTN returns the time of the next scheduled event (simulator's timeout).
func (a *abstractSimulator) GetTN() float64 {
	return a.tN
}

// SetTN records the time of the next scheduled event.
func (a *abstractSimulator) SetTN(tN float64) {
	a.tN = tN
}

// GetClock returns the simulation Clock shared with the rest of the simulation.
func (a *abstractSimulator) GetClock() Clock {
	return a.clock
}
package taleslabmappers
import (
"github.com/johnfercher/taleslab/internal/talespireadapter/talespirecontracts"
"github.com/johnfercher/taleslab/pkg/taleslab/taleslabdomain/taleslabconsts"
"github.com/johnfercher/taleslab/pkg/taleslab/taleslabdomain/taleslabentities"
)
// TaleSpireSlabFromAssets builds a TaleSpire slab contract from the given
// assets: one contract asset per unique asset id, each carrying the layouts
// (coordinates and rotation) of every occurrence of that id in the input.
func TaleSpireSlabFromAssets(assets taleslabentities.Assets) *talespirecontracts.Slab {
	unique := getUniqueAssets(assets)
	slab := &talespirecontracts.Slab{
		MagicBytes:  taleslabconsts.MagicBytes,
		Version:     taleslabconsts.SlabVersion,
		AssetsCount: int16(len(unique)),
	}
	// Map iteration order is random, so the asset order in the slab is
	// nondeterministic (as in the original implementation).
	for _, u := range unique {
		layouts := getBoundFromAsset(u.Id, assets)
		slab.Assets = append(slab.Assets, &talespirecontracts.Asset{
			Id:           u.Id,
			LayoutsCount: int16(len(layouts)),
			Layouts:      layouts,
		})
	}
	return slab
}
// getUniqueAssets returns the first occurrence of each distinct asset id,
// keyed by the string form of the id bytes.
func getUniqueAssets(assets taleslabentities.Assets) map[string]*taleslabentities.Asset {
	unique := make(map[string]*taleslabentities.Asset)
	for _, asset := range assets {
		key := string(asset.Id)
		if _, seen := unique[key]; !seen {
			unique[key] = asset
		}
	}
	return unique
}
// getBoundFromAsset collects the placement (coordinates and rotation) of
// every asset whose id equals id, converted to the TaleSpire bounds contract.
func getBoundFromAsset(id []byte, assets taleslabentities.Assets) []*talespirecontracts.Bounds {
	bounds := []*talespirecontracts.Bounds{}
	for _, asset := range assets {
		if string(asset.Id) != string(id) {
			continue
		}
		bounds = append(bounds, &talespirecontracts.Bounds{
			Coordinates: &talespirecontracts.Vector3d{
				X: uint16(asset.Coordinates.X),
				Y: uint16(asset.Coordinates.Y),
				Z: uint16(asset.Coordinates.Z),
			},
			Rotation: uint16(asset.Rotation),
		})
	}
	return bounds
}
// AssetsFromTaleSpireSlab flattens a TaleSpire slab contract back into the
// internal asset list: one entity per (asset id, layout) pair. Note that the
// layout's rotation is not carried over (matching the original behavior).
func AssetsFromTaleSpireSlab(taleSpire *talespirecontracts.Slab) taleslabentities.Assets {
	assets := taleslabentities.Assets{}
	for _, contractAsset := range taleSpire.Assets {
		for _, layout := range contractAsset.Layouts {
			coords := &taleslabentities.Vector3d{
				X: int(layout.Coordinates.X),
				Y: int(layout.Coordinates.Y),
				Z: int(layout.Coordinates.Z),
			}
			assets = append(assets, &taleslabentities.Asset{
				Id:          contractAsset.Id,
				Coordinates: coords,
			})
		}
	}
	return assets
}
/*func entityAssetFromTaleSpire(taleSpire *talespirecontracts.Asset) *taleslabentities.Asset {
entity := &taleslabentities.Asset{
Id: taleSpire.Id,
}
for _, layout := range taleSpire.Layouts {
entity.Layouts = append(entity.Layouts, entityBoundsFromTaleSpire(layout))
}
return entity
}
func entityBoundsFromTaleSpire(taleSpire *talespirecontracts.Bounds) *taleslabentities.Bounds {
entity := &taleslabentities.Bounds{
Coordinates: &taleslabentities.Vector3d{
X: int(taleSpire.Coordinates.X),
Y: int(taleSpire.Coordinates.Y),
Z: int(taleSpire.Coordinates.Z),
},
Rotation: int(taleSpire.Rotation),
}
return entity
}*/ | pkg/taleslab/taleslabmappers/slabmapper.go | 0.644673 | 0.426441 | slabmapper.go | starcoder |
package three
import (
"math"
"math/rand"
"strconv"
)
// NewVector3 returns a new 3-component vector with the given coordinates.
func NewVector3(x, y, z float64) *Vector3 {
	return &Vector3{x, y, z, true}
}

// Vector3 is a 3D vector of float64 components, ported from three.js.
//
// NOTE(review): unlike three.js, every method below uses a VALUE receiver and
// returns a pointer to the modified copy — the vector the caller invoked the
// method on is never mutated. Chained calls work; in-place updates do not.
type Vector3 struct {
	X float64
	Y float64
	Z float64
	IsVector3 bool
}

// Package-level scratch values reused by ProjectOnPlane/Reflect and the
// quaternion-based rotations. Shared mutable state: not safe for concurrent use.
var _vector3 = NewVector3(0, 0, 0)
var _quaternionV3 = NewQuaternion(0, 0, 0, 1)
// Set returns a copy of v with all three components replaced.
func (v Vector3) Set(x, y, z float64) *Vector3 {
	v.X = x
	v.Y = y
	v.Z = z
	return &v
}

// SetScalar returns a copy of v with every component set to scalar.
func (v Vector3) SetScalar(scalar float64) *Vector3 {
	v.X = scalar
	v.Y = scalar
	v.Z = scalar
	return &v
}

// SetX returns a copy of v with the X component replaced.
func (v Vector3) SetX(x float64) *Vector3 {
	v.X = x
	return &v
}

// SetY returns a copy of v with the Y component replaced.
func (v Vector3) SetY(y float64) *Vector3 {
	v.Y = y
	return &v
}

// SetZ returns a copy of v with the Z component replaced.
func (v Vector3) SetZ(z float64) *Vector3 {
	v.Z = z
	return &v
}

// SetComponent returns a copy of v with component index (0=X, 1=Y, 2=Z)
// set to value. It panics on any other index.
func (v Vector3) SetComponent(index int, value float64) *Vector3 {
	switch index {
	default:
		panic("index is out of range: " + strconv.Itoa(index))
	case 0:
		v.X = value
	case 1:
		v.Y = value
	case 2:
		v.Z = value
	}
	return &v
}

// GetComponent returns component index (0=X, 1=Y, 2=Z); it panics on any
// other index.
func (v Vector3) GetComponent(index int) float64 {
	switch index {
	default:
		panic("index is out of range: " + strconv.Itoa(index))
	case 0:
		return v.X
	case 1:
		return v.Y
	case 2:
		return v.Z
	}
}

// Clone returns a freshly allocated copy of v.
func (v Vector3) Clone() *Vector3 {
	return NewVector3(v.X, v.Y, v.Z)
}

// Copy returns a copy of v whose components are taken from w.
func (v Vector3) Copy(w Vector3) *Vector3 {
	v.X = w.X
	v.Y = w.Y
	v.Z = w.Z
	return &v
}
// Add returns v + w, componentwise.
func (v Vector3) Add(w Vector3) *Vector3 {
	v.X += w.X
	v.Y += w.Y
	v.Z += w.Z
	return &v
}

// AddScalar returns v with s added to every component.
func (v Vector3) AddScalar(s float64) *Vector3 {
	v.X += s
	v.Y += s
	v.Z += s
	return &v
}

// AddVectors returns a + b, ignoring v's own components.
func (v Vector3) AddVectors(a, b Vector3) *Vector3 {
	v.X = a.X + b.X
	v.Y = a.Y + b.Y
	v.Z = a.Z + b.Z
	return &v
}

// AddScaledVector returns v + w*s.
func (v Vector3) AddScaledVector(w Vector3, s float64) *Vector3 {
	v.X += w.X * s
	v.Y += w.Y * s
	v.Z += w.Z * s
	return &v
}

// Sub returns v - w, componentwise.
func (v Vector3) Sub(w Vector3) *Vector3 {
	v.X -= w.X
	v.Y -= w.Y
	v.Z -= w.Z
	return &v
}

// SubScalar returns v with s subtracted from every component.
func (v Vector3) SubScalar(s float64) *Vector3 {
	v.X -= s
	v.Y -= s
	v.Z -= s
	return &v
}

// SubVectors returns a - b, ignoring v's own components.
func (v Vector3) SubVectors(a, b Vector3) *Vector3 {
	v.X = a.X - b.X
	v.Y = a.Y - b.Y
	v.Z = a.Z - b.Z
	return &v
}

// Multiply returns the componentwise (Hadamard) product v * w.
func (v Vector3) Multiply(w Vector3) *Vector3 {
	v.X *= w.X
	v.Y *= w.Y
	v.Z *= w.Z
	return &v
}

// MultiplyScalar returns v scaled by scalar.
func (v Vector3) MultiplyScalar(scalar float64) *Vector3 {
	v.X *= scalar
	v.Y *= scalar
	v.Z *= scalar
	return &v
}

// MultiplyVectors returns the componentwise product a * b, ignoring v's own
// components.
func (v Vector3) MultiplyVectors(a, b Vector3) *Vector3 {
	v.X = a.X * b.X
	v.Y = a.Y * b.Y
	v.Z = a.Z * b.Z
	return &v
}
// ApplyEuler rotates v by the rotation described by euler (via a quaternion).
// Uses the shared _quaternionV3 scratch value; not safe for concurrent use.
func (v Vector3) ApplyEuler(euler Euler) *Vector3 {
	return v.ApplyQuaternion(*_quaternionV3.SetFromEuler(euler, true))
}

// ApplyAxisAngle rotates v by angle radians around axis (assumed normalized).
// Uses the shared _quaternionV3 scratch value; not safe for concurrent use.
func (v Vector3) ApplyAxisAngle(axis Vector3, angle float64) *Vector3 {
	return v.ApplyQuaternion(*_quaternionV3.SetFromAxisAngle(axis, angle))
}

// ApplyMatrix3 returns m * v for a 3x3 column-major matrix.
func (v Vector3) ApplyMatrix3(m Matrix3) *Vector3 {
	x, y, z := v.X, v.Y, v.Z
	me := m.Elements
	v.X = me[0]*x + me[3]*y + me[6]*z
	v.Y = me[1]*x + me[4]*y + me[7]*z
	v.Z = me[2]*x + me[5]*y + me[8]*z
	return &v
}

// ApplyNormalMatrix applies the normal matrix m and renormalizes the result.
func (v Vector3) ApplyNormalMatrix(m Matrix3) *Vector3 {
	return v.ApplyMatrix3(m).Normalize()
}

// ApplyMatrix4 applies the 4x4 column-major matrix m to v as a position,
// including the perspective divide by the resulting w coordinate.
func (v Vector3) ApplyMatrix4(m Matrix4) *Vector3 {
	x, y, z := v.X, v.Y, v.Z
	e := m.Elements
	w := 1 / (e[3]*x + e[7]*y + e[11]*z + e[15])
	v.X = (e[0]*x + e[4]*y + e[8]*z + e[12]) * w
	v.Y = (e[1]*x + e[5]*y + e[9]*z + e[13]) * w
	v.Z = (e[2]*x + e[6]*y + e[10]*z + e[14]) * w
	return &v
}

// ApplyQuaternion rotates v by the (assumed unit) quaternion q, computing
// q * v * q⁻¹ expanded inline.
func (v Vector3) ApplyQuaternion(q Quaternion) *Vector3 {
	x, y, z := v.X, v.Y, v.Z
	qx, qy, qz, qw := q.X(), q.Y(), q.Z(), q.W()
	// calculate quat * vector
	ix := qw*x + qy*z - qz*y
	iy := qw*y + qz*x - qx*z
	iz := qw*z + qx*y - qy*x
	iw := -qx*x - qy*y - qz*z
	// calculate result * inverse quat
	v.X = ix*qw + iw*-qx + iy*-qz - iz*-qy
	v.Y = iy*qw + iw*-qy + iz*-qx - ix*-qz
	v.Z = iz*qw + iw*-qz + ix*-qy - iy*-qx
	return &v
}

// Project maps v from world space into normalized device coordinates by
// applying the camera's world-inverse and then its projection matrix.
func (v Vector3) Project(matrixWorldInverse, projectionMatrix Matrix4) *Vector3 {
	return v.ApplyMatrix4(matrixWorldInverse).ApplyMatrix4(projectionMatrix)
}

// Unproject is the inverse of Project: NDC back to world space.
func (v Vector3) Unproject(projectionMatrixInverse, matrixWorld Matrix4) *Vector3 {
	return v.ApplyMatrix4(projectionMatrixInverse).ApplyMatrix4(matrixWorld)
}

// TransformDirection applies only the rotational part of the affine matrix m
// (the vector is interpreted as a direction) and normalizes the result.
func (v Vector3) TransformDirection(m Matrix4) *Vector3 {
	// input: THREE.Matrix4 affine matrix
	// vector interpreted as a direction
	x, y, z := v.X, v.Y, v.Z
	e := m.Elements
	v.X = e[0]*x + e[4]*y + e[8]*z
	v.Y = e[1]*x + e[5]*y + e[9]*z
	v.Z = e[2]*x + e[6]*y + e[10]*z
	return v.Normalize()
}
// Divide returns the componentwise quotient v / w (no zero check; a zero
// component in w yields ±Inf or NaN).
func (v Vector3) Divide(w Vector3) *Vector3 {
	v.X /= w.X
	v.Y /= w.Y
	v.Z /= w.Z
	return &v
}

// DivideScalar returns v scaled by 1/scalar (scalar==0 yields ±Inf/NaN).
func (v Vector3) DivideScalar(scalar float64) *Vector3 {
	return v.MultiplyScalar(1 / scalar)
}

// Min returns the componentwise minimum of v and w.
func (v Vector3) Min(w Vector3) *Vector3 {
	v.X = math.Min(v.X, w.X)
	v.Y = math.Min(v.Y, w.Y)
	v.Z = math.Min(v.Z, w.Z)
	return &v
}

// Max returns the componentwise maximum of v and w.
func (v Vector3) Max(w Vector3) *Vector3 {
	v.X = math.Max(v.X, w.X)
	v.Y = math.Max(v.Y, w.Y)
	v.Z = math.Max(v.Z, w.Z)
	return &v
}

// Clamp limits each component of v to the range [min, max].
func (v Vector3) Clamp(min, max Vector3) *Vector3 {
	// assumes min < max, componentwise
	v.X = math.Max(min.X, math.Min(max.X, v.X))
	v.Y = math.Max(min.Y, math.Min(max.Y, v.Y))
	v.Z = math.Max(min.Z, math.Min(max.Z, v.Z))
	return &v
}

// ClampScalar limits every component of v to the range [minVal, maxVal].
func (v Vector3) ClampScalar(minVal, maxVal float64) *Vector3 {
	v.X = math.Max(minVal, math.Min(maxVal, v.X))
	v.Y = math.Max(minVal, math.Min(maxVal, v.Y))
	v.Z = math.Max(minVal, math.Min(maxVal, v.Z))
	return &v
}

// ClampLength rescales v so its Euclidean length lies within [min, max],
// preserving direction. A zero-length vector stays at the origin.
func (v Vector3) ClampLength(min, max float64) *Vector3 {
	length := v.Length()
	if length == 0 {
		length = 1
	}
	return v.DivideScalar(length).MultiplyScalar(math.Max(min, math.Min(max, length)))
}

// Floor rounds each component down to the nearest integer.
func (v Vector3) Floor() *Vector3 {
	v.X = math.Floor(v.X)
	v.Y = math.Floor(v.Y)
	v.Z = math.Floor(v.Z)
	return &v
}

// Ceil rounds each component up to the nearest integer.
func (v Vector3) Ceil() *Vector3 {
	v.X = math.Ceil(v.X)
	v.Y = math.Ceil(v.Y)
	v.Z = math.Ceil(v.Z)
	return &v
}

// Round rounds each component to the nearest integer (half away from zero).
func (v Vector3) Round() *Vector3 {
	v.X = math.Round(v.X)
	v.Y = math.Round(v.Y)
	v.Z = math.Round(v.Z)
	return &v
}
// RoundToZero truncates each component toward zero: ceil for negative
// values, floor for non-negative ones, which is exactly math.Trunc.
func (v Vector3) RoundToZero() *Vector3 {
	v.X = math.Trunc(v.X)
	v.Y = math.Trunc(v.Y)
	v.Z = math.Trunc(v.Z)
	return &v
}
// Negate flips the sign of every component.
func (v Vector3) Negate() *Vector3 {
	v.X = -v.X
	v.Y = -v.Y
	v.Z = -v.Z
	return &v
}

// Dot returns the dot product of v and w.
func (v Vector3) Dot(w Vector3) float64 {
	return v.X*w.X + v.Y*w.Y + v.Z*w.Z
}

// LengthSq returns the squared Euclidean length of v (cheaper than Length).
func (v Vector3) LengthSq() float64 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}

// Length returns the Euclidean length of v.
func (v Vector3) Length() float64 {
	return math.Sqrt(v.X*v.X + v.Y*v.Y + v.Z*v.Z)
}

// ManhattanLength returns the L1 norm |X| + |Y| + |Z|.
func (v Vector3) ManhattanLength() float64 {
	return math.Abs(v.X) + math.Abs(v.Y) + math.Abs(v.Z)
}

// Normalize scales v to unit length; a zero vector is returned unchanged.
func (v Vector3) Normalize() *Vector3 {
	length := v.Length()
	if length == 0 {
		length = 1
	}
	return v.DivideScalar(length)
}

// SetLength rescales v to the given length, preserving direction.
func (v Vector3) SetLength(length float64) *Vector3 {
	return v.Normalize().MultiplyScalar(length)
}

// Lerp linearly interpolates from v toward w by alpha (0 yields v, 1 yields w).
func (v Vector3) Lerp(w Vector3, alpha float64) *Vector3 {
	v.X += (w.X - v.X) * alpha
	v.Y += (w.Y - v.Y) * alpha
	v.Z += (w.Z - v.Z) * alpha
	return &v
}

// LerpVectors sets v to the linear interpolation between v1 and v2 by alpha,
// ignoring v's own components.
func (v Vector3) LerpVectors(v1, v2 Vector3, alpha float64) *Vector3 {
	v.X = v1.X + (v2.X-v1.X)*alpha
	v.Y = v1.Y + (v2.Y-v1.Y)*alpha
	v.Z = v1.Z + (v2.Z-v1.Z)*alpha
	return &v
}

// Cross returns the cross product v × a. The original "bug?" note appears to
// concern the value-receiver semantics: v here is already a copy, so passing
// it to CrossVectors computes the correct product without aliasing.
func (v Vector3) Cross(a Vector3) *Vector3 { // bug?
	return v.CrossVectors(v, a)
}

// CrossVectors sets v to the cross product a × b, ignoring v's own components.
func (v Vector3) CrossVectors(a, b Vector3) *Vector3 {
	ax, ay, az := a.X, a.Y, a.Z
	bx, by, bz := b.X, b.Y, b.Z
	v.X = ay*bz - az*by
	v.Y = az*bx - ax*bz
	v.Z = ax*by - ay*bx
	return &v
}
// ProjectOnVector returns the projection of v onto w; if w is the zero
// vector the result is the zero vector.
func (v Vector3) ProjectOnVector(w Vector3) *Vector3 {
	denominator := w.LengthSq()
	if denominator == 0 {
		return v.Set(0, 0, 0)
	}
	scalar := w.Dot(v) / denominator
	return v.Copy(w).MultiplyScalar(scalar)
}

// ProjectOnPlane returns the projection of v onto the plane with the given
// normal (v minus its component along planeNormal). Uses the shared _vector3
// scratch value; not safe for concurrent use.
func (v Vector3) ProjectOnPlane(planeNormal Vector3) *Vector3 {
	_vector3.Copy(v).ProjectOnVector(planeNormal)
	return v.Sub(*_vector3)
}

// Reflect mirrors the incident vector v off the plane orthogonal to normal.
// Uses the shared _vector3 scratch value; not safe for concurrent use.
func (v Vector3) Reflect(normal Vector3) *Vector3 {
	// reflect incident vector off plane orthogonal to normal
	// normal is assumed to have unit length
	return v.Sub(*_vector3.Copy(normal).MultiplyScalar(2 * v.Dot(normal)))
}
// AngleTo returns the angle between v and w in radians, in [0, π]. If either
// vector has zero length the angle is defined as π/2, matching three.js.
//
// BUG FIX: the denominator previously used w.LengthSq() twice instead of
// v.LengthSq()*w.LengthSq(), and theta used v.Dot(v) instead of v.Dot(w),
// which made the result independent of the actual angle.
func (v Vector3) AngleTo(w Vector3) float64 {
	denominator := math.Sqrt(v.LengthSq() * w.LengthSq())
	if denominator == 0 {
		return math.Pi / 2
	}
	theta := v.Dot(w) / denominator
	// clamp, to handle numerical problems
	return math.Acos(Clamp(theta, -1, 1))
}
// DistanceTo returns the Euclidean distance between v and w.
func (v Vector3) DistanceTo(w Vector3) float64 {
	return math.Sqrt(v.DistanceToSquared(w))
}

// DistanceToSquared returns the squared Euclidean distance between v and w.
func (v Vector3) DistanceToSquared(w Vector3) float64 {
	dx, dy, dz := v.X-w.X, v.Y-w.Y, v.Z-w.Z
	return dx*dx + dy*dy + dz*dz
}

// ManhattanDistanceTo returns the L1 distance between v and w.
func (v Vector3) ManhattanDistanceTo(w Vector3) float64 {
	return math.Abs(v.X-w.X) + math.Abs(v.Y-w.Y) + math.Abs(v.Z-w.Z)
}

// SetFromSpherical sets v from spherical coordinates s.
func (v Vector3) SetFromSpherical(s Spherical) *Vector3 {
	return v.SetFromSphericalCoords(s.Radius, s.Phi, s.Theta)
}

// SetFromSphericalCoords sets v from spherical coordinates: phi is the polar
// angle from the +Y axis, theta the azimuth around Y.
func (v Vector3) SetFromSphericalCoords(radius, phi, theta float64) *Vector3 {
	sinPhiRadius := math.Sin(phi) * radius
	v.X = sinPhiRadius * math.Sin(theta)
	v.Y = math.Cos(phi) * radius
	v.Z = sinPhiRadius * math.Cos(theta)
	return &v
}

// SetFromCylindrical sets v from cylindrical coordinates c.
func (v Vector3) SetFromCylindrical(c Cylindrical) *Vector3 {
	return v.SetFromCylindricalCoords(c.Radius, c.Theta, c.Y)
}

// SetFromCylindricalCoords sets v from cylindrical coordinates: theta is the
// azimuth around the Y axis, y the height.
func (v Vector3) SetFromCylindricalCoords(radius, theta, y float64) *Vector3 {
	v.X = radius * math.Sin(theta)
	v.Y = y
	v.Z = radius * math.Cos(theta)
	return &v
}
// SetFromMatrixPosition sets v to the translation column of the 4x4
// column-major matrix m.
func (v Vector3) SetFromMatrixPosition(m Matrix4) *Vector3 {
	e := m.Elements
	v.X = e[12]
	v.Y = e[13]
	v.Z = e[14]
	return &v
}

// SetFromMatrixScale sets v to the per-axis scale encoded in m (the lengths
// of its first three columns).
func (v Vector3) SetFromMatrixScale(m Matrix4) *Vector3 {
	sx := v.SetFromMatrixColumn(m, 0).Length()
	sy := v.SetFromMatrixColumn(m, 1).Length()
	sz := v.SetFromMatrixColumn(m, 2).Length()
	v.X = sx
	v.Y = sy
	v.Z = sz
	return &v
}

// SetFromMatrixColumn sets v to the first three entries of column index of
// the 4x4 column-major matrix m.
func (v Vector3) SetFromMatrixColumn(m Matrix4, index int) *Vector3 {
	elems := []float64{}
	for i := 0; i < 3; i++ {
		elems = append(elems, m.Elements[index*4+i])
	}
	return v.FromArray(elems, 0)
}

// SetFromMatrix3Column sets v to column index of the 3x3 column-major
// matrix m.
func (v Vector3) SetFromMatrix3Column(m Matrix3, index int) *Vector3 {
	elems := []float64{}
	for i := 0; i < 3; i++ {
		elems = append(elems, m.Elements[index*3+i])
	}
	return v.FromArray(elems, 0)
}
// Equals reports exact componentwise equality (no epsilon tolerance).
func (v Vector3) Equals(w Vector3) bool {
	return w.X == v.X && w.Y == v.Y && w.Z == v.Z
}

// FromArray sets v from three consecutive values of array starting at
// offset; it panics when the array is too short.
func (v Vector3) FromArray(array []float64, offset int) *Vector3 {
	if len(array) < offset+3 {
		panic("array length should be greater than offset+3")
	}
	v.X = array[offset]
	v.Y = array[offset+1]
	v.Z = array[offset+2]
	return &v
}

// ToArray writes v's components into array at offset and returns the array;
// it panics when the array is too short.
func (v Vector3) ToArray(array []float64, offset int) []float64 {
	if len(array) < offset+3 {
		panic("array length should be greater than offset+3")
	}
	array[offset] = v.X
	array[offset+1] = v.Y
	array[offset+2] = v.Z
	return array
}

// Random sets each component to a uniform pseudo-random value in [0, 1)
// using the global math/rand source.
func (v Vector3) Random() *Vector3 {
	v.X = rand.Float64()
	v.Y = rand.Float64()
	v.Z = rand.Float64()
	return &v
}
package jade
import (
"fmt"
"runtime"
"strings"
)
// Tree is the representation of a single parsed template.
type tree struct {
Name string // name of the template represented by the tree.
ParseName string // name of the top-level template during parsing, for error messages.
Root *listNode // top-level root of the tree.
text string // text parsed to create the template (or its parent)
// Parsing only; cleared after parse.
funcs []map[string]interface{}
lex *lexer
token [3]item // three-token lookahead for parser.
peekCount int
vars []string // variables defined at the moment.
}
// Copy returns a copy of the tree carrying only its durable state (names,
// root node, source text); parsing-only state is discarded. A nil receiver
// yields nil.
func (t *tree) Copy() *tree {
	if t == nil {
		return nil
	}
	c := tree{
		Name:      t.Name,
		ParseName: t.ParseName,
		Root:      t.Root.CopyList(),
		text:      t.text,
	}
	return &c
}
// Parse returns a map from template name to parse.Tree, created by parsing the
// templates described in the argument string. The top-level template will be
// given the specified name. If an error is encountered, parsing stops and an
// empty map is returned with the error.
/*
func Parse(name, text, LeftDelim, RightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
treeSet = make(map[string]*Tree)
t := New(name)
t.text = text
_, err = t.Parse(text, LeftDelim, RightDelim, treeSet, funcs...)
return
}
// */
// next returns the next token, consuming pushed-back lookahead first and
// pulling from the lexer only when the lookahead buffer is empty.
func (t *tree) next() item {
	if t.peekCount > 0 {
		t.peekCount--
	} else {
		t.token[0] = t.lex.nextItem()
	}
	return t.token[t.peekCount]
}

// backup backs the input stream up one token.
func (t *tree) backup() {
	t.peekCount++
}

// backup2 backs the input stream up two tokens.
// The zeroth token is already there.
func (t *tree) backup2(t1 item) {
	t.token[1] = t1
	t.peekCount = 2
}

// backup3 backs the input stream up three tokens.
// The zeroth token is already there.
func (t *tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
	t.token[1] = t1
	t.token[2] = t2
	t.peekCount = 3
}

// peek returns but does not consume the next token.
func (t *tree) peek() item {
	if t.peekCount > 0 {
		return t.token[t.peekCount-1]
	}
	t.peekCount = 1
	t.token[0] = t.lex.nextItem()
	return t.token[0]
}
/*
// nextNonSpace returns the next non-space token.
func (t *Tree) nextNonSpace() (token item) {
for {
token = t.next()
if token.typ != itemSpace {
break
}
}
return token
}
// peekNonSpace returns but does not consume the next non-space token.
func (t *Tree) peekNonSpace() (token item) {
for {
token = t.next()
if token.typ != itemSpace {
break
}
}
t.backup()
return token
}
// */
// Parsing.
// newTree allocates a fresh parse tree with the given name and optional
// function maps.
func newTree(name string, funcs ...map[string]interface{}) *tree {
	t := tree{Name: name, funcs: funcs}
	return &t
}
// ErrorContext returns a textual representation of the location of node n in
// the input text (as "name:line:column") plus up to 20 characters of the
// node's own text. The receiver is only used when the node does not carry a
// pointer to its tree, which can occur in old code.
func (t *tree) ErrorContext(n node) (location, context string) {
	pos := int(n.position())
	tree := n.tree()
	if tree == nil {
		tree = t
	}
	text := tree.text[:pos]
	byteNum := strings.LastIndex(text, "\n")
	if byteNum == -1 {
		byteNum = pos // On first line.
	} else {
		byteNum++ // After the newline.
		byteNum = pos - byteNum
	}
	lineNum := 1 + strings.Count(text, "\n")
	context = n.String()
	if len(context) > 20 {
		context = fmt.Sprintf("%.20s...", context)
	}
	return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
}

// errorf formats the error, prefixing the template name and current line,
// and terminates processing by panicking (recovered by (*tree).recover).
func (t *tree) errorf(format string, args ...interface{}) {
	t.Root = nil
	format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
	panic(fmt.Errorf(format, args...))
}

// error terminates processing with the given error via errorf.
func (t *tree) error(err error) {
	t.errorf("%s", err)
}
/*
// expect consumes the next token and guarantees it has the required type.
func (t *Tree) expect(expected itemType, context string) item {
token := t.nextNonSpace()
if token.typ != expected {
t.unexpected(token, context)
}
return token
}
// expectOneOf consumes the next token and guarantees it has one of the required types.
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
token := t.nextNonSpace()
if token.typ != expected1 && token.typ != expected2 {
t.unexpected(token, context)
}
return token
}
// */
// unexpected reports an unexpected token within the named parsing context
// and terminates processing via errorf.
func (t *tree) unexpected(token item, context string) {
	t.errorf("unexpected %s in %s", token, context)
}
// recover turns a panic raised during parsing into an error returned from
// the top level of Parse. Runtime errors (genuine bugs) are re-panicked;
// anything else is assumed to be the error raised by errorf: parser state is
// torn down and the error is stored in *errp.
func (t *tree) recover(errp *error) {
	e := recover()
	if e == nil {
		return
	}
	if _, ok := e.(runtime.Error); ok {
		panic(e)
	}
	if t != nil {
		t.stopParse()
	}
	*errp = e.(error)
}
// startParse initializes the parser state for a run over the given lexer,
// resetting the root and seeding the variable scope with "$".
func (t *tree) startParse(funcs []map[string]interface{}, lex *lexer) {
	t.Root = nil
	t.lex = lex
	t.vars = []string{"$"}
	t.funcs = funcs
}

// stopParse releases parsing-only state once a parse run has finished.
func (t *tree) stopParse() {
	t.lex = nil
	t.vars = nil
	t.funcs = nil
}
// Parse parses the template definition string to construct a representation of
// the template for execution. If either action delimiter string is empty, the
// default ("{{" or "}}") is used. Embedded template definitions are added to
// the treeSet map. Panics raised by errorf are converted into the returned err.
func (t *tree) Parse(text, LeftDelim, RightDelim string, treeSet map[string]*tree, funcs ...map[string]interface{}) (tree *tree, err error) {
	defer t.recover(&err)
	t.ParseName = t.Name
	t.startParse(funcs, lex(t.Name, text, LeftDelim, RightDelim))
	t.text = text
	t.parse(treeSet)
	t.add(treeSet)
	t.stopParse()
	return t, nil
}

// add adds the tree to treeSet, replacing an absent or empty entry of the
// same name; a duplicate non-empty definition is an error.
func (t *tree) add(treeSet map[string]*tree) {
	tree := treeSet[t.Name]
	if tree == nil || isEmptyTree(tree.Root) {
		treeSet[t.Name] = t
		return
	}
	if !isEmptyTree(t.Root) {
		t.errorf("template: multiple definition of template %q", t.Name)
	}
}

// isEmptyTree reports whether this tree (node) is empty of everything but
// space: nil is empty, a list is empty iff all its children are, and any
// other node kind is unexpected here and panics.
func isEmptyTree(n node) bool {
	switch n := n.(type) {
	case nil:
		return true
	// case *ActionNode:
	case *listNode:
		for _, node := range n.Nodes {
			if !isEmptyTree(node) {
				return false
			}
		}
		return true
	// case *TextNode:
	// 	return len(bytes.TrimSpace(n.Text)) == 0
	default:
		panic("unknown node: " + n.String())
	}
}
package waffleiron
import (
"github.com/pkg/errors"
"github.com/hashicorp/go-multierror"
)
// And returns a parser that runs p0 and then runs p1, pairing their results.
func And[T, U any](p0 Parser[T], p1 Parser[U]) Parser[Tuple2[T, U]] {
	return Parser[Tuple2[T, U]]{p: andParser[T, U]{p0, p1}}
}

// andParser sequences two parsers.
type andParser[T, U any] struct {
	p0 Parser[T]
	p1 Parser[U]
}

// parse runs p0 then p1 from the current position and fails as soon as
// either sub-parser fails. No backtracking is done here; callers that need
// it wrap the whole sequence in r.try (see choiceParser).
func (p andParser[T, U]) parse(r *reader) (Tuple2[T, U], error) {
	a, err := p.p0.parse(r)
	if err != nil {
		return Tuple2[T, U]{}, err
	}
	b, err := p.p1.parse(r)
	if err != nil {
		return Tuple2[T, U]{}, err
	}
	return NewTuple2(a, b), nil
}
// And3 returns a parser that runs p0, then p1, then p2, combining their
// results into a Tuple3.
func And3[T, U, V any](p0 Parser[T], p1 Parser[U], p2 Parser[V]) Parser[Tuple3[T, U, V]] {
	return Parser[Tuple3[T, U, V]]{p: and3Parser[T, U, V]{p0, p1, p2}}
}

// and3Parser sequences three parsers.
type and3Parser[T, U, V any] struct {
	p0 Parser[T]
	p1 Parser[U]
	p2 Parser[V]
}

// parse runs the three sub-parsers in order, failing on the first error.
// No backtracking is done here (see andParser.parse).
func (p and3Parser[T, U, V]) parse(r *reader) (Tuple3[T, U, V], error) {
	v0, err := p.p0.parse(r)
	if err != nil {
		return Tuple3[T, U, V]{}, err
	}
	v1, err := p.p1.parse(r)
	if err != nil {
		return Tuple3[T, U, V]{}, err
	}
	v2, err := p.p2.parse(r)
	if err != nil {
		return Tuple3[T, U, V]{}, err
	}
	return NewTuple3(v0, v1, v2), nil
}
// Choice returns a parser that tries each of the given parsers in order and
// yields the first successful result.
func Choice[T any](p0 Parser[T], ps ...Parser[T]) Parser[T] {
	parsers := make([]Parser[T], 0, len(ps)+1)
	parsers = append(parsers, p0)
	parsers = append(parsers, ps...)
	return Parser[T]{p: choiceParser[T]{ps: parsers}}
}
// choiceParser holds the ordered alternatives tried by Choice.
type choiceParser[T any] struct {
	ps []Parser[T]
}

// parse tries each alternative inside r.try, so a failed attempt is rolled
// back before the next is tried; the first success wins. If every
// alternative fails, the accumulated multierror of all attempts is returned.
func (p choiceParser[T]) parse(r *reader) (T, error) {
	var totalErr error
	for _, p := range p.ps {
		var t T
		err := r.try(func() error {
			var e error
			t, e = p.parse(r)
			return e
		})
		if err == nil {
			return t, nil
		}
		totalErr = multierror.Append(totalErr, err)
	}
	return *new(T), totalErr
}
// Repeat returns a parser that repeatedly tries p until it fails.
// Zero successes yield an empty slice without any error.
func Repeat[T any](p Parser[T]) Parser[[]T] {
	return Parser[[]T]{p: repeatParser[T]{p}}
}

// repeatParser applies its sub-parser zero or more times.
type repeatParser[T any] struct {
	p Parser[T]
}

// parse keeps running p inside r.try, appending each success; the first
// failure is rolled back and ends the loop. It never returns an error.
func (p repeatParser[T]) parse(r *reader) ([]T, error) {
	ts := make([]T, 0)
	for {
		err := r.try(func() error {
			t, e := p.p.parse(r)
			if e != nil {
				return e
			}
			ts = append(ts, t)
			return nil
		})
		if err != nil {
			return ts, nil
		}
	}
}
// SepBy returns a parser that parses zero or more occurrences of p separated
// by sep (p sep p sep ... p). Separator results are discarded; no match at
// all yields a nil slice without error (via the Pure fallback).
func SepBy[T, U any](p Parser[T], sep Parser[U]) Parser[[]T] {
	return Choice(
		Map(
			And(
				// Zero or more "p sep" pairs, then the final trailing p.
				Repeat(And(p, sep)),
				p,
			),
			func(v Tuple2[[]Tuple2[T, U], T]) []T {
				ts := make([]T, 0)
				for _, t := range v.Get0() {
					ts = append(ts, t.Get0())
				}
				return append(ts, v.Get1())
			},
		),
		Pure[[]T](nil),
	)
}
// Maybe returns a parser that tries to run p.
// On failure it yields nil without any error (the attempt is rolled back).
func Maybe[T any](p Parser[T]) Parser[*T] {
	return Parser[*T]{p: maybeParser[T]{p}}
}

// maybeParser makes its sub-parser optional.
type maybeParser[T any] struct {
	p Parser[T]
}

// parse runs p inside r.try; a failure is rolled back and reported as a nil
// result rather than an error.
func (p maybeParser[T]) parse(r *reader) (*T, error) {
	var v T
	err := r.try(func() error {
		var e error
		v, e = p.p.parse(r)
		return e
	})
	if err != nil {
		return nil, nil
	}
	return &v, nil
}
// Between returns a parser that runs open, then p, then closing, keeping
// only the result of p; the delimiter results are discarded.
func Between[T, U, V any](open Parser[T], p Parser[U], closing Parser[V]) Parser[U] {
	keepMiddle := func(t Tuple3[T, U, V]) U {
		return t.Get1()
	}
	return Map(And3(open, p, closing), keepMiddle)
}
// Untype erases the result type of p, yielding a Parser[any] that wraps the
// same underlying parse.
func Untype[T any](p Parser[T]) Parser[any] {
	return Parser[any]{p: untypeParser[T]{p}}
}

// untypeParser adapts a typed parser to the Parser[any] interface.
type untypeParser[T any] struct {
	p Parser[T]
}

// parse delegates to the wrapped parser and boxes its result as interface{}.
func (p untypeParser[T]) parse(r *reader) (interface{}, error) {
	return p.p.parse(r)
}
// Ref takes a pointer to a parser and returns a parser that behaves the same
// as whatever the pointer holds at parse time. This indirection makes
// recursive grammars possible (declare the variable, reference it, assign later).
func Ref[T any](p *Parser[T]) Parser[T] {
	return Parser[T]{p: refParser[T]{p}}
}

// refParser defers dereferencing its parser pointer until parse time.
type refParser[T any] struct {
	p *Parser[T]
}

// parse dereferences the pointer (picking up any later assignment) and runs it.
func (p refParser[T]) parse(r *reader) (T, error) {
	a := *p.p
	return a.parse(r)
}
// Trace attaches a name to p for debugging: the name appears in the reader's
// trace output and prefixes any error produced by p.
func Trace[T any](name string, p Parser[T]) Parser[T] {
	return Parser[T]{p: traceParser[T]{name, p}}
}

// traceParser wraps a parser with a human-readable name.
type traceParser[T any] struct {
	name string
	p    Parser[T]
}

// parse runs the wrapped parser within the reader's trace scope and wraps
// any error with the trace name.
func (p traceParser[T]) parse(r *reader) (T, error) {
	var t T
	var err error
	r.withTrace(p.name, func() {
		t, err = p.p.parse(r)
	})
	if err != nil {
		return t, errors.Wrapf(err, "%q >", p.name)
	}
	return t, nil
}
package tester
import (
"errors"
"fmt"
"strconv"
"strings"
)
// Growth determines the next test value to run: OnSuccess/OnFail take the
// last test value and return the next one, where a return of 0 stops the
// run. String yields the representation accepted by ParseGrowth.
type Growth interface {
	OnSuccess(test int) int
	OnFail(test int) int
	String() string
}
// LinearGrowth increases the test value by a fixed amount with every
// successful test.
type LinearGrowth struct {
	Increase int
}

// LinearGrowthPrefix is the prefix used in the linear growth string representation.
const LinearGrowthPrefix = "+"

// String renders the growth as "+<increase>", the form ParseGrowth accepts.
func (g *LinearGrowth) String() string {
	return fmt.Sprintf("%s%d", LinearGrowthPrefix, g.Increase)
}

// OnSuccess increases test by the configured fixed amount.
func (g *LinearGrowth) OnSuccess(test int) int {
	return test + g.Increase
}

// OnFail stops the tests (returning 0 means no further test).
func (g *LinearGrowth) OnFail(test int) int {
	return 0
}
// PercentageGrowth increases the test value by a fixed percentage with every
// successful test.
type PercentageGrowth struct {
	Increase float64
}

// PercentageGrowthPrefix is the prefix used in the percentage growth string representation.
const PercentageGrowthPrefix = "%"

// String renders the growth as "%<increase>" with two decimals, the form
// ParseGrowth accepts.
func (g *PercentageGrowth) String() string {
	return fmt.Sprintf("%s%.2f", PercentageGrowthPrefix, g.Increase)
}

// OnSuccess increases test by the configured percentage (truncated to int).
func (g *PercentageGrowth) OnSuccess(test int) int {
	return int((100. + g.Increase) / 100. * float64(test))
}

// OnFail stops the tests (returning 0 means no further test).
func (g *PercentageGrowth) OnFail(test int) int {
	return 0
}
// ExponentialGrowth doubles the test value until the first failure, then
// binary-searches between the last success and first failure down to a
// given precision.
type ExponentialGrowth struct {
	Precision int
	left, right int // current search bounds: left = last success, right = last failure.
	bound bool      // true once a failing upper bound has been observed.
}

// ExponentialGrowthPrefix is the prefix used in the exponential growth string representation.
const ExponentialGrowthPrefix = "^"

// String renders the growth as "^<precision>", the form ParseGrowth accepts.
func (g *ExponentialGrowth) String() string {
	return fmt.Sprintf("%s%d", ExponentialGrowthPrefix, g.Precision)
}

// OnSuccess records test as the new lower bound. Before any failure it keeps
// doubling; afterwards it returns the midpoint, or 0 once the bounds are
// within Precision of each other.
func (g *ExponentialGrowth) OnSuccess(test int) int {
	g.left = test
	if !g.bound {
		return test * 2
	}
	if g.right-g.left <= g.Precision {
		return 0
	}
	return int(float64(g.right+g.left) / 2)
}

// OnFail records test as the new upper bound and returns the midpoint, or 0
// once the bounds are within Precision of each other.
func (g *ExponentialGrowth) OnFail(test int) int {
	g.right = test
	g.bound = true
	if g.right-g.left <= g.Precision {
		return 0
	}
	return int(float64(g.right+g.left) / 2)
}
// GrowthHelp is user-facing usage text describing the growth syntax accepted
// by ParseGrowth.
const GrowthHelp = `Growth determines what will be the next value used for a test.
* linear growth (+int) increases test value by a fixed amount after each success,
stops immediately after the first failure
* percentage growth (%float) increases test value by a fixed percentage after
each success, stops immediately after the first failure
* exponential growth (^int) first doubles the test value after each success to
find an upper bound, then performs a binary search up to a given precision`
// ErrInvalidGrowth is returned when a growth cannot be parsed from its
// string representation.
//
// BUG FIX: the message previously read "%%flaot" — a typo plus a leftover
// printf escape, even though errors.New performs no formatting.
var ErrInvalidGrowth = errors.New("unknown growth, want +int, %float, ^int")
// ParseGrowth creates a Growth from its string representation: "+int" for
// linear, "%float" for percentage, "^int" for exponential. It returns the
// numeric conversion error for a malformed value, or ErrInvalidGrowth for an
// unrecognized prefix.
func ParseGrowth(value string) (Growth, error) {
	switch {
	case strings.HasPrefix(value, LinearGrowthPrefix):
		inc, err := strconv.Atoi(strings.TrimPrefix(value, LinearGrowthPrefix))
		if err != nil {
			//nolint:wrapcheck
			return nil, err
		}
		return &LinearGrowth{Increase: inc}, nil
	case strings.HasPrefix(value, PercentageGrowthPrefix):
		inc, err := strconv.ParseFloat(strings.TrimPrefix(value, PercentageGrowthPrefix), 64)
		if err != nil {
			//nolint:wrapcheck
			return nil, err
		}
		return &PercentageGrowth{Increase: inc}, nil
	case strings.HasPrefix(value, ExponentialGrowthPrefix):
		prec, err := strconv.Atoi(strings.TrimPrefix(value, ExponentialGrowthPrefix))
		if err != nil {
			//nolint:wrapcheck
			return nil, err
		}
		return &ExponentialGrowth{Precision: prec}, nil
	default:
		return nil, ErrInvalidGrowth
	}
}
package fuzz
import (
"math/rand"
"time"
"github.com/go-spatial/tegola/geom"
)
// init seeds the global math/rand source so each run produces different
// geometries. NOTE(review): rand.Seed is deprecated as of Go 1.20 — confirm
// the toolchain before relying on this pattern.
func init() {
	rand.Seed(time.Now().UnixNano())
}

// genNil reports whether a nil geometry should be produced: when withNil is
// set, it returns true with roughly 2% probability.
func genNil(withNil bool) bool { return withNil && rand.Intn(100) < 2 }
// GenRandPoint generates a random point with normally-distributed
// coordinates. Roughly 2% of the time it returns a typed nil instead.
func GenRandPoint() *geom.Point {
	if genNil(true) {
		return (*geom.Point)(nil)
	}
	pt := geom.Point{rand.NormFloat64(), rand.NormFloat64()}
	return &pt
}

// genRandSlicePoint produces size random coordinate pairs; a size of zero
// yields a nil slice.
func genRandSlicePoint(size int) (pts [][2]float64) {
	for i := size; i > 0; i-- {
		x, y := rand.NormFloat64(), rand.NormFloat64()
		pts = append(pts, [2]float64{x, y})
	}
	return
}
// GenRandMultiPoint will generate a MultiPoint that may be nil, and will have a random number of points. There is no guarantee that all points are unique.
func GenRandMultiPoint() *geom.MultiPoint {
if genNil(true) {
return (*geom.MultiPoint)(nil)
}
mp := geom.MultiPoint(genRandSlicePoint(rand.Intn(1000)))
return &mp
}
// GenRandLineString will generate a random LineString (that may be nil depending on withNil), and a randome number of points. There is no guarantee that the line string is simple.
func GenRandLineString(withNil bool) *geom.LineString {
if genNil(withNil) {
return (*geom.LineString)(nil)
}
ls := geom.LineString(genRandSlicePoint(rand.Intn(1000)))
return &ls
}
// GenRandMultiLineString will generate a random MultiLineString (that may be nil depending on withNil), and a random number of linestrings. There is no gaurantee that the line strings are simple.
func GenRandMultiLineString(withNil bool) *geom.MultiLineString {
if genNil(withNil) {
return (*geom.MultiLineString)(nil)
}
num := rand.Intn(1000)
var ml geom.MultiLineString
for i := 0; i < num; i++ {
ls := GenRandLineString(false)
ml = append(ml, [][2]float64(*ls))
}
return &ml
}
// GenRandPolygon will generate a random Polygon (that may be nil depending on withNil). The Polygon may not be valid or simple.
func GenRandPolygon(withNil bool) *geom.Polygon {
if genNil(withNil) {
return (*geom.Polygon)(nil)
}
num := rand.Intn(100)
var p geom.Polygon
for i := 0; i < num; i++ {
ls := GenRandLineString(false)
p = append(p, [][2]float64(*ls))
}
return &p
}
// GenRandMultiPolygon will generate a random MultiPolygon (that may be nil depending on withNil). The Polygons may not be valid or simple.
func GenRandMultiPolygon(withNil bool) *geom.MultiPolygon {
if genNil(withNil) {
return (*geom.MultiPolygon)(nil)
}
num := rand.Intn(10)
var mp geom.MultiPolygon
for i := 0; i < num; i++ {
p := GenRandPolygon(false)
mp = append(mp, [][][2]float64(*p))
}
return &mp
}
// GenRandCollection will generate a random Collection (that may be nil depending on withNil).
func GenRandCollection(withNil bool) *geom.Collection {
if genNil(withNil) {
return (*geom.Collection)(nil)
}
num := rand.Intn(10)
var col geom.Collection
for i := 0; i < num; i++ {
col = append(col, GenGeometry())
}
return &col
}
// GenGeometry will generate a random Geometry. The geometry may be nil.
// Each concrete geometry kind is selected by three of the 22 draw values;
// only the value 21 falls through to the nil default.
func GenGeometry() geom.Geometry {
	switch rand.Intn(22) {
	default:
		return nil
	case 0, 13, 20:
		return GenRandPoint()
	case 2, 11, 19:
		return GenRandMultiPoint()
	case 4, 9, 18:
		return GenRandLineString(true)
	case 6, 7, 17:
		return GenRandMultiLineString(true)
	case 8, 5, 16:
		return GenRandPolygon(true)
	case 10, 3, 15:
		return GenRandMultiPolygon(true)
	case 12, 1, 14:
		return GenRandCollection(true)
	}
} | geom/encoding/wkt/internal/cmd/fuzz/fuzz/fuzz.go | 0.601359 | 0.580084 | fuzz.go | starcoder |
package main
import (
"sync"
"github.com/dhconnelly/rtreego"
"github.com/gravestench/mathlib"
"github.com/hajimehoshi/ebiten/v2"
)
const (
fWidth = float64(Width)
fHeight = float64(Height)
)
type Game struct {
boidCount int
boids []*Boid
tick int
pixels []byte
}
// Update advances the simulation by one tick: it spawns any boids needed to
// reach g.boidCount, updates all existing boids concurrently, then rebuilds
// the global spatial index from the resulting positions.
func (g *Game) Update() error {
	g.checkInput()
	var wg sync.WaitGroup
	currentCount := len(g.boids)
	toCreateCount := g.boidCount - currentCount
	// Buffered so each creator goroutine can send without blocking before
	// the channel is drained below.
	boidChan := make(chan *Boid, toCreateCount)
	// One goroutine per desired boid: indices >= currentCount create new
	// boids, the rest update existing boids in place.
	wg.Add(g.boidCount)
	for i := 0; i < g.boidCount; i++ {
		idx := i // per-iteration copy captured by the closure
		go func() {
			defer wg.Done()
			if idx >= currentCount {
				boidChan <- newBoid(idx, nil)
			} else {
				boid := g.boids[idx]
				boid.update(g.tick)
			}
		}()
	}
	wg.Wait()
	close(boidChan)
	// Gather every boid (old plus newly created) for the index rebuild.
	points := make([]rtreego.Spatial, 0, g.boidCount)
	for _, boid := range g.boids {
		points = append(points, *boid)
	}
	if toCreateCount > 0 {
		// Drain the freshly created boids and append them to the flock.
		boids := make([]*Boid, 0, toCreateCount)
		for boid := range boidChan {
			boids = append(boids, boid)
			points = append(points, *boid)
		}
		g.boids = append(g.boids, boids...)
	}
	global.setIndex(newIndex(points...))
	g.tick++
	return nil
}
func (g *Game) resetPixels() {
for i := range g.pixels {
g.pixels[i] = 255
}
}
func (g *Game) drawBoid(boid *Boid, wg *sync.WaitGroup) {
defer wg.Done()
trailChan := make(chan *TrailPixel, global.params.trailLength.value())
boid.getTrailPixels(g.tick, trailChan)
for trailPixel := range trailChan {
g.pixels[trailPixel.pixelIndex] = trailPixel.colourValue
g.pixels[trailPixel.pixelIndex+1] = trailPixel.colourValue
}
position := boid.Position()
x := int(position.X)
y := int(position.Y)
pixelDataPosition := (y*Width + x) * 4
g.pixels[pixelDataPosition] = 0
g.pixels[pixelDataPosition+1] = 0
}
func (g *Game) Draw(screen *ebiten.Image) {
g.resetPixels()
var wg sync.WaitGroup
wg.Add(len(g.boids))
for _, boid := range g.boids {
go g.drawBoid(boid, &wg)
}
wg.Wait()
screen.ReplacePixels(g.pixels)
}
func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {
return Width, Height
}
func (g *Game) addBoid(position *mathlib.Vector2) {
boid := newBoid(g.boidCount, position)
g.boids = append(g.boids, boid)
g.boidCount++
}
func NewGame() *Game {
boidCount := boidCount
boids := make([]*Boid, 0, boidCount)
pixels := make([]byte, 4*Width*Height)
return &Game{boidCount: boidCount, boids: boids, pixels: pixels}
} | game.go | 0.593374 | 0.4474 | game.go | starcoder |
package algebra
import (
"fmt"
"image/color"
"math"
)
type Vector3 struct {
X MnFloat
Y MnFloat
Z MnFloat
}
var (
ZeroVector3 = Vector3{0, 0, 0}
UpVector3 = Vector3{0, 1, 0}
RightVector3 = Vector3{1, 0, 0}
ForwardVector3 = Vector3{0, 0, -1}
)
func (v Vector3) Dump() Vector3 {
fmt.Println(fmt.Sprintf("X = %f, Y = %f, Z = %f", v.X, v.Y, v.Z))
return v
}
// Calculates the dot product between two vectors.
func (v Vector3) Dot(other Vector3) MnFloat {
return v.X*other.X + v.Y*other.Y + v.Z*other.Z
}
// Calculates the squared magnitude of the vector.
func (v Vector3) LengthSq() MnFloat {
return v.Dot(v)
}
// Calculates the magnitude of the vector
func (v Vector3) Length() MnFloat {
return Sqrt(v.LengthSq())
}
// Returns the vector with a magnitude of 1.
func (v Vector3) Normalize() Vector3 {
length := v.Length()
return Vector3{
v.X / length,
v.Y / length,
v.Z / length,
}
}
func (v Vector3) Pow(t MnFloat) Vector3 {
return Vector3{
math.Pow(v.X, t),
math.Pow(v.Y, t),
math.Pow(v.Z, t),
}
}
// Computes the vector result of adding two vectors together.
func (v Vector3) Add(other Vector3) Vector3 {
return Vector3{
v.X + other.X,
v.Y + other.Y,
v.Z + other.Z,
}
}
// Computes the vector result of subtracting two vectors together.
func (v Vector3) Subtract(other Vector3) Vector3 {
return Vector3{
v.X - other.X,
v.Y - other.Y,
v.Z - other.Z,
}
}
func (v Vector3) DivideScalar(value MnFloat) Vector3 {
return Vector3{
v.X / value,
v.Y / value,
v.Z / value,
}
}
// Computes the vector result by multiplying each component by a scalar value.
func (v Vector3) MultiplyScalar(value MnFloat) Vector3 {
return Vector3{
v.X * value,
v.Y * value,
v.Z * value,
}
}
func (v Vector3) Multiply(other Vector3) Vector3 {
return Vector3{
v.X * other.X,
v.Y * other.Y,
v.Z * other.Z,
}
}
// Computes the cross product of two vectors.
func (v Vector3) Cross(other Vector3) Vector3 {
return Vector3{
(v.Y * other.Z) - (v.Z * other.Y),
(v.Z * other.X) - (v.X * other.Z),
(v.X * other.Y) - (v.Y * other.X),
}
}
func (v Vector3) Saturate() Vector3 {
return Vector3{
math.Max(MnZero, math.Min(MnOne, v.X)),
math.Max(MnZero, math.Min(MnOne, v.Y)),
math.Max(MnZero, math.Min(MnOne, v.Z)),
}
}
// Computes the vector consisting of the smallest component values from two vectors.
func (v Vector3) Min(other Vector3) Vector3 {
return Vector3{
math.Min(v.X, other.X),
math.Min(v.Y, other.Y),
math.Min(v.Z, other.Z),
}
}
// Computes the vector consisting of the largest component values from two vectors.
func (v Vector3) Max(other Vector3) Vector3 {
return Vector3{
math.Max(v.X, other.X),
math.Max(v.Y, other.Y),
math.Max(v.Z, other.Z),
}
}
func (v Vector3) Negate() Vector3 {
return Vector3{
-v.X,
-v.Y,
-v.Z,
}
}
// Performs a linear interpolation between two vectors and returns the result.
func (v Vector3) Lerp(other Vector3, t MnFloat) Vector3 {
return Vector3{
Lerp(v.X, other.X, t),
Lerp(v.Y, other.Y, t),
Lerp(v.Z, other.Z, t),
}
}
// Computes a reflection vector, given the surface normal.
func (v Vector3) Reflect(normal Vector3) Vector3 {
return v.Subtract(normal.MultiplyScalar(2.0 * v.Dot(normal)))
}
// Computes the refracted vector given the surface normal and index of refraction (ior).
// This is Snell's law in vector form; ior is the ratio of refractive indices of
// the two media (assumes v and normal are unit length — TODO confirm at call sites).
// Returns the zero vector on total internal reflection (k < 0).
func (v Vector3) Refract(normal Vector3, ior MnFloat) Vector3 {
	a := v.Dot(normal)
	k := 1.0 - ior*ior*(1.0-a*a)
	if k < 0.0 {
		// Total internal reflection: there is no transmitted ray.
		return Vector3{0.0, 0.0, 0.0}
	}
	return v.MultiplyScalar(ior).Subtract(normal.MultiplyScalar(ior*a + Sqrt(k)))
}
func (v Vector3) ToColor() color.NRGBA {
return color.NRGBA{
R: (uint8)(v.X * 255),
G: (uint8)(v.Y * 255),
B: (uint8)(v.Z * 255),
A: 255,
}
}
func (v Vector3) Transform(m Matrix4) Vector3 {
w := m.M03 * v.X + m.M13 * v.Y + m.M23 * v.Z + m.M33
if w == MnZero {
w = MnOne
}
return Vector3{
X: (m.M00 * v.X + m.M10 * v.Y + m.M20 * v.Z + m.M30) / w,
Y: (m.M01 * v.X + m.M11 * v.Y + m.M21 * v.Z + m.M31) / w,
Z: (m.M02 * v.X + m.M12 * v.Y + m.M22 * v.Z + m.M32) / w,
}
} | algebra/vector3.go | 0.871461 | 0.688884 | vector3.go | starcoder |
package alt
import (
"strconv"
"time"
"github.com/ngjaying/ojg/gen"
)
// String converts the value provided to a string. If conversion is not
// possible such as if the provided value is an array then the first option
// default value is returned or if not provided and empty string is
// returned. If the type is not a string or gen.String and there is a second
// optional default then that second default value is returned. This approach
// keeps the return as a single value and gives the caller the choice of how
// to indicate a bad value.
func String(v interface{}, defaults ...string) (s string) {
	switch ts := v.(type) {
	case string:
		s = ts
	case gen.String:
		s = string(ts)
	default:
		if 1 < len(defaults) {
			// A second default overrides conversion for every non-string type.
			s = defaults[1]
		} else {
			switch tv := v.(type) {
			case nil:
				s = ""
			case bool:
				s = strconv.FormatBool(tv)
			case int64:
				s = strconv.FormatInt(tv, 10)
			case int:
				s = strconv.FormatInt(int64(tv), 10)
			case int8:
				s = strconv.FormatInt(int64(tv), 10)
			case int16:
				s = strconv.FormatInt(int64(tv), 10)
			case int32:
				s = strconv.FormatInt(int64(tv), 10)
			case uint:
				// FormatUint: on 64-bit platforms a uint can exceed
				// math.MaxInt64, which the old int64 round-trip corrupted.
				s = strconv.FormatUint(uint64(tv), 10)
			case uint8:
				s = strconv.FormatUint(uint64(tv), 10)
			case uint16:
				s = strconv.FormatUint(uint64(tv), 10)
			case uint32:
				s = strconv.FormatUint(uint64(tv), 10)
			case uint64:
				// Bug fix: converting through int64 produced negative
				// output for values above math.MaxInt64.
				s = strconv.FormatUint(tv, 10)
			case float32:
				s = strconv.FormatFloat(float64(tv), 'g', -1, 32)
			case float64:
				s = strconv.FormatFloat(tv, 'g', -1, 64)
			case time.Time:
				s = tv.Format(time.RFC3339Nano)
			case gen.Bool:
				s = strconv.FormatBool(bool(tv))
			case gen.Int:
				s = strconv.FormatInt(int64(tv), 10)
			case gen.Float:
				// NOTE(review): rendered at bitSize 32 while float64 uses
				// 64 — confirm whether gen.Float is intentionally 32-bit.
				s = strconv.FormatFloat(float64(tv), 'g', -1, 32)
			case gen.Time:
				s = time.Time(tv).Format(time.RFC3339Nano)
			case gen.Big:
				return string(tv)
			default:
				// Unconvertible type (array, map, struct, ...): fall back
				// to the first default when supplied.
				if 0 < len(defaults) {
					s = defaults[0]
				}
			}
		}
	}
	return
} | alt/string.go | 0.547222 | 0.407157 | string.go | starcoder |
package mathg
import "math"
// Useful functions for physics/graphics
func QuadraticEaseOut(f float64) float64 {
return -f * (f - 2.)
}
func QuadraticEaseIn(f float64) float64 {
return f * f
}
// QuadraticEaseInOut accelerates quadratically through the first half of
// [0, 1] and decelerates symmetrically through the second half.
func QuadraticEaseInOut(f float64) float64 {
	if f < 0.5 {
		return 2. * f * f
	}
	return -2.*f*f + 4.*f - 1.
}
func CubicEaseOut(f float64) float64 {
a := f - 1.
return a*a*a + 1.
}
func CubicEaseIn(f float64) float64 {
return f * f * f
}
func CubicEaseInOut(f float64) float64 {
a := 0.
if f < 0.5 {
a = 4. * f * f * f
} else {
a = 2.*f - 2.
a = 0.5*a*a*a + 1.
}
return a
}
func QuarticEaseOut(f float64) float64 {
a := f - 1.
return a*a*a*(1.-f) + 1.
}
func QuarticEaseIn(f float64) float64 {
return f * f * f * f
}
func QuarticEaseInOut(f float64) float64 {
a := 0.
if f < 0.5 {
a = 8. * f * f * f * f
} else {
a = f - 1.
a = -8.*a*a*a*a + 1.
}
return a
}
func QuinticEaseOut(f float64) float64 {
a := f - 1.
return a*a*a*a*a + 1.
}
func QuinticEaseIn(f float64) float64 {
return f * f * f * f * f
}
func QuinticEaseInOut(f float64) float64 {
a := 0.
if f < 0.5 {
a = 8. * f * f * f * f * f
} else {
a = 2.*f - 2.
a = 0.5*a*a*a*a*a + 1.
}
return a
}
func SinEaseOut(f float64) float64 {
return math.Sin(f * pi_2)
}
func SinEaseIn(f float64) float64 {
return math.Sin((f-1.)*pi_2) + 1.
}
// SinEaseInOut eases in and out along half a cosine wave over [0, 1].
func SinEaseInOut(f float64) float64 {
	return (1. - math.Cos(f*math.Pi)) / 2.
}
func CircularEaseOut(f float64) float64 {
return math.Sqrt((2. - f) * f)
}
func CircularEaseIn(f float64) float64 {
return 1. - math.Sqrt((1. - (f * f)))
}
// CircularEaseInOut applies circular easing on both halves of [0, 1].
// Fix: the upper half previously computed math.Sqrt(-2*f-3), whose argument
// is negative for every f >= 0.5, so the function returned NaN on the whole
// second half. The correct form (AHEasing) is 0.5*(sqrt(-(2f-3)*(2f-1)) + 1).
func CircularEaseInOut(f float64) float64 {
	if f < 0.5 {
		return 0.5 * (1. - math.Sqrt(1.-4.*f*f))
	}
	return 0.5 * (math.Sqrt(-(2.*f-3.)*(2.*f-1.)) + 1.)
}
func ExponentialEaseOut(f float64) float64 {
a := f
if math.Abs(a) > epsilon {
a = 1. - math.Pow(2., -10.*f)
}
return a
}
func ExponentialEaseIn(f float64) float64 {
a := f
if math.Abs(a) > epsilon {
a = math.Pow(2., 10.*(f-1.))
}
return a
}
// ExponentialEaseInOut applies exponential easing on both halves of [0, 1].
// Fix: mirror the endpoint guards used by ExponentialEaseOut/ExponentialEaseIn
// so the curve passes exactly through (0, 0) and (1, 1) instead of being
// 2^-11 off at the endpoints; also drop the dead `a := f` initialisation,
// which was overwritten on every path.
func ExponentialEaseInOut(f float64) float64 {
	if f == 0. || f == 1. {
		return f
	}
	if f < 0.5 {
		return 0.5 * math.Pow(2., (20.*f)-10.)
	}
	return -0.5*math.Pow(2., -20.*f+10.) + 1.
}
func ElasticEaseOut(f float64) float64 {
return math.Sin(-13.*pi_2*(f+1.))*math.Pow(2., -10.*f) + 1.
}
func ElasticEaseIn(f float64) float64 {
return math.Sin(13.*pi_2*f) * math.Pow(2., 10.*(f-1.))
}
func ElasticEaseInOut(f float64) float64 {
a := 0.
if f < 0.5 {
a = 0.5 * math.Sin(13.*pi_2*(2.*f)) * math.Pow(2., 10.*((2.*f)-1.))
} else {
a = 0.5 * (math.Sin(-13.*pi_2*((2.*f-1.)+1.))*math.Pow(2., -10.*(2.*f-1.)) + 2.)
}
return a
}
func BackEaseOut(f float64) float64 {
a := 1. - f
return 1. - (a*a*a - a*math.Sin(a*math.Pi))
}
func BackEaseIn(f float64) float64 {
return f*f*f - f*math.Sin(f*math.Pi)
}
// BackEaseInOut overshoots slightly at both ends (cubic with a sine "back"
// term). Fix: the second half must evaluate the back polynomial at the
// reflected argument a = 1-(2f-1), i.e. a*sin(a*pi); the previous code mixed
// in sin(f*pi), distorting the ease-out half (AHEasing reference form).
func BackEaseInOut(f float64) float64 {
	if f < 0.5 {
		a := 2. * f
		return 0.5 * (a*a*a - a*math.Sin(a*math.Pi))
	}
	a := 1. - (2.*f - 1.)
	return 0.5*(1.-(a*a*a-a*math.Sin(a*math.Pi))) + 0.5
}
// BounceEaseOut simulates a damped bounce settling at 1, built from four
// piecewise parabolic segments. Fix: the third segment (8/11 <= f < 9/10)
// previously reused the final segment's linear/constant coefficients
// (513/25, 268/25), producing values far above 1 (about 2.0 at f = 0.85);
// the AHEasing coefficients for that segment are 35442/1805 and 16061/1805.
func BounceEaseOut(f float64) float64 {
	switch {
	case f < 4./11.:
		return (121. * f * f) / 16.
	case f < 8./11.:
		return (363. / 40. * f * f) - (99. / 10. * f) + 17./5.
	case f < 9./10.:
		return (4356. / 361. * f * f) - (35442. / 1805. * f) + 16061./1805.
	default:
		return (54. / 5. * f * f) - (513. / 25. * f) + 268./25.
	}
}
func BounceEaseIn(f float64) float64 {
return 1. - BounceEaseOut(1.-f)
}
func BounceEaseInOut(f float64) float64 {
a := 0.
if f < 0.5 {
a = 0.5 * BounceEaseIn(f*2.)
} else {
a = 0.5*BounceEaseOut(f*2.-1.) + 0.5
}
return a
} | ease.go | 0.801392 | 0.581184 | ease.go | starcoder |
package math
import (
stdmath "math"
"reflect"
"time"
)
// Mean returns the mean value from the array or slice of ints, floats, or durations.
// For durations, returns the average duration.
// For ints and floats, returns a float64.
func Mean(in interface{}) (interface{}, error) {
switch in := in.(type) {
case []uint8:
if len(in) == 0 {
return nil, ErrEmptyInput
}
out := 0
for i := 0; i < len(in); i++ {
out += int(in[i])
}
return float64(out) / float64(len(in)), nil
case []int:
if len(in) == 0 {
return nil, ErrEmptyInput
}
out := 0
for i := 0; i < len(in); i++ {
out += int(in[i])
}
return float64(out) / float64(len(in)), nil
case []int32:
if len(in) == 0 {
return nil, ErrEmptyInput
}
out := 0
for i := 0; i < len(in); i++ {
out += int(in[i])
}
return float64(out) / float64(len(in)), nil
case []int64:
if len(in) == 0 {
return nil, ErrEmptyInput
}
out := int64(0)
for i := 0; i < len(in); i++ {
out += in[i]
}
return float64(out) / float64(len(in)), nil
case []float64:
if len(in) == 0 {
return nil, ErrEmptyInput
}
out := 0.0
for i := 0; i < len(in); i++ {
out += in[i]
}
return out / float64(len(in)), nil
case []time.Duration:
if len(in) == 0 {
return nil, ErrEmptyInput
}
out := int64(0)
for i := 0; i < len(in); i++ {
out += int64(in[i])
}
return time.Duration(int64(stdmath.Floor(float64(out) / float64(len(in))))), nil
}
v := reflect.ValueOf(in)
t := v.Type()
k := t.Kind()
if k != reflect.Array && k != reflect.Slice {
return nil, &ErrInvalidKind{Value: reflect.TypeOf(in), Expected: []reflect.Kind{reflect.Array, reflect.Slice}}
}
if v.Len() == 0 {
return nil, ErrEmptyInput
}
sum, err := Sum(in)
if err != nil {
return nil, err
}
switch sum := sum.(type) {
case uint8:
return float64(sum) / float64(v.Len()), nil
case int32:
return float64(sum) / float64(v.Len()), nil
case int64:
return float64(sum) / float64(v.Len()), nil
case int:
return float64(sum) / float64(v.Len()), nil
case float64:
return sum / float64(v.Len()), nil
case time.Duration:
return time.Duration(int64(stdmath.Floor(float64(int64(sum)) / float64(v.Len())))), nil
}
return nil, &ErrInvalidKind{Value: reflect.TypeOf(sum), Expected: []reflect.Kind{reflect.Array, reflect.Slice}}
} | pkg/math/Mean.go | 0.759493 | 0.440469 | Mean.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
)
const WHITE = 0
const BLACK = 1
const SW = "sw"
const W = "w"
const NW = "nw"
const NE = "ne"
const E = "e"
const SE = "se"
type Hex struct {
id int
color int
neighbor map[string]*Hex
}
func NewHex() *Hex {
this := Hex{}
this.color = WHITE
this.neighbor = make(map[string]*Hex)
this.neighbor[SW] = nil
this.neighbor[W] = nil
this.neighbor[NW] = nil
this.neighbor[NE] = nil
this.neighbor[E] = nil
this.neighbor[SE] = nil
return &this
}
func (this *Hex) Get(direction string) *Hex {
return this.neighbor[direction]
}
func (this *Hex) ToggleColor() {
this.color = (this.color + 1) % 2
}
func (this *Hex) ConnectNeighbors() {
dirs := clockwiseDirectionsFrom(W)
for i, dir := range dirs {
if this.neighbor[dir] != nil {
this.neighbor[dir].neighbor[dirs[(i+3)%6]] = this
}
}
}
func (this *Hex) String() string {
var retval strings.Builder
fmt.Fprintf(&retval, "[%d] is [%s]:", this.id, intToColor(this.color))
for _, dir := range clockwiseDirectionsFrom(W) {
fmt.Fprintf(&retval, " %2s=", dir)
if this.neighbor[dir] != nil {
fmt.Fprintf(&retval, "[%4d]", this.neighbor[dir].id)
} else {
retval.WriteString(" nil ")
}
}
return retval.String()
}
type Floor struct {
size int
hexes []*Hex
}
func NewFloor() *Floor {
this := Floor{}
this.hexes = make([]*Hex, 35000) // hexes with a radius of 106 = 34027, this is close enough
this.size = 0
this.recordHex(NewHex())
for i := 1; i <= 25; i++ {
this.extendFloor()
}
return &this
}
func (this *Floor) recordHex(hex *Hex) {
hex.ConnectNeighbors()
hex.id = this.size
// fmt.Printf("Storing hex %d: %s\n", this.size, hex.String())
this.hexes[this.size] = hex
this.size += 1
}
func (this *Floor) extendFloor() {
// go west until there isn't anything
current := this.hexes[0]
currentRadius := 0
for current.neighbor[W] != nil {
current = current.neighbor[W]
currentRadius += 1
}
current = this.addEdge(current, currentRadius, clockwiseDirectionsFrom(W))
current = this.addEdge(current, currentRadius, clockwiseDirectionsFrom(NW))
current = this.addEdge(current, currentRadius, clockwiseDirectionsFrom(NE))
current = this.addEdge(current, currentRadius, clockwiseDirectionsFrom(E))
current = this.addEdge(current, currentRadius, clockwiseDirectionsFrom(SE))
current = this.addEdge(current, currentRadius, clockwiseDirectionsFrom(SW))
}
func (this *Floor) extendFloorIfEdgeTileIsBlack() {
dirs := clockwiseDirectionsFrom(W)
blackEdge := false
for i := this.size - 1; i >= 0 && ! blackEdge; i-- {
if this.hexes[i].color == 1 {
for _, dir := range dirs {
if this.hexes[i].neighbor[dir] == nil {
blackEdge = true
break
}
}
}
}
if blackEdge {
this.extendFloor()
}
}
func (this *Floor) addEdge(current *Hex, currentRadius int, hexDir []string) *Hex {
// hexDir directions should be clockwise. E.g. w, nw, ne, e, se, sw.
// hexDir[0] = current -> new Hex
// hexDir[1] =
// hexDir[2] =
// hexDir[3] = new Hex -> current: opposite of 0
// hexDir[4] = opposite of 1
// hexDir[5] = opposite of 2
cornerHex := NewHex()
cornerHex.neighbor[hexDir[3]] = current
if current.neighbor[hexDir[5]] != nil {
cornerHex.neighbor[hexDir[4]] = current.neighbor[hexDir[5]]
}
if current.neighbor[hexDir[1]] != nil {
cornerHex.neighbor[hexDir[2]] = current.neighbor[hexDir[1]]
}
this.recordHex(cornerHex)
for i := 0; i < currentRadius; i++ {
current = current.neighbor[hexDir[2]]
newHex := NewHex()
newHex.neighbor[hexDir[3]] = current
newHex.neighbor[hexDir[4]] = current.neighbor[hexDir[5]]
newHex.neighbor[hexDir[5]] = current.neighbor[hexDir[5]].neighbor[hexDir[0]]
if current.neighbor[hexDir[1]] != nil {
newHex.neighbor[hexDir[2]] = current.neighbor[hexDir[1]]
}
this.recordHex(newHex)
}
return current
}
func (this *Floor) CountBlack() int {
retval := 0
for _, hex := range this.hexes {
if hex != nil {
retval += hex.color
}
}
return retval
}
func (this *Floor) FlipTile(directions []string) {
current := this.hexes[0]
for _, dir := range directions {
current = current.neighbor[dir]
}
current.ToggleColor()
}
func (this *Floor) FlipTiles(directionsList [][]string) {
for _, directions := range directionsList {
this.FlipTile(directions)
}
}
func (this *Floor) Evolve() {
toFlip := []int{}
dirs := clockwiseDirectionsFrom(W)
for id := 0; id < this.size; id++ {
hex := this.hexes[id]
blackNeighbors := 0 // :nervous-laughter:
for _, dir := range dirs {
if hex.neighbor[dir] != nil {
blackNeighbors += hex.neighbor[dir].color
}
}
switch hex.color {
case WHITE:
if blackNeighbors == 2 {
toFlip = append(toFlip, id)
}
case BLACK:
if blackNeighbors == 0 || blackNeighbors > 2 {
toFlip = append(toFlip, id)
}
}
}
for _, f := range toFlip {
this.hexes[f].ToggleColor()
}
}
func (this *Floor) RunFor(days int) {
for i := 0; i < days; i++ {
this.extendFloorIfEdgeTileIsBlack()
this.Evolve()
}
}
func clockwiseDirectionsFrom(dir string) []string {
switch dir {
case SW: return []string{SW, W, NW, NE, E, SE}
case W: return []string{W, NW, NE, E, SE, SW}
case NW: return []string{NW, NE, E, SE, SW, W}
case NE: return []string{NE, E, SE, SW, W, NW}
case E: return []string{E, SE, SW, W, NW, NE}
case SE: return []string{SE, SW, W, NW, NE, E}
}
panic(fmt.Errorf("Unknown direction string [%s].", dir))
}
func intToColor(color int) string {
switch color {
case WHITE: return "WHITE"
case BLACK: return "BLACK"
}
panic(fmt.Errorf("Unknown color [%d].", color))
}
// -------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------
func main() {
if err := run(); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}
func getCliParams() string {
args := os.Args[1:]
switch len(args) {
case 0:
return "example.input"
case 1:
return args[0]
}
panic(fmt.Errorf("Invalid command-line arguments: %s.", args))
}
// parseInput extracts hex-direction tokens (sw, w, nw, ne, e, se) from each
// non-blank line of the puzzle input. Each inner slice holds, in order, the
// directions found on one line; whitespace-only lines are skipped.
func parseInput(input string) ([][]string, error) {
	hasDataRegex := regexp.MustCompile("[^[:space:]]")
	directionRegex := regexp.MustCompile("(sw|w|nw|ne|e|se)")
	retval := [][]string{}
	for _, line := range strings.Split(input, "\n") {
		if !hasDataRegex.MatchString(line) {
			continue
		}
		matches := directionRegex.FindAllStringSubmatch(line, -1)
		// FindAllStringSubmatch returns nil when nothing matched, and
		// len(nil) == 0, so a single length check suffices (staticcheck S1009
		// flagged the redundant `matches != nil &&`).
		if len(matches) == 0 {
			continue
		}
		subRetval := make([]string, len(matches))
		for i, match := range matches {
			subRetval[i] = match[1]
		}
		retval = append(retval, subRetval)
	}
	return retval, nil
}
func run() error {
filename := getCliParams()
fmt.Printf("Getting input from [%s].\n", filename)
dat, err := ioutil.ReadFile(filename)
if err != nil {
return err
}
// fmt.Println(string(dat))
input, err := parseInput(string(dat))
if err != nil {
return err
}
maxLen := 0
for i, line := range input {
if len(line) > maxLen {
maxLen = len(line)
}
fmt.Printf("%3d: %v\n", i, line)
}
fmt.Printf("Max length: %d\n", maxLen)
floor := NewFloor()
floor.FlipTiles(input)
floor.RunFor(100)
answer := floor.CountBlack()
fmt.Printf("Answer: %d\n", answer)
return nil
} | advent_of_code/2020/day-24b/day-24b.go | 0.640299 | 0.414247 | day-24b.go | starcoder |
package activation
import (
"math"
"math/rand"
"github.com/dowlandaiello/eve/common"
)
// Condition represents a type of condition regarding a link.
type Condition int
// ConditionalLinkInitializationOption is an initialization option used to
// modify a conditional link's behavior.
type ConditionalLinkInitializationOption = func(link ConditionalLink) ConditionalLink
const (
// EqualTo is the == operator.
EqualTo Condition = iota
// NotEqualTo is the != operator.
NotEqualTo
// LessThan is the < operator.
LessThan
// LessThanOrEqualTo is the <= operator.
LessThanOrEqualTo
// GreaterThan is the > operator.
GreaterThan
// GreaterThanOrEqualTo is the >= operator.
GreaterThanOrEqualTo
// Unconditional is a condition representing a lack of a condition.
Unconditional
)
// ConditionalLink is a link embedded in a node's structure indicating that
// some child node must only be activated when some term is met.
type ConditionalLink struct {
Condition Condition // the condition associated with the link
Comparator Parameter // the parameter to compare the given value against (right side of operator)
Destination Node // the node to trigger
Alive bool // whether or not the conditional link can activate under any circumstances
}
/* BEGIN EXPORTED METHODS */
// NewConditionalLink initializes a new conditional link with the given
// condition, comparator (right side of comparisons), and destination node.
func NewConditionalLink(condition Condition, comparator Parameter, destination Node) ConditionalLink {
return ConditionalLink{
Condition: condition, // Set the condition
Comparator: comparator, // Set the comparator
Destination: destination, // Set the destination
Alive: true, // Set the conditional link to alive by default
} // Return the initialized link
}
// RandomConditionalLinks initializes a slice of random conditional links.
// The variadic opts parameter provides, per index i, the initialization
// options applied to the i-th generated link.
func RandomConditionalLinks(opts ...[]ConditionalLinkInitializationOption) []ConditionalLink {
	// Random link count, biased by the configured computational difficulty.
	n := rand.Intn(int(math.Pow(4, float64(rand.Intn(common.ComputationalDifficulty)))))

	var links []ConditionalLink // Declare a buffer to store the initialized links in

	// Make the desired number of conditional links
	for i := 0; i < n; i++ {
		// Bug fix: options for link i exist when i indexes into opts. The
		// previous check (len(opts) > n) compared against the total link
		// count, so per-link options were silently ignored whenever fewer
		// option sets than links were supplied.
		if i < len(opts) {
			links = append(links, RandomConditionalLink(opts[i]...)) // Apply the i-th option set

			continue // Continue
		}

		links = append(links, RandomConditionalLink()) // Add an unconfigured link to the stack
	}

	return links // Return the generated links
}
// RandomConditionalLink initializes a new random conditional link with the
// given initialization options.
func RandomConditionalLink(opts ...ConditionalLinkInitializationOption) ConditionalLink {
var destination Node // Declare a buffer to store a potential destination in
// Generate a destination node based on a 50/50 coin flip
if rand.Intn(2) == 0 {
destination = RandomNode() // Set the destination to a random node
}
link := ConditionalLink{
Condition: Condition(rand.Intn(7)), // Set the condition of the link to a random condition
Comparator: RandomParameter(), // Set the comparator of the link to a random parameter
Destination: destination, // Set the destination to the conditionally generated destination node (exists only if 50/50 coin flip lands on heads)
Alive: true, // All nodes are alive by default
} // Initialize a random link
// Iterate through the provided options
for _, opt := range opts {
link = opt(link) // Apply the option
}
return link // Return the final link
}
// CanActivate checks that the condition can activate, given a certain parameter.
// NOTE: The parameter in this case refers to the value on the left side of the operator.
func (link *ConditionalLink) CanActivate(param *Parameter) bool {
// Check the link is dead
if !link.Alive {
return false // Link cannot activate under any circumstances
}
// Handle different conditions
switch link.Condition {
// Handle the == operator
case EqualTo:
return param.Equals(&link.Comparator) // Return whether or not the comparator is equivalent to the parameter
// Handle the != operator
case NotEqualTo:
return !param.Equals(&link.Comparator) // Return whether or not the comparator is not equivalent to the parameter
// Handle the < operator
case LessThan:
return param.LessThan(&link.Comparator) // Return whether or not the parameter is less than the comparator
// Handle the <= operator
case LessThanOrEqualTo:
return param.LessThan(&link.Comparator) || param.Equals(&link.Comparator) // Return the result
// Handle the > operator
case GreaterThan:
return param.GreaterThan(&link.Comparator) // Return whether or not the parameter is greater than the comparator
// Handle the >= operator
case GreaterThanOrEqualTo:
return param.GreaterThan(&link.Comparator) || param.Equals(&link.Comparator) // Return the result
// Handle an unconditional operator
case Unconditional:
return true // condition should always activate
default:
return false // Unrecognized condition
}
}
// HasDestination checks that the condition has a destination.
func (link *ConditionalLink) HasDestination() bool {
return !link.Destination.IsZero() // Return whether or not the destination exists
}
/* END EXPORTED METHODS */ | activation/conditional_link.go | 0.799286 | 0.510252 | conditional_link.go | starcoder |
package main
//. Matches any character except a newline
//[.] Matches .
//* Matches the preceding pattern element >=0 times, the same as {1,}
//+ Matches the proceding pattern element >0 times, the same as {0,}
//? Matches the proceding pattern element 0 or 1 time, the same as {0,1}
//{M,N} Matches the proceding pattern element at least M times and at most N times
//^ Matches the beginning of a line or string
//$ Matches the end of a line or string
//\A Matches the beginning of a string(but not an internal line)
//\z Matches the end of a string(but not an internal line)
//\w the same as [A-Za-z0-9_]
//\W the same as [^A-Za-z0-9_]
//\d the same as [0-9]
//\D the same as [^0-9]
//\s Matches a whitespace charactor
//\S Matches anything but a whitespace
import (
"fmt"
"io/ioutil"
"regexp"
"strconv"
)
var (
digitRegexp = regexp.MustCompile(`[0-9]+`)
)
//findFileDigits finds alldigits in a file
//and return a []byte
func findFileDigits(filename string) []byte {
fileBytes, _ := ioutil.ReadFile(filename)
b := digitRegexp.FindAll(fileBytes, len(fileBytes))
c := make([]byte, 0)
for _, bytes := range b {
c = append(c, bytes...)
}
return c
}
// findStrDigits collects every digit character appearing in str and
// returns them concatenated as a single []byte.
func findStrDigits(str string) []byte {
	digits := make([]byte, 0)
	for _, run := range digitRegexp.FindAll([]byte(str), len(str)) {
		digits = append(digits, run...)
	}
	return digits
}
//doubleAllFloats doubles all the float-type-digits
//found in searchIn
func doubleAllFloats(searchIn string) string {
f := func(s string) string {
v, err := strconv.ParseFloat(s, 32)
if err != nil {
v = 0.0
}
return strconv.FormatFloat(v*2, 'f', 2, 32)
}
pat := `[0-9]+[.][0-9]+`
if ok, _ := regexp.MatchString(pat, searchIn); !ok {
fmt.Println("Match not found!")
return ""
}
dRegexp := regexp.MustCompile(pat)
str1 := dRegexp.ReplaceAllString(searchIn, "####.####")
str2 := dRegexp.ReplaceAllStringFunc(searchIn, f)
fmt.Println(str1)
fmt.Println(str2)
return str2
}
func scanBytes(bytes []byte) {
for _, v := range bytes {
fmt.Printf("%c ", v)
}
} | main/regex.go | 0.554712 | 0.523116 | regex.go | starcoder |
package graphsample4
import "github.com/wangyoucao577/algorithms_practice/graph"
/* This sample directed graph comes from
"Introduction to Algorithms - Third Edition" 22.5 strongly connected component
V = 8 (node count)
E = 13 (edge count)
define directed graph G(V,E) as below:
a(0) -> b(1) → c(2) ← → d(3)
↑ ↙ ↓ ↓ ↓
e(4) → f(5) ← → g(6) → h(7) ->
↑ ↓
<-
NOTE: node `h` has a spin edge pointer to itself.
*/
const (
nodeCount = 8
directedGraph = true
)
// initializeGraphEdges adds the 13 directed edges of the sample graph from
// CLRS 22.5 (see the diagram above) and returns the same graph value.
func initializeGraphEdges(g graph.Graph) graph.Graph {
	g.AddEdge(0, 1)
	g.AddEdge(1, 2)
	g.AddEdge(1, 4)
	g.AddEdge(1, 5)
	g.AddEdge(2, 3)
	g.AddEdge(2, 6)
	g.AddEdge(3, 2)
	g.AddEdge(3, 7)
	g.AddEdge(4, 0)
	g.AddEdge(4, 5)
	g.AddEdge(5, 6)
	g.AddEdge(6, 5)
	g.AddEdge(6, 7)
	g.AddEdge(7, 7) // self-loop on node h
	return g
}
// nodeIDNameConverter maps between positional NodeIDs and human readable
// node names; a name's slice index is its NodeID.
type nodeIDNameConverter struct {
	orderedNodesName []string                // index -> name
	nodeNameToIDMap  map[string]graph.NodeID // name -> index (inverse of the slice)
}

// define fixed nodes order in the graph, then we use the `index` as nodeID for search,
// will be easier to implement by code.
// node name only for print.
var nodeConverter = nodeIDNameConverter{
	[]string{"a", "b", "c", "d", "e", "f", "g", "h"},
	map[string]graph.NodeID{}, // will be inited during import
}

// init populates nodeNameToIDMap as the inverse of orderedNodesName
// during package import.
func init() {
	for i, v := range nodeConverter.orderedNodesName {
		nodeConverter.nodeNameToIDMap[v] = graph.NodeID(i)
	}
}
// IDToName converts a NodeID into its human readable node name.
func IDToName(i graph.NodeID) string {
	name := "InvalidNodeID"
	if i != graph.InvalidNodeID {
		name = nodeConverter.orderedNodesName[i]
	}
	return name
}
// NameToID convert node human readable name to NodeID.
// NOTE(review): a name that was never registered yields the map's zero
// NodeID, which aliases node "a" — callers must pass valid names.
func NameToID(name string) graph.NodeID {
	return nodeConverter.nodeNameToIDMap[name]
}
// AdjacencyListGraphSample returns the adjacency-list backed sample graph,
// fully populated with the sample edges.
func AdjacencyListGraphSample() graph.Graph {
	return initializeGraphEdges(graph.NewAdjacencyListGraph(nodeCount, directedGraph))
}
// AdjacencyMatrixGraphSample return the adjacency matrix based graph sample instance
func AdjacencyMatrixGraphSample() graph.Graph {
sample := graph.NewAdjacencyMatrixGraph(nodeCount, directedGraph)
return initializeGraphEdges(sample)
} | graphsamples/graphsample4/sample4.go | 0.742515 | 0.454472 | sample4.go | starcoder |
package main
import (
"fmt"
"math"
"testing"
)
// floatify converts any supported numeric value to float64. Signed and
// unsigned integers of every width plus both float sizes are accepted;
// anything else (strings, nil, structs, ...) yields NaN.
//
// Fix: the original switch omitted int8 and uint8, so those numeric types
// incorrectly fell through to NaN; they are now converted like the other
// integer widths. The type switch also binds the asserted value, removing
// the duplicated value.(T) assertions.
func floatify(value interface{}) float64 {
	switch v := value.(type) {
	case int:
		return float64(v)
	case int8:
		return float64(v)
	case int16:
		return float64(v)
	case int32:
		return float64(v)
	case int64:
		return float64(v)
	case uint:
		return float64(v)
	case uint8:
		return float64(v)
	case uint16:
		return float64(v)
	case uint32:
		return float64(v)
	case uint64:
		return float64(v)
	case float32:
		return float64(v)
	case float64:
		return v
	}
	return math.NaN()
}
// floatifyArray converts every element of array to float64 via floatify,
// preserving order.
func floatifyArray(array []interface{}) []float64 {
	floatified := make([]float64, 0, len(array))
	for _, value := range array {
		floatified = append(floatified, floatify(value))
	}
	return floatified
}
// extractParameters pulls the named arguments out of arguments, converting
// each to float64. It returns nil as soon as any requested name is missing.
func extractParameters(arguments map[string]interface{}, names []string) map[string]float64 {
	parameters := make(map[string]float64, len(names))
	for _, name := range names {
		parameter, found := arguments[name]
		if !found {
			return nil
		}
		parameters[name] = floatify(parameter)
	}
	return parameters
}
// getParameter returns parameters[name], or 0 (the map zero value) when the
// name is absent.
func getParameter(parameters map[string]float64, name string) float64 {
	return parameters[name]
}
// symmetric formats value with format, handling negatives by formatting the
// magnitude and re-attaching the sign — except that a negative value whose
// magnitude formats as "0" is reported as plain "0" (never "-0").
func symmetric(format string, value float64) string {
	if value >= 0.0 {
		return fmt.Sprintf(format, value)
	}
	magnitude := fmt.Sprintf(format, -value)
	if magnitude == "0" {
		return "0"
	}
	return "-" + magnitude
}
// equalsFormatted reports whether the two values render to the same string
// once passed through symmetric with the given format.
func equalsFormatted(value1, value2 float64, format string) bool {
	s1 := symmetric(format, value1)
	s2 := symmetric(format, value2)
	return s1 == s2
}
func equalsFormattedArray(t *testing.T, value1, value2 []float64, format string) bool {
if len(value1) != len(value2) {
return false
}
for i := 0; i < len(value1); i++ {
if !equalsFormatted(value1[i], value2[i], format) {
return false
}
}
return true
}
func checkWithAccuracy(t *testing.T, result map[string]interface{}, expected map[string]interface{}, name string, accuracy string, simple bool) {
if simple {
value, _ := result[name]
valueExpected, _ := expected[name]
if !equalsFormatted(floatify(value), floatify(valueExpected), accuracy) {
t.Errorf("The value '%s' is incorrect (got: %v, expected: %v)!", name, value, valueExpected)
}
} else {
valuesRaw, _ := result[name]
values := floatifyArray(valuesRaw.([]interface{}))
valuesExpectedRaw, _ := expected[name]
valuesExpected := floatifyArray(valuesExpectedRaw.([]interface{}))
if !equalsFormattedArray(t, values, valuesExpected, accuracy) {
t.Errorf("The value '%s' is incorrect (got: %v, expected: %v)!", name, values, valuesExpected)
}
}
}
func genericTester(t *testing.T, result map[string]interface{}, expected map[string]interface{}, names []string, accuracies []string, types []bool) {
for index, name := range names {
accuracy := accuracies[index]
simple := types[index]
checkWithAccuracy(t, result, expected, name, accuracy, simple)
}
} | core/utilities.go | 0.683736 | 0.578835 | utilities.go | starcoder |
package hamming
import "strconv"
// References: check out Hacker's Delight, about p. 70
func table() [256]uint8 {
return [256]uint8{
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
}
}
// CountBitsByteAlt returns the number of set bits in x without a lookup
// table or branches, using a SWAR reduction: sum adjacent bit pairs, then
// adjacent nibbles, then the two nibble totals.
func CountBitsByteAlt(x byte) int {
	pairSums := (x & 0x55) + ((x >> 1) & 0x55)
	nibbleSums := (pairSums & 0x33) + ((pairSums >> 2) & 0x33)
	return int((nibbleSums & 0x0f) + ((nibbleSums >> 4) & 0x0f))
}
// CountBitsInt8 count 1's in x
func CountBitsInt8(x int8) int { return CountBitsByte(byte(x)) }
// CountBitsInt16 count 1's in x
func CountBitsInt16(x int16) int { return CountBitsUint16(uint16(x)) }
// CountBitsInt32 count 1's in x
func CountBitsInt32(x int32) int { return CountBitsUint32(uint32(x)) }
// CountBitsInt64 count 1's in x
func CountBitsInt64(x int64) int { return CountBitsUint64(uint64(x)) }
// CountBitsInt count 1's in x
func CountBitsInt(x int) int { return CountBitsUint(uint(x)) }
// CountBitsByte count 1's in x
func CountBitsByte(x byte) int { return CountBitsUint8(x) }
// CountBitsRune count 1's in x
func CountBitsRune(x rune) int { return CountBitsInt32(x) }
// CountBitsUint8 count 1's in x
func CountBitsUint8(x uint8) int { return int(table()[x]) }
// CountBitsUint16 count 1's in x
func CountBitsUint16(x uint16) int {
return int(table()[x&0xFF] + table()[(x>>8)&0xFF])
}
// SWAR masks for the 32-bit popcount: alternating 1-bit, 2-bit and 4-bit
// group masks (see Hacker's Delight, ch. 5).
const (
	m1d uint32 = 0x55555555
	m2d        = 0x33333333
	m4d        = 0x0f0f0f0f
)

// CountBitsUint32 returns the number of set bits in x (branch-free SWAR).
func CountBitsUint32(x uint32) int {
	x -= ((x >> 1) & m1d)            // put count of each 2 bits into those 2 bits
	x = (x & m2d) + ((x >> 2) & m2d) // put count of each 4 bits into those 4 bits
	x = (x + (x >> 4)) & m4d         // put count of each 8 bits into those 8 bits
	x += x >> 8                      // fold byte counts into the low half
	x += x >> 16                     // fold into the low byte
	return int(x & 0x3f)             // the count is at most 32, so 6 bits suffice
}
// SWAR masks and byte-summing multiplier for the 64-bit popcount.
const (
	m1q uint64 = 0x5555555555555555
	m2q        = 0x3333333333333333
	m4q        = 0x0f0f0f0f0f0f0f0f
	hq         = 0x0101010101010101
)

// CountBitsUint64 returns the number of set bits in x (branch-free SWAR;
// the final multiply sums the per-byte counts into the top byte).
func CountBitsUint64(x uint64) int {
	// put count of each 2 bits into those 2 bits
	x -= (x >> 1) & m1q
	// put count of each 4 bits into those 4 bits
	x = (x & m2q) + ((x >> 2) & m2q)
	// put count of each 8 bits into those 8 bits
	x = (x + (x >> 4)) & m4q
	// returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24) + ...
	return int((x * hq) >> 56)
}
// CountBitsUint64Alt count 1's in x
func CountBitsUint64Alt(x uint64) int {
return CountBitsUint32(uint32(x>>32)) + CountBitsUint32(uint32(x))
}
// CountBitsUintReference returns the number of set bits in x using
// Kernighan's method: each x &= x-1 clears the lowest set bit, so the loop
// body runs once per 1-bit.
func CountBitsUintReference(x uint) int {
	count := 0
	for ; x != 0; x &= x - 1 {
		count++
	}
	return count
}
// CountBitsUint count 1's in x
func CountBitsUint(x uint) int {
if strconv.IntSize == 64 {
return CountBitsUint64(uint64(x))
} else if strconv.IntSize == 32 {
return CountBitsUint32(uint32(x))
}
panic("strconv.IntSize must be 32 or 64 bits")
} | popcount.go | 0.580828 | 0.742678 | popcount.go | starcoder |
package wavelettree
import (
"github.com/hillbig/rsdic"
"github.com/ugorji/go/codec"
)
func New() WaveletTree {
return &waveletMatrix{
layers: make([]rsdic.RSDic, 0),
dim: 0,
num: 0,
blen: 0}
}
type waveletMatrix struct {
layers []rsdic.RSDic
dim uint64
num uint64
blen uint64 // =len(layers)
}
func (wm waveletMatrix) Num() uint64 {
return wm.num
}
func (wm waveletMatrix) Dim() uint64 {
return wm.dim
}
func (wm waveletMatrix) Lookup(pos uint64) uint64 {
val := uint64(0)
for depth := 0; depth < len(wm.layers); depth++ {
val <<= 1
rsd := wm.layers[depth]
if !rsd.Bit(pos) {
pos = rsd.Rank(pos, false)
} else {
val |= 1
pos = rsd.ZeroNum() + rsd.Rank(pos, true)
}
}
return val
}
func (wm waveletMatrix) Rank(pos uint64, val uint64) uint64 {
ranze := wm.RankRange(Range{0, pos}, val)
return ranze.Epos - ranze.Bpos
}
func (wm waveletMatrix) RankRange(ranze Range, val uint64) Range {
for depth := uint64(0); depth < wm.blen; depth++ {
bit := getMSB(val, depth, wm.blen)
rsd := wm.layers[depth]
ranze.Bpos = rsd.Rank(ranze.Bpos, bit)
ranze.Epos = rsd.Rank(ranze.Epos, bit)
if bit {
ranze.Bpos += rsd.ZeroNum()
ranze.Epos += rsd.ZeroNum()
}
}
return ranze
}
func (wm waveletMatrix) Select(rank uint64, val uint64) uint64 {
return wm.selectHelper(rank, val, 0, 0)
}
// selectHelper descends the wavelet matrix following the bits of val; at the
// bottom layer it converts rank into an absolute position, then unwinds by
// mapping that position back up through each layer with Select.
//
// Idiom fix: the `else` after a terminating `return` is dropped so the happy
// path stays left-aligned (behavior unchanged).
func (wm waveletMatrix) selectHelper(rank uint64, val uint64, pos uint64, depth uint64) uint64 {
	if depth == wm.blen {
		return pos + rank
	}
	bit := getMSB(val, depth, wm.blen)
	rsd := wm.layers[depth]
	pos = rsd.Rank(pos, bit)
	if !bit {
		rank = wm.selectHelper(rank, val, pos, depth+1)
		return rsd.Select(rank, false)
	}
	// A set bit maps into the ones partition, which starts after all zeros.
	zeroNum := rsd.ZeroNum()
	pos += zeroNum
	rank = wm.selectHelper(rank, val, pos, depth+1)
	return rsd.Select(rank-zeroNum, true)
}
func (wm waveletMatrix) LookupAndRank(pos uint64) (uint64, uint64) {
val := uint64(0)
bpos := uint64(0)
epos := uint64(pos)
for depth := uint64(0); depth < wm.blen; depth++ {
rsd := wm.layers[depth]
bit := rsd.Bit(epos)
bpos = rsd.Rank(bpos, bit)
epos = rsd.Rank(epos, bit)
val <<= 1
if bit {
bpos += rsd.ZeroNum()
epos += rsd.ZeroNum()
val |= 1
}
}
return val, epos - bpos
}
func (wm waveletMatrix) Quantile(ranze Range, k uint64) uint64 {
val := uint64(0)
bpos, epos := ranze.Bpos, ranze.Epos
for depth := 0; depth < len(wm.layers); depth++ {
val <<= 1
rsd := wm.layers[depth]
nzBpos := rsd.Rank(bpos, false)
nzEpos := rsd.Rank(epos, false)
nz := nzEpos - nzBpos
if k < nz {
bpos = nzBpos
epos = nzEpos
} else {
k -= nz
val |= 1
bpos = rsd.ZeroNum() + bpos - nzBpos
epos = rsd.ZeroNum() + epos - nzEpos
}
}
return val
}
func (wm waveletMatrix) Intersect(ranges []Range, k int) []uint64 {
return wm.intersectHelper(ranges, k, 0, 0)
}
func (wm waveletMatrix) intersectHelper(ranges []Range, k int, depth uint64, prefix uint64) []uint64 {
if depth == wm.blen {
ret := make([]uint64, 1)
ret[0] = prefix
return ret
}
rsd := wm.layers[depth]
zeroRanges := make([]Range, 0)
oneRanges := make([]Range, 0)
for _, ranze := range ranges {
bpos, epos := ranze.Bpos, ranze.Epos
nzBpos := rsd.Rank(bpos, false)
nzEpos := rsd.Rank(epos, false)
noBpos := bpos - nzBpos + rsd.ZeroNum()
noEpos := epos - nzEpos + rsd.ZeroNum()
if nzEpos-nzBpos > 0 {
zeroRanges = append(zeroRanges, Range{nzBpos, nzEpos})
}
if noEpos-noBpos > 0 {
oneRanges = append(oneRanges, Range{noBpos, noEpos})
}
}
ret := make([]uint64, 0)
if len(zeroRanges) >= k {
ret = append(ret, wm.intersectHelper(zeroRanges, k, depth+1, prefix<<1)...)
}
if len(oneRanges) >= k {
ret = append(ret, wm.intersectHelper(oneRanges, k, depth+1, (prefix<<1)|1)...)
}
return ret
}
func (wm waveletMatrix) MarshalBinary() (out []byte, err error) {
var bh codec.MsgpackHandle
enc := codec.NewEncoderBytes(&out, &bh)
err = enc.Encode(len(wm.layers))
if err != nil {
return
}
for i := 0; i < len(wm.layers); i++ {
err = enc.Encode(wm.layers[i])
if err != nil {
return
}
}
err = enc.Encode(wm.dim)
if err != nil {
return
}
err = enc.Encode(wm.num)
if err != nil {
return
}
err = enc.Encode(wm.blen)
if err != nil {
return
}
return
}
func (wm *waveletMatrix) UnmarshalBinary(in []byte) (err error) {
var bh codec.MsgpackHandle
dec := codec.NewDecoderBytes(in, &bh)
layerNum := 0
err = dec.Decode(&layerNum)
if err != nil {
return
}
wm.layers = make([]rsdic.RSDic, layerNum)
for i := 0; i < layerNum; i++ {
wm.layers[i] = rsdic.New()
err = dec.Decode(&wm.layers[i])
if err != nil {
return
}
}
err = dec.Decode(&wm.dim)
if err != nil {
return
}
err = dec.Decode(&wm.num)
if err != nil {
return
}
err = dec.Decode(&wm.blen)
if err != nil {
return
}
return
}
func getMSB(x uint64, pos uint64, blen uint64) bool {
return ((x >> (blen - pos - 1)) & 1) == 1
} | waveletMatrix.go | 0.506591 | 0.730638 | waveletMatrix.go | starcoder |
package treap
import (
"constraints"
"github.com/Tv0ridobro/data-structure/math"
"math/rand"
)
// Treap represents a treap
// Zero value of Treap is invalid treap, should be used only with New() or NewWithSource()
type Treap[T any] struct {
comp func(T, T) int
rand *rand.Rand
root *Node[T]
}
// New returns an initialized treap
// rand.Rand is used with zero seed
// For custom rand.Rand use NewWithSource
func New[T constraints.Ordered]() *Treap[T] {
return &Treap[T]{
rand: rand.New(rand.NewSource(0)),
comp: math.Comparator[T](),
}
}
// NewWithComparator returns an initialized treap using given comparator
func NewWithComparator[T any](comp func(T, T) int) *Treap[T] {
return &Treap[T]{
rand: rand.New(rand.NewSource(0)),
comp: comp,
}
}
// SetSource sets rand source
func (t *Treap[T]) SetSource(s rand.Source) {
t.rand = rand.New(s)
}
// Insert adds value to the treap, preserving search order on values and
// heap order on the randomly drawn priorities.
func (t *Treap[T]) Insert(value T) {
	node := &Node[T]{
		priority: t.rand.Int(),
		value:    value,
		size:     1,
	}
	if t.root == nil {
		t.root = node
		return
	}
	lesser, greater := split(t.root, node.value, t.comp)
	t.root = merge(merge(lesser, node), greater)
}
// Remove removes value from tree
// returns true if tree contained given value, false otherwise
func (t *Treap[T]) Remove(value T) bool {
if t.root == nil {
return false
}
oldSize := t.root.size
left, right := split(t.root, value, t.comp)
if right != nil {
right = tryRemoveMin(right, value, t.comp)
}
t.root = merge(left, right)
return oldSize != t.Size()
}
// Contains returns true if tree contains given value, false otherwise
func (t *Treap[T]) Contains(value T) bool {
if t.root == nil {
return false
}
return t.root.contains(value, t.comp)
}
// Size returns size of the tree
func (t *Treap[T]) Size() int {
if t.root == nil {
return 0
}
return t.root.size
}
// GetAll returns all elements from tree
// returned slice is sorted
func (t *Treap[T]) GetAll() []T {
if t.root == nil {
return nil
}
d := make([]T, t.Size())
t.root.getAll(d)
return d
} | treap/treap.go | 0.746971 | 0.479016 | treap.go | starcoder |
package toy
import (
"fmt"
"math/big"
C "github.com/armfazh/tozan-ecc/curve"
GF "github.com/armfazh/tozan-ecc/field"
)
// ID is an identifier of a toy curve.
type ID string
const (
W0 ID = "W0"
W1 ID = "W1"
W1ISO ID = "W1ISO"
W2 ID = "W2"
W3 ID = "W3"
W4 ID = "W4"
WC0 ID = "WC0"
M0 ID = "M0"
M1 ID = "M1"
E0 ID = "E0"
E1 ID = "E1"
)
type params struct {
model C.Model
p, m int
a, b int
h, r int
x, y interface{}
}
// Curves is a list of toy curves.
var Curves []ID
var toyCurves map[ID]*params
func init() {
Curves = make([]ID, 0, 10)
toyCurves = make(map[ID]*params)
W0.register(¶ms{model: C.Weierstrass, p: 53, m: 1, a: 3, b: 2, r: 51, h: 3, x: 46, y: 3})
W1.register(¶ms{model: C.Weierstrass, p: 53, m: 1, a: 0, b: 1, r: 54, h: 2, x: 13, y: 5})
W1ISO.register(¶ms{model: C.Weierstrass, p: 53, m: 1, a: 38, b: 22, r: 54, h: 2, x: 41, y: 45})
W2.register(¶ms{model: C.Weierstrass, p: 53, m: 1, a: 0, b: 2, r: 51, h: 3, x: 37, y: 27})
W3.register(¶ms{model: C.Weierstrass, p: 59, m: 1, a: 16, b: 0, r: 60, h: 4, x: 33, y: 11})
WC0.register(¶ms{model: C.WeierstrassC, p: 53, m: 1, a: 2, b: 3, r: 66, h: 6, x: 45, y: 4})
M0.register(¶ms{model: C.Montgomery, p: 53, m: 1, a: 4, b: 3, r: 44, h: 4, x: 16, y: 4})
M1.register(¶ms{model: C.Montgomery, p: 53, m: 1, a: 3, b: 1, r: 48, h: 4, x: 14, y: 22})
E0.register(¶ms{model: C.TwistedEdwards, p: 53, m: 1, a: 1, b: 3, r: 44, h: 4, x: 17, y: 49})
E1.register(¶ms{model: C.TwistedEdwards, p: 53, m: 1, a: -1, b: 12, r: 48, h: 4, x: 3, y: 19})
W4.register(¶ms{model: C.Weierstrass, p: 19, m: 2, a: 1, b: 4, r: 399, h: 3, x: []interface{}{0, 1}, y: 17})
}
// register records the curve parameters under id and appends id to the
// package-level list of available toy curves.
func (id ID) register(p *params) {
	toyCurves[id] = p
	Curves = append(Curves, id)
}
// New returns an elliptic curve and a generator point.
func (id ID) New() (C.EllCurve, C.Point, error) {
if v, ok := toyCurves[id]; ok {
var F GF.Field
if v.m == 1 {
F = GF.NewFp(fmt.Sprintf("%v", v.p), v.p)
} else if v.m == 2 {
F = GF.NewFp2(fmt.Sprintf("%v", v.p), v.p)
}
E := v.model.New(string(id), F,
F.Elt(v.a), F.Elt(v.b),
big.NewInt(int64(v.r)), big.NewInt(int64(v.h)))
P := E.NewPoint(F.Elt(v.x), F.Elt(v.y))
return E, P, nil
}
return nil, nil, fmt.Errorf("curve not supported")
} | curve/toy/toy.go | 0.680135 | 0.408395 | toy.go | starcoder |
package brotli
import "encoding/binary"
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (bucket_size_) to a ring buffer of
fixed size (block_size_). The ring buffer contains the last block_size_
index positions of the given hash key in the compressed data. */
func (*h5) HashTypeLength() uint {
return 4
}
func (*h5) StoreLookahead() uint {
return 4
}
/* HashBytes is the function that chooses the bucket to place the address in. */
func hashBytesH5(data []byte, shift int) uint32 {
var h uint32 = binary.LittleEndian.Uint32(data) * kHashMul32
/* The higher bits contain more mixture from the multiplication,
so we take our results from there. */
return uint32(h >> uint(shift))
}
type h5 struct {
hasherCommon
bucket_size_ uint
block_size_ uint
hash_shift_ int
block_mask_ uint32
num []uint16
buckets []uint32
}
func (h *h5) Initialize(params *encoderParams) {
h.hash_shift_ = 32 - h.params.bucket_bits
h.bucket_size_ = uint(1) << uint(h.params.bucket_bits)
h.block_size_ = uint(1) << uint(h.params.block_bits)
h.block_mask_ = uint32(h.block_size_ - 1)
h.num = make([]uint16, h.bucket_size_)
h.buckets = make([]uint32, h.block_size_*h.bucket_size_)
}
func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) {
var num []uint16 = h.num
var partial_prepare_threshold uint = h.bucket_size_ >> 6
/* Partial preparation is 100 times slower (per socket). */
if one_shot && input_size <= partial_prepare_threshold {
var i uint
for i = 0; i < input_size; i++ {
var key uint32 = hashBytesH5(data[i:], h.hash_shift_)
num[key] = 0
}
} else {
for i := 0; i < int(h.bucket_size_); i++ {
num[i] = 0
}
}
}
/* Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position. */
func (h *h5) Store(data []byte, mask uint, ix uint) {
var num []uint16 = h.num
var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_)
var minor_ix uint = uint(num[key]) & uint(h.block_mask_)
var offset uint = minor_ix + uint(key<<uint(h.params.block_bits))
h.buckets[offset] = uint32(ix)
num[key]++
}
func (h *h5) StoreRange(data []byte, mask uint, ix_start uint, ix_end uint) {
var i uint
for i = ix_start; i < ix_end; i++ {
h.Store(data, mask, i)
}
}
func (h *h5) StitchToPreviousBlock(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint) {
if num_bytes >= h.HashTypeLength()-1 && position >= 3 {
/* Prepare the hashes for three last bytes of the last write.
These could not be calculated before, since they require knowledge
of both the previous and the current block. */
h.Store(ringbuffer, ringbuffer_mask, position-3)
h.Store(ringbuffer, ringbuffer_mask, position-2)
h.Store(ringbuffer, ringbuffer_mask, position-1)
}
}
func (h *h5) PrepareDistanceCache(distance_cache []int) {
prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
/* Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.
REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache
values; if this method is invoked repeatedly with the same distance
cache values, it is enough to invoke PrepareDistanceCacheH5 once.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var num []uint16 = h.num
var buckets []uint32 = h.buckets
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var min_score uint = out.score
var best_score uint = out.score
var best_len uint = out.len
var i uint
var bucket []uint32
/* Don't accept a short copy from far away. */
out.len = 0
out.len_code_delta = 0
/* Try last distance first. */
for i = 0; i < uint(h.params.num_last_distances_to_check); i++ {
var backward uint = uint(distance_cache[i])
var prev_ix uint = uint(cur_ix - backward)
if prev_ix >= cur_ix {
continue
}
if backward > max_backward {
continue
}
prev_ix &= ring_buffer_mask
if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 3 || (len == 2 && i < 2) {
/* Comparing for >= 2 does not change the semantics, but just saves for
a few unnecessary binary logarithms in backward reference score,
since we are not interested in such short matches. */
var score uint = backwardReferenceScoreUsingLastDistance(uint(len))
if best_score < score {
if i != 0 {
score -= backwardReferencePenaltyUsingLastDistance(i)
}
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
}
{
var key uint32 = hashBytesH5(data[cur_ix_masked:], h.hash_shift_)
bucket = buckets[key<<uint(h.params.block_bits):]
var down uint
if uint(num[key]) > h.block_size_ {
down = uint(num[key]) - h.block_size_
} else {
down = 0
}
for i = uint(num[key]); i > down; {
var prev_ix uint
i--
prev_ix = uint(bucket[uint32(i)&h.block_mask_])
var backward uint = cur_ix - prev_ix
if backward > max_backward {
break
}
prev_ix &= ring_buffer_mask
if cur_ix_masked+best_len > ring_buffer_mask || prev_ix+best_len > ring_buffer_mask || data[cur_ix_masked+best_len] != data[prev_ix+best_len] {
continue
}
{
var len uint = findMatchLengthWithLimit(data[prev_ix:], data[cur_ix_masked:], max_length)
if len >= 4 {
/* Comparing for >= 3 does not change the semantics, but just saves
for a few unnecessary binary logarithms in backward reference
score, since we are not interested in such short matches. */
var score uint = backwardReferenceScore(uint(len), backward)
if best_score < score {
best_score = score
best_len = uint(len)
out.len = best_len
out.distance = backward
out.score = best_score
}
}
}
}
bucket[uint32(num[key])&h.block_mask_] = uint32(cur_ix)
num[key]++
}
if min_score == out.score {
searchInStaticDictionary(dictionary, h, data[cur_ix_masked:], max_length, max_backward+gap, max_distance, out, false)
}
} | vendor/github.com/andybalholm/brotli/h5.go | 0.640748 | 0.425426 | h5.go | starcoder |
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
type cardinalDir int
const (
north cardinalDir = iota
east
south
west
)
type turn int
const (
left turn = iota
right
)
type position struct {
x int
y int
}
type ship struct {
curPos position
facing cardinalDir
}
// move translates the ship by units along the given cardinal direction:
// north/south adjust y (+/-), east/west adjust x (+/-).
func (s *ship) move(dir cardinalDir, units int) {
	switch dir {
	case north:
		s.curPos.y += units
	case east:
		s.curPos.x += units
	case south:
		s.curPos.y -= units
	case west:
		s.curPos.x -= units
	}
}
// turn rotates the ship's heading left or right by the given number of
// degrees (a multiple of 90).
//
// The 40-line if/else ladder is replaced by modular arithmetic:
// cardinalDir values are declared in clockwise order (north=0, east=1,
// south=2, west=3), so a right turn advances the value by degrees/90 steps
// and a left turn retreats it, modulo 4. Behavior is identical for the
// 90/180/270 inputs the puzzle uses, and other multiples of 90 (e.g. 360,
// which the original ladder mishandled) now work too.
func (s *ship) turn(turnDir turn, degrees int) {
	steps := (degrees / 90) % 4
	if turnDir == left {
		steps = -steps
	}
	// +4 before the final mod keeps the result non-negative in Go.
	s.facing = cardinalDir(((int(s.facing)+steps)%4 + 4) % 4)
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
func main() {
if len(os.Args) != 2 {
fmt.Println("Usage: go run pt1.go <input_file>")
os.Exit(1)
}
fileName := os.Args[1]
file, err := os.Open(fileName)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
curPos := position{0, 0}
ferry := ship{curPos, east}
lineNo := 0
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
// Each line is a single character for an action followed by an integer for units to move (or degrees if L or R)
action := line[0]
units, ok := strconv.Atoi(line[1:])
if ok != nil {
fmt.Printf("expected integer unit on line %d", lineNo)
os.Exit(1)
}
switch action {
case 'F':
ferry.move(ferry.facing, units)
case 'L':
ferry.turn(left, units)
case 'R':
ferry.turn(right, units)
case 'N':
ferry.move(north, units)
case 'E':
ferry.move(east, units)
case 'S':
ferry.move(south, units)
case 'W':
ferry.move(west, units)
default:
fmt.Println("unexpected action on line", lineNo)
os.Exit(1)
}
lineNo++
}
manhattanDist := abs(ferry.curPos.x) + abs(ferry.curPos.y)
fmt.Printf("Manhattan distance is %d after sailing to (%d,%d)\n", manhattanDist, ferry.curPos.x, ferry.curPos.y)
} | day12/pt1.go | 0.561936 | 0.447279 | pt1.go | starcoder |
package matchers
import "bytes"
// Zip matches a zip archive: a "PK" signature followed by one of the
// record-type byte pairs (0x03 0x04, 0x05 0x06 or 0x07 0x08).
func Zip(in []byte) bool {
	if len(in) < 4 {
		return false
	}
	if in[0] != 0x50 || in[1] != 0x4B {
		return false
	}
	thirdOK := in[2] == 0x3 || in[2] == 0x5 || in[2] == 0x7
	fourthOK := in[3] == 0x4 || in[3] == 0x6 || in[3] == 0x8
	return thirdOK && fourthOK
}
// Odt matches an OpenDocument Text file: the ODF mimetype string must appear
// starting at byte offset 30 of the archive.
func Odt(in []byte) bool {
	const offset = 30
	if len(in) <= offset {
		return false
	}
	return bytes.HasPrefix(in[offset:], []byte("mimetypeapplication/vnd.oasis.opendocument.text"))
}
// Ott matches an OpenDocument Text Template file.
func Ott(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.text-template"))
}
// Ods matches an OpenDocument Spreadsheet file.
func Ods(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet"))
}
// Ots matches an OpenDocument Spreadsheet Template file.
func Ots(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet-template"))
}
// Odp matches an OpenDocument Presentation file.
func Odp(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.presentation"))
}
// Otp matches an OpenDocument Presentation Template file.
func Otp(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.presentation-template"))
}
// Odg matches an OpenDocument Drawing file.
func Odg(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.graphics"))
}
// Otg matches an OpenDocument Drawing Template file.
func Otg(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.graphics-template"))
}
// Odf matches an OpenDocument Formula file.
func Odf(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/vnd.oasis.opendocument.formula"))
}
// Epub matches an EPUB file.
func Epub(in []byte) bool {
return len(in) > 30 && bytes.HasPrefix(in[30:], []byte("mimetypeapplication/epub+zip"))
}
// Jar matches a Java archive file.
func Jar(in []byte) bool {
t := zipTokenizer{in: in}
for i, tok := 0, t.next(); i < 10 && tok != ""; i, tok = i+1, t.next() {
if tok == "META-INF/MANIFEST.MF" {
return true
}
}
return false
} | vendor/github.com/gabriel-vasile/mimetype/internal/matchers/zip.go | 0.602997 | 0.466785 | zip.go | starcoder |
package mat
import (
"math"
)
// Vector4 is 1x4 matrix for 3D transformations and coordinate representation.
type Vector4 [4]float64
// Matrix4 is 4x4 matrix for 3D transformations.
type Matrix4 [16]float64
var Identity4 = Matrix4{
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
}
// DotMatrix calculates dot product of m·o.
// The result is stored in m.
func (m *Matrix4) DotMatrix(o *Matrix4) {
*m = Matrix4{
m[0]*o[0] + m[1]*o[4] + m[2]*o[8] + m[3]*o[12],
m[0]*o[1] + m[1]*o[5] + m[2]*o[9] + m[3]*o[13],
m[0]*o[2] + m[1]*o[6] + m[2]*o[10] + m[3]*o[14],
m[0]*o[3] + m[1]*o[7] + m[2]*o[11] + m[3]*o[15],
m[4]*o[0] + m[5]*o[4] + m[6]*o[8] + m[7]*o[12],
m[4]*o[1] + m[5]*o[5] + m[6]*o[9] + m[7]*o[13],
m[4]*o[2] + m[5]*o[6] + m[6]*o[10] + m[7]*o[14],
m[4]*o[3] + m[5]*o[7] + m[6]*o[11] + m[7]*o[15],
m[8]*o[0] + m[9]*o[4] + m[10]*o[8] + m[11]*o[12],
m[8]*o[1] + m[9]*o[5] + m[10]*o[9] + m[11]*o[13],
m[8]*o[2] + m[9]*o[6] + m[10]*o[10] + m[11]*o[14],
m[8]*o[3] + m[9]*o[7] + m[10]*o[11] + m[11]*o[15],
m[12]*o[0] + m[13]*o[4] + m[14]*o[8] + m[15]*o[12],
m[12]*o[1] + m[13]*o[5] + m[14]*o[9] + m[15]*o[13],
m[12]*o[2] + m[13]*o[6] + m[14]*o[10] + m[15]*o[14],
m[12]*o[3] + m[13]*o[7] + m[14]*o[11] + m[15]*o[15],
}
}
// MatrixDot calculates dot product of m·v where v is a column vector.
// The result is stored in v.
func (v *Vector4) MatrixDot(m *Matrix4) {
*v = Vector4{
m[0]*v[0] + m[1]*v[1] + m[2]*v[2] + m[3]*v[3],
m[4]*v[0] + m[5]*v[1] + m[6]*v[2] + m[7]*v[3],
m[8]*v[0] + m[9]*v[1] + m[10]*v[2] + m[11]*v[3],
m[12]*v[0] + m[13]*v[1] + m[14]*v[2] + m[15]*v[3],
}
}
// Translate applies translation transformation that
// translates points along axes.
func (m *Matrix4) Translate(x, y, z float64) {
o := Matrix4Translate(x, y, z)
m.DotMatrix(&o)
}
// Scale applies scaling transformation that
// scales points with provided scale factors.
func (m *Matrix4) Scale(x, y, z float64) {
o := Matrix4Scale(x, y, z)
m.DotMatrix(&o)
}
// Rotate applies rotation transformation that rotates point around unit
// vector (x, y, z) by the provided angle in degrees.
// Vector must be normalized.
func (m *Matrix4) Rotate(angle, x, y, z float64) {
o := Matrix4Rotate(angle, x, y, z)
m.DotMatrix(&o)
}
// Matrix4Translate creates translation 4x4 matrix that
// translates points along axes.
func Matrix4Translate(x, y, z float64) Matrix4 {
return Matrix4{
1, 0, 0, x,
0, 1, 0, y,
0, 0, 1, z,
0, 0, 0, 1,
}
}
// Matrix4Scale creates scaling 4x4 matrix that
// scales points with provided scale factors.
func Matrix4Scale(x, y, z float64) Matrix4 {
return Matrix4{
x, 0, 0, 0,
0, y, 0, 0,
0, 0, z, 0,
0, 0, 0, 1,
}
}
// Matrix4Rotate creates rotation 4x4 matrix that rotates point around unit
// vector (x, y, z) by the provided angle in degrees.
// Vector must be normalized.
func Matrix4Rotate(angle, x, y, z float64) Matrix4 {
a := angle * math.Pi / 180
sa := math.Sin(a)
ca := math.Cos(a)
ca1 := 1 - ca
xy1 := x * y * ca1
xz1 := x * z * ca1
yz1 := y * z * ca1
x1 := x * sa
y1 := y * sa
z1 := z * sa
return Matrix4{
ca + x*x*ca1, xy1 - z1, xz1 + y1, 0,
xy1 + z1, ca + y*y*ca1, yz1 - x1, 0,
xz1 - y1, yz1 + x1, ca + z*z*ca1, 0,
0, 0, 0, 1,
}
}
// Matrix4Frustum creates frustum projection 4x4 matrix.
// l, r, t, b are the left/right/top/bottom clip planes at the near plane n,
// and f is the far plane.
// NOTE(review): dy1 uses (b - t) where the classic OpenGL glFrustum matrix
// uses (t - b) — this flips the y axis; confirm it matches the intended
// screen-space convention.
func Matrix4Frustum(l, r, t, b, n, f float64) Matrix4 {
	dx1 := 1 / (r - l)
	dy1 := 1 / (b - t)
	dz1 := 1 / (f - n)
	n2 := n * 2
	return Matrix4{
		n2 * dx1, 0, (r + l) * dx1, 0,
		0, n2 * dy1, (t + b) * dy1, 0,
		0, 0, -(f + n) * dz1, -f * n2 * dz1,
		0, 0, -1, 0,
	}
}
// Matrix4Ortho creates orthographic projection 4x4 matrix.
// l, r, t, b are the left/right/top/bottom clip planes, and n, f the
// near/far planes. As in Matrix4Frustum above, the y axis is flipped
// (dy1 uses b - t).
func Matrix4Ortho(l, r, t, b, n, f float64) Matrix4 {
	dx1 := 1 / (r - l)
	dy1 := 1 / (b - t)
	dz1 := 1 / (f - n)
	return Matrix4{
		// Fix: the y scale term was "2 * n * dy1". An orthographic
		// projection's axis scales must not depend on the near plane
		// (compare the x term "2 * dx1"); the stray n appears copy-pasted
		// from the frustum matrix's "n2 * dy1".
		2 * dx1, 0, 0, (r + l) * dx1,
		0, 2 * dy1, 0, (t + b) * dy1,
		0, 0, -2 * dz1, (f + n) * dz1,
		0, 0, 0, 1,
	}
}
// FovToY returns the y coordinate to pass to Matrix4Frustum as
// (-y, y, -y, y) for a given vertical field of view (in degrees) and the
// near plane distance n.
// Example usage:
//	near := 1
//	far := 10
//	fov := 120
//	y := FovToY(fov, near)
//	m := Matrix4Frustum(-y, y, -y, y, near, far)
func FovToY(fov, n float64) float64 {
	halfAngle := fov * math.Pi / 360 // fov/2 converted to radians
	return n * math.Tan(halfAngle)
}
// Normalize returns the unit-length vector pointing in the direction of
// (x, y, z). The zero vector produces NaNs (division by zero length).
func Normalize(x, y, z float64) (xn, yn, zn float64) {
	length := math.Sqrt(x*x + y*y + z*z)
	xn, yn, zn = x/length, y/length, z/length
	return
}
// WDiv performs the perspective divide: x, y and z are divided by the w
// component. Note w itself is then set to 1/w rather than 1 —
// presumably so callers can recover or reuse the original w; confirm
// against call sites.
func (v *Vector4) WDiv() {
	v[0] /= v[3]
	v[1] /= v[3]
	v[2] /= v[3]
	v[3] = 1 / v[3]
}
// ViewportProject maps a clip-space vector into viewport coordinates using
// the viewport's precomputed half extents (w2, h2) and depth-range terms
// (fpn2, fmn2).
func (v *Vector4) ViewportProject(vp *Viewport) {
	v[0] = vp.Left + vp.w2 + vp.w2*v[0]
	v[1] = vp.Top + vp.h2 + vp.h2*v[1]
	v[2] = vp.fpn2 + vp.fmn2*v[2]
	// NOTE(review): this overwrites the depth computed on the previous line
	// with 1/depth² — looks suspicious; confirm this is intentional.
	v[2] = 1 / (v[2] * v[2])
} | pkg/mat/mat4.go | 0.771585 | 0.624379 | mat4.go | starcoder |
package mapping
import (
"fmt"
"github.com/Jeffail/benthos/v3/lib/bloblang/x/query"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/gabs/v2"
)
//------------------------------------------------------------------------------
// AssignmentContext contains references to all potential assignment
// destinations of a given mapping.
type AssignmentContext struct {
	// Maps holds named sub-mappings available to queries.
	Maps map[string]query.Function
	// Vars is the variable table written by varAssignment.
	Vars map[string]interface{}
	// Meta is the message metadata written by metaAssignment.
	Meta types.Metadata
	// Value points at the structured message payload written by
	// jsonAssignment.
	Value *interface{}
}

// Assignment represents a way of assigning a queried value to something within
// an assignment context. This could be a Benthos message, a variable, a
// metadata field, etc.
type Assignment interface {
	// Apply writes value into the relevant destination within ctx.
	Apply(value interface{}, ctx AssignmentContext) error
}
//------------------------------------------------------------------------------
// varAssignment writes a query result into the mapping's variable table.
type varAssignment struct {
	Name string
}

// Apply stores value under the variable name, or removes the variable
// entirely when the value is a query.Delete marker.
func (v *varAssignment) Apply(value interface{}, ctx AssignmentContext) error {
	if _, isDelete := value.(query.Delete); isDelete {
		delete(ctx.Vars, v.Name)
		return nil
	}
	ctx.Vars[v.Name] = value
	return nil
}
//------------------------------------------------------------------------------
// metaAssignment writes a query result into message metadata. A nil Key
// targets the metadata object as a whole.
type metaAssignment struct {
	Key *string
}

// Apply sets (or deletes, when value is a query.Delete marker) metadata.
// With a nil Key: deletion clears all metadata; otherwise value must be an
// object whose fields become individual metadata entries.
// With a non-nil Key: only that one entry is set or deleted.
func (v *metaAssignment) Apply(value interface{}, ctx AssignmentContext) error {
	_, deleted := value.(query.Delete)
	if v.Key == nil {
		if deleted {
			// NOTE(review): deletes while iterating — assumes the Metadata
			// implementation tolerates mutation during Iter.
			ctx.Meta.Iter(func(k, _ string) error {
				ctx.Meta.Delete(k)
				return nil
			})
		} else {
			if m, ok := value.(map[string]interface{}); ok {
				for k, v := range m {
					ctx.Meta.Set(k, query.IToString(v))
				}
			} else {
				return fmt.Errorf("setting root meta object requires object value, received: %T", value)
			}
		}
		return nil
	}
	if deleted {
		ctx.Meta.Delete(*v.Key)
	} else {
		ctx.Meta.Set(*v.Key, query.IToString(value))
	}
	return nil
}
//------------------------------------------------------------------------------
// jsonAssignment writes a query result into the structured message payload
// at Path (an empty Path targets the document root).
type jsonAssignment struct {
	Path []string
}

// Apply sets (or deletes, when value is a query.Delete marker) the payload
// field at v.Path. Non-delete values are deep-cloned first so later
// mutation of the source cannot leak into the message.
func (v *jsonAssignment) Apply(value interface{}, ctx AssignmentContext) error {
	_, deleted := value.(query.Delete)
	if !deleted {
		value = query.IClone(value)
	}
	if len(v.Path) == 0 {
		// NOTE(review): execution falls through after assigning the root,
		// so the gabs Set/Delete below still runs with an empty path.
		// Confirm this fallthrough (rather than an early return) is
		// intentional.
		*ctx.Value = value
	}
	// A missing or Nothing root is promoted to an empty object so the
	// path-based set below has something to descend into.
	if _, isNothing := (*ctx.Value).(query.Nothing); isNothing || *ctx.Value == nil {
		*ctx.Value = map[string]interface{}{}
	}
	gObj := gabs.Wrap(*ctx.Value)
	if deleted {
		gObj.Delete(v.Path...)
	} else {
		gObj.Set(value, v.Path...)
	}
	*ctx.Value = gObj.Data()
	return nil
}
//------------------------------------------------------------------------------ | lib/bloblang/x/mapping/assignment.go | 0.558207 | 0.404155 | assignment.go | starcoder |
package canvas
import (
"fmt"
"math"
)
// vec is a 2D vector/point: index 0 is x, index 1 is y.
type vec [2]float64

// String renders the vector as "[x,y]".
func (v vec) String() string {
	return fmt.Sprintf("[%f,%f]", v[0], v[1])
}

// add returns the component-wise sum v + v2.
func (v vec) add(v2 vec) vec {
	return vec{v[0] + v2[0], v[1] + v2[1]}
}

// sub returns the component-wise difference v - v2.
func (v vec) sub(v2 vec) vec {
	return vec{v[0] - v2[0], v[1] - v2[1]}
}

// mul returns the component-wise (Hadamard) product.
func (v vec) mul(v2 vec) vec {
	return vec{v[0] * v2[0], v[1] * v2[1]}
}

// mulf scales both components by f.
func (v vec) mulf(f float64) vec {
	return vec{v[0] * f, v[1] * f}
}

// mulMat transforms v by the affine matrix m, including its translation
// terms (m[4], m[5]).
func (v vec) mulMat(m mat) vec {
	return vec{
		m[0]*v[0] + m[2]*v[1] + m[4],
		m[1]*v[0] + m[3]*v[1] + m[5]}
}

// mulMat2 transforms v by the 2x2 linear matrix m (no translation).
func (v vec) mulMat2(m mat2) vec {
	return vec{m[0]*v[0] + m[2]*v[1], m[1]*v[0] + m[3]*v[1]}
}

// div returns the component-wise quotient v / v2.
func (v vec) div(v2 vec) vec {
	return vec{v[0] / v2[0], v[1] / v2[1]}
}

// divf divides both components by f.
func (v vec) divf(f float64) vec {
	return vec{v[0] / f, v[1] / f}
}

// dot returns the dot product of v and v2.
func (v vec) dot(v2 vec) float64 {
	return v[0]*v2[0] + v[1]*v2[1]
}

// len returns the Euclidean length of v.
func (v vec) len() float64 {
	return math.Sqrt(v[0]*v[0] + v[1]*v[1])
}

// lenSqr returns the squared length, avoiding the square root.
func (v vec) lenSqr() float64 {
	return v[0]*v[0] + v[1]*v[1]
}

// norm returns v scaled to length 1 (undefined for the zero vector).
func (v vec) norm() vec {
	return v.mulf(1.0 / v.len())
}

// atan2 returns the angle of v measured from the +x axis.
func (v vec) atan2() float64 {
	return math.Atan2(v[1], v[0])
}

// angle returns π/2 − atan2(y, x), i.e. the angle measured from the
// +y axis instead of +x.
func (v vec) angle() float64 {
	return math.Pi*0.5 - math.Atan2(v[1], v[0])
}

// angleTo returns the unsigned angle between v and v2.
func (v vec) angleTo(v2 vec) float64 {
	return math.Acos(v.norm().dot(v2.norm()))
}
// mat is a 2x3 affine transform stored as [a b c d e f] where
// x' = a*x + c*y + e and y' = b*x + d*y + f (see vec.mulMat) — the same
// parameter order as the HTML canvas setTransform(a, b, c, d, e, f).
type mat [6]float64

// String renders the transform as the equivalent 3x3 matrix with the
// implicit [0 0 1] column.
func (m *mat) String() string {
	return fmt.Sprintf("[%f,%f,0,\n %f,%f,0,\n %f,%f,1,]", m[0], m[2], m[4], m[1], m[3], m[5])
}

// matIdentity returns the identity transform.
func matIdentity() mat {
	return mat{
		1, 0,
		0, 1,
		0, 0}
}

// matTranslate returns a pure translation by v.
func matTranslate(v vec) mat {
	return mat{
		1, 0,
		0, 1,
		v[0], v[1]}
}

// matScale returns a pure (possibly non-uniform) scale by v.
func matScale(v vec) mat {
	return mat{
		v[0], 0,
		0, v[1],
		0, 0}
}

// matRotate returns a rotation by the given angle in radians.
func matRotate(radians float64) mat {
	s, c := math.Sincos(radians)
	return mat{
		c, s,
		-s, c,
		0, 0}
}
// mul returns the composition m × m2: the transform that applies m first
// and then m2. Note m's translation row is pushed through m2's linear part
// before m2's own translation is added.
func (m mat) mul(m2 mat) mat {
	return mat{
		m[0]*m2[0] + m[1]*m2[2],
		m[0]*m2[1] + m[1]*m2[3],
		m[2]*m2[0] + m[3]*m2[2],
		m[2]*m2[1] + m[3]*m2[3],
		m[4]*m2[0] + m[5]*m2[2] + m2[4],
		m[4]*m2[1] + m[5]*m2[3] + m2[5]}
}
// invert returns the inverse affine transform of m. The caller must ensure
// the 2x2 linear part has a non-zero determinant.
func (m mat) invert() mat {
	invDet := 1.0 / (m[0]*m[3] - m[2]*m[1]) // reciprocal of the determinant
	return mat{
		m[3] * invDet,
		-m[1] * invDet,
		-m[2] * invDet,
		m[0] * invDet,
		(m[2]*m[5] - m[3]*m[4]) * invDet,
		(m[1]*m[4] - m[0]*m[5]) * invDet,
	}
}
// mat2 is the 2x2 linear part of an affine transform (no translation).
type mat2 [4]float64

// mat2 extracts the linear part of m, dropping the translation terms.
func (m mat) mat2() mat2 {
	return mat2{m[0], m[1], m[2], m[3]}
}

// String renders the matrix in row-major visual order.
func (m *mat2) String() string {
	return fmt.Sprintf("[%f,%f,\n %f,%f]", m[0], m[2], m[1], m[3])
} | math.go | 0.804214 | 0.458409 | math.go | starcoder |
package rbxmk
import (
"fmt"
lua "github.com/anaminus/gopher-lua"
"github.com/anaminus/rbxmk/rtypes"
"github.com/robloxapi/types"
)
// FrameType indicates the kind of frame for a State.
type FrameType uint8

const (
	// Frame is a regular function.
	FunctionFrame FrameType = iota
	// Frame is a method; exclude first argument.
	MethodFrame
	// Frame is an operator, avoid displaying arguments.
	OperatorFrame
)

// State facilitates the reflection of values to a particular Lua state.
type State struct {
	*World
	// L is the Lua state values are pushed to and pulled from.
	L *lua.LState
	// FrameType provides a hint to how errors should be produced.
	FrameType FrameType
}
// Count returns the number of arguments in the stack frame.
func (s State) Count() int {
	return s.L.GetTop()
}

// ReflectorError raises an error indicating that a reflector pushed or pulled
// an unexpected type. Under normal circumstances, this error should be
// unreachable.
// When n is positive the error is attributed to argument n; otherwise a
// general error is raised.
func (s State) ReflectorError(n int) int {
	if n <= 0 {
		return s.RaiseError("unreachable error: reflector mismatch")
	}
	return s.ArgError(n, "unreachable error: reflector mismatch")
}
// PushTuple pushes each value.
// Values are reflected to Lua values first and pushed only after every
// reflection succeeds, so a mid-sequence failure leaves the stack
// untouched. Returns the number of values pushed.
func (s State) PushTuple(values ...types.Value) int {
	lvs := make([]lua.LValue, len(values))
	for i, value := range values {
		rfl := s.MustReflector(value.Type())
		lv, err := rfl.PushTo(s.Context(), value)
		if err != nil {
			return s.RaiseError("%s", err)
		}
		lvs[i] = lv
	}
	for _, lv := range lvs {
		s.L.Push(lv)
	}
	return len(lvs)
}
// PullTuple pulls each value starting from n as a Variant.
// Returns nil when there are no arguments at or after position n, or when
// a value fails to reflect (after raising an argument error).
func (s State) PullTuple(n int) rtypes.Tuple {
	c := s.Count()
	length := c - n + 1
	if length <= 0 {
		return nil
	}
	rfl := s.MustReflector("Variant")
	vs := make(rtypes.Tuple, length)
	for i := n; i <= c; i++ {
		lv := s.L.Get(i)
		v, err := rfl.PullFrom(s.Context(), lv)
		if err != nil {
			s.ArgError(i, err.Error())
			return nil
		}
		vs[i-n] = v
	}
	return vs
}
// RaiseError is a shortcut for LState.RaiseError that returns 0, allowing
// `return s.RaiseError(...)` from functions that return a result count.
func (s State) RaiseError(format string, args ...interface{}) int {
	s.L.RaiseError(format, args...)
	return 0
}
// ArgError raises an argument error depending on the state's frame type.
// For methods the receiver occupies argument 1, so indexes shift down by
// one and receiver errors get a distinct message; for operators argument
// positions are meaningless, so only the message is reported.
// Always returns 0 so callers can use it as `return s.ArgError(...)`.
func (s State) ArgError(n int, msg string, v ...interface{}) int {
	if len(v) > 0 {
		msg = fmt.Sprintf(msg, v...)
	}
	switch s.FrameType {
	case MethodFrame:
		if n <= 1 {
			s.RaiseError("bad method receiver: %s", msg)
		} else {
			s.L.ArgError(n-1, msg)
		}
	case OperatorFrame:
		// Fix: pass msg as a value rather than as the format string.
		// A message containing '%' (e.g. one produced by the Sprintf
		// above, or supplied by a caller) would otherwise be mangled by
		// RaiseError's formatting. This also matches TypeError below.
		s.RaiseError("%s", msg)
	default:
		s.L.ArgError(n, msg)
	}
	return 0
}
// TypeError raises an argument type error depending on the state's frame type.
// Mirrors ArgError: for methods, argument 1 is the receiver so indexes are
// shifted down by one; for operators only the message is reported.
func (s State) TypeError(n int, want, got string) int {
	err := TypeError{Want: want, Got: got}
	switch s.FrameType {
	case MethodFrame:
		if n <= 1 {
			s.RaiseError("bad method receiver: %s", err)
		} else {
			s.L.ArgError(n-1, err.Error())
		}
	case OperatorFrame:
		s.RaiseError("%s", err.Error())
	default:
		s.L.ArgError(n, err.Error())
	}
	return 0
}
// The Check* helpers below mirror lua.LState's Check* methods, but report
// failures through State.TypeError/ArgError so messages respect the current
// FrameType, and they perform strict type checks (see CheckString — no
// implicit conversion is attempted). The zero-value returns following a
// TypeError call exist to satisfy the compiler; presumably the raised Lua
// error aborts the frame before they are reached.

// CheckAny returns the nth argument, which can be any type as long as the
// argument exists.
func (s State) CheckAny(n int) lua.LValue {
	if n > s.Count() {
		s.ArgError(n, "value expected")
		return nil
	}
	return s.L.Get(n)
}

// CheckBool returns the nth argument, expecting a boolean.
func (s State) CheckBool(n int) bool {
	v := s.L.Get(n)
	if lv, ok := v.(lua.LBool); ok {
		return bool(lv)
	}
	s.TypeError(n, lua.LTBool.String(), v.Type().String())
	return false
}

// CheckInt returns the nth argument as an int, expecting a number.
func (s State) CheckInt(n int) int {
	v := s.L.Get(n)
	if lv, ok := v.(lua.LNumber); ok {
		return int(lv)
	}
	s.TypeError(n, lua.LTNumber.String(), v.Type().String())
	return 0
}

// CheckInt64 returns the nth argument as an int64, expecting a number.
func (s State) CheckInt64(n int) int64 {
	v := s.L.Get(n)
	if lv, ok := v.(lua.LNumber); ok {
		return int64(lv)
	}
	s.TypeError(n, lua.LTNumber.String(), v.Type().String())
	return 0
}

// CheckNumber returns the nth argument, expecting a number.
func (s State) CheckNumber(n int) lua.LNumber {
	v := s.L.Get(n)
	if lv, ok := v.(lua.LNumber); ok {
		return lv
	}
	s.TypeError(n, lua.LTNumber.String(), v.Type().String())
	return 0
}

// CheckString returns the nth argument, expecting a string. Unlike
// LState.CheckString, it does not try to convert non-string values into a
// string.
func (s State) CheckString(n int) string {
	v := s.L.Get(n)
	if lv, ok := v.(lua.LString); ok {
		return string(lv)
	}
	s.TypeError(n, lua.LTString.String(), v.Type().String())
	return ""
}

// CheckTable returns the nth argument, expecting a table.
func (s State) CheckTable(n int) *lua.LTable {
	v := s.L.Get(n)
	if lv, ok := v.(*lua.LTable); ok {
		return lv
	}
	s.TypeError(n, lua.LTTable.String(), v.Type().String())
	return nil
}

// CheckFunction returns the nth argument, expecting a function.
func (s State) CheckFunction(n int) *lua.LFunction {
	v := s.L.Get(n)
	if lv, ok := v.(*lua.LFunction); ok {
		return lv
	}
	s.TypeError(n, lua.LTFunction.String(), v.Type().String())
	return nil
}

// CheckUserData returns the nth argument, expecting a userdata.
func (s State) CheckUserData(n int) *lua.LUserData {
	v := s.L.Get(n)
	if lv, ok := v.(*lua.LUserData); ok {
		return lv
	}
	s.TypeError(n, lua.LTUserData.String(), v.Type().String())
	return nil
}

// CheckThread returns the nth argument, expecting a thread.
func (s State) CheckThread(n int) *lua.LState {
	v := s.L.Get(n)
	if lv, ok := v.(*lua.LState); ok {
		return lv
	}
	s.TypeError(n, lua.LTThread.String(), v.Type().String())
	return nil
}
// The Opt* helpers below return the default d when argument n is nil or
// absent (absent stack positions read as LNil); a present argument of the
// wrong type still raises a TypeError.

// OptBool returns the nth argument as a bool, or d if the argument is nil.
func (s State) OptBool(n int, d bool) bool {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(lua.LBool); ok {
		return bool(lv)
	}
	s.TypeError(n, lua.LTBool.String(), v.Type().String())
	return false
}

// OptInt returns the nth argument as an int, or d if the argument is nil.
func (s State) OptInt(n int, d int) int {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(lua.LNumber); ok {
		return int(lv)
	}
	s.TypeError(n, lua.LTNumber.String(), v.Type().String())
	return 0
}

// OptInt64 returns the nth argument as an int64, or d if the argument is nil.
func (s State) OptInt64(n int, d int64) int64 {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(lua.LNumber); ok {
		return int64(lv)
	}
	s.TypeError(n, lua.LTNumber.String(), v.Type().String())
	return 0
}

// OptNumber returns the nth argument as a number, or d if the argument is nil.
func (s State) OptNumber(n int, d lua.LNumber) lua.LNumber {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(lua.LNumber); ok {
		return lv
	}
	s.TypeError(n, lua.LTNumber.String(), v.Type().String())
	return 0
}

// OptString returns the nth argument as a string, or d if the argument is nil.
func (s State) OptString(n int, d string) string {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(lua.LString); ok {
		return string(lv)
	}
	s.TypeError(n, lua.LTString.String(), v.Type().String())
	return ""
}

// OptTable returns the nth argument as a table, or d if the argument is nil.
func (s State) OptTable(n int, d *lua.LTable) *lua.LTable {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(*lua.LTable); ok {
		return lv
	}
	s.TypeError(n, lua.LTTable.String(), v.Type().String())
	return nil
}

// OptFunction returns the nth argument as a function, or d if the argument is
// nil.
func (s State) OptFunction(n int, d *lua.LFunction) *lua.LFunction {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(*lua.LFunction); ok {
		return lv
	}
	s.TypeError(n, lua.LTFunction.String(), v.Type().String())
	return nil
}

// OptUserData returns the nth argument as a userdata, or d if the argument is
// nil.
func (s State) OptUserData(n int, d *lua.LUserData) *lua.LUserData {
	v := s.L.Get(n)
	if v == lua.LNil {
		return d
	}
	if lv, ok := v.(*lua.LUserData); ok {
		return lv
	}
	s.TypeError(n, lua.LTUserData.String(), v.Type().String())
	return nil
} | state.go | 0.716119 | 0.468487 | state.go | starcoder |
package main
// Homework:
// Loading other kinds of images
// Add this to pong, use images for the paddles and ball
// See if you can speed up alphablending
import (
"fmt"
"github.com/jackmott/noise"
"github.com/veandco/go-sdl2/sdl"
"image/png"
"os"
"time"
)
const winWidth, winHeight int = 800, 600
// texture is a CPU-side RGBA image plus its on-screen position (the
// embedded pos) and a per-texture draw scale.
type texture struct {
	pos
	// pixels holds w*h RGBA bytes; pitch is bytes per row (w * 4).
	pixels []byte
	w, h, pitch int
	scale float32
}

// rgba is a color without alpha, used for gradient palettes.
type rgba struct {
	r, g, b byte
}

// pos is a 2D screen-space position.
type pos struct {
	x, y float32
}
// clear zeroes every byte of the pixel buffer.
func clear(pixels []byte) {
	for i := 0; i < len(pixels); i++ {
		pixels[i] = 0
	}
}
// setPixel writes color c at pixel (x, y) of the window-sized RGBA buffer.
// Out-of-range positions are silently ignored; the alpha byte is left
// untouched.
func setPixel(x, y int, c rgba, pixels []byte) {
	index := (y*winWidth + x) * 4
	// Fix: the previous bound (index < len(pixels)-4) rejected the final
	// pixel of the buffer; index+4 <= len(pixels) admits it while keeping
	// all three channel writes in range.
	if index >= 0 && index+4 <= len(pixels) {
		pixels[index] = c.r
		pixels[index+1] = c.g
		pixels[index+2] = c.b
	}
}
// drawScaled draws the texture into the window-sized pixel buffer at the
// texture's position, scaled by (scaleX, scaleY) with nearest-neighbor
// sampling. The destination alpha byte is skipped.
// NOTE(review): screenIndex advances only for unclipped pixels, so a row
// that is partially off-screen packs its visible pixels leftward; confirm
// textures are always fully on-screen when this is used.
func (tex *texture) drawScaled(scaleX, scaleY float32, pixels []byte) {
	newWidth := int(float32(tex.w) * scaleX)
	newHeight := int(float32(tex.h) * scaleY)
	texW4 := tex.w * 4
	for y := 0; y < newHeight; y++ {
		// fy is the source row (texture space) for destination row y.
		fy := float32(y) / float32(newHeight) * float32(tex.h-1)
		fyi := int(fy)
		screenY := int(fy*scaleY) + int(tex.y)
		screenIndex := screenY*winWidth*4 + int(tex.x)*4
		for x := 0; x < newWidth; x++ {
			fx := float32(x) / float32(newWidth) * float32(tex.w-1)
			screenX := int(fx*scaleX) + int(tex.x)
			if screenX >= 0 && screenX < winWidth && screenY >= 0 && screenY < winHeight {
				fxi4 := int(fx) * 4
				pixels[screenIndex] = tex.pixels[fyi*texW4+fxi4]
				screenIndex++
				pixels[screenIndex] = tex.pixels[fyi*texW4+fxi4+1]
				screenIndex++
				pixels[screenIndex] = tex.pixels[fyi*texW4+fxi4+2]
				screenIndex++ // skip alpha
				screenIndex++
			}
		}
	}
}
// flerp linearly interpolates between a and b by pct (0 → a, 1 → b).
func flerp(a, b, pct float32) float32 {
	return a + (b-a)*pct
}

// blerp bilinearly interpolates four corner values: c00/c10 are blended
// along x on one row, c01/c11 on the other, then the two rows are blended
// by ty.
func blerp(c00, c10, c01, c11, tx, ty float32) float32 {
	row0 := flerp(c00, c10, tx)
	row1 := flerp(c01, c11, tx)
	return flerp(row0, row1, ty)
}
// drawBilinearScaled draws the texture scaled by (scaleX, scaleY) using
// bilinear filtering: each destination pixel blends the four surrounding
// source texels. All four channels (including alpha) are interpolated.
// Reading fxi+1/fyi+1 is safe because fx < tex.w-1 and fy < tex.h-1 by
// construction of the mapping below.
// NOTE(review): as in drawScaled, screenIndex advances only for unclipped
// pixels, so partially clipped rows pack leftward.
func (tex *texture) drawBilinearScaled(scaleX, scaleY float32, pixels []byte) {
	newWidth := int(float32(tex.w) * scaleX)
	newHeight := int(float32(tex.h) * scaleY)
	texW4 := tex.w * 4
	for y := 0; y < newHeight; y++ {
		fy := float32(y) / float32(newHeight) * float32(tex.h-1)
		fyi := int(fy)
		screenY := int(fy*scaleY) + int(tex.y)
		screenIndex := screenY*winWidth*4 + int(tex.x)*4
		ty := fy - float32(fyi)
		for x := 0; x < newWidth; x++ {
			fx := float32(x) / float32(newWidth) * float32(tex.w-1)
			screenX := int(fx*scaleX) + int(tex.x)
			if screenX >= 0 && screenX < winWidth && screenY >= 0 && screenY < winHeight {
				fxi := int(fx)
				// Byte offsets of the four neighboring texels.
				c00i := fyi*texW4 + fxi*4
				c10i := fyi*texW4 + (fxi+1)*4
				c01i := (fyi+1)*texW4 + fxi*4
				c11i := (fyi+1)*texW4 + (fxi+1)*4
				tx := fx - float32(fxi)
				for i := 0; i < 4; i++ {
					c00 := float32(tex.pixels[c00i+i])
					c10 := float32(tex.pixels[c10i+i])
					c01 := float32(tex.pixels[c01i+i])
					c11 := float32(tex.pixels[c11i+i])
					pixels[screenIndex] = byte(blerp(c00, c10, c01, c11, tx, ty))
					screenIndex++
				}
			}
		}
	}
}
// draw blits the texture 1:1 at its position into the window buffer,
// copying all four channels (including alpha) and clipping at the window
// edges.
func (tex *texture) draw(pixels []byte) {
	for y := 0; y < tex.h; y++ {
		for x := 0; x < tex.w; x++ {
			screenY := y + int(tex.y)
			screenX := x + int(tex.x)
			if screenX >= 0 && screenX < winWidth && screenY >= 0 && screenY < winHeight {
				texIndex := y*tex.pitch + x*4
				screenIndex := screenY*winWidth*4 + screenX*4
				pixels[screenIndex] = tex.pixels[texIndex]
				pixels[screenIndex+1] = tex.pixels[texIndex+1]
				pixels[screenIndex+2] = tex.pixels[texIndex+2]
				pixels[screenIndex+3] = tex.pixels[texIndex+3]
			}
		}
	}
}
// drawAlpha blits the texture using "over" compositing for
// alpha-premultiplied sources: result = src + dst*(255-srcA)/255.
// The pixel data produced by loadBalloons comes from color.Color.RGBA(),
// whose values are alpha-premultiplied, so the source channels are applied
// at full weight here. The destination alpha byte is left unchanged.
func (tex *texture) drawAlpha(pixels []byte) {
	for y := 0; y < tex.h; y++ {
		screenY := y + int(tex.y)
		for x := 0; x < tex.w; x++ {
			screenX := x + int(tex.x)
			if screenX >= 0 && screenX < winWidth && screenY >= 0 && screenY < winHeight {
				texIndex := y*tex.pitch + x*4
				screenIndex := screenY*winWidth*4 + screenX*4
				srcR := int(tex.pixels[texIndex])
				srcG := int(tex.pixels[texIndex+1])
				srcB := int(tex.pixels[texIndex+2])
				srcA := int(tex.pixels[texIndex+3])
				dstR := int(pixels[screenIndex])
				dstG := int(pixels[screenIndex+1])
				dstB := int(pixels[screenIndex+2])
				rstR := (srcR*255 + dstR*(255-srcA)) / 255
				rstG := (srcG*255 + dstG*(255-srcA)) / 255
				rstB := (srcB*255 + dstB*(255-srcA)) / 255
				pixels[screenIndex] = byte(rstR)
				pixels[screenIndex+1] = byte(rstG)
				pixels[screenIndex+2] = byte(rstB)
				//pixels[screenIndex+3] = tex.pixels[texIndex+3]
			}
		}
	}
}
// loadBalloons loads the balloon sprite PNGs and converts each into an RGBA
// texture, positioned and scaled by its index.
func loadBalloons() []texture {
	balloonStrs := []string{"balloon_red.png", "balloon_green.png", "balloon_blue.png"}
	balloonTextures := make([]texture, len(balloonStrs))
	for i, bstr := range balloonStrs {
		w, h, balloonPixels := loadPNGPixels(bstr)
		balloonTextures[i] = texture{pos{float32(i * 60), float32(i * 60)}, balloonPixels, w, h, w * 4, float32(1 + i)}
	}
	return balloonTextures
}

// loadPNGPixels decodes the named PNG file into a flat RGBA byte slice.
// It panics on any I/O or decode error (the asset files are required at
// startup). Extracted from loadBalloons so each file's deferred Close runs
// as soon as that file is processed — the previous defer-in-a-loop kept
// every file open until loadBalloons returned.
func loadPNGPixels(name string) (w, h int, pixels []byte) {
	infile, err := os.Open(name)
	if err != nil {
		panic(err)
	}
	defer infile.Close()
	img, err := png.Decode(infile)
	if err != nil {
		panic(err)
	}
	w = img.Bounds().Max.X
	h = img.Bounds().Max.Y
	pixels = make([]byte, w*h*4)
	bIndex := 0
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			// RGBA() returns 16-bit premultiplied channels; /256 scales
			// each down to a byte.
			r, g, b, a := img.At(x, y).RGBA()
			pixels[bIndex] = byte(r / 256)
			bIndex++
			pixels[bIndex] = byte(g / 256)
			bIndex++
			pixels[bIndex] = byte(b / 256)
			bIndex++
			pixels[bIndex] = byte(a / 256)
			bIndex++
		}
	}
	return w, h, pixels
}
// lerp linearly interpolates between bytes b1 and b2 by pct.
func lerp(b1 byte, b2 byte, pct float32) byte {
	delta := pct * (float32(b2) - float32(b1))
	return byte(float32(b1) + delta)
}
// colorLerp interpolates each channel of c1 toward c2 by pct.
func colorLerp(c1, c2 rgba, pct float32) rgba {
	return rgba{
		r: lerp(c1.r, c2.r, pct),
		g: lerp(c1.g, c2.g, pct),
		b: lerp(c1.b, c2.b, pct),
	}
}
// getGradient builds a 256-entry palette blending linearly from c1 to c2.
func getGradient(c1, c2 rgba) []rgba {
	result := make([]rgba, 256)
	for i := range result {
		pct := float32(i) / float32(255)
		result[i] = colorLerp(c1, c2, pct)
	}
	return result
}

// getDualGradient builds a 256-entry palette from two stacked gradients:
// c1→c2 over the first half and c3→c4 over the second half.
// NOTE(review): the second half remaps pct with *1.5-0.5 (yielding
// 0.25..1) rather than *2-1 (0..1); confirm the skewed start is intended.
func getDualGradient(c1, c2, c3, c4 rgba) []rgba {
	result := make([]rgba, 256)
	for i := range result {
		pct := float32(i) / float32(255)
		if pct < 0.5 {
			result[i] = colorLerp(c1, c2, pct*float32(2))
		} else {
			result[i] = colorLerp(c3, c4, pct*float32(1.5)-float32(0.5))
		}
	}
	return result
}
// clamp restricts v to the inclusive range [min, max].
func clamp(min, max, v int) int {
	switch {
	case v < min:
		return min
	case v > max:
		return max
	default:
		return v
	}
}
// rescaleAndDraw maps noise values (given their min/max) onto 0..255,
// indexes the gradient palette, and returns a w*h RGBA byte buffer.
// Note: the noise slice is rescaled in place (a side effect on the caller's
// data), and the alpha byte of every output pixel is left at 0.
func rescaleAndDraw(noise []float32, min, max float32, gradient []rgba, w, h int) []byte {
	result := make([]byte, w*h*4)
	scale := 255.0 / (max - min)
	offset := min * scale
	for i := range noise {
		noise[i] = noise[i]*scale - offset
		c := gradient[clamp(0, 255, int(noise[i]))]
		p := i * 4
		result[p] = c.r
		result[p+1] = c.g
		result[p+2] = c.b
	}
	return result
}
// main initializes SDL, builds a cloud-noise background and the balloon
// textures, then runs the render loop: poll events, compose the frame into
// a CPU pixel buffer, upload it to the streaming texture, and present.
func main() {
	// Added after EP06 to address macosx issues
	err := sdl.Init(sdl.INIT_EVERYTHING)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer sdl.Quit()
	window, err := sdl.CreateWindow("Testing SDL2", sdl.WINDOWPOS_UNDEFINED, sdl.WINDOWPOS_UNDEFINED,
		int32(winWidth), int32(winHeight), sdl.WINDOW_SHOWN)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer window.Destroy()
	renderer, err := sdl.CreateRenderer(window, -1, sdl.RENDERER_ACCELERATED)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer renderer.Destroy()
	// A single streaming texture receives the whole CPU-composed frame.
	tex, err := renderer.CreateTexture(sdl.PIXELFORMAT_ABGR8888, sdl.TEXTUREACCESS_STREAMING, int32(winWidth), int32(winHeight))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer tex.Destroy()
	// Precompute the cloud background once from FBM noise.
	cloudNoise, min, max := noise.MakeNoise(noise.FBM, .009, .5, 3, 3, winWidth, winHeight)
	cloudGradient := getGradient(rgba{0, 0, 255}, rgba{255, 255, 255})
	cloudPixels := rescaleAndDraw(cloudNoise, min, max, cloudGradient, winWidth, winHeight)
	cloudTexture := texture{pos{0, 0}, cloudPixels, winWidth, winHeight, winWidth * 4, 1}
	pixels := make([]byte, winWidth*winHeight*4)
	balloonTextures := loadBalloons()
	dir := 1 // horizontal bounce direction for the middle balloon
	for {
		frameStart := time.Now()
		for event := sdl.PollEvent(); event != nil; event = sdl.PollEvent() {
			switch event.(type) {
			case *sdl.QuitEvent:
				return
			}
		}
		cloudTexture.draw(pixels)
		// NOTE(review): the loop variable tex shadows the SDL texture tex
		// declared above — consider renaming one of them.
		for _, tex := range balloonTextures {
			tex.drawBilinearScaled(tex.scale, tex.scale, pixels)
		}
		// Bounce the middle balloon between x=0 and x=400.
		balloonTextures[1].x += float32(1 * dir)
		if balloonTextures[1].x > 400 || balloonTextures[1].x < 0 {
			dir = dir * -1
		}
		tex.Update(nil, pixels, winWidth*4)
		renderer.Copy(tex, nil, nil)
		renderer.Present()
		elapsedTime := float32(time.Since(frameStart).Seconds() * 1000)
		fmt.Println("ms per frame:", elapsedTime)
		// NOTE(review): elapsedTime is recomputed in seconds (not ms) and
		// then never used, and the unconditional Delay(16) below makes the
		// 5ms cap above largely redundant — confirm the intended pacing.
		if elapsedTime < 5 {
			sdl.Delay(5 - uint32(elapsedTime))
			elapsedTime = float32(time.Since(frameStart).Seconds())
		}
		sdl.Delay(16)
	}
} | balloons/balloons.go | 0.676086 | 0.465691 | balloons.go | starcoder |
package expression
import (
"fmt"
"time"
)
// expr is a node of the filter-expression AST. Each node renders itself
// both as human-readable text (String) and as kernel-filter source
// (KernelString).
type expr interface {
	exprNode()
	KernelString() string
	String() string
}

type (
	// identExpr is a field/identifier reference.
	identExpr struct {
		name string
	}
	// valueExpr is a literal (string, integer, bool, float64 or time.Time).
	valueExpr struct {
		v interface{}
	}
	// binaryExpr applies binary operator op to operands x and y.
	binaryExpr struct {
		x expr
		y expr
		op binaryOp
	}
	// unaryExpr applies unary operator op to operand x.
	unaryExpr struct {
		x expr
		op unaryOp
	}
)
// The empty exprNode methods mark each concrete type as an AST node.
func (e identExpr) exprNode() {}
func (e valueExpr) exprNode() {}
func (e binaryExpr) exprNode() {}
func (e unaryExpr) exprNode() {}

// String returns the identifier name verbatim.
func (e identExpr) String() string {
	return e.name
}

// KernelString renders identically to String for identifiers.
func (e identExpr) KernelString() string {
	return e.String()
}
// String renders the literal as filter source text: strings are quoted,
// integers printed in decimal, booleans as TRUE/FALSE, floats with %f and
// timestamps as TIMESTAMP(<unix-nanos>). Any other payload type indicates
// a programming error and panics.
func (e valueExpr) String() string {
	switch v := e.v.(type) {
	case string:
		return fmt.Sprintf("%q", v)
	case int8, int16, int32, int64, uint8, uint16, uint32, uint64:
		return fmt.Sprintf("%d", v)
	case bool:
		if v {
			return "TRUE"
		}
		return "FALSE"
	case float64:
		return fmt.Sprintf("%f", v)
	case time.Time:
		return fmt.Sprintf("TIMESTAMP(%d)", v.UnixNano())
	}
	panic("internal error: invalid valueExpr")
}

// KernelString renders identically to String for literals.
func (e valueExpr) KernelString() string {
	return e.String()
}

// isInteger reports whether the payload is any signed or unsigned integer.
func (e valueExpr) isInteger() bool {
	switch e.v.(type) {
	case int8, int16, int32, int64, uint8, uint16, uint32, uint64:
		return true
	default:
		return false
	}
}

// isNumeric reports whether the payload is an integer, float or timestamp.
func (e valueExpr) isNumeric() bool {
	if e.isInteger() {
		return true
	}
	switch e.v.(type) {
	case float64, time.Time:
		return true
	}
	return false
}

// isString reports whether the payload is a string.
func (e valueExpr) isString() bool {
	switch e.v.(type) {
	case string:
		return true
	}
	return false
}
// isValid reports whether op is one of the binary operators supported in
// filter expressions.
func (e binaryExpr) isValid() bool {
	switch e.op {
	case binaryOpLogicalAnd, binaryOpLogicalOr, binaryOpEQ, binaryOpNE,
		binaryOpLT, binaryOpLE, binaryOpGT, binaryOpGE, binaryOpLike,
		binaryOpBitwiseAnd:
	default:
		return false
	}
	return true
}

// String renders "x op y", parenthesizing a logical right-hand side so the
// rendered text preserves the tree's grouping.
func (e binaryExpr) String() string {
	if !e.isValid() {
		panic("internal error: invalid binaryExpr")
	}
	if y, ok := e.y.(binaryExpr); ok {
		if y.op == binaryOpLogicalAnd || y.op == binaryOpLogicalOr {
			return fmt.Sprintf("%s %s (%s)", e.x, binaryOpStrings[e.op], e.y)
		}
	}
	return fmt.Sprintf("%s %s %s", e.x, binaryOpStrings[e.op], e.y)
}

// KernelString renders the expression in kernel-filter syntax. A
// "(x & mask) != 0" comparison collapses to just the bitwise
// sub-expression, since the kernel's mask test already implies non-zero.
func (e binaryExpr) KernelString() string {
	if !e.isValid() {
		panic("internal error: invalid binaryExpr")
	}
	if e.op == binaryOpNE {
		if x, ok := e.x.(binaryExpr); ok && x.op == binaryOpBitwiseAnd {
			// Assume that the rhs is 0 because prior validation
			// should ensure that to be the case
			return e.x.KernelString()
		}
	}
	if y, ok := e.y.(binaryExpr); ok {
		if y.op == binaryOpLogicalAnd || y.op == binaryOpLogicalOr {
			return fmt.Sprintf("%s %s (%s)", e.x.KernelString(),
				binaryOpKernelStrings[e.op], e.y.KernelString())
		}
	}
	return fmt.Sprintf("%s %s %s", e.x.KernelString(),
		binaryOpKernelStrings[e.op], e.y.KernelString())
}
// String renders the postfix null tests ("x IS NULL" / "x IS NOT NULL");
// any other unary operator indicates a programming error and panics.
func (e unaryExpr) String() string {
	switch e.op {
	case unaryOpIsNull, unaryOpIsNotNull:
		return fmt.Sprintf("%s %s", e.x, unaryOpStrings[e.op])
	}
	panic("internal error: invalid unaryExpr")
}

// KernelString returns an empty string.
// NOTE(review): null tests evidently have no kernel-filter equivalent;
// confirm callers treat "" as "no kernel clause".
func (e unaryExpr) KernelString() string {
	return ""
} | pkg/expression/ast.go | 0.574156 | 0.422922 | ast.go | starcoder |
package oak
import (
"image"
"image/draw"
)
// A Background can be used as a background draw layer. Backgrounds will be drawn as the first
// element in each frame, and are expected to cover up data drawn on the previous frame.
type Background interface {
	// GetRGBA returns the image drawn beneath each frame.
	GetRGBA() *image.RGBA
}
// DrawLoop
// Unless told to stop, the draw channel will repeatedly
// 1. draw the background color to a temporary buffer
// 2. draw all visible rendered elements onto the temporary buffer.
// 3. draw the buffer's data at the viewport's position to the screen.
// 4. publish the screen to display in window.
func (w *Window) drawLoop() {
	// Wait for the first draw signal, then present a bare background frame.
	<-w.drawCh
	draw.Draw(w.winBuffers[w.bufferIdx].RGBA(), w.winBuffers[w.bufferIdx].Bounds(), w.bkgFn(), zeroPoint, draw.Src)
	w.publish()
	for {
		select {
		case <-w.quitCh:
			return
		case <-w.drawCh:
			// Two consecutive drawCh signals switch into the loading-scene
			// loop below; another drawCh signal switches back out.
			<-w.drawCh
		loadingSelect:
			for {
				select {
				case <-w.ParentContext.Done():
					return
				case <-w.quitCh:
					return
				case <-w.drawCh:
					break loadingSelect
				case <-w.animationFrame:
					// Loading frames show only the background plus the
					// optional loading renderable.
					w.publish()
					draw.Draw(w.winBuffers[w.bufferIdx].RGBA(), w.winBuffers[w.bufferIdx].Bounds(), w.bkgFn(), zeroPoint, draw.Src)
					if w.LoadingR != nil {
						w.LoadingR.Draw(w.winBuffers[w.bufferIdx].RGBA(), 0, 0)
					}
				case <-w.DrawTicker.C:
					w.publish()
					draw.Draw(w.winBuffers[w.bufferIdx].RGBA(), w.winBuffers[w.bufferIdx].Bounds(), w.bkgFn(), zeroPoint, draw.Src)
					if w.LoadingR != nil {
						w.LoadingR.Draw(w.winBuffers[w.bufferIdx].RGBA(), 0, 0)
					}
				}
			}
		case f := <-w.betweenDrawCh:
			// Run caller-supplied work between frames (see DoBetweenDraws).
			f()
		case <-w.animationFrame:
			w.publish()
			draw.Draw(w.winBuffers[w.bufferIdx].RGBA(), w.winBuffers[w.bufferIdx].Bounds(), w.bkgFn(), zeroPoint, draw.Src)
			w.DrawStack.PreDraw()
			p := w.viewPos
			w.DrawStack.DrawToScreen(w.winBuffers[w.bufferIdx].RGBA(), &p, w.ScreenWidth, w.ScreenHeight)
		case <-w.DrawTicker.C:
			// Publish what was drawn last frame to screen, then work on preparing the next frame.
			w.publish()
			draw.Draw(w.winBuffers[w.bufferIdx].RGBA(), w.winBuffers[w.bufferIdx].Bounds(), w.bkgFn(), zeroPoint, draw.Src)
			w.DrawStack.PreDraw()
			p := w.viewPos
			w.DrawStack.DrawToScreen(w.winBuffers[w.bufferIdx].RGBA(), &p, w.ScreenWidth, w.ScreenHeight)
		}
	}
}
// publish uploads the active buffer to its window texture, scales it onto
// the window, flips it to the display, and then swaps the double-buffer
// index for the next frame.
func (w *Window) publish() {
	w.prePublish(w, w.windowTextures[w.bufferIdx])
	w.windowTextures[w.bufferIdx].Upload(zeroPoint, w.winBuffers[w.bufferIdx], w.winBuffers[w.bufferIdx].Bounds())
	w.windowControl.Scale(w.windowRect, w.windowTextures[w.bufferIdx], w.windowTextures[w.bufferIdx].Bounds(), draw.Src)
	w.windowControl.Publish()
	// every frame, swap buffers. This enables drivers which might hold on to the rgba buffers we publish as if they
	// were immutable.
	w.bufferIdx = (w.bufferIdx + 1) % 2
}
// DoBetweenDraws will execute the given function in-between draw frames.
// The send happens on a fresh goroutine so the caller never blocks waiting
// for the draw loop to reach its channel select.
func (w *Window) DoBetweenDraws(f func()) {
	go func() {
		w.betweenDrawCh <- f
	}()
} | drawLoop.go | 0.615666 | 0.472501 | drawLoop.go | starcoder |
package neural
// Layer is a set of neurons + config
type Layer struct {
// Amount of inputs (default is previous layer units)
Inputs int `json:"-"`
Units int `json:"-"`
Neurons []*Neuron `json:"Neurons"`
// Default activation is sigmoid
Activation string `json:"Activation,omitempty"`
Forward ForwardFn `json:"-"`
Backward BackwardFn `json:"-"`
// Default loss is mse
Loss string `json:"Loss,omitempty"`
LossFn LossFn `json:"-"`
// Default rate is 0.001
Rate float64 `json:"-"`
// Default momentum is 0.999
Momentum float64 `json:"-"`
// Range of arbitrary values for input/output layers
Range [][]float64 `json:"Range,omitempty"`
}
// NewLayer creates a layer based on simple layer definition.
// Note: the provided layer is mutated and returned (not copied).
func NewLayer(layer *Layer) *Layer {
	// Apply defaults for unset hyper-parameters.
	if layer.Rate == 0.0 {
		layer.Rate = 0.001
	}
	if layer.Momentum == 0.0 {
		layer.Momentum = 0.999
	}
	layer.Neurons = make([]*Neuron, layer.Units)
	for i := 0; i < layer.Units; i++ {
		layer.Neurons[i] = NewNeuron(layer, layer.Inputs)
	}
	activation := layer.SetActivation(layer.Activation)
	// With no activation bounds the range table is cleared; otherwise the
	// activation's output bounds are appended to each configured row.
	if len(activation.Ranges) == 0 {
		layer.Range = [][]float64{}
	}
	for i, total := 0, len(layer.Range); i < total; i++ {
		layer.Range[i] = append(layer.Range[i], activation.Ranges[0], activation.Ranges[1])
	}
	return layer
}
// Think runs the layer forward: each neuron produces one output from the
// shared input vector, yielding one value per unit.
func (layer *Layer) Think(inputs []float64) []float64 {
	outputs := make([]float64, layer.Units)
	for i := 0; i < layer.Units; i++ {
		outputs[i] = layer.Neurons[i].Think(inputs)
	}
	return outputs
}
// Clone returns an independent copy of the layer: each neuron is cloned
// and the Range table is deep-copied.
func (layer *Layer) Clone() *Layer {
	clone := NewLayer(&Layer{
		Inputs:     layer.Inputs,
		Units:      layer.Units,
		Activation: layer.Activation,
		Rate:       layer.Rate,
		Momentum:   layer.Momentum,
	})
	for i := 0; i < clone.Units; i++ {
		clone.Neurons[i] = layer.Neurons[i].Clone()
	}
	// Fix: deep-copy each range row. The previous copy() duplicated only
	// the outer slice headers, so the clone shared the inner []float64
	// slices with the source layer and mutations leaked between them.
	clone.Range = make([][]float64, len(layer.Range))
	for i, r := range layer.Range {
		clone.Range[i] = append([]float64(nil), r...)
	}
	return clone
}
// Mutate applies random mutation to every neuron of the layer with the
// given probability.
func (layer *Layer) Mutate(probability float64) {
	for i := 0; i < layer.Units; i++ {
		layer.Neurons[i].Mutate(probability)
	}
}
// Crossover produces a child layer by merging the neurons of this layer
// with layerB; dominant biases the merge toward the receiver's genes.
// The child's Range table is deep-copied from the receiver.
func (layer *Layer) Crossover(layerB *Layer, dominant float64) *Layer {
	// Fix: the local was previously named "new", shadowing the builtin.
	child := NewLayer(&Layer{
		Inputs:     layer.Inputs,
		Units:      layer.Units,
		Activation: layer.Activation,
		Rate:       layer.Rate,
		Momentum:   layer.Momentum,
	})
	for i := 0; i < layer.Units; i++ {
		child.Neurons[i] = layer.Neurons[i].Crossover(*layerB.Neurons[i], dominant)
	}
	// Fix: deep-copy each range row; the previous copy() shared the inner
	// []float64 slices with the parent layer.
	child.Range = make([][]float64, len(layer.Range))
	for i, r := range layer.Range {
		child.Range[i] = append([]float64(nil), r...)
	}
	return child
}
// Reset reinitializes every neuron in the layer (weights, bias, etc).
func (layer *Layer) Reset() {
	for i := 0; i < layer.Units; i++ {
		layer.Neurons[i].Reset()
	}
}
// SetActivation set or change the activation functions based on name.
// Note: only the Forward/Backward functions are updated; the Activation
// name field itself is left unchanged.
func (layer *Layer) SetActivation(activation string) ActivationSet {
	set := selectActivation(activation)
	layer.Forward = set.Forward
	layer.Backward = set.Backward
	return set
} | layer.go | 0.830113 | 0.44059 | layer.go | starcoder |
package genfuncs
import (
"strings"
)
// All returns true if every element of slice satisfies the predicate
// (vacuously true for an empty slice).
func All[T any](slice []T, predicate Predicate[T]) bool {
	for i := range slice {
		if !predicate(slice[i]) {
			return false
		}
	}
	return true
}

// Any returns true if at least one element of the slice satisfies the
// predicate (false for an empty slice).
func Any[T any](slice []T, predicate Predicate[T]) bool {
	for i := range slice {
		if predicate(slice[i]) {
			return true
		}
	}
	return false
}
// Associate returns a map whose key/value pairs are produced by applying
// the transform function to each element of the slice.
func Associate[T any, K comparable, V any](slice []T, transform TransformKV[T, K, V]) map[K]V {
	result := make(map[K]V, len(slice))
	for _, element := range slice {
		key, value := transform(element)
		result[key] = value
	}
	return result
}

// AssociateWith returns a map where the keys are the slice's elements and
// each value is produced by applying valueSelector to its key.
func AssociateWith[K comparable, V any](slice []K, valueSelector ValueSelector[K, V]) map[K]V {
	result := make(map[K]V, len(slice))
	for _, key := range slice {
		result[key] = valueSelector(key)
	}
	return result
}
// Contains returns true if element is found in slice.
func Contains[T comparable](slice []T, element T) bool {
	for i := range slice {
		if slice[i] == element {
			return true
		}
	}
	return false
}
// Distinct returns a slice containing only distinct elements from the given slice.
// First occurrences win, and input order is preserved.
func Distinct[T comparable](slice []T) []T {
	seen := make(map[T]struct{}, len(slice))
	var result []T
	for _, v := range slice {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		result = append(result, v)
	}
	return result
}
// Filter returns a slice containing only elements matching the given predicate.
// The result is nil when no element matches.
func Filter[T any](slice []T, predicate Predicate[T]) []T {
	var results []T
	for _, t := range slice {
		if predicate(t) {
			results = append(results, t)
		}
	}
	return results
}

// Find returns the first element matching the given predicate and true, or false when no such element was found.
// When not found, the zero value of T is returned.
func Find[T any](slice []T, predicate Predicate[T]) (T, bool) {
	for _, t := range slice {
		if predicate(t) {
			return t, true
		}
	}
	var t T
	return t, false
}

// FindLast returns the last element matching the given predicate and true, or false when no such element was found.
// When not found, the zero value of T is returned.
func FindLast[T any](slice []T, predicate Predicate[T]) (T, bool) {
	var last T
	var found = false
	for _, t := range slice {
		if predicate(t) {
			found = true
			last = t
		}
	}
	return last, found
}

// FlatMap returns a slice of all elements from results of transform function being invoked on each element of
// original slice, and those resultant slices concatenated.
func FlatMap[T, R any](slice []T, transform Transform[T, []R]) []R {
	var results []R
	for _, e := range slice {
		results = append(results, transform(e)...)
	}
	return results
}

// Fold accumulates a value starting with initial value and applying operation from left to right to current
// accumulated value and each element.
func Fold[T, R any](slice []T, initial R, operation Operation[T, R]) R {
	r := initial
	for _, t := range slice {
		r = operation(r, t)
	}
	return r
}

// GroupBy groups elements of the slice by the key returned by the given keySelector function applied to
// each element and returns a map where each group key is associated with a slice of corresponding elements.
// Within each group the original slice order is preserved.
func GroupBy[T any, K comparable](slice []T, keySelector KeySelector[T, K]) map[K][]T {
	m := make(map[K][]T)
	for _, e := range slice {
		k := keySelector(e)
		m[k] = append(m[k], e)
	}
	return m
}
// JoinToString creates a string from all the elements using the stringer on each, separating them using separator, and
// using the given prefix and postfix. An empty slice yields prefix+postfix.
func JoinToString[T any](slice []T, stringer Stringer[T], separator string, prefix string, postfix string) string {
	var sb strings.Builder
	sb.WriteString(prefix)
	for i, e := range slice {
		// Write the separator before every element except the first.
		if i > 0 {
			sb.WriteString(separator)
		}
		sb.WriteString(stringer(e))
	}
	sb.WriteString(postfix)
	return sb.String()
}
// Map returns a slice containing the results of applying the given transform function to each element in the original slice.
func Map[T, R any](slice []T, transform Transform[T, R]) []R {
var results = make([]R, len(slice))
for i, e := range slice {
results[i] = transform(e)
}
return results
} | slice.go | 0.865309 | 0.505188 | slice.go | starcoder |
package onshape
import (
"encoding/json"
)
// NOTE(review): this type follows the openapi-generator accessor pattern and
// is most likely machine-generated — prefer regenerating over hand edits (confirm).

// BTSMDefinitionEntityTypeFilter1651 struct for BTSMDefinitionEntityTypeFilter1651
type BTSMDefinitionEntityTypeFilter1651 struct {
	// BTQueryFilter183 is embedded; its fields are flattened into the JSON object (see MarshalJSON).
	BTQueryFilter183
	BtType *string `json:"btType,omitempty"`
	SmDefinitionEntityType *string `json:"smDefinitionEntityType,omitempty"`
}

// NewBTSMDefinitionEntityTypeFilter1651 instantiates a new BTSMDefinitionEntityTypeFilter1651 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTSMDefinitionEntityTypeFilter1651() *BTSMDefinitionEntityTypeFilter1651 {
	this := BTSMDefinitionEntityTypeFilter1651{}
	return &this
}

// NewBTSMDefinitionEntityTypeFilter1651WithDefaults instantiates a new BTSMDefinitionEntityTypeFilter1651 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTSMDefinitionEntityTypeFilter1651WithDefaults() *BTSMDefinitionEntityTypeFilter1651 {
	this := BTSMDefinitionEntityTypeFilter1651{}
	return &this
}

// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTSMDefinitionEntityTypeFilter1651) GetBtType() string {
	if o == nil || o.BtType == nil {
		var ret string
		return ret
	}
	return *o.BtType
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTSMDefinitionEntityTypeFilter1651) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType returns a boolean if a field has been set.
func (o *BTSMDefinitionEntityTypeFilter1651) HasBtType() bool {
	if o != nil && o.BtType != nil {
		return true
	}
	return false
}

// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTSMDefinitionEntityTypeFilter1651) SetBtType(v string) {
	o.BtType = &v
}

// GetSmDefinitionEntityType returns the SmDefinitionEntityType field value if set, zero value otherwise.
func (o *BTSMDefinitionEntityTypeFilter1651) GetSmDefinitionEntityType() string {
	if o == nil || o.SmDefinitionEntityType == nil {
		var ret string
		return ret
	}
	return *o.SmDefinitionEntityType
}

// GetSmDefinitionEntityTypeOk returns a tuple with the SmDefinitionEntityType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTSMDefinitionEntityTypeFilter1651) GetSmDefinitionEntityTypeOk() (*string, bool) {
	if o == nil || o.SmDefinitionEntityType == nil {
		return nil, false
	}
	return o.SmDefinitionEntityType, true
}

// HasSmDefinitionEntityType returns a boolean if a field has been set.
func (o *BTSMDefinitionEntityTypeFilter1651) HasSmDefinitionEntityType() bool {
	if o != nil && o.SmDefinitionEntityType != nil {
		return true
	}
	return false
}

// SetSmDefinitionEntityType gets a reference to the given string and assigns it to the SmDefinitionEntityType field.
func (o *BTSMDefinitionEntityTypeFilter1651) SetSmDefinitionEntityType(v string) {
	o.SmDefinitionEntityType = &v
}

// MarshalJSON serializes the embedded BTQueryFilter183 first, then overlays
// this type's own optional fields on the same flat JSON object.
func (o BTSMDefinitionEntityTypeFilter1651) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	serializedBTQueryFilter183, errBTQueryFilter183 := json.Marshal(o.BTQueryFilter183)
	if errBTQueryFilter183 != nil {
		return []byte{}, errBTQueryFilter183
	}
	errBTQueryFilter183 = json.Unmarshal([]byte(serializedBTQueryFilter183), &toSerialize)
	if errBTQueryFilter183 != nil {
		return []byte{}, errBTQueryFilter183
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.SmDefinitionEntityType != nil {
		toSerialize["smDefinitionEntityType"] = o.SmDefinitionEntityType
	}
	return json.Marshal(toSerialize)
}

// NullableBTSMDefinitionEntityTypeFilter1651 is a nullable wrapper distinguishing
// "unset" from "set to nil" (isSet tracks whether Set/UnmarshalJSON was called).
type NullableBTSMDefinitionEntityTypeFilter1651 struct {
	value *BTSMDefinitionEntityTypeFilter1651
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableBTSMDefinitionEntityTypeFilter1651) Get() *BTSMDefinitionEntityTypeFilter1651 {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBTSMDefinitionEntityTypeFilter1651) Set(val *BTSMDefinitionEntityTypeFilter1651) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableBTSMDefinitionEntityTypeFilter1651) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableBTSMDefinitionEntityTypeFilter1651) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTSMDefinitionEntityTypeFilter1651 wraps val, marked as set.
func NewNullableBTSMDefinitionEntityTypeFilter1651(val *BTSMDefinitionEntityTypeFilter1651) *NullableBTSMDefinitionEntityTypeFilter1651 {
	return &NullableBTSMDefinitionEntityTypeFilter1651{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value (or null).
func (v NullableBTSMDefinitionEntityTypeFilter1651) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableBTSMDefinitionEntityTypeFilter1651) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btsm_definition_entity_type_filter_1651.go | 0.690559 | 0.432902 | model_btsm_definition_entity_type_filter_1651.go | starcoder |
package ast
import (
	"bytes"
	"sort"
	"strings"

	"github.com/butlermatt/monlox/token"
)
// Node is a node within the AST tree.
type Node interface {
	// TokenLiteral returns the string literal of the token associated with this ast node.
	TokenLiteral() string
	String() string
}

// Statement represents an AST statement node.
type Statement interface {
	Node
	statementNode() // marker method; distinguishes statements from expressions
}

// Expression represents an AST expression node.
type Expression interface {
	Node
	expressionNode() // marker method; distinguishes expressions from statements
}

// Program represents the statements comprising nodes of the AST tree.
type Program struct {
	Statements []Statement
}

// TokenLiteral returns the string literal of the token associated with this ast node.
// For an empty program it returns "".
func (p *Program) TokenLiteral() string {
	if len(p.Statements) > 0 {
		return p.Statements[0].TokenLiteral()
	}
	return ""
}

// String returns a string representation of the program by concatenating all statements.
func (p *Program) String() string {
	var out bytes.Buffer
	for _, s := range p.Statements {
		out.WriteString(s.String())
	}
	return out.String()
}

// LetStatement is an AST node representing a variable assignment
type LetStatement struct {
	Token token.Token
	Name  *Identifier
	Value Expression
}

func (ls *LetStatement) statementNode() {}

// TokenLiteral returns the string literal of the token associated with this ast node.
func (ls *LetStatement) TokenLiteral() string { return ls.Token.Literal }

// String returns a string representation of the Let statement.
func (ls *LetStatement) String() string {
	var out bytes.Buffer
	out.WriteString(ls.TokenLiteral() + " ")
	out.WriteString(ls.Name.String())
	out.WriteString(" = ")
	// Value may be nil for partially-parsed statements.
	if ls.Value != nil {
		out.WriteString(ls.Value.String())
	}
	out.WriteByte(';')
	return out.String()
}

// Identifier represents an variable identifier
type Identifier struct {
	Token token.Token // The token.IDENT token.
	Value string
}

func (i *Identifier) expressionNode() {}

// TokenLiteral returns the string literal of the token associated with this ast node.
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }

// String returns a string representation of the identifier.
func (i *Identifier) String() string { return i.Value }
// ReturnStatement is an AST node representing just the return statement and the associated expression.
type ReturnStatement struct {
	Token token.Token
	Value Expression
}

func (rs *ReturnStatement) statementNode() {}

// TokenLiteral returns the string literal of the token associated with this ast node.
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }

// String returns a string representation of the Return statement
func (rs *ReturnStatement) String() string {
	var out bytes.Buffer
	out.WriteString(rs.TokenLiteral() + " ")
	// Value may be nil for a bare return.
	if rs.Value != nil {
		out.WriteString(rs.Value.String())
	}
	out.WriteByte(';')
	return out.String()
}

// ExpressionStatement is a AST node representing a statement that consists of a single expression.
type ExpressionStatement struct {
	Token      token.Token
	Expression Expression
}

func (es *ExpressionStatement) statementNode() {}

// TokenLiteral returns the string literal of the token associated with this ast node.
func (es *ExpressionStatement) TokenLiteral() string { return es.Token.Literal }

// String returns a string representation of an Expression statement, or "" when empty.
func (es *ExpressionStatement) String() string {
	if es.Expression != nil {
		return es.Expression.String()
	}
	return ""
}

// NumberLiteral is an AST node representing a number literal. Stored as a float32.
type NumberLiteral struct {
	Token token.Token
	Value float32
}

func (nl *NumberLiteral) expressionNode() {}

// TokenLiteral returns a string representation of the token associated with this node.
func (nl *NumberLiteral) TokenLiteral() string { return nl.Token.Literal }

// String returns a string representation of the Number Literal.
func (nl *NumberLiteral) String() string { return nl.Token.Literal }

// Boolean is an AST node representing boolean literals.
type Boolean struct {
	Token token.Token
	Value bool
}

func (b *Boolean) expressionNode() {}

// TokenLiteral returns a string representation of the token associated with this node.
func (b *Boolean) TokenLiteral() string { return b.Token.Literal }

// String returns a string representation of the boolean literal.
func (b *Boolean) String() string { return b.Token.Literal }

// PrefixExpression is an AST node representing a prefix expression such as -5 or !x
type PrefixExpression struct {
	Token    token.Token
	Operator string
	Right    Expression
}

func (pe *PrefixExpression) expressionNode() {}

// TokenLiteral returns the string literal of the associated token.
func (pe *PrefixExpression) TokenLiteral() string { return pe.Token.Literal }

// String returns a string representation of the prefix expression, e.g. "(-5)".
func (pe *PrefixExpression) String() string {
	var out bytes.Buffer
	out.WriteByte('(')
	out.WriteString(pe.Operator)
	out.WriteString(pe.Right.String())
	out.WriteByte(')')
	return out.String()
}

// InfixExpression is an AST node representing a binary operation such as "x + y".
type InfixExpression struct {
	Token    token.Token
	Left     Expression
	Operator string
	Right    Expression
}

func (oe *InfixExpression) expressionNode() {}

// TokenLiteral returns the string representation of this token.
func (oe *InfixExpression) TokenLiteral() string { return oe.Token.Literal }

// String return a string representation of this expression, e.g. "(x + y)".
func (oe *InfixExpression) String() string {
	var out bytes.Buffer
	out.WriteByte('(')
	out.WriteString(oe.Left.String())
	out.WriteString(" " + oe.Operator + " ")
	out.WriteString(oe.Right.String())
	out.WriteByte(')')
	return out.String()
}

// BlockStatement is an AST node representing a brace-delimited list of statements.
type BlockStatement struct {
	Token      token.Token // The { token
	Statements []Statement
}

func (bs *BlockStatement) statementNode() {}

// TokenLiteral returns the string literal of the token associated with this ast node.
func (bs *BlockStatement) TokenLiteral() string { return bs.Token.Literal }

// String returns the concatenation of all contained statements.
func (bs *BlockStatement) String() string {
	var out bytes.Buffer
	for _, s := range bs.Statements {
		out.WriteString(s.String())
	}
	return out.String()
}
// IfExpression is an AST node representing an if/else conditional expression.
type IfExpression struct {
	Token       token.Token
	Condition   Expression
	Consequence *BlockStatement
	Alternative *BlockStatement // nil when there is no else branch
}

func (ie *IfExpression) expressionNode() {}

// TokenLiteral returns the string representation of this token.
func (ie *IfExpression) TokenLiteral() string { return ie.Token.Literal }

// String returns the string representation of this expression.
func (ie *IfExpression) String() string {
	var out bytes.Buffer
	out.WriteString("if")
	out.WriteString(ie.Condition.String())
	out.WriteByte(' ')
	out.WriteString(ie.Consequence.String())
	if ie.Alternative != nil {
		out.WriteString("else ")
		out.WriteString(ie.Alternative.String())
	}
	return out.String()
}

// FunctionLiteral is an AST node representing a function definition.
type FunctionLiteral struct {
	Token      token.Token // the 'fn' token.
	Parameters []*Identifier
	Body       *BlockStatement
}

func (fl *FunctionLiteral) expressionNode() {}

// TokenLiteral returns the string representation of this token.
func (fl *FunctionLiteral) TokenLiteral() string { return fl.Token.Literal }

// String returns the string representation of this expression
func (fl *FunctionLiteral) String() string {
	var out bytes.Buffer
	var params []string
	for _, p := range fl.Parameters {
		params = append(params, p.String())
	}
	out.WriteString(fl.TokenLiteral())
	out.WriteByte('(')
	out.WriteString(strings.Join(params, ", "))
	out.WriteString(") ")
	out.WriteString(fl.Body.String())
	return out.String()
}

// CallExpression is an AST node representing a function invocation.
type CallExpression struct {
	Token     token.Token
	Function  Expression // Identifier or FunctionLiteral being invoked
	Arguments []Expression
}

func (ce *CallExpression) expressionNode() {}

// TokenLiteral returns the string representation of this token.
func (ce *CallExpression) TokenLiteral() string { return ce.Token.Literal }

// String returns a string representation of this call expression.
func (ce *CallExpression) String() string {
	var out bytes.Buffer
	var args []string
	for _, a := range ce.Arguments {
		args = append(args, a.String())
	}
	out.WriteString(ce.Function.String())
	out.WriteByte('(')
	out.WriteString(strings.Join(args, ", "))
	out.WriteByte(')')
	return out.String()
}

// StringLiteral is an AST node representing a string literal.
type StringLiteral struct {
	Token token.Token
	Value string
}

func (sl *StringLiteral) expressionNode() {}

// TokenLiteral returns a string representation of this token.
func (sl *StringLiteral) TokenLiteral() string { return sl.Token.Literal }

// String returns a string representation of this string literal
func (sl *StringLiteral) String() string { return sl.Token.Literal }

// ArrayLiteral is an AST node representing an array literal such as [1, 2].
type ArrayLiteral struct {
	Token    token.Token // The '[' token
	Elements []Expression
}

func (al *ArrayLiteral) expressionNode() {}

// TokenLiteral returns the string literal of the token associated with this ast node.
func (al *ArrayLiteral) TokenLiteral() string { return al.Token.Literal }

// String returns a string representation of the array literal, e.g. "[1, 2]".
func (al *ArrayLiteral) String() string {
	var out bytes.Buffer
	var elements []string
	for _, el := range al.Elements {
		elements = append(elements, el.String())
	}
	out.WriteByte('[')
	out.WriteString(strings.Join(elements, ", "))
	out.WriteByte(']')
	return out.String()
}

// IndexExpression is an AST node representing an index access such as arr[0].
type IndexExpression struct {
	Token token.Token // the [ token
	Left  Expression
	Index Expression
}

func (ie *IndexExpression) expressionNode() {}

// TokenLiteral returns the string literal of the token associated with this ast node.
func (ie *IndexExpression) TokenLiteral() string { return ie.Token.Literal }

// String returns a string representation of the index expression, e.g. "(arr[0])".
func (ie *IndexExpression) String() string {
	var out bytes.Buffer
	out.WriteByte('(')
	out.WriteString(ie.Left.String())
	out.WriteByte('[')
	out.WriteString(ie.Index.String())
	out.WriteString("])")
	return out.String()
}
type HashLiteral struct {
Token token.Token // The '{' token
Pairs map[Expression]Expression
}
func (hl *HashLiteral) expressionNode() {}
func (hl *HashLiteral) TokenLiteral() string { return hl.Token.Literal }
func (hl *HashLiteral) String() string {
var out bytes.Buffer
var pairs []string
for key, value := range hl.Pairs {
pairs = append(pairs, key.String()+":"+value.String())
}
out.WriteByte('{')
out.WriteString(strings.Join(pairs, ", "))
out.WriteByte('}')
return out.String()
} | ast/ast.go | 0.841956 | 0.440469 | ast.go | starcoder |
package set
// Clone returns a new Set with the same contents as this one.
func (s Set[T]) Clone() Set[T] {
ns := New[T]()
for k := range s {
ns[k] = struct{}{}
}
return ns
}
// Add adds the given value to the Set.
func (s Set[T]) Add(v T) {
s[v] = struct{}{}
}
// Remove removes the given value from the Set.
func (s Set[T]) Remove(v T) {
delete(s, v)
}
// Contains returns true if the given value is in the Set.
func (s Set[T]) Contains(v T) bool {
_, ok := s[v]
return ok
}
// Union returns a new Set with the union of this Set and the given Set.
func (s1 Set[T]) Union(s2 Set[T]) Set[T] {
union := Set[T]{}
for k := range s1 {
union[k] = struct{}{}
}
for k := range s2 {
union[k] = struct{}{}
}
return union
}
// Intersect returns a new Set with the intersection of this Set and the given Set.
func (s1 Set[T]) Intersect(s2 Set[T]) Set[T] {
intersection := Set[T]{}
for k := range s1 {
if _, ok := s2[k]; ok {
intersection[k] = struct{}{}
}
}
return intersection
}
// Subtract returns a new Set with the elements of this Set that are not in the given Set.
func (s1 Set[T]) Subtract(s2 Set[T]) Set[T] {
subtract := Set[T]{}
for k := range s1 {
if _, ok := s2[k]; !ok {
subtract[k] = struct{}{}
}
}
return subtract
}
// Equal returns true if this Set and the given Set are equal.
func (s1 Set[T]) Equal(s2 Set[T]) bool {
if len(s1) != len(s2) {
return false
}
for k := range s1 {
if _, ok := s2[k]; !ok {
return false
}
}
return true
}
// IsSubset returns true if this Set is a subset of the given Set.
func (s1 Set[T]) IsSubset(s2 Set[T]) bool {
for k := range s1 {
if _, ok := s2[k]; !ok {
return false
}
}
return true
}
// IsSuperset returns true if this Set is a superset of the given Set.
func (s1 Set[T]) IsSuperset(s2 Set[T]) bool {
for k := range s2 {
if _, ok := s1[k]; !ok {
return false
}
}
return true
}
// IsDisjoint returns true if this Set and the given Set have no elements in common.
func (s1 Set[T]) IsDisjoint(s2 Set[T]) bool {
for k := range s1 {
if _, ok := s2[k]; ok {
return false
}
}
return true
}
//Foreach calls the given function for each element in the Set.
func (s1 Set[T]) Foreach(f func(T)) {
for k := range s1 {
f(k)
}
} | collections/set/methods.go | 0.872102 | 0.563738 | methods.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"log"
"strings"
"strconv"
"math"
)
// Pair is a 2-D grid coordinate; also used as a unit step vector.
type Pair struct {
	X, Y int64
}
func main() {
// read input
data, err := ioutil.ReadFile("input.txt")
handleError(err)
// parse input
wires := strings.Split(string(data), "\n")
wire1 := wires[0]
wire2 := wires[1]
// follow and store path and distance of wire 1
wire1visited := make(map[Pair]int64)
startingPoint := Pair{0, 0}
var startingDistance int64 = 0
for _, step := range strings.Split(wire1, ",") {
direction := string(step[0])
stepsInt, err := strconv.Atoi(string(step[1:]))
steps := int64(stepsInt)
handleError(err)
switch direction {
case "U":
var i int64
for i = 1; i <= steps; i++ {
wire1visited[Pair{startingPoint.X, startingPoint.Y+i}] = startingDistance+i
}
startingPoint = Pair{startingPoint.X, startingPoint.Y+steps}
startingDistance += steps
case "R":
var i int64
for i = 1; i <= steps; i++ {
wire1visited[Pair{startingPoint.X+i, startingPoint.Y}] = startingDistance+i
}
startingPoint = Pair{startingPoint.X+steps, startingPoint.Y}
startingDistance += steps
case "D":
var i int64
for i = 1; i <= steps; i++ {
wire1visited[Pair{startingPoint.X, startingPoint.Y-i}] = startingDistance+i
}
startingPoint = Pair{startingPoint.X, startingPoint.Y-steps}
startingDistance += steps
case "L":
var i int64
for i = 1; i <= steps; i++ {
wire1visited[Pair{startingPoint.X-i, startingPoint.Y}] = startingDistance+i
}
startingPoint = Pair{startingPoint.X-steps, startingPoint.Y}
startingDistance += steps
}
}
// follow wire 2 and store intersections with wire 1
intersections := make(map[Pair]int64)
startingPoint = Pair{0, 0}
var wire2distance int64 = 0
for _, step := range strings.Split(wire2, ",") {
direction := string(step[0])
stepsInt, err := strconv.Atoi(string(step[1:]))
steps := int64(stepsInt)
handleError(err)
switch direction {
case "U":
var i int64
for i = 1; i <= steps; i++ {
if wire1distance, ok := wire1visited[Pair{startingPoint.X, startingPoint.Y+i}]; ok {
intersections[Pair{startingPoint.X, startingPoint.Y+i}] = wire1distance + wire2distance + i
}
}
startingPoint = Pair{startingPoint.X, startingPoint.Y+steps}
wire2distance += steps
case "R":
var i int64
for i = 1; i <= steps; i++ {
if wire1distance, ok := wire1visited[Pair{startingPoint.X+i, startingPoint.Y}]; ok {
intersections[Pair{startingPoint.X+i, startingPoint.Y}] = wire1distance + wire2distance + i
}
}
startingPoint = Pair{startingPoint.X+steps, startingPoint.Y}
wire2distance += steps
case "D":
var i int64
for i = 1; i <= steps; i++ {
if wire1distance, ok := wire1visited[Pair{startingPoint.X, startingPoint.Y-i}]; ok {
intersections[Pair{startingPoint.X, startingPoint.Y-i}] = wire1distance + wire2distance + i
}
}
startingPoint = Pair{startingPoint.X, startingPoint.Y-steps}
wire2distance += steps
case "L":
var i int64
for i = 1; i <= steps; i++ {
if wire1distance, ok := wire1visited[Pair{startingPoint.X-i, startingPoint.Y}]; ok {
intersections[Pair{startingPoint.X-i, startingPoint.Y}] = wire1distance + wire2distance + i
}
}
startingPoint = Pair{startingPoint.X-steps, startingPoint.Y}
wire2distance += steps
}
}
// calculate distance for each point
var minDist int64 = math.MaxInt64
minDistPoint := Pair{0, 0}
for intersection, distance := range intersections {
if distance < minDist {
minDistPoint = intersection
minDist = distance
}
}
fmt.Printf("Minimum distance of a wire intersection to central port: %d (intersection %v)\n", minDist, minDistPoint)
}
// handleError aborts the program via log.Fatal when err is non-nil.
// NOTE: removed invalid residue text that followed the closing brace.
func handleError(err error) {
	if err != nil {
		log.Fatal(err)
	}
}
package entity
import "time"
// User is a user entity row with its JSON field mapping.
type User struct {
	ID        uint64    `json:"id"`
	Name      string    `json:"name"`
	Sex       string    `json:"sex"`
	Age       int       `json:"age"`
	SkillID   uint64    `json:"skillID"`
	SkillRank int       `json:"skillRank"`
	GroupID   uint64    `json:"groupID"`
	WorldID   uint64    `json:"worldID"`
	FieldID   uint64    `json:"fieldID"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
}

// Users is a slice of users with per-column bulk accessors.
// Each accessor returns a new slice with one entry per user, in order.
// (Uniform indexed fill replaces the previous append loops; the trailing
// invalid residue text after the final brace was removed.)
type Users []*User

// IDs returns the ID of every user.
func (e Users) IDs() []uint64 {
	values := make([]uint64, len(e))
	for i, u := range e {
		values[i] = u.ID
	}
	return values
}

// Names returns the Name of every user.
func (e Users) Names() []string {
	values := make([]string, len(e))
	for i, u := range e {
		values[i] = u.Name
	}
	return values
}

// Sexes returns the Sex of every user.
func (e Users) Sexes() []string {
	values := make([]string, len(e))
	for i, u := range e {
		values[i] = u.Sex
	}
	return values
}

// Ages returns the Age of every user.
func (e Users) Ages() []int {
	values := make([]int, len(e))
	for i, u := range e {
		values[i] = u.Age
	}
	return values
}

// SkillIDs returns the SkillID of every user.
func (e Users) SkillIDs() []uint64 {
	values := make([]uint64, len(e))
	for i, u := range e {
		values[i] = u.SkillID
	}
	return values
}

// SkillRanks returns the SkillRank of every user.
func (e Users) SkillRanks() []int {
	values := make([]int, len(e))
	for i, u := range e {
		values[i] = u.SkillRank
	}
	return values
}

// GroupIDs returns the GroupID of every user.
func (e Users) GroupIDs() []uint64 {
	values := make([]uint64, len(e))
	for i, u := range e {
		values[i] = u.GroupID
	}
	return values
}

// WorldIDs returns the WorldID of every user.
func (e Users) WorldIDs() []uint64 {
	values := make([]uint64, len(e))
	for i, u := range e {
		values[i] = u.WorldID
	}
	return values
}

// FieldIDs returns the FieldID of every user.
func (e Users) FieldIDs() []uint64 {
	values := make([]uint64, len(e))
	for i, u := range e {
		values[i] = u.FieldID
	}
	return values
}

// CreatedAts returns the CreatedAt of every user.
func (e Users) CreatedAts() []time.Time {
	values := make([]time.Time, len(e))
	for i, u := range e {
		values[i] = u.CreatedAt
	}
	return values
}

// UpdatedAts returns the UpdatedAt of every user.
func (e Users) UpdatedAts() []time.Time {
	values := make([]time.Time, len(e))
	for i, u := range e {
		values[i] = u.UpdatedAt
	}
	return values
}
package kgo
import (
"encoding/json"
"fmt"
"log"
"strconv"
"strings"
"text/template"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// inc returns i + 1.
func inc(i int) int {
	return i + 1
}

// dec returns i - 1.
func dec(i int) int {
	return i - 1
}

// mod returns i modulo d.
func mod(i, d int) int {
	return i % d
}

// fmul returns x * y for floats.
func fmul(x, y float64) float64 {
	return x * y
}

// fdiv returns x / y for floats.
func fdiv(x, y float64) float64 {
	return x / y
}

// fadd returns x + y for floats.
func fadd(x, y float64) float64 {
	return x + y
}

// fsub returns x - y for floats.
func fsub(x, y float64) float64 {
	return x - y
}

// mul returns x * y for ints.
func mul(x, y int) int {
	return x * y
}

// div returns x / y for ints.
func div(x, y int) int {
	return x / y
}

// add returns x + y for ints.
func add(x, y int) int {
	return x + y
}

// sub returns x - y for ints.
func sub(x, y int) int {
	return x - y
}
// ToInt converts any numeric value or decimal string to an int.
// Floats are truncated; unsupported types, empty strings, and unparsable
// strings yield 0.
func ToInt(x interface{}) int {
	switch v := x.(type) {
	case int:
		return v
	case int8:
		return int(v)
	case int16:
		return int(v)
	case int32:
		return int(v)
	case int64:
		return int(v)
	case uint:
		return int(v)
	case uint8:
		return int(v)
	case uint16:
		return int(v)
	case uint32:
		return int(v)
	case uint64:
		return int(v)
	case float32:
		return int(v)
	case float64:
		return int(v)
	case string:
		if len(v) == 0 {
			return 0
		}
		n, err := strconv.Atoi(v)
		if err != nil {
			return 0
		}
		return n
	}
	return 0
}
// ToFloat converts any numeric value or decimal string to a float64.
// Unsupported types and unparsable strings yield 0.
func ToFloat(x interface{}) float64 {
	switch v := x.(type) {
	case float64:
		return v
	case float32:
		return float64(v)
	case int:
		return float64(v)
	case int8:
		return float64(v)
	case int16:
		return float64(v)
	case int32:
		return float64(v)
	case int64:
		return float64(v)
	case uint:
		return float64(v)
	case uint8:
		return float64(v)
	case uint16:
		return float64(v)
	case uint32:
		return float64(v)
	case uint64:
		return float64(v)
	case string:
		f, err := strconv.ParseFloat(v, 64)
		if err != nil {
			return 0
		}
		return f
	}
	return 0
}
// ToBool converts x to a bool. Booleans are returned as-is; numeric values
// are true when non-zero. Any other type (including strings) yields false.
// Now covers the full numeric set handled by ToInt/ToFloat — previously
// int, int8, int16, uint, uint8, and uint16 always returned false.
func ToBool(x interface{}) bool {
	switch v := x.(type) {
	case bool:
		return v
	case int:
		return v != 0
	case int8:
		return v != 0
	case int16:
		return v != 0
	case int32:
		return v != 0
	case int64:
		return v != 0
	case uint:
		return v != 0
	case uint8:
		return v != 0
	case uint16:
		return v != 0
	case uint32:
		return v != 0
	case uint64:
		return v != 0
	case float32:
		return v != 0
	case float64:
		return v != 0
	}
	return false
}
// ToArray returns x as a []interface{}, or nil when it is any other type.
func ToArray(x interface{}) []interface{} {
	a, _ := x.([]interface{})
	return a
}

// noPrint swallows its arguments and returns an empty string, letting
// templates evaluate expressions without emitting output.
func noPrint(x ...interface{}) string {
	return ""
}
// CorrectBackSlash escapes every backslash in str by doubling it.
func CorrectBackSlash(str string) string {
	const (
		single = `\`
		double = `\\`
	)
	return strings.ReplaceAll(str, single, double)
}
// TempMap is a generic string-keyed map used as the template data/context type.
type TempMap map[string]interface{}

// Set stores value under key, returning "" so it can be called from templates
// without producing output.
func (tm TempMap) Set(key string, value interface{}) string {
	tm[key] = value
	return ""
}

// Get returns the value stored under key, or "" when the key is absent.
func (tm TempMap) Get(key string) interface{} {
	v, ok := tm[key]
	if ok {
		return v
	}
	return ""
}

// Has reports whether key is present in the map.
func (tm TempMap) Has(key string) bool {
	_, ok := tm[key]
	return ok
}

// ToJSON renders the map as a JSON string. On marshal failure the error is
// logged and "" is returned.
func (tm TempMap) ToJSON() string {
	data, err := json.Marshal(tm)
	if err != nil {
		Error(err)
		return ""
	}
	return string(data)
}
// CreateMap builds a TempMap from alternating key/value arguments.
// Non-string keys are skipped along with their values; a trailing unpaired
// argument is ignored.
func CreateMap(x ...interface{}) TempMap {
	m := TempMap{}
	for i := 0; i+1 < len(x); i += 2 {
		if key, ok := x[i].(string); ok {
			m[key] = x[i+1]
		}
	}
	return m
}

// CreateList returns its arguments as a slice; with no arguments it returns
// a non-nil empty slice.
func CreateList(x ...interface{}) []interface{} {
	if len(x) == 0 {
		return []interface{}{}
	}
	return x
}

// AppendList appends the given values to ls and returns the result.
func AppendList(ls []interface{}, x ...interface{}) []interface{} {
	return append(ls, x...)
}
// xlog logs its arguments and returns an empty string so templates can call
// it without emitting output.
func xlog(x ...interface{}) string {
	log.Println(x...)
	return ""
}
// SafeAt returns the element of a at index without panicking: negative or
// out-of-range indexes and unsupported container types yield nil. Maps are
// treated as single-element containers addressable only at index 0.
// Fix: a negative index previously panicked (only the upper bound was checked).
func SafeAt(a interface{}, index int) interface{} {
	if index < 0 {
		return nil
	}
	switch x := a.(type) {
	case []interface{}:
		if index < len(x) {
			return x[index]
		}
	case []string:
		if index < len(x) {
			return x[index]
		}
	case []map[string]interface{}:
		if index < len(x) {
			return x[index]
		}
	case []TempMap:
		if index < len(x) {
			return x[index]
		}
	case []int:
		if index < len(x) {
			return x[index]
		}
	case []float64:
		if index < len(x) {
			return x[index]
		}
	case []int64:
		if index < len(x) {
			return x[index]
		}
	case []int32:
		if index < len(x) {
			return x[index]
		}
	case []bool:
		if index < len(x) {
			return x[index]
		}
	case map[string]interface{}, TempMap:
		if index == 0 {
			return a
		}
	}
	return nil
}
// ToMap converts the supported map types (map[string]interface{}, TempMap,
// bson.M, map[string]string) to a TempMap. Any other type yields an empty
// TempMap. Note: except for map[string]string, the returned TempMap shares
// storage with the input map.
func ToMap(x interface{}) TempMap {
	switch v := x.(type) {
	case map[string]interface{}:
		return TempMap(v)
	case TempMap:
		return v
	case bson.M:
		return TempMap(v)
	case map[string]string:
		{
			// map[string]string needs a per-entry copy to widen values to interface{}.
			m := TempMap{}
			for k, s := range v {
				m[k] = s
			}
			return m
		}
	}
	return TempMap{}
}
// Field looks up key in x, returning nil when the key is absent.
func Field(x map[string]interface{}, key string) interface{} {
	return x[key]
}

// ToString returns x when it is a string, or "" for any other type.
func ToString(x interface{}) string {
	if s, ok := x.(string); ok {
		return s
	}
	return ""
}
// ToJSONString marshals x to a JSON string. On failure the error is logged
// and "" is returned.
func ToJSONString(x interface{}) string {
	data, err := json.Marshal(x)
	if err != nil {
		Error(err)
		return ""
	}
	return string(data)
}

// ParseJSONString unmarshals str into x (which must be a pointer).
func ParseJSONString(str string, x interface{}) error {
	return json.Unmarshal([]byte(str), x)
}
// w3 wraps s in template action delimiters: "{{ s }}".
func w3(s string) string {
	return fmt.Sprintf("{{ %s }}", s)
}

// w3x wraps s in whitespace-trimming template delimiters: "{{- s -}}".
func w3x(s string) string {
	return fmt.Sprintf("{{- %s -}}", s)
}

// JoinString concatenates s with sep between elements (a variadic wrapper
// around strings.Join with the separator first, for template use).
func JoinString(sep string, s ...string) string {
	return strings.Join(s, sep)
}
// FMap ...
var FMap = template.FuncMap{
"w3": w3,
"w3x": w3x,
"inc": inc,
"dec": dec,
"mod": mod,
"fmul": fmul,
"fdiv": fdiv,
"fadd": fadd,
"fsub": fsub,
"mul": mul,
"div": div,
"add": add,
"sub": sub,
"toInt": ToInt,
"toFloat": ToFloat,
"toBool": ToBool,
"toArray": ToArray,
"toMap": ToMap,
"toString": ToString,
"newObjectID": primitive.NewObjectID,
"hashPassword": <PASSWORD>,
"randPassword": <PASSWORD>,
"noPrint": noPrint,
"correctBackSlash": CorrectBackSlash,
"now": time.Now,
"map": CreateMap,
"log": xlog,
"list": CreateList,
"append": AppendList,
"field": Field,
"split": strings.Split,
"toLower": strings.ToLower,
"title": strings.ToTitle,
"toUpper": strings.ToUpper,
"toJSON": ToJSONString,
"reverse": Reverse,
"at": SafeAt,
"convertDate": ConvertDate,
"joinString": JoinString,
} | api.go | 0.536313 | 0.434281 | api.go | starcoder |
package tokei
import (
"sort"
"time"
)
// Schedule represents the schedule on which the job will fire for a given timezone.
type Schedule struct {
location *time.Location
// Cache these ranges on creation to avoid allocations in Next()
month, dayOfMonth, dayOfWeek, hours, minutes []int
}
// NewSchedule creates a new schedule for an expression in the given timezone.
func NewSchedule(location *time.Location, ex *CronExpression) *Schedule {
return &Schedule{
location: location,
month: ex.month.Enumerate(),
dayOfMonth: ex.dayOfMonth.Enumerate(),
dayOfWeek: ex.dayOfWeek.Enumerate(),
hours: ex.hours.Enumerate(),
minutes: ex.minutes.Enumerate(),
}
}
// NewScheduleUTC creates a new schedule for the expression in UTC.
func NewScheduleUTC(ex *CronExpression) *Schedule {
return NewSchedule(time.UTC, ex)
}
// Timer returns a ScheduleTimer which fires on this schedule.
func (s *Schedule) Timer() *ScheduleTimer {
	return NewScheduleTimer(s)
}

// Next returns the next time that matches the schedule.
func (s *Schedule) Next() time.Time {
	upcoming := s.Project(1)
	return upcoming[0]
}

// NextFrom returns the next time >= t which matches the schedule.
func (s *Schedule) NextFrom(t time.Time) time.Time {
	upcoming := s.ProjectFrom(t, 1)
	return upcoming[0]
}

// Project returns the next n times that the expression is matched, starting
// from the current wall-clock time.
func (s *Schedule) Project(n int) []time.Time {
	now := time.Now()
	return s.ProjectFrom(now, n)
}
// ProjectFrom returns the next n matching times after t. If t itself matches
// the expression, it is counted in the results.
func (s *Schedule) ProjectFrom(t time.Time, n int) []time.Time {
	matches := make([]time.Time, n)
	cursor := t.In(s.location)
	for i := range matches {
		// Only the first projection may match the starting instant itself;
		// every subsequent one must move strictly forward.
		cursor = s.calculateNextFromTime(cursor, i == 0)
		matches[i] = cursor
	}
	return matches
}
// calculateNextFromTime returns the first schedule-matching time at or after
// t. When matchSame is false, t itself is excluded by first advancing one
// minute (minutes are the schedule's finest granularity).
func (s *Schedule) calculateNextFromTime(t time.Time, matchSame bool) time.Time {
	getNext := func(current time.Time) time.Time {
		// Starting at t, find the next month in which t matches the schedule
		for !s.matchesMonth(current.Month()) {
			current = current.AddDate(0, 1, 0)
			// NOTE: Add similar logic to below if we want to match specific years.
		}
		// Advance day by day until both day-of-month and day-of-week match.
		for !s.matchesDayOfMonth(current.Day()) || !s.matchesDayOfWeek(current.Weekday()) {
			current = current.AddDate(0, 0, 1)
			// Wrapped around to the 1st which increments the month, so now we need to start from the top
			// of that month again.
			if current.Day() == 1 {
				reset := time.Date(current.Year(), current.Month(), current.Day(), 0, 0, 0, 0, t.Location())
				return s.calculateNextFromTime(reset, true)
			}
		}
		// Within the matching day, walk hour by hour, then minute by minute.
		for !s.matchesHour(current.Hour()) {
			current = current.Add(time.Hour)
		}
		for !s.matchesMinute(current.Minute()) {
			current = current.Add(time.Minute)
		}
		return current
	}
	// If we don't want to match the current time (maybe because we want to generate the next N times from now)
	// add a minute to move us along.
	if matchSame {
		return getNext(t)
	}
	return getNext(t.Add(time.Minute))
}
// matchesMonth reports whether month is in the schedule's enumerated months.
func (s *Schedule) matchesMonth(month time.Month) bool {
	return contains(s.month, int(month))
}

// matchesDayOfMonth reports whether dayOfMonth is in the schedule's
// enumerated days of the month.
func (s *Schedule) matchesDayOfMonth(dayOfMonth int) bool {
	return contains(s.dayOfMonth, dayOfMonth)
}

// matchesDayOfWeek reports whether dayOfWeek is in the schedule's enumerated
// weekdays. time.Sunday (0) is remapped to 7, so the enumeration presumably
// uses the 1..7 (Monday..Sunday) convention — TODO confirm this matches the
// expression parser's Enumerate output.
func (s *Schedule) matchesDayOfWeek(dayOfWeek time.Weekday) bool {
	day := int(dayOfWeek)
	if day == 0 {
		day = 7
	}
	return contains(s.dayOfWeek, day)
}

// matchesHour reports whether hour is in the schedule's enumerated hours.
func (s *Schedule) matchesHour(hour int) bool {
	return contains(s.hours, hour)
}

// matchesMinute reports whether minute is in the schedule's enumerated minutes.
func (s *Schedule) matchesMinute(minute int) bool {
	return contains(s.minutes, minute)
}
// ScheduleTimer is a timer which runs on the cron schedule.
type ScheduleTimer struct {
	schedule *Schedule
	// timeChan carries each fire time to consumers of Next().
	timeChan chan time.Time
	// closeChan is created but never consumed in this chunk — presumably the
	// stop signal for Start(); verify a Stop method exists elsewhere.
	closeChan chan struct{}
}
// NewScheduleTimer creates a new timer for the given schedule.
func NewScheduleTimer(schedule *Schedule) *ScheduleTimer {
	st := &ScheduleTimer{schedule: schedule}
	st.timeChan = make(chan time.Time)
	st.closeChan = make(chan struct{})
	return st
}

// Next returns a channel upon which times will be sent when the schedule
// matches the cron expression. Timers must be started with Start().
func (st *ScheduleTimer) Next() <-chan time.Time {
	return st.timeChan
}
// Start runs the timer loop, sending each matching time on the channel
// returned by Next. It blocks, so it is normally run in its own goroutine,
// and returns once closeChan is closed.
//
// Fixes relative to the original: the loop previously ignored closeChan
// entirely (the goroutine could never be stopped) and re-computed the sleep
// with an explicit Sub of time.Now; it now uses time.Until with a Timer and
// selects on closeChan both while waiting and while sending.
func (st *ScheduleTimer) Start() {
	for {
		next := st.schedule.Next()
		timer := time.NewTimer(time.Until(next))
		select {
		case <-timer.C:
		case <-st.closeChan:
			timer.Stop()
			return
		}
		select {
		case st.timeChan <- next:
		case <-st.closeChan:
			return
		}
	}
}
// contains reports whether needle occurs in haystack. It relies on the fact
// that all expression enumerations are inherently sorted, using the standard
// library's binary search instead of a hand-rolled sort.Search closure.
func contains(haystack []int, needle int) bool {
	i := sort.SearchInts(haystack, needle)
	return i < len(haystack) && haystack[i] == needle
}
package path
import (
"path/filepath"
"strings"
)
// Path is a slice of string segments, representing a filesystem path. An
// absolute path is marked by a leading "/" segment (see New).
type Path []string
// New cleans and splits the system-delimited filesystem path into segments.
// "" and "." yield nil; an absolute path keeps a leading "/" segment so
// absoluteness survives the split.
func New(s string) Path {
	s = filepath.ToSlash(filepath.Clean(s))
	switch {
	case s == "", s == ".":
		return nil
	case s == "/":
		return Path{"/"}
	case s[0] == '/':
		return append(Path{"/"}, strings.Split(s[1:], "/")...)
	default:
		return strings.Split(s, "/")
	}
}
// ToPaths cleans and splits each of the system-delimited filesystem paths.
func ToPaths(paths []string) []Path {
	out := make([]Path, 0, len(paths))
	for _, raw := range paths {
		out = append(out, New(raw))
	}
	return out
}
// LessThan provides lexicographic ordering of Paths: segments are compared
// pairwise, and a path orders before any longer path it is a prefix of.
func (p Path) LessThan(o Path) bool {
	shorter := len(p)
	if len(o) < shorter {
		shorter = len(o)
	}
	for i := 0; i < shorter; i++ {
		if p[i] != o[i] {
			return p[i] < o[i]
		}
	}
	return len(p) < len(o)
}
// String returns the properly platform-delimited form of the path; the empty
// path renders as ".".
func (p Path) String() string {
	if len(p) == 0 {
		return "."
	}
	segments := []string(p)
	return filepath.Join(segments...)
}
// Append appends additional elements to the end of p, dropping the leading
// "/" marker on appended paths so fully-qualified paths join cleanly.
func Append(p Path, ps ...Path) Path {
	for _, extra := range ps {
		// Drop the leading '/' when appending/joining fully qualified paths.
		if len(extra) > 0 && extra[0] == "/" {
			extra = extra[1:]
		}
		p = append(p, extra...)
	}
	return p
}

// AppendString appends additional string elements to the end of p.
func AppendString(p Path, elem ...string) Path {
	split := ToPaths(elem)
	return Append(p, split...)
}
// Join joins any number of paths and returns the result. Unlike Append, the
// result never aliases p's backing array: the previous implementation
// returned p[:] (or appended directly onto it), so a later append through
// the joined result could overwrite elements still reachable via p.
func Join(p Path, ps ...Path) Path {
	joined := make(Path, len(p))
	copy(joined, p)
	return Append(joined, ps...)
}

// JoinString joins path and any number of additional string elements,
// returning the result.
func JoinString(p Path, elem ...string) Path {
	return Join(p, ToPaths(elem)...)
}
// SplitCommonRoot finds the longest common whole-segment prefix of the
// provided paths and returns it along with each path stripped of that prefix.
// (Comment typo "command" fixed.)
func SplitCommonRoot(paths []Path) (Path, []Path) {
	root := LongestCommonPrefix(paths)
	if len(root) == 0 {
		return root, paths
	}
	result := make([]Path, len(paths))
	for i, p := range paths {
		result[i] = p[len(root):]
	}
	return root, result
}

// SplitCommonRootString is SplitCommonRoot for platform-delimited strings:
// it returns the longest common prefix and the stripped remainders rendered
// back to strings. The local result slice was previously named "strings",
// shadowing the imported strings package; it is renamed to avoid that.
func SplitCommonRootString(paths []string) (string, []string) {
	root, stripped := SplitCommonRoot(ToPaths(paths))
	rendered := make([]string, len(stripped))
	for i, p := range stripped {
		rendered[i] = p.String()
	}
	return root.String(), rendered
}
// LongestCommonPrefix returns the longest shared Path prefix of all of the
// paths.
func LongestCommonPrefix(paths []Path) Path {
	switch len(paths) {
	case 0:
		return nil
	case 1:
		return paths[0]
	}
	// The common prefix of the whole set equals the common prefix of its
	// lexicographic minimum and maximum, so a single pass finds those two
	// extremes instead of comparing every pair of paths.
	min, max := paths[0], paths[0]
	for _, p := range paths[1:] {
		switch {
		case p.LessThan(min):
			min = p
		case max.LessThan(p):
			max = p
		}
	}
	// Compare only the two extremes segment by segment.
	for i := 0; i < len(min) && i < len(max); i++ {
		if min[i] != max[i] {
			return min[:i]
		}
	}
	// min is a prefix of max (possibly equal), so it is the answer itself.
	return min
}
package actions
import (
"github.com/LindsayBradford/crem/internal/pkg/model/action"
"github.com/LindsayBradford/crem/internal/pkg/model/planningunit"
)
// RiverBankRestorationType identifies the river bank restoration management
// action.
const RiverBankRestorationType action.ManagementActionType = "RiverBankRestoration"

// NewRiverBankRestoration creates a new river bank restoration action with
// its action type pre-assigned.
func NewRiverBankRestoration() *RiverBankRestoration {
	return new(RiverBankRestoration).WithType(RiverBankRestorationType)
}

// RiverBankRestoration is a management action; it embeds
// SimpleManagementAction for its type/planning-unit/variable bookkeeping, and
// the With* methods below form a fluent builder around it.
type RiverBankRestoration struct {
	action.SimpleManagementAction
}

// WithType records the action type and returns the receiver for chaining.
func (r *RiverBankRestoration) WithType(actionType action.ManagementActionType) *RiverBankRestoration {
	r.SimpleManagementAction.WithType(actionType)
	return r
}

// WithPlanningUnit records the planning unit this action applies to and
// returns the receiver for chaining.
func (r *RiverBankRestoration) WithPlanningUnit(planningUnit planningunit.Id) *RiverBankRestoration {
	r.SimpleManagementAction.WithPlanningUnit(planningUnit)
	return r
}
// RiverBankRestorationCost names the implementation-cost model variable.
const RiverBankRestorationCost action.ModelVariableName = "RiverBankRestorationCost"

// WithImplementationCost records the implementation cost in dollars.
func (r *RiverBankRestoration) WithImplementationCost(costInDollars float64) *RiverBankRestoration {
	r.SimpleManagementAction.WithVariable(RiverBankRestorationCost, costInDollars)
	return r
}

// RiverBankRestorationOpportunityCost names the opportunity-cost model variable.
const RiverBankRestorationOpportunityCost action.ModelVariableName = "RiverBankRestorationOpportunityCost"

// WithOpportunityCost records the opportunity cost in dollars.
func (r *RiverBankRestoration) WithOpportunityCost(costInDollars float64) *RiverBankRestoration {
	r.SimpleManagementAction.WithVariable(RiverBankRestorationOpportunityCost, costInDollars)
	return r
}

// ActionedBufferVegetation names the post-action buffer vegetation variable.
const ActionedBufferVegetation action.ModelVariableName = "ActionedBufferVegetation"

// WithActionedBufferVegetation records the vegetation proportion after the action.
func (r *RiverBankRestoration) WithActionedBufferVegetation(proportionOfVegetation float64) *RiverBankRestoration {
	r.SimpleManagementAction.WithVariable(ActionedBufferVegetation, proportionOfVegetation)
	return r
}

// OriginalBufferVegetation names the pre-action buffer vegetation variable.
const OriginalBufferVegetation action.ModelVariableName = "OriginalBufferVegetation"

// WithOriginalBufferVegetation records the vegetation proportion before the action.
func (r *RiverBankRestoration) WithOriginalBufferVegetation(proportionOfVegetation float64) *RiverBankRestoration {
	r.SimpleManagementAction.WithVariable(OriginalBufferVegetation, proportionOfVegetation)
	return r
}

// OriginalRiparianSedimentProduction names the pre-action sediment variable.
const OriginalRiparianSedimentProduction action.ModelVariableName = "OriginalRiparianSedimentProduction"

// WithOriginalRiparianSedimentProduction records sediment production before the action.
func (r *RiverBankRestoration) WithOriginalRiparianSedimentProduction(sediment float64) *RiverBankRestoration {
	r.SimpleManagementAction.WithVariable(OriginalRiparianSedimentProduction, sediment)
	return r
}

// ActionedRiparianSedimentProduction names the post-action sediment variable.
const ActionedRiparianSedimentProduction action.ModelVariableName = "ActionedRiparianSedimentProduction"

// WithActionedRiparianSedimentProduction records sediment production after the action.
func (r *RiverBankRestoration) WithActionedRiparianSedimentProduction(sediment float64) *RiverBankRestoration {
	r.SimpleManagementAction.WithVariable(ActionedRiparianSedimentProduction, sediment)
	return r
}
func (r *RiverBankRestoration) WithOriginalParticulateNitrogen(particulateNitrogen float64) *RiverBankRestoration {
return r.WithVariable(ParticulateNitrogenOriginalAttribute, particulateNitrogen)
}
func (r *RiverBankRestoration) WithActionedParticulateNitrogen(particulateNitrogen float64) *RiverBankRestoration {
return r.WithVariable(ParticulateNitrogenActionedAttribute, particulateNitrogen)
}
func (r *RiverBankRestoration) WithOriginalFineSediment(fineSediment float64) *RiverBankRestoration {
return r.WithVariable(FineSedimentOriginalAttribute, fineSediment)
}
func (r *RiverBankRestoration) WithActionedFineSediment(fineSediment float64) *RiverBankRestoration {
return r.WithVariable(FineSedimentActionedAttribute, fineSediment)
}
func (r *RiverBankRestoration) WithOriginalDissolvedNitrogen(dissolvedNitrogen float64) *RiverBankRestoration {
return r.WithVariable(DissolvedNitrogenOriginalAttribute, dissolvedNitrogen)
}
func (r *RiverBankRestoration) WithActionedDissolvedNitrogen(dissolvedNitrogen float64) *RiverBankRestoration {
return r.WithVariable(DissolvedNitrogenActionedAttribute, dissolvedNitrogen)
}
func (r *RiverBankRestoration) WithDissolvedNitrogenRemovalEfficiency(removalEfficiency float64) *RiverBankRestoration {
return r.WithVariable(DissolvedNitrogenRemovalEfficiency, removalEfficiency)
}
func (r *RiverBankRestoration) WithVariable(variableName action.ModelVariableName, value float64) *RiverBankRestoration {
r.SimpleManagementAction.WithVariable(variableName, value)
return r
} | internal/pkg/model/models/catchment/actions/RiverBankRestoration.go | 0.769773 | 0.64197 | RiverBankRestoration.go | starcoder |
package geojson
import (
"fmt"
"math"
)
// decodeBoundingBox coerces a raw GeoJSON "bbox" value into []float64.
// nil yields (nil, nil); a []float64 passes straight through; a
// []interface{} must contain only float64 entries. Anything else errors.
func decodeBoundingBox(bb interface{}) ([]float64, error) {
	switch raw := bb.(type) {
	case nil:
		return nil, nil
	case []float64:
		return raw, nil
	case []interface{}:
		coords := make([]float64, 0, len(raw))
		for _, entry := range raw {
			num, ok := entry.(float64)
			if !ok {
				return nil, fmt.Errorf("bounding box coordinate not usable, got %T", entry)
			}
			coords = append(coords, num)
		}
		return coords, nil
	default:
		return nil, fmt.Errorf("bounding box property not usable, got %T", bb)
	}
}
// checkBoundingBox reports whether boundingbox is a usable bbox: non-empty,
// an even number of values laid out as [mins..., maxs...], with every
// minimum no greater than its paired maximum. An empty (non-nil) slice was
// previously accepted, letting a meaningless zero-dimension bbox through the
// cache checks in ComputeBoundingBox; it is now rejected.
func checkBoundingBox(boundingbox []float64) bool {
	if len(boundingbox) == 0 || len(boundingbox)%2 != 0 {
		return false
	}
	// Layout is x0,y0,... followed by x1,y1,...
	dimension := len(boundingbox) / 2
	for i := 0; i < dimension; i++ {
		if boundingbox[i] > boundingbox[i+dimension] {
			return false
		}
	}
	return true
}
// expandBounds grows the running bounds to include pt. A nil bmin seeds
// fresh bounds (copies of pt); otherwise every coordinate up to the seeded
// dimension is folded in with min/max. Extra trailing coordinates on pt are
// ignored, matching the per-case loops this helper replaces.
func expandBounds(bmin, bmax, pt []float64) ([]float64, []float64) {
	if bmin == nil {
		bmin = append(make([]float64, 0, len(pt)), pt...)
		bmax = append(make([]float64, 0, len(pt)), pt...)
		return bmin, bmax
	}
	for d := 0; d < len(bmin); d++ {
		bmin[d] = math.Min(bmin[d], pt[d])
		bmax[d] = math.Max(bmax[d], pt[d])
	}
	return bmin, bmax
}

// expandBoundsLine folds every point of line into the running bounds.
func expandBoundsLine(bmin, bmax []float64, line [][]float64) ([]float64, []float64) {
	for _, pt := range line {
		bmin, bmax = expandBounds(bmin, bmax, pt)
	}
	return bmin, bmax
}

// ComputeBoundingBox computes the geometry's bounding box as (min, max)
// coordinate slices. When force is false and a valid cached BoundingBox is
// present, its two halves are returned instead of recomputing.
//
// This rewrite factors the six duplicated min/max loops into expandBounds /
// expandBoundsLine. It also no longer panics on empty Multi*/LineString
// slices (the original indexed element [0] unconditionally) and returns
// (nil, nil) — rather than a pair of empty slices — when there is nothing to
// bound or the geometry type is unknown.
func (g *Geometry) ComputeBoundingBox(force bool) (min, max []float64) {
	if !force && checkBoundingBox(g.BoundingBox) {
		dimension := len(g.BoundingBox) / 2
		return g.BoundingBox[0:dimension], g.BoundingBox[dimension:]
	}
	var bmin, bmax []float64
	switch g.Type {
	case GeometryPoint:
		bmin, bmax = expandBounds(nil, nil, g.Point)
	case GeometryMultiPoint:
		bmin, bmax = expandBoundsLine(nil, nil, g.MultiPoint)
	case GeometryLineString:
		bmin, bmax = expandBoundsLine(nil, nil, g.LineString)
	case GeometryMultiLineString:
		for _, line := range g.MultiLineString {
			bmin, bmax = expandBoundsLine(bmin, bmax, line)
		}
	case GeometryPolygon:
		for _, ring := range g.Polygon {
			bmin, bmax = expandBoundsLine(bmin, bmax, ring)
		}
	case GeometryMultiPolygon:
		for _, poly := range g.MultiPolygon {
			for _, ring := range poly {
				bmin, bmax = expandBoundsLine(bmin, bmax, ring)
			}
		}
	case GeometryCollection:
		if len(g.Geometries) == 0 {
			return nil, nil
		}
		if bmin, bmax = g.Geometries[0].ComputeBoundingBox(force); bmin == nil {
			return nil, nil
		}
		dimension := len(bmin)
		for i := 1; i < len(g.Geometries); i++ {
			tmin, tmax := g.Geometries[i].ComputeBoundingBox(force)
			// Every member must produce bounds of the same dimension.
			if tmin == nil || len(tmin) != dimension {
				return nil, nil
			}
			for d := 0; d < dimension; d++ {
				bmin[d] = math.Min(bmin[d], tmin[d])
				bmax[d] = math.Max(bmax[d], tmax[d])
			}
		}
	}
	return bmin, bmax
}
// ComputeBoundingBox returns the feature's bounding box, preferring the
// cached BoundingBox when it is valid (unless force is set) and otherwise
// delegating to the feature's geometry.
func (f *Feature) ComputeBoundingBox(force bool) (bmin, bmax []float64) {
	if force || !checkBoundingBox(f.BoundingBox) {
		return f.Geometry.ComputeBoundingBox(force)
	}
	dimension := len(f.BoundingBox) / 2
	return f.BoundingBox[:dimension], f.BoundingBox[dimension:]
}
// ComputeBoundingBox Get FeatureCollection geo outsourcing box
func (fc *FeatureCollection) ComputeBoundingBox(force bool) (bmin, bmax []float64) {
if !force && checkBoundingBox(fc.BoundingBox) {
dimension := len(fc.BoundingBox) / 2
bmin = fc.BoundingBox[:dimension]
bmax = fc.BoundingBox[dimension:]
return bmin, bmax
}
if fc.Features == nil || len(fc.Features) == 0 {
return nil, nil
}
if bmin, bmax = fc.Features[0].ComputeBoundingBox(force); bmin == nil {
return nil, nil
}
dimension := len(bmin)
for i := 1; i < len(fc.Features); i++ {
tmin, tmax := fc.Features[i].ComputeBoundingBox(force)
if tmin == nil || len(tmin) != dimension {
return nil, nil
}
for d := 0; d < dimension; d++ {
bmin[d] = math.Min(bmin[d], tmin[d])
bmax[d] = math.Max(bmax[d], tmax[d])
}
}
return bmin, bmax
} | boundingbox.go | 0.70791 | 0.440108 | boundingbox.go | starcoder |
package impl
import (
"bytes"
"math"
)
// This defines the constant margin of error when transforming possible float
// values into another kind.
const dynintEpsilon = 1e-9

// LudwiegDynInt is used to represent an integer field with varying size,
// automatically setting its precision on-the-fly.
type LudwiegDynInt struct {
	// hasValue distinguishes a real value from the null encoding.
	hasValue bool
	// value always stores the number as float64; UnderlyingType records the
	// wire kind it serializes as.
	value          float64
	UnderlyingType DynIntValueKind
}
// DynInt returns a safe nullable DynInt value wrapping v. Any Go numeric
// type is accepted; nil produces the null value (hasValue false, encoded as
// a zero Int8). Any other dynamic type panics via illegalSetterValueError.
func DynInt(v interface{}) *LudwiegDynInt {
	retVal := &LudwiegDynInt{
		hasValue: true,
	}
	var kind DynIntValueKind
	var value float64
	// Every numeric case funnels through dynintInferNumberType, which picks
	// the narrowest wire kind capable of representing the value.
	switch val := v.(type) {
	case int:
		kind, value = dynintInferNumberType(float64(val))
	case int8:
		kind, value = dynintInferNumberType(float64(val))
	case int16:
		kind, value = dynintInferNumberType(float64(val))
	case int32:
		kind, value = dynintInferNumberType(float64(val))
	case int64:
		kind, value = dynintInferNumberType(float64(val))
	case uint:
		kind, value = dynintInferNumberType(float64(val))
	case uint8:
		kind, value = dynintInferNumberType(float64(val))
	case uint16:
		kind, value = dynintInferNumberType(float64(val))
	case uint32:
		kind, value = dynintInferNumberType(float64(val))
	case uint64:
		kind, value = dynintInferNumberType(float64(val))
	case float32:
		kind, value = dynintInferNumberType(float64(val))
	case float64:
		kind, value = dynintInferNumberType(float64(val))
	case nil:
		// Null value: zero Int8 with hasValue cleared.
		kind = DynIntValueKindInt8
		value = 0
		retVal.hasValue = false
	default:
		panic(illegalSetterValueError("dynint"))
	}
	retVal.value = value
	retVal.UnderlyingType = kind
	return retVal
}
// serializeDynInt writes a LudwiegDynInt to the buffer: an optional type
// byte, the underlying-kind discriminator byte, then the value encoded at
// the width the kind requires. Float32 values are widened and written as
// doubles. A candidate whose value is not a *LudwiegDynInt yields an error.
func serializeDynInt(c *serializationCandidate, b *bytes.Buffer) error {
	if c.writeType {
		b.WriteByte(c.meta.byte())
	}
	vv := c.value.Interface()
	if v, ok := vv.(*LudwiegDynInt); ok {
		// Discriminator first so the decoder knows how many bytes follow.
		b.WriteByte(byte(v.UnderlyingType))
		switch v.UnderlyingType {
		case DynIntValueKindInvalid:
			// Invalid kind carries no payload.
		case DynIntValueKindUint8, DynIntValueKindInt8:
			b.WriteByte(byte(v.value))
		case DynIntValueKindInt16, DynIntValueKindUint16:
			writeUint16(uint16(v.value), b)
		case DynIntValueKindUint32, DynIntValueKindInt32:
			writeUint32(uint32(v.value), b)
		case DynIntValueKindUint64, DynIntValueKindInt64:
			writeUint64(uint64(v.value), b)
		case DynIntValueKindFloat64, DynIntValueKindFloat32:
			writeDouble(v.value, b)
		}
	} else {
		return illegalSetterValueError("dynint")
	}
	return nil
}
// dynintInferNumberType picks the narrowest wire kind able to represent val,
// returning that kind along with the value to encode (truncated to its
// integer part for integer kinds).
//
// Fixes relative to the original:
//   - a fractional value outside float32 range is now classified Float64;
//     previously both branches returned Float32, so Float64 was unreachable
//     even though the codec handles it;
//   - the fractional part is compared via its absolute value, so negative
//     non-integers (e.g. -3.5, for which math.Modf yields frac -0.5) are
//     kept as floats instead of being silently truncated to integers.
func dynintInferNumberType(val float64) (DynIntValueKind, float64) {
	// Give special attention to float kinds first, since the integer kinds
	// below drop the fractional part entirely.
	integer, frac := math.Modf(val)
	frac = math.Abs(frac)
	if frac > dynintEpsilon && frac < 1.0-dynintEpsilon {
		if val >= -math.MaxFloat32 && val <= math.MaxFloat32 {
			return DynIntValueKindFloat32, val
		}
		return DynIntValueKindFloat64, val
	}
	// Integer kinds, narrowest first: unsigned widths, then signed widths.
	maxMins := [][]float64{
		{0, math.MaxUint8},
		{0, math.MaxUint16},
		{0, math.MaxUint32},
		{0, math.MaxUint64},
		{math.MinInt8, math.MaxInt8},
		{math.MinInt16, math.MaxInt16},
		{math.MinInt32, math.MaxInt32},
		{math.MinInt64, math.MaxInt64},
	}
	types := []DynIntValueKind{
		DynIntValueKindUint8,
		DynIntValueKindUint16,
		DynIntValueKindUint32,
		DynIntValueKindUint64,
		DynIntValueKindInt8,
		DynIntValueKindInt16,
		DynIntValueKindInt32,
		DynIntValueKindInt64,
	}
	for i, bounds := range maxMins {
		if integer >= bounds[0] && integer <= bounds[1] {
			return types[i], integer
		}
	}
	return DynIntValueKindInvalid, 0
}
// decodeDynInt reads a dynamic integer from b starting at *offset,
// mirroring serializeDynInt: one discriminator byte for the underlying kind,
// then the value at that kind's width (floats are always stored as 8-byte
// doubles). An empty meta byte or unknown discriminator yields the null
// value. *offset is advanced past everything consumed via incrSize.
func decodeDynInt(t metaProtocolByte, b []byte, offset *int) (interface{}, error) {
	if t.Empty {
		return &LudwiegDynInt{}, nil
	}
	dynKind := b[incrSize(offset, 1)]
	var value float64
	var tmpBuf []byte
	switch DynIntValueKind(dynKind) {
	case DynIntValueKindUint8, DynIntValueKindInt8:
		value = float64(b[incrSize(offset, 1)])
	case DynIntValueKindInt16, DynIntValueKindUint16:
		tmpBuf = b[*offset : *offset+2]
		incrSize(offset, 2)
		value = float64(readUint16(tmpBuf))
	case DynIntValueKindUint32, DynIntValueKindInt32:
		tmpBuf = b[*offset : *offset+4]
		incrSize(offset, 4)
		value = float64(readUint32(tmpBuf))
	case DynIntValueKindUint64, DynIntValueKindInt64:
		tmpBuf = b[*offset : *offset+8]
		incrSize(offset, 8)
		value = float64(readUint64(tmpBuf))
	case DynIntValueKindFloat64, DynIntValueKindFloat32:
		tmpBuf = b[*offset : *offset+8]
		incrSize(offset, 8)
		value = readDouble(tmpBuf)
	default:
		// Unknown discriminator: treat as null rather than failing hard.
		return &LudwiegDynInt{}, nil
	}
	result := LudwiegDynInt{
		hasValue:       true,
		value:          value,
		UnderlyingType: DynIntValueKind(dynKind),
	}
	return &result, nil
}
// Next we need to coerce back to known types
// Uint8 returns the internal representation of this type as an uint8
func (d *LudwiegDynInt) Uint8() uint8 {
if !d.hasValue {
return 0
}
return uint8(d.value)
}
// Uint16 returns the internal representation of this type as an uint16
func (d *LudwiegDynInt) Uint16() uint16 {
if !d.hasValue {
return 0
}
return uint16(d.value)
}
// Uint32 returns the internal representation of this type as an uint32
func (d *LudwiegDynInt) Uint32() uint32 {
if !d.hasValue {
return 0
}
return uint32(d.value)
}
// Uint64 returns the internal representation of this type as an uint64
func (d *LudwiegDynInt) Uint64() uint64 {
if !d.hasValue {
return 0
}
return uint64(d.value)
}
// Int8 returns the internal representation of this type as an int8
func (d *LudwiegDynInt) Int8() int8 {
if !d.hasValue {
return 0
}
return int8(d.value)
}
// Int16 returns the internal representation of this type as an int16
func (d *LudwiegDynInt) Int16() int16 {
if !d.hasValue {
return 0
}
return int16(d.value)
}
// Int32 returns the internal representation of this type as an int32
func (d *LudwiegDynInt) Int32() int32 {
if !d.hasValue {
return 0
}
return int32(d.value)
}
// Int64 returns the internal representation of this type as an int64
func (d *LudwiegDynInt) Int64() int64 {
if !d.hasValue {
return 0
}
return int64(d.value)
}
// Float32 returns the internal representation of this type as a float32
func (d *LudwiegDynInt) Float32() float32 {
if !d.hasValue {
return 0
}
return float32(d.value)
}
// Float64 returns the internal representation of this type as a float64
func (d *LudwiegDynInt) Float64() float64 {
if !d.hasValue {
return 0
}
return float64(d.value)
}
// Int returns the internal representation of this type as a int
func (d *LudwiegDynInt) Int() int {
if !d.hasValue {
return 0
}
return int(d.value)
} | impl/type_dynint.go | 0.73914 | 0.411879 | type_dynint.go | starcoder |
package unionfind
// UnionFind is the interface of a union-find (disjoint-set) data type over
// element indices 0..n-1 fixed at construction.
type UnionFind interface {
	// Union merges the components containing the two elements.
	Union(int, int)
	// Find returns an element's component representative, and false when the
	// element is out of range.
	Find(int) (int, bool)
	// IsConnected reports whether the two elements share a component.
	IsConnected(int, int) bool
	// Count returns the current number of components.
	Count() int
}
// quickFind implements UnionFind with the quick-find strategy: Find is O(1)
// via a flat id array, while Union is O(n) because it relabels a component.
type quickFind struct {
	count int   // number of components (equivalence classes)
	id    []int // determines component IDs (class representatives)
}

// NewQuickFind creates a new union-find data structure with quick find.
func NewQuickFind(n int) UnionFind {
	ids := make([]int, n)
	for i := range ids {
		ids[i] = i
	}
	return &quickFind{count: n, id: ids}
}

// isValid reports whether i is a legal element index.
func (u *quickFind) isValid(i int) bool {
	return i >= 0 && i < len(u.id)
}

// Union merges p's component into q's by relabeling every member of p's.
func (u *quickFind) Union(p, q int) {
	if !u.isValid(p) || !u.isValid(q) {
		return
	}
	pid := u.id[p]
	qid := u.id[q]
	if pid == qid {
		return
	}
	// Rename p's component to q's id
	for i, id := range u.id {
		if id == pid {
			u.id[i] = qid
		}
	}
	u.count--
}

// Find returns p's component id, or (-1, false) when p is out of range.
func (u *quickFind) Find(p int) (int, bool) {
	if !u.isValid(p) {
		return -1, false
	}
	return u.id[p], true
}

// IsConnected reports whether p and q carry the same component id.
func (u *quickFind) IsConnected(p, q int) bool {
	if !u.isValid(p) || !u.isValid(q) {
		return false
	}
	return u.id[p] == u.id[q]
}

// Count returns the number of components.
func (u *quickFind) Count() int {
	return u.count
}
// quickUnion implements UnionFind with the (unweighted) quick-union
// strategy: elements form parent-pointer trees and a component is identified
// by the root of its tree.
type quickUnion struct {
	count int   // number of components (equivalence classes)
	root  []int // parent pointer per element; a root points to itself
}

// NewQuickUnion creates a new union-find data structure with quick union.
func NewQuickUnion(n int) UnionFind {
	parents := make([]int, n)
	for i := range parents {
		parents[i] = i
	}
	return &quickUnion{count: n, root: parents}
}

// isValid reports whether i is a legal element index.
func (u *quickUnion) isValid(i int) bool {
	return i >= 0 && i < len(u.root)
}

// Union merges the components of p and q by pointing p's root at q's root.
func (u *quickUnion) Union(p, q int) {
	if !u.isValid(p) || !u.isValid(q) {
		return
	}
	pRoot, _ := u.Find(p)
	qRoot, _ := u.Find(q)
	if pRoot == qRoot {
		return
	}
	u.root[pRoot] = qRoot
	u.count--
}

// Find walks parent pointers up to the tree root.
func (u *quickUnion) Find(p int) (int, bool) {
	if !u.isValid(p) {
		return -1, false
	}
	cur := p
	for cur != u.root[cur] {
		cur = u.root[cur]
	}
	return cur, true
}

// IsConnected reports whether p and q share a tree root.
func (u *quickUnion) IsConnected(p, q int) bool {
	if !u.isValid(p) || !u.isValid(q) {
		return false
	}
	pRoot, _ := u.Find(p)
	qRoot, _ := u.Find(q)
	return pRoot == qRoot
}

// Count returns the number of components.
func (u *quickUnion) Count() int {
	return u.count
}
type weightedQuickUnion struct {
count int // number of components (equivalence classes)
root []int // determines component parents (class representatives)
size []int // number of elements in component (class) rooted at i
}
// NewWeightedQuickUnion creates a new weighted union-find data structure with quick union.
func NewWeightedQuickUnion(n int) UnionFind {
root := make([]int, n)
size := make([]int, n)
for i := 0; i < n; i++ {
root[i] = i
size[i] = 1
}
return &weightedQuickUnion{
count: n,
root: root,
size: size,
}
}
func (u *weightedQuickUnion) isValid(i int) bool {
return 0 <= i && i < len(u.root)
}
func (u *weightedQuickUnion) Union(p, q int) {
if !u.isValid(p) || !u.isValid(q) {
return
}
proot, _ := u.Find(p)
qroot, _ := u.Find(q)
if proot == qroot {
return
}
// make smaller root point to larger one
if u.size[proot] < u.size[qroot] {
u.root[proot] = qroot
u.size[qroot] += u.size[proot]
} else {
u.root[qroot] = proot
u.size[proot] += u.size[qroot]
}
u.count--
}
func (u *weightedQuickUnion) Find(p int) (int, bool) {
if !u.isValid(p) {
return -1, false
}
for p != u.root[p] {
p = u.root[p]
}
return p, true
}
func (u *weightedQuickUnion) IsConnected(p, q int) bool {
if !u.isValid(p) || !u.isValid(q) {
return false
}
proot, _ := u.Find(p)
qroot, _ := u.Find(q)
return proot == qroot
}
func (u *weightedQuickUnion) Count() int {
return u.count
} | unionfind/unionfind.go | 0.794863 | 0.483526 | unionfind.go | starcoder |
package phomath
import "math"
// Vector4Like is satisfied by any vector exposing 2D, 3D and 4D component
// accessors.
type Vector4Like interface {
	Vector2Like
	Vector3Like
	XYZW() (x, y, z, w float64)
}

// static check that Vector4 is Vector4Like
var _ Vector4Like = &Vector4{}
// NewVector4 creates a new Vector4 with the given components.
func NewVector4(x, y, z, w float64) *Vector4 {
	return &Vector4{
		X: x,
		Y: y,
		Z: z,
		W: w,
	}
}

// Vector4 is a representation of a vector in 4D space. Most methods mutate
// the receiver in place and return it for call chaining.
type Vector4 struct {
	X, Y, Z, W float64
}
// XY returns the x and y components of the vector.
func (v *Vector4) XY() (float64, float64) {
	return v.X, v.Y
}

// XYZ returns the x, y, and z components of the vector.
func (v *Vector4) XYZ() (x, y, z float64) {
	return v.X, v.Y, v.Z
}

// XYZW returns the x, y, z, and w components of the vector.
func (v *Vector4) XYZW() (x, y, z, w float64) {
	return v.X, v.Y, v.Z, v.W
}
// Clone makes an independent copy of this vector.
func (v *Vector4) Clone() *Vector4 {
	x, y, z, w := v.XYZW()
	return NewVector4(x, y, z, w)
}

// Copy the components of a given Vector into this Vector.
func (v *Vector4) Copy(other *Vector4) *Vector4 {
	x, y, z, w := other.XYZW()
	return v.Set(x, y, z, w)
}

// Equals checks if this vector is equal to the given vector, comparing each
// component within Epsilon.
func (v *Vector4) Equals(other *Vector4) bool {
	dx := math.Abs(v.X - other.X)
	dy := math.Abs(v.Y - other.Y)
	dz := math.Abs(v.Z - other.Z)
	dw := math.Abs(v.W - other.W)
	return dx < Epsilon && dy < Epsilon && dz < Epsilon && dw < Epsilon
}

// Set the x, y, z, and w components of this vector.
func (v *Vector4) Set(x, y, z, w float64) *Vector4 {
	v.X = x
	v.Y = y
	v.Z = z
	v.W = w
	return v
}
// Add the given vector to this vector, component-wise.
func (v *Vector4) Add(other *Vector4) *Vector4 {
	v.X += other.X
	v.Y += other.Y
	v.Z += other.Z
	v.W += other.W
	return v
}

// Subtract the given vector from this vector, component-wise.
func (v *Vector4) Subtract(other *Vector4) *Vector4 {
	v.X -= other.X
	v.Y -= other.Y
	v.Z -= other.Z
	v.W -= other.W
	return v
}

// Scale this vector by a scalar value.
func (v *Vector4) Scale(s float64) *Vector4 {
	v.X *= s
	v.Y *= s
	v.Z *= s
	v.W *= s
	return v
}

// Length returns the length (magnitude) of this vector.
func (v *Vector4) Length() float64 {
	sq := v.LengthSquared()
	return math.Sqrt(sq)
}

// LengthSquared returns the length of this vector, squared.
func (v *Vector4) LengthSquared() float64 {
	return v.Dot(v)
}

// Normalize this vector to unit length 1 in the same direction; the zero
// vector is left unchanged.
func (v *Vector4) Normalize() *Vector4 {
	sq := v.LengthSquared()
	if sq > 0 {
		v.Scale(1 / math.Sqrt(sq))
	}
	return v
}

// Dot calculates the dot product with the given vector.
func (v *Vector4) Dot(other *Vector4) float64 {
	return v.X*other.X + v.Y*other.Y + v.Z*other.Z + v.W*other.W
}
// Lerp linearly interpolates toward the given vector at position t.
func (v *Vector4) Lerp(other *Vector4, t float64) *Vector4 {
	v.X += t * (other.X - v.X)
	v.Y += t * (other.Y - v.Y)
	v.Z += t * (other.Z - v.Z)
	v.W += t * (other.W - v.W)
	return v
}

// Multiply this vector by the given vector, component-wise.
func (v *Vector4) Multiply(other *Vector4) *Vector4 {
	v.X *= other.X
	v.Y *= other.Y
	v.Z *= other.Z
	v.W *= other.W
	return v
}

// Divide this vector by the given vector, component-wise.
func (v *Vector4) Divide(other *Vector4) *Vector4 {
	v.X /= other.X
	v.Y /= other.Y
	v.Z /= other.Z
	v.W /= other.W
	return v
}

// Distance calculates the distance between this vector and the given vector.
func (v *Vector4) Distance(other *Vector4) float64 {
	sq := v.DistanceSquared(other)
	return math.Sqrt(sq)
}

// DistanceSquared calculates the squared distance to the given vector.
func (v *Vector4) DistanceSquared(other *Vector4) float64 {
	dx := other.X - v.X
	dy := other.Y - v.Y
	dz := other.Z - v.Z
	dw := other.W - v.W
	return dx*dx + dy*dy + dz*dz + dw*dw
}
// Negate the signs of the components of this vector.
func (v *Vector4) Negate() *Vector4 {
	return v.Scale(-1)
}

// TransformMatrix4 transforms this vector with the given Matrix4. The
// indexing (m[0..3] feeding x/y/z/w of the result from v.X) treats Values
// as a column-major 4x4 matrix, i.e. result = M * v.
func (v *Vector4) TransformMatrix4(m4 *Matrix4) *Vector4 {
	m := m4.Values
	return v.Set(
		m[0]*v.X+m[4]*v.Y+m[8]*v.Z+m[12]*v.W,
		m[1]*v.X+m[5]*v.Y+m[9]*v.Z+m[13]*v.W,
		m[2]*v.X+m[6]*v.Y+m[10]*v.Z+m[14]*v.W,
		m[3]*v.X+m[7]*v.Y+m[11]*v.Z+m[15]*v.W,
	)
}

// TransformQuaternion transforms this vector with the given Quaternion,
// rotating the x/y/z components (expanded q * v * conj(q)); W is untouched.
// NOTE(review): uses the conjugate as the inverse, which presumes q is unit
// length — confirm callers normalize their quaternions.
func (v *Vector4) TransformQuaternion(q *Quaternion) *Vector4 {
	// Intermediate product q * v.
	ix := q.W*v.X + q.Y*v.Z - q.Z*v.Y
	iy := q.W*v.Y + q.Z*v.X - q.X*v.Z
	iz := q.W*v.Z + q.X*v.Y - q.Y*v.X
	iw := -q.X*v.X - q.Y*v.Y - q.Z*v.Z
	// Multiply by the conjugate of q.
	v.X = ix*q.W + iw*-q.X + iy*-q.Z - iz*-q.Y
	v.Y = iy*q.W + iw*-q.Y + iz*-q.X - ix*-q.Z
	v.Z = iz*q.W + iw*-q.Z + ix*-q.Y - iy*-q.X
	return v
}

// Reset all components of this vector to 0's.
func (v *Vector4) Reset() *Vector4 {
	return v.Set(0, 0, 0, 0)
}
package iavl
import (
"bytes"
"github.com/pkg/errors"
)
// PathToKey represents an inner path to a leaf node.
// Note that the nodes are ordered such that the last one is closest
// to the root of the tree (leaf-first storage; String prints root-first).
type PathToKey struct {
	InnerNodes []proofInnerNode `json:"inner_nodes"`
}
// String renders the path one inner node per line, from the node closest to
// the root down toward the leaf (InnerNodes is stored leaf-first). A
// bytes.Buffer replaces the previous string += concatenation, which was
// quadratic in the number of nodes.
func (p *PathToKey) String() string {
	var b bytes.Buffer
	for i := len(p.InnerNodes) - 1; i >= 0; i-- {
		b.WriteString(p.InnerNodes[i].String())
		b.WriteByte('\n')
	}
	return b.String()
}
// verify checks that folding the leaf's hash up through every inner node
// reproduces root; any mismatch yields ErrInvalidProof.
func (p *PathToKey) verify(leafNode proofLeafNode, root []byte) error {
	hash := leafNode.Hash()
	for _, branch := range p.InnerNodes {
		hash = branch.Hash(hash)
	}
	if bytes.Equal(root, hash) {
		return nil
	}
	return errors.WithStack(ErrInvalidProof)
}
// isLeftmost reports whether no inner node carries a left sibling hash,
// i.e. the path hugs the left edge of the tree.
func (p *PathToKey) isLeftmost() bool {
	for _, node := range p.InnerNodes {
		if len(node.Left) != 0 {
			return false
		}
	}
	return true
}

// isRightmost reports whether no inner node carries a right sibling hash,
// i.e. the path hugs the right edge of the tree.
func (p *PathToKey) isRightmost() bool {
	for _, node := range p.InnerNodes {
		if len(node.Right) != 0 {
			return false
		}
	}
	return true
}

// isEmpty reports whether the path is nil or has no inner nodes.
func (p *PathToKey) isEmpty() bool {
	if p == nil {
		return true
	}
	return len(p.InnerNodes) == 0
}
func (p *PathToKey) dropRoot() *PathToKey {
if p.isEmpty() {
return p
}
return &PathToKey{
InnerNodes: p.InnerNodes[:len(p.InnerNodes)-1],
}
}
func (p *PathToKey) hasCommonRoot(p2 *PathToKey) bool {
if p.isEmpty() || p2.isEmpty() {
return false
}
leftEnd := p.InnerNodes[len(p.InnerNodes)-1]
rightEnd := p2.InnerNodes[len(p2.InnerNodes)-1]
return bytes.Equal(leftEnd.Left, rightEnd.Left) &&
bytes.Equal(leftEnd.Right, rightEnd.Right)
}
// isLeftAdjacentTo reports whether this path sits immediately to the left of
// p2 in the tree, i.e. no leaf can exist between them. It first strips the
// shared root nodes from both paths, then drops one more level to move past
// the fork, and finally requires that this path is rightmost within its
// subtree while p2 is leftmost within its own.
func (p *PathToKey) isLeftAdjacentTo(p2 *PathToKey) bool {
	for p.hasCommonRoot(p2) {
		p, p2 = p.dropRoot(), p2.dropRoot()
	}
	// Drop the forking node itself before checking the subtree edges.
	p, p2 = p.dropRoot(), p2.dropRoot()
	return p.isRightmost() && p2.isLeftmost()
}
// pathWithNode is a path to a key which includes the leaf node at that key.
type pathWithNode struct {
	Path *PathToKey `json:"path"`
	Node proofLeafNode `json:"node"`
}

// verify checks that Node hashes up through Path to the given root hash.
func (p *pathWithNode) verify(root []byte) error {
	return p.Path.verify(p.Node, root)
}
// verifyPaths verifies the left and right paths individually, and makes sure
// the ordering is such that left < startKey <= endKey < right.
func verifyPaths(left, right *pathWithNode, startKey, endKey, root []byte) error {
	// A reversed range is a caller error, not a proof failure. Use the
	// sign-based contract of bytes.Compare rather than comparing to 1.
	if bytes.Compare(startKey, endKey) > 0 {
		return ErrInvalidInputs
	}
	if left != nil {
		if err := left.verify(root); err != nil {
			return err
		}
		if !left.Node.isLesserThan(startKey) {
			return errors.WithStack(ErrInvalidProof)
		}
	}
	if right != nil {
		if err := right.verify(root); err != nil {
			return err
		}
		if !right.Node.isGreaterThan(endKey) {
			return errors.WithStack(ErrInvalidProof)
		}
	}
	return nil
}
// verifyNoMissingKeys checks that all paths are adjacent to one another,
// i.e. that there are no keys missing between consecutive paths. Nil paths
// are ignored. Paths are assumed to already be in ascending order.
func verifyNoMissingKeys(paths []*PathToKey) error {
	nonNil := make([]*PathToKey, 0, len(paths))
	for _, path := range paths {
		if path == nil {
			continue
		}
		nonNil = append(nonNil, path)
	}
	for i := 1; i < len(nonNil); i++ {
		// Always check from left to right, since paths are ascending.
		if !nonNil[i-1].isLeftAdjacentTo(nonNil[i]) {
			return errors.Errorf("paths #%d and #%d are not adjacent", i-1, i)
		}
	}
	return nil
}
// Checks that with the given left and right paths, no keys can exist in between.
// Supports nil paths to signify out-of-range.
func verifyKeyAbsence(left, right *pathWithNode) error {
if left != nil && left.Path.isRightmost() {
// Range starts outside of the right boundary.
return nil
} else if right != nil && right.Path.isLeftmost() {
// Range ends outside of the left boundary.
return nil
} else if left != nil && right != nil &&
left.Path.isLeftAdjacentTo(right.Path) {
// Range is between two existing keys.
return nil
}
return errors.WithStack(ErrInvalidProof)
} | .vendor/src/github.com/tendermint/iavl/path.go | 0.743913 | 0.52342 | path.go | starcoder |
package graphql
// UnionConfig provides specification to define a Union type. It is served as a convenient way to
// create a UnionTypeDefinition for creating a union type.
type UnionConfig struct {
	ThisIsTypeDefinition

	// Name of the defining Union
	Name string

	// Description for the Union type
	Description string

	// PossibleTypes describes which Object types can be represented by the defining union.
	PossibleTypes []ObjectTypeDefinition

	// TypeResolver resolves the concrete Object type implementing the defining interface from given
	// value.
	TypeResolver TypeResolver
}
// Compile-time checks that UnionConfig satisfies both the generic
// TypeDefinition interface and the union-specific definition interface.
var (
	_ TypeDefinition = (*UnionConfig)(nil)
	_ UnionTypeDefinition = (*UnionConfig)(nil)
)
// TypeData implements UnionTypeDefinition by copying the configured name,
// description and possible types into a UnionTypeData value.
func (config *UnionConfig) TypeData() UnionTypeData {
	data := UnionTypeData{}
	data.Name = config.Name
	data.Description = config.Description
	data.PossibleTypes = config.PossibleTypes
	return data
}
// NewTypeResolver implements UnionTypeDefinition. It returns the resolver
// given in the config; the union argument is unused here.
func (config *UnionConfig) NewTypeResolver(union Union) (TypeResolver, error) {
	return config.TypeResolver, nil
}
// unionTypeCreator is given to newTypeImpl for creating a Union.
type unionTypeCreator struct {
	// typeDef is the definition the union is built from.
	typeDef UnionTypeDefinition
}

// unionTypeCreator implements typeCreator.
var _ typeCreator = (*unionTypeCreator)(nil)

// TypeDefinition implements typeCreator.
func (creator *unionTypeCreator) TypeDefinition() TypeDefinition {
	return creator.typeDef
}
// LoadDataAndNew implements typeCreator: it loads the type data from the
// definition, validates that a name was provided, and returns a bare union
// (resolvers and possible types are wired up later in Finalize).
func (creator *unionTypeCreator) LoadDataAndNew() (Type, error) {
	data := creator.typeDef.TypeData()
	if data.Name == "" {
		return nil, NewError("Must provide name for Union.")
	}
	u := &union{data: data}
	return u, nil
}
// Finalize implements typeCreator. It wires the type resolver and resolves
// the set of possible Object types for the union created by LoadDataAndNew.
func (creator *unionTypeCreator) Finalize(t Type, typeDefResolver typeDefinitionResolver) error {
	// t is always the *union produced by LoadDataAndNew for this creator.
	union := t.(*union)

	// Initialize type resolver for the Union type.
	typeResolver, err := creator.typeDef.NewTypeResolver(union)
	if err != nil {
		return err
	}
	union.typeResolver = typeResolver

	// Resolve possible object types.
	possibleTypes := NewPossibleTypeSet()
	for _, possibleTypeDef := range union.data.PossibleTypes {
		possibleType, err := typeDefResolver(possibleTypeDef)
		if err != nil {
			return err
		}
		// NOTE(review): this assertion panics if a definition resolves to a
		// non-Object type — presumably guaranteed upstream; confirm.
		possibleTypes.Add(possibleType.(Object))
	}
	union.possibleTypes = possibleTypes
	return nil
}
// union is our built-in implementation for Union. It is configured with and built from
// UnionTypeDefinition.
type union struct {
	ThisIsUnionType
	// data holds the name, description and possible-type definitions.
	data UnionTypeData
	// possibleTypes is the resolved set of Object types; set in Finalize.
	possibleTypes PossibleTypeSet
	// typeResolver maps a value to its concrete Object type; set in Finalize.
	typeResolver TypeResolver
}

var _ Union = (*union)(nil)
// NewUnion initializes an instance of "union" from the given definition.
func NewUnion(typeDef UnionTypeDefinition) (Union, error) {
	creator := &unionTypeCreator{typeDef: typeDef}
	t, err := newTypeImpl(creator)
	if err != nil {
		return nil, err
	}
	return t.(Union), nil
}
// MustNewUnion is a convenience function equivalent to NewUnion but panics
// on failure instead of returning an error.
func MustNewUnion(typeDef UnionTypeDefinition) Union {
	result, err := NewUnion(typeDef)
	if err != nil {
		panic(err)
	}
	return result
}
// TypeResolver implements AbstractType.
func (u *union) TypeResolver() TypeResolver {
return u.typeResolver
}
// Name implements TypeWithName.
func (u *union) Name() string {
return u.data.Name
}
// Description implements TypeWithDescription.
func (u *union) Description() string {
return u.data.Description
}
// PossibleTypes implements Union.
func (u *union) PossibleTypes() PossibleTypeSet {
return u.possibleTypes
} | graphql/union.go | 0.860589 | 0.492859 | union.go | starcoder |
package machine
import (
"fmt"
"math/rand"
)
// Default values for rotor properties.
const (
	// DefaultPosition is the starting position (index 0, i.e. 'a').
	DefaultPosition = 0
	// DefaultStep shifts the rotor one character per step.
	DefaultStep = 1
	// DefaultCycle treats a full pass over the alphabet as one cycle.
	DefaultCycle = 26
)
// Rotor represents a mechanical rotor used in xenigma. A rotor contains connections
// used to make electric pathways and generate a path through the machine.
type Rotor struct {
	pathways [alphabetSize]int // Connections that form electric pathways.
	position int // Current position.
	takenSteps int // Number of taken steps within the current cycle.
	step int // Size of shift between steps, in characters.
	cycle int // Number of steps considered a full cycle.
}
// NewRotor returns a pointer to a new, initialized Rotor, and an error if
// given fields are invalid.
func NewRotor(pathways [alphabetSize]int, position, step, cycle int) (*Rotor, error) {
	if err := verifyRotor(pathways, position, step, cycle); err != nil {
		return nil, err
	}
	stepSize := step % alphabetSize
	if stepSize == 0 {
		// BUG FIX: a step that is a multiple of the alphabet size (e.g.
		// step=26, cycle=1 slips past verifyRotor) previously caused an
		// integer divide-by-zero panic in the takenSteps computation below.
		return nil, fmt.Errorf("invalid step: %d", step)
	}
	return &Rotor{
		pathways: pathways,
		position: position,
		takenSteps: (position / stepSize) % cycle,
		step: stepSize,
		cycle: cycle,
	}, nil
}
// GenerateRotor generates and returns a rotor with random config: shuffled
// pathway connections, a random position, and default step/cycle sizes.
func GenerateRotor() *Rotor {
	var pathways [alphabetSize]int
	for i := range pathways {
		pathways[i] = i
	}
	swap := func(j, k int) {
		pathways[j], pathways[k] = pathways[k], pathways[j]
	}
	rand.Shuffle(alphabetSize, swap)
	position := rand.Intn(alphabetSize)
	return &Rotor{
		pathways: pathways,
		position: position,
		takenSteps: (position / (DefaultStep % alphabetSize)) % DefaultCycle,
		step: DefaultStep,
		cycle: DefaultCycle,
	}
}
// takeStep moves rotor one step forward: the position advances by the step
// size (wrapping around the alphabet) and the step is counted toward the
// current cycle.
func (r *Rotor) takeStep() {
	r.position = (r.position + r.step) % alphabetSize
	r.takenSteps = (r.takenSteps + 1) % r.cycle
}
// Verify verifies rotor's current configuration, returns an error if rotor's
// fields are incorrect or incompatible.
func (r *Rotor) Verify() error {
	return verifyRotor(r.pathways, r.position, r.step, r.cycle)
}
// verifyRotor verifies given pathway connections, position, step size, and
// cycle size, and returns an error if given values are incorrect or incompatible.
func verifyRotor(pathways [alphabetSize]int, position, step, cycle int) (err error) {
switch {
case !zeroToNSlice(pathways[:]):
err = fmt.Errorf("electric pathways are incorrect")
case step <= 0:
err = fmt.Errorf("invalid step: %d", step)
case cycle <= 0:
err = fmt.Errorf("invalid cycle: %d", cycle)
case (position)%step != 0 || position < 0 || position > alphabetSize:
err = fmt.Errorf("invalid position: %d", position)
case ((alphabetSize) % (step * cycle)) != 0:
err = fmt.Errorf("cycle and step are incompatible, some collisions may occur")
}
return err
}
// UseDefaults sets all rotor's fields, except pathways, to their default
// values. Defaults are 'a' for position, 1 for step, and 26 for cycle.
func (r *Rotor) UseDefaults() {
r.position = DefaultPosition
r.takenSteps = (DefaultPosition / (DefaultStep % alphabetSize)) % DefaultCycle
r.step = DefaultStep
r.cycle = DefaultCycle
}
// Pathways returns rotor's pathway connections.
func (r *Rotor) Pathways() [alphabetSize]int {
return r.pathways
}
// Position returns rotor's current position.
func (r *Rotor) Position() int {
return r.position
}
// Step returns rotor's step size. Step represents the number of positions
// a rotor jumps when moving one step forward, and defaults to 1.
func (r *Rotor) Step() int {
return r.step
}
// Cycle returns rotor's cycle size. Cycle is the number of steps that
// represent a rotor's full cycle. When one rotor in a machine completes a
// full cycle the following rotor is shifted.
func (r *Rotor) Cycle() int {
return r.cycle
} | pkg/machine/rotor.go | 0.824709 | 0.446495 | rotor.go | starcoder |
package entity
import (
"go.knocknote.io/rapidash"
"golang.org/x/xerrors"
"time"
)
// User mirrors one row of the "users" table.
type User struct {
	ID uint64 `json:"id"`
	Name string `json:"name"`
	Sex string `json:"sex"`
	Age int `json:"age"`
	SkillID uint64 `json:"skillID"`
	SkillRank int `json:"skillRank"`
	GroupID uint64 `json:"groupID"`
	WorldID uint64 `json:"worldID"`
	FieldID uint64 `json:"fieldID"`
	CreatedAt time.Time `json:"createdAt"`
	UpdatedAt time.Time `json:"updatedAt"`
}

// Users is a collection of *User with column-projection helpers.
type Users []*User

// IDs collects the ID column of every user, in order.
func (e Users) IDs() []uint64 {
	out := make([]uint64, 0, len(e))
	for i := range e {
		out = append(out, e[i].ID)
	}
	return out
}

// Names collects the Name column of every user, in order.
func (e Users) Names() []string {
	out := make([]string, 0, len(e))
	for i := range e {
		out = append(out, e[i].Name)
	}
	return out
}

// Sexes collects the Sex column of every user, in order.
func (e Users) Sexes() []string {
	out := make([]string, 0, len(e))
	for i := range e {
		out = append(out, e[i].Sex)
	}
	return out
}

// Ages collects the Age column of every user, in order.
func (e Users) Ages() []int {
	out := make([]int, 0, len(e))
	for i := range e {
		out = append(out, e[i].Age)
	}
	return out
}

// SkillIDs collects the SkillID column of every user, in order.
func (e Users) SkillIDs() []uint64 {
	out := make([]uint64, 0, len(e))
	for i := range e {
		out = append(out, e[i].SkillID)
	}
	return out
}

// SkillRanks collects the SkillRank column of every user, in order.
func (e Users) SkillRanks() []int {
	out := make([]int, 0, len(e))
	for i := range e {
		out = append(out, e[i].SkillRank)
	}
	return out
}

// GroupIDs collects the GroupID column of every user, in order.
func (e Users) GroupIDs() []uint64 {
	out := make([]uint64, 0, len(e))
	for i := range e {
		out = append(out, e[i].GroupID)
	}
	return out
}

// WorldIDs collects the WorldID column of every user, in order.
func (e Users) WorldIDs() []uint64 {
	out := make([]uint64, 0, len(e))
	for i := range e {
		out = append(out, e[i].WorldID)
	}
	return out
}

// FieldIDs collects the FieldID column of every user, in order.
func (e Users) FieldIDs() []uint64 {
	out := make([]uint64, 0, len(e))
	for i := range e {
		out = append(out, e[i].FieldID)
	}
	return out
}

// CreatedAts collects the CreatedAt column of every user, in order.
func (e Users) CreatedAts() []time.Time {
	out := make([]time.Time, 0, len(e))
	for i := range e {
		out = append(out, e[i].CreatedAt)
	}
	return out
}

// UpdatedAts collects the UpdatedAt column of every user, in order.
func (e Users) UpdatedAts() []time.Time {
	out := make([]time.Time, 0, len(e))
	for i := range e {
		out = append(out, e[i].UpdatedAt)
	}
	return out
}
// Struct describes the "users" table layout for rapidash, declaring each
// column name and type in the same order as the User struct fields.
func (e *User) Struct() *rapidash.Struct {
	return rapidash.NewStruct("users").
		FieldUint64("id").
		FieldString("name").
		FieldString("sex").
		FieldInt("age").
		FieldUint64("skill_id").
		FieldInt("skill_rank").
		FieldUint64("group_id").
		FieldUint64("world_id").
		FieldUint64("field_id").
		FieldTime("created_at").
		FieldTime("updated_at")
}
// EncodeRapidash writes every column of the user into the encoder and
// returns any error accumulated by it.
func (e *User) EncodeRapidash(enc rapidash.Encoder) error {
	// A zero ID is skipped — presumably so the datastore can assign one on
	// insert; confirm against rapidash usage.
	if e.ID != 0 {
		enc.Uint64("id", e.ID)
	}
	enc.String("name", e.Name)
	enc.String("sex", e.Sex)
	enc.Int("age", e.Age)
	enc.Uint64("skill_id", e.SkillID)
	enc.Int("skill_rank", e.SkillRank)
	enc.Uint64("group_id", e.GroupID)
	enc.Uint64("world_id", e.WorldID)
	enc.Uint64("field_id", e.FieldID)
	enc.Time("created_at", e.CreatedAt)
	enc.Time("updated_at", e.UpdatedAt)
	if err := enc.Error(); err != nil {
		return xerrors.Errorf("failed to encode: %w", err)
	}
	return nil
}
// EncodeRapidash encodes each user in the slice into its own sub-encoder
// obtained from enc.New(), stopping at the first failure.
func (e *Users) EncodeRapidash(enc rapidash.Encoder) error {
	for i := range *e {
		if err := (*e)[i].EncodeRapidash(enc.New()); err != nil {
			return xerrors.Errorf("failed to encode: %w", err)
		}
	}
	return nil
}
// DecodeRapidash populates every column of the user from the decoder and
// returns any error accumulated by it.
func (e *User) DecodeRapidash(dec rapidash.Decoder) error {
	e.ID = dec.Uint64("id")
	e.Name = dec.String("name")
	e.Sex = dec.String("sex")
	e.Age = dec.Int("age")
	e.SkillID = dec.Uint64("skill_id")
	e.SkillRank = dec.Int("skill_rank")
	e.GroupID = dec.Uint64("group_id")
	e.WorldID = dec.Uint64("world_id")
	e.FieldID = dec.Uint64("field_id")
	e.CreatedAt = dec.Time("created_at")
	e.UpdatedAt = dec.Time("updated_at")
	if err := dec.Error(); err != nil {
		return xerrors.Errorf("failed to decode: %w", err)
	}
	return nil
}
func (e *Users) DecodeRapidash(dec rapidash.Decoder) error {
decLen := dec.Len()
values := make(Users, decLen)
for i := 0; i < decLen; i++ {
var v User
if err := v.DecodeRapidash(dec.At(i)); err != nil {
return xerrors.Errorf("failed to decode: %w", err)
}
values[i] = &v
}
*e = values
return nil
} | _example/05_rapidash_plugin/entity/user.go | 0.538012 | 0.426083 | user.go | starcoder |
package ast
// Node is the interface implemented by all nodes in the AST. It exposes the
// span of the node in the source file as its first and last Token.
// NOTE(review): the original comment also promised access to leading and
// trailing comments, but no such methods exist on this interface — confirm
// whether that information lives on Token before documenting it here.
type Node interface {
	Start() Token
	End() Token
}
// TerminalNode represents a leaf in the AST. These represent
// the tokens/lexemes in the protobuf language. Comments and
// whitespace are accumulated by the lexer and associated with
// the following lexed token.
type TerminalNode interface {
	Node
	// Token returns the single token this leaf corresponds to.
	Token() Token
}
// Compile-time checks that every leaf node type satisfies TerminalNode.
var _ TerminalNode = (*StringLiteralNode)(nil)
var _ TerminalNode = (*UintLiteralNode)(nil)
var _ TerminalNode = (*FloatLiteralNode)(nil)
var _ TerminalNode = (*IdentNode)(nil)
var _ TerminalNode = (*BoolLiteralNode)(nil)
var _ TerminalNode = (*SpecialFloatLiteralNode)(nil)
var _ TerminalNode = (*KeywordNode)(nil)
var _ TerminalNode = (*RuneNode)(nil)
// CompositeNode represents any non-terminal node in the tree. These
// are interior or root nodes and have child nodes.
type CompositeNode interface {
	Node
	// All AST nodes that are immediate children of this one.
	Children() []Node
}
// terminalNode contains book-keeping shared by all TerminalNode
// implementations. It is embedded in all such node types in this
// package. It provides the implementation of the TerminalNode
// interface. A leaf spans exactly one token, so Start, End and Token
// all return the same value.
type terminalNode Token

func (n terminalNode) Start() Token {
	return Token(n)
}

func (n terminalNode) End() Token {
	return Token(n)
}

func (n terminalNode) Token() Token {
	return Token(n)
}
// compositeNode contains book-keeping shared by all CompositeNode
// implementations. It is embedded in all such node types in this
// package. It provides the implementation of the CompositeNode
// interface. The span is derived from the first and last child, so
// children must be non-empty and in source order.
type compositeNode struct {
	children []Node
}

func (n *compositeNode) Children() []Node {
	return n.children
}

func (n *compositeNode) Start() Token {
	return n.children[0].Start()
}

func (n *compositeNode) End() Token {
	return n.children[len(n.children)-1].End()
}
// RuneNode represents a single rune in protobuf source. Runes
// are typically collected into tokens, but some runes stand on
// their own, such as punctuation/symbols like commas, semicolons,
// equals signs, open and close symbols (braces, brackets, angles,
// and parentheses), and periods/dots.
// TODO: make this more compact; if runes don't have attributed comments
// then we don't need a Token to represent them and only need an offset
// into the file's contents
type RuneNode struct {
	terminalNode
	// Rune is the literal character this node represents.
	Rune rune
}

// NewRuneNode creates a new *RuneNode with the given properties.
func NewRuneNode(r rune, tok Token) *RuneNode {
	return &RuneNode{
		terminalNode: tok.asTerminalNode(),
		Rune: r,
	}
}
// EmptyDeclNode represents an empty declaration in protobuf source.
// These amount to extra semicolons, with no actual content preceding
// the semicolon.
type EmptyDeclNode struct {
	compositeNode
	// Semicolon is the lone ";" rune making up the declaration.
	Semicolon *RuneNode
}
// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must
// be non-nil.
func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode {
	if semicolon == nil {
		panic("semicolon is nil")
	}
	node := &EmptyDeclNode{Semicolon: semicolon}
	node.children = []Node{semicolon}
	return node
}
func (e *EmptyDeclNode) fileElement() {}
func (e *EmptyDeclNode) msgElement() {}
func (e *EmptyDeclNode) extendElement() {}
func (e *EmptyDeclNode) oneOfElement() {}
func (e *EmptyDeclNode) enumElement() {}
func (e *EmptyDeclNode) serviceElement() {}
func (e *EmptyDeclNode) methodElement() {} | ast/node.go | 0.565539 | 0.558086 | node.go | starcoder |
package xxtea
import (
"encoding/binary"
"errors"
)
const (
_Delta = 0x9e3779b9
)
// Decrypt is used to decode data stream from key
func Decrypt(data []byte, key []byte) ([]byte, error) {
if data == nil || key == nil || len(data) == 0 || len(key) == 0 {
return nil, errors.New("data or key is nill or 0-length")
}
if len(data)%4 != 0 {
return nil, errors.New("invalid data length")
}
uint32Arr := asUint32Array(data, false)
decryptedData := btea(uint32Arr, -len(uint32Arr), asKey(key))
return asByteArray(decryptedData, true)
}
// Encrypt is used to encode data stream by key
func Encrypt(data []byte, key []byte) ([]byte, error) {
if data == nil || key == nil || len(data) == 0 || len(key) == 0 {
return nil, errors.New("data or key is nill or 0-length")
}
uint32Arr := asUint32Array(data, true)
encryptedArr := btea(uint32Arr, len(uint32Arr), asKey(key))
return asByteArray(encryptedArr, false)
}
func mx(z, y, sum, p, e uint32, key []uint32) uint32 {
return (((z>>5 ^ y<<2) + (y>>3 ^ z<<4)) ^ ((sum ^ y) + (key[(p&3)^e] ^ z)))
}
func asKey(key []byte) []uint32 {
if len(key) > 16 {
key = key[:16]
} else if len(key) < 16 {
padding := make([]byte, 16-len(key))
key = append(key, padding...)
}
return asUint32Array(key, false)
}
func asByteArray(data []uint32, includeLength bool) ([]byte, error) {
var result []byte
dataLen := uint32(len(data))
size := dataLen << 2
if includeLength {
lastByte := data[len(data)-1]
if lastByte > (size-4) || lastByte < (size-7) {
return nil, errors.New("invalid data with length")
}
size = lastByte
dataLen--
if size%4 != 0 {
result = make([]byte, ((size/4)+1)*4)
} else {
result = make([]byte, size)
}
} else {
result = make([]byte, size)
}
for idx := uint32(0); idx < dataLen; idx++ {
binary.LittleEndian.PutUint32(result[idx*4:(idx+1)*4], data[idx])
}
return result[:size], nil
}
func asUint32Array(data []byte, includeLength bool) []uint32 {
var uint32Arr []uint32
size := uint32(len(data) / 4)
if len(data)&3 != 0 {
size++
}
if includeLength {
uint32Arr = make([]uint32, size+1)
uint32Arr[size] = uint32(len(data))
} else {
uint32Arr = make([]uint32, size)
}
for idx := uint32(0); idx < size; idx++ {
uint32Arr[idx] = toUint32(data[idx*4:])
}
return uint32Arr
}
func toUint32(b []byte) uint32 {
switch len(b) {
case 0:
return uint32(0)
case 1:
return uint32(b[0])
case 2:
return uint32(b[0]) | uint32(b[1])<<8
case 3:
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
default:
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
}
func btea(v []uint32, n int, key []uint32) []uint32 {
var y, z, sum uint32
var p, rounds, e uint32
if n > 1 {
rounds = uint32(6 + 52/n)
sum = 0
z = v[n-1]
for i := uint32(0); i < rounds; i++ {
sum += _Delta
e = (sum >> 2) & 3
for p = 0; p < uint32(n-1); p++ {
y = v[p+1]
z = v[p] + mx(z, y, sum, p, e, key)
v[p] = z
}
y = v[0]
z = v[p] + mx(z, y, sum, p, e, key)
v[p] = z
}
} else if n < -1 {
n = -n
rounds = uint32(6 + 52/n)
sum = rounds * _Delta
y = v[0]
for i := uint32(0); i < rounds; i++ {
e = (sum >> 2) & 3
for p = uint32(n - 1); p > 0; p-- {
z = v[p-1]
y = v[p] - mx(z, y, sum, p, e, key)
v[p] = y
}
z = v[n-1]
y = v[0] - mx(z, y, sum, p, e, key)
v[0] = y
sum -= _Delta
}
}
return v
} | xxtea.go | 0.500732 | 0.413536 | xxtea.go | starcoder |
package data
import (
"encoding/json"
"errors"
)
// MappingType is an enum for possible MappingDef Types
type MappingType int

// Supported mapping types.
const (
	// MtAssign denotes an attribute to attribute assignment
	MtAssign MappingType = 1
	// MtLiteral denotes a literal to attribute assignment
	MtLiteral MappingType = 2
	// MtExpression denotes a expression execution to perform mapping
	MtExpression MappingType = 3
	// MtObject denotes a object construction mapping
	MtObject MappingType = 4
	// MtArray denotes an array construction mapping
	MtArray MappingType = 5
)
// MappingDef is a simple structure that defines a mapping
type MappingDef struct {
	//Type the mapping type
	Type MappingType
	//Value the mapping value to execute to determine the result (rhs)
	Value interface{}
	//MapTo the name of attribute to place the result of the mapping in (lhs)
	MapTo string
}
// Mapper is a construct that maps values from one scope to another
type Mapper interface {
	// Apply executes the mappings, reading from inputScope and writing
	// results into outputScope.
	Apply(inputScope Scope, outputScope Scope) error
}
// MapperDef represents a Mapper, which is a collection of mappings
type MapperDef struct {
	//todo possibly add optional lang/mapper type so we can fast fail on unsupported mappings/mapper combo
	Mappings []*MappingDef
}
// IOMappings pairs the input and output mapping sets for a single unit.
type IOMappings struct {
	Input []*MappingDef `json:"input,omitempty"`
	Output []*MappingDef `json:"output,omitempty"`
}
// UnmarshalJSON deserializes a MappingDef, accepting the mapping type as
// either a string name (e.g. "assign") or a numeric code.
func (md *MappingDef) UnmarshalJSON(b []byte) error {
	var raw struct {
		Type interface{} `json:"type"`
		Value interface{} `json:"value"`
		MapTo string `json:"mapTo"`
	}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	md.MapTo = raw.MapTo
	md.Value = raw.Value
	mt, err := ConvertMappingType(raw.Type)
	if err != nil {
		return err
	}
	md.Type = mt
	return nil
}
func ConvertMappingType(mapType interface{}) (MappingType, error) {
strType, _ := CoerceToString(mapType)
switch strType {
case "assign", "1":
return MtAssign, nil
case "literal", "2":
return MtLiteral, nil
case "expression", "3":
return MtExpression, nil
case "object", "4":
return MtObject, nil
case "array", "5":
return MtArray, nil
default:
return 0, errors.New("unsupported mapping type: " + strType)
}
} | core/data/mapping.go | 0.662687 | 0.430866 | mapping.go | starcoder |
package algorithms
import (
"sort"
)
// Interface is the interface used by the algorithms.
type Interface interface {
	sort.Interface
	// Set stores the value located at b[j] to a[i].
	Set(i int, a Interface, j int, b Interface)
	// Slice returns a slice (e.g., s[i:j]) of the object.
	Slice(i, j int) Interface
	// Append returns i and a appended to each other (e.g., append(i, a...)).
	Append(a Interface) Interface
	// Clone returns a clone of the object.
	Clone() Interface
}

// Strings implements Interface for a slice of strings.
type Strings []string

// Len implements Interface.
func (s Strings) Len() int {
	return len(s)
}

// Swap implements Interface.
func (s Strings) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less implements Interface.
func (s Strings) Less(i, j int) bool {
	return s[i] < s[j]
}

// Set implements Interface.
func (s Strings) Set(i int, a Interface, j int, b Interface) {
	a.(Strings)[i] = b.(Strings)[j]
}

// Clone implements Interface.
func (s Strings) Clone() Interface {
	return append(Strings{}, s...)
}

// Append implements Interface.
func (s Strings) Append(a Interface) Interface {
	return append(s, a.(Strings)...)
}

// Slice implements Interface.
func (s Strings) Slice(i, j int) Interface {
	return s[i:j]
}

// Ints implements Interface for a slice of ints.
type Ints []int

// Len implements Interface.
func (s Ints) Len() int {
	return len(s)
}

// Swap implements Interface.
func (s Ints) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less implements Interface.
func (s Ints) Less(i, j int) bool {
	return s[i] < s[j]
}

// Set implements Interface.
func (s Ints) Set(i int, a Interface, j int, b Interface) {
	a.(Ints)[i] = b.(Ints)[j]
}

// Clone implements Interface.
func (s Ints) Clone() Interface {
	return append(Ints{}, s...)
}

// Append implements Interface.
func (s Ints) Append(a Interface) Interface {
	return append(s, a.(Ints)...)
}

// Slice implements Interface.
func (s Ints) Slice(i, j int) Interface {
	return s[i:j]
}

// Dedupe removes any duplicates from the given collection. This does not
// alter the input. First element is always chosen. The result is sorted
// ascending.
func Dedupe(s Interface) Interface {
	s = s.Clone()
	sort.Stable(s)
	var idx int
	for i := 0; i < s.Len(); i++ {
		// We know that the interface is ascending order. Therefore, each
		// value should be greater than the one previous. If they are equal,
		// then they are not greater.
		if i != 0 && !greater(i, i-1, s) {
			continue
		}
		s.Set(idx, s, i, s)
		idx++
	}
	return s.Slice(0, idx)
}

// Delete removes any items in 'b' from the 'a' collection. This does not
// alter either input; the filtered collection is returned.
func Delete(a, b Interface) Interface {
	a = a.Clone()
	b = b.Clone()
	sort.Sort(b)
	var currentIdx int
	for i := 0; i < a.Len(); i++ {
		if Search(i, a, b) {
			continue
		}
		a.Set(currentIdx, a, i, a)
		currentIdx++
	}
	return a.Slice(0, currentIdx)
}

// equal reports whether s[i] and s[j] compare equal.
func equal(i, j int, s sort.Interface) bool {
	// If something is neither less, nor greater than the given value, then it
	// must be equal.
	return !s.Less(i, j) && !s.Less(j, i)
}

// greater reports whether s[i] compares greater than s[j].
func greater(i, j int, s sort.Interface) bool {
	// sort.Interface only gives us less. Therefore, if we reverse that
	// operation (via sort.Reverse), we get greater.
	return sort.Reverse(s).Less(i, j)
}

// Search will look for the item at a[i] in b. If found, it will return true.
// Otherwise it will return false.
func Search(i int, a, b Interface) bool {
	// b must be sorted, but we don't want to destroy the input.
	b = b.Clone()
	sort.Sort(b)
	idx := index(i, a, b)
	if idx >= b.Len() {
		return false
	}
	// Compare a[i] with b[idx] by placing them side by side in a temporary
	// two-element collection.
	n := a.Clone().Slice(i, i+1).Append(b.Slice(idx, idx+1))
	return equal(0, 1, n)
}

// index finds the first position in sorted b whose value is >= a[i].
func index(i int, a, b Interface) int {
	return sort.Search(b.Len(), func(j int) bool {
		n := b.Clone().Slice(j, j+1).Append(a.Slice(i, i+1))
		return greater(0, 1, n) || equal(0, 1, n)
	})
}

// Merge will combine the two collections. It replaces values from a with b if
// there is a collision. It assumes both a and b have been Deduped. It uses
// Append to create new memory and therefore does not destroy the input.
func Merge(a, b Interface) Interface {
	b = b.Clone()
	sort.Sort(b)
	// b's elements come first so Dedupe's "first element wins" rule keeps
	// b's value on collision.
	return Dedupe(b.Append(a))
}
package tree
/*
# Number of Islands
# https://leetcode.com/explore/interview/card/top-interview-questions-medium/108/trees-and-graphs/792/
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input:
11110
11010
11000
00000
Output: 1
Example 2:
Input:
11000
11000
00100
00011
Output: 3
// 解法:DFS和BFS解法
// # https://zhuanlan.zhihu.com/p/24986203
*/
// NumIslands counts the 4-connected groups of '1' (land) cells in the grid.
// It delegates to the BFS implementation.
func NumIslands(grid [][]byte) int {
	return numIslandsBFS(grid)
}
// numIslandsDFS counts islands by flood-filling each unvisited land cell
// with a recursive depth-first search.
func numIslandsDFS(grid [][]byte) int {
	if len(grid) == 0 {
		return 0
	}
	rows, cols := len(grid), len(grid[0])
	seen := make([][]bool, rows)
	for r := range seen {
		seen[r] = make([]bool, cols)
	}
	count := 0
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if grid[r][c] == '1' && !seen[r][c] {
				numIslandsWithDFS(grid, seen, r, c)
				count++
			}
		}
	}
	return count
}

// numIslandsWithDFS marks the whole land component containing (x, y) as
// visited, recursing into the four orthogonal neighbors.
func numIslandsWithDFS(grid [][]byte, visited [][]bool, x, y int) {
	if x < 0 || x >= len(grid) || y < 0 || y >= len(grid[0]) {
		return
	}
	if grid[x][y] == '0' || visited[x][y] {
		return
	}
	visited[x][y] = true
	numIslandsWithDFS(grid, visited, x-1, y)
	numIslandsWithDFS(grid, visited, x+1, y)
	numIslandsWithDFS(grid, visited, x, y-1)
	numIslandsWithDFS(grid, visited, x, y+1)
}
// numIslandsBFS counts islands by flood-filling each unvisited land cell
// with a breadth-first search. Cells are encoded in the queue as a single
// int: row*n + col, where n is the column count.
func numIslandsBFS(grid [][]byte) int {
	if grid == nil || len(grid) == 0 {
		return 0
	}
	var count int
	m, n := len(grid), len(grid[0])
	visited := make([][]bool, m)
	for i := range visited {
		visited[i] = make([]bool, n)
	}
	for i := 0; i < m; i++ {
		for j := 0; j < n; j++ {
			if grid[i][j] == '0' || visited[i][j] {
				continue
			}
			count++
			queue := []int{i*n + j}
			for len(queue) != 0 {
				t := queue[0]
				queue = queue[1:]
				// Enqueue the four orthogonal neighbors of (t/n, t%n).
				numIslandsWithBFS(grid, visited, &queue, t/n-1, t%n)
				numIslandsWithBFS(grid, visited, &queue, t/n, t%n-1)
				numIslandsWithBFS(grid, visited, &queue, t/n+1, t%n)
				numIslandsWithBFS(grid, visited, &queue, t/n, t%n+1)
			}
		}
	}
	return count
}

// numIslandsWithBFS enqueues (x, y) when it is an in-bounds, unvisited land
// cell, marking it visited at enqueue time to avoid duplicates.
func numIslandsWithBFS(grid [][]byte, visited [][]bool, queue *[]int, x, y int) {
	if x < 0 || x >= len(grid) || y < 0 || y >= len(grid[0]) || grid[x][y] == '0' || visited[x][y] {
		return
	}
	visited[x][y] = true
	*queue = append(*queue, x*len(grid[0])+y)
}
package token
// Type is a bit-flag category for tokens; categories can be combined with
// bitwise OR and tested with bitwise AND.
type Type int

const (
	InvalidType Type = 1 << iota
	KeywordType // keyword, e.g. "function", "end"
	LiteralType // literal, e.g. 2.34, "a string", false
	MarkerType // marker for tables, groupings, etc; e.g. {, (
	OperatorType // operator, e.g. +, ==, &
	IdentifierType // identifier, e.g. the function: print
	CommentType
	WhitespaceType

	// Significant selects the categories that carry program meaning
	// (everything except operators, comments and whitespace).
	// BUG FIX: Significant previously sat in the middle of the iota run;
	// the implicit-repetition rule made CommentType and WhitespaceType
	// silently repeat Significant's value instead of receiving fresh bits,
	// so comments and whitespace tested as "significant".
	Significant = KeywordType | LiteralType | MarkerType | IdentifierType
)
var tokenTypes = map[Token]Type{
EOF: InvalidType,
Error: InvalidType,
Space: WhitespaceType,
FunctionName: IdentifierType,
TableBegin: MarkerType,
TableEnd: MarkerType,
Comma: MarkerType,
StatementEnd: MarkerType,
If: KeywordType,
Else: KeywordType,
ElseIf: KeywordType,
For: KeywordType,
Function: KeywordType,
End: KeywordType,
While: KeywordType,
Break: KeywordType,
Do: KeywordType,
Goto: KeywordType,
In: KeywordType,
Local: KeywordType,
Until: KeywordType,
Then: KeywordType,
Repeat: KeywordType,
Return: KeywordType,
OpenParen: MarkerType,
CloseParen: MarkerType,
Comment: CommentType,
Nil: IdentifierType,
StringLiteral: LiteralType,
NumberLiteral: LiteralType,
BooleanLiteral: LiteralType,
Identifier: IdentifierType,
AssignmentOperator: OperatorType,
AdditionOperator: OperatorType,
SubtractionOperator: OperatorType,
DivisionOperator: OperatorType,
MultOperator: OperatorType,
ConcatenationOperator: OperatorType,
VarargsOperator: OperatorType,
LengthOperator: OperatorType,
ObjectOperator: OperatorType,
MethodOperator: OperatorType,
//ComparisonOperator: OperatorType,
EqualityOperator: OperatorType,
NotEqualityOperator: OperatorType,
NotOperator: OperatorType,
AndOperator: OperatorType,
OrOperator: OperatorType,
TableLookupOperatorLeft: MarkerType,
TableLookupOperatorRight: MarkerType,
BitwiseShiftOperator: OperatorType,
BitwiseAndOperator: OperatorType,
BitwiseXorOperator: OperatorType,
BitwiseOrOperator: OperatorType,
BitwiseNotOperator: OperatorType,
Require: KeywordType,
} | token/types.go | 0.506591 | 0.433682 | types.go | starcoder |
package values
import "math"
// Add sums two scalars, promoting to float when either operand is a float.
func Add(a, b Scalar) Scalar {
	if !a.IsFloat() && !b.IsFloat() {
		return ScInt(a.Integer() + b.Integer())
	}
	return ScFloat(a.Float() + b.Float())
}

// Sub subtracts b from a, promoting to float when either operand is a float.
func Sub(a, b Scalar) Scalar {
	if !a.IsFloat() && !b.IsFloat() {
		return ScInt(a.Integer() - b.Integer())
	}
	return ScFloat(a.Float() - b.Float())
}

// Mul multiplies two scalars, promoting to float when either operand is a float.
func Mul(a, b Scalar) Scalar {
	if !a.IsFloat() && !b.IsFloat() {
		return ScInt(a.Integer() * b.Integer())
	}
	return ScFloat(a.Float() * b.Float())
}

// Div divides a by b, promoting to float when either operand is a float.
func Div(a, b Scalar) Scalar {
	if !a.IsFloat() && !b.IsFloat() {
		return ScInt(a.Integer() / b.Integer())
	}
	return ScFloat(a.Float() / b.Float())
}

// Mod computes the remainder: integer % for integer operands, IEEE 754
// remainder (math.Remainder) when either operand is a float.
func Mod(a, b Scalar) Scalar {
	if !a.IsFloat() && !b.IsFloat() {
		return ScInt(a.Integer() % b.Integer())
	}
	return ScFloat(math.Remainder(a.Float(), b.Float()))
}
func Concat(a, b Scalar) Scalar {
if !a.IsBytes() { return ScString(a.String()+b.String()) }
return ScBuffer(b.AppendTo(a.Bytes()))
}
func ForceTrue(a Scalar) Scalar {
if a.Bool() { return a }
return strue
}
func And(a, b Scalar) Scalar { return Bool2S(a.Bool() && b.Bool()) }
func Or(a, b Scalar) Scalar { return Bool2S(a.Bool() && b.Bool()) }
// LT reports a < b as a boolean Scalar, using ScalarLess ordering.
func LT(a, b Scalar) Scalar { return Bool2S(ScalarLess(a,b)) }
// GT reports a > b as a boolean Scalar.
func GT(a, b Scalar) Scalar { return Bool2S(ScalarLess(b,a)) }
// LE reports a <= b as a boolean Scalar.
func LE(a, b Scalar) Scalar { return Bool2S(!ScalarLess(b,a)) }
// GE reports a >= b as a boolean Scalar.
func GE(a, b Scalar) Scalar { return Bool2S(!ScalarLess(a,b)) }
// EQ reports a == b as a boolean Scalar, using ScalarComp.
func EQ(a, b Scalar) Scalar { return Bool2S(ScalarComp(a,b)==0) }
// NE reports a != b as a boolean Scalar.
func NE(a, b Scalar) Scalar { return Bool2S(ScalarComp(a,b)!=0) }
// Comp returns the three-way comparison of a and b (-1, 0 or 1) as an
// integer Scalar.
func Comp(a, b Scalar) Scalar { return ScInt(ScalarComp(a,b)) }
// UPlus is unary plus: it normalizes a to a float or integer Scalar
// without changing its numeric value.
func UPlus(a Scalar) Scalar {
	if a.IsFloat() { return ScFloat(a.Float()) }
	return ScInt(a.Integer())
}
// UMinus is unary minus: it returns the arithmetic negation of a.
func UMinus(a Scalar) Scalar {
	if a.IsFloat() { return -ScFloat(a.Float()) }
	return -ScInt(a.Integer())
}
// UNot is logical negation of a's truthiness.
func UNot(a Scalar) Scalar { return Bool2S(!a.Bool()) }
// UBitInv is bitwise complement of a's integer value.
func UBitInv(a Scalar) Scalar { return ScInt(^a.Integer()) }
// ScalarLess reports whether a orders before b: scalars of different types
// are ordered by their type tag; scalars of the same type delegate to Less.
func ScalarLess(a, b Scalar) bool {
	at,bt := a.Type(),b.Type()
	if at==bt { return a.Less(b) }
	return at<bt
}
// ScalarComp compares a and b, ordering first by scalar type tag and then
// by value within the same type. It returns -1, 0 or 1.
func ScalarComp(a, b Scalar) int {
	at, bt := a.Type(), b.Type()
	if at != bt {
		if at < bt {
			return -1
		}
		return 1
	}
	switch {
	case a.Less(b):
		return -1
	case b.Less(a):
		return 1
	default:
		return 0
	}
}
func RawScalarComp(a, b interface{}) int { return ScalarComp(a.(Scalar),b.(Scalar)) } | values/funcs.go | 0.663778 | 0.642517 | funcs.go | starcoder |
package reflect
import (
"bytes"
"reflect"
)
// IsBlank reports whether value holds the zero value of its type: an empty
// string, false, numeric zero, a nil pointer/interface, or (for any other
// kind) a value deeply equal to its type's zero value.
func IsBlank(value reflect.Value) bool {
	kind := value.Kind()
	if kind == reflect.String {
		return value.Len() == 0
	}
	if kind == reflect.Bool {
		return !value.Bool()
	}
	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return value.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return value.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return value.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return value.IsNil()
	}
	zero := reflect.Zero(value.Type())
	return reflect.DeepEqual(value.Interface(), zero.Interface())
}
// OrOne returns the first argument that is not its type's zero value
// (per IsBlank).
//
// NOTE(review): arr has static type []interface{}, so when every argument is
// blank, SliceElem yields the interface{} type and the fallback returns an
// untyped nil — confirm that is the intended result (the original TODO
// suggests this is unfinished).
func OrOne(arr ...interface{}) interface{} {
	for i := range arr {
		item := arr[i]
		if !IsBlank(reflect.ValueOf(item)) {
			return item
		}
	}
	return reflect.Zero(SliceElem(reflect.TypeOf(arr))).Interface()
}
// SliceElem unwraps nested slice/array types, returning the first element
// type that is neither a slice nor an array. Non-slice types are returned
// unchanged.
func SliceElem(rtype reflect.Type) reflect.Type {
	for k := rtype.Kind(); k == reflect.Slice || k == reflect.Array; k = rtype.Kind() {
		rtype = rtype.Elem()
	}
	return rtype
}
// RedirectValue follows pointers and interfaces until it reaches a
// non-pointer, non-interface value and returns it. Self-referential pointer
// chains are returned as-is to avoid looping forever; a nil pointer is
// resolved to the zero value of its element type; an invalid value is
// returned unchanged.
func RedirectValue(value reflect.Value) reflect.Value {
	for {
		if !value.IsValid() || (value.Kind() != reflect.Ptr && value.Kind() != reflect.Interface) {
			return value
		}
		res := value.Elem()
		// Test for a circular type.
		if res.Kind() == reflect.Ptr && value.Kind() == reflect.Ptr && value.Pointer() == res.Pointer() {
			return value
		}
		// Dereferencing a nil pointer yields an invalid Value; substitute the
		// element type's zero value instead of returning the invalid one.
		if !res.IsValid() && value.Kind() == reflect.Ptr {
			return reflect.Zero(value.Type().Elem())
		}
		value = res
	}
}
// IsFunction reports whether in is a function. Optional num values further
// constrain the signature: num[0] is the required number of inputs and
// num[1] (when given) the required number of outputs.
func IsFunction(in interface{}, num ...int) bool {
	t := reflect.TypeOf(in)
	if t == nil || t.Kind() != reflect.Func {
		return false
	}
	ok := true
	if len(num) >= 1 {
		ok = t.NumIn() == num[0]
	}
	if ok && len(num) == 2 {
		ok = t.NumOut() == num[1]
	}
	return ok
}
// IsEqual reports whether expected and actual are equal. Two untyped nils
// are equal; byte slices are compared with bytes.Equal; everything else
// falls back to reflect.DeepEqual.
func IsEqual(expected interface{}, actual interface{}) bool {
	if expected == nil || actual == nil {
		return expected == actual
	}
	if exp, ok := expected.([]byte); ok {
		act, ok := actual.([]byte)
		if !ok {
			return false
		}
		// A nil []byte only equals another nil []byte. The previous code
		// returned true whenever either side was nil, making a nil slice
		// "equal" to any byte slice.
		if exp == nil || act == nil {
			return exp == nil && act == nil
		}
		return bytes.Equal(exp, act)
	}
	return reflect.DeepEqual(expected, actual)
}
// IsType reports whether expected and actual have the same dynamic type.
func IsType(expected interface{}, actual interface{}) bool {
	return IsEqual(reflect.TypeOf(expected), reflect.TypeOf(actual))
}
// Equal reports whether the two objects are equal; it is an alias for IsEqual.
func Equal(expected interface{}, actual interface{}) bool {
	return IsEqual(expected, actual)
}
// NotEqual reports whether the two objects are not equal.
func NotEqual(expected interface{}, actual interface{}) bool {
	return !IsEqual(expected, actual)
}
package points
import (
"math"
"sort"
"github.com/go-spatial/tegola/maths"
)
// Extent describes a rectangular region as {{minx, miny}, {maxx, maxy}}.
type Extent [2][2]float64

// TopLeft returns the upper-left corner of the extent.
func (e Extent) TopLeft() [2]float64 { return e[0] }

// TopRight returns the upper-right corner of the extent.
func (e Extent) TopRight() [2]float64 { return [2]float64{e[1][0], e[0][1]} }

// LowerRight returns the lower-right corner of the extent.
func (e Extent) LowerRight() [2]float64 { return e[1] }

// LowerLeft returns the lower-left corner of the extent.
func (e Extent) LowerLeft() [2]float64 { return [2]float64{e[0][0], e[1][1]} }

// Verticies returns the corner vertices of the Extent in clockwise order
// starting at the top-left.
func (e Extent) Verticies() [][2]float64 {
	return [][2]float64{
		e.TopLeft(),
		e.TopRight(),
		e.LowerRight(),
		e.LowerLeft(),
	}
}

// Edges returns, in clockwise order, the edges that make up this extent.
// An edge is a line made up of two points.
func (e Extent) Edges() [][2][2]float64 {
	return [][2][2]float64{
		{e.TopLeft(), e.TopRight()},
		{e.TopRight(), e.LowerRight()},
		{e.LowerRight(), e.LowerLeft()},
		{e.LowerLeft(), e.TopLeft()},
	}
}

// LREdges returns the edges of the region ordered from the upper-left-most
// edges to the lower-right-most edges; i.e. sorted by x values first, then y.
func (e Extent) LREdges() [][2][2]float64 {
	return [][2][2]float64{
		{e.TopLeft(), e.TopRight()},
		{e.TopLeft(), e.LowerLeft()},
		{e.LowerLeft(), e.LowerRight()},
		{e.TopRight(), e.LowerRight()},
	}
}

// Contains reports whether the point pt is contained by the Extent
// (boundary inclusive).
func (e Extent) Contains(pt [2]float64) bool {
	return e[0][0] <= pt[0] && pt[0] <= e[1][0] &&
		e[0][1] <= pt[1] && pt[1] <= e[1][1]
}

// ContainsPoints reports whether all the given points are contained by the Extent.
func (e Extent) ContainsPoints(pts ...[2]float64) bool {
	for i := range pts {
		if !e.Contains(pts[i]) {
			return false
		}
	}
	return true
}

// ContainsLine reports whether both endpoints of the line are contained by the Extent.
func (e Extent) ContainsLine(line [2][2]float64) bool {
	return e.Contains(line[0]) && e.Contains(line[1])
}

// InclusiveContainsLine reports whether at least one endpoint of the line is
// contained by the Extent.
func (e Extent) InclusiveContainsLine(line [2][2]float64) bool {
	return e.Contains(line[0]) || e.Contains(line[1])
}

// ContainsExtent reports whether both corner points of the second extent are
// contained by the first extent.
// Fixed: previously both checks tested ee[1], so ee[0] was never examined.
func (e Extent) ContainsExtent(ee Extent) bool { return e.Contains(ee[0]) && e.Contains(ee[1]) }

// Area returns the area of the Extent, always non-negative.
// Fixed: previously only the width went through math.Abs, so a flipped
// y-ordering produced a negative area.
func (e Extent) Area() float64 { return math.Abs(e[1][0]-e[0][0]) * math.Abs(e[1][1]-e[0][1]) }
// IntersectPt returns the distinct points (sorted by x, then y) where the
// line ln crosses the boundary of the extent, and whether any such point
// exists. Intersections are only kept when they lie on both the extent edge
// segment and the given line segment.
func (e Extent) IntersectPt(ln [2][2]float64) (pts [][2]float64, ok bool) {
	lln := maths.NewLineWith2Float64(ln)
loop:
	// Test ln against each of the extent's four edges.
	for _, edge := range e.Edges() {
		eln := maths.NewLineWith2Float64(edge)
		if pt, ok := maths.Intersect(eln, lln); ok {
			// Only add if the point is actually on the line segment.
			if !eln.InBetween(pt) || !lln.InBetween(pt) {
				continue loop
			}
			// Only add if we have not seen this point (corner hits can be
			// produced by two adjacent edges).
			for i := range pts {
				if pts[i][0] == pt.X && pts[i][1] == pt.Y {
					continue loop
				}
			}
			pts = append(pts, [2]float64{pt.X, pt.Y})
		}
	}
	sort.Sort(byxy(pts))
	return pts, len(pts) > 0
}
// byxy implements sort.Interface for points, ordering lexicographically:
// by x first, then by y.
type byxy [][2]float64

func (b byxy) Len() int      { return len(b) }
func (b byxy) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byxy) Less(i, j int) bool {
	if b[i][0] == b[j][0] {
		return b[i][1] < b[j][1]
	}
	return b[i][0] < b[j][0]
}
package main
import (
"flag"
"fmt"
"image"
"image/color"
"image/png"
"log"
"os"
"strconv"
"github.com/llgcode/draw2d/draw2dimg"
)
const docString = `
funnel creates a scaled funnel graph based on percentage sizes by segment.
USAGE: funnel -width [width] -height [height] -out [filename] [entries...]
PARAMETERS:
-width [width] The width of the output image in pixels (default 400)
-height [height] The height of the output image pixels (default 600)
-out [filename] The filename of the output PNG image (default funnel.png)
[entries...] Percentages representing each segment. Max 10 entries.
MUST be between 0 and 100 (inclusive).
EXAMPLE:
funnel 100 70 40 10 0
`
// colorpal is a 10-step blue palette, darkest first, used to shade funnel
// segments.
var colorpal = []color.RGBA{
	{13, 71, 161, 255},
	{21, 101, 192, 255},
	{25, 118, 210, 255},
	{30, 136, 229, 255},
	{33, 150, 243, 255},
	{66, 165, 245, 255},
	{100, 181, 246, 255},
	{144, 202, 249, 255},
	{187, 222, 251, 255},
	{227, 242, 253, 255},
}

// getColorPal returns n colors picked from colorpal so that small funnels
// still span the full dark-to-light range (a single segment uses the
// lightest shade). Any n outside 1..9 returns the full palette.
func getColorPal(n int) []color.RGBA {
	switch n {
	case 1:
		return []color.RGBA{colorpal[9]}
	case 2:
		return []color.RGBA{colorpal[0], colorpal[9]}
	case 3:
		return []color.RGBA{colorpal[0], colorpal[4], colorpal[9]}
	case 4:
		return []color.RGBA{colorpal[0], colorpal[3], colorpal[6], colorpal[9]}
	case 5:
		return []color.RGBA{colorpal[0], colorpal[3], colorpal[5], colorpal[7], colorpal[9]}
	case 6:
		return []color.RGBA{colorpal[0], colorpal[2], colorpal[4], colorpal[6], colorpal[8], colorpal[9]}
	case 7:
		return []color.RGBA{colorpal[0], colorpal[1], colorpal[3], colorpal[4], colorpal[6], colorpal[8], colorpal[9]}
	case 8:
		return []color.RGBA{colorpal[0], colorpal[1], colorpal[3], colorpal[4], colorpal[5], colorpal[6], colorpal[8], colorpal[9]}
	case 9:
		return []color.RGBA{colorpal[0], colorpal[1], colorpal[2], colorpal[3], colorpal[4], colorpal[5], colorpal[6], colorpal[8], colorpal[9]}
	}
	return colorpal
}

// parseFunnel converts command-line entries into percentages. It returns an
// error when an entry is not a number, when a value is outside [0, 100],
// when there are no entries, or when there are more entries than available
// palette colors.
func parseFunnel(entries []string) ([]float64, error) {
	ret := make([]float64, 0, len(entries))
	for _, entry := range entries {
		d, err := strconv.ParseFloat(entry, 64)
		if err != nil {
			return nil, fmt.Errorf("Entry %s could not be interpreted as a number: %w", entry, err)
		}
		// Fixed: the message previously said "between 100 and 0" and used the
		// %d verb on a float64, which printed %!d(float64=...).
		if d > 100 || d < 0 {
			return nil, fmt.Errorf("All entries must be between 0 and 100. Got:%g", d)
		}
		ret = append(ret, d)
	}
	if len(ret) == 0 {
		return nil, fmt.Errorf("Funnel must have at least 1 entry")
	}
	if len(ret) > len(colorpal) {
		return nil, fmt.Errorf("We support a max of %d funnel entries.", len(colorpal))
	}
	return ret, nil
}
func main() {
var width, height int
var outfile string
flag.IntVar(&width, "width", 400, "width of the complete funnel")
flag.IntVar(&height, "height", 600, "height of the complete funnel")
flag.StringVar(&outfile, "out", "funnel.png", "output file name for the image generated")
flag.Usage = func() {
fmt.Fprintln(os.Stderr, docString)
}
flag.Parse()
funnel, err := parseFunnel(flag.Args())
if err != nil {
log.Fatalf("Error reading funnel entries: %s", err)
}
skipLast := false
if funnel[len(funnel)-1] == 0 {
skipLast = true
}
dest := image.NewRGBA(image.Rect(0, 0, width, height))
gc := draw2dimg.NewGraphicContext(dest)
verticalDelta := float64(height) / float64(len(funnel))
if skipLast {
verticalDelta = float64(height) / float64(len(funnel)-1)
}
colorPal := getColorPal(len(funnel))
if skipLast {
colorPal = getColorPal(len(funnel) - 1)
}
for j, funnelVal := range funnel {
if skipLast && j == len(funnel)-1 {
break
}
topY := float64(j) * verticalDelta
botY := float64(j+1) * verticalDelta
topX := float64(width) * funnelVal / 200
botX := topX
if j+1 < len(funnel) {
botX = float64(width) * funnel[j+1] / 200
}
gc.SetFillColor(colorPal[j])
gc.SetStrokeColor(colorPal[j])
gc.MoveTo(float64(width)/2-topX, topY)
gc.LineTo(float64(width)/2+topX, topY)
gc.LineTo(float64(width)/2+botX, botY)
gc.LineTo(float64(width)/2-botX, botY)
gc.LineTo(float64(width)/2-topX, topY)
gc.Close()
gc.FillStroke()
}
//gc.SetLineWidth(5)
// Save to file
// draw2dimg.SaveToPngFile("hello.png", dest)
f, err := os.Create(outfile)
if err != nil {
log.Fatal(err)
}
if err := png.Encode(f, dest); err != nil {
f.Close()
log.Fatal(err)
}
if err := f.Close(); err != nil {
log.Fatal(err)
}
} | funnel.go | 0.601711 | 0.434281 | funnel.go | starcoder |
package zostate
// EventType represents all events that exist in the state machine.
type EventType string
// StateType represents all states that exist in the state machine.
type StateType string
// state represents the configuration of a state in the machine, one of which is the transitions.
type state struct {
// transitions map an event with a destination state.
transitions map[EventType]StateType
}
// Machine represents a finite state machine.
type Machine struct {
name string // Name of the machine
current StateType // Current state of the machine
initial StateType // Initial state of the machine
states map[StateType]state // states maps the name of a state with the state configuration
}
// Transition maps an event with a destination state.
type Transition struct {
Event EventType // Name of the event that triggers the transition
Dst StateType // Destination state of the transition
}
// Transitions is a slice of related transitions.
type Transitions []Transition
// StateDesc is the description of a state.
type StateDesc struct {
Name StateType // Name holds the name of state
Transitions Transitions // Transitions holds all valid transitions originating from the state
}
// States is an slice of related state descriptions (StateDesc).
type States []StateDesc
// NewMachine returns an instance of the state machine (Machine) after it accepts
// the details of the machine, such as initial state & state descriptions.
// It returns an error if the parameters are invalid.
func NewMachine(name string, initial StateType, states States) (*Machine, error) {
mStates := make(map[StateType]state)
for _, s := range states {
state := state{
transitions: make(map[EventType]StateType),
}
for _, t := range s.Transitions {
state.transitions[t.Event] = t.Dst
}
mStates[s.Name] = state
}
if _, ok := mStates[initial]; !ok {
return nil, ErrMachineCreationFailed
}
machine := &Machine{
name: name,
states: mStates,
initial: initial,
}
return machine, nil
}
// Returns the current state that the machine is in.
func (m *Machine) Current() StateType {
if m.current == "" {
return m.initial
}
return m.current
}
// getNextState returns the next state for a given event after considering
// the current state and it's transitions. If a valid transition doesn't exist
// it returns an error.
func (m *Machine) getNextState(event EventType) (StateType, error) {
current := m.Current()
next, ok := m.states[current].transitions[event]
if !ok {
return "", ErrEventDeclined
}
return next, nil
}
// Transition moves the machine to the next state for the given event and
// returns that state. If the current state has no transition for the event,
// the machine is left unchanged and ErrTransitionFailed is returned together
// with the machine's current state.
func (m *Machine) Transition(event EventType) (StateType, error) {
	next, err := m.getNextState(event)
	if err != nil {
		// Use Current() rather than m.current so callers receive the initial
		// state (not the empty string) when no transition has happened yet.
		return m.Current(), ErrTransitionFailed
	}
	m.current = next
	return next, nil
}
// States returns a slice of all state descriptions (StateDesc) of the state machine.
func (m *Machine) States() States {
states := make(States, 0)
for name, state := range m.states {
s := StateDesc{
Name: name,
Transitions: make(Transitions, 0),
}
for tname, dst := range state.transitions {
s.Transitions = append(s.Transitions, Transition{
Event: tname,
Dst: dst,
})
}
states = append(states, s)
}
return states
} | zostate.go | 0.848251 | 0.794982 | zostate.go | starcoder |
package scene
import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"github.com/ProjectMOA/goraytrace/camera"
"github.com/ProjectMOA/goraytrace/image"
"github.com/ProjectMOA/goraytrace/lighting"
"github.com/ProjectMOA/goraytrace/maputil"
"github.com/ProjectMOA/goraytrace/math3d"
"github.com/ProjectMOA/goraytrace/shape"
)
// Scene defines a 3D scene that holds volumetric shapes
type Scene struct {
Camera camera.PinHole `json:"camera"`
Shapes []shape.Shape `json:"shapes"`
Lights []lighting.PointLight `json:"lights"`
}
// New creates a new empty scene with a default pinhole camera
func New() *Scene {
return &Scene{Camera: camera.DefaultPinHole(), Shapes: make([]shape.Shape, 0, 10)}
}
// Elements returns the number of elements in the scene
func (s *Scene) Elements() int {
return len(s.Shapes)
}
// AddShape adds a shape to the scene.
func (s *Scene) AddShape(aShape shape.Shape) {
s.Shapes = append(s.Shapes, aShape)
}
// AddLight adds a light to the scene.
func (s *Scene) AddLight(aLightsource lighting.PointLight) {
s.Lights = append(s.Lights, aLightsource)
}
// TraceScene traces the scene as it currently is, returning
// the final image.
func (s *Scene) TraceScene(width, height int) *image.Image {
targetIt := s.Camera.GetIterator(width, height)
var x, y int
var point *math3d.Vector3
render := image.New(width, height)
for targetIt.HasNext() {
point, x, y = targetIt.Next()
s.traceRay(point, x, y, render)
}
return render
}
func (s *Scene) traceRay(p *math3d.Vector3, x int, y int, img *image.Image) {
// Construct the light ray
lr := &math3d.LightRay{Direction: *p.Subtract(&s.Camera.FocalPoint).Normalized(), Source: *p}
// Check intersections with the shapes in the scene
nearestDistance, nearestShape := s.getNearestIntersection(lr)
if nearestDistance != math.MaxFloat64 {
// The lightray intersected a shape
intersection := lr.Source.Add(lr.Direction.Multiply(nearestDistance))
// Calculate the radiance at the intersection
radiance := s.calculateRadianceAt(intersection, lr, nearestShape)
img.Set(x, y, radiance.ToNRGBA())
} else {
// The lightray didn't intersect any shape. Just fill the pixel in black
img.Set(x, y, image.Black.ToNRGBA())
}
}
// calculateRadianceAt accumulates, over every light source, the diffuse +
// specular contribution at the intersection point of incidentalRay with
// shape sh. Lights occluded by any shape (per inShadow) contribute nothing.
func (s *Scene) calculateRadianceAt(intersection *math3d.Vector3, incidentalRay *math3d.LightRay, sh shape.Shape) image.Color {
	// trace shadow rays towards all light sources
	radiance := image.Color{}
	for _, ls := range s.Lights {
		pointToLightVector := ls.Position.Subtract(intersection)
		shadowRay := math3d.LightRay{Direction: *pointToLightVector.Normalized(), Source: *intersection}
		if !s.inShadow(&shadowRay, pointToLightVector.Abs()) {
			// NOTE(review): the surface normal is queried at the light's
			// position; presumably it should be queried at the intersection
			// point instead — confirm against shape.Shape.NormalAt semantics.
			normal := sh.NormalAt(&ls.Position).Normalized()
			// Cosine of the ray of light with the visible normal.
			cosine := shadowRay.Direction.Dot(normal)
			if cosine > 0.0 {
				reflected := incidentalRay.Direction.Multiply(-1).Reflect(normal)
				rCosine := math3d.Clamp(incidentalRay.Direction.Dot(reflected), 0, 1)
				// shiny is hard-coded to 0 for now; with a Black specular
				// color the second term is always zero.
				shiny := 0.0
				// Prepared to use phong materials
				phong := image.White.Divide(math.Pi).Add(image.Black.Multiply((shiny + 2) / (2 * math.Pi) * math.Pow(rCosine, shiny)))
				radiance = *radiance.Add(ls.Intensity.CMultiply(phong).Multiply(cosine))
			}
		}
	}
	return radiance
}
// getNearestIntersection returns the distance along lr to the closest shape
// intersection and the shape that was hit. When nothing is hit, the distance
// is math.MaxFloat64 and the returned shape is nil.
func (s *Scene) getNearestIntersection(lr *math3d.LightRay) (float64, shape.Shape) {
	nearest := math.MaxFloat64
	var hit shape.Shape
	// Note: the loop variable is named sh to avoid shadowing the receiver.
	for _, sh := range s.Shapes {
		if d := sh.Intersect(lr); d < nearest {
			nearest = d
			hit = sh
		}
	}
	return nearest, hit
}
// inShadow reports whether lr hits any shape strictly closer than distance,
// i.e. whether something occludes the light source the ray points at.
func (s *Scene) inShadow(lr *math3d.LightRay, distance float64) bool {
	for _, sh := range s.Shapes {
		if sh.Intersect(lr) < distance {
			return true
		}
	}
	return false
}
// SaveSceneFile saves the scene as a file that can be loaded later
func (s *Scene) SaveSceneFile(path string) {
marshaledScene, err := json.Marshal(s)
if err != nil {
panic(err)
}
var mappedScene map[string]interface{}
err = json.Unmarshal(marshaledScene, &mappedScene)
if err != nil {
panic(err)
}
mappedScene["shapes"] = shape.AsMap(s.Shapes)
marshaledScene, err = json.MarshalIndent(mappedScene, "", "\t")
if err != nil {
panic(err)
}
fmt.Println(string(marshaledScene))
err = ioutil.WriteFile(path, marshaledScene, 0644)
if err != nil {
panic(err)
}
}
// LoadSceneFile loads a scene file to a scene object
func LoadSceneFile(path string) *Scene {
bytes, err := ioutil.ReadFile(path)
if err != nil {
panic(err)
}
var scenemap map[string]interface{}
err = json.Unmarshal(bytes, &scenemap)
if err != nil {
panic(err)
}
retscene := &Scene{}
retscene.Camera = camera.PinHoleFromMap(scenemap["camera"].(map[string]interface{}))
retscene.Lights = lighting.PointLightsFromMap(maputil.ToSliceOfMap(scenemap["lights"].([]interface{})))
retscene.Shapes = shape.FromMap(maputil.ToSliceOfMap(scenemap["shapes"].([]interface{})))
return retscene
} | scene/scene.go | 0.758332 | 0.48249 | scene.go | starcoder |
package expression
import "github.com/juju/errors"
// Visitor represents a visitor pattern.
type Visitor interface {
// VisitBetween visits Between expression.
VisitBetween(b *Between) (Expression, error)
// VisitBinaryOperation visits BinaryOperation expression.
VisitBinaryOperation(o *BinaryOperation) (Expression, error)
// VisitCall visits Call expression.
VisitCall(c *Call) (Expression, error)
// VisitCompareSubQuery visits CompareSubQuery expression.
VisitCompareSubQuery(cs *CompareSubQuery) (Expression, error)
// VisitDefault visits Default expression.
VisitDefault(d *Default) (Expression, error)
// VisitFunctionCase visits FunctionCase expression.
VisitFunctionCase(f *FunctionCase) (Expression, error)
// VisitFunctionCast visits FunctionCast expression.
VisitFunctionCast(f *FunctionCast) (Expression, error)
// VisitFunctionConvert visits FunctionConvert expression.
VisitFunctionConvert(f *FunctionConvert) (Expression, error)
// VisitFunctionSubstring visits FunctionSubstring expression.
VisitFunctionSubstring(ss *FunctionSubstring) (Expression, error)
// VisitFunctionSubstringIndex visits FunctionSubstringIndex expression.
VisitFunctionSubstringIndex(ss *FunctionSubstringIndex) (Expression, error)
// VisitFunctionLocate visits FunctionLocate expression.
VisitFunctionLocate(ss *FunctionLocate) (Expression, error)
// VisitExistsSubQuery visits ExistsSubQuery expression.
VisitExistsSubQuery(es *ExistsSubQuery) (Expression, error)
// VisitIdent visits Ident expression.
VisitIdent(i *Ident) (Expression, error)
// VisitIsNull visits IsNull expression.
VisitIsNull(is *IsNull) (Expression, error)
// VisitIsTruth visits IsTruth expression.
VisitIsTruth(is *IsTruth) (Expression, error)
// VisitParamMaker visits ParamMarker expression.
VisitParamMaker(pm *ParamMarker) (Expression, error)
// VisitPatternIn visits PatternIn expression.
VisitPatternIn(n *PatternIn) (Expression, error)
// VisitPatternLike visits PatternLike expression.
VisitPatternLike(p *PatternLike) (Expression, error)
// VisitPatternRegexp visits PatternRegexp expression.
VisitPatternRegexp(p *PatternRegexp) (Expression, error)
// VisitPExpr visits PExpr expression.
VisitPExpr(p *PExpr) (Expression, error)
// VisitPosition visits Position expression.
VisitPosition(p *Position) (Expression, error)
// VisitRow visits Row expression.
VisitRow(r *Row) (Expression, error)
// VisitSubQuery visits SubQuery expression.
VisitSubQuery(sq SubQuery) (Expression, error)
// VisitUnaryOperation visits UnaryOperation expression.
VisitUnaryOperation(u *UnaryOperation) (Expression, error)
// VisitValue visits Value expression.
VisitValue(v Value) (Expression, error)
// VisitValues visits Values expression.
VisitValues(v *Values) (Expression, error)
// VisitVariable visits Variable expression.
VisitVariable(v *Variable) (Expression, error)
// VisitWhenClause visits WhenClause expression.
VisitWhenClause(w *WhenClause) (Expression, error)
// VisitExtract visits Extract expression.
VisitExtract(v *Extract) (Expression, error)
// VisitFunctionTrim visits FunctionTrim expression.
VisitFunctionTrim(v *FunctionTrim) (Expression, error)
// VisitDateArith visits DateArith expression.
VisitDateArith(da *DateArith) (Expression, error)
}
// BaseVisitor is the base implementation of Visitor.
// It traverses the expression tree and call expression's Accept function.
// It can not be used directly.
// A specific Visitor implementation can embed it in and only implements
// desired methods to do the job.
type BaseVisitor struct {
V Visitor
}
// VisitBetween implements Visitor interface.
func (bv *BaseVisitor) VisitBetween(b *Between) (Expression, error) {
var err error
b.Expr, err = b.Expr.Accept(bv.V)
if err != nil {
return b, errors.Trace(err)
}
b.Left, err = b.Left.Accept(bv.V)
if err != nil {
return b, errors.Trace(err)
}
b.Right, err = b.Right.Accept(bv.V)
if err != nil {
return b, errors.Trace(err)
}
return b, nil
}
// VisitBinaryOperation implements Visitor interface.
func (bv *BaseVisitor) VisitBinaryOperation(o *BinaryOperation) (Expression, error) {
var err error
o.L, err = o.L.Accept(bv.V)
if err != nil {
return o, errors.Trace(err)
}
o.R, err = o.R.Accept(bv.V)
if err != nil {
return o, errors.Trace(err)
}
return o, nil
}
// VisitCall implements Visitor interface.
func (bv *BaseVisitor) VisitCall(c *Call) (Expression, error) {
var err error
for i := range c.Args {
c.Args[i], err = c.Args[i].Accept(bv.V)
if err != nil {
return c, errors.Trace(err)
}
}
return c, nil
}
// VisitCompareSubQuery implements Visitor interface.
func (bv *BaseVisitor) VisitCompareSubQuery(cs *CompareSubQuery) (Expression, error) {
var err error
cs.L, err = cs.L.Accept(bv.V)
if err != nil {
return cs, errors.Trace(err)
}
_, err = cs.R.Accept(bv.V)
if err != nil {
return cs, errors.Trace(err)
}
return cs, nil
}
// VisitDefault implements Visitor interface.
func (bv *BaseVisitor) VisitDefault(d *Default) (Expression, error) {
return d, nil
}
// VisitExistsSubQuery implements Visitor interface.
func (bv *BaseVisitor) VisitExistsSubQuery(es *ExistsSubQuery) (Expression, error) {
var err error
_, err = es.Sel.Accept(bv.V)
if err != nil {
return es, errors.Trace(err)
}
return es, nil
}
// VisitFunctionCase implements Visitor interface.
func (bv *BaseVisitor) VisitFunctionCase(f *FunctionCase) (Expression, error) {
var err error
if f.Value != nil {
f.Value, err = f.Value.Accept(bv.V)
if err != nil {
return f, errors.Trace(err)
}
}
for i := range f.WhenClauses {
_, err = f.WhenClauses[i].Accept(bv.V)
if err != nil {
return f, errors.Trace(err)
}
}
if f.ElseClause != nil {
f.ElseClause, err = f.ElseClause.Accept(bv.V)
if err != nil {
return f, errors.Trace(err)
}
}
return f, nil
}
//VisitFunctionCast implements Visitor interface.
func (bv *BaseVisitor) VisitFunctionCast(f *FunctionCast) (Expression, error) {
var err error
f.Expr, err = f.Expr.Accept(bv.V)
if err != nil {
return f, errors.Trace(err)
}
return f, nil
}
// VisitFunctionConvert implements Visitor interface.
func (bv *BaseVisitor) VisitFunctionConvert(f *FunctionConvert) (Expression, error) {
var err error
f.Expr, err = f.Expr.Accept(bv.V)
if err != nil {
return f, errors.Trace(err)
}
return f, nil
}
// VisitFunctionSubstring implements Visitor interface.
func (bv *BaseVisitor) VisitFunctionSubstring(ss *FunctionSubstring) (Expression, error) {
var err error
ss.StrExpr, err = ss.StrExpr.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
ss.Pos, err = ss.Pos.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
if ss.Len == nil {
return ss, nil
}
ss.Len, err = ss.Len.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
return ss, nil
}
// VisitFunctionSubstringIndex implements Visitor interface.
func (bv *BaseVisitor) VisitFunctionSubstringIndex(ss *FunctionSubstringIndex) (Expression, error) {
var err error
ss.StrExpr, err = ss.StrExpr.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
ss.Delim, err = ss.Delim.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
if ss.Count == nil {
return ss, nil
}
ss.Count, err = ss.Count.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
return ss, nil
}
// VisitFunctionLocate implements Visitor interface.
func (bv *BaseVisitor) VisitFunctionLocate(ss *FunctionLocate) (Expression, error) {
var err error
ss.Str, err = ss.Str.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
ss.SubStr, err = ss.SubStr.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
if ss.Pos == nil {
return ss, nil
}
ss.Pos, err = ss.Pos.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
return ss, nil
}
// VisitIdent implements Visitor interface.
func (bv *BaseVisitor) VisitIdent(i *Ident) (Expression, error) {
return i, nil
}
// VisitIsNull implements Visitor interface.
func (bv *BaseVisitor) VisitIsNull(is *IsNull) (Expression, error) {
var err error
is.Expr, err = is.Expr.Accept(bv.V)
if err != nil {
return is, errors.Trace(err)
}
return is, nil
}
// VisitIsTruth implements Visitor interface.
func (bv *BaseVisitor) VisitIsTruth(is *IsTruth) (Expression, error) {
var err error
is.Expr, err = is.Expr.Accept(bv.V)
if err != nil {
return is, errors.Trace(err)
}
return is, nil
}
// VisitParamMaker implements Visitor interface.
func (bv *BaseVisitor) VisitParamMaker(pm *ParamMarker) (Expression, error) {
if pm.Expr == nil {
return pm, nil
}
var err error
pm.Expr, err = pm.Expr.Accept(bv.V)
if err != nil {
return pm, errors.Trace(err)
}
return pm, nil
}
// VisitPatternIn implements Visitor interface.
func (bv *BaseVisitor) VisitPatternIn(n *PatternIn) (Expression, error) {
var err error
n.Expr, err = n.Expr.Accept(bv.V)
if err != nil {
return n, errors.Trace(err)
}
if n.Sel != nil {
_, err = n.Sel.Accept(bv.V)
if err != nil {
return n, errors.Trace(err)
}
}
for i := range n.List {
n.List[i], err = n.List[i].Accept(bv.V)
if err != nil {
return n, errors.Trace(err)
}
}
return n, nil
}
// VisitPatternLike implements Visitor interface.
func (bv *BaseVisitor) VisitPatternLike(p *PatternLike) (Expression, error) {
var err error
p.Expr, err = p.Expr.Accept(bv.V)
if err != nil {
return p, errors.Trace(err)
}
p.Pattern, err = p.Pattern.Accept(bv.V)
if err != nil {
return p, errors.Trace(err)
}
return p, nil
}
// VisitPatternRegexp implements Visitor interface.
func (bv *BaseVisitor) VisitPatternRegexp(p *PatternRegexp) (Expression, error) {
var err error
p.Expr, err = p.Expr.Accept(bv.V)
if err != nil {
return p, errors.Trace(err)
}
p.Pattern, err = p.Pattern.Accept(bv.V)
if err != nil {
return p, errors.Trace(err)
}
return p, nil
}
// VisitPExpr implements Visitor interface.
func (bv *BaseVisitor) VisitPExpr(p *PExpr) (Expression, error) {
var err error
p.Expr, err = p.Expr.Accept(bv.V)
if err != nil {
return p, errors.Trace(err)
}
return p, nil
}
// VisitPosition implements Visitor interface.
func (bv *BaseVisitor) VisitPosition(p *Position) (Expression, error) {
return p, nil
}
// VisitRow implements Visitor interface.
func (bv *BaseVisitor) VisitRow(r *Row) (Expression, error) {
var err error
for i := range r.Values {
r.Values[i], err = r.Values[i].Accept(bv.V)
if err != nil {
return r, errors.Trace(err)
}
}
return r, nil
}
// VisitSubQuery implements Visitor interface.
func (bv *BaseVisitor) VisitSubQuery(sq SubQuery) (Expression, error) {
return sq, nil
}
// VisitUnaryOperation implements Visitor interface.
func (bv *BaseVisitor) VisitUnaryOperation(u *UnaryOperation) (Expression, error) {
var err error
u.V, err = u.V.Accept(bv.V)
if err != nil {
return u, errors.Trace(err)
}
return u, nil
}
// VisitValue implements Visitor interface.
func (bv *BaseVisitor) VisitValue(v Value) (Expression, error) {
return v, nil
}
// VisitValues implements Visitor interface.
func (bv *BaseVisitor) VisitValues(v *Values) (Expression, error) {
return v, nil
}
// VisitVariable implements Visitor interface.
func (bv *BaseVisitor) VisitVariable(v *Variable) (Expression, error) {
return v, nil
}
// VisitWhenClause implements Visitor interface.
func (bv *BaseVisitor) VisitWhenClause(w *WhenClause) (Expression, error) {
var err error
w.Expr, err = w.Expr.Accept(bv.V)
if err != nil {
return w, errors.Trace(err)
}
w.Result, err = w.Result.Accept(bv.V)
if err != nil {
return w, errors.Trace(err)
}
return w, nil
}
// VisitExtract implements Visitor interface.
func (bv *BaseVisitor) VisitExtract(v *Extract) (Expression, error) {
var err error
v.Date, err = v.Date.Accept(bv.V)
return v, errors.Trace(err)
}
// VisitFunctionTrim implements Visitor interface.
func (bv *BaseVisitor) VisitFunctionTrim(ss *FunctionTrim) (Expression, error) {
var err error
ss.Str, err = ss.Str.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
if ss.RemStr != nil {
ss.RemStr, err = ss.RemStr.Accept(bv.V)
if err != nil {
return ss, errors.Trace(err)
}
}
return ss, nil
}
// VisitDateArith implements Visitor interface.
func (bv *BaseVisitor) VisitDateArith(da *DateArith) (Expression, error) {
var err error
da.Date, err = da.Date.Accept(bv.V)
if err != nil {
return da, errors.Trace(err)
}
da.Interval, err = da.Interval.Accept(bv.V)
if err != nil {
return da, errors.Trace(err)
}
return da, nil
} | expression/visitor.go | 0.71889 | 0.487795 | visitor.go | starcoder |
package techan
import "github.com/sdcoffey/big"
// Position is a pair of two Order objects: the entrance order and the
// exit order, plus the stop-loss/take-profit price levels attached to
// the trade.
type Position struct {
	orders [2]*Order // orders[0] is the entrance order, orders[1] the exit order

	stopLossPrice   big.Decimal // price level at which the position should be closed at a loss
	takeProfitPrice big.Decimal // price level at which the position should be closed at a profit
}
// NewPosition returns a new Position with the passed-in order as the
// open (entrance) order, and the given stop-loss and take-profit levels.
func NewPosition(openOrder Order, slPrice, tpPrice big.Decimal) (t *Position) {
	t = &Position{
		stopLossPrice:   slPrice,
		takeProfitPrice: tpPrice,
	}
	// Store a pointer to the local copy of the order argument.
	t.orders[0] = &openOrder
	return t
}
// Enter sets the open order to the order passed in.
// Note: order is received by value, so a pointer to the local copy is
// stored.
func (p *Position) Enter(order Order) {
	p.orders[0] = &order
}

// Exit sets the exit order to the order passed in (by-value copy, as
// with Enter).
func (p *Position) Exit(order Order) {
	p.orders[1] = &order
}

// IsLong returns true if the entrance order is a buy order
func (p *Position) IsLong() bool {
	return p.EntranceOrder() != nil && p.EntranceOrder().Side == BUY
}

// IsShort returns true if the entrance order is a sell order
func (p *Position) IsShort() bool {
	return p.EntranceOrder() != nil && p.EntranceOrder().Side == SELL
}

// IsOpen returns true if there is an entrance order but no exit order
func (p *Position) IsOpen() bool {
	return p.EntranceOrder() != nil && p.ExitOrder() == nil
}

// IsClosed returns true of there are both entrance and exit orders
func (p *Position) IsClosed() bool {
	return p.EntranceOrder() != nil && p.ExitOrder() != nil
}

// IsNew returns true if there is neither an entrance or exit order
func (p *Position) IsNew() bool {
	return p.EntranceOrder() == nil && p.ExitOrder() == nil
}

// EntranceOrder returns the entrance order of this position (nil if the
// position has not been entered yet).
func (p *Position) EntranceOrder() *Order {
	return p.orders[0]
}

// ExitOrder returns the exit order of this position (nil while the
// position is still open).
func (p *Position) ExitOrder() *Order {
	return p.orders[1]
}
// CostBasis returns the price to enter this position
// (amount * entry price), or big.ZERO when the position has no
// entrance order.
func (p *Position) CostBasis() big.Decimal {
	entrance := p.EntranceOrder()
	if entrance == nil {
		return big.ZERO
	}
	return entrance.Amount.Mul(entrance.Price)
}

// ExitValue returns the value accrued by closing the position
// (amount * exit price), or big.ZERO while the position is not closed.
func (p *Position) ExitValue() big.Decimal {
	if !p.IsClosed() {
		return big.ZERO
	}
	exit := p.ExitOrder()
	return exit.Amount.Mul(exit.Price)
}
// ChangeStopLoss updates the stop-loss price level. The change is
// refused (returns false) once the position has been closed.
func (p *Position) ChangeStopLoss(newSLPrice big.Decimal) bool {
	if p.IsClosed() {
		return false
	}
	p.stopLossPrice = newSLPrice
	return true
}

// ChangeTakeProfit updates the take-profit price level. The change is
// refused (returns false) once the position has been closed.
func (p *Position) ChangeTakeProfit(newTPPrice big.Decimal) bool {
	if p.IsClosed() {
		return false
	}
	p.takeProfitPrice = newTPPrice
	return true
}
func (p *Position) ShouldClose(currentPrice big.Decimal) bool {
if !p.IsOpen() {
return true
}
if p.IsShort() {
return currentPrice.LTE(p.takeProfitPrice) || currentPrice.GTE(p.stopLossPrice)
} else {
return currentPrice.GTE(p.takeProfitPrice) || currentPrice.LTE(p.stopLossPrice)
}
} | position.go | 0.752104 | 0.433742 | position.go | starcoder |
package graph
import (
"sort"
"github.com/charypar/monobuild/set"
)
// Graph is a DAG with string labeled vertices and int colored edges.
// Every vertex has an entry in the edges map, even when it has no
// outgoing edges (see New, which normalises the input this way).
type Graph struct {
	edges map[string]Edges
}
// New creates a new Graph from a map of the shape
//   string: []Edge
// where Edge is a struct with a Label and a Colour. The input is copied
// and normalised so that every vertex mentioned as an edge target also
// has its own (possibly empty) key in the adjacency map.
func New(graph map[string][]Edge) Graph {
	edges := make(map[string]Edges)
	for vertex, outgoing := range graph {
		edges[vertex] = make(Edges, 0, len(outgoing))
		for _, edge := range outgoing {
			edges[vertex] = append(edges[vertex], edge)
			// Targets that are not keys of the input map still become
			// vertices of the graph.
			if _, present := graph[edge.Label]; !present {
				edges[edge.Label] = Edges{}
			}
		}
	}
	return Graph{edges}
}
// Vertices returns a full, sorted list of vertices in the graph.
func (g Graph) Vertices() []string {
	vertices := make([]string, 0, len(g.edges))
	for vertex := range g.edges {
		vertices = append(vertices, vertex)
	}
	sort.Strings(vertices)
	return vertices
}

// Children returns the sorted set of vertices that are connected to any
// of the given vertices by a direct edge.
func (g Graph) Children(vertices []string) []string {
	children := set.New([]string{})
	for _, parent := range vertices {
		outgoing, found := g.edges[parent]
		if !found {
			continue
		}
		for _, edge := range outgoing {
			children.Add(edge.Label)
		}
	}
	labels := children.AsStrings()
	sort.Strings(labels)
	return labels
}
// Descendants returns all the vertices x for which a path to x exists from any of
// the vertices given.
// It computes a breadth-first fixpoint: repeatedly expand the frontier of
// newly discovered vertices until no new vertex appears.
func (g Graph) Descendants(vertices []string) []string {
	descendants := set.New(g.Children(vertices))
	discovered := descendants
	for discovered.Size() > 0 {
		grandchildren := set.New(g.Children(discovered.AsStrings()))
		// Only vertices not already known are expanded again, which
		// guarantees termination even on cyclic inputs.
		discovered = grandchildren.Without(descendants)
		descendants = descendants.Union(discovered)
	}
	result := descendants.AsStrings()
	sort.Strings(result)
	return result
}
// Reverse returns a new graph with every edge reversed (edge colours
// are preserved).
func (g Graph) Reverse() Graph {
	reversed := make(map[string]Edges)

	// Iterate the vertices in sorted order so the reversed adjacency
	// lists are built deterministically.
	vertices := make([]string, 0, len(g.edges))
	for vertex := range g.edges {
		vertices = append(vertices, vertex)
	}
	sort.Strings(vertices)

	for _, vertex := range vertices {
		// The entry may already exist if an earlier iteration reversed
		// an edge pointing at this vertex; don't clobber it.
		if _, exists := reversed[vertex]; !exists {
			reversed[vertex] = Edges{}
		}
		for _, edge := range g.edges[vertex] {
			if _, exists := reversed[edge.Label]; !exists {
				reversed[edge.Label] = Edges{}
			}
			reversed[edge.Label] = append(reversed[edge.Label], Edge{vertex, edge.Colour})
		}
	}
	return Graph{reversed}
}
// Subgraph filters the graph down to only the nodes listed; edges whose
// endpoint is not in the list are dropped as well.
func (g Graph) Subgraph(nodes []string) Graph {
	keep := set.New(nodes)
	result := make(map[string]Edges, len(g.edges))
	for vertex, outgoing := range g.edges {
		if !keep.Has(vertex) {
			continue
		}
		kept := make(Edges, 0, len(outgoing))
		for _, edge := range outgoing {
			if keep.Has(edge.Label) {
				kept = append(kept, edge)
			}
		}
		result[vertex] = kept
	}
	return Graph{result}
}
// FilterEdges returns a new graph with edges with a colour not present in
// colours removed
func (g Graph) FilterEdges(colours []int) Graph {
filter := make(map[int]bool, len(colours))
for _, c := range colours {
filter[c] = true
}
filtered := make(map[string]Edges, len(g.edges))
for v, es := range g.edges {
_, ok := filtered[v]
if !ok {
filtered[v] = make([]Edge, 0, len(es))
}
for _, e := range es {
_, matches := filter[e.Colour]
if matches {
filtered[v] = append(filtered[v], e)
}
}
}
return Graph{filtered}
} | graph/graph.go | 0.781997 | 0.590189 | graph.go | starcoder |
package main
import (
"log"
"fmt"
"os"
)
// parabola evaluates the curve y = x^2 at x.
func parabola(x float64) float64 {
	squared := x * x
	return squared
}

// line evaluates the straight line y = 2 - x at x.
func line(x float64) float64 {
	return -x + 2
}
// main reads an (x, y) coordinate from stdin and logs whether the point
// falls inside the target region (sector C or sector D). The defaults
// (1, 1) remain if parsing the input fails.
func main() {
	var x float64 = 1.0
	var y float64 = 1.0
	log.Println("Enter X")
	fmt.Fscan(os.Stdin, &x)
	log.Println("Enter Y")
	fmt.Fscan(os.Stdin, &y)
	log.Println(isInTarget(x, y))
}
// testLine exercises isPositionUnderTheLine with one sample point from
// each region of interest, logging the region name before each check.
func testLine() {
	points := []struct {
		label string
		x, y  float64
	}{
		{"Sector A", -2, 1},
		{"Sector B", -3, 6},
		{"Sector C", -1, 2},
		{"Sector E", 0, 4},
		{"On the line", -1, 3},
	}
	for _, p := range points {
		log.Println(p.label)
		isPositionUnderTheLine(p.x, p.y)
	}
}
// testParabola exercises isPositionInsideTheParabola with one sample
// point from each region of interest, logging the region name first.
func testParabola() {
	points := []struct {
		label string
		x, y  float64
	}{
		{"Sector A", -2, 1},
		{"Sector B", -3, 6},
		{"Sector C", -1, 2},
		{"Sector E", 0, 4},
		{"On the parabola", 1, 1},
	}
	for _, p := range points {
		log.Println(p.label)
		isPositionInsideTheParabola(p.x, p.y)
	}
}
// testTamGdeNado exercises isInSectorC with one sample point from each
// region of interest, logging the region name before each check.
func testTamGdeNado() {
	points := []struct {
		label string
		x, y  float64
	}{
		{"Sector A", -2, 1},
		{"Sector B", -3, 6},
		{"Sector C", -1, 2},
		{"Sector E", 0, 4},
		{"Tam gde nado ", 1, 1},
	}
	for _, p := range points {
		log.Println(p.label)
		isInSectorC(p.x, p.y)
	}
}
// testSectorD exercises isInSectorD with sample points from every
// region, including the sector border and corner cases.
func testSectorD() {
	points := []struct {
		label string
		x, y  float64
	}{
		{"Sector A", -2, 1},
		{"Sector B", -3, 6},
		{"Sector C", -1, 2},
		{"Sector E", 0, 4},
		{"Sector D", 1, 0.5},
		{"Sector D Border", 1, 1},
		{"Sector D Angle", 2, 0},
	}
	for _, p := range points {
		log.Println(p.label)
		isInSectorD(p.x, p.y)
	}
}
// isInSectorD reports whether (x, y) lies in sector D: the first
// quadrant, on or under the line, and strictly outside the parabola.
// The verdict is logged as a side effect (and the helper predicates log
// their own verdicts when the short-circuit evaluation reaches them).
func isInSectorD(x, y float64) bool {
	inD := x >= 0 && y >= 0 && isPositionUnderTheLine(x, y) && !isPositionInsideTheParabola(x, y)
	if inD {
		log.Println("IS IN SECTOR D")
	} else {
		log.Println("-------------")
	}
	return inD
}

// isPositionUnderTheLine reports whether (x, y) is on or below the line
// y = 2 - x, logging the verdict.
func isPositionUnderTheLine(x, y float64) bool {
	below := line(x) >= y
	if below {
		log.Println("IS BELOW")
	} else {
		log.Println("IS ABOVE")
	}
	return below
}

// isPositionInsideTheParabola reports whether (x, y) is on or above the
// parabola y = x^2 (i.e. inside its cup), logging the verdict.
func isPositionInsideTheParabola(x, y float64) bool {
	inside := !(parabola(x) > y)
	if inside {
		log.Println("IS INSIDE")
	} else {
		log.Println("IS OUTSIDE")
	}
	return inside
}
func isInSectorC(x, y float64) bool {
if isPositionUnderTheLine(x, y) && isPositionInsideTheParabola(x, y) {
log.Println("IS IN SECTOR C")
return true
} else {
log.Println("-------------")
return false
}
}
func isInTarget(x, y float64) bool {
return isInSectorC(x, y) || isInSectorD(x, y)
} | sasha.go | 0.63409 | 0.47384 | sasha.go | starcoder |
package gobits
import (
"encoding/binary"
"math"
)
// pos tracks a bit-granular position inside the underlying byte store.
// Bits within a byte are addressed most-significant-bit first (see the
// lowerBits/higherBits usage in PeekBits/WriteBits).
type pos struct {
	byteOffset int64 // index of the current byte in the ByteAccessor
	bitOffset  byte  // bits already consumed in that byte, 0..7
}

// PosWrapper is an opaque saved stream position, produced by SavePos
// and consumed by RestorePos.
type PosWrapper struct {
	pos
}

// BitStream reads and writes individual bits on top of a ByteAccessor.
type BitStream struct {
	ba ByteAccessor // backing byte storage
	pos
}
// lowerBits returns the least-significant count bits of byt.
func lowerBits(byt, count byte) byte {
	mask := byte(1)<<count - 1
	return byt & mask
}

// higherBits returns the most-significant count bits of byt, shifted
// down to the low end of the result.
func higherBits(byt, count byte) byte {
	return byt >> (8 - count)
}

// highestByte returns the most significant byte of val.
func highestByte(val uint64) byte {
	return byte(val >> 56)
}

// countEffectiveBits returns the number of bits needed to represent
// val (0 for val == 0).
func countEffectiveBits(val uint64) byte {
	var bitCount byte
	for v := val; v != 0; v >>= 1 {
		bitCount++
	}
	return bitCount
}

// writePartialByte overlays the top srcBitCount bits of srcByte into
// dstByte starting at bit dstBitOffset (counted from the MSB), leaving
// the remaining bits of dstByte untouched.
func writePartialByte(srcByte byte, srcBitCount byte, dstByte byte, dstBitOffset byte) byte {
	mask := byte(0xff<<(8-srcBitCount)) >> dstBitOffset
	return (dstByte &^ mask) | (srcByte >> dstBitOffset)
}
// RemainingBits reports whether at least bitCount more bits are
// available from the current position. It probes the ByteAccessor byte
// by byte and does not advance the stream position.
func (bs *BitStream) RemainingBits(bitCount int64) bool {
	// Account for the bits already consumed in the current byte.
	bitCount += int64(bs.bitOffset)
	byteOffset := bs.byteOffset
	for bitCount > 0 {
		_, ok := bs.ba.At(byteOffset)
		if !ok {
			return false
		}
		byteOffset++
		bitCount -= 8
	}
	return true
}
// PeekBits reads the next bitCount bits (MSB first) without advancing
// the stream position. It returns false if bitCount exceeds 64 or the
// stream does not hold that many more bits.
func (bs *BitStream) PeekBits(bitCount byte) (uint64, bool) {
	if bitCount == 0 {
		return 0, true
	}
	if !bs.RemainingBits(int64(bitCount)) || bitCount > 64 {
		return 0, false
	}
	byteOffset := bs.byteOffset
	// Unconsumed bits left in the current byte.
	remainingBitsInCurrByte := 8 - bs.bitOffset
	byt, ok := bs.ba.At(byteOffset)
	if !ok {
		return 0, false
	}
	// Strip the bits that were already consumed from the current byte.
	bits_ := lowerBits(byt, remainingBitsInCurrByte)
	byteOffset++
	if bitCount < remainingBitsInCurrByte {
		// The request fits entirely inside the current byte: keep only
		// its top bitCount bits.
		return uint64(higherBits(bits_, bs.bitOffset+bitCount)), true
	}
	bits := uint64(bits_)
	bitCount -= remainingBitsInCurrByte
	// Accumulate whole middle bytes.
	for bitCount >= 8 {
		byt, ok := bs.ba.At(byteOffset)
		if !ok {
			return 0, false
		}
		bits = (bits << 8) | uint64(byt)
		byteOffset++
		bitCount -= 8
	}
	// Trailing partial byte: take its top bitCount bits.
	if bitCount > 0 {
		byt, ok := bs.ba.At(byteOffset)
		if !ok {
			return 0, false
		}
		bits = (bits << bitCount) | uint64(higherBits(byt, bitCount))
	}
	return bits, true
}
// ConsumeBits advances the stream position by bitCount bits, returning
// false (and leaving the position unchanged) when the stream does not
// hold that many more bits.
func (bs *BitStream) ConsumeBits(bitCount int64) bool {
	if !bs.RemainingBits(bitCount) {
		return false
	}
	total := int64(bs.bitOffset) + bitCount
	bs.byteOffset += total / 8
	bs.bitOffset = byte(total % 8)
	return true
}

// ConsumeBytes advances the stream position by byteCount whole bytes.
func (bs *BitStream) ConsumeBytes(byteCount int64) bool {
	return bs.ConsumeBits(8 * byteCount)
}

// ReadBits reads the next bitCount bits (MSB first) and advances the
// stream position past them.
func (bs *BitStream) ReadBits(bitCount byte) (uint64, bool) {
	bits, ok := bs.PeekBits(bitCount)
	if !ok || !bs.ConsumeBits(int64(bitCount)) {
		return 0, false
	}
	return bits, true
}
// ReadUint8 reads the next 8 bits as an unsigned byte.
func (bs *BitStream) ReadUint8() (uint8, bool) {
	v, ok := bs.ReadBits(8)
	return uint8(v), ok
}

// ReadUint16 reads the next 16 bits and interprets them in the given
// byte order.
func (bs *BitStream) ReadUint16(bo binary.ByteOrder) (uint16, bool) {
	bits, ok := bs.ReadBits(16)
	var raw [2]byte
	binary.BigEndian.PutUint16(raw[:], uint16(bits))
	return bo.Uint16(raw[:]), ok
}

// ReadUint32 reads the next 32 bits and interprets them in the given
// byte order.
func (bs *BitStream) ReadUint32(bo binary.ByteOrder) (uint32, bool) {
	bits, ok := bs.ReadBits(32)
	var raw [4]byte
	binary.BigEndian.PutUint32(raw[:], uint32(bits))
	return bo.Uint32(raw[:]), ok
}

// ReadUint64 reads the next 64 bits and interprets them in the given
// byte order.
func (bs *BitStream) ReadUint64(bo binary.ByteOrder) (uint64, bool) {
	bits, ok := bs.ReadBits(64)
	var raw [8]byte
	binary.BigEndian.PutUint64(raw[:], bits)
	return bo.Uint64(raw[:]), ok
}
// Seek moves the stream position to the given byte and bit offset. The
// target byte must exist in the underlying storage and the bit offset
// must be in [0, 8); otherwise the position is left unchanged and false
// is returned.
func (bs *BitStream) Seek(byteOffset int64, bitOffset byte) bool {
	_, ok := bs.ba.At(byteOffset)
	if ok && bitOffset < 8 {
		bs.byteOffset = byteOffset
		bs.bitOffset = bitOffset
		return true
	}
	return false
}

// SavePos snapshots the current stream position for a later RestorePos.
func (bs *BitStream) SavePos() PosWrapper {
	return PosWrapper{bs.pos}
}

// RestorePos moves the stream back to a position previously captured
// with SavePos. The position is not re-validated.
func (bs *BitStream) RestorePos(pw PosWrapper) {
	bs.pos = pw.pos
}

// ResetPos rewinds the stream to the very first bit.
func (bs *BitStream) ResetPos() {
	bs.pos = pos{
		byteOffset: 0,
		bitOffset:  0,
	}
}
// ReadExponentialGolomb decodes one unsigned Exp-Golomb (ue(v)) value:
// it counts leading zero bits, then reads that many + 1 value bits and
// returns the result minus one. On any failure (stream exhausted, or a
// value wider than 64 bits) the stream position is restored and false
// is returned. Rewritten without goto for clarity; behavior unchanged.
func (bs *BitStream) ReadExponentialGolomb() (uint64, bool) {
	savedByteOffset := bs.byteOffset
	savedBitOffset := bs.bitOffset
	restore := func() (uint64, bool) {
		bs.Seek(savedByteOffset, savedBitOffset)
		return 0, false
	}

	// Count the leading zero prefix.
	zeroBitCount := 0
	for {
		bit, ok := bs.PeekBits(1)
		if !ok {
			return restore()
		}
		if bit != 0 {
			break
		}
		zeroBitCount++
		bs.ConsumeBits(1)
	}

	// The value occupies zeroBitCount+1 bits (including the stop bit).
	valueBitCount := zeroBitCount + 1
	if valueBitCount > 64 {
		return restore()
	}
	val, ok := bs.ReadBits(byte(valueBitCount))
	if !ok {
		return restore()
	}
	return val - 1, true
}
// ReadSignedExponentialGolomb decodes one signed Exp-Golomb (se(v))
// value: even codes map to non-positive numbers, odd codes to positive.
func (bs *BitStream) ReadSignedExponentialGolomb() (int64, bool) {
	val, ok := bs.ReadExponentialGolomb()
	if !ok {
		return 0, false
	}
	if val%2 == 0 {
		return -int64(val / 2), true
	}
	return int64(val+1) / 2, true
}
// WriteBits writes the low bitCount bits of val at the current stream
// position, MSB first, merging with existing bits at the partial edges,
// and advances the position past what was written. Returns false when
// bitCount exceeds 64 or the stream cannot hold that many more bits.
func (bs *BitStream) WriteBits(val uint64, bitCount byte) bool {
	if bitCount == 0 {
		return true
	}
	if !bs.RemainingBits(int64(bitCount)) || bitCount > 64 {
		return false
	}
	consumeBits := int64(bitCount)
	// Left-align the payload so the first bit to emit sits in the MSB.
	val <<= 64 - uint64(bitCount)
	remainingBitsInCurrByte := 8 - bs.bitOffset
	bitsInFirstByte := bitCount
	if bitsInFirstByte > remainingBitsInCurrByte {
		bitsInFirstByte = remainingBitsInCurrByte
	}
	dstByteOffset := bs.byteOffset
	dstByte, ok := bs.ba.At(dstByteOffset)
	if !ok {
		return false
	}
	// Staging buffer covering every byte the write touches; it is
	// flushed to the ByteAccessor with a single Put at the end.
	bytes := make([]byte, (bs.bitOffset+bitCount+7)/8)
	// First byte: preserve the bits before bs.bitOffset.
	bytes[0] = writePartialByte(highestByte(val), bitsInFirstByte, dstByte, bs.bitOffset)
	if bitCount <= remainingBitsInCurrByte {
		goto fin
	}
	val <<= bitsInFirstByte
	bitCount -= bitsInFirstByte
	// Whole middle bytes.
	for bitCount >= 8 {
		dstByteOffset++
		bytes[dstByteOffset-bs.byteOffset] = highestByte(val)
		val <<= 8
		bitCount -= 8
	}
	// Trailing partial byte: preserve the existing low bits.
	if bitCount > 0 {
		dstByteOffset++
		dstByte, ok := bs.ba.At(dstByteOffset)
		if !ok {
			return false
		}
		bytes[dstByteOffset-bs.byteOffset] = writePartialByte(highestByte(val), bitCount, dstByte, 0)
	}
fin:
	if !bs.ba.Put(bytes, bs.byteOffset) {
		return false
	}
	return bs.ConsumeBits(consumeBits)
}
// WriteUint8 writes val as the next 8 bits.
func (bs *BitStream) WriteUint8(val uint8) bool {
	return bs.WriteBits(uint64(val), 8)
}

// WriteUint16 writes val as 16 bits, laid out in the given byte order.
func (bs *BitStream) WriteUint16(val uint16, bo binary.ByteOrder) bool {
	var raw [2]byte
	binary.BigEndian.PutUint16(raw[:], val)
	return bs.WriteBits(uint64(bo.Uint16(raw[:])), 16)
}

// WriteUint32 writes val as 32 bits, laid out in the given byte order.
func (bs *BitStream) WriteUint32(val uint32, bo binary.ByteOrder) bool {
	var raw [4]byte
	binary.BigEndian.PutUint32(raw[:], val)
	return bs.WriteBits(uint64(bo.Uint32(raw[:])), 32)
}

// WriteUint64 writes val as 64 bits, laid out in the given byte order.
func (bs *BitStream) WriteUint64(val uint64, bo binary.ByteOrder) bool {
	var raw [8]byte
	binary.BigEndian.PutUint64(raw[:], val)
	return bs.WriteBits(bo.Uint64(raw[:]), 64)
}
// WriteExponentialGolomb encodes val as an unsigned Exp-Golomb (ue(v))
// code: val+1 written in 2*bits(val+1)-1 bits. math.MaxUint64 cannot be
// represented and returns false.
func (bs *BitStream) WriteExponentialGolomb(val uint64) bool {
	if val == math.MaxUint64 {
		return false
	}
	val++
	return bs.WriteBits(val, countEffectiveBits(val)*2-1)
}

// WriteSignedExponentialGolomb encodes val as a signed Exp-Golomb
// (se(v)) code: positive values map to odd codes, negative to even.
// math.MinInt64 cannot be negated and returns false.
func (bs *BitStream) WriteSignedExponentialGolomb(val int64) bool {
	switch {
	case val == 0:
		return bs.WriteExponentialGolomb(0)
	case val > 0:
		return bs.WriteExponentialGolomb(uint64(val)*2 - 1)
	case val == math.MinInt64:
		return false
	default:
		return bs.WriteExponentialGolomb(uint64(-val) * 2)
	}
}
func NewBitStream(ba ByteAccessor) *BitStream {
return &BitStream{
ba: ba,
pos: pos{
byteOffset: 0,
bitOffset: 0,
},
}
} | bitstream.go | 0.596433 | 0.408631 | bitstream.go | starcoder |
package strformat
import (
"math"
"strconv"
"strings"
)
/*
StrFormat package -> Numerals
ver 1.0 - 2019-03-18
by <NAME>
This package contains useful function to print number to text
*/
// Numeral describes how numbers (and currency amounts) are spelled out
// in a particular language; see NumeralCreateEnglish and
// NumeralCreateIndonesian for ready-made configurations.
type Numeral struct {
	// SplitDigit is how many digits are taken per group before the conversion repeats
	SplitDigit int
	// Conversion converts a single digit to text, example: 1 -> one. Only works for 1-9
	Conversion map[int]string
	// ZeroConversion converts zero value (0) to this string
	ZeroConversion string
	// LiteralConversion converts a number to text when the value matches a group or group-mod result, example: 11 -> eleven
	LiteralConversion map[int]string
	// DigitNames is the name of the digit positions, example: 100 -> hundred (position at mod 100 is hundred)
	DigitNames map[int]string
	// GroupNames is the name of the group based on splitted digits, example: 1 -> thousand (group at index 1 is thousand)
	GroupNames map[int]string
	// PointConversion is the word for the decimal point
	PointConversion string
	// Correction maps substrings to fixed-up strings, example: two ty -> twenty
	Correction map[string]string
	// CurrencyName is the name of the currency
	CurrencyName string
	// CurrencyPointName is the name of the currency decimal point unit, example: cent
	CurrencyPointName string
	// CurrencyPointConversion is the joiner of the currency decimal point, example: and
	CurrencyPointConversion string
	// CurrencyPointLength is the number of decimal digits of the currency
	CurrencyPointLength int
}
// ConvertCurrency spells out a monetary value in the configured
// language, e.g. 12.50 -> "twelve and fifty cents" plus the currency
// name when one is configured.
func (n *Numeral) ConvertCurrency(value float64) string {
	strVal := strconv.FormatFloat(value, 'f', n.CurrencyPointLength, 64)
	spl := strings.Split(strVal, ".")
	digits := spl[0]
	points := ""
	if len(spl) == 2 {
		points = spl[1]
	}

	// Spell out the whole-unit part.
	digitInt, e := strconv.Atoi(digits)
	res := ""
	if e == nil {
		res = n.Convert(float64(digitInt), 0)
	}
	res = strings.Trim(res, " \t")

	// Spell out the fractional (cent) part, padded/truncated to the
	// configured number of decimal places.
	if points != "" {
		points = points[0:int(math.Min(float64(len(points)), float64(n.CurrencyPointLength)))]
		for len(points) < n.CurrencyPointLength {
			points += "0"
		}
		ptInt, e := strconv.Atoi(points)
		if e == nil && ptInt != 0 {
			res += " " + n.CurrencyPointConversion + " " + n.Convert(float64(ptInt), 0)
			if n.CurrencyPointName != "" {
				res += " " + n.CurrencyPointName
			}
		}
	}
	if n.CurrencyName != "" {
		res += " " + n.CurrencyName
	}
	// BUG FIX: the currency name was previously appended a second,
	// unconditional time here (`return res + n.CurrencyName`), producing
	// e.g. "... rupiahrupiah".
	return res
}
// Convert spells out value with prec decimal digits, e.g.
// 123.4 -> "one hundred twenty three point four".
// NOTE(review): an integer part of exactly 0 currently yields an empty
// string (ZeroConversion is only used for decimal digits) — confirm
// whether that is intended.
func (n *Numeral) Convert(value float64, prec int) string {
	strVal := strconv.FormatFloat(value, 'f', prec, 64)
	spl := strings.Split(strVal, ".")
	digits := spl[0]
	points := ""
	if len(spl) == 2 {
		points = spl[1]
	}

	// Spell out the integer part group by group (units, thousands,
	// millions, ...), walking the digits right-to-left.
	res := ""
	group := ""
	gIdx := 0
	for i := len(digits) - 1; i >= 0; i-- {
		group = digits[i:i+1] + group
		if len(group) == n.SplitDigit || i == 0 {
			groupStr := n.groupConvert(group)
			groupName, ok := n.GroupNames[gIdx]
			if ok && groupName != "" && groupStr != "" {
				groupStr += " " + groupName
			}
			if groupStr != "" {
				res = groupStr + " " + res
			}
			group = ""
			gIdx++
		}
	}
	res = strings.Trim(res, " \t")

	// Spell out the decimal digits one by one.
	if points != "" {
		ptWord := ""
		for i := 0; i < len(points); i++ {
			pt, e := strconv.Atoi(points[i : i+1])
			// BUG FIX: a zero digit used to fall through to
			// n.Conversion[0], which is empty (the table only covers
			// 1-9), so e.g. "1.05" silently lost its zero. Use
			// ZeroConversion for 0 and for unparsable characters.
			if e == nil && pt != 0 {
				ptWord += " " + n.Conversion[pt]
			} else {
				ptWord += " " + n.ZeroConversion
			}
		}
		ptWord = strings.Trim(ptWord, " ")
		if ptWord != "" {
			res += " " + n.PointConversion + " " + ptWord
		}
	}
	return res
}
// groupConvert spells out a single group of up to SplitDigit digits
// (e.g. "123" -> "one hundred twenty three"). It returns "" when the
// group is not numeric.
func (n *Numeral) groupConvert(group string) string {
	res := ""
	grVal, e := strconv.Atoi(group)
	if e != nil {
		return res
	}
	// Walk the digit positions from the most significant down to tens.
	for i := n.SplitDigit - 1; i >= 1; i-- {
		base := int(math.Pow(10, float64(i)))
		rem := grVal % base
		quo := (grVal / base) % 10
		quoLit := quo * base
		// Left side: either a whole-position literal (e.g. "seratus")
		// or digit word + position name (e.g. "two hundred").
		ql, ok := n.LiteralConversion[quoLit]
		if ok {
			res += " " + ql
		} else {
			if quo != 0 {
				res += " " + n.Conversion[quo] + " " + n.DigitNames[base]
			}
		}
		// Right side.
		if i == 1 {
			// Last position: spell the units digit directly.
			res += " " + n.Conversion[rem]
		} else {
			// A literal for the remainder (e.g. the teens) finishes the
			// whole group.
			v, ok := n.LiteralConversion[rem]
			if ok {
				res += " " + v
				break
			}
		}
	}
	// Apply language-specific fix-ups such as "two ty" -> "twenty".
	for key, cor := range n.Correction {
		res = strings.Replace(res, key, cor, -1)
	}
	return strings.Trim(res, " \t")
}
// NumeralCreateIndonesian creates a Numeral configured for the
// Indonesian language (groups of three digits, rupiah currency).
func NumeralCreateIndonesian() *Numeral {
	num := Numeral{
		SplitDigit:     3,
		ZeroConversion: "nol",
		Conversion: map[int]string{
			1: "satu",
			2: "dua",
			3: "tiga",
			4: "empat",
			5: "lima",
			6: "enam",
			7: "tujuh",
			8: "delapan",
			9: "sembilan",
		},
		// Irregular forms: the teens and "seratus" (one hundred).
		LiteralConversion: map[int]string{
			10:  "sepuluh",
			11:  "sebelas",
			12:  "dua belas",
			13:  "tiga belas",
			14:  "empat belas",
			15:  "lima belas",
			16:  "enam belas",
			17:  "tujuh belas",
			18:  "delapan belas",
			19:  "sembilan belas",
			100: "seratus",
		},
		DigitNames: map[int]string{
			10:  "puluh",
			100: "ratus",
		},
		GroupNames: map[int]string{
			0: "",
			1: "ribu",
			2: "juta",
			3: "miliar",
			4: "trilyun",
		},
		Correction: map[string]string{
			"satu ribu": "seribu",
		},
		PointConversion:         "koma",
		CurrencyName:            "rupiah",
		CurrencyPointConversion: "dan",
		CurrencyPointName:       "sen",
		CurrencyPointLength:     2,
	}
	return &num
}
// NumeralCreateEnglish creates numeral struct for English language
func NumeralCreateEnglish() *Numeral {
num := Numeral{
SplitDigit: 3,
ZeroConversion: "zero",
Conversion: map[int]string{
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
},
LiteralConversion: map[int]string{
10: "ten",
11: "eleven",
12: "twelve",
13: "thirteen",
14: "fourteen",
15: "fifteen",
16: "sixteen",
17: "seventeen",
18: "eighteen",
19: "nineteen",
},
DigitNames: map[int]string{
10: "ty",
100: "hundred",
},
GroupNames: map[int]string{
0: "",
1: "thousand",
2: "million",
3: "billion",
4: "trillion",
},
Correction: map[string]string{
"two ty": "twenty",
"three ty": "thirty",
"four ty": "fourty",
"five ty": "fifty",
"six ty": "sixty",
"seven ty": "seventy",
"eight ty": "eighty",
"nine ty": "ninety",
},
PointConversion: "point",
CurrencyName: "",
CurrencyPointConversion: "and",
CurrencyPointName: "cents",
CurrencyPointLength: 2,
}
return &num
} | strformat/numerals.go | 0.586996 | 0.580887 | numerals.go | starcoder |
package model
import (
"fmt"
"math/rand"
"time"
)
/***** Constants *****/
const (
	// BoardSize is the default size (rows and columns) of the board.
	BoardSize = 4
)

/***** Types *****/

// Tile represents a single tile value on the board (0 means empty).
type Tile uint32

// Score represents the user's score.
type Score uint32

// Grid represents tiles on the game board.
type Grid [BoardSize][BoardSize]Tile

// Coordinate is a convenience structure that stores a (row,col) pairing.
type Coordinate struct {
	Row int
	Col int
}

/*
DrawTile is a callback that renders a single tile when called by
`RenderBoard()`.

@param pos   Row, column position in the board.
@param isEOL Flag indicates if this is the last column drawn in a row.
@param tile  Value of the tile at this position
*/
type DrawTile func(pos Coordinate, isEOL bool, tile Tile)

// Board is the primary structure that represents the game's state.
type Board struct {
	// Current tile layout
	grid Grid
	// Game's current score.
	score Score
	// Random number generator, seeded once per game (see NewBoard).
	random *rand.Rand
}

// moveBoard performs one iteration of the move process; the concrete
// closure depends on the move direction.
type moveBoard func()
/***** Functions *****/
// NewBoard constructs a new board to play with: score zeroed, a fresh
// per-game random generator, and the two starting tiles placed.
func NewBoard() *Board {
	// Seed one generator per game so we don't rebuild it for every
	// random value we need.
	b := &Board{
		score:  0,
		random: rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	b.initBoard()
	return b
}
/***** Internal Members *****/
/*
Generates a new random tile and places it on an empty board position,
if one exists.
*/
func (b *Board) generateTile() {
	// New tiles are worth 2 three times out of four, otherwise 4.
	tileValue := Tile(2)
	if b.random.Intn(4) == 0 {
		tileValue = 4
	}

	// The board is small, so scan it fully and collect the empty cells.
	// Coordinates are stored by value (the original allocated a pointer
	// per candidate for no benefit).
	empty := make([]Coordinate, 0, BoardSize*BoardSize)
	for row := 0; row < BoardSize; row++ {
		for col := 0; col < BoardSize; col++ {
			if b.grid[row][col] == 0 {
				empty = append(empty, Coordinate{row, col})
			}
		}
	}

	// Only add a new tile if the board has an open position.
	if len(empty) > 0 {
		pos := empty[b.random.Intn(len(empty))]
		b.grid[pos.Row][pos.Col] = tileValue
	}
}
/*
Initializes the new board by placing the two starting tiles at random
empty positions.
*/
func (b *Board) initBoard() {
	// Two tiles are randomly placed on the empty board
	b.generateTile()
	b.generateTile()
}
/*
Helper function that centralizes the move logic for all 4 moves, handling
the accumulation of values and the current score.

@param curPos  Current board position being examined
@param nextPos Next board position in the direction of the move. This is the
               position that is being accumulated into.
*/
func (b *Board) calcMove(curPos Coordinate, nextPos Coordinate) {
	curValue := b.grid[curPos.Row][curPos.Col]
	nextValue := b.grid[nextPos.Row][nextPos.Col]
	// If the other value is 0, move the current value in
	if b.grid[nextPos.Row][nextPos.Col] == 0 {
		b.grid[nextPos.Row][nextPos.Col] = curValue
		b.grid[curPos.Row][curPos.Col] = 0
	} else if curValue == nextValue {
		// If the values are equal, accumulate
		b.grid[nextPos.Row][nextPos.Col] *= 2
		b.grid[curPos.Row][curPos.Col] = 0
		// Score increments with the value accumulated.
		// NOTE(review): this adds the pre-merge value (v); classic 2048
		// scoring adds the merged value (2*v) — confirm which is
		// intended.
		b.score += Score(nextValue)
	}
}
/*
Helper function that de-dupes core move logic from directional iterations.
To quote my alma mater, "Make Moves, Son!"

@param move Helper lambda that iterates over the board in the desired
            direction.
*/
func (b *Board) makeMove(move moveBoard) {
	// Repeat the accumulation process until all positions move as far as they
	// can.
	// NOTE(review): running BoardSize-1 full passes allows a tile produced by
	// a merge to merge again within the same move (e.g. 2,2,4 collapsing to
	// 8), which differs from standard 2048 rules — confirm whether intended.
	for i := 1; i < BoardSize; i++ {
		move()
	}
	// Every move generates a tile, if possible
	b.generateTile()
}
/***** Members *****/
// GetDisplayScore renders the current score as a fixed-width,
// right-aligned display string.
func (b *Board) GetDisplayScore() string {
	return fmt.Sprintf("Score: %10d", b.score)
}

// RenderBoard walks the grid in row-major order, handing every tile to
// the draw callback and flagging the last column of each row.
func (b *Board) RenderBoard(draw DrawTile) {
	for row := 0; row < BoardSize; row++ {
		for col := 0; col < BoardSize; col++ {
			lastInRow := col+1 == BoardSize
			draw(Coordinate{row, col}, lastInRow, b.grid[row][col])
		}
	}
}
/*
IsEndGame determines if the game has ended.

The game is over only when (1) the board is completely filled AND
(2) no two adjacent tiles hold the same value (no merge is possible).

@return True if the game ended. False otherwise.
*/
func (b *Board) IsEndGame() bool {
	boundSize := BoardSize - 1
	// To end the game:
	// 1) The board must be filled.
	// 2) There are no 2 adjacent tiles with the same value.
	for row := 0; row < BoardSize; row++ {
		for col := 0; col < BoardSize; col++ {
			value := b.grid[row][col]
			// Board is not filled
			if value == 0 {
				return false
			}
			// Check surrounding positions for equivalent values
			if (row > 0) && (value == b.grid[row-1][col]) {
				return false
			}
			if (row < boundSize) && (value == b.grid[row+1][col]) {
				return false
			}
			if (col > 0) && (value == b.grid[row][col-1]) {
				return false
			}
			if (col < boundSize) && (value == b.grid[row][col+1]) {
				return false
			}
		}
	}
	return true
}
/*
MoveLeft moves tiles to the left.
*/
func (b *Board) MoveLeft() {
	b.makeMove(func() {
		// Scan left-to-right so values cascade toward column 0.
		for row := 0; row < BoardSize; row++ {
			for col := 1; col < BoardSize; col++ {
				b.calcMove(Coordinate{row, col}, Coordinate{row, col - 1})
			}
		}
	})
}

/*
MoveRight moves tiles to the right.
*/
func (b *Board) MoveRight() {
	b.makeMove(func() {
		// Scan right-to-left so values cascade toward the last column.
		for row := 0; row < BoardSize; row++ {
			for col := BoardSize - 2; col >= 0; col-- {
				b.calcMove(Coordinate{row, col}, Coordinate{row, col + 1})
			}
		}
	})
}

/*
MoveUp moves tiles up.
*/
func (b *Board) MoveUp() {
	b.makeMove(func() {
		// Scan top-to-bottom so values cascade toward row 0.
		for row := 1; row < BoardSize; row++ {
			for col := 0; col < BoardSize; col++ {
				b.calcMove(Coordinate{row, col}, Coordinate{row - 1, col})
			}
		}
	})
}

/*
MoveDown moves tiles down.
*/
func (b *Board) MoveDown() {
	b.makeMove(func() {
		// Scan bottom-to-top so values cascade toward the last row.
		for row := BoardSize - 2; row >= 0; row-- {
			for col := 0; col < BoardSize; col++ {
				b.calcMove(Coordinate{row, col}, Coordinate{row + 1, col})
			}
		}
	})
}
package rgo
// CreateZeros creates a matrix of the given dimensions in which every
// element is 0. If the given dimensions are nonsensical (non-positive)
// it returns an InvalidIndex error.
func CreateZeros(Nrow, Ncol int) (*Matrix, error) {
	if Nrow <= 0 || Ncol <= 0 {
		return nil, InvalidIndex
	}
	return &Matrix{
		Nrow: Nrow,
		Ncol: Ncol,
		Data: make([]float64, Nrow*Ncol),
	}, nil
}
// CreateIdentity creates an identity matrix (square by definition) of
// the given dimension: all zeros except for a 1 in each diagonal
// element. An impossible size yields an InvalidIndex error from
// CreateZeros.
func CreateIdentity(size int) (*Matrix, error) {
	m, err := CreateZeros(size, size)
	if err != nil {
		return m, err
	}
	// Fill in the diagonal.
	for i := 0; i < size; i++ {
		if err := m.SetInd(i, i, 1); err != nil {
			return m, err
		}
	}
	return m, nil
}
// NewMatrix creates a new matrix given a vector of data. The number of rows and columns must be provided, and it
// assumes the data is already in the order a Matrix should be, with column indexes adjacent. In other
// words, the data vector should be a concatenation of several vectors, one for each column. NewMatrix makes a copy
// of the input slice, so that changing the slice later will not affect the data in the matrix. If the provided
// dimensions don't match the length of the provided data, an ImpossibleMatrix error will be returned.
func NewMatrix(Nrow, Ncol int, data []float64) (*Matrix, error) {
	// NOTE(review): unlike CreateZeros (which rejects dimensions <= 0),
	// zero-sized dimensions are accepted here — confirm whether empty
	// matrices are intentionally allowed.
	if Nrow < 0 || Ncol < 0 {
		return nil, InvalidIndex
	}
	if len(data) != Nrow*Ncol {
		return nil, ImpossibleMatrix
	}
	outMat := &Matrix{Nrow: Nrow, Ncol: Ncol}
	outMat.Data = make([]float64, Nrow*Ncol)
	copy(outMat.Data, data)
	return outMat, nil
}
// CopyMatrix creates an exact, independent copy of an existing matrix:
// the output can be changed without affecting the input and vice versa.
func CopyMatrix(in Matrix) (out Matrix) {
	out.Nrow = in.Nrow
	out.Ncol = in.Ncol
	out.Data = make([]float64, len(in.Data))
	// The builtin copy replaces the previous element-by-element loop.
	copy(out.Data, in.Data)
	return out
}
// CreateTranspose creates a new matrix which is a transpose of the input matrix. The output matrix is created from
// a copy of the input matrix such that they can be altered independently.
func (m *Matrix) CreateTranspose() *Matrix {
	// create the matrix with the new dimensions
	mt := Matrix{Nrow: m.Ncol, Ncol: m.Nrow, Data: make([]float64, len(m.Data))}
	// each row of the new matrix is a column of the old one
	// NOTE(review): the errors from GetCol and SetRow are discarded;
	// since the dimensions are derived from m they presumably cannot
	// fail here, but confirm against their implementations.
	for i := 0; i < m.Ncol; i++ {
		rowToSet, _ := m.GetCol(i)
		mt.SetRow(i, rowToSet)
	}
	return &mt
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.