code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"os"
"regexp"
"strconv"
)
// FindCycleAndSimulate compute the given system's total energy after the
// requested number of steps along with the step count needed to reach a state
// that exactly matches a previous state.
//
// The three axes evolve independently of each other, so the cycle is detected
// per axis and the full-system cycle length is the least common multiple of
// the three per-axis cycle lengths.
func FindCycleAndSimulate(system *System, steps int) (energy, count int) {
	// map an axis state to its step number
	seen := make(map[string]int)
	// cycle step count for x, y, and z axis.
	var cycle [3]int
	// getters for each axis of a Vec3d, allowing us to loop through axis.
	axis := []func(Vec3d) int{
		func(v Vec3d) int { return v.x },
		func(v Vec3d) int { return v.y },
		func(v Vec3d) int { return v.z },
	}
	// buf is reused across iterations to build state keys without
	// re-allocating.
	var buf bytes.Buffer
	for i := 0; ; i++ {
		// Compute the total system energy after the request number of steps.
		if i == steps {
			energy = system.TotalEnergy()
		}
		// compute each axis current state (as they are independent from each
		// other) in the search for a previously seen one so that we can detect
		// the cycle step count for this axis.
		for a, get := range axis {
			if cycle[a] > 0 {
				// we already know the cycle step count for this axis, so we
				// may skip the computation.
				continue
			}
			// start each state with the axis index. We do so in order to avoid
			// a collision with the same state from another axis.
			buf.Reset()
			buf.WriteString(strconv.Itoa(a))
			// write each moon's current axis position and velocity into the
			// state, each prefixed with a ',' separator. Without an explicit
			// separator two different states could build the same key (for
			// example values 1,23 and 12,3 would both yield "123") and be
			// wrongly reported as a cycle.
			for _, m := range system.Moons {
				buf.WriteByte(',')
				buf.WriteString(strconv.Itoa(get(m.pos)))
				buf.WriteByte(',')
				buf.WriteString(strconv.Itoa(get(m.vel)))
			}
			state := buf.String()
			if n, ok := seen[state]; ok {
				// we found the same state from a previous computation, now we
				// can get the cycle step count.
				cycle[a] = i - n
			} else {
				// store this state for later searches.
				seen[state] = i
			}
		}
		// we're done when we've found the cycle step count for each axis and
		// also having reached at least the request number of steps to compute
		// the total system energy.
		if cycle[0] > 0 && cycle[1] > 0 && cycle[2] > 0 && i >= steps {
			count = lcm3(cycle[0], cycle[1], cycle[2])
			break
		}
		// advance the simulation.
		system.Simulate(1)
	}
	return
}
// main computes and displays the total energy in the system given on stdin
// after 1000 steps, along with the number of steps it takes to reach the
// first state that exactly matches a previous state.
func main() {
	const steps = 1000 // see README.md
	sys, err := Parse(os.Stdin)
	if err != nil {
		log.Fatalf("input error: %s\n", err)
	}
	energy, cycle := FindCycleAndSimulate(sys, steps)
	fmt.Printf("The total energy in the system after %d steps is %d,\n", steps, energy)
	fmt.Printf("and it take %d steps to reach a cycle.\n", cycle)
}
// Parse reads the positions of the four largest moons of Jupiter (Io,
// Europa, Ganymede, and Callisto), one "<x=…, y=…, z=…>" line per moon.
// It returns a system containing the moons and any read or parsing error
// encountered.
func Parse(r io.Reader) (*System, error) {
	// strict regexp but that's good enough.
	posRegexp, err := regexp.Compile(`^<x=(-?\d+), y=(-?\d+), z=(-?\d+)>$`)
	if err != nil {
		return nil, err
	}
	var system System
	sc := bufio.NewScanner(r)
	for sc.Scan() {
		groups := posRegexp.FindStringSubmatch(sc.Text())
		if groups == nil {
			return nil, fmt.Errorf("invalid position: %s", sc.Text())
		}
		// conversion errors are ignored: the regexp has already ensured
		// every captured group is a valid integer.
		x, _ := strconv.Atoi(groups[1])
		y, _ := strconv.Atoi(groups[2])
		z, _ := strconv.Atoi(groups[3])
		system.Moons = append(system.Moons, NewMoon(x, y, z))
	}
	if err := sc.Err(); err != nil {
		return nil, err
	}
	return &system, nil
}
// gcd returns the greatest common divisor between a and b.
func gcd(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

// lcm3 returns the least common multiple between a, b, and c.
//
// Dividing by the gcd before multiplying keeps intermediate values as small
// as possible; the previous form (a*b/gcd(a,b)) could overflow on large
// cycle counts even when the final result fits in an int.
func lcm3(a, b, c int) int {
	ab := a / gcd(a, b) * b
	return ab / gcd(ab, c) * c
}
package cpebiten
import (
"github.com/hajimehoshi/ebiten"
"github.com/hajimehoshi/ebiten/inpututil"
"github.com/jakecoffman/cp"
"math"
)
var GrabbableMaskBit uint = 1 << 31
var Grabbable = cp.ShapeFilter{
cp.NO_GROUP, GrabbableMaskBit, GrabbableMaskBit,
}
var NotGrabbable = cp.ShapeFilter{
cp.NO_GROUP, ^GrabbableMaskBit, ^GrabbableMaskBit,
}
// handleGrab tries to grab the shape nearest to pos and, on success, attaches
// it to touchBody with a soft pivot joint so it follows the pointer. It
// returns the created joint, or nil when nothing grabbable was found.
func handleGrab(space *cp.Space, pos cp.Vector, touchBody *cp.Body) *cp.Constraint {
	const radius = 5.0 // make it easier to grab stuff
	info := space.PointQueryNearest(pos, radius, Grabbable)
	// avoid infinite mass objects
	if info.Shape != nil && info.Shape.Body().Mass() < math.MaxFloat64 {
		var nearest cp.Vector
		if info.Distance > 0 {
			nearest = info.Point
		} else {
			// the query point is inside the shape: anchor at the point itself.
			nearest = pos
		}
		// create a joint between the invisible mouse body and the shape
		body := info.Shape.Body()
		joint := cp.NewPivotJoint2(touchBody, body, cp.Vector{}, body.WorldToLocal(nearest))
		joint.SetMaxForce(50000)
		// soften the joint so the grabbed body eases toward the pointer
		// instead of snapping to it.
		joint.SetErrorBias(math.Pow(1.0-0.15, 60.0))
		space.AddConstraint(joint)
		return joint
	}
	return nil
}
// touchInfo tracks one active touch: the kinematic body that follows the
// finger and the joint (possibly nil if the grab failed) created when the
// touch began.
type touchInfo struct {
	id int
	body *cp.Body
	joint *cp.Constraint
}

var (
	// mouseBody is an invisible kinematic body that follows the cursor.
	mouseBody = cp.NewKinematicBody()
	// mouseJoint is the active mouse grab, or nil when nothing is held.
	mouseJoint *cp.Constraint
	// touches maps an ebiten touch id to its tracking state.
	touches = map[int]*touchInfo{}
)
func UpdateInput(space *cp.Space) {
x, y := ebiten.CursorPosition()
mouse := cp.Vector{float64(x), float64(y)}
if inpututil.IsMouseButtonJustPressed(ebiten.MouseButtonLeft) {
mouseJoint = handleGrab(space, mouse, mouseBody)
}
for _, id := range inpututil.JustPressedTouchIDs() {
x, y := ebiten.TouchPosition(id)
touchPos := cp.Vector{float64(x), float64(y)}
body := cp.NewKinematicBody()
body.SetPosition(touchPos)
touch := &touchInfo{
id: id,
body: body,
joint: handleGrab(space, touchPos, body),
}
touches[id] = touch
}
for id, touch := range touches {
if touch.joint != nil && inpututil.IsTouchJustReleased(id) {
space.RemoveConstraint(touch.joint)
touch.joint = nil
delete(touches, id)
} else {
x, y := ebiten.TouchPosition(id)
touchPos := cp.Vector{float64(x), float64(y)}
// calculate velocity so the object goes as fast as the touch moved
newPoint := touch.body.Position().Lerp(touchPos, 0.25)
touch.body.SetVelocityVector(newPoint.Sub(touch.body.Position()).Mult(60.0))
touch.body.SetPosition(newPoint)
}
}
if mouseJoint != nil && inpututil.IsMouseButtonJustReleased(ebiten.MouseButtonLeft) {
space.RemoveConstraint(mouseJoint)
mouseJoint = nil
}
// calculate velocity so the object goes as fast as the mouse moved
newPoint := mouseBody.Position().Lerp(mouse, 0.25)
mouseBody.SetVelocityVector(newPoint.Sub(mouseBody.Position()).Mult(60.0))
mouseBody.SetPosition(newPoint)
} | input.go | 0.557604 | 0.496216 | input.go | starcoder |
// Package grand provides high performance random string generation functionality.
package grand
import (
"unsafe"
)
// Character tables used by the random string generators below. The counts in
// the trailing comments are relied upon as Intn bounds by S, Digits, Letters
// and Symbols — keep them in sync. Note that characters places letters and
// digits first, so its leading 62 bytes exclude symbols.
var (
	letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" // 52
	symbols = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" // 32
	digits = "0123456789" // 10
	characters = letters + digits + symbols // 94
)
// Meet randomly calculate whether the given probability <num>/<total> is met.
func Meet(num, total int) bool {
	return num > Intn(total)
}

// MeetProb randomly calculate whether the given probability is met.
func MeetProb(prob float32) bool {
	return int(prob*1e7) > Intn(1e7)
}
// N returns a random int between min and max: [min, max].
// The <min> and <max> also support negative numbers.
func N(min, max int) int {
	if min >= max {
		return min
	}
	// Intn does not accept negative arguments, so shift the range to start
	// at zero, draw a random offset, then shift the result back. This single
	// expression is valid for negative and non-negative <min> alike; the
	// previous code had two branches computing the identical value plus an
	// unreachable "return 0".
	return Intn(max-min+1) + min
}
// S returns a random string which contains digits and letters, and its length is <n>.
// The optional parameter <symbols> specifies whether the result could contain symbols,
// which is false in default.
func S(n int, symbols ...bool) string {
	// Pick from the full 94-char table when symbols are requested, otherwise
	// only from the leading 62 letters+digits section of <characters>.
	limit := 62
	if len(symbols) > 0 && symbols[0] {
		limit = 94
	}
	b := make([]byte, n)
	for i := 0; i < n; i++ {
		b[i] = characters[Intn(limit)]
	}
	return *(*string)(unsafe.Pointer(&b))
}
// Str randomly picks and returns <n> count of chars from given string <s>.
// It also supports unicode string like Chinese/Russian/Japanese, etc.
func Str(s string, n int) string {
	pool := []rune(s)
	out := make([]rune, n)
	for i := 0; i < n; i++ {
		out[i] = pool[Intn(len(pool))]
	}
	return string(out)
}
// Digits returns a random string which contains only digits, and its length is <n>.
func Digits(n int) string {
	buf := make([]byte, n)
	for i := 0; i < n; i++ {
		buf[i] = digits[Intn(len(digits))]
	}
	return *(*string)(unsafe.Pointer(&buf))
}

// Letters returns a random string which contains only letters, and its length is <n>.
func Letters(n int) string {
	buf := make([]byte, n)
	for i := 0; i < n; i++ {
		buf[i] = letters[Intn(len(letters))]
	}
	return *(*string)(unsafe.Pointer(&buf))
}
// Symbols returns a random string which contains only symbols, and its length is <n>.
func Symbols(n int) string {
	b := make([]byte, n)
	for i := range b {
		// Fix: the previous bound Intn(52) could index past the end of the
		// 32-character symbols string and panic with an out-of-range error.
		b[i] = symbols[Intn(len(symbols))]
	}
	return *(*string)(unsafe.Pointer(&b))
}
// Perm returns, as a slice of n int numbers, a pseudo-random permutation of the integers [0,n).
func Perm(n int) []int {
m := make([]int, n)
for i := 0; i < n; i++ {
j := Intn(i + 1)
m[i] = m[j]
m[j] = i
}
return m
} | util/grand/grand.go | 0.741393 | 0.401482 | grand.go | starcoder |
package set
import (
"sort"
)
// Ints represents the classic "set" data structure, and contains ints.
type Ints map[int]bool

// NewInts creates and initializes an Ints and populates it with
// initial values as specified in the parameters.
func NewInts(initial ...int) Ints {
	set := make(Ints)
	for _, v := range initial {
		set.Add(v)
	}
	return set
}

// Size returns the number of elements in the set.
func (is Ints) Size() int {
	return len(is)
}

// IsEmpty is true for empty or uninitialized sets.
func (is Ints) IsEmpty() bool {
	return is.Size() == 0
}

// Add puts a value into the set.
func (is Ints) Add(value int) {
	if is == nil {
		panic("uninitalised set")
	}
	is[value] = true
}

// Remove takes a value out of the set. If value wasn't in the set to start
// with, this method silently succeeds.
func (is Ints) Remove(value int) {
	delete(is, value)
}

// Contains returns true if the value is in the set, and false otherwise.
func (is Ints) Contains(value int) bool {
	_, found := is[value]
	return found
}

// Values returns an unordered slice containing all the values in the set.
func (is Ints) Values() []int {
	values := make([]int, 0, len(is))
	for v := range is {
		values = append(values, v)
	}
	return values
}

// SortedValues returns an ordered slice containing all the values in the set.
func (is Ints) SortedValues() []int {
	sorted := is.Values()
	sort.Ints(sorted)
	return sorted
}

// Union returns a new Ints representing a union of the elements in the
// method target and the parameter.
func (is Ints) Union(other Ints) Ints {
	// Write straight into the internal map to avoid the extra work of the
	// friendlier accessor methods.
	union := make(Ints)
	for _, source := range []Ints{is, other} {
		for v := range source {
			union[v] = true
		}
	}
	return union
}

// Intersection returns a new Ints representing an intersection of the
// elements in the method target and the parameter.
func (is Ints) Intersection(other Ints) Ints {
	intersection := make(Ints)
	for v := range is {
		if _, found := other[v]; found {
			intersection[v] = true
		}
	}
	return intersection
}

// Difference returns a new Ints representing all the values in the
// target that are not in the parameter.
func (is Ints) Difference(other Ints) Ints {
	difference := make(Ints)
	for v := range is {
		if _, found := other[v]; !found {
			difference[v] = true
		}
	}
	return difference
}
package predict
import (
"context"
"github.com/Applifier/go-tensorflow/types/tensorflow/core/example"
"github.com/Applifier/go-tensorflow/utils"
)
// An Example is a mostly-normalized data format for storing data for
// training and inference. It contains a key-value store (features); where
// each key (string) maps to a Feature message (which is oneof packed BytesList,
// FloatList, or Int64List).
type Example = example.Example

// Feature contains Lists which may hold zero or more values.
type Feature = example.Feature

// Examplifier interface for types that can be converted to examples
type Examplifier interface {
	Examples() ([]*Example, error)
}

// MapExample map type that implements Examplifier interface
type MapExample map[string]interface{}

// Examples returns examples (one example) from a given map.
// It returns a single-element slice, or an error if the map could not be
// converted.
func (me MapExample) Examples() ([]*Example, error) {
	example, err := utils.NewExampleFromMap(me)
	if err != nil {
		return nil, err
	}
	return []*Example{example}, nil
}

// Class struct returned by classify calls to a model.
// Label names the predicted class and Score its confidence.
type Class struct {
	Label string
	Score float32
}

// Regression struct returned by regress calls to a model
type Regression struct {
	Value float32
}

// ModelInfo struct contains infomation about the model used for the prediction (name, version, etc.)
type ModelInfo struct {
	Name string
	Version int
}

// TensorType type of the tensor
type TensorType int

// Supported tensor element types, mirroring the TensorFlow dtypes.
const (
	TensorTypeFloat = TensorType(iota)
	TensorTypeDouble
	TensorTypeInt32
	TensorTypeUInt32
	TensorTypeInt16
	TensorTypeInt8
	TensorTypeUInt8
	TensorTypeString
	TensorTypeComplex64
	TensorTypeComplex128
	TensorTypeInt64
	TensorTypeUInt64
	TensorTypeBool
)

// Tensor unified interface for Tensors
type Tensor interface {
	Value() interface{}
	Shape() []int64
	Type() TensorType
}

// Predictor interface for unified model execution with different backend (embedded go model & tensorflow serving)
type Predictor interface {
	// Predict runs prediction with given input map. Output is filtered with given filter. (nil defaults to all outputs)
	Predict(ctx context.Context, inputs map[string]interface{}, outputFilter []string) (map[string]Tensor, ModelInfo, error)
	// Classify runs classify with given features and context
	Classify(ctx context.Context, examples []*Example, context *Example) ([][]Class, ModelInfo, error)
	// Regress runs regression with given features and context
	Regress(ctx context.Context, examples []*Example, context *Example) ([]Regression, ModelInfo, error)
	// GetModelInfo returns the ModelInfo for the Predictor
	GetModelInfo(ctx context.Context) (ModelInfo, error)
}
package mutations
import "log"
import "fmt"
import "time"
import "github.com/dadleyy/charlestown/engine/objects"
import "github.com/dadleyy/charlestown/engine/constants"
// Interact mutates the state based on the current mode.
func Interact() Mutation {
	return interact{}
}

type interact struct {
}

// replaceNeighbor returns list with any entry in addition's direction
// removed and addition appended in its place.
func (i interact) replaceNeighbor(list []objects.Neighbor, addition objects.Neighbor) []objects.Neighbor {
	out := make([]objects.Neighbor, 0, len(list))
	for _, candidate := range list {
		if candidate.Direction != addition.Direction {
			out = append(out, candidate)
		}
	}
	return append(out, addition)
}
// build attempts to place every building in the cursor's inventory at the
// cursor location, charging funds for each successful placement and wiring
// up neighbor links in the four cardinal directions.
func (i interact) build(next objects.Game) objects.Game {
	addditions := make([]objects.Building, 0, len(next.Cursor.Inventory)+len(next.Buildings))
	cache := make(map[int]map[int]objects.Building)
	// Start the new addition by looping over our currently inventory, indexing them by their x and y coordinates.
	for _, item := range next.Cursor.Inventory {
		construct := objects.Building{next.Cursor.Location, item.Kind, make([]objects.Neighbor, 0, 5)}
		if construct.Cost() > next.Funds {
			// Not affordable: flash a warning for three seconds and skip it.
			message := fmt.Sprintf("Not enough funds")
			expiry := time.Now().Add(time.Second * 3)
			next.Flashes = append(next.Flashes, objects.Flash{message, expiry})
			continue
		}
		x, y := construct.Location.Values()
		column, ok := cache[x]
		if !ok {
			cache[x] = map[int]objects.Building{}
			column = cache[x]
		}
		column[y] = construct
		next.Funds -= construct.Cost()
	}
	log.Printf("addition cache: %v", cache)
	// Loop over our current set of buildings, checking the additions to see if there is a location match.
	for _, building := range next.Buildings {
		x, y := building.Location.Values()
		column, ok := cache[x]
		// If we found our column, we should check north and south.
		if ok {
			// If there is a match at our y coordinate it is a duplicate.
			// NOTE(review): skipping here drops the pre-existing building
			// from the result, so a new construct effectively replaces it —
			// confirm replacement is the intended behavior.
			if _, ok := column[y]; ok {
				log.Printf("dupe building detected not adding")
				continue
			}
			// Check for a neighbor to the north.
			if north, hit := column[y-1]; hit {
				building.Neighbors = i.replaceNeighbor(building.Neighbors, objects.Neighbor{north, constants.NeighborNorth})
				north.Neighbors = i.replaceNeighbor(north.Neighbors, objects.Neighbor{building, constants.NeighborSouth})
				column[y-1] = north
			}
			// Check for a neighbor to the south.
			if south, hit := column[y+1]; hit {
				building.Neighbors = i.replaceNeighbor(building.Neighbors, objects.Neighbor{south, constants.NeighborSouth})
				south.Neighbors = i.replaceNeighbor(south.Neighbors, objects.Neighbor{building, constants.NeighborNorth})
				column[y+1] = south
			}
		}
		// Check for a neighbor to the west.
		// NOTE(review): east/west neighbors are looked up two columns away
		// (x-2 / x+2) — presumably grid cells are two characters wide on
		// screen; confirm against the renderer.
		if west, ok := cache[x-2]; ok {
			if neighbor, ok := west[y]; ok {
				building.Neighbors = i.replaceNeighbor(building.Neighbors, objects.Neighbor{neighbor, constants.NeighborWest})
				neighbor.Neighbors = i.replaceNeighbor(neighbor.Neighbors, objects.Neighbor{building, constants.NeighborEast})
				cache[x-2][y] = neighbor
			}
		}
		// Check for a neighbor to the east.
		// NOTE(review): this branch also tags the neighbor as NeighborWest,
		// matching the west branch above — confirm the direction constant.
		if east, ok := cache[x+2]; ok {
			if neighbor, ok := east[y]; ok {
				building.Neighbors = i.replaceNeighbor(building.Neighbors, objects.Neighbor{neighbor, constants.NeighborWest})
				neighbor.Neighbors = i.replaceNeighbor(neighbor.Neighbors, objects.Neighbor{building, constants.NeighborEast})
				cache[x+2][y] = neighbor
			}
		}
		addditions = append(addditions, building)
	}
	// Cache is now ready - insert them
	for _, column := range cache {
		for _, cell := range column {
			addditions = append(addditions, cell)
		}
	}
	next.Buildings = addditions
	return next
}
// demolish removes the building under the cursor and prunes any neighbor
// references other buildings held to it.
func (i interact) demolish(game objects.Game) objects.Game {
	kept := make([]objects.Building, 0, len(game.Buildings))
	for _, building := range game.Buildings {
		if building.Location.Equals(game.Cursor.Location) {
			// This is the building being demolished: drop it entirely.
			continue
		}
		// Filter out neighbor links that pointed at the demolished building.
		remaining := make([]objects.Neighbor, 0, len(building.Neighbors))
		for _, neighbor := range building.Neighbors {
			if !neighbor.Building.Location.Equals(game.Cursor.Location) {
				remaining = append(remaining, neighbor)
			}
		}
		building.Neighbors = remaining
		kept = append(kept, building)
	}
	game.Buildings = kept
	return game
}
// move either picks up the building under the cursor (starting a move) or,
// when a building is already held in the cursor's single-item inventory,
// places it at the cursor by delegating to build and clears the inventory.
func (i interact) move(game objects.Game) objects.Game {
	if len(game.Cursor.Inventory) == 1 {
		log.Printf("completing move")
		next := i.build(game)
		next.Cursor.Inventory = []objects.Building{}
		return next
	}
	log.Printf("initiating move")
	buildings := make([]objects.Building, 0, len(game.Buildings))
	for _, building := range game.Buildings {
		hit := building.Location.Equals(game.Cursor.Location)
		if !hit {
			buildings = append(buildings, building)
			continue
		}
		// Lift the building under the cursor into the inventory; it is not
		// re-appended, so it disappears from the map while being carried.
		game.Cursor.Inventory = []objects.Building{building}
	}
	game.Buildings = buildings
	return game
}
// Apply dispatches the interaction to the handler matching the cursor's
// current mode. Demolish and build consume a turn action; a flash is shown
// and nothing happens when no actions remain.
func (i interact) Apply(game objects.Game) objects.Game {
	next := game
	if next.Turn.Done() {
		next.Flashes = append(next.Flashes, objects.Flash{"No actions remaining", time.Now().Add(time.Second * 5)})
		return next
	}
	switch next.Cursor.Mode {
	case constants.CursorDemolish:
		next.Turn = next.Turn.Inc()
		return i.demolish(next)
	case constants.CursorBuild:
		next.Turn = next.Turn.Inc()
		return i.build(next)
	case constants.CursorMove:
		// NOTE(review): moving does not consume a turn action — confirm
		// this asymmetry with build/demolish is intended.
		return i.move(next)
	}
	return next
}
package ast2
import (
"go/ast"
"go/token"
r "reflect"
mt "github.com/cosmos72/gomacro/token"
)
// ToNode converts Ast back ast.Node, or panics on failure
// (it fails if the argument is not AstWithNode)
func ToNode(x Ast) ast.Node {
	switch x := x.(type) {
	case nil:
		return nil
	case AstWithNode:
		return x.Node()
	default:
		y := x.Interface()
		// errorf presumably aborts (panics), making the return below a
		// fallback for the compiler — confirm errorf's behavior.
		errorf("cannot convert to ast.Node: %v // %T", y, y)
		return nil
	}
}
// ToBasicLit converts Ast to *ast.BasicLit; nil input yields nil and any
// other type reports an error via errorf.
func ToBasicLit(x Ast) *ast.BasicLit {
	switch x := x.(type) {
	case nil:
		break
	case BasicLit:
		return x.X
	default:
		y := x.Interface()
		errorf("cannot convert to *ast.BasicLit: %v // %T", y, y)
	}
	return nil
}

// ToBlockStmt converts Ast to *ast.BlockStmt. Anything that is not already
// a block is first converted to a statement and wrapped in a new block.
func ToBlockStmt(x Ast) *ast.BlockStmt {
	switch x := x.(type) {
	case nil:
		break
	case BlockStmt:
		return x.X
	default:
		stmt := ToStmt(x)
		return &ast.BlockStmt{Lbrace: stmt.Pos(), List: []ast.Stmt{stmt}, Rbrace: stmt.End()}
	}
	return nil
}

// ToCallExpr converts Ast to *ast.CallExpr; nil input yields nil.
func ToCallExpr(x Ast) *ast.CallExpr {
	switch x := x.(type) {
	case nil:
		break
	case CallExpr:
		return x.X
	default:
		y := x.Interface()
		errorf("cannot convert to *ast.CallExpr: %v // %T", y, y)
	}
	return nil
}

// ToDecl converts Ast to ast.Decl; nil input yields nil.
func ToDecl(x Ast) ast.Decl {
	switch node := ToNode(x).(type) {
	case ast.Decl:
		return node
	case nil:
	default:
		y := x.Interface()
		errorf("cannot convert to ast.Decl: %v // %T", y, y)
	}
	return nil
}

// ToExpr converts Ast to ast.Expr. Statements are adapted: empty statements
// become the identifier "nil", expression statements are unwrapped, and any
// other statement is wrapped in a block and lifted into expression position
// via BlockStmtToExpr.
func ToExpr(x Ast) ast.Expr {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case ast.Expr:
		return node
	case *ast.BlockStmt:
		return BlockStmtToExpr(node)
	case *ast.EmptyStmt:
		// an empty statement in expression context means "no useful value".
		return &ast.Ident{NamePos: node.Semicolon, Name: "nil"}
	case *ast.ExprStmt:
		return node.X
	case ast.Stmt:
		list := []ast.Stmt{node}
		block := &ast.BlockStmt{List: list}
		return BlockStmtToExpr(block)
	default:
		errorf("unimplemented conversion from %v to ast.Expr: %v <%v>",
			r.TypeOf(node), node, r.TypeOf(node))
	}
	return nil
}
// ToExprSlice converts Ast to []ast.Expr, converting each child of a
// slice-like Ast with ToExpr; nil input yields nil.
func ToExprSlice(x Ast) []ast.Expr {
	switch x := x.(type) {
	case nil:
		break
	case ExprSlice:
		return x.X
	case AstWithSlice:
		n := x.Size()
		ret := make([]ast.Expr, n)
		for i := 0; i < n; i++ {
			ret[i] = ToExpr(x.Get(i))
		}
		return ret
	default:
		errorf("unimplemented conversion from %v <%v> to []ast.Expr", x, r.TypeOf(x))
	}
	return nil
}

// ToField converts Ast to *ast.Field; nil input yields nil.
func ToField(x Ast) *ast.Field {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case *ast.Field:
		return node
	default:
		errorf("cannot convert %v <%v> to *ast.Field", node, r.TypeOf(node))
	}
	return nil
}

// ToFile converts Ast to *ast.File; nil input yields nil.
func ToFile(x Ast) *ast.File {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case *ast.File:
		return node
	default:
		errorf("cannot convert %v <%v> to *ast.File", node, r.TypeOf(node))
	}
	return nil
}
// ToFieldList converts Ast to *ast.FieldList; nil input yields nil and a
// single *ast.Field is wrapped into a one-element field list.
func ToFieldList(x Ast) *ast.FieldList {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case *ast.FieldList:
		return node
	case *ast.Field:
		return &ast.FieldList{Opening: node.Pos(), List: []*ast.Field{node}, Closing: node.End()}
	default:
		// Fix: the message previously named the wrong target type
		// (*ast.Field instead of *ast.FieldList).
		errorf("cannot convert %v <%v> to *ast.FieldList", node, r.TypeOf(node))
	}
	return nil
}
// ToFuncType converts Ast to *ast.FuncType; nil input yields nil.
func ToFuncType(x Ast) *ast.FuncType {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case *ast.FuncType:
		return node
	default:
		errorf("cannot convert %v <%v> to *ast.FuncType", node, r.TypeOf(node))
	}
	return nil
}

// ToImportSpec converts Ast to *ast.ImportSpec; nil input yields nil.
func ToImportSpec(x Ast) *ast.ImportSpec {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case *ast.ImportSpec:
		return node
	default:
		errorf("cannot convert %v <%v> to *ast.ImportSpec", node, r.TypeOf(node))
	}
	return nil
}

// ToIdent converts Ast to *ast.Ident; nil input yields nil.
func ToIdent(x Ast) *ast.Ident {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case *ast.Ident:
		return node
	default:
		errorf("cannot convert %v <%v> to *ast.Ident", node, r.TypeOf(node))
	}
	return nil
}

// ToIdentSlice converts Ast to []*ast.Ident, converting each child of a
// slice-like Ast with ToIdent; nil input yields nil.
func ToIdentSlice(x Ast) []*ast.Ident {
	switch x := x.(type) {
	case nil:
		break
	case IdentSlice:
		return x.X
	case AstWithSlice:
		n := x.Size()
		ret := make([]*ast.Ident, n)
		for i := 0; i < n; i++ {
			ret[i] = ToIdent(x.Get(i))
		}
		return ret
	default:
		errorf("unimplemented conversion from %v <%v> to []*ast.Ident", x, r.TypeOf(x))
	}
	return nil
}

// ToSpec converts Ast to ast.Spec; nil input yields nil.
func ToSpec(x Ast) ast.Spec {
	switch node := ToNode(x).(type) {
	case nil:
		break
	case ast.Spec:
		return node
	default:
		errorf("cannot convert %v <%v> to ast.Spec", node, r.TypeOf(node))
	}
	return nil
}

// ToStmt converts Ast to ast.Stmt: declarations become *ast.DeclStmt and
// expressions become *ast.ExprStmt; nil input yields nil.
func ToStmt(x Ast) ast.Stmt {
	switch node := ToNode(x).(type) {
	case ast.Stmt:
		return node
	case ast.Decl:
		return &ast.DeclStmt{Decl: node}
	case ast.Expr:
		return &ast.ExprStmt{X: node}
	case nil:
		break
	default:
		errorf("unimplemented conversion from %v <%v> to ast.Stmt", node, r.TypeOf(node))
	}
	return nil
}
// ToStmtSlice converts Ast to []ast.Stmt, converting each child of a
// slice-like Ast with ToStmt; nil input yields nil.
func ToStmtSlice(x Ast) []ast.Stmt {
	switch x := x.(type) {
	case nil:
		break
	case StmtSlice:
		return x.X
	case AstWithSlice:
		n := x.Size()
		ret := make([]ast.Stmt, n)
		for i := 0; i < n; i++ {
			ret[i] = ToStmt(x.Get(i))
		}
		return ret
	default:
		errorf("unimplemented conversion from %v <%v> to []ast.Stmt", x, r.TypeOf(x))
	}
	return nil
}

// BlockStmtToExpr lifts a block statement into expression position. Trivial
// blocks collapse to "nil" or to their single expression; anything else is
// wrapped in a zero-argument closure marked with the MACRO unary operator.
func BlockStmtToExpr(node *ast.BlockStmt) ast.Expr {
	if node == nil {
		return nil
	}
	list := node.List
	switch len(list) {
	case 0:
		// convert {} to nil, because {} in expression context means "no useful value"
		return &ast.Ident{NamePos: node.Lbrace, Name: "nil"}
	case 1:
		// check if we are lucky...
		switch node := list[0].(type) {
		case *ast.ExprStmt:
			return node.X
		case *ast.EmptyStmt:
			// convert { ; } to nil, because { ; } in expression context means "no useful value"
			return &ast.Ident{NamePos: node.Semicolon, Name: "nil"}
		}
	}
	// due to go/ast strictly typed model, there is only one mechanism
	// to insert a statement inside an expression: use a closure.
	// so we return a unary expression: MACRO (func() { /*block*/ })
	typ := &ast.FuncType{Func: token.NoPos, Params: &ast.FieldList{}}
	fun := &ast.FuncLit{Type: typ, Body: node}
	return &ast.UnaryExpr{Op: mt.MACRO, X: fun}
}
package basiccolor
import (
"image/color"
"math"
)
// Compile-time check that HSL satisfies color.Color.
var _ color.Color = HSL{}

// HSL represents the HSL value for a color.
// H is the hue in [0..360]; S and L are saturation and lightness in [0..1];
// A is the (non-premultiplied) alpha channel.
type HSL struct {
	H, S, L float64
	A uint8
}

// RGBA implements color.Color by converting through non-premultiplied RGB.
func (c HSL) RGBA() (uint32, uint32, uint32, uint32) {
	r, g, b := hslToNRGB(c.H, c.S, c.L)
	return color.NRGBA{r, g, b, c.A}.RGBA()
}

// HSLModel is the color.Model for the HSL type.
// Non-HSL colors are first normalized through color.NRGBAModel.
var HSLModel = color.ModelFunc(func(c color.Color) color.Color {
	if _, ok := c.(HSL); ok {
		return c
	}
	nrgba := color.NRGBAModel.Convert(c).(color.NRGBA)
	h, s, l := nrgbToHSL(nrgba.R, nrgba.G, nrgba.B)
	return HSL{h, s, l, nrgba.A}
})
// Returns the Hue [0..360], Saturation and Lightness [0..1] of the color.
func nrgbToHSL(r, g, b uint8) (h, s, l float64) {
	red := float64(r) / 255.0
	green := float64(g) / 255.0
	blue := float64(b) / 255.0
	lo := math.Min(red, math.Min(green, blue))
	hi := math.Max(red, math.Max(green, blue))
	l = (hi + lo) / 2
	if hi == lo {
		// Achromatic: every channel is equal, so no hue or saturation.
		return 0, 0, l
	}
	delta := hi - lo
	if l < 0.5 {
		s = delta / (hi + lo)
	} else {
		s = delta / (2.0 - hi - lo)
	}
	// The dominant channel picks the hue sector; the remainder shifts
	// within it. Each sector spans 60 degrees.
	switch hi {
	case red:
		h = (green - blue) / delta
	case green:
		h = 2.0 + (blue-red)/delta
	default:
		h = 4.0 + (red-green)/delta
	}
	h *= 60
	if h < 0 {
		h += 360
	}
	return h, s, l
}
// Returns the RGB [0..255] values given a Hue [0..360], Saturation and Lightness [0..1]
func hslToNRGB(h, s, l float64) (uint8, uint8, uint8) {
	// toByte rounds a [0..1] channel value to [0..255], clamping in the
	// float domain. Fix: the previous code clamped AFTER converting to
	// uint8, at which point any out-of-range value had already wrapped,
	// making the clamp a no-op.
	toByte := func(v float64) uint8 {
		v = math.Round(v * 255.0)
		if v < 0 {
			v = 0
		}
		if v > 255 {
			v = 255
		}
		return uint8(v)
	}
	if s == 0 {
		// Achromatic: all channels equal the lightness.
		gray := toByte(l)
		return gray, gray, gray
	}
	// t1/t2 bound the channel intensities for this saturation/lightness.
	var t1, t2 float64
	if l < 0.5 {
		t1 = l * (1.0 + s)
	} else {
		t1 = l + s - l*s
	}
	t2 = 2*l - t1
	h = h / 360
	// channel maps a hue offset (normalized, ±1/3 around the base hue)
	// to its intensity within [t2, t1].
	channel := func(tc float64) float64 {
		if tc < 0 {
			tc++
		}
		if tc > 1 {
			tc--
		}
		switch {
		case 6*tc < 1:
			return t2 + (t1-t2)*6*tc
		case 2*tc < 1:
			return t1
		case 3*tc < 2:
			return t2 + (t1-t2)*(2.0/3.0-tc)*6
		default:
			return t2
		}
	}
	return toByte(channel(h + 1.0/3.0)), toByte(channel(h)), toByte(channel(h - 1.0/3.0))
}
// clampUint8 constrains value to the inclusive range [min, max].
func clampUint8(value, min, max uint8) uint8 {
	switch {
	case value < min:
		return min
	case value > max:
		return max
	default:
		return value
	}
}
// Utility function for rounding the value of a number (half away from zero).
//
// Fix: the previous implementation only shifted the value by ±0.5 and relied
// on the caller truncating the result during integer conversion. It now
// returns the actually rounded value, matching its name; composed with an
// integer conversion the results are identical to before.
func roundFloat64(value float64) float64 {
	return math.Round(value)
}
package dlx
import (
"errors"
)
/*
New constructs a new matrix.
primary - The amount of primary columns to be in the matrix.
optional - The amount of optional columns to be in the matrix.

Primary column headers are linked into a circular doubly-linked list hung off
the root sentinel; optional column headers are self-linked and never join the
root's list, so they may remain uncovered in a solution.
*/
func New(primary, optional int) (*Matrix, error) {
	if primary <= 0 {
		return nil, errors.New("primary must be positive")
	}
	if optional < 0 {
		return nil, errors.New("optional must be non-negative")
	}
	cols := primary + optional
	result := Matrix{
		root: element{},
		headers: make([]element, cols),
		details: make([]element, 0),
		rows: make([]row, 0),
		rowCount: 0,
		solution: newSolution(),
	}
	// The root's vertical links point at itself; only horizontal links are
	// used for column traversal.
	result.root.up, result.root.down = &result.root, &result.root
	for i := 0; i < primary; i++ {
		result.headers[i] = element{
			size: 0,
			row: nil,
			header: &result.headers[i],
			up: &result.headers[i],
			down: &result.headers[i],
			left: &result.headers[negativeMod(i-1, primary)],
			right: &result.headers[(i+1)%primary],
		}
	}
	// Optional columns: fully self-linked in both directions.
	for i := primary; i < cols; i++ {
		result.headers[i] = element{
			size: 0,
			row: nil,
			header: &result.headers[i],
			up: &result.headers[i],
			down: &result.headers[i],
			left: &result.headers[i],
			right: &result.headers[i],
		}
	}
	// Splice the root into the circular list of primary headers.
	result.root.left = result.headers[0].left
	result.root.right = &result.headers[0]
	result.root.left.right = &result.root
	result.root.right.left = &result.root
	return &result, nil
}
/*
AddRow adds a new row of elements to a matrix.
dlx - The matrix to add the row to.
indexes - The indices of the columns containing 1s; they must be valid
column indices given in strictly ascending order.
*/
func AddRow(dlx *Matrix, indexes ...int) error {
	count := len(indexes)
	if count == 0 {
		return nil
	}
	// Validate bounds and strict ascending order before mutating anything.
	last := -1
	for _, index := range indexes {
		if index < 0 || index >= len(dlx.headers) {
			return errors.New("index out of range")
		}
		if last != -1 && index <= last {
			return errors.New("indexes not in ascending order")
		}
		last = index
	}
	length := len(dlx.details)
	// NOTE(review): appending here may reallocate the backing arrays of
	// dlx.details and dlx.rows. Previously linked elements keep pointing at
	// the old arrays (which stay alive through those pointers), so the
	// linked structure remains traversable, but dlx.details/dlx.rows no
	// longer alias the elements reachable through the links — confirm no
	// code relies on indexing rows/details after later AddRow calls.
	dlx.details = append(dlx.details, make([]element, count)...)
	newRow := dlx.details[length:]
	dlx.rows = append(dlx.rows, row{
		index: dlx.rowCount,
		firstElement: &newRow[0],
		covered: false,
	})
	// Link each new element horizontally into the row ring and vertically
	// at the bottom of its column.
	for i, col := range indexes {
		newRow[i] = element{
			size: 0,
			header: &dlx.headers[col],
			up: dlx.headers[col].up,
			down: &dlx.headers[col],
			left: &newRow[negativeMod(i-1, count)],
			right: &newRow[(i+1)%count],
			row: &dlx.rows[dlx.rowCount],
		}
		dlx.headers[col].size++
		newRow[i].up.down, newRow[i].down.up = &newRow[i], &newRow[i]
	}
	dlx.rowCount++
	return nil
}
/*
AddToSolution adds a row explicitly to the solution for a matrix,
covering the row and every column it occupies so the search will not
revisit them.
dlx - The matrix which contains the row.
index - The index of the row.
*/
func AddToSolution(dlx *Matrix, index int) error {
	if index < 0 || index >= dlx.rowCount {
		return errors.New("index out of range")
	}
	if dlx.rows[index].covered {
		return errors.New("row is covered, cannot be included in solution")
	}
	dlx.solution.push(index)
	firstElement := dlx.rows[index].firstElement
	// Remove the row from all its columns, then cover each of those columns.
	coverRow(firstElement)
	coverColumn(firstElement.header)
	for e := firstElement.right; e != firstElement; e = e.right {
		coverColumn(e.header)
	}
	return nil
}
/*
ClearSolution removes all rows from the current solution for a matrix.
This function undoes any calls to AddToSolution, uncovering in the exact
reverse order of covering, as dancing links requires.
dlx - The matrix to clear the solution for.
*/
func ClearSolution(dlx *Matrix) {
	for dlx.solution.size() > 0 {
		index, _ := dlx.solution.pop()
		firstElement := dlx.rows[index].firstElement
		// Uncover columns right-to-left (reverse of AddToSolution), then
		// restore the row itself.
		for e := firstElement.left; e != firstElement; e = e.left {
			uncoverColumn(e.header)
		}
		uncoverColumn(firstElement.header)
		uncoverRow(firstElement)
	}
}
/*
ForEachSolution calls f with a slice of all row indexes which correspond
to a solution for a matrix. It recursively explores the search space,
covering before each recursive call and uncovering afterwards; the slice
passed to f is the live solution stack, so callers should copy it if they
need to retain it.
dlx - The matrix to find solutions for.
f - The function to be called when a solution is found.
*/
func ForEachSolution(dlx *Matrix, f func([]int)) {
	// No primary columns left uncovered: the current stack is a solution.
	if dlx.root.left == &dlx.root {
		f(dlx.solution.values[:dlx.solution.stackptr])
		return
	}
	header, emptyColumn := colPolicy(dlx)
	if emptyColumn {
		// A primary column has no remaining rows: dead end, backtrack.
		return
	}
	for r := header.down; r != header; r = r.down {
		dlx.solution.push(r.row.index)
		coverRow(r)
		coverColumn(r.header)
		for j := r.right; j != r; j = j.right {
			coverColumn(j.header)
		}
		ForEachSolution(dlx, f)
		// Undo in exact reverse order before trying the next row.
		for j := r.left; j != r; j = j.left {
			uncoverColumn(j.header)
		}
		uncoverColumn(r.header)
		uncoverRow(r)
		dlx.solution.pop()
	}
}
/*
FirstSolution finds a solution for a matrix and returns the row indexes.
dlx - The matrix to find a solution for.
*/
func FirstSolution(dlx *Matrix) []int {
if dlx.root.left == &dlx.root {
return dlx.solution.values[:dlx.solution.stackptr]
}
header, emptyColumn := colPolicy(dlx)
if emptyColumn {
return nil
}
for r := header.down; r != header; r = r.down {
dlx.solution.push(r.row.index)
coverRow(r)
coverColumn(r.header)
for j := r.right; j != r; j = j.right {
coverColumn(j.header)
}
result := FirstSolution(dlx)
for j := r.left; j != r; j = j.left {
uncoverColumn(j.header)
}
uncoverColumn(r.header)
uncoverRow(r)
dlx.solution.pop()
if result != nil {
return result
}
}
return nil
}
func newSolution() solution {
return solution{
values: make([]int, 0),
stackptr: 0,
}
}
func colPolicy(dlx *Matrix) (*element, bool) {
// TODO: Make more efficient column policy
// algorithm than a linear search
var best *element
for h := dlx.root.right; h != &dlx.root; h = h.right {
if best == nil || h.size < best.size {
best = h
}
if best.size == 0 {
return nil, true
}
}
return best, false
}
func coverRow(e *element) {
e.row.covered = true
e = e.right
e.up.down, e.down.up = e.down, e.up
e.header.size--
for r := e.right; r != e; r = r.right {
r.up.down, r.down.up = r.down, r.up
r.header.size--
}
}
func uncoverRow(e *element) {
e.row.covered = false
e = e.left
e.up.down, e.down.up = e, e
e.header.size++
for r := e.left; r != e; r = r.left {
r.up.down, r.down.up = r, r
r.header.size++
}
}
func coverColumn(h *element) {
h.left.right, h.right.left = h.right, h.left
for j := h.down; j != h; j = j.down {
j.left.right, j.right.left = j.right, j.left
coverRow(j)
}
}
func uncoverColumn(h *element) {
for j := h.up; j != h; j = j.up {
uncoverRow(j)
j.left.right, j.right.left = j, j
}
h.left.right, h.right.left = h, h
}
func negativeMod(a, b int) int {
result := a % b
if (result < 0 && b > 0) || (result > 0 && b < 0) {
return result + b
}
return result
} | dlx.go | 0.549399 | 0.492859 | dlx.go | starcoder |
package main
import (
"math"
"github.com/qeedquan/go-media/math/f64"
)
const (
LUT_SIZE = 21 // number of importance index entries in the lookup table
NUM_STRUCT_INDEX_BITS = 6 // Number of significant bits taken from F-Code.
GOLDEN_RATIO = PHI
PHI = 1.6180339887498948482045868343656 // ( 1 + sqrt(5) ) / 2
PHI2 = 2.6180339887498948482045868343656 // Phi squared
LOG_PHI = 0.48121182505960347 // log(Phi)
SQRT5 = 2.2360679774997896964091736687313 // sqrt(5.0)
)
// Two-bit sequences.
const (
B00 = iota
B10
B01
)
const (
TileTypeA = iota
TileTypeB
TileTypeC
TileTypeD
TileTypeE
TileTypeF
)
// Individual tile elements, which also serve as nodes for the tile subdivision tree.
type TileNode struct {
level int // Depth in the tree.
tileType int // Types A through F.
dir int // Tile orientation, 0=North, in Pi/10 increments, CCW.
scale float64
p1, p2, p3 f64.Vec2 // Tile orientation, 0=North, in Pi/10 increments, CCW.
// The F-Code binary sequence.
f_code int
// tiling tree structure
parent *TileNode
parent_slot int // position in parent's list (needed for iterators)
terminal bool // true for leaf nodes
children []*TileNode
}
// Helper constructor.
// Creates an initial tile that is certain to contain the ROI.
// The starting tile is of type F (arbitrary).
func NewTileNodeROI(roi_width, roi_height float64) *TileNode {
side := math.Max(roi_width, roi_height)
scale := 2.0 * side
offset := f64.Vec2{PHI*PHI/2.0 - 0.25, 0.125}
return NewTileNodeEx(nil, TileTypeF, offset.Scale(-scale), 15, 0, 0, scale)
}
func NewTileNodeEx(parent *TileNode, tileType int, refPt f64.Vec2, dir int, newbits int, parent_slot int, scale float64) *TileNode {
t := &TileNode{
parent: parent,
tileType: tileType,
p1: refPt,
dir: dir % 20,
parent_slot: parent_slot,
scale: scale,
}
if parent != nil {
t.level = parent.level + 1
}
// Build triangle, according to type.
switch tileType {
case TileTypeC, TileTypeD:
// "Skinny" triangles
t.p2 = t.p1.Add(vvect[dir%20].Scale(t.scale))
t.p3 = t.p1.Add(vvect[(dir+4)%20].Scale(PHI * scale))
case TileTypeE, TileTypeF:
// "Fat" triangles
t.p2 = t.p1.Add(vvect[dir%20].Scale(PHI2 * scale))
t.p3 = t.p1.Add(vvect[(dir+2)%20].Scale(PHI * scale))
default:
// Pentagonal tiles (triangle undefined)
t.p2 = t.p1.Add(vvect[dir%20].Scale(scale))
t.p3 = t.p1.Add(vvect[(dir+5)%20].Scale(scale))
}
// Append 2 new bits to the F-Code.
if t.parent != nil {
t.f_code = (t.parent.f_code << 2) ^ newbits
} else {
t.f_code = newbits
}
// Set as leaf node
t.terminal = true
t.children = t.children[:0]
return t
}
func (t *TileNode) GetP1() f64.Vec2 { return t.p1 }
func (t *TileNode) GetP2() f64.Vec2 { return t.p2 }
func (t *TileNode) GetP3() f64.Vec2 { return t.p3 }
func (t *TileNode) GetCenter() f64.Vec2 {
p := t.p1.Add(t.p2)
p = p.Add(t.p3)
p = p.Scale(1 / 3.0)
return p
}
func (t *TileNode) GetLevel() int { return t.level }
func (t *TileNode) IsSamplingType() bool {
return t.tileType == TileTypeA || t.tileType == TileTypeB
}
func (t *TileNode) GetFCode() int { return t.f_code }
// Splits a tile according to the given subdivision rules.
func (t *TileNode) Refine() {
// Can only subdivide leaf nodes.
if !t.terminal {
return
}
t.terminal = false // The tile now has children.
newscale := t.scale / GOLDEN_RATIO // The scale factor between levels is constant.
switch t.tileType {
// Each new tile is created using the following information:
// A pointer to its parent, the type of the new tile (a through f),
// the origin of the new tile, the change in orientation of the new tile with
// respect to the parent's orientation, the two bits to be pre-pended to the F-Code,
// the parent's slot (for traversal purposes), and the new linear scale of the tile,
// which is always the parent's scale divided by the golden ratio.
case TileTypeA:
t.children = append(t.children, NewTileNodeEx(t, TileTypeB, t.p1, t.dir+0, B00, 0, newscale))
case TileTypeB:
t.children = append(t.children, NewTileNodeEx(t, TileTypeA, t.p1, t.dir+10, B00, 0, newscale))
case TileTypeC:
t.children = append(t.children, NewTileNodeEx(t, TileTypeF, t.p3, t.dir+14, B00, 0, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeC, t.p2, t.dir+6, B10, 1, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeA, t.children[0].p3, t.dir+1, B10, 2, newscale))
case TileTypeD:
t.children = append(t.children, NewTileNodeEx(t, TileTypeE, t.p2, t.dir+6, B00, 0, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeD, t.children[0].p3, t.dir+14, B10, 1, newscale))
case TileTypeE:
t.children = append(t.children, NewTileNodeEx(t, TileTypeC, t.p3, t.dir+12, B10, 0, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeE, t.p2, t.dir+8, B01, 1, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeF, t.p1, t.dir+0, B00, 2, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeA, t.children[0].p2, t.dir+7, B10, 3, newscale))
case TileTypeF:
t.children = append(t.children, NewTileNodeEx(t, TileTypeF, t.p3, t.dir+12, B01, 0, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeE, t.children[0].p3, t.dir+0, B00, 1, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeD, t.children[1].p3, t.dir+8, B10, 2, newscale))
t.children = append(t.children, NewTileNodeEx(t, TileTypeA, t.children[0].p2, t.dir+15, B01, 3, newscale))
}
}
func (t *TileNode) IsTerminal() bool {
return t.terminal
}
func (t *TileNode) GetChild(i int) *TileNode {
return t.children[i]
}
// Returns the next closest leaf to a node.
// Returns NULL if it's the last leaf.
func (t *TileNode) NextLeaf() *TileNode {
tmp := t
for {
tmp = tmp.NextNode()
if tmp == nil {
return nil
}
if tmp.IsTerminal() {
return tmp
}
}
}
// Returns the next node of the tree, in depth-first traversal.
// Returns NULL if it is at the last node.
func (t *TileNode) NextNode() *TileNode {
if !t.terminal {
return t.children[0]
}
// single node case
if t.level == 0 {
return nil
}
if t.parent_slot < len(t.parent.children)-1 {
return t.parent.children[t.parent_slot+1]
}
// last child case
tmp := t
for {
tmp = tmp.parent
if !((tmp.level != 0) && (tmp.parent_slot == len(tmp.parent.children)-1)) {
break
}
}
// last node
if tmp.level == 0 {
return nil
}
return tmp.parent.children[tmp.parent_slot+1]
}
func (t *TileNode) GetDisplacedSamplingPoint(importance int) f64.Vec2 {
p := t.p1.Add(calcDisplacementVector(importance, t.f_code, t.dir))
p = p.Scale(t.scale)
return p
}
func calcDisplacementVector(importance, f_code, dir int) f64.Vec2 {
i_s := calcStructuralIndex(f_code)
i_v := calcImportanceIndex(importance)
return vvect[dir].Scale(lut[i_v][i_s][0]).Add(vvect[(dir+5)%20].Scale(lut[i_v][i_s][1]))
}
func calcStructuralIndex(bitsequence int) int {
return calcFCodeValue(bitsequence, NUM_STRUCT_INDEX_BITS)
}
func calcImportanceIndex(importance int) int {
t := math.Log(1.0+math.Sqrt(5.0)*float64(importance)) / math.Log(PHI2)
t -= math.Floor(t) // modulo 1.0
return int(LUT_SIZE * t)
}
type TileLeafIterator struct {
shape *TileNode
}
func NewTileLeafIterator(s *TileNode) *TileLeafIterator {
t := &TileLeafIterator{}
t.begin(s)
return t
}
func (t *TileLeafIterator) begin(s *TileNode) {
tmp := s
for !tmp.IsTerminal() {
tmp = tmp.GetChild(0)
}
t.shape = tmp
}
func (t *TileLeafIterator) GetShape() *TileNode {
return t.shape
}
// Moves to the next node in the subdivision tree, in depth-first traversal.
// Returns false iff there is no such node.
func (t *TileLeafIterator) Next() bool {
s := t.shape.NextLeaf()
t.shape = s
if s != nil {
return true
}
return false
}
// Checks if there is a next tile, in depth-first traversal.
func (t *TileLeafIterator) HasNext() bool {
s := t.shape.NextLeaf()
return s != nil
}
func (t *TileLeafIterator) Refine() {
t.shape.Refine()
t.shape = t.shape.GetChild(0)
}
type QuasiSampler struct {
width float64
height float64
root *TileNode
GetImportanceAt func(f64.Vec2) int
}
func NewQuasiSampler(width, height float64, getImportanceAt func(f64.Vec2) int) *QuasiSampler {
return &QuasiSampler{
width: width,
height: height,
GetImportanceAt: getImportanceAt,
}
}
// Builds and collects the point set generated be the sampling system,
// using the previously defined importance function.
func (s *QuasiSampler) GetSamplingPoints() []f64.Vec2 {
s.buildAdaptiveSubdivision(6)
pointlist := s.collectPoints(true)
return pointlist
}
// Generates the hierarchical structure.
func (s *QuasiSampler) buildAdaptiveSubdivision(minSubdivisionLevel int) {
s.root = NewTileNodeROI(s.width, s.height)
// Since we are approximating the MAX within each tile by the values at
// a few key points, we must provide a sufficiently dense initial
// tiling. This would not be necessary with a more thorough scan of each
// tile.
s.subdivideAll(minSubdivisionLevel)
it := NewTileLeafIterator(s.root)
// Recursively subdivide all triangles until each triangle's
// required level is reached.
for {
level := it.GetShape().GetLevel()
// Sampling tiles are infinitesimal
if it.GetShape().IsSamplingType() {
if level < s.GetReqSubdivisionLevel(s.GetImportanceAt_bounded(it.GetShape().GetP1())) {
tmp := it.GetShape()
tmp.Refine()
}
} else {
if (level < s.GetReqSubdivisionLevel(s.GetImportanceAt_bounded(it.GetShape().GetP1()))) ||
(level < s.GetReqSubdivisionLevel(s.GetImportanceAt_bounded(it.GetShape().GetP2()))) ||
(level < s.GetReqSubdivisionLevel(s.GetImportanceAt_bounded(it.GetShape().GetP3()))) ||
(level < s.GetReqSubdivisionLevel(s.GetImportanceAt_bounded(it.GetShape().GetCenter()))) {
tmp := it.GetShape()
tmp.Refine()
}
}
if !it.Next() {
break
}
}
}
// Returns the required level of subdivision for a given importance value.
// The value returned is \f$ \lceil{\log_{\phi^2}(importance)}\rceil \f$,
// where \f$ \phi=\frac{1 + {\sqrt{5}}}{2}\f$ is the Golden Ratio.
func (s *QuasiSampler) GetReqSubdivisionLevel(importance int) int {
if importance == 0 {
return 0
}
nbits := math.Log(float64(importance)*SQRT5+1.0) / LOG_PHI
if nbits < 1 {
nbits = 1
}
return int(math.Ceil(nbits * 0.5))
}
// This is a helper function which constrains the incoming points
// to the region of interest.
func (s *QuasiSampler) GetImportanceAt_bounded(pt f64.Vec2) int {
if pt.X >= 0 && pt.X < s.width && pt.Y >= 0 && pt.Y < s.height {
return s.GetImportanceAt(pt)
}
return 0
}
// Subdivides all tiles down a level, a given number of times.
func (s *QuasiSampler) subdivideAll(times int) {
if s.root == nil {
return
}
for i := 0; i < times; i++ {
it := NewTileLeafIterator(s.root)
for {
tmp := it.GetShape()
it.Next()
tmp.Refine()
if it == nil {
break
}
}
}
}
// Collect the resulting point set.
func (s *QuasiSampler) collectPoints(filterBounds bool) []f64.Vec2 {
var pointlist []f64.Vec2
it := NewTileLeafIterator(s.root)
for {
pt := it.GetShape().GetP1()
// Only "pentagonal" tiles generate sampling points.
if it.GetShape().IsSamplingType() {
importance := s.GetImportanceAt_bounded(pt)
// Threshold the function against the F-Code value.
if importance >= calcFCodeValue(it.GetShape().GetFCode(), it.GetShape().GetLevel()) {
// Get the displaced point using the lookup table.
pt_displaced := it.GetShape().GetDisplacedSamplingPoint(importance)
if !filterBounds ||
(pt_displaced.X >= 0 && pt_displaced.X < s.width &&
pt_displaced.Y >= 0 && pt_displaced.Y < s.height) {
pointlist = append(pointlist, pt_displaced)
}
}
}
if !it.Next() {
break
}
}
return pointlist
}
func calcFCodeValue(bitsequence, nbits int) int {
i_s := 0
for i := 0; i < nbits; i++ {
if bitsequence&(1<<uint(nbits-i-1)) != 0 {
i_s += fibonacci(i + 2)
}
}
return i_s
}
func fibonacci(i int) int {
if i < 1 {
return 1
}
if i < len(fiboTable) {
return fiboTable[i-1]
}
return fibonacci(i-1) + fibonacci(i-2)
}
var fiboTable = [32]int{
1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987,
1597, 2584, 4181, 6765, 10946, 17711, 28567, 46368, 75025,
121393, 196418, 317811, 514229, 832040, 1346269, 2178309,
}
var vvect = []f64.Vec2{
f64.Vec2{0, 1}, f64.Vec2{-0.309017, 0.951057}, f64.Vec2{-0.587785, 0.809017},
f64.Vec2{-0.809017, 0.587785}, f64.Vec2{-0.951057, 0.309017}, f64.Vec2{-1, 0},
f64.Vec2{-0.951057, -0.309017}, f64.Vec2{-0.809017, -0.587785},
f64.Vec2{-0.587785, -0.809017}, f64.Vec2{-0.309017, -0.951057}, f64.Vec2{0, -1},
f64.Vec2{0.309017, -0.951057}, f64.Vec2{0.587785, -0.809017}, f64.Vec2{0.809017, -0.587785},
f64.Vec2{0.951057, -0.309017}, f64.Vec2{1, 0}, f64.Vec2{0.951057, 0.309017},
f64.Vec2{0.809017, 0.587785}, f64.Vec2{0.587785, 0.809017}, f64.Vec2{0.309017, 0.951057},
}
var lut = [LUT_SIZE][21][2]float64{
{{0.0130357, 0.0419608}, {-0.0241936, 0.0152706}, {-0.00384601, -0.311212}, {-0.000581893, -0.129134},
{-0.0363269, 0.0127624}, {0.0999483, 0.408639}, {-0.0526517, 0.4385}, {-0.128703, 0.392}, {0.0132026, 1.0818},
{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{0.00793289, 0.0148063}, {0.0206067, -0.0809589}, {0.0110103, -0.430433}, {0.0000473169, -0.293185},
{-0.0593578, 0.019457}, {0.34192, 0.291714}, {-0.286696, 0.386017}, {-0.345313, 0.311961}, {0.00606029, 1.00877},
{0.04757, 0.05065}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{0.00454493, -0.00805726}, {0.0545058, -0.140953}, {0.00960599, -0.493483}, {0.000527191, -0.354496},
{-0.0742085, -0.0477178}, {0.436518, 0.218493}, {-0.422435, 0.275524}, {-0.425198, 0.257027},
{0.0127468, 0.979585}, {0.128363, 0.139522}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},
{0, 0}, {0, 0}, {0, 0}}, {{-0.0014899, -0.0438403}, {0.122261, -0.229582}, {-0.00497263, -0.580537},
{-0.00489546, -0.424237}, {-0.107601, -0.133695}, {0.526304, 0.125709}, {-0.558461, 0.0679206},
{-0.511708, 0.153397}, {0.0271526, 0.950065}, {0.298021, 0.327582}, {-0.00464701, -0.00362132}, {0, 0}, {0, 0},
{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{-0.0182024, -0.0837012}, {0.226792, -0.318088}, {-0.0416745, -0.663614}, {-0.0253331, -0.455424},
{-0.159087, -0.20807}, {0.552691, 0.0525824}, {-0.617244, -0.197362}, {-0.561762, 0.00314535},
{0.0522991, 0.928754}, {0.376689, 0.429912}, {-0.0180693, -0.00792235}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},
{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{-0.0308901, -0.108719}, {0.362157, -0.377329},
{-0.0918077, -0.742776}, {-0.0571567, -0.453854}, {-0.242014, -0.230347}, {0.542952, -0.00542364},
{-0.614735, -0.35591}, {-0.565238, -0.204834}, {0.084241, 0.900632}, {0.403207, 0.481046},
{-0.0459391, -0.00743248}, {0.0143212, 0.0776031}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},
{0, 0}, {0, 0}}, {{-0.0429758, -0.112222}, {0.470514, -0.41007}, {-0.139291, -0.797567}, {-0.0930261, -0.382258},
{-0.30831, -0.210972}, {0.504387, -0.05265}, {-0.578917, -0.4354}, {-0.545885, -0.40618}, {0.122368, 0.852639},
{0.377534, 0.476884}, {-0.0712593, 0.0238995}, {0.0349156, 0.248696}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},
{0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{-0.0297026, -0.0818903}, {0.514634, -0.426843}, {-0.161039, -0.817284},
{-0.099245, -0.221824}, {-0.359506, -0.135015}, {0.433957, -0.0878639}, {-0.541453, -0.46714},
{-0.526484, -0.556459}, {0.1735, 0.771396}, {0.353023, 0.455358}, {-0.07854, 0.0885735}, {0.0714601, 0.591673},
{-0.0147015, 0.0839976}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{-0.0204607, -0.0433266}, {0.515056, -0.428386}, {-0.153717, -0.803384}, {-0.0874438, 0.032819},
{-0.370233, 0.00469937}, {0.331072, -0.0951004}, {-0.507368, -0.487422}, {-0.533403, -0.648977},
{0.243233, 0.652577}, {0.33663, 0.406983}, {-0.0624495, 0.167064}, {0.0527702, 0.808443}, {-0.0444704, 0.258347},
{0.030331, -0.00128903}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{-0.0184965, 0.00557424}, {0.495666, -0.40889}, {-0.136052, -0.781115}, {-0.0493628, 0.265293},
{-0.337945, 0.202038}, {0.193353, -0.0835904}, {-0.479971, -0.497456}, {-0.574003, -0.71938},
{0.32445, 0.514949}, {0.331709, 0.341565}, {-0.034108, 0.244375}, {0.0149632, 0.910353}, {-0.104428, 0.60938},
{0.0948414, -0.00216379}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{-0.0436899, 0.0294207}, {0.469933, -0.372015}, {-0.153852, -0.756531}, {0.00920944, 0.393625},
{-0.270292, 0.392355}, {0.0540646, -0.0473047}, {-0.466651, -0.492248}, {-0.647575, -0.793479},
{0.394352, 0.385016}, {0.330852, 0.272582}, {-0.0125759, 0.30811}, {-0.0407447, 0.902855}, {-0.136947, 0.8021},
{0.227048, -0.0014045}, {0.0261797, 0.0109521}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{-0.0602358, 0.0215278}, {0.43301, -0.338538}, {-0.233311, -0.71494}, {0.0916642, 0.433266},
{-0.173199, 0.474801}, {-0.0384285, 0.024931}, {-0.475596, -0.469989}, {-0.739327, -0.866143},
{0.440049, 0.277063}, {0.326099, 0.207864}, {-0.00488013, 0.365323}, {-0.0890991, 0.872087},
{-0.159106, 0.889116}, {0.311406, 0.0126425}, {0.081674, 0.0403966}, {0.01391, 0.00573611}, {0, 0}, {0, 0},
{0, 0}, {0, 0}, {0, 0}}, {{-0.0723894, -0.00927744}, {0.354855, -0.326512}, {-0.329593, -0.647058},
{0.169384, 0.42962}, {-0.0250381, 0.472328}, {-0.108748, 0.122704}, {-0.507741, -0.424372},
{-0.805866, -0.896362}, {0.48306, 0.211626}, {0.314407, 0.142681}, {-0.00348365, 0.415081},
{-0.125494, 0.836485}, {-0.183247, 0.847226}, {0.366439, 0.0391043}, {0.18978, 0.100287}, {0.0401008, 0.018797},
{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}, {{-0.0748666, -0.0517059}, {0.237999, -0.333105},
{-0.391007, -0.558425}, {0.223599, 0.428175}, {0.159284, 0.420084}, {-0.17834, 0.234411}, {-0.553952, -0.353981},
{-0.821481, -0.848098}, {0.527132, 0.175271}, {0.312397, 0.0908259}, {0.00190795, 0.441568},
{-0.149358, 0.790424}, {-0.226469, 0.765995}, {0.383259, 0.0740479}, {0.243694, 0.15335}, {0.0901877, 0.0475938},
{-0.00963625, 0.00819101}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},
{{-0.0862318, -0.0937052}, {0.132383, -0.310846}, {-0.420153, -0.463782}, {0.261956, 0.440763},
{0.290379, 0.392449}, {-0.264095, 0.349189}, {-0.576491, -0.274722}, {-0.797096, -0.724963},
{0.565701, 0.153393}, {0.315376, 0.0546255}, {0.0149326, 0.430477}, {-0.167772, 0.702404}, {-0.283244, 0.645617},
{0.383304, 0.0988087}, {0.248786, 0.17877}, {0.103708, 0.0729573}, {-0.0286781, 0.0298329},
{-0.00878083, 0.0189161}, {0, 0}, {0, 0}, {0, 0}},
{{-0.0911025, -0.116785}, {0.058151, -0.268943}, {-0.424486, -0.374671}, {0.288764, 0.470621},
{0.362681, 0.386055}, {-0.327219, 0.436709}, {-0.585384, -0.202215}, {-0.772145, -0.5936}, {0.580061, 0.135496},
{0.313963, 0.0305349}, {0.0109925, 0.360967}, {-0.181933, 0.552414}, {-0.300836, 0.508161},
{0.364265, 0.0976394}, {0.210088, 0.176749}, {0.096516, 0.0958074}, {-0.0658733, 0.0731591},
{-0.0280071, 0.057776}, {0.0158411, 0.00325704}, {0, 0}, {0, 0}},
{{-0.0974734, -0.0918732}, {0.0139633, -0.212455}, {-0.406371, -0.282796}, {0.296357, 0.483457},
{0.381376, 0.39536}, {-0.333854, 0.503081}, {-0.58254, -0.14516}, {-0.763625, -0.49765}, {0.567887, 0.121286},
{0.30413, 0.0127316}, {-0.00152308, 0.270083}, {-0.191895, 0.352083}, {-0.283727, 0.35145},
{0.326415, 0.0742237}, {0.163984, 0.15982}, {0.0726181, 0.108651}, {-0.0800514, 0.114725},
{-0.0673361, 0.138093}, {0.0402953, 0.00961117}, {-0.0193168, 0.0236477}, {0, 0}},
{{-0.0790912, -0.0163216}, {-0.00448123, -0.162101}, {-0.352873, -0.196134}, {0.271462, 0.449512},
{0.35836, 0.383875}, {-0.286884, 0.565229}, {-0.550438, -0.0846486}, {-0.75899, -0.42121}, {0.528606, 0.119818},
{0.280538, 0.00168322}, {-0.0349212, 0.150096}, {-0.171099, 0.193366}, {-0.250974, 0.211407},
{0.280682, 0.0548899}, {0.126017, 0.143427}, {0.0562988, 0.110436}, {-0.0785227, 0.145239},
{-0.0937526, 0.190149}, {0.0791086, 0.0227095}, {-0.0545744, 0.0707386}, {0, 0}},
{{-0.0518157, 0.0510771}, {-0.00760212, -0.128097}, {-0.253754, -0.111841}, {0.205436, 0.354864},
{0.295866, 0.325402}, {-0.192075, 0.64807}, {-0.4774, -0.00676484}, {-0.722069, -0.332801}, {0.470923, 0.131373},
{0.244358, -0.00366888}, {-0.0555535, 0.0625726}, {-0.128642, 0.0933316}, {-0.239777, 0.136585},
{0.234046, 0.0562388}, {0.105223, 0.134278}, {0.0497268, 0.106459}, {-0.0606163, 0.175207},
{-0.106271, 0.232174}, {0.0538097, 0.0296093}, {-0.122383, 0.16238}, {-0.0113815, 0.0340113}},
{{-0.0304857, 0.0883196}, {0.00193379, -0.129688}, {-0.148195, -0.0572436}, {0.128477, 0.258454},
{0.18546, 0.230594}, {-0.120249, 0.694404}, {-0.326488, 0.130702}, {-0.599671, -0.166452}, {0.371228, 0.215584},
{0.18765, -0.00862734}, {-0.0530754, 0.00501476}, {-0.0781737, 0.0495139}, {-0.215913, 0.0922068},
{0.202485, 0.0708782}, {0.103985, 0.125369}, {0.0553649, 0.1009}, {-0.0397036, 0.199708}, {-0.0966645, 0.253069},
{-0.0153489, 0.0350904}, {-0.134291, 0.193388}, {-0.0315258, 0.0780417}},
{{-0.00909437, 0.0971829}, {0.00766774, -0.145809}, {-0.0755563, -0.0337505}, {0.0700629, 0.188928},
{0.109764, 0.175155}, {-0.084045, 0.707208}, {-0.200288, 0.246694}, {-0.431284, 0.0136518}, {0.274276, 0.314326},
{0.138397, -0.0136486}, {-0.033298, -0.019655}, {-0.0429267, 0.0341841}, {-0.195447, 0.0692005},
{0.188428, 0.0886883}, {0.112392, 0.115937}, {0.0568682, 0.0920568}, {-0.0238131, 0.214855},
{-0.0754228, 0.259851}, {-0.0881413, 0.0371697}, {-0.127762, 0.194639}, {-0.0700573, 0.173426}},
} | gfx/pdsample/quasisampler.go | 0.659186 | 0.400486 | quasisampler.go | starcoder |
package main
import (
"fmt"
"strings"
)
// Node is a server and instance value used in the hash ring. A key is
// mapped to one or more of the configured Node structs in the hash ring.
type Node struct {
Server string
Instance string
}
// XorShift generates a predictable random-ish hash from the given integer.
// This method is also used by carbon-c-relay for replication in a Jump
// hash ring.
// http://vigna.di.unimi.it/ftp/papers/xorshift.pdf
func XorShift(i uint64) uint64 {
i ^= i >> 12
i ^= i << 25
i ^= i >> 27
return i * 2685821657736338717
}
// Fnv1a64 returns a 64 bit hash of the given data using the FNV-1a hashing
// algorithm. Golang's libraries natively support this hashing, but I need
// something simpler.
func Fnv1a64(data []byte) uint64 {
var hash uint64 = 14695981039346656037
for _, d := range data {
hash = (hash ^ uint64(d)) * 1099511628211
}
return hash
}
// Jump returns a bucket index less that buckets using Google's Jump
// consistent hashing algorithm: http://arxiv.org/pdf/1406.2294.pdf
// Note that the return is int for convienance and will not be larger than
// an int32.
func Jump(key uint64, buckets int) int {
var b int64 = -1
var j int64 = 0
for j < int64(buckets) {
b = j
key = key*2862933555777941757 + 1
j = int64(float64(b+1) * (float64(1<<31) / float64(key>>33+1)))
}
return int(b)
}
// JumpHashRing stores the hashring information.
type JumpHashRing struct {
ring []Node
replicas int
}
// NewJumpHashRing creates a new hashring configured with the given replicas
// such that the number of solutions matches the number of replicas.
func NewJumpHashRing(replicas int) *JumpHashRing {
chr := new(JumpHashRing)
chr.replicas = replicas
return chr
}
// String displays the buckets in the hashring and their index numbers.
func (chr *JumpHashRing) String() string {
s := make([]string, 0)
s = append(s, "jump_fnv1a:")
for i := range chr.ring {
s = append(s, fmt.Sprintf("%3d:%s", i, chr.ring[i].Server))
}
return strings.Join(s, "\t")
}
// Replicas returns the number of replicas the hash ring is configured for.
func (chr *JumpHashRing) Replicas() int {
return chr.replicas
}
// Len returns the number of buckets in the hash ring.
func (chr *JumpHashRing) Len() int {
return len(chr.ring)
}
// Nodes returns the Nodes in the hashring
func (chr *JumpHashRing) Nodes() []Node {
return chr.ring
}
// AddNode adds a Node to the Jump Hash Ring. Jump only operates on the
// number of buckets so we assume that AddNode will not be used to attempt
// to insert a Node in the middle of the ring as that will affect the mapping
// of buckets to server addresses. This uses the instance value to define
// an order of the slice of Nodes. Empty ("") instance values will be
// appended to the end of the slice.
func (chr *JumpHashRing) AddNode(node Node) {
if node.Instance == "" {
chr.ring = append(chr.ring, node)
} else {
i := 0
for i = 0; i < chr.Len() && node.Instance <= chr.ring[i].Instance; i++ {
}
chr.ring = append(chr.ring, node) // Make room
copy(chr.ring[i+1:], chr.ring[i:]) // Shuffle array
chr.ring[i] = node // insert new node
}
}
// RemoveNode removes the last node in the ring regardless of the value of
// the given node which is here to implement our interface.
func (chr *JumpHashRing) RemoveNode(node Node) {
chr.ring = chr.ring[:len(chr.ring)-1]
}
// GetNode returns a bucket for the given key using Google's Jump Hash
// algorithm.
func (chr *JumpHashRing) GetNode(key string) Node {
var key64 uint64 = Fnv1a64([]byte(key))
idx := Jump(key64, len(chr.ring))
//fmt.Printf("JUMP: %s => %x => %d\n", key, key64, idx)
return chr.ring[idx]
}
// GetNodes returns a slice of Node objects one for each replica where the
// object is stored.
func (chr *JumpHashRing) GetNodes(key string) []Node {
ring := make([]Node, 0)
ret := make([]Node, 0)
h := Fnv1a64([]byte(key))
i := len(chr.ring)
j := 0
r := chr.replicas
// We need to alter the ring as we go along, make a safe place
copy(ring, chr.ring)
for i > 0 {
j = Jump(h, i)
ret = append(ret, chr.ring[j])
if r--; r <= 0 {
break
}
// Generate a new unique hash
h = XorShift(h)
// Remove the previously selected bucket from our list
i--
ring[j] = ring[i]
}
return ret
} | jump.go | 0.756178 | 0.451568 | jump.go | starcoder |
package utils
import "strconv"
//ODSAData is the structure for the compressed data
type ODSAData struct {
//pMap is the position map of the compressed data
pMap map[byte]int
//lPosition is the last position of the compressed data
lPosition int
//lLetter is the last letter in the transformed data
lLetter byte
//pCArray is the position-character mapping array
pCArray []byte
//pIArray is the position-indedx mapping array
pIArray []int
//nCArray is the character array of noise
nCArray []byte
//nIArray is the index mapping of the noise with the character
nIArray []int
//chunkInfo has the index information about the chunk
chunkInfo int
}
/*ChanData is the wrapper struct for Data to
be send between the go routines
*/
type ChanData struct {
Data
Index int
}
/*ChanByte is the wrapper struct for Index Byte array to
be send between the go routines
*/
type ChanByte struct {
Output []byte
Index int
}
//GetLastPos returns the last position of the data
func (data *ODSAData) GetLastPos() int {
return data.lPosition
}
//SetLastPos sets the last position of the data
func (data *ODSAData) SetLastPos(pos int) {
data.lPosition = pos
}
//GetLastLetter returns the last letter of the data
func (data *ODSAData) GetLastLetter() byte {
return data.lLetter
}
//SetLastLetter sets the last letter of the data
func (data *ODSAData) SetLastLetter(char byte) {
data.lLetter = char
}
//ToString converts the data to plain text
func (data *ODSAData) ToString() string {
output := "position -> ["
length := len(data.pIArray)
/*
Iterating through the position arrays to get the value
*/
for i := 0; i < length; i++ {
output += " " + string(data.pCArray[i]) + " : " + strconv.Itoa(data.pIArray[i])
if i < length-1 {
output += ","
}
}
output += " ]\nnoise -> ["
length = len(data.nIArray)
/*
Iterating through the noise arrays to get the value
*/
for i := 0; i < length; i++ {
output += " " + string(data.nCArray[i]) + " : " + strconv.Itoa(data.nIArray[i])
if i < length-1 {
output += ","
}
}
output += " ]\nlast letter -> " + strconv.Itoa(data.lPosition)
//Returning the output text
return output
}
//Data interface to protect the Data
type Data interface {
AddData(byte) bool
GetData() []byte
ToString() string
GetLastPos() int
SetLastPos(int)
GetLastLetter() byte
SetLastLetter(byte)
} | utils/datastructures.go | 0.576661 | 0.458591 | datastructures.go | starcoder |
package golist
import (
"fmt"
"math/rand"
"time"
)
// SliceError is a slice of type error.
// Methods return the receiver so calls can be chained; most methods
// accept a nil receiver and return nil in that case.
type SliceError struct {
	// data holds the elements. The zero value (nil) behaves as an
	// empty slice for append/len/range, so no explicit init is needed.
	data []error
}
// NewSliceError returns a pointer to a new SliceError initialized with the
// specified elements. The elements are copied into a fresh backing array,
// so later mutation of the caller's slice does not affect the result.
func NewSliceError(elems ...error) *SliceError {
	s := new(SliceError)
	s.data = make([]error, len(elems))
	// copy lowers to memmove; no need for an element-by-element loop.
	copy(s.data, elems)
	return s
}
// Append adds the elements to the end of SliceError.
// A nil receiver is left untouched and nil is returned.
func (s *SliceError) Append(elems ...error) *SliceError {
	if s != nil {
		s.data = append(s.data, elems...)
		return s
	}
	return nil
}
// Prepend adds the elements to the beginning of SliceError.
// A nil receiver is left untouched and nil is returned.
//
// The combined contents are written into a fresh backing array. The
// previous implementation used append(elems, s.data...), which can write
// past len(elems) into the caller's backing array when the caller passes
// a sub-slice with spare capacity, silently corrupting the caller's data.
func (s *SliceError) Prepend(elems ...error) *SliceError {
	if s == nil {
		return nil
	}
	data := make([]error, 0, len(elems)+len(s.data))
	data = append(data, elems...)
	data = append(data, s.data...)
	s.data = data
	return s
}
// At returns the element in SliceError at the specified index.
// It panics when the slice is empty or when index is out of range.
func (s *SliceError) At(index int) error {
	// len of a nil slice is 0, so a separate nil check is redundant.
	if len(s.data) == 0 {
		panic("SliceError does not contain any elements")
	}
	if index >= len(s.data) || index < 0 {
		panic(fmt.Sprintf("index %d outside the range of SliceError", index))
	}
	return s.data[index]
}
// Set sets the element of SliceError at the specified index.
// A nil receiver is left untouched and nil is returned.
func (s *SliceError) Set(index int, elem error) *SliceError {
	if s != nil {
		s.data[index] = elem
		return s
	}
	return nil
}
// Insert inserts the elements into SliceError at the specified index.
// Existing elements from index onward are shifted after the new ones.
// A nil receiver is left untouched and nil is returned.
func (s *SliceError) Insert(index int, elems ...error) *SliceError {
	if s == nil {
		return nil
	}
	// Assemble the result in a single pre-sized slice:
	// prefix, then the new elements, then the remaining suffix.
	merged := make([]error, 0, len(s.data)+len(elems))
	merged = append(merged, s.data[:index]...)
	merged = append(merged, elems...)
	merged = append(merged, s.data[index:]...)
	s.data = merged
	return s
}
// Remove removes the element from SliceError at the specified index.
func (s *SliceError) Remove(index int) *SliceError {
if s == nil {
return nil
}
s.data = append(s.data[:index], s.data[index+1:]...)
return s
}
// Filter removes elements from SliceError that do not satisfy the filter function.
func (s *SliceError) Filter(fn func(elem error) bool) *SliceError {
if s == nil {
return nil
}
data := s.data[:0]
for _, elem := range s.data {
if fn(elem) {
data = append(data, elem)
}
}
s.data = data
return s
}
// Transform modifies each element of SliceError according to the specified function.
func (s *SliceError) Transform(fn func(elem error) error) *SliceError {
if s == nil {
return nil
}
for i, elem := range s.data {
s.data[i] = fn(elem)
}
return s
}
// Unique modifies SliceError to keep only the first occurrence of each element (removing any duplicates).
func (s *SliceError) Unique() *SliceError {
if s == nil {
return nil
}
seen := make(map[error]struct{})
data := s.data[:0]
for _, elem := range s.data {
if _, ok := seen[elem]; !ok {
data = append(data, elem)
seen[elem] = struct{}{}
}
}
s.data = data
return s
}
// Reverse reverses the order of the elements of SliceError.
func (s *SliceError) Reverse() *SliceError {
if s == nil {
return nil
}
for i := len(s.data)/2 - 1; i >= 0; i-- {
opp := len(s.data) - 1 - i
s.Swap(i, opp)
}
return s
}
// Shuffle randomly shuffles the order of the elements in SliceError.
func (s *SliceError) Shuffle(seed int64) *SliceError {
if s == nil {
return nil
}
if seed == 0 {
seed = time.Now().UnixNano()
}
r := rand.New(rand.NewSource(seed))
r.Shuffle(s.Count(), s.Swap)
return s
}
// Data returns the raw elements of SliceError.
func (s *SliceError) Data() []error {
if s == nil {
return nil
}
return s.data
}
// Count returns the number of elements in SliceError.
func (s *SliceError) Count() int {
return len(s.data)
}
// Len returns the number of elements in SliceError (alias for Count).
func (s *SliceError) Len() int {
return s.Count()
}
// Swap swaps the elements in SliceError specified by the indices i and j.
func (s *SliceError) Swap(i, j int) {
s.data[i], s.data[j] = s.data[j], s.data[i]
}
// Clone performs a deep copy of SliceError and returns it
func (s *SliceError) Clone() *SliceError {
if s == nil {
return nil
}
s2 := new(SliceError)
s2.data = make([]error, len(s.data))
copy(s2.data, s.data)
return s2
}
// Equal returns true if the SliceError is logically equivalent to the specified SliceError.
func (s *SliceError) Equal(s2 *SliceError) bool {
if s == s2 {
return true
}
if s == nil || s2 == nil {
return false // has to be false because s == s2 tested earlier
}
if len(s.data) != len(s2.data) {
return false
}
for i, elem := range s.data {
if elem != s2.data[i] {
return false
}
}
return true
} | slice_error.go | 0.762247 | 0.443058 | slice_error.go | starcoder |
package xex
import (
"fmt"
"math"
"reflect"
)
//Set up built-in number functions
func registerNumberBuiltins() {
RegisterFunction(
NewFunction(
"add",
FunctionDocumentation{
Text: `adds two numbers returning a single numerical result`,
Parameters: map[string]string{
"num1": "The first number to add.",
"num2": "The second number to add.",
},
},
func(num1, num2 interface{}) (interface{}, error) {
switch n1 := num1.(type) {
//First make sure we got a number in num1
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
//Then make sure num2 is the same type
if reflect.TypeOf(num1) != reflect.TypeOf(num2) {
return 0, fmt.Errorf("add cannot use different types (%s & %s) - convert them first", reflect.TypeOf(num1).Name(), reflect.TypeOf(num2).Name())
}
//Then do all the conversions & additions (now we know both types are the same)
switch n2 := num2.(type) {
case int:
return n1.(int) + n2, nil
case int8:
return n1.(int8) + n2, nil
case int16:
return n1.(int16) + n2, nil
case int32:
return n1.(int32) + n2, nil
case int64:
return n1.(int64) + n2, nil
case uint:
return n1.(uint) + n2, nil
case uint8:
return n1.(uint8) + n2, nil
case uint16:
return n1.(uint16) + n2, nil
case uint32:
return n1.(uint32) + n2, nil
case uint64:
return n1.(uint64) + n2, nil
case float32:
return n1.(float32) + n2, nil
case float64:
return n1.(float64) + n2, nil
}
}
return 0, fmt.Errorf("add can only add numeric types, not %s and %s", reflect.TypeOf(num1).Name(), reflect.TypeOf(num2).Name())
},
),
)
RegisterFunction(
NewFunction(
"subtract",
FunctionDocumentation{
Text: `subtracts two numbers returning a single numerical result`,
Parameters: map[string]string{
"minuend": "The initial number to subtract from.",
"subtrahend": "The value to subreact from minuend.",
},
},
func(minuend, subtrahend interface{}) (interface{}, error) {
switch n1 := minuend.(type) {
//First make sure we got a number in num1
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
//Then make sure num2 is the same type
if reflect.TypeOf(minuend) != reflect.TypeOf(subtrahend) {
return 0, fmt.Errorf("subtract cannot use different types (%s & %s) - convert them first", reflect.TypeOf(minuend).Name(), reflect.TypeOf(subtrahend).Name())
}
//Then do all the conversions & additions (now we know both types are the same)
switch n2 := subtrahend.(type) {
case int:
return n1.(int) - n2, nil
case int8:
return n1.(int8) - n2, nil
case int16:
return n1.(int16) - n2, nil
case int32:
return n1.(int32) - n2, nil
case int64:
return n1.(int64) - n2, nil
case uint:
return n1.(uint) - n2, nil
case uint8:
return n1.(uint8) - n2, nil
case uint16:
return n1.(uint16) - n2, nil
case uint32:
return n1.(uint32) - n2, nil
case uint64:
return n1.(uint64) - n2, nil
case float32:
return n1.(float32) - n2, nil
case float64:
return n1.(float64) - n2, nil
}
}
return 0, fmt.Errorf("subtract can only subtract numeric types, not %s and %s", reflect.TypeOf(minuend).Name(), reflect.TypeOf(subtrahend).Name())
},
),
)
RegisterFunction(
NewFunction(
"multiply",
FunctionDocumentation{
Text: `multiplies two numbers returning a single numerical result`,
Parameters: map[string]string{
"multiplicand": "The number to be multiplied.",
"multiplier": "The number to multiply by.",
},
},
func(multiplicand, multiplier interface{}) (interface{}, error) {
switch n1 := multiplicand.(type) {
//First make sure we got a number in num1
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
//Then make sure num2 is the same type
if reflect.TypeOf(multiplicand) != reflect.TypeOf(multiplier) {
return 0, fmt.Errorf("multiply cannot use different types (%s & %s) - convert them first", reflect.TypeOf(multiplicand).Name(), reflect.TypeOf(multiplier).Name())
}
//Then do all the conversions & additions (now we know both types are the same)
switch n2 := multiplier.(type) {
case int:
return n1.(int) * n2, nil
case int8:
return n1.(int8) * n2, nil
case int16:
return n1.(int16) * n2, nil
case int32:
return n1.(int32) * n2, nil
case int64:
return n1.(int64) * n2, nil
case uint:
return n1.(uint) * n2, nil
case uint8:
return n1.(uint8) * n2, nil
case uint16:
return n1.(uint16) * n2, nil
case uint32:
return n1.(uint32) * n2, nil
case uint64:
return n1.(uint64) * n2, nil
case float32:
return n1.(float32) * n2, nil
case float64:
return n1.(float64) * n2, nil
}
}
return 0, fmt.Errorf("multiply can only add numeric types, not %s and %s", reflect.TypeOf(multiplicand).Name(), reflect.TypeOf(multiplier).Name())
},
),
)
RegisterFunction(
NewFunction(
"divide",
FunctionDocumentation{
Text: `divides two numbers returning a single numerical result`,
Parameters: map[string]string{
"dividend": "The number to be divided.",
"divisor": "The number to divide by.",
},
},
func(dividend, divisor interface{}) (interface{}, error) {
switch n1 := dividend.(type) {
//First make sure we got a number in num1
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
//Then make sure num2 is the same type
if reflect.TypeOf(dividend) != reflect.TypeOf(divisor) {
return 0, fmt.Errorf("divide cannot use different types (%s & %s) - convert them first", reflect.TypeOf(dividend).Name(), reflect.TypeOf(divisor).Name())
}
//Then do all the conversions & additions (now we know both types are the same)
switch n2 := divisor.(type) {
case int:
return n1.(int) / n2, nil
case int8:
return n1.(int8) / n2, nil
case int16:
return n1.(int16) / n2, nil
case int32:
return n1.(int32) / n2, nil
case int64:
return n1.(int64) / n2, nil
case uint:
return n1.(uint) / n2, nil
case uint8:
return n1.(uint8) / n2, nil
case uint16:
return n1.(uint16) / n2, nil
case uint32:
return n1.(uint32) / n2, nil
case uint64:
return n1.(uint64) / n2, nil
case float32:
return n1.(float32) / n2, nil
case float64:
return n1.(float64) / n2, nil
}
}
return 0, fmt.Errorf("divide can only divide numeric types, not %s and %s", reflect.TypeOf(dividend).Name(), reflect.TypeOf(divisor).Name())
},
),
)
RegisterFunction(
NewFunction(
"pow",
FunctionDocumentation{
Text: `pow returns x to the power of y (x**y).`,
Parameters: map[string]string{
"x": "The base number.",
"y": "The exponent (number of times x is multiplied by itself).",
},
},
math.Pow,
),
)
RegisterFunction(
NewFunction(
"mod",
FunctionDocumentation{
Text: `mod returns the remainder of dividend divided by divisor.`,
Parameters: map[string]string{
"dividend": "The number to be divided.",
"divisor": "The number to divide by.",
},
},
math.Mod,
),
)
RegisterFunction(
NewFunction(
"int",
FunctionDocumentation{
Text: `int converts the passed in value to an int or returns a error if conversion isn't possible`,
},
func(number interface{}) (int, error) {
switch num := number.(type) {
case int:
return num, nil
case int8:
return int(num), nil
case int16:
return int(num), nil
case int32:
return int(num), nil
case int64:
return int(num), nil
case uint:
return int(num), nil
case uint8:
return int(num), nil
case uint16:
return int(num), nil
case uint32:
return int(num), nil
case uint64:
return int(num), nil
case float32:
return int(num), nil
case float64:
return int(num), nil
}
return 0, fmt.Errorf("cannot convert %s to int", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"int8",
FunctionDocumentation{
Text: `int8 converts the passed in value to an int8 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (int8, error) {
switch num := number.(type) {
case int:
return int8(num), nil
case int8:
return num, nil
case int16:
return int8(num), nil
case int32:
return int8(num), nil
case int64:
return int8(num), nil
case uint:
return int8(num), nil
case uint8:
return int8(num), nil
case uint16:
return int8(num), nil
case uint32:
return int8(num), nil
case uint64:
return int8(num), nil
case float32:
return int8(num), nil
case float64:
return int8(num), nil
}
return 0, fmt.Errorf("cannot convert %s to int8", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"int16",
FunctionDocumentation{
Text: `int16 converts the passed in value to an int16 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (int16, error) {
switch num := number.(type) {
case int:
return int16(num), nil
case int8:
return int16(num), nil
case int16:
return num, nil
case int32:
return int16(num), nil
case int64:
return int16(num), nil
case uint:
return int16(num), nil
case uint8:
return int16(num), nil
case uint16:
return int16(num), nil
case uint32:
return int16(num), nil
case uint64:
return int16(num), nil
case float32:
return int16(num), nil
case float64:
return int16(num), nil
}
return 0, fmt.Errorf("cannot convert %s to int16", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"int32",
FunctionDocumentation{
Text: `int32 converts the passed in value to an int32 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (int32, error) {
switch num := number.(type) {
case int:
return int32(num), nil
case int8:
return int32(num), nil
case int16:
return int32(num), nil
case int32:
return num, nil
case int64:
return int32(num), nil
case uint:
return int32(num), nil
case uint8:
return int32(num), nil
case uint16:
return int32(num), nil
case uint32:
return int32(num), nil
case uint64:
return int32(num), nil
case float32:
return int32(num), nil
case float64:
return int32(num), nil
}
return 0, fmt.Errorf("cannot convert %s to int32", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"int64",
FunctionDocumentation{
Text: `int64 converts the passed in value to an int64 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (int64, error) {
switch num := number.(type) {
case int:
return int64(num), nil
case int8:
return int64(num), nil
case int16:
return int64(num), nil
case int32:
return int64(num), nil
case int64:
return num, nil
case uint:
return int64(num), nil
case uint8:
return int64(num), nil
case uint16:
return int64(num), nil
case uint32:
return int64(num), nil
case uint64:
return int64(num), nil
case float32:
return int64(num), nil
case float64:
return int64(num), nil
}
return 0, fmt.Errorf("cannot convert %s to int64", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"uint",
FunctionDocumentation{
Text: `uint converts the passed in value to an uint or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (uint, error) {
switch num := number.(type) {
case int:
return uint(num), nil
case int8:
return uint(num), nil
case int16:
return uint(num), nil
case int32:
return uint(num), nil
case int64:
return uint(num), nil
case uint:
return num, nil
case uint8:
return uint(num), nil
case uint16:
return uint(num), nil
case uint32:
return uint(num), nil
case uint64:
return uint(num), nil
case float32:
return uint(num), nil
case float64:
return uint(num), nil
}
return 0, fmt.Errorf("cannot convert %s to uint", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"uint8",
FunctionDocumentation{
Text: `uint8 converts the passed in value to an uint8 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (uint8, error) {
switch num := number.(type) {
case int:
return uint8(num), nil
case int8:
return uint8(num), nil
case int16:
return uint8(num), nil
case int32:
return uint8(num), nil
case int64:
return uint8(num), nil
case uint:
return uint8(num), nil
case uint8:
return num, nil
case uint16:
return uint8(num), nil
case uint32:
return uint8(num), nil
case uint64:
return uint8(num), nil
case float32:
return uint8(num), nil
case float64:
return uint8(num), nil
}
return 0, fmt.Errorf("cannot convert %s to uint8", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"uint16",
FunctionDocumentation{
Text: `uint16 converts the passed in value to an uint16 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (uint16, error) {
switch num := number.(type) {
case int:
return uint16(num), nil
case int8:
return uint16(num), nil
case int16:
return uint16(num), nil
case int32:
return uint16(num), nil
case int64:
return uint16(num), nil
case uint:
return uint16(num), nil
case uint8:
return uint16(num), nil
case uint16:
return num, nil
case uint32:
return uint16(num), nil
case uint64:
return uint16(num), nil
case float32:
return uint16(num), nil
case float64:
return uint16(num), nil
}
return 0, fmt.Errorf("cannot convert %s to uint16", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"uint32",
FunctionDocumentation{
Text: `uint32 converts the passed in value to an uint32 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (uint32, error) {
switch num := number.(type) {
case int:
return uint32(num), nil
case int8:
return uint32(num), nil
case int16:
return uint32(num), nil
case int32:
return uint32(num), nil
case int64:
return uint32(num), nil
case uint:
return uint32(num), nil
case uint8:
return uint32(num), nil
case uint16:
return uint32(num), nil
case uint32:
return num, nil
case uint64:
return uint32(num), nil
case float32:
return uint32(num), nil
case float64:
return uint32(num), nil
}
return 0, fmt.Errorf("cannot convert %s to uint32", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"uint64",
FunctionDocumentation{
Text: `uint64 converts the passed in value to an uint64 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (uint64, error) {
switch num := number.(type) {
case int:
return uint64(num), nil
case int8:
return uint64(num), nil
case int16:
return uint64(num), nil
case int32:
return uint64(num), nil
case int64:
return uint64(num), nil
case uint:
return uint64(num), nil
case uint8:
return uint64(num), nil
case uint16:
return uint64(num), nil
case uint32:
return uint64(num), nil
case uint64:
return num, nil
case float32:
return uint64(num), nil
case float64:
return uint64(num), nil
}
return 0, fmt.Errorf("cannot convert %s to uint64", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"float32",
FunctionDocumentation{
Text: `float32 converts the passed in value to an float32 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (float32, error) {
switch num := number.(type) {
case int:
return float32(num), nil
case int8:
return float32(num), nil
case int16:
return float32(num), nil
case int32:
return float32(num), nil
case int64:
return float32(num), nil
case uint:
return float32(num), nil
case uint8:
return float32(num), nil
case uint16:
return float32(num), nil
case uint32:
return float32(num), nil
case uint64:
return float32(num), nil
case float32:
return num, nil
case float64:
return float32(num), nil
}
return 0, fmt.Errorf("cannot convert %s to float32", reflect.TypeOf(number).Name())
},
),
)
RegisterFunction(
NewFunction(
"float64",
FunctionDocumentation{
Text: `float64 converts the passed in value to an float64 or returns a error if conversion isn't possible`,
Parameters: map[string]string{
"number": "The number to convert.",
},
},
func(number interface{}) (float64, error) {
switch num := number.(type) {
case int:
return float64(num), nil
case int8:
return float64(num), nil
case int16:
return float64(num), nil
case int32:
return float64(num), nil
case int64:
return float64(num), nil
case uint:
return float64(num), nil
case uint8:
return float64(num), nil
case uint16:
return float64(num), nil
case uint32:
return float64(num), nil
case uint64:
return float64(num), nil
case float32:
return float64(num), nil
case float64:
return num, nil
}
return 0, fmt.Errorf("cannot convert %s to float64", reflect.TypeOf(number).Name())
},
),
)
} | builtins_numbers.go | 0.653459 | 0.560012 | builtins_numbers.go | starcoder |
package band
import (
"fmt"
"math"
)
// ChMaskCntlPair pairs a ChMaskCntl with a mask.
type ChMaskCntlPair struct {
	Cntl uint8    // ChMaskCntl value; selects how Mask is interpreted by the parseChMask* functions
	Mask [16]bool // 16-entry channel mask whose meaning depends on Cntl
}
// parseChMask maps each mask bit to its absolute channel index, starting at
// offset. It panics when the highest index would not fit in a uint8.
func parseChMask(offset uint8, mask ...bool) map[uint8]bool {
	if len(mask)-1 > int(math.MaxUint8-offset) {
		panic(fmt.Sprintf("channel mask overflows uint8, offset: %d, mask length: %d", offset, len(mask)))
	}
	chans := make(map[uint8]bool, len(mask))
	for i := range mask {
		chans[offset+uint8(i)] = mask[i]
	}
	return chans
}
// parseChMask16 expands a 16-channel mask according to cntl: cntl 0 applies
// the mask verbatim, cntl 6 enables all 16 channels; other values are
// unsupported.
func parseChMask16(mask [16]bool, cntl uint8) (map[uint8]bool, error) {
	switch cntl {
	case 0:
		return parseChMask(0, mask[:]...), nil
	case 6:
		all := make([]bool, 16)
		for i := range all {
			all[i] = true
		}
		return parseChMask(0, all...), nil
	}
	return nil, errUnsupportedChMaskCntl.WithAttributes("chmaskcntl", cntl)
}
// parseChMask72 expands a 16-channel mask to a 72-channel band according to
// cntl: 0..3 address rows of 16 channels, 4 addresses channels 64..71,
// 5 toggles 8-channel blocks, and 6/7 turn all of channels 0..63 on/off while
// channels 64..71 follow the first 8 mask bits.
func parseChMask72(mask [16]bool, cntl uint8) (map[uint8]bool, error) {
	switch cntl {
	case 0, 1, 2, 3:
		return parseChMask(cntl*16, mask[:]...), nil
	case 4:
		return parseChMask(64, mask[0:8]...), nil
	case 5:
		// Each of the first 8 mask bits toggles one block of 8 channels.
		chs := make([]bool, 64)
		for i := range chs {
			chs[i] = mask[i/8]
		}
		return parseChMask(0, chs...), nil
	case 6, 7:
		// Channels 0..63 all on (cntl 6) or all off (cntl 7); channels
		// 64..71 are taken from the first 8 mask bits.
		chs := make([]bool, 72)
		for i := 0; i < 64; i++ {
			chs[i] = cntl == 6
		}
		copy(chs[64:], mask[0:8])
		return parseChMask(0, chs...), nil
	}
	return nil, errUnsupportedChMaskCntl.WithAttributes("chmaskcntl", cntl)
}
// parseChMask96 expands a 16-channel mask to a 96-channel band according to
// cntl: 0..5 address rows of 16 channels, 6 enables all 96 channels.
func parseChMask96(mask [16]bool, cntl uint8) (map[uint8]bool, error) {
	switch cntl {
	case 0, 1, 2, 3, 4, 5:
		return parseChMask(cntl*16, mask[:]...), nil
	case 6:
		all := make([]bool, 96)
		for i := range all {
			all[i] = true
		}
		return parseChMask(0, all...), nil
	}
	return nil, errUnsupportedChMaskCntl.WithAttributes("chmaskcntl", cntl)
}
// boolsTo16BoolArray copies up to 16 booleans into a fixed-size [16]bool,
// zero-filling the rest. It panics when more than 16 values are supplied.
func boolsTo16BoolArray(vs ...bool) [16]bool {
	if len(vs) > 16 {
		panic(fmt.Sprintf("length of vs must be less or equal to 16, got %d", len(vs)))
	}
	var out [16]bool
	copy(out[:], vs)
	return out
}
// generateChMask16 returns the single ChMaskCntl==0 pair that sets a
// 16-channel band to desiredChs. Both inputs must have exactly 16 entries.
func generateChMask16(currentChs, desiredChs []bool) ([]ChMaskCntlPair, error) {
	if len(currentChs) != 16 || len(desiredChs) != 16 {
		return nil, errInvalidChannelCount.New()
	}
	// NOTE: ChMaskCntl==6 never provides a more optimal ChMask sequence than ChMaskCntl==0.
	pair := ChMaskCntlPair{Mask: boolsTo16BoolArray(desiredChs...)}
	return []ChMaskCntlPair{pair}, nil
}
// EqualChMasks returns true if both channel masks are equal.
func EqualChMasks(a, b []bool) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if v != b[i] {
			return false
		}
	}
	return true
}
// generateChMaskMatrix appends to pairs one ChMaskCntlPair per 16-channel row
// in which currentChs and desiredChs differ; the row index becomes the Cntl
// value. The channel count must be a multiple of 16 and equal for both masks.
func generateChMaskMatrix(pairs []ChMaskCntlPair, currentChs, desiredChs []bool) ([]ChMaskCntlPair, error) {
	n := len(currentChs)
	if n%16 != 0 || len(desiredChs) != n {
		return nil, errInvalidChannelCount.New()
	}
	for row := 0; row < n/16; row++ {
		lo, hi := 16*row, 16*row+16
		if EqualChMasks(currentChs[lo:hi], desiredChs[lo:hi]) {
			continue
		}
		pairs = append(pairs, ChMaskCntlPair{
			Cntl: uint8(row),
			Mask: boolsTo16BoolArray(desiredChs[lo:hi]...),
		})
	}
	return pairs, nil
}
// trueCount returns the number of true values in vs.
func trueCount(vs ...bool) int {
	count := 0
	for _, set := range vs {
		if set {
			count++
		}
	}
	return count
}
// generateChMask72Generic computes a ChMask/ChMaskCntl command sequence that
// moves a 72-channel band from currentChs to desiredChs without using
// ChMaskCntl==5, preferring the shortest sequence it can find.
func generateChMask72Generic(currentChs, desiredChs []bool) ([]ChMaskCntlPair, error) {
	if len(currentChs) != 72 || len(desiredChs) != 72 {
		return nil, errInvalidChannelCount.New()
	}
	if EqualChMasks(currentChs, desiredChs) {
		return []ChMaskCntlPair{
			{
				Mask: boolsTo16BoolArray(desiredChs[0:16]...),
			},
		}, nil
	}
	on125 := trueCount(desiredChs[0:64]...)
	switch on125 {
	case 0:
		// All 125 kHz channels off: a single ChMaskCntl==7 command suffices.
		return []ChMaskCntlPair{
			{
				Cntl: 7,
				Mask: boolsTo16BoolArray(desiredChs[64:72]...),
			},
		}, nil
	case 64:
		// All 125 kHz channels on: a single ChMaskCntl==6 command suffices.
		return []ChMaskCntlPair{
			{
				Cntl: 6,
				Mask: boolsTo16BoolArray(desiredChs[64:72]...),
			},
		}, nil
	}
	pairs, err := generateChMaskMatrix(make([]ChMaskCntlPair, 0, 5), currentChs[0:64], desiredChs[0:64])
	if err != nil {
		return nil, err
	}
	// Channels 64..71 need a ChMaskCntl==4 pair when any of them changes.
	// FIX: start at 64 — the previous loop started at 65, silently missing a
	// change that affected channel 64 only.
	for i := 64; i < 72; i++ {
		if currentChs[i] != desiredChs[i] {
			pairs = append(pairs, ChMaskCntlPair{
				Cntl: 4,
				Mask: boolsTo16BoolArray(desiredChs[64:72]...),
			})
			break
		}
	}
	if len(pairs) <= 2 {
		return pairs, nil
	}
	// Count amount of pairs required assuming either ChMaskCntl==6 or
	// ChMaskCntl==7 is sent first. The minimum amount of pairs required in
	// such case will be 2, hence only attempt this if the amount of generated
	// pairs so far is higher than 2.
	allOn := make([]bool, 64)
	for i := range allOn {
		allOn[i] = true
	}
	allOff := make([]bool, 64)
	cntl6Pairs, err := generateChMaskMatrix(make([]ChMaskCntlPair, 0, 4), allOn, desiredChs[0:64])
	if err != nil {
		return nil, err
	}
	cntl7Pairs, err := generateChMaskMatrix(make([]ChMaskCntlPair, 0, 4), allOff, desiredChs[0:64])
	if err != nil {
		return nil, err
	}
	switch {
	case len(pairs) <= 1+len(cntl6Pairs) && len(pairs) <= 1+len(cntl7Pairs):
		return pairs, nil
	case len(cntl6Pairs) < len(cntl7Pairs):
		return append(append(make([]ChMaskCntlPair, 0, 1+len(cntl6Pairs)), ChMaskCntlPair{
			Cntl: 6,
			Mask: boolsTo16BoolArray(desiredChs[64:72]...),
		}), cntl6Pairs...), nil
	default:
		return append(append(make([]ChMaskCntlPair, 0, 1+len(cntl7Pairs)), ChMaskCntlPair{
			Cntl: 7,
			Mask: boolsTo16BoolArray(desiredChs[64:72]...),
		}), cntl7Pairs...), nil
	}
}
// makeGenerateChMask72 returns the ChMask generator for 72-channel bands.
// When supportChMaskCntl5 is true, the returned function additionally
// evaluates a ChMaskCntl==5 (block-toggle) based sequence and returns it when
// it is strictly shorter than the generic result.
func makeGenerateChMask72(supportChMaskCntl5 bool) func([]bool, []bool) ([]ChMaskCntlPair, error) {
	if !supportChMaskCntl5 {
		return generateChMask72Generic
	}
	return func(currentChs, desiredChs []bool) ([]ChMaskCntlPair, error) {
		pairs, err := generateChMask72Generic(currentChs, desiredChs)
		if err != nil {
			return nil, err
		}
		if len(pairs) <= 1 {
			return pairs, nil
		}
		// An FSB bit is set when all 8 channels of its block are desired on.
		var fsbs [8]bool
		for i := 0; i < 8; i++ {
			if trueCount(desiredChs[8*i:8*i+8]...) == 8 {
				fsbs[i] = true
			}
		}
		if n := trueCount(fsbs[:]...); n == 0 || n == 8 {
			// Since there are either no enabled FSBs, or no disabled FSBs we
			// won't be able to compute a more efficient result than one using
			// ChMaskCntl==6 or ChMaskCntl==7.
			return pairs, nil
		}
		// State of channels 0..63 after a ChMaskCntl==5 command: every block
		// of 8 channels follows its FSB bit.
		afterCntl5 := make([]bool, 64)
		for i := range afterCntl5 {
			afterCntl5[i] = fsbs[i/8]
		}
		cntl5Pairs, err := generateChMaskMatrix(make([]ChMaskCntlPair, 0, 5), afterCntl5, desiredChs[0:64])
		if err != nil {
			return nil, err
		}
		// FIX: start at 64 — the previous loop started at 65 and missed a
		// change affecting channel 64 only.
		for i := 64; i < 72; i++ {
			if currentChs[i] != desiredChs[i] {
				cntl5Pairs = append(cntl5Pairs, ChMaskCntlPair{
					Cntl: 4,
					Mask: boolsTo16BoolArray(desiredChs[64:72]...),
				})
				break
			}
		}
		if len(pairs) <= 1+len(cntl5Pairs) {
			return pairs, nil
		}
		return append(append(make([]ChMaskCntlPair, 0, 1+len(cntl5Pairs)), ChMaskCntlPair{
			Cntl: 5,
			Mask: boolsTo16BoolArray(fsbs[:]...),
		}), cntl5Pairs...), nil
	}
}
// generateChMask96 computes a ChMask/ChMaskCntl command sequence that moves a
// 96-channel band from currentChs to desiredChs, preferring the shortest
// sequence it can find.
func generateChMask96(currentChs, desiredChs []bool) ([]ChMaskCntlPair, error) {
	if len(currentChs) != 96 || len(desiredChs) != 96 {
		return nil, errInvalidChannelCount.New()
	}
	if EqualChMasks(currentChs, desiredChs) {
		return []ChMaskCntlPair{{Mask: boolsTo16BoolArray(desiredChs[0:16]...)}}, nil
	}
	if trueCount(desiredChs...) == 96 {
		// All channels on: a single ChMaskCntl==6 command suffices.
		return []ChMaskCntlPair{{Cntl: 6}}, nil
	}
	pairs, err := generateChMaskMatrix(make([]ChMaskCntlPair, 0, 6), currentChs, desiredChs)
	if err != nil {
		return nil, err
	}
	if len(pairs) <= 2 {
		return pairs, nil
	}
	// Count amount of pairs required assuming ChMaskCntl==6 is sent first.
	// The minimum amount of pairs required in such case will be 2, hence only
	// attempt this if the amount of generated pairs so far is higher than 2.
	allOn := make([]bool, 96)
	for i := range allOn {
		allOn[i] = true
	}
	cntl6Pairs, err := generateChMaskMatrix(make([]ChMaskCntlPair, 0, 6), allOn, desiredChs)
	if err != nil {
		return nil, err
	}
	if len(pairs) <= 1+len(cntl6Pairs) {
		return pairs, nil
	}
	return append(append(make([]ChMaskCntlPair, 0, 1+len(cntl6Pairs)), ChMaskCntlPair{
		Cntl: 6,
	}), cntl6Pairs...), nil
}
// uint64Ptr returns a pointer to a copy of v.
func uint64Ptr(v uint64) *uint64 {
	p := v
	return &p
}
package gopherplot
import (
"errors"
"fmt"
)
// Shape is the description of a matrix's dimensions
type Shape []int
// Equals determines if two shapes are the same
func (s Shape) Equals(s2 Shape) bool {
if len(s) != len(s2) {
return false
}
for i := range s {
if s[i] != s2[i] {
return false
}
}
return true
}
// Valuable will return a value
type Valuable interface {
Value() interface{}
}
// Number is a number structure
type Number struct {
Val float64
}
// Value of the Number
func (n *Number) Value() interface{} {
return n.Val
}
func (n *Number) String() string {
return fmt.Sprintf("%f", n.Value().(float64))
}
//Getable will retrieve the Valuable at the provided coordinates
type Getable interface {
Get(...int) (Valuable, error)
}
// Dimensioned provides access to the shape of the structure
type Dimensioned interface {
Dimension() Shape
}
// Vector is a list of float64 values
type Vector struct {
Data []*Number
dims Shape
Getable
Valuable
Dimensioned
}
// Max return the maximum int value from a []int
func Max(vals ...int) int {
var max int
for i := 0; i < len(vals); i++ {
if vals[i] > vals[max] {
max = i
}
}
return vals[max]
}
/*
NewVector creates a new vector.
for a horizontal vector, the shape is [n,1]
for a vertical vector, the shape is [1,n]
One of shape's values must be 1.
Shape cannot have negative values
If the len(values) < n, the remaining values are set to 0
*/
func NewVector(shape []int, values ...float64) (*Vector, error) {
	// Validate the length FIRST: the original checked len(shape) != 2
	// last, so shapes shorter than 2 panicked with an index-out-of-range
	// instead of returning the intended error.
	if len(shape) != 2 || (shape[0] != 1 && shape[1] != 1) || shape[0] < 1 || shape[1] < 1 {
		return nil, errors.New("Invalid Shape")
	}
	// The vector holds exactly Max(shape...) elements; positions beyond
	// the provided values are zero-filled (extra values are ignored).
	n := Max(shape...)
	newData := make([]*Number, 0, n)
	for i := 0; i < n; i++ {
		v := &Number{Val: 0}
		if i < len(values) {
			v = &Number{Val: values[i]}
		}
		newData = append(newData, v)
	}
	v := Vector{
		dims: shape,
		Data: newData,
	}
	return &v, nil
}
// Dimension describes the shape of the Vector.
// The returned Shape aliases the internal slice; callers must not mutate it.
func (v *Vector) Dimension() Shape {
	return v.dims
}
//Get the float64 value at the given address.
//
// An address names every dimension, e.g. Get(i, 0) for a row vector of
// shape [n,1] or Get(0, j) for a column vector of shape [1,n].
//
// NOTE(review): the original returned a nil Valuable even for valid
// addresses (a stub); returning the stored element is the documented
// intent of the Getable interface.
func (v *Vector) Get(addr ...int) (Valuable, error) {
	err := errors.New("Invalid Address")
	// Require one index per dimension. (The original accepted shorter
	// addresses, which silently referenced an undefined position.)
	if len(addr) != len(v.dims) {
		return nil, err
	}
	for i, a := range addr {
		// Indices are zero-based, so a == v.dims[i] is out of range too;
		// the original's `a > v.dims[i]` comparison was off by one.
		if a < 0 || a >= v.dims[i] {
			return nil, err
		}
	}
	// Exactly one axis has extent > 1, so the larger index is the offset
	// into the flat backing slice (both are 0 for a 1x1 vector).
	idx := 0
	for _, a := range addr {
		if a > idx {
			idx = a
		}
	}
	return v.Data[idx], nil
}
// String renders the vector as "[ v1 v2 ... ]" on one line for a row
// (shape [n,1]) or with one value per line for a column (shape [1,n]).
func (v *Vector) String() string {
	sep, lead := " ", ""
	if v.dims[0] <= 1 {
		sep, lead = "\n", " "
	}
	out := "[" + sep
	for _, n := range v.Data {
		out += fmt.Sprintf("%s%f%s", lead, n.Value().(float64), sep)
	}
	return out + "]"
}
//Times multiplies a number to each value of the Vector.
// Each element is replaced with a freshly allocated Number.
func (v *Vector) Times(n *Number) {
	factor := n.Value().(float64)
	for i := range v.Data {
		v.Data[i] = &Number{Val: v.Data[i].Value().(float64) * factor}
	}
}
//Plus adds n to each element of Vector.
// Each element is replaced with a freshly allocated Number.
func (v *Vector) Plus(n *Number) {
	delta := n.Value().(float64)
	for i := range v.Data {
		v.Data[i] = &Number{Val: v.Data[i].Value().(float64) + delta}
	}
}
// DotProduct produces the inner product of two Vectors.
// v must be a row vector (shape [n,1]) and v2 must hold the same number
// of elements.
func (v *Vector) DotProduct(v2 *Vector) (*Number, error) {
	if v.dims[0] != v2.dims[1] && v.dims[1] != v2.dims[0] {
		return nil, errors.New("Vectors lack compatible shapes")
	}
	if v.dims[1] != 1 {
		return nil, errors.New("Vector must be a row")
	}
	// Guard the element count explicitly: shapes such as [3,1]·[1,2]
	// satisfy the checks above but would panic indexing v2.Data below.
	if len(v.Data) != len(v2.Data) {
		return nil, errors.New("Vectors lack compatible shapes")
	}
	var p float64
	for i := range v.Data {
		p += v.Data[i].Value().(float64) * v2.Data[i].Value().(float64)
	}
	return &Number{p}, nil
}
/*
T Generates a transpose of the vector sharing the same backing data:
only the shape is swapped, no elements are copied.
*/
func (v *Vector) T() *Vector {
	t := &Vector{Data: v.Data}
	t.dims = Shape{v.dims[1], v.dims[0]}
	return t
}
// Matrix is an n x m matrix.
// Data is the flat backing store addressed by Get; dims holds one extent
// per dimension.
// NOTE(review): embedding the interfaces below adds nil-valued interface
// fields; it is *Matrix's own methods that satisfy them.
type Matrix struct {
	dims []int
	Data []*Number
	Getable
	Valuable
	Dimensioned
}
/*
NewMatrix creates a new n-dimensional matrix.
All dimensions must be >= 1.
Data populates dimensions in the order presented; unfilled areas default to 0.
*/
func NewMatrix(dims []int, data ...float64) (*Matrix, error) {
	if len(dims) < 2 {
		return nil, errors.New("Matrix must have 2 or more dimension")
	}
	// Total element count is the product of all extents; reject any
	// extent below 1 along the way.
	dLen := 1
	for i, d := range dims {
		dLen *= d
		if d < 1 {
			return nil, fmt.Errorf("Dimension %d of %d must be >=1", i, d)
		}
	}
	if len(data) > dLen {
		return nil, fmt.Errorf("Invalid dimensions - %d data would be lost", len(data)-dLen)
	}
	// Fill with the provided values, zero-padding the remainder.
	newData := make([]*Number, dLen)
	for i := range newData {
		n := &Number{}
		if i < len(data) {
			n.Val = data[i]
		}
		newData[i] = n
	}
	return &Matrix{
		Data: newData,
		dims: dims,
	}, nil
}
//Get allows Matrix to be Gettable.
//
// dims addresses one element with a zero-based index per dimension, in
// the same order the extents were given to NewMatrix.
func (m *Matrix) Get(dims ...int) (Valuable, error) {
	if len(dims) != len(m.dims) {
		return nil, fmt.Errorf("Shape has wrong dimensions. Matrix has %d dimensions", len(dims))
	}
	for i, d := range dims {
		// Indices are zero-based, so d == m.dims[i] is also out of range;
		// the original's `<` comparison was off by one and could panic on
		// the Data access below. Negative indices are rejected for the
		// same reason.
		if d < 0 || d >= m.dims[i] {
			return nil, fmt.Errorf("Requested %d Dimension %d exceeds Matrix shape %v", i, d, m.dims)
		}
	}
	// Flatten the n-dimensional address: each dimension's stride is the
	// product of all earlier extents. (The original used only m.dims[idx]
	// as the stride, which mis-addressed matrices with more than two
	// dimensions; for 2-D the result is unchanged.)
	i := 0
	stride := 1
	for d, a := range dims {
		i += a * stride
		stride *= m.dims[d]
	}
	return m.Data[i], nil
}
// Dimension gives the dimension of the matrix.
// The returned Shape aliases the internal slice; callers must not mutate it.
func (m *Matrix) Dimension() Shape {
	return m.dims
}
// AddNumber adds a number to each element of the Matrix, mutating the
// existing elements in place.
// To subtract, multiply n by -1.
func (m *Matrix) AddNumber(n *Number) {
	delta := n.Val
	for i := range m.Data {
		m.Data[i].Val += delta
	}
}
//AddMatrix is for adding same-dimensioned matrixes; m is mutated in
// place and m2 is left untouched.
func (m *Matrix) AddMatrix(m2 *Matrix) error {
	if !m.Dimension().Equals(m2.Dimension()) {
		return errors.New("Matrixes are not the same dimensions")
	}
	for i, cell := range m.Data {
		cell.Val += m2.Data[i].Val
	}
	return nil
}
// String renders the matrix row by row as "[v] [v] ..." lines with two
// decimal places. Only the first two dimensions are rendered.
// NOTE(review): a Get error is printed to stdout and an empty string is
// returned — consider surfacing the error instead.
func (m *Matrix) String() string {
	var s string
	for j := 0; j < m.Dimension()[1]; j++ {
		prefix := "\n"
		for i := 0; i < m.Dimension()[0]; i++ {
			v, err := m.Get(i, j)
			if err != nil {
				fmt.Println(err)
				return ""
			}
			s += fmt.Sprintf("%s[%0.2f]", prefix, v.Value().(float64))
			prefix = " "
		}
	}
	return s
} | transforms.go | 0.768081 | 0.618521 | transforms.go | starcoder
package jsonio
import (
"bytes"
"encoding/hex"
"encoding/json"
"time"
"github.com/brimdata/zed"
"github.com/brimdata/zed/zcode"
"github.com/brimdata/zed/zson"
)
// marshalAny converts a single Zed value (typ plus its encoded bytes)
// into a plain Go value suitable for encoding/json. Nil bytes denote the
// null value for any type; types with no direct JSON analogue fall back
// to their ZSON text form in the default case.
func marshalAny(typ zed.Type, bytes zcode.Bytes) interface{} {
	if bytes == nil {
		return nil
	}
	switch typ := typ.(type) {
	case *zed.TypeNamed:
		// Unwrap the name and marshal the underlying type.
		return marshalAny(typ.Type, bytes)
	case *zed.TypeOfUint8, *zed.TypeOfUint16, *zed.TypeOfUint32, *zed.TypeOfUint64:
		return zed.DecodeUint(bytes)
	case *zed.TypeOfInt8, *zed.TypeOfInt16, *zed.TypeOfInt32, *zed.TypeOfInt64:
		return zed.DecodeInt(bytes)
	case *zed.TypeOfDuration:
		return zed.DecodeDuration(bytes).String()
	case *zed.TypeOfTime:
		// Times are rendered as RFC 3339 with nanosecond precision.
		return zed.DecodeTime(bytes).Time().Format(time.RFC3339Nano)
	case *zed.TypeOfFloat32:
		return zed.DecodeFloat32(bytes)
	case *zed.TypeOfFloat64:
		return zed.DecodeFloat64(bytes)
	case *zed.TypeOfBool:
		return zed.DecodeBool(bytes)
	case *zed.TypeOfBytes:
		// Raw bytes become a "0x"-prefixed hex string.
		return "0x" + hex.EncodeToString(bytes)
	case *zed.TypeOfString:
		return string(bytes)
	case *zed.TypeOfIP:
		return zed.DecodeIP(bytes).String()
	case *zed.TypeOfNet:
		return zed.DecodeNet(bytes).String()
	case *zed.TypeOfNull:
		return nil
	case *zed.TypeRecord:
		return marshalRecord(typ, bytes)
	case *zed.TypeArray:
		return marshalArray(typ, bytes)
	case *zed.TypeSet:
		return marshalSet(typ, bytes)
	case *zed.TypeMap:
		return marshalMap(typ, bytes)
	case *zed.TypeUnion:
		// SplitZNG yields the selected member's (type, bytes) pair, which
		// is marshaled as that inner value.
		return marshalAny(typ.SplitZNG(bytes))
	case *zed.TypeEnum:
		return marshalEnum(typ, bytes)
	case *zed.TypeError:
		// Errors become a one-key object wrapping the inner value.
		return map[string]interface{}{"error": marshalAny(typ.Type, bytes)}
	default:
		return zson.MustFormatValue(*zed.NewValue(typ, bytes))
	}
}
// marshalRecord converts a Zed record into an order-preserving JSON
// object, marshaling each column's value in declaration order.
func marshalRecord(typ *zed.TypeRecord, bytes zcode.Bytes) interface{} {
	rec := make(record, 0, len(typ.Columns))
	it := bytes.Iter()
	for _, col := range typ.Columns {
		rec = append(rec, field{col.Name, marshalAny(col.Type, it.Next())})
	}
	return rec
}
// record represents a Zed record and encodes as a JSON object. In contrast to
// a map, it preserves field order.
type record []field
// field is a single name/value pair of a record.
type field struct {
	name  string
	value interface{}
}
// MarshalJSON implements json.Marshaler, writing the record as a JSON
// object with fields in their original order (which a map-based encoding
// would not preserve). HTML escaping is disabled so <, >, and & pass
// through verbatim.
//
// NOTE(review): json.Encoder.Encode appends a newline after every value,
// so the output contains embedded newlines (e.g. {"a"\n:1\n}). That is
// still valid JSON, but confirm downstream consumers don't expect
// compact output.
func (r record) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	buf.WriteByte('{')
	for i, field := range r {
		if i > 0 {
			buf.WriteByte(',')
		}
		if err := enc.Encode(field.name); err != nil {
			return nil, err
		}
		buf.WriteByte(':')
		if err := enc.Encode(field.value); err != nil {
			return nil, err
		}
	}
	buf.WriteByte('}')
	return buf.Bytes(), nil
}
// marshalArray converts a Zed array into a []interface{}; an empty array
// yields an empty non-nil slice so it encodes as [] rather than null.
func marshalArray(typ *zed.TypeArray, bytes zcode.Bytes) interface{} {
	out := []interface{}{}
	for it := bytes.Iter(); !it.Done(); {
		out = append(out, marshalAny(typ.Type, it.Next()))
	}
	return out
}
// marshalSet converts a Zed set into a []interface{}; an empty set yields
// an empty non-nil slice so it encodes as [] rather than null.
func marshalSet(typ *zed.TypeSet, bytes zcode.Bytes) interface{} {
	out := []interface{}{}
	for it := bytes.Iter(); !it.Done(); {
		out = append(out, marshalAny(typ.Type, it.Next()))
	}
	return out
}
// Entry is one key/value pair of a marshaled Zed map.
type Entry struct {
	Key   interface{} `json:"key"`
	Value interface{} `json:"value"`
}
// marshalMap converts a Zed map into a list of key/value Entry pairs in
// encounter order. An empty map yields a nil slice (encodes as null).
func marshalMap(typ *zed.TypeMap, bytes zcode.Bytes) interface{} {
	var entries []Entry
	for it := bytes.Iter(); !it.Done(); {
		k := marshalAny(typ.KeyType, it.Next())
		v := marshalAny(typ.ValType, it.Next())
		entries = append(entries, Entry{Key: k, Value: v})
	}
	return entries
}
// marshalEnum maps an enum's encoded selector to its symbol name,
// returning a sentinel string when the selector is out of range.
func marshalEnum(typ *zed.TypeEnum, bytes zcode.Bytes) interface{} {
	selector := int(zed.DecodeUint(bytes))
	if selector >= len(typ.Symbols) {
		return "<bad enum>"
	}
	return typ.Symbols[selector]
} | zio/jsonio/marshal.go | 0.560493 | 0.406214 | marshal.go | starcoder
package pipeline
import (
"fmt"
"strings"
"github.com/gcla/sklearn/base"
"github.com/gcla/sklearn/preprocessing"
"gonum.org/v1/gonum/mat"
)
// Estimator is an interface for Predict.
type Estimator interface {
	Predict(X, Y *mat.Dense)
}
// NamedStep represents a pipeline named Step: a transformer paired with
// a human-readable name.
type NamedStep struct {
	Name string
	Step base.Transformer
}
// Pipeline is a sequence of transformers and an estimator.
// NOutputs records Y's column count at Fit time so Transform can size
// its output.
type Pipeline struct {
	NamedSteps []NamedStep
	NOutputs   int
}
// NewPipeline returns a *Pipeline wrapping the given named steps.
func NewPipeline(steps ...NamedStep) *Pipeline {
	return &Pipeline{NamedSteps: steps}
}
// Fit fits every step in sequence: each step is fitted on the output of
// the previous step's Transform, starting from the raw X, Y. Y's column
// count is recorded in NOutputs for later use by Transform.
func (p *Pipeline) Fit(X, Y *mat.Dense) base.Transformer {
	_, p.NOutputs = Y.Dims()
	curX, curY := X, Y
	for _, ns := range p.NamedSteps {
		ns.Step.Fit(curX, curY)
		curX, curY = ns.Step.Transform(curX, curY)
	}
	return p
}
// Predict runs X forward through every step's Transform, then maps the
// result back through the inverse transforms of all but the final step
// (in reverse order) and copies the outcome into Y.
func (p *Pipeline) Predict(X, Y *mat.Dense) base.Regressor {
	curX, curY := X, Y
	for _, ns := range p.NamedSteps {
		curX, curY = ns.Step.Transform(curX, curY)
	}
	for i := len(p.NamedSteps) - 2; i >= 0; i-- {
		inv := p.NamedSteps[i].Step.(preprocessing.InverseTransformer)
		_, curY = inv.InverseTransform(nil, curY)
	}
	Y.Copy(curY)
	return p
}
// Transform for pipeline: returns X unchanged alongside a freshly
// allocated Y (sized rows(X) x NOutputs) filled by Predict. The incoming
// Y argument is not used.
func (p *Pipeline) Transform(X, Y *mat.Dense) (Xout, Yout *mat.Dense) {
	nSamples, _ := X.Dims()
	Xout = X
	Yout = mat.NewDense(nSamples, p.NOutputs, nil)
	p.Predict(Xout, Yout)
	return Xout, Yout
}
// Score transforms X and Y through every step except the last, then asks
// the final step (the regressor) to score the transformed X against the
// ORIGINAL Y.
func (p *Pipeline) Score(X, Y *mat.Dense) float64 {
	curX, curY := X, Y
	last := len(p.NamedSteps) - 1
	for _, ns := range p.NamedSteps[:last] {
		curX, curY = ns.Step.Transform(curX, curY)
	}
	return p.NamedSteps[last].Step.(base.Regressor).Score(curX, Y)
}
// MakePipeline returns a Pipeline from unnamed steps; each step is named
// after its lowercased Go type (e.g. "*preprocessing.standardscaler").
func MakePipeline(steps ...base.Transformer) *Pipeline {
	p := &Pipeline{}
	for _, step := range steps {
		/*if _, ok := step.(preprocessing.Transformer); (istep < len(steps)-1) && !ok {
			panic(fmt.Errorf("%T is not a Transformer", step))
		}
		if _, ok := step.(lm.Regressor); (istep == len(steps)-1) && !ok {
			panic(fmt.Errorf("%T is not a lm.Regressor", step))
		}*/
		p.NamedSteps = append(p.NamedSteps, NamedStep{Name: strings.ToLower(fmt.Sprintf("%T", step)), Step: step})
	}
	return p
} | pipeline/pipeline.go | 0.708616 | 0.423041 | pipeline.go | starcoder
package signalfxformat
import (
"fmt"
"github.com/signalfx/golib/v3/trace"
)
// JSONDatapointV1 is the JSON API format for /v1/datapoint
//easyjson:json
type JSONDatapointV1 struct {
	//easyjson:json
	Source string `json:"source"`
	Metric string `json:"metric"`
	Value float64 `json:"value"`
}
// JSONDatapointV2 is the V2 json datapoint sending format:
// a map from metric type to the datapoints of that type.
//easyjson:json
type JSONDatapointV2 map[string][]*BodySendFormatV2
// BodySendFormatV2 is the JSON format signalfx datapoints are expected to be in
//easyjson:json
type BodySendFormatV2 struct {
	Metric string `json:"metric"`
	Timestamp int64 `json:"timestamp"`
	Value ValueToSend `json:"value"`
	Dimensions map[string]string `json:"dimensions"`
}
// String renders the datapoint for debugging/logging.
// Value and Dimensions are formatted with %v: Value is an interface
// holding arbitrary (typically numeric) types, and the original %s verb
// rendered non-strings as "%!s(float64=...)" errors instead of the value.
func (bodySendFormat *BodySendFormatV2) String() string {
	return fmt.Sprintf("DP[metric=%s|time=%d|val=%v|dimensions=%v]", bodySendFormat.Metric, bodySendFormat.Timestamp, bodySendFormat.Value, bodySendFormat.Dimensions)
}
// ValueToSend are values sent from the gateway to a receiver for the datapoint
type ValueToSend interface{}
// JSONEventV2 is the V2 json event sending format
//easyjson:json
type JSONEventV2 []*EventSendFormatV2
// EventSendFormatV2 is the JSON format signalfx events are expected to
// be in. Pointer fields (Category, Timestamp) distinguish "absent" from
// the zero value.
//easyjson:json
type EventSendFormatV2 struct {
	EventType string `json:"eventType"`
	Category *string `json:"category"`
	Dimensions map[string]string `json:"dimensions"`
	Properties map[string]interface{} `json:"properties"`
	Timestamp *int64 `json:"timestamp"`
}
// InputAnnotation associates an event that explains latency with a timestamp.
// Unlike log statements, annotations are often codes. Ex. “ws” for WireSend.
// Timestamp is a *float64 here (not int64) to tolerate clients that send
// fractional epoch values; see ToV2 for the conversion.
//easyjson:json
type InputAnnotation struct {
	Endpoint *trace.Endpoint `json:"endpoint"`
	Timestamp *float64 `json:"timestamp"`
	Value *string `json:"value"`
}
// ToV2 converts an InputAnnotation to a V2 annotation, which basically
// means dropping the endpoint. The endpoint must be considered in other
// logic to know which span to associate the endpoint with.
func (a *InputAnnotation) ToV2() *trace.Annotation {
	out := &trace.Annotation{Value: a.Value}
	out.Timestamp = GetPointerToInt64(a.Timestamp)
	return out
}
// GetPointerToInt64 converts a *float64 to a *int64, truncating the
// value toward zero; a nil input yields a nil output.
func GetPointerToInt64(p *float64) *int64 {
	var out *int64
	if p != nil {
		v := int64(*p)
		out = &v
	}
	return out
}
// BinaryAnnotation associates an event that explains latency with a timestamp.
//easyjson:json
type BinaryAnnotation struct {
	Endpoint *trace.Endpoint `json:"endpoint"`
	Key *string `json:"key"`
	Value *interface{} `json:"value"`
}
// InputSpan defines a span that is the union of v1 and v2 spans: it
// embeds the v2 trace.Span and overlays the v1-only fields (float
// timestamps and v1 annotation lists).
//easyjson:json
type InputSpan struct {
	trace.Span
	Timestamp *float64 `json:"timestamp"`
	Duration *float64 `json:"duration"`
	Annotations []*InputAnnotation `json:"annotations"`
	BinaryAnnotations []*BinaryAnnotation `json:"binaryAnnotations"`
}
// InputSpanList is an array of InputSpan pointers
//easyjson:json
type InputSpanList []*InputSpan | protocol/signalfx/format/signalfx_format.go | 0.705176 | 0.444987 | signalfx_format.go | starcoder
package fv
// the references, in order, of the Extended set of Fighter Verses
var ExtendedSet = []entry{
{SetOrder: 101, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:1-2"},
{SetOrder: 102, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:3-6"},
{SetOrder: 103, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:7-8"},
{SetOrder: 104, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:9-11"},
{SetOrder: 105, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:12-14"},
{SetOrder: 106, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:15-17"},
{SetOrder: 107, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:18-19"},
{SetOrder: 108, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:20-21"},
{SetOrder: 109, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:22-24"},
{SetOrder: 110, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:25-26"},
{SetOrder: 111, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:27-28a"},
{SetOrder: 112, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1:28b-30"},
{SetOrder: 113, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1"},
{SetOrder: 114, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:1-2"},
{SetOrder: 115, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:3-4"},
{SetOrder: 116, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:5-7"},
{SetOrder: 117, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:8"},
{SetOrder: 118, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:9-11"},
{SetOrder: 119, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:12-13"},
{SetOrder: 120, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:14-16"},
{SetOrder: 121, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:17-18"},
{SetOrder: 122, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:19-21"},
{SetOrder: 123, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:22-24"},
{SetOrder: 124, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:25-26"},
{SetOrder: 125, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:27-28"},
{SetOrder: 126, BibleOrder: 0, Set: "Extended", Ref: "Philippians 2:29-30"},
{SetOrder: 127, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1-2"},
{SetOrder: 128, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:1-2"},
{SetOrder: 129, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:3-4a"},
{SetOrder: 130, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:4b-6"},
{SetOrder: 131, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:7-8a"},
{SetOrder: 132, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:8b-9"},
{SetOrder: 133, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:10-11"},
{SetOrder: 134, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:12"},
{SetOrder: 135, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:13-14"},
{SetOrder: 136, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:15-16"},
{SetOrder: 137, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:17-19"},
{SetOrder: 138, BibleOrder: 0, Set: "Extended", Ref: "Philippians 3:20-21"},
{SetOrder: 139, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1-3"},
{SetOrder: 140, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:1"},
{SetOrder: 141, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:2-3"},
{SetOrder: 142, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:4-5"},
{SetOrder: 143, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:6-7"},
{SetOrder: 144, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:8-9"},
{SetOrder: 145, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:10-11"},
{SetOrder: 146, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:12-13"},
{SetOrder: 147, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:14-15"},
{SetOrder: 148, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:16-17"},
{SetOrder: 149, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:18"},
{SetOrder: 150, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:19-20"},
{SetOrder: 151, BibleOrder: 0, Set: "Extended", Ref: "Philippians 4:21-23"},
{SetOrder: 152, BibleOrder: 0, Set: "Extended", Ref: "Philippians 1-4"},
{SetOrder: 201, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:1-2"},
{SetOrder: 202, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:3-5"},
{SetOrder: 203, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:6-8"},
{SetOrder: 204, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:9-11"},
{SetOrder: 205, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:12-13"},
{SetOrder: 206, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:14-15"},
{SetOrder: 207, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:16-17"},
{SetOrder: 208, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:18-19"},
{SetOrder: 209, BibleOrder: 0, Set: "Extended", Ref: "Romans 5:20-21"},
{SetOrder: 210, BibleOrder: 0, Set: "Extended", Ref: "Romans 5"},
{SetOrder: 211, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:1-3"},
{SetOrder: 212, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:4-5"},
{SetOrder: 213, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:6-7"},
{SetOrder: 214, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:8-9"},
{SetOrder: 215, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:10-12"},
{SetOrder: 216, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:13-14"},
{SetOrder: 217, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:15-16"},
{SetOrder: 218, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:17-18"},
{SetOrder: 219, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:19"},
{SetOrder: 220, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:20-21"},
{SetOrder: 221, BibleOrder: 0, Set: "Extended", Ref: "Romans 6:22-23"},
{SetOrder: 222, BibleOrder: 0, Set: "Extended", Ref: "Romans 5-6"},
{SetOrder: 223, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:1-2"},
{SetOrder: 224, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:3"},
{SetOrder: 225, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:4"},
{SetOrder: 226, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:5-6"},
{SetOrder: 227, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:7"},
{SetOrder: 228, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:8-10"},
{SetOrder: 229, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:11-12"},
{SetOrder: 230, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:13"},
{SetOrder: 231, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:14-15"},
{SetOrder: 232, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:16-18"},
{SetOrder: 233, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:19-20"},
{SetOrder: 234, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:21-23"},
{SetOrder: 235, BibleOrder: 0, Set: "Extended", Ref: "Romans 7:24-25"},
{SetOrder: 236, BibleOrder: 0, Set: "Extended", Ref: "Romans 5-7"},
{SetOrder: 237, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:1-2"},
{SetOrder: 238, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:3-4"},
{SetOrder: 239, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:5-8"},
{SetOrder: 240, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:9"},
{SetOrder: 241, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:10-11"},
{SetOrder: 242, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:12-14"},
{SetOrder: 243, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:15-17"},
{SetOrder: 244, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:18-19"},
{SetOrder: 245, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:20-22"},
{SetOrder: 246, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:23-25"},
{SetOrder: 247, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:26-27"},
{SetOrder: 248, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:28-30"},
{SetOrder: 249, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:31-33"},
{SetOrder: 250, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:34-36"},
{SetOrder: 251, BibleOrder: 0, Set: "Extended", Ref: "Romans 8:37-39"},
{SetOrder: 252, BibleOrder: 0, Set: "Extended", Ref: "Romans 5-8"},
{SetOrder: 301, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:9-10"},
{SetOrder: 302, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:11-12"},
{SetOrder: 303, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:13-14"},
{SetOrder: 304, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:15-16"},
{SetOrder: 305, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:17-18"},
{SetOrder: 306, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:19-20"},
{SetOrder: 307, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:21-23"},
{SetOrder: 308, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:24-26"},
{SetOrder: 309, BibleOrder: 0, Set: "Extended", Ref: "Colossians 1:27-29"},
{SetOrder: 310, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:1-2"},
{SetOrder: 311, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:3"},
{SetOrder: 312, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:4-5"},
{SetOrder: 313, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:6-7"},
{SetOrder: 314, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:8"},
{SetOrder: 315, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:9-10"},
{SetOrder: 316, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:11"},
{SetOrder: 317, BibleOrder: 0, Set: "Extended", Ref: "Isaiah 53:12"},
{SetOrder: 318, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:1-2"},
{SetOrder: 319, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:3"},
{SetOrder: 320, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:4-6"},
{SetOrder: 321, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:7-8"},
{SetOrder: 322, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:9-11"},
{SetOrder: 323, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:12-13"},
{SetOrder: 324, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:14-15"},
{SetOrder: 325, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3:16-18"},
{SetOrder: 326, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 3"},
{SetOrder: 327, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:1-2"},
{SetOrder: 328, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:3-4"},
{SetOrder: 329, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:5-6"},
{SetOrder: 330, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:7-9"},
{SetOrder: 331, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:10-12"},
{SetOrder: 332, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:13-14"},
{SetOrder: 333, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:15-16"},
{SetOrder: 334, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4:17-18"},
{SetOrder: 335, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 4"},
{SetOrder: 336, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:1-3"},
{SetOrder: 337, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:4-5"},
{SetOrder: 338, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:6-8"},
{SetOrder: 339, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:9-10"},
{SetOrder: 340, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:11-12"},
{SetOrder: 341, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:13-15"},
{SetOrder: 342, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:16-17"},
{SetOrder: 343, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:18-19"},
{SetOrder: 344, BibleOrder: 0, Set: "Extended", Ref: "2 Corinthians 5:20-21"},
{SetOrder: 345, BibleOrder: 0, Set: "Extended", Ref: "John 1:1-2"},
{SetOrder: 346, BibleOrder: 0, Set: "Extended", Ref: "John 1:3-5"},
{SetOrder: 347, BibleOrder: 0, Set: "Extended", Ref: "John 1:6-7"},
{SetOrder: 348, BibleOrder: 0, Set: "Extended", Ref: "John 1:8-9"},
{SetOrder: 349, BibleOrder: 0, Set: "Extended", Ref: "John 1:10-11"},
{SetOrder: 350, BibleOrder: 0, Set: "Extended", Ref: "John 1:12-13"},
{SetOrder: 351, BibleOrder: 0, Set: "Extended", Ref: "John 1:14-15"},
{SetOrder: 352, BibleOrder: 0, Set: "Extended", Ref: "John 1:16-18"},
{SetOrder: 401, BibleOrder: 0, Set: "Extended", Ref: "James 1:1-3"},
{SetOrder: 402, BibleOrder: 0, Set: "Extended", Ref: "James 1:4-5"},
{SetOrder: 403, BibleOrder: 0, Set: "Extended", Ref: "James 1:6-8"},
{SetOrder: 404, BibleOrder: 0, Set: "Extended", Ref: "James 1:9-11"},
{SetOrder: 405, BibleOrder: 0, Set: "Extended", Ref: "James 1:12"},
{SetOrder: 406, BibleOrder: 0, Set: "Extended", Ref: "James 1:13-15"},
{SetOrder: 407, BibleOrder: 0, Set: "Extended", Ref: "James 1:16-18"},
{SetOrder: 408, BibleOrder: 0, Set: "Extended", Ref: "James 1:19-21"},
{SetOrder: 409, BibleOrder: 0, Set: "Extended", Ref: "James 1:22-24"},
{SetOrder: 410, BibleOrder: 0, Set: "Extended", Ref: "James 1:25"},
{SetOrder: 411, BibleOrder: 0, Set: "Extended", Ref: "James 1:26-27"},
{SetOrder: 412, BibleOrder: 0, Set: "Extended", Ref: "James 1"},
{SetOrder: 413, BibleOrder: 0, Set: "Extended", Ref: "James 2:1-2"},
{SetOrder: 414, BibleOrder: 0, Set: "Extended", Ref: "James 2:3-4"},
{SetOrder: 415, BibleOrder: 0, Set: "Extended", Ref: "James 2:5-7"},
{SetOrder: 416, BibleOrder: 0, Set: "Extended", Ref: "James 2:8-10"},
{SetOrder: 417, BibleOrder: 0, Set: "Extended", Ref: "James 2:11-13"},
{SetOrder: 418, BibleOrder: 0, Set: "Extended", Ref: "James 2:14-16"},
{SetOrder: 419, BibleOrder: 0, Set: "Extended", Ref: "James 2:17-18"},
{SetOrder: 420, BibleOrder: 0, Set: "Extended", Ref: "James 2:19-20"},
{SetOrder: 421, BibleOrder: 0, Set: "Extended", Ref: "James 2:21-23"},
{SetOrder: 422, BibleOrder: 0, Set: "Extended", Ref: "James 2:24-26"},
{SetOrder: 423, BibleOrder: 0, Set: "Extended", Ref: "James 1-2"},
{SetOrder: 424, BibleOrder: 0, Set: "Extended", Ref: "James 3:1-2"},
{SetOrder: 425, BibleOrder: 0, Set: "Extended", Ref: "James 3:3-4"},
{SetOrder: 426, BibleOrder: 0, Set: "Extended", Ref: "James 3:5-6"},
{SetOrder: 427, BibleOrder: 0, Set: "Extended", Ref: "James 3:7-8"},
{SetOrder: 428, BibleOrder: 0, Set: "Extended", Ref: "James 3:9-10"},
{SetOrder: 429, BibleOrder: 0, Set: "Extended", Ref: "James 3:11-12"},
{SetOrder: 430, BibleOrder: 0, Set: "Extended", Ref: "James 3:13-14"},
{SetOrder: 431, BibleOrder: 0, Set: "Extended", Ref: "James 3:15-16"},
{SetOrder: 432, BibleOrder: 0, Set: "Extended", Ref: "James 3:17-18"},
{SetOrder: 433, BibleOrder: 0, Set: "Extended", Ref: "James 1-3"},
{SetOrder: 434, BibleOrder: 0, Set: "Extended", Ref: "James 4:1-2a"},
{SetOrder: 435, BibleOrder: 0, Set: "Extended", Ref: "James 4:2b-3"},
{SetOrder: 436, BibleOrder: 0, Set: "Extended", Ref: "James 4:4-5"},
{SetOrder: 437, BibleOrder: 0, Set: "Extended", Ref: "James 4:6-8a"},
{SetOrder: 438, BibleOrder: 0, Set: "Extended", Ref: "James 4:8b-10"},
{SetOrder: 439, BibleOrder: 0, Set: "Extended", Ref: "James 4:11-12"},
{SetOrder: 440, BibleOrder: 0, Set: "Extended", Ref: "James 4:13-14"},
{SetOrder: 441, BibleOrder: 0, Set: "Extended", Ref: "James 4:15-17"},
{SetOrder: 442, BibleOrder: 0, Set: "Extended", Ref: "James 1-4"},
{SetOrder: 443, BibleOrder: 0, Set: "Extended", Ref: "James 5:1-3"},
{SetOrder: 444, BibleOrder: 0, Set: "Extended", Ref: "James 5:4-6"},
{SetOrder: 445, BibleOrder: 0, Set: "Extended", Ref: "James 5:7-9"},
{SetOrder: 446, BibleOrder: 0, Set: "Extended", Ref: "James 5:10-11"},
{SetOrder: 447, BibleOrder: 0, Set: "Extended", Ref: "James 5:12"},
{SetOrder: 448, BibleOrder: 0, Set: "Extended", Ref: "James 5:13-14"},
{SetOrder: 449, BibleOrder: 0, Set: "Extended", Ref: "James 5:15-16"},
{SetOrder: 450, BibleOrder: 0, Set: "Extended", Ref: "James 5:17-18"},
{SetOrder: 451, BibleOrder: 0, Set: "Extended", Ref: "James 5:19-20"},
{SetOrder: 452, BibleOrder: 0, Set: "Extended", Ref: "James 1-5"},
{SetOrder: 501, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:1-3"},
{SetOrder: 502, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:4-6"},
{SetOrder: 503, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:7-9"},
{SetOrder: 504, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:10-12"},
{SetOrder: 505, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:13"},
{SetOrder: 506, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:14-16"},
{SetOrder: 507, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:17-18"},
{SetOrder: 508, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:19"},
{SetOrder: 509, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:20"},
{SetOrder: 510, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:21-22"},
{SetOrder: 511, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:23-24"},
{SetOrder: 512, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:25-26"},
{SetOrder: 513, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:27-28"},
{SetOrder: 514, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:29-30"},
{SetOrder: 515, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:31-32"},
{SetOrder: 516, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:33-35"},
{SetOrder: 517, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:36-37"},
{SetOrder: 518, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:38-40"},
{SetOrder: 519, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:41-42"},
{SetOrder: 520, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:43-45a"},
{SetOrder: 521, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5:45b-48"},
{SetOrder: 522, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5"},
{SetOrder: 523, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:1"},
{SetOrder: 524, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:2"},
{SetOrder: 525, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:3-4"},
{SetOrder: 526, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:5"},
{SetOrder: 527, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:6"},
{SetOrder: 528, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:7-8"},
{SetOrder: 529, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:9-13"},
{SetOrder: 530, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:14-15"},
{SetOrder: 531, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:16-18"},
{SetOrder: 532, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:19-21"},
{SetOrder: 533, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:22-23"},
{SetOrder: 534, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:24"},
{SetOrder: 535, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:25-26"},
{SetOrder: 536, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:27-29"},
{SetOrder: 537, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:30-32"},
{SetOrder: 538, BibleOrder: 0, Set: "Extended", Ref: "Matthew 6:33-34"},
{SetOrder: 539, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5-6"},
{SetOrder: 540, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:1-2"},
{SetOrder: 541, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:3-5"},
{SetOrder: 542, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:6"},
{SetOrder: 543, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:7-8"},
{SetOrder: 544, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:9-11"},
{SetOrder: 545, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:12-14"},
{SetOrder: 546, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:15-16"},
{SetOrder: 547, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:17-20"},
{SetOrder: 548, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:21-23"},
{SetOrder: 549, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:24-25"},
{SetOrder: 550, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:26-27"},
{SetOrder: 551, BibleOrder: 0, Set: "Extended", Ref: "Matthew 7:28-29"},
{SetOrder: 552, BibleOrder: 0, Set: "Extended", Ref: "Matthew 5-7"},
} | fv/extended.go | 0.54819 | 0.553807 | extended.go | starcoder |
package advent2018
import (
"bufio"
"errors"
"io"
"math"
"sort"
"strconv"
"strings"
)
// grid is a bounded 2D field of landmark coordinates; min and max are the
// corners of the bounding box that encloses all coordinates.
type grid struct {
	coordinates []point
	min, max    point
}
// largestFiniteArea returns the number of grid cells in the largest finite
// region, where each cell belongs to the landmark it is strictly closest to
// (Manhattan distance; equidistant cells belong to nobody) and a region is
// finite when none of its cells touch the grid boundary.
// Returns math.MinInt64 when no landmark owns a finite region.
func (g grid) largestFiniteArea() int {
	// Assign every cell of the bounding box to its unique closest landmark.
	closestPointsByLandmark := map[point][]point{}
	for x := g.min.x; x <= g.max.x; x++ {
		for y := g.min.y; y <= g.max.y; y++ {
			a := point{x, y}
			closest, err := findClosestCoordinate(a, g.coordinates)
			if err != nil {
				// Tied between landmarks: the cell counts for no one.
				continue
			}
			closestPointsByLandmark[closest] = append(closestPointsByLandmark[closest], a)
		}
	}
	finites := []point{}
	for _, p := range g.coordinates {
		if g.isFinite(closestPointsByLandmark[p]) {
			finites = append(finites, p)
		}
	}
	// Sort descending by region size; the answer is then the first element.
	// (A previous revision followed the sort with a max-scan loop that
	// re-derived what the sort had already established.)
	sort.Slice(finites, func(i, j int) bool {
		return len(closestPointsByLandmark[finites[i]]) > len(closestPointsByLandmark[finites[j]])
	})
	if len(finites) == 0 {
		return math.MinInt64
	}
	return len(closestPointsByLandmark[finites[0]])
}
// isFinite reports whether a landmark's region is finite: if any of its
// closest cells lies on the grid boundary, the region extends outward
// without bound and is considered infinite.
func (g grid) isFinite(closestPoints []point) bool {
	for _, p := range closestPoints {
		onBoundary := p.x == g.min.x || p.x == g.max.x ||
			p.y == g.min.y || p.y == g.max.y
		if onBoundary {
			return false
		}
	}
	return true
}
// regionMinimizedByConstraint counts the grid cells whose summed Manhattan
// distance to every landmark is strictly below constraint.
func (g grid) regionMinimizedByConstraint(constraint int) int {
	size := 0
	for x := g.min.x; x <= g.max.x; x++ {
		for y := g.min.y; y <= g.max.y; y++ {
			a := point{x, y}
			cumulativeDistance := 0 // fixed spelling of "cummulative"
			for _, b := range g.coordinates {
				cumulativeDistance += a.manhattanDistance(b)
			}
			if cumulativeDistance < constraint {
				// Only the count is needed; the previous version collected
				// the qualifying cells into a slice that was never read.
				size++
			}
		}
	}
	return size
}
// findClosestCoordinate returns the single coordinate strictly closest to a
// by Manhattan distance. It returns an error when the minimum distance is
// shared by two or more coordinates ("no ties allowed"), or when the input
// list is empty (the previous version panicked on an empty slice).
func findClosestCoordinate(a point, coordinates []point) (point, error) {
	if len(coordinates) == 0 {
		return point{}, errors.New("no coordinates")
	}
	var closest point
	min := math.MaxInt64
	ties := 0 // how many coordinates share the current minimum
	for _, c := range coordinates {
		switch d := a.manhattanDistance(c); {
		case d < min:
			min, closest, ties = d, c, 1
		case d == min:
			ties++
		}
	}
	if ties > 1 {
		return point{}, errors.New("no ties allowed")
	}
	return closest, nil
}
// point is an integer 2D coordinate.
type point struct {
	x, y int
}

// manhattanDistance returns the L1 distance |dx| + |dy| to other.
func (p point) manhattanDistance(other point) int {
	return abs(p.x-other.x) + abs(p.y-other.y)
}
// abs returns the absolute value of x.
// Note: abs(math.MinInt64) overflows, as with any two's-complement negate.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
// FindLargestFiniteArea reads newline-separated "x, y" coordinates from r
// and returns the size of the largest finite closest-landmark region.
func FindLargestFiniteArea(r io.Reader) int {
	var points []point
	for sc := bufio.NewScanner(r); sc.Scan(); {
		points = append(points, parsePoint(sc.Text()))
	}
	min, max := findMinMax(points)
	return grid{points, min, max}.largestFiniteArea()
}

// FindRegionAreaMinimizedByConstraint reads newline-separated "x, y"
// coordinates from r and returns the number of cells whose summed Manhattan
// distance to all coordinates is below constraint.
func FindRegionAreaMinimizedByConstraint(r io.Reader, constraint int) int {
	var points []point
	for sc := bufio.NewScanner(r); sc.Scan(); {
		points = append(points, parsePoint(sc.Text()))
	}
	min, max := findMinMax(points)
	return grid{points, min, max}.regionMinimizedByConstraint(constraint)
}
// parsePoint converts one "x, y" input line into a point. Conversion errors
// are deliberately discarded (malformed fields become 0), matching the
// trusted-input assumption of the rest of the package.
func parsePoint(rawPoint string) point {
	parts := strings.Split(rawPoint, ", ")
	var p point
	p.x, _ = strconv.Atoi(parts[0])
	p.y, _ = strconv.Atoi(parts[1])
	return p
}
func findMinMax(points []point) (point, point) {
var min, max point
minx, miny, maxx, maxy := math.MaxInt64, math.MaxInt64, math.MinInt64, math.MinInt64
for _, point := range points {
if point.x < minx {
minx = point.x
}
if point.y < miny {
miny = point.y
}
if point.x > maxx {
maxx = point.x
}
if point.y > maxy {
maxy = point.y
}
}
min.x = minx
min.y = miny
max.x = maxx
max.y = maxy
return min, max
} | advent2018/coordinates.go | 0.719384 | 0.419232 | coordinates.go | starcoder |
package day18
import (
"fmt"
"strings"
"advent2021.com/util"
)
// Node is one element of a binary number tree (AoC 2021 day 18 style).
// A leaf carries Value; an interior node has Value == -1 and non-nil
// Left/Right children. Parent is nil only at the root.
type Node struct {
	Value               int
	Parent, Left, Right *Node
}

// NewNode builds a leaf node holding value, attached to parent (nil for a root).
func NewNode(value int, parent *Node) *Node {
	return &Node{Value: value, Parent: parent, Left: nil, Right: nil}
}

// IsRoot reports whether n is the tree root (has no parent).
func (n *Node) IsRoot() bool {
	return n.Parent == nil
}

// IsLeaf reports whether n is a leaf (no children).
func (n *Node) IsLeaf() bool {
	return n.Left == nil && n.Right == nil
}

// IsPair reports whether n is a pair of two leaves, i.e. the innermost
// bracketed form [a,b] with literal numbers on both sides.
func (n *Node) IsPair() bool {
	return !n.IsLeaf() && n.Left.IsLeaf() && n.Right.IsLeaf()
}
// GetLeafNodes returns the leaf nodes of the tree in left-to-right
// depth-first order — the order their digits appear in the printed form.
// (A previous comment claimed BFS order; the traversal below is FindDfs.)
func (n *Node) GetLeafNodes() []*Node {
	nodes := make([]*Node, 0)
	_ = n.FindDfs(func(n *Node) bool {
		if n.IsLeaf() {
			nodes = append(nodes, n)
		}
		// Never match: the traversal is used only for its side effect.
		return false
	})
	return nodes
}
// GetRoot follows parent links upward and returns the root of the tree.
func (n *Node) GetRoot() *Node {
	root := n
	for root.Parent != nil {
		root = root.Parent
	}
	return root
}
// Explode applies the explode rule to a pair of two literal leaves: the
// pair collapses to the literal 0, its left value is added to the nearest
// leaf to its left, and its right value to the nearest leaf to its right
// (leaves being ordered left-to-right across the whole tree).
func (n *Node) Explode() {
	util.Require(n.IsPair())
	left := n.Left.Value
	right := n.Right.Value
	// Collapse the pair into a 0-valued leaf.
	n.Value = 0
	n.Left = nil
	n.Right = nil
	// Locate this (now leaf) node among all leaves of the full tree so its
	// textual neighbours can absorb the old pair's values.
	leafs := n.GetRoot().GetLeafNodes()
	leafIndex := func() int {
		for i, cur := range leafs {
			if cur == n {
				return i
			}
		}
		panic("node not found")
	}()
	if leafIndex > 0 {
		leafs[leafIndex-1].Value += left
	}
	if leafIndex+1 < len(leafs) {
		leafs[leafIndex+1].Value += right
	}
	util.Require(n.IsLeaf())
}
// Split replaces a leaf holding v with the pair [v/2 rounded down,
// v/2 rounded up] and marks this node as interior (Value = -1).
func (n *Node) Split() {
	util.Require(n.IsLeaf())
	lo := n.Value / 2  // truncating halve
	hi := n.Value - lo // carries the odd remainder
	n.Left = NewNode(lo, n)
	n.Right = NewNode(hi, n)
	n.Value = -1
}
// Reduce repeatedly applies reduction rules until neither fires:
// any pair nested at depth >= 4 explodes first; otherwise any value
// greater than 9 splits. Explosions always take precedence over splits.
func (n *Node) Reduce() {
	// reduceOne applies at most one rule and reports whether it did.
	reduceOne := func() bool {
		explode := n.FindDfs(func(n *Node) bool {
			return n.IsPair() && n.Depth() >= 4
		})
		if explode != nil {
			explode.Explode()
			return true
		}
		split := n.FindDfs(func(n *Node) bool {
			return n.Value > 9
		})
		if split != nil {
			split.Split()
			return true
		}
		return false
	}
	for reduceOne() {
		// fmt.Println(n.String())
	}
}
// Join makes a new pair [n,other], re-parents both operands under it,
// reduces the combined tree, and returns the new root.
func (n *Node) Join(other *Node) *Node {
	root := NewNode(-1, nil)
	root.Left, root.Right = n, other
	n.Parent, other.Parent = root, root
	root.Reduce()
	return root
}
// String renders the tree in bracketed pair notation, e.g. "[[1,2],3]".
func (n *Node) String() string {
	var sb strings.Builder
	var render func(*Node)
	render = func(cur *Node) {
		switch {
		case cur.IsLeaf():
			fmt.Fprintf(&sb, "%d", cur.Value)
		case cur.IsPair():
			// Fast path: a two-leaf pair prints in a single call.
			fmt.Fprintf(&sb, "[%d,%d]", cur.Left.Value, cur.Right.Value)
		default:
			sb.WriteString("[")
			render(cur.Left)
			sb.WriteString(",")
			render(cur.Right)
			sb.WriteString("]")
		}
	}
	render(n)
	return sb.String()
}
// Depth returns the number of parent links between n and the root
// (0 for the root itself).
func (n *Node) Depth() int {
	d := 0
	for cur := n; cur.Parent != nil; cur = cur.Parent {
		d++
	}
	return d
}
// FindBfs runs predicate over the tree in breadth-first order and returns
// the first node for which it yields true, or nil when none matches.
func (n *Node) FindBfs(predicate func(*Node) bool) *Node {
	queue := []*Node{n}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if predicate(cur) {
			return cur
		}
		// Enqueue children left-to-right to preserve level order.
		for _, child := range []*Node{cur.Left, cur.Right} {
			if child != nil {
				queue = append(queue, child)
			}
		}
	}
	return nil
}
// FindDfs runs the predicate on the nodes in the tree in depth-first
// preorder (node, left subtree, right subtree) and returns the first node
// for which the predicate returns true, nil otherwise.
// (A previous comment claimed BFS order; the LIFO stack below makes it DFS.)
func (n *Node) FindDfs(predicate func(*Node) bool) *Node {
	toVisit := []*Node{n}
	for len(toVisit) > 0 {
		// Pop from the end of the slice (stack discipline).
		index := len(toVisit) - 1
		cur := toVisit[index]
		toVisit = toVisit[:index]
		if predicate(cur) {
			return cur
		}
		// Push right first so the left subtree is visited first.
		if cur.Right != nil {
			toVisit = append(toVisit, cur.Right)
		}
		if cur.Left != nil {
			toVisit = append(toVisit, cur.Left)
		}
	}
	return nil
}
// ParseNode parses a bracketed pair expression such as "[[1,2],3]" into a
// Node tree. It returns an error on malformed input: a missing ',' or ']'
// separator, a non-digit leaf character, or truncated text.
func ParseNode(text string) (*Node, error) {
	index := 0
	var impl func(*Node) (*Node, error)
	impl = func(parent *Node) (*Node, error) {
		if index == len(text) {
			return nil, nil
		}
		if text[index] != '[' {
			// Leaf: a single digit character.
			value, err := util.ByteToInt(text[index])
			if err != nil {
				return nil, err
			}
			index++
			return NewNode(value, parent), nil
		}
		node := NewNode(-1, parent)
		index++ // consume '['
		var err error
		node.Left, err = impl(node)
		if err != nil {
			return nil, err
		}
		// Fixed: the message used to say "expected ']'" here even though a
		// ',' is required, and both messages used the %b verb, which prints
		// a byte as binary digits instead of a readable character. Bounds
		// checks also guard against panics on truncated input.
		if index >= len(text) || text[index] != ',' {
			return nil, fmt.Errorf("expected ',' at offset %d", index)
		}
		index++ // consume ','
		node.Right, err = impl(node)
		if err != nil {
			return nil, err
		}
		if index >= len(text) || text[index] != ']' {
			return nil, fmt.Errorf("expected ']' at offset %d", index)
		}
		index++ // consume ']'
		return node, nil
	}
	return impl(nil)
}
func MustParseNode(text string) *Node {
node, err := ParseNode(text)
if err != nil {
panic(err)
}
return node
} | day18/node.go | 0.610221 | 0.421135 | node.go | starcoder |
package check
import (
"go/ast"
"go/token"
"strconv"
"strings"
"unicode"
"golang.org/x/tools/go/ast/astutil"
)
// Exposed here for unit tests.
const (
	// Issue messages reported by the declaration checks below.
	notOnlyTopAddTestMsg      = `testing.AddTest() should be the only top level statement of init()`
	addTestArgLitMsg          = `testing.AddTest() should take &testing.Test{...} composite literal`
	noDescMsg                 = `Desc field should be filled to describe the registered entity`
	nonLiteralDescMsg         = `Desc should be string literal`
	badDescMsg                = `Desc should be capitalized phrases without trailing punctuation, e.g. "Checks that foo is bar"`
	noContactMsg              = `Contacts field should exist to list owners' email addresses`
	nonLiteralContactsMsg     = `Contacts field should be an array literal of string literals`
	nonLiteralAttrMsg         = `Test Attr should be an array literal of string literals`
	nonLiteralVarsMsg         = `Test Vars should be an array literal of string literals or constants, or append(array literal, ConstList...)`
	nonLiteralSoftwareDepsMsg = `Test SoftwareDeps should be an array literal of string literals or constants, or append(array literal, ConstList...)`
	nonLiteralParamsMsg       = `Test Params should be an array literal of Param struct literals`
	nonLiteralParamNameMsg    = `Name of Param should be a string literal`

	// Documentation links attached to the issues above.
	testRegistrationURL     = `https://chromium.googlesource.com/chromiumos/platform/tast/+/HEAD/docs/writing_tests.md#Test-registration`
	testParamTestURL        = `https://chromium.googlesource.com/chromiumos/platform/tast/+/HEAD/docs/writing_tests.md#Parameterized-test-registration`
	testRuntimeVariablesURL = `https://chromium.googlesource.com/chromiumos/platform/tast/+/HEAD/docs/writing_tests.md#Runtime-variables`
)
// TestDeclarations checks declarations of testing.Test structs.
// Non-entry files are skipped; fix is forwarded so fixable issues can be
// rewritten in place.
func TestDeclarations(fs *token.FileSet, f *ast.File, fix bool) []*Issue {
	if !isEntryFile(fs.Position(f.Package).Filename) {
		return nil
	}
	var issues []*Issue
	for _, decl := range f.Decls {
		issues = append(issues, verifyInit(fs, decl, fix)...)
	}
	return issues
}
// FixtureDeclarations checks declarations of testing.Fixture structs by
// scanning every init() body for testing.AddFixture(...) calls.
func FixtureDeclarations(fs *token.FileSet, f *ast.File, fix bool) []*Issue {
	var issues []*Issue
	for _, d := range f.Decls {
		fn, ok := d.(*ast.FuncDecl)
		if !ok || fn.Recv != nil || fn.Name.Name != "init" {
			// Only plain init() functions can register fixtures.
			continue
		}
		for _, stmt := range fn.Body.List {
			es, ok := stmt.(*ast.ExprStmt)
			if !ok {
				continue
			}
			call, ok := es.X.(*ast.CallExpr)
			if !ok || toQualifiedName(call.Fun) != "testing.AddFixture" {
				continue
			}
			issues = append(issues, verifyAddFixtureCall(fs, call, fix)...)
		}
	}
	return issues
}
// verifyInit checks init() function declared at node.
// If the node is not init() function, returns nil.
func verifyInit(fs *token.FileSet, node ast.Decl, fix bool) []*Issue {
	decl, ok := node.(*ast.FuncDecl)
	if !ok || decl.Recv != nil || decl.Name.Name != "init" {
		// Not an init() function declaration. Skip.
		return nil
	}
	// Good shape: init() has exactly one statement, the testing.AddTest
	// call itself; validate its argument in detail.
	if len(decl.Body.List) == 1 {
		if estmt, ok := decl.Body.List[0].(*ast.ExprStmt); ok && isTestingAddTestCall(estmt.X) {
			// X's type is already verified in isTestingAddTestCall().
			return verifyAddTestCall(fs, estmt.X.(*ast.CallExpr), fix)
		}
	}
	// Otherwise init() carries extra statements: if an AddTest call is
	// buried anywhere inside, report that it must stand alone.
	var addTestNode ast.Node
	ast.Walk(funcVisitor(func(n ast.Node) {
		if addTestNode == nil && isTestingAddTestCall(n) {
			addTestNode = n
		}
	}), node)
	if addTestNode != nil {
		return []*Issue{{
			Pos:  fs.Position(addTestNode.Pos()),
			Msg:  notOnlyTopAddTestMsg,
			Link: testRegistrationURL,
		}}
	}
	return nil
}
// entityFields maps a struct field name to its key/value expression inside
// a registration composite literal.
type entityFields map[string]*ast.KeyValueExpr

// registeredEntityFields returns a mapping from field name to value, or issues
// on error.
// call must be a registration of an entity, e.g. testing.AddTest or
// testing.AddFixture.
func registeredEntityFields(fs *token.FileSet, call *ast.CallExpr) (entityFields, []*Issue) {
	if len(call.Args) != 1 {
		// Wrong arity would not compile anyway, so no issue is reported.
		return nil, nil
	}
	badArg := []*Issue{{
		Pos:  fs.Position(call.Args[0].Pos()),
		Msg:  addTestArgLitMsg,
		Link: testRegistrationURL,
	}}
	// The argument must literally be "&testing.Test{...}".
	unary, ok := call.Args[0].(*ast.UnaryExpr)
	if !ok || unary.Op != token.AND {
		return nil, badArg
	}
	comp, ok := unary.X.(*ast.CompositeLit)
	if !ok {
		return nil, badArg
	}
	fields := make(entityFields)
	for _, el := range comp.Elts {
		if kv, ok := el.(*ast.KeyValueExpr); ok {
			if ident, ok := kv.Key.(*ast.Ident); ok {
				fields[ident.Name] = kv
			}
		}
	}
	return fields, nil
}
// verifyAddFixtureCall validates the fixture literal passed to
// testing.AddFixture: runtime variables, description, and contacts.
func verifyAddFixtureCall(fs *token.FileSet, call *ast.CallExpr, fix bool) []*Issue {
	fields, issues := registeredEntityFields(fs, call)
	if len(issues) > 0 {
		return issues
	}
	issues = append(issues, verifyVars(fs, fields)...)
	issues = append(issues, verifyDesc(fs, fields, call, fix)...)
	return append(issues, verifyContacts(fs, fields, call)...)
}

// verifyAddTestCall verifies testing.AddTest calls. Specifically
// - testing.AddTest() can take a pointer of a testing.Test composite literal.
// - verifies each element of the testing.Test literal.
func verifyAddTestCall(fs *token.FileSet, call *ast.CallExpr, fix bool) []*Issue {
	fields, issues := registeredEntityFields(fs, call)
	if len(issues) > 0 {
		return issues
	}
	if attr, ok := fields["Attr"]; ok {
		issues = append(issues, verifyAttr(fs, attr.Value)...)
	}
	if deps, ok := fields["SoftwareDeps"]; ok {
		issues = append(issues, verifySoftwareDeps(fs, deps.Value)...)
	}
	issues = append(issues, verifyVars(fs, fields)...)
	issues = append(issues, verifyParams(fs, fields)...)
	issues = append(issues, verifyDesc(fs, fields, call, fix)...)
	return append(issues, verifyContacts(fs, fields, call)...)
}
// verifyDesc checks the Desc field: it must exist, be a string literal, and
// read as a capitalized phrase without trailing punctuation. With fix set,
// a bad-but-present literal is rewritten in place (capitalized, trailing
// periods stripped) instead of being reported.
func verifyDesc(fs *token.FileSet, fields entityFields, call *ast.CallExpr, fix bool) []*Issue {
	kv, ok := fields["Desc"]
	if !ok {
		return []*Issue{{
			Pos:  fs.Position(call.Args[0].Pos()),
			Msg:  noDescMsg,
			Link: testRegistrationURL,
		}}
	}
	node := kv.Value
	s, ok := toString(node)
	if !ok {
		return []*Issue{{
			Pos:  fs.Position(node.Pos()),
			Msg:  nonLiteralDescMsg,
			Link: testRegistrationURL,
		}}
	}
	if s == "" || !unicode.IsUpper(rune(s[0])) || s[len(s)-1] == '.' {
		if !fix {
			return []*Issue{{
				Pos:     fs.Position(node.Pos()),
				Msg:     badDescMsg,
				Link:    "https://chromium.googlesource.com/chromiumos/platform/tast/+/HEAD/docs/writing_tests.md#Formatting",
				Fixable: true,
			}}
		}
		// Rewrite the first string literal under the Desc key/value pair.
		astutil.Apply(kv, func(c *astutil.Cursor) bool {
			lit, ok := c.Node().(*ast.BasicLit)
			if !ok || lit.Kind != token.STRING {
				return true
			}
			s, err := strconv.Unquote(lit.Value)
			if err != nil {
				return true
			}
			// Preserve the original quoting style (raw vs interpreted).
			if strtype, ok := stringLitTypeOf(lit.Value); ok {
				c.Replace(&ast.BasicLit{
					Kind:  token.STRING,
					Value: quoteAs(strings.TrimRight(strings.ToUpper(s[:1])+s[1:], "."), strtype),
				})
			}
			// Stop descending: only the top literal is rewritten.
			return false
		}, nil)
	}
	return nil
}
// verifyContacts checks that the Contacts field exists and is an array
// literal whose elements are all string literals.
func verifyContacts(fs *token.FileSet, fields entityFields, call *ast.CallExpr) []*Issue {
	kv, ok := fields["Contacts"]
	if !ok {
		return []*Issue{{
			Pos:  fs.Position(call.Args[0].Pos()),
			Msg:  noContactMsg,
			Link: testRegistrationURL,
		}}
	}
	comp, ok := kv.Value.(*ast.CompositeLit)
	if !ok {
		return []*Issue{{
			Pos:  fs.Position(kv.Value.Pos()),
			Msg:  nonLiteralContactsMsg,
			Link: testRegistrationURL,
		}}
	}
	var issues []*Issue
	for _, el := range comp.Elts {
		if _, isStr := toString(el); isStr {
			continue
		}
		issues = append(issues, &Issue{
			Pos:  fs.Position(el.Pos()),
			Msg:  nonLiteralContactsMsg,
			Link: testRegistrationURL,
		})
	}
	return issues
}

// verifyAttr checks that an Attr (or ExtraAttr) value is an array literal
// of string literals.
func verifyAttr(fs *token.FileSet, node ast.Node) []*Issue {
	comp, ok := node.(*ast.CompositeLit)
	if !ok {
		return []*Issue{{
			Pos:  fs.Position(node.Pos()),
			Msg:  nonLiteralAttrMsg,
			Link: testRegistrationURL,
		}}
	}
	var issues []*Issue
	for _, el := range comp.Elts {
		if _, isStr := toString(el); isStr {
			continue
		}
		issues = append(issues, &Issue{
			Pos:  fs.Position(el.Pos()),
			Msg:  nonLiteralAttrMsg,
			Link: testRegistrationURL,
		})
	}
	return issues
}
func isStaticString(expr ast.Expr) bool {
_, isString := expr.(*ast.BasicLit)
_, isIdent := expr.(*ast.Ident)
_, isSelector := expr.(*ast.SelectorExpr)
return isString || isSelector || isIdent
}
func isStaticStringList(expr ast.Expr) bool {
_, isSelector := expr.(*ast.SelectorExpr)
if isSelector {
return true
}
if compositeLit, ok := expr.(*ast.CompositeLit); ok {
for _, arg := range compositeLit.Elts {
if !isStaticString(arg) {
return false
}
}
return true
}
if callExpr, ok := expr.(*ast.CallExpr); ok {
fun, ok := callExpr.Fun.(*ast.Ident)
if !ok || fun.Name != "append" {
return false
}
for i, arg := range callExpr.Args {
isVarList := i == 0 || (i == len(callExpr.Args)-1 && callExpr.Ellipsis != token.NoPos)
if isVarList && !isStaticStringList(arg) {
return false
}
if !isVarList && !isStaticString(arg) {
return false
}
}
return true
}
// Since the type of the expression is a list, any selector must be a list constant.
_, ok := expr.(*ast.SelectorExpr)
return ok
}
// verifyVars checks that the optional Vars field is a static string list.
func verifyVars(fs *token.FileSet, fields entityFields) []*Issue {
	kv, ok := fields["Vars"]
	if !ok {
		return nil
	}
	if isStaticStringList(kv.Value) {
		return nil
	}
	return []*Issue{{
		Pos:  fs.Position(kv.Value.Pos()),
		Msg:  nonLiteralVarsMsg,
		Link: testRegistrationURL,
	}}
}

// verifySoftwareDeps checks that a SoftwareDeps (or ExtraSoftwareDeps)
// value is a static string list.
func verifySoftwareDeps(fs *token.FileSet, node ast.Expr) []*Issue {
	if isStaticStringList(node) {
		return nil
	}
	return []*Issue{{
		Pos:  fs.Position(node.Pos()),
		Msg:  nonLiteralSoftwareDepsMsg,
		Link: testRegistrationURL,
	}}
}

// verifyParams checks that the optional Params field is an array literal
// and validates each Param element.
func verifyParams(fs *token.FileSet, fields entityFields) []*Issue {
	kv, ok := fields["Params"]
	if !ok {
		return nil
	}
	comp, ok := kv.Value.(*ast.CompositeLit)
	if !ok {
		return []*Issue{{
			Pos:  fs.Position(kv.Value.Pos()),
			Msg:  nonLiteralParamsMsg,
			Link: testParamTestURL,
		}}
	}
	var issues []*Issue
	for _, el := range comp.Elts {
		issues = append(issues, verifyParamElement(fs, el)...)
	}
	return issues
}
// verifyParamElement validates one element of the Params array: it must be
// a composite literal; its Name field must be a string literal; ExtraAttr
// and ExtraSoftwareDeps are checked with the same rules as the top-level
// Attr and SoftwareDeps fields.
func verifyParamElement(fs *token.FileSet, node ast.Node) []*Issue {
	comp, ok := node.(*ast.CompositeLit)
	if !ok {
		return []*Issue{{
			Pos:  fs.Position(node.Pos()),
			Msg:  nonLiteralParamsMsg,
			Link: testParamTestURL,
		}}
	}
	var issues []*Issue
	for _, el := range comp.Elts {
		kv, ok := el.(*ast.KeyValueExpr)
		if !ok {
			continue
		}
		ident, ok := kv.Key.(*ast.Ident)
		if !ok {
			continue
		}
		switch ident.Name {
		case "Name":
			if _, ok := toString(kv.Value); !ok {
				issues = append(issues, &Issue{
					Pos:  fs.Position(kv.Value.Pos()),
					Msg:  nonLiteralParamNameMsg,
					Link: testParamTestURL,
				})
			}
		case "ExtraAttr":
			issues = append(issues, verifyAttr(fs, kv.Value)...)
		case "ExtraSoftwareDeps":
			issues = append(issues, verifySoftwareDeps(fs, kv.Value)...)
		}
	}
	return issues
}
// isTestingAddTestCall returns true if the call is an expression
// to invoke testing.AddTest().
func isTestingAddTestCall(node ast.Node) bool {
	if call, ok := node.(*ast.CallExpr); ok {
		return toQualifiedName(call.Fun) == "testing.AddTest"
	}
	return false
}

// isStringLiteralOrIdent reports whether node is a string literal or a
// (possibly package-qualified) identifier.
func isStringLiteralOrIdent(node ast.Node) bool {
	_, isStr := toString(node)
	return isStr || toQualifiedName(node) != ""
}
// toString converts the given node representing a string literal
// into string value. If the node is not a string literal, returns
// false for ok.
func toString(node ast.Node) (s string, ok bool) {
lit, ok := node.(*ast.BasicLit)
if !ok || lit.Kind != token.STRING {
return "", false
}
s, err := strconv.Unquote(lit.Value)
if err != nil {
return "", false
}
return s, true
} | cmd/tast-lint/internal/check/declarations.go | 0.717408 | 0.410166 | declarations.go | starcoder |
package natural
import (
"fmt"
"math"
"math/big"
"regexp"
"strconv"
"strings"
"github.com/shopspring/decimal"
)
var (
	// multiplierMap maps a written scale word (English or Swedish) to its
	// integer multiplier; consumed by mapToMultiplier for inputs such as
	// "5 miljoner".
	multiplierMap = map[string]int{
		// eng
		"hundred":  100,
		"thousand": 1000,
		"million":  1000000,
		// swe
		// https://sv.wikipedia.org/wiki/Miljard
		// https://sv.wikipedia.org/wiki/Biljon
		// https://sv.wikipedia.org/wiki/Biljard_%28tal%29
		"hundra":    100,
		"tusen":     1000,
		"miljon":    1000000,
		"miljoner":  1000000,
		"miljard":   1000000000,
		"miljarder": 1000000000,
		"biljon":    1000000000000,
		"biljoner":  1000000000000,
		"biljard":   1000000000000000,
		"biljarder": 1000000000000000,
	}
)
// prefixedStringAsInt parses a string carrying an explicit base prefix:
// "0x" (hex), "0b" (binary) or "0o" (octal). Inputs without such a prefix
// yield an error so callers can fall back to plain decimal parsing.
// (The old comment also named the wrong function, "stringAsInt".)
func prefixedStringAsInt(s string) (int64, error) {
	s = strings.TrimSpace(s)
	// strings.HasPrefix replaces the previous strings.Contains checks,
	// which mis-detected inputs where "0x"/"0b"/"0o" merely appeared
	// somewhere mid-string.
	var base int
	switch {
	case strings.HasPrefix(s, "0x"):
		base = 16
	case strings.HasPrefix(s, "0b"):
		base = 2
	case strings.HasPrefix(s, "0o"):
		base = 8
	default:
		return 0, fmt.Errorf("no prefix")
	}
	return strconv.ParseInt(s[2:], base, 64)
}
// NumberStringToBig converts a string representation of a number such as
// "123", "0x7b" or "1.5" to a Decimal. Base-prefixed integers are tried
// first, then plain decimal parsing.
func NumberStringToBig(s string) (decimal.Decimal, error) {
	s = strings.TrimSpace(s)
	if n, err := prefixedStringAsInt(s); err == nil {
		return decimal.NewFromFloat(float64(n)), nil
	}
	return decimal.NewFromString(s)
}

// ParseNumber parses digits or cardinal numbers (like "five") in English
// and Swedish, trying numeric, Swedish, then English forms in that order.
func ParseNumber(s string) (decimal.Decimal, error) {
	if num, err := NumberStringToBig(s); err == nil {
		return num, nil
	}
	if num, err := ParseNumberSwedish(s); err == nil {
		return num, nil
	}
	if num, err := ParseNumberEnglish(s); err == nil {
		return num, nil
	}
	return decimal.NewFromFloat(0), fmt.Errorf("Cannot parse number '%s'", s)
}
// isNumericString reports whether s consists solely of ASCII digits.
// The empty string vacuously counts as numeric.
func isNumericString(s string) bool {
	for _, r := range s {
		if r < '0' || r > '9' {
			return false
		}
	}
	return true
}
var (
	// multiplierSvSERegex matches "<digits> <scale word>" forms such as
	// "5 miljoner".
	multiplierSvSERegex = regexp.MustCompile(`^(?P<num>[\d]+) (?P<size>hundra|tusen|miljon(er)?|miljard(er)?|biljon(er)?|biljard(er)?)+$`)
	// wholeAndFraction splits "femton och tre fjärdedelar" style inputs.
	wholeAndFraction = regexp.MustCompile(`^(?P<arg1>.*) och (?P<arg2>.*)$`)
	// wholeCommaDecimal splits decimal-comma inputs like "fem komma två".
	wholeCommaDecimal = regexp.MustCompile(`^(?P<arg1>.*) komma (?P<arg2>.*)$`)
	// scaleDataSV lists the large Swedish scale words, largest first, with
	// singular/plural spellings and their numeric scale.
	scaleDataSV = []struct {
		singular string
		plural   string
		scale    float64
	}{
		{
			// 1,000,000,000,000 - 999,999,999,999,999 (1 biljard)
			"biljard",
			"biljarder",
			1000000000000000.,
		},
		{
			// 1,000,000,000,000 - 999,999,999,999,999 (1 biljon)
			"biljon",
			"biljoner",
			1000000000000.,
		},
		{
			// 1,000,000,000 - 999,999,999,999 (1 miljard)
			"miljard",
			"miljarder",
			1000000000.,
		},
		{
			// 1,000,000 - 999,999,999 (1 miljon)
			"miljon",
			"miljoner",
			1000000.,
		},
	}
)
// ParseNumberSwedish parses a natural number written out in Swedish, e.g.
// "tjugofem", "fem komma två" (decimal comma) or "femton och tre
// fjärdedelar" (whole part plus fraction). The empty string parses to 0.
func ParseNumberSwedish(s string) (decimal.Decimal, error) {
	s = strings.TrimSpace(s)
	s = strings.ToLower(s)
	var err error
	res := decimal.NewFromFloat(0)
	if s == "" {
		return res, nil
	}
	if s == "hälften" {
		return decimal.NewFromString("0.5")
	}
	// "5 miljoner" style: a digit count followed by a scale word.
	match := multiplierSvSERegex.FindAllStringSubmatch(s, -1)
	if len(match) != 0 {
		res, err = mapToMultiplier(match[0][1], match[0][2])
		return res, err
	}
	// Glue scale words onto the preceding word so prefix matching works.
	// https://sv.wikipedia.org/wiki/Namn_p%C3%A5_stora_tal
	s = strings.Replace(s, " hundra", "hundra", -1)
	s = strings.Replace(s, " tusen", "tusen", -1)
	s = strings.Replace(s, " miljon", "miljon", -1)   // 10^6
	s = strings.Replace(s, " miljard", "miljard", -1) // 10^9
	s = strings.Replace(s, " biljon", "biljon", -1)   // 10^12
	s = strings.Replace(s, " biljard", "biljard", -1) // 10^15
	// "fem komma två" — decimal comma notation.
	match = wholeCommaDecimal.FindAllStringSubmatch(s, -1)
	if len(match) != 0 {
		res, err = ParseNumberSwedish(match[0][1])
		if err != nil {
			return res, err
		}
		var dec decimal.Decimal
		dec, err = ParseNumberSwedish(match[0][2])
		if err != nil {
			return res, err
		}
		//ten := decimal.NewFromFloat(10.)
		// XXX the Pow() in use is not mainline yet: https://github.com/shopspring/decimal/pull/29
		//scale := ten.Pow(decimal.NewFromFloat(float64(len(dec.String()))))
		scale := decimal.NewFromFloat(math.Pow(10, float64(len(dec.String()))))
		return dec.Div(scale).
			Add(res), nil
	}
	// "femton och tre fjärdedelar" = 15.75
	match = wholeAndFraction.FindAllStringSubmatch(s, -1)
	if len(match) != 0 {
		var frac decimal.Decimal
		res, err = ParseNumberSwedish(match[0][1])
		if err != nil {
			return res, err
		}
		frac, err = parseFractions(match[0][2])
		if err != nil {
			return res, err
		}
		return res.Add(frac), nil
	}
	// Large scales (miljon .. biljard): try "en<singular>" and
	// "<n><plural>" prefixes for n in 1..999.
	for _, d := range scaleDataSV {
		if strings.Contains(s, d.singular) {
			for i := int64(1); i <= 999; i++ {
				prefix := ""
				if i == 1 {
					prefix = "en" + d.singular
				} else {
					prefix = PresentSvSE(i) + d.plural
				}
				if len(s) >= len(prefix) && s[0:len(prefix)] == prefix {
					res, err = ParseNumberSwedish(s[len(prefix):])
					return decimal.NewFromFloat(float64(i)).
						Mul(decimal.NewFromFloat(d.scale)).
						Add(res), err
				}
			}
		}
	}
	// 1,000 - 999,999
	for i := int64(1); i <= 999; i++ {
		if i == 1 && len(s) >= 5 && s[0:5] == "tusen" {
			res, err = ParseNumberSwedish(s[5:])
			return decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(1000.)).
				Add(res), err
		}
		if i == 1 && len(s) >= 7 && s[0:7] == "ettusen" {
			res, err = ParseNumberSwedish(s[7:])
			return decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(1000.)).
				Add(res), err
		}
		if i == 1 && len(s) >= 11 && s[0:11] == "hundratusen" {
			res, err = ParseNumberSwedish(s[11:])
			return decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(100000.)).
				Add(res), err
		}
		// Fixed: the literal used to carry a stray leading apostrophe
		// ("'etthundratusen", 15 bytes), which could never equal the
		// 14-byte slice s[0:14] — the branch was dead code.
		if i == 1 && len(s) >= 14 && s[0:14] == "etthundratusen" {
			res, err = ParseNumberSwedish(s[14:])
			return decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(100000.)).
				Add(res), err
		}
		prefix := PresentSvSE(i) + "tusen"
		if len(s) >= len(prefix) && s[0:len(prefix)] == prefix {
			res, err = ParseNumberSwedish(s[len(prefix):])
			return decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(1000.)).
				Add(res), err
		}
	}
	// 100 - 1999 ("nittonhundra"... ej "ettusenniohundra")
	for prefix, i := range numbersToTwentySvSE {
		prefix = prefix + "hundra"
		if i == 1 && len(s) >= 6 && s[0:6] == "hundra" {
			res, err = ParseNumberSwedish(s[6:])
			if err != nil {
				return res, err
			}
			res = decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(100.)).
				Add(res)
			return res, nil
		}
		if len(s) >= len(prefix) && s[0:len(prefix)] == prefix {
			res, err = ParseNumberSwedish(s[len(prefix):])
			if err != nil {
				return res, err
			}
			res = decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(100.)).
				Add(res)
			return res, nil
		}
	}
	// 20 - 100
	for _, prefix := range tensSvSE {
		if len(s) >= len(prefix) && s[0:len(prefix)] == prefix {
			if tens, _err := arrayIndex(prefix, tensSvSE); _err == nil {
				res, err = ParseNumberSwedish(s[len(prefix):])
				if err != nil {
					return res, err
				}
				_tens := decimal.NewFromFloat(float64(tens * 10))
				return _tens.Add(res), nil
			}
		}
	}
	// 1 - 20
	if v, ok := numbersToTwentySvSE[s]; ok {
		return decimal.NewFromFloat(float64(v)), nil
	}
	if s == "en" {
		return decimal.NewFromFloat(1.), nil
	}
	// Last resort: fraction words such as "tre fjärdedelar".
	res, err = parseFractions(s)
	return res, err
}
// ParseNumberEnglish parses a natural number written out in English, e.g.
// "nineteen" or "two hundred five". Only the "hundred" scale is actually
// decomposed below; the larger scale words are merely normalized onto
// their neighbouring words.
func ParseNumberEnglish(s string) (decimal.Decimal, error) {
	s = strings.TrimSpace(s)
	s = strings.ToLower(s)
	// XXX handle higher numbers
	if s == "" {
		return decimal.NewFromFloat(0), nil
	}
	// Glue scale words onto their neighbours so prefix matching works.
	// https://en.wikipedia.org/wiki/Names_of_large_numbers
	s = strings.Replace(s, " hundred", "hundred", -1)
	s = strings.Replace(s, "hundred ", "hundred", -1)
	s = strings.Replace(s, " thousand", "thousand", -1)
	s = strings.Replace(s, "thousand ", "thousand", -1)
	s = strings.Replace(s, " million", "million", -1) // 10^6
	s = strings.Replace(s, "million ", "million", -1)
	s = strings.Replace(s, " billion", "billion", -1) // 10^9
	s = strings.Replace(s, "billion ", "billion", -1)
	s = strings.Replace(s, " trillion", "trillion", -1) // 10^12
	// Fixed: this line used to replace "trillion" with itself (a no-op),
	// so "trillion " with a trailing space was never normalized, unlike
	// every other scale word above.
	s = strings.Replace(s, "trillion ", "trillion", -1)
	// 100 - 1999
	for prefix, i := range numbersToTwentyEnUS {
		prefix = prefix + "hundred"
		if i == 1 && len(s) >= 7 && s[0:7] == "hundred" {
			res, err := ParseNumberEnglish(s[7:])
			if err != nil {
				return res, err
			}
			res = decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(100.)).
				Add(res)
			return res, nil
		}
		if len(s) >= len(prefix) && s[0:len(prefix)] == prefix {
			res, err := ParseNumberEnglish(s[len(prefix):])
			if err != nil {
				return res, err
			}
			res = decimal.NewFromFloat(float64(i)).
				Mul(decimal.NewFromFloat(100.)).
				Add(res)
			return res, nil
		}
	}
	// 20 - 100
	for _, prefix := range tensEnUS {
		if len(s) >= len(prefix) && s[0:len(prefix)] == prefix {
			if tens, _err := arrayIndex(prefix, tensEnUS); _err == nil {
				res, err := ParseNumberEnglish(s[len(prefix):])
				if err != nil {
					return res, err
				}
				_tens := decimal.NewFromFloat(float64(tens * 10))
				return _tens.Add(res), nil
			}
		}
	}
	// 1 - 20
	if v, ok := numbersToTwentyEnUS[s]; ok {
		return decimal.NewFromFloat(float64(v)), nil
	}
	return decimal.NewFromFloat(0), fmt.Errorf("error")
}
// mapToMultiplier multiplies the numeric string num by the named scale word
// (e.g. "tusen" -> 1000) looked up in multiplierMap.
func mapToMultiplier(num, multiplier string) (decimal.Decimal, error) {
	scale, ok := multiplierMap[multiplier]
	if !ok {
		return decimal.Decimal{}, fmt.Errorf("key not found")
	}
	n, err := decimal.NewFromString(num)
	if err != nil {
		return decimal.Decimal{}, err
	}
	return n.Mul(decimal.NewFromFloat(float64(scale))), nil
}

// getFraction resolves a Swedish fraction word ("tredjedel", optionally
// with plural "ar" and/or genitive "s" suffixes) to a rational, or nil
// when the word is unknown.
func getFraction(s string) *big.Rat {
	// Normalize away suffixes: "tredjedelars" -> "tredjedelar" -> "tredjedel".
	if len(s) > 1 && strings.HasSuffix(s, "s") {
		s = s[:len(s)-1]
	}
	if len(s) > 2 && strings.HasSuffix(s, "ar") {
		s = s[:len(s)-2]
	}
	if v, ok := fractionsSvSE[s]; ok {
		r := new(big.Rat)
		r.SetString(v)
		return r
	}
	// NOTE(review): failure is only logged to stdout, not returned;
	// callers receive nil and must handle it.
	fmt.Println("ERROR getFraction failed to parse", s)
	return nil
}
func parseFractions(s string) (decimal.Decimal, error) {
var res decimal.Decimal
x := strings.SplitN(s, " ", 2)
if len(x) == 1 {
return res, fmt.Errorf("parseFractions failed %s", s)
}
if len(x) == 2 {
num, err := ParseNumber(x[0])
if err != nil {
return res, err
}
fraction := getFraction(x[1])
if fraction == nil {
return res, fmt.Errorf("getFraction failed %s", x[1])
}
// XXX hack, loss of precision:
frac, err := decimal.NewFromString(fraction.FloatString(16))
if err != nil {
return res, err
}
return num.Mul(frac), nil
}
return res, fmt.Errorf("nothing parsed")
} | number.go | 0.523908 | 0.404743 | number.go | starcoder |
package maths
import (
"fmt"
"math"
"github.com/wdevore/Ranger-Go-IGE/api"
)
// vector3 contains base components; it is the concrete value backing the
// api.IVector3 interface returned by the constructors below.
type vector3 struct {
	x, y, z float32
}
// NewVector3 creates a Vector3 initialized to 0.0, 0.0, 0.0.
func NewVector3() api.IVector3 {
	return &vector3{}
}

// NewVector3With3Components creates a Vector3 initialized with x, y, z.
func NewVector3With3Components(x, y, z float32) api.IVector3 {
	return &vector3{x: x, y: y, z: z}
}

// NewVector3With2Components creates a Vector3 initialized with x, y and z = 0.
func NewVector3With2Components(x, y float32) api.IVector3 {
	return &vector3{x: x, y: y}
}

// Clone returns a new copy of this vector.
func (v *vector3) Clone() api.IVector3 {
	c := *v
	return &c
}
// Set3Components overwrites x, y and z.
func (v *vector3) Set3Components(x, y, z float32) {
	v.x = x
	v.y = y
	v.z = z
}

// Set2Components overwrites x and y only; z is left unchanged.
func (v *vector3) Set2Components(x, y float32) {
	v.x = x
	v.y = y
}

// X returns the x component.
func (v *vector3) X() float32 {
	return v.x
}

// Y returns the y component.
// (A previous comment here said "x component"; the code returns y.)
func (v *vector3) Y() float32 {
	return v.y
}
// Z returns the z component.
func (v *vector3) Z() float32 {
	// Fixed: this accessor previously returned v.x, making the real z
	// component unreachable through the interface.
	return v.z
}
// Components2D returns the x and y components.
func (v *vector3) Components2D() (x, y float32) {
	return v.x, v.y
}

// Components3D returns the x, y and z components.
func (v *vector3) Components3D() (x, y, z float32) {
	return v.x, v.y, v.z
}

// Set copies x, y and z from source into this vector.
func (v *vector3) Set(source api.IVector3) {
	v.x = source.X()
	v.y = source.Y()
	v.z = source.Z()
}

// Add accumulates src into this vector component-wise.
func (v *vector3) Add(src api.IVector3) {
	v.x += src.X()
	v.y += src.Y()
	v.z += src.Z()
}

// Add2Components adds x and y to this vector; z is unchanged.
func (v *vector3) Add2Components(x, y float32) {
	v.x += x
	v.y += y
}

// Sub subtracts src from this vector component-wise.
func (v *vector3) Sub(src api.IVector3) {
	v.x -= src.X()
	v.y -= src.Y()
	v.z -= src.Z()
}

// Sub2Components subtracts x and y from this vector; z is unchanged.
func (v *vector3) Sub2Components(x, y float32) {
	v.x -= x
	v.y -= y
}

// ScaleBy multiplies every component of this vector by s.
func (v *vector3) ScaleBy(s float32) {
	v.x *= s
	v.y *= s
	v.z *= s
}

// ScaleBy2Components multiplies x by sx and y by sy; z is unchanged.
func (v *vector3) ScaleBy2Components(sx, sy float32) {
	v.x *= sx
	v.y *= sy
}

// MulAdd adds src scaled by scalar to this vector (v += src * scalar).
func (v *vector3) MulAdd(src api.IVector3, scalar float32) {
	v.x += src.X() * scalar
	v.y += src.Y() * scalar
	v.z += src.Z() * scalar
}
// Length returns the euclidean length of the vector (x, y, z).
func Length(x, y, z float32) float32 {
	sumSq := float64(x*x + y*y + z*z)
	return float32(math.Sqrt(sumSq))
}
// Length returns the euclidean length
func (v *vector3) Length() float32 {
return float32(math.Sqrt(float64(v.x*v.x + v.y*v.y + v.z*v.z)))
}
// LengthSquared returns the euclidean length squared
func LengthSquared(x, y, z float32) float32 {
return x*x + y*y + z*z
}
// LengthSquared returns the squared euclidean length of this vector.
func (v *vector3) LengthSquared() float32 {
	return v.x*v.x + v.y*v.y + v.z*v.z
}
// Equal makes an exact component-wise equality check. For floating point
// values prefer EqEpsilon, which tolerates rounding error.
func (v *vector3) Equal(other api.IVector3) bool {
	return v.x == other.X() && v.y == other.Y() && v.z == other.Z()
}
// EqEpsilon makes an approximate equality check: each component of v must be
// within Epsilon of the corresponding component of other. Preferred over
// Equal for floating point values.
//
// Bug fix: the previous version compared the *signed* differences against
// Epsilon, so any component of v sufficiently smaller than other's passed the
// check no matter how far apart the values were. Comparing absolute
// differences fixes that.
func (v *vector3) EqEpsilon(other api.IVector3) bool {
	dx := math.Abs(float64(v.x - other.X()))
	dy := math.Abs(float64(v.y - other.Y()))
	dz := math.Abs(float64(v.z - other.Z()))
	return dx < float64(Epsilon) && dy < float64(Epsilon) && dz < float64(Epsilon)
}
// Distance returns the euclidean distance between the points (x1, y1, z1)
// and (x2, y2, z2).
func Distance(x1, y1, z1, x2, y2, z2 float32) float32 {
	dx := x2 - x1
	dy := y2 - y1
	dz := z2 - z1
	return float32(math.Sqrt(float64(dx*dx + dy*dy + dz*dz)))
}
// Distance returns the euclidean distance between this vector and src.
func (v *vector3) Distance(src api.IVector3) float32 {
	a := src.X() - v.x
	b := src.Y() - v.y
	c := src.Z() - v.z
	return float32(math.Sqrt(float64(a*a + b*b + c*c)))
}
// DistanceSquared returns the squared euclidean distance between the points
// (x1, y1, z1) and (x2, y2, z2), avoiding the cost of a square root.
func DistanceSquared(x1, y1, z1, x2, y2, z2 float32) float32 {
	dx := x2 - x1
	dy := y2 - y1
	dz := z2 - z1
	return dx*dx + dy*dy + dz*dz
}
// DistanceSquared returns the squared euclidean distance between this vector
// and src.
func (v *vector3) DistanceSquared(src api.IVector3) float32 {
	a := src.X() - v.x
	b := src.Y() - v.y
	c := src.Z() - v.z
	return a*a + b*b + c*c
}
// Dot returns the dot product of (x1, y1, z1) and (x2, y2, z2).
func Dot(x1, y1, z1, x2, y2, z2 float32) float32 {
	p := x1 * x2
	p += y1 * y2
	p += z1 * z2
	return p
}
// DotByComponent returns the dot product of this vector and (x, y, z).
func (v *vector3) DotByComponent(x, y, z float32) float32 {
	return v.x*x + v.y*y + v.z*z
}
// Dot returns the dot product of this vector and o.
func (v *vector3) Dot(o api.IVector3) float32 {
	return v.x*o.X() + v.y*o.Y() + v.z*o.Z()
}
// Cross sets this vector to the cross product of itself and o (v = v x o).
func (v *vector3) Cross(o api.IVector3) {
	v.Set3Components(
		v.y*o.Z()-v.z*o.Y(),
		v.z*o.X()-v.x*o.Z(),
		v.x*o.Y()-v.y*o.X())
}
// --------------------------------------------------------------------------
// Transforms
// --------------------------------------------------------------------------
// |M00 M01 M02 M03| |x|
// |M10 M11 M12 M13| x |y|
// |M20 M21 M22 M23| |z|
// |M30 M31 M32 M33| |1|
// Mul left-multiplies the vector by the given matrix, assuming the fourth (w)
// component of the vector is 1 — i.e. a point transform, so the translation
// column (M03, M13, M23) is applied.
func (v *vector3) Mul(m api.IMatrix4) {
	me := m.Matrix()
	v.Set3Components(
		v.x*me[M00]+v.y*me[M01]+v.z*me[M02]+me[M03],
		v.x*me[M10]+v.y*me[M11]+v.z*me[M12]+me[M13],
		v.x*me[M20]+v.y*me[M21]+v.z*me[M22]+me[M23])
}
func (v vector3) String() string {
return fmt.Sprintf("<%7.3f, %7.3f, %7.3f>", v.x, v.y, v.z)
} | engine/maths/vector3.go | 0.901891 | 0.554109 | vector3.go | starcoder |
package types
import "github.com/centrifuge/go-substrate-rpc-client/v4/scale"
// BodyID is a SCALE-encoded enum: exactly one of the Is* flags is expected to
// be set, and only the Named and Index variants carry a payload.
type BodyID struct {
	IsUnit bool
	IsNamed bool
	// Body is the name payload, meaningful only when IsNamed is set.
	Body []U8
	IsIndex bool
	// Index is the payload, meaningful only when IsIndex is set.
	Index U32
	IsExecutive bool
	IsTechnical bool
	IsLegislative bool
	IsJudicial bool
}
// Decode reads a BodyID from the SCALE stream: one variant byte, followed by
// the payload for the Named (1) and Index (2) variants.
//
// NOTE(review): variant bytes greater than 6 are silently accepted and leave
// every flag false — confirm whether an "unknown variant" error should be
// returned instead.
func (b *BodyID) Decode(decoder scale.Decoder) error {
	bb, err := decoder.ReadOneByte()
	if err != nil {
		return err
	}
	switch bb {
	case 0:
		b.IsUnit = true
	case 1:
		b.IsNamed = true
		return decoder.Decode(&b.Body)
	case 2:
		b.IsIndex = true
		return decoder.Decode(&b.Index)
	case 3:
		b.IsExecutive = true
	case 4:
		b.IsTechnical = true
	case 5:
		b.IsLegislative = true
	case 6:
		b.IsJudicial = true
	}
	return nil
}
// Encode writes the BodyID to the SCALE stream: the variant byte, followed by
// the payload for the Named (1) and Index (2) variants.
//
// NOTE(review): if no Is* flag is set, nothing is written and nil is returned,
// which would corrupt the output stream — confirm whether this should be an
// error instead.
func (b BodyID) Encode(encoder scale.Encoder) error {
	switch {
	case b.IsUnit:
		return encoder.PushByte(0)
	case b.IsNamed:
		if err := encoder.PushByte(1); err != nil {
			return err
		}
		return encoder.Encode(b.Body)
	case b.IsIndex:
		if err := encoder.PushByte(2); err != nil {
			return err
		}
		return encoder.Encode(b.Index)
	case b.IsExecutive:
		return encoder.PushByte(3)
	case b.IsTechnical:
		return encoder.PushByte(4)
	case b.IsLegislative:
		return encoder.PushByte(5)
	case b.IsJudicial:
		return encoder.PushByte(6)
	}
	return nil
}
// BodyPart is a SCALE-encoded enum: exactly one of the Is* flags is expected
// to be set. The Members variant carries a count; the Fraction,
// AtLeastProportion and MoreThanProportion variants each carry a
// numerator/denominator pair.
type BodyPart struct {
	IsVoice bool
	IsMembers bool
	// MembersCount is meaningful only when IsMembers is set.
	MembersCount U32
	IsFraction bool
	// FractionNom/FractionDenom are meaningful only when IsFraction is set.
	FractionNom U32
	FractionDenom U32
	IsAtLeastProportion bool
	// AtLeastProportionNom/Denom are meaningful only when IsAtLeastProportion is set.
	AtLeastProportionNom U32
	AtLeastProportionDenom U32
	IsMoreThanProportion bool
	// MoreThanProportionNom/Denom are meaningful only when IsMoreThanProportion is set.
	MoreThanProportionNom U32
	MoreThanProportionDenom U32
}
// Decode reads a BodyPart from the SCALE stream: one variant byte, followed
// by the payload fields of the selected variant.
//
// NOTE(review): variant bytes greater than 4 are silently accepted and leave
// every flag false — confirm whether an "unknown variant" error should be
// returned instead.
func (b *BodyPart) Decode(decoder scale.Decoder) error {
	bb, err := decoder.ReadOneByte()
	if err != nil {
		return err
	}
	switch bb {
	case 0:
		b.IsVoice = true
	case 1:
		b.IsMembers = true
		return decoder.Decode(&b.MembersCount)
	case 2:
		b.IsFraction = true
		if err := decoder.Decode(&b.FractionNom); err != nil {
			return err
		}
		return decoder.Decode(&b.FractionDenom)
	case 3:
		b.IsAtLeastProportion = true
		if err := decoder.Decode(&b.AtLeastProportionNom); err != nil {
			return err
		}
		return decoder.Decode(&b.AtLeastProportionDenom)
	case 4:
		b.IsMoreThanProportion = true
		if err := decoder.Decode(&b.MoreThanProportionNom); err != nil {
			return err
		}
		return decoder.Decode(&b.MoreThanProportionDenom)
	}
	return nil
}
func (b BodyPart) Encode(encoder scale.Encoder) error {
switch {
case b.IsVoice:
return encoder.PushByte(0)
case b.IsMembers:
if err := encoder.PushByte(1); err != nil {
return err
}
return encoder.Encode(b.MembersCount)
case b.IsFraction:
if err := encoder.PushByte(2); err != nil {
return err
}
if err := encoder.Encode(b.FractionNom); err != nil {
return err
}
return encoder.Encode(b.FractionDenom)
case b.IsAtLeastProportion:
if err := encoder.PushByte(3); err != nil {
return err
}
if err := encoder.Encode(b.AtLeastProportionNom); err != nil {
return err
}
return encoder.Encode(b.AtLeastProportionDenom)
case b.IsMoreThanProportion:
if err := encoder.PushByte(4); err != nil {
return err
}
if err := encoder.Encode(b.MoreThanProportionNom); err != nil {
return err
}
return encoder.Encode(b.MoreThanProportionDenom)
}
return nil
} | types/body.go | 0.633524 | 0.405861 | body.go | starcoder |
package main
import (
"flag"
"github.com/SOMAS2020/SOMAS2020/internal/common/config"
"github.com/SOMAS2020/SOMAS2020/internal/common/shared"
"github.com/pkg/errors"
)
var (
// config.Config
maxSeasons = flag.Uint(
"maxSeasons",
100,
"The maximum number of 1-indexed seasons to run the game.",
)
maxTurns = flag.Uint(
"maxTurns",
50,
"The maximum numbers of 1-indexed turns to run the game.",
)
initialResources = flag.Float64(
"initialResources",
1000,
"The default number of resources at the start of the game.",
)
initialCommonPool = flag.Float64(
"initialCommonPool",
600,
"The default number of resources in the common pool at the start of the game.",
)
costOfLiving = flag.Float64(
"costOfLiving",
10,
"Subtracted from an islands pool before the next turn.\n"+
"This is the simulation-level equivalent to using resources to stay \n"+
"alive (e.g. food consumed). These resources are permanently consumed and do \n"+
" NOT go into the common pool. Note: this is NOT the same as the tax",
)
minimumResourceThreshold = flag.Float64(
"minimumResourceThreshold",
200,
"The minimum resources required for an island to not be in Critical state.",
)
maxCriticalConsecutiveTurns = flag.Uint(
"maxCriticalConsecutiveTurns",
5,
"The maximum consecutive turns an island can be in the critical state.",
)
// config.ForagingConfig.DeerHuntConfig
foragingDeerMaxPerHunt = flag.Uint(
"foragingMaxDeerPerHunt",
5,
"Max possible number of deer on a single hunt (regardless of number of participants). ** should be strictly less than max deer population.",
)
foragingDeerStaticProb = flag.Bool(
"foragingDeerStaticProb",
true,
"Toggles whether the probability of catching a deer is static or linked to the current population size",
)
foragingDeerStrict2Player = flag.Bool(
"foragingDeerStrict2Player",
true,
"Toggles whether subsequent deer can be foraged in a single deer hunt, or if 1 can be caught per 2 deer hunters",
)
foragingDeerIncrementalInputDecay = flag.Float64(
"foragingDeerIncrementalInputDecay",
0.9,
"Determines decay of incremental input cost of hunting more deer.",
)
foragingDeerBernoulliProb = flag.Float64(
"foragingDeerBernoulliProb",
0.8,
"`p` param in D variable (see foraging README). Controls prob of catching a deer or not.",
)
foragingDeerExponentialRate = flag.Float64(
"foragingDeerExponentialRate",
0.5,
"`lambda` param in W variable (see foraging README). Controls distribution of deer sizes.",
)
foragingDeerInputScaler = flag.Float64(
"foragingDeerInputScaler",
0,
"scalar value that adjusts deer input resources to be in a range that is commensurate with cost of living, salaries etc.",
)
foragingDeerOutputScaler = flag.Float64(
"foragingDeerOutputScaler",
40,
"scalar value that adjusts deer returns to be in a range that is commensurate with cost of living, salaries etc.",
)
foragingDeerDistributionStrategy = flag.Int(
"foragingDeerDistributionStrategy",
int(shared.InputProportionalSplit),
shared.HelpResourceDistributionStrategy(),
)
foragingDeerThetaCritical = flag.Float64(
"foragingDeerThetaCritical",
0.97,
"Bernoulli prob of catching deer when population ratio = running population/max deer per hunt = 1",
)
foragingDeerThetaMax = flag.Float64(
"foragingDeerThetaMax",
0.99,
"Bernoulli prob of catching deer when population is at carrying capacity (max population)",
)
foragingDeerMaxPopulation = flag.Uint(
"foragingDeerMaxPopulation",
20,
"Max possible deer population. ** Should be strictly greater than max deer per hunt.",
)
foragingDeerGrowthCoefficient = flag.Float64(
"foragingDeerGrowthCoefficient",
0.4,
"Scaling parameter used in the population model. Larger coeff => deer pop. regenerates faster.",
)
// config.ForagingConfig.FishingConfig
foragingFishMaxPerHunt = flag.Uint(
"foragingMaxFishPerHunt",
10,
"Max possible catch (num. fish) on a single expedition (regardless of number of participants).",
)
foragingFishingIncrementalInputDecay = flag.Float64(
"foragingFishingIncrementalInputDecay",
0.95,
"Determines decay of incremental input cost of catching more fish.",
)
foragingFishingMean = flag.Float64(
"foragingFishingMean",
1.45,
"Determines mean of normal distribution of fishing return (see foraging README)",
)
foragingFishingVariance = flag.Float64(
"foragingFishingVariance",
0.1,
"Determines variance of normal distribution of fishing return (see foraging README)",
)
foragingFishingInputScaler = flag.Float64(
"foragingFishingInputScaler",
18,
"scalar value that adjusts input resources to be in a range that is commensurate with cost of living, salaries etc.",
)
foragingFishingOutputScaler = flag.Float64(
"foragingFishingOutputScaler",
18,
"scalar value that adjusts returns to be in a range that is commensurate with cost of living, salaries etc.",
)
foragingFishingDistributionStrategy = flag.Int(
"foragingFishingDistributionStrategy",
int(shared.EqualSplit),
shared.HelpResourceDistributionStrategy(),
)
// config.DisasterConfig
disasterXMin = flag.Float64(
"disasterXMin",
0,
"Min x bound of archipelago (bounds for possible disaster).",
)
disasterXMax = flag.Float64(
"disasterXMax",
10,
"Max x bound of archipelago (bounds for possible disaster).",
)
disasterYMin = flag.Float64(
"disasterYMin",
0,
"Min y bound of archipelago (bounds for possible disaster).",
)
disasterYMax = flag.Float64(
"disasterYMax",
10,
"Max y bound of archipelago (bounds for possible disaster).",
)
disasterPeriod = flag.Uint(
"disasterPeriod",
5,
"Period T between disasters in deterministic case and E[T] in stochastic case.",
)
disasterSpatialPDFType = flag.Int(
"disasterSpatialPDFType",
0,
shared.HelpSpatialPDFType(),
)
disasterMagnitudeLambda = flag.Float64(
"disasterMagnitudeLambda",
6.5,
"Gaussian mean for disaster magnitude",
)
disasterMagnitudeResourceMultiplier = flag.Float64(
"disasterMagnitudeResourceMultiplier",
85,
"Multiplier to map disaster magnitude to CP resource deductions",
)
disasterCommonpoolThreshold = flag.Float64(
"disasterCommonpoolThreshold",
300,
"Common pool threshold value for disaster to be mitigated",
)
disasterStochasticPeriod = flag.Bool(
"disasterStochasticPeriod",
false,
"If true, period between disasters becomes random. If false, it will be consistent (deterministic)",
)
disasterCommonpoolThresholdVisible = flag.Bool(
"disasterCommonpoolThresholdVisible",
shared.CPVis,
"Whether disasterCommonpoolThreshold is visible to agents",
)
disasterPeriodVisible = flag.Bool(
"disasterPeriodVisible",
shared.PerVis,
"Whether disasterPeriod is visible to agents",
)
disasterStochasticPeriodVisible = flag.Bool(
"disasterStochasticPeriodVisible",
false,
"Whether stochasticPeriod is visible to agents",
)
// config.IIGOConfig - Executive branch
iigoGetRuleForSpeakerActionCost = flag.Float64(
"iigoGetRuleForSpeakerActionCost",
2,
"IIGO action cost for getRuleForSpeaker action",
)
iigoBroadcastTaxationActionCost = flag.Float64(
"iigoBroadcastTaxationActionCost",
0,
"IIGO action cost for broadcastTaxation action",
)
iigoReplyAllocationRequestsActionCost = flag.Float64(
"iigoReplyAllocationRequestsActionCost",
2,
"IIGO action cost for replyAllocationRequests action",
)
iigoRequestAllocationRequestActionCost = flag.Float64(
"iigoRequestAllocationRequestActionCost",
2,
"IIGO action cost for requestAllocationRequest action",
)
iigoRequestRuleProposalActionCost = flag.Float64(
"iigoRequestRuleProposalActionCost",
2,
"IIGO action cost for requestRuleProposal action",
)
iigoAppointNextSpeakerActionCost = flag.Float64(
"iigoAppointNextSpeakerActionCost",
2,
"IIGO action cost for appointNextSpeaker action",
)
// config.IIGOConfig - Judiciary branch
iigoInspectHistoryActionCost = flag.Float64(
"iigoInspectHistoryActionCost",
2,
"IIGO action cost for inspectHistory",
)
historicalRetributionActionCost = flag.Float64(
"historicalRetributionActionCost",
2,
"IIGO action cost for inspectHistory retroactively (in turns before the last one)",
)
iigoInspectBallotActionCost = flag.Float64(
"iigoInspectBallotActionCost",
2,
"IIGO action cost for inspectBallot",
)
iigoInspectAllocationActionCost = flag.Float64(
"iigoInspectAllocationActionCost",
2,
"IIGO action cost for inspectAllocation",
)
iigoAppointNextPresidentActionCost = flag.Float64(
"iigoAppointNextPresidentActionCost",
2,
"IIGO action cost for appointNextPresident",
)
iigoDefaultSanctionScore = flag.Uint(
"iigoDefaultSanctionScore",
2,
"Default penalty score for breaking a rule",
)
iigoSanctionCacheDepth = flag.Uint(
"iigoSanctionCacheDepth",
3,
"Turn depth of sanctions to be applied or pardoned",
)
iigoHistoryCacheDepth = flag.Uint(
"iigoHistoryCacheDepth",
3,
"Turn depth of history cache for events to be evaluated",
)
iigoAssumedResourcesNoReport = flag.Uint(
"iigoAssumedResourcesNoReport",
100,
"If an island doesn't report usaged this value is assumed for sanction calculations",
)
iigoSanctionLength = flag.Uint(
"iigoSanctionLength",
5,
"Sanction length for all sanctions",
)
// config.IIGOConfig - Legislative branch
iigoSetVotingResultActionCost = flag.Float64(
"iigoSetVotingResultActionCost",
2,
"IIGO action cost for setVotingResult",
)
iigoSetRuleToVoteActionCost = flag.Float64(
"iigoSetRuleToVoteActionCost",
2,
"IIGO action cost for setRuleToVote action",
)
iigoAnnounceVotingResultActionCost = flag.Float64(
"iigoAnnounceVotingResultActionCost",
2,
"IIGO action cost for announceVotingResult action",
)
iigoUpdateRulesActionCost = flag.Float64(
"iigoUpdateRulesActionCost",
2,
"IIGO action cost for updateRules action",
)
iigoAppointNextJudgeActionCost = flag.Float64(
"iigoAppointNextJudgeActionCost",
2,
"IIGO action cost for appointNextJudge action",
)
iigoTermLengthPresident = flag.Uint(
"iigoTermLengthPresident",
4,
"Length of the term for the President",
)
iigoTermLengthSpeaker = flag.Uint(
"iigoTermLengthSpeaker",
4,
"Length of the term for the Speaker",
)
iigoTermLengthJudge = flag.Uint(
"iigoTermLengthJudge",
4,
"Length of the term for the Judge",
)
startWithRulesInPlay = flag.Bool(
"startWithRulesInPlay",
true,
"Pull all available rules into play at start of run",
)
)
func parseConfig() (config.Config, error) {
flag.Parse()
parsedForagingDeerDistributionStrategy, err := shared.ParseResourceDistributionStrategy(*foragingDeerDistributionStrategy)
if err != nil {
return config.Config{}, errors.Errorf("Error parsing foragingDeerDistributionStrategy: %v", err)
}
parsedDeerMaxPerHunt, parseDeerMaxPopulation, err := shared.ParseDeerPopulationParams(
*foragingDeerMaxPerHunt,
*foragingDeerMaxPopulation,
)
if err != nil {
return config.Config{}, errors.Errorf("Error parsing foragingDeerMaxPerHunt and/or foragingDeerMaxPopulation: %v", err)
}
parsedForagingFishingDistributionStrategy, err := shared.ParseResourceDistributionStrategy(*foragingFishingDistributionStrategy)
if err != nil {
return config.Config{}, errors.Errorf("Error parsing foragingFishingDistributionStrategy: %v", err)
}
parsedDisasterSpatialPDFType, err := shared.ParseSpatialPDFType(*disasterSpatialPDFType)
if err != nil {
return config.Config{}, errors.Errorf("Error parsing disasterSpatialPDFType: %v", err)
}
deerConf := config.DeerHuntConfig{
//Deer parameters
MaxDeerPerHunt: parsedDeerMaxPerHunt,
IncrementalInputDecay: *foragingDeerIncrementalInputDecay,
BernoulliProb: *foragingDeerBernoulliProb,
ExponentialRate: *foragingDeerExponentialRate,
InputScaler: *foragingDeerInputScaler,
OutputScaler: *foragingDeerOutputScaler,
DistributionStrategy: parsedForagingDeerDistributionStrategy,
ThetaCritical: *foragingDeerThetaCritical,
ThetaMax: *foragingDeerThetaMax,
MaxDeerPopulation: parseDeerMaxPopulation,
DeerGrowthCoefficient: *foragingDeerGrowthCoefficient,
DeerStaticProb: *foragingDeerStaticProb,
DeerStrict2Player: *foragingDeerStrict2Player,
}
fishingConf := config.FishingConfig{
// Fish parameters
MaxFishPerHunt: *foragingFishMaxPerHunt,
IncrementalInputDecay: *foragingFishingIncrementalInputDecay,
Mean: *foragingFishingMean,
Variance: *foragingFishingVariance,
InputScaler: *foragingFishingInputScaler,
OutputScaler: *foragingFishingOutputScaler,
DistributionStrategy: parsedForagingFishingDistributionStrategy,
}
foragingConf := config.ForagingConfig{
DeerHuntConfig: deerConf,
FishingConfig: fishingConf,
}
disasterConf := config.DisasterConfig{
XMin: *disasterXMin,
XMax: *disasterXMax,
YMin: *disasterYMin,
YMax: *disasterYMax,
Period: *disasterPeriod,
SpatialPDFType: parsedDisasterSpatialPDFType,
MagnitudeLambda: *disasterMagnitudeLambda,
StochasticPeriod: *disasterStochasticPeriod,
MagnitudeResourceMultiplier: *disasterMagnitudeResourceMultiplier,
CommonpoolThreshold: shared.Resources(*disasterCommonpoolThreshold),
CommonpoolThresholdVisible: *disasterCommonpoolThresholdVisible,
PeriodVisible: *disasterPeriodVisible,
StochasticPeriodVisible: *disasterStochasticPeriodVisible,
}
iigoConf := config.IIGOConfig{
IIGOTermLengths: map[shared.Role]uint{shared.President: *iigoTermLengthPresident,
shared.Speaker: *iigoTermLengthSpeaker,
shared.Judge: *iigoTermLengthJudge},
// Executive branch
GetRuleForSpeakerActionCost: shared.Resources(*iigoGetRuleForSpeakerActionCost),
BroadcastTaxationActionCost: shared.Resources(*iigoBroadcastTaxationActionCost),
ReplyAllocationRequestsActionCost: shared.Resources(*iigoReplyAllocationRequestsActionCost),
RequestAllocationRequestActionCost: shared.Resources(*iigoRequestAllocationRequestActionCost),
RequestRuleProposalActionCost: shared.Resources(*iigoRequestRuleProposalActionCost),
AppointNextSpeakerActionCost: shared.Resources(*iigoAppointNextSpeakerActionCost),
// Judiciary branch
InspectHistoryActionCost: shared.Resources(*iigoInspectHistoryActionCost),
HistoricalRetributionActionCost: shared.Resources(*historicalRetributionActionCost),
InspectBallotActionCost: shared.Resources(*iigoInspectBallotActionCost),
InspectAllocationActionCost: shared.Resources(*iigoInspectAllocationActionCost),
AppointNextPresidentActionCost: shared.Resources(*iigoAppointNextPresidentActionCost),
DefaultSanctionScore: shared.IIGOSanctionsScore(*iigoDefaultSanctionScore),
SanctionCacheDepth: *iigoSanctionCacheDepth,
HistoryCacheDepth: *iigoHistoryCacheDepth,
AssumedResourcesNoReport: shared.Resources(*iigoAssumedResourcesNoReport),
SanctionLength: *iigoSanctionLength,
// Legislative branch
SetVotingResultActionCost: shared.Resources(*iigoSetVotingResultActionCost),
SetRuleToVoteActionCost: shared.Resources(*iigoSetRuleToVoteActionCost),
AnnounceVotingResultActionCost: shared.Resources(*iigoAnnounceVotingResultActionCost),
UpdateRulesActionCost: shared.Resources(*iigoUpdateRulesActionCost),
AppointNextJudgeActionCost: shared.Resources(*iigoAppointNextJudgeActionCost),
StartWithRulesInPlay: *startWithRulesInPlay,
}
return config.Config{
MaxSeasons: *maxSeasons,
MaxTurns: *maxTurns,
InitialResources: shared.Resources(*initialResources),
InitialCommonPool: shared.Resources(*initialCommonPool),
CostOfLiving: shared.Resources(*costOfLiving),
MinimumResourceThreshold: shared.Resources(*minimumResourceThreshold),
MaxCriticalConsecutiveTurns: *maxCriticalConsecutiveTurns,
ForagingConfig: foragingConf,
DisasterConfig: disasterConf,
IIGOConfig: iigoConf,
}, nil
} | params.go | 0.500732 | 0.494324 | params.go | starcoder |
package govaluate
// ExprNode is a structured representation of an expression.
// There are three types of nodes: literal, variable and operator. The latter
// can have child nodes. They form a tree, where each node is an expression itself.
type ExprNode struct {
	// Type discriminates which of the fields below are meaningful.
	Type ExprNodeType
	// Name is the variable name (NodeTypeVariable) or the operation name
	// (NodeTypeOperator).
	Name string
	// Value is the constant value for NodeTypeLiteral.
	Value interface{}
	// Args holds the operand sub-expressions for NodeTypeOperator.
	Args []ExprNode
	// SourcePos and SourceLen locate the node within the original source text.
	SourcePos, SourceLen int
	// OperatorType records the syntactic form of a NodeTypeOperator node.
	OperatorType OperatorType
}
// ExprNodeType is a type of ExprNode.
type ExprNodeType int
const (
// NodeTypeLiteral is just a constant literal, e.g. a boolean, a number, or a string.
// ExprNode.Value contains the actual value.
NodeTypeLiteral ExprNodeType = iota
// NodeTypeVariable is a variable.
// ExprNode.Name contains the name of the variable.
NodeTypeVariable
// NodeTypeOperator is an operation over the arguments, the other nodes.
// It can be a function call, a binary operator (+, -, *, etc) with two arguments,
// unary (!, -, ~), ternary (?:), etc. There are no restrictions on operator name,
// it just needs to be defined at the evaluation phase.
// ExprNode.Name is the name of the operation.
// ExprNode.Args are the arguments.
NodeTypeOperator
)
// OperatorType describes the syntactic form an operator node was parsed from.
type OperatorType int
const (
	// OperatorTypeCall is a function call.
	OperatorTypeCall OperatorType = iota
	// OperatorTypeInfix is a binary infix operator (e.g. +, -, *).
	OperatorTypeInfix
	// OperatorTypePrefix is a unary prefix operator (e.g. !, -, ~).
	OperatorTypePrefix
	// OperatorTypeTernary is the ternary conditional operator (?:).
	OperatorTypeTernary
	// OperatorTypeArray is an array construction expression.
	OperatorTypeArray
	// OperatorTypeIndexer is an indexing expression.
	OperatorTypeIndexer
)
// NewExprNodeLiteral constructs a literal node holding the given constant
// value, recording its position and length in the source text.
func NewExprNodeLiteral(value interface{}, sourcePos, sourceLen int) ExprNode {
	var node ExprNode
	node.Type = NodeTypeLiteral
	node.Value = value
	node.SourcePos = sourcePos
	node.SourceLen = sourceLen
	return node
}
// NewExprNodeVariable constructs a variable node with the given name,
// recording its position and length in the source text.
func NewExprNodeVariable(name string, sourcePos, sourceLen int) ExprNode {
	var node ExprNode
	node.Type = NodeTypeVariable
	node.Name = name
	node.SourcePos = sourcePos
	node.SourceLen = sourceLen
	return node
}
// NewExprNodeOperator constructs an operator node with the given operation
// name, argument sub-expressions and syntactic form, recording its position
// and length in the source text.
func NewExprNodeOperator(name string, args []ExprNode, sourcePos, sourceLen int, operatorType OperatorType) ExprNode {
	var node ExprNode
	node.Type = NodeTypeOperator
	node.Name = name
	node.Args = args
	node.SourcePos = sourcePos
	node.SourceLen = sourceLen
	node.OperatorType = operatorType
	return node
}
// IsOperator reports whether this expression is an operator node with the
// given operation name.
func (expr ExprNode) IsOperator(name string) bool {
	return expr.Type == NodeTypeOperator && expr.Name == name
}
// IsLiteral reports whether this expression is a literal node whose value
// equals the given value.
//
// NOTE(review): the comparison uses ==, which panics for uncomparable values
// (slices, maps, funcs) — confirm callers only pass comparable literals.
func (expr ExprNode) IsLiteral(value interface{}) bool {
	return expr.Type == NodeTypeLiteral && expr.Value == value
}
// GetValue returns the constant value and true if this expression is a
// literal node; otherwise it returns nil and false.
func (expr ExprNode) GetValue() (interface{}, bool) {
	if expr.Type != NodeTypeLiteral {
		return nil, false
	}
	return expr.Value, true
}
// VarsCount returns a map where keys are the variable names referenced in the
// expression tree and values are how many times each is referenced.
func (expr ExprNode) VarsCount() map[string]int {
	vars := map[string]int{}
	collectVars(expr, vars)
	return vars
}
// Vars returns the distinct variable names referenced in the expression.
// The order of the result is unspecified (map iteration order).
func (expr ExprNode) Vars() []string {
	vars := expr.VarsCount()
	res := make([]string, 0, len(vars))
	for key := range vars {
		res = append(res, key)
	}
	return res
}
func collectVars(expr ExprNode, output map[string]int) {
switch expr.Type {
case NodeTypeVariable:
output[expr.Name]++
case NodeTypeOperator:
for _, arg := range expr.Args {
collectVars(arg, output)
}
}
} | ExprNode.go | 0.710025 | 0.751625 | ExprNode.go | starcoder |
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"contact": {},
"license": {
"name": "MIT",
"url": "https://opensource.org/licenses/MIT"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/api/account/login": {
"post": {
"description": "JWT certification",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"account"
],
"summary": "If the infomation passed in the request body matches the data in the table, a cookie will be issued.",
"responses": {
"200": {
"description": "200 OK",
"schema": {
"type": "string"
}
},
"400": {
"description": "400 incrrect password",
"schema": {
"type": "string"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
},
"404": {
"description": "404 Not Found",
"schema": {
"type": "string"
}
}
}
}
},
"/api/account/logout": {
"post": {
"description": "JWT certification",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"account"
],
"summary": "If the cookie exists, delete it.",
"responses": {
"200": {
"description": ""
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
}
}
}
},
"/api/account/nowuser": {
"get": {
"description": "Browse Account table.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"account"
],
"summary": "Show infomation about the currently logged in user.",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.Account"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
}
}
},
"put": {
"description": "Browse Account table.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"account"
],
"summary": "Updates information about the currently logged in user.",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.Account"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
},
"404": {
"description": "404 Not Found",
"schema": {
"type": "string"
}
}
}
},
"delete": {
"description": "Browse Account table.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"account"
],
"summary": "Deletes infomation about the currently logged in user.",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.Account"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
},
"404": {
"description": "404 Not Found",
"schema": {
"type": "string"
}
}
}
}
},
"/api/account/signup": {
"post": {
"description": "Use the account table.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"account"
],
"summary": "Register Account infomation in the database.",
"responses": {
"201": {
"description": "201 Created",
"schema": {
"type": "string"
}
},
"409": {
"description": "409 It is already registered",
"schema": {
"type": "string"
}
}
}
}
},
"/api/impression/:id": {
"get": {
"description": "Can be executed only at login.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"impression"
],
"summary": "Display of impressions.",
"responses": {
"200": {
"description": "200 OK",
"schema": {
"type": "string"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
},
"404": {
"description": "404 Not Found",
"schema": {
"type": "string"
}
}
}
},
"put": {
"description": "Can be executed only at login.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"impression"
],
"summary": "Update of impressions.",
"responses": {
"200": {
"description": "200 OK",
"schema": {
"type": "string"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
},
"404": {
"description": "404 Not Found",
"schema": {
"type": "string"
}
}
}
},
"delete": {
"description": "Can be executed only at login.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"impression"
],
"summary": "Delete impressions.",
"responses": {
"200": {
"description": "200 OK",
"schema": {
"type": "string"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
},
"404": {
"description": "404 Not Found",
"schema": {
"type": "string"
}
}
}
}
},
"/api/impressions": {
"get": {
"description": "Can be executed only at login.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"impressions"
],
"summary": "List of impressions",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.Impression"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
}
}
},
"post": {
"description": "Can be executed only at login.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"impressions"
],
"summary": "impression registration process.",
"responses": {
"200": {
"description": "200 OK",
"schema": {
"type": "string"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
}
}
}
},
"/api/impressions/search/:bookid": {
"get": {
"description": "Can be executed only at login.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"impressions"
],
"summary": "Search for books using Bookid as a key.",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.Impression"
}
},
"401": {
"description": "401 unauthenticated",
"schema": {
"type": "string"
}
},
"404": {
"description": "404 Not Found",
"schema": {
"type": "string"
}
}
}
}
}
},
"definitions": {
"models.Account": {
"type": "object",
"properties": {
"create_at": {
"type": "string"
},
"delete_at": {
"type": "string"
},
"email": {
"type": "string"
},
"id": {
"type": "integer"
},
"uid": {
"type": "string"
},
"update_at": {
"type": "string"
},
"username": {
"type": "string"
}
}
},
"models.Impression": {
"type": "object",
"properties": {
"body": {
"type": "string"
},
"bookid": {
"type": "string"
},
"booktitle": {
"type": "string"
},
"create_at": {
"type": "string"
},
"delete_at": {
"type": "string"
},
"id": {
"type": "integer"
},
"imageurl": {
"type": "string"
},
"isbn10": {
"type": "string"
},
"isbn13": {
"type": "string"
},
"title": {
"type": "string"
},
"uid": {
"type": "string"
},
"update_at": {
"type": "string"
}
}
}
}
}`
// swaggerInfo holds the template parameters that are injected into the doc
// template by ReadDoc.
type swaggerInfo struct {
	Version string
	Host string
	BasePath string
	Schemes []string
	Title string
	Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = swaggerInfo{
Version: "1.0.0",
Host: "Secret",
BasePath: "/",
Schemes: []string{},
Title: "book_Impressions_back",
Description: "API of software to describe impressions of books.",
}
type s struct{}
// ReadDoc renders the swagger document template with the values from
// SwaggerInfo and returns the resulting JSON string.
//
// Template parse or execute failures fall back to returning the raw,
// unrendered template text rather than an error.
func (s *s) ReadDoc() string {
	sInfo := SwaggerInfo
	// Escape newlines so the description remains a valid JSON string value.
	sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
	t, err := template.New("swagger_info").Funcs(template.FuncMap{
		// marshal renders a value as JSON inside the template (errors ignored).
		"marshal": func(v interface{}) string {
			a, _ := json.Marshal(v)
			return string(a)
		},
	}).Parse(doc)
	if err != nil {
		return doc
	}
	var tpl bytes.Buffer
	if err := t.Execute(&tpl, sInfo); err != nil {
		return doc
	}
	return tpl.String()
}
func init() {
swag.Register(swag.Name, &s{})
} | api/docs/docs.go | 0.504639 | 0.406626 | docs.go | starcoder |
package vm
import "errors"
import "fmt"
import "math"
import "time"
// FlipXY swaps the X and Y coordinates of every position (move) in the
// machine's program.
func (vm *Machine) FlipXY() {
	for idx := range vm.Positions {
		pos := vm.Positions[idx]
		vm.Positions[idx].X, vm.Positions[idx].Y = pos.Y, pos.X
	}
}
// LimitFeedrate clamps the feedrate of every position so that it never
// exceeds the given maximum.
func (vm *Machine) LimitFeedrate(feed float64) {
	for i := range vm.Positions {
		if p := &vm.Positions[i]; p.State.Feedrate > feed {
			p.State.Feedrate = feed
		}
	}
}
// FeedrateMultiplier scales the feedrate of every position by the
// given factor.
func (vm *Machine) FeedrateMultiplier(feedMultiplier float64) {
	for i := range vm.Positions {
		p := &vm.Positions[i]
		p.State.Feedrate *= feedMultiplier
	}
}
// MoveMultiplier scales all coordinates of every position by the given
// factor. As the original author noted, this distorts the program and
// is dangerous.
func (vm *Machine) MoveMultiplier(moveMultiplier float64) {
	for i := range vm.Positions {
		p := &vm.Positions[i]
		p.X *= moveMultiplier
		p.Y *= moveMultiplier
		p.Z *= moveMultiplier
	}
}
// EnforceSpindle forces the spindle state (enabled, direction, speed)
// on every position of the program.
func (vm *Machine) EnforceSpindle(enabled, clockwise bool, speed float64) {
	for i := range vm.Positions {
		s := &vm.Positions[i].State
		s.SpindleSpeed = speed
		s.SpindleEnabled = enabled
		s.SpindleClockwise = clockwise
	}
}
// FindSafetyHeight returns the highest Z coordinate found in the program
// (or 0 when every position is at or below Z=0).
func (vm *Machine) FindSafetyHeight() float64 {
	maxz := 0.0
	for i := range vm.Positions {
		if z := vm.Positions[i].Z; z > maxz {
			maxz = z
		}
	}
	return maxz
}
// Set safety-height.
// Scans for the highest position on the Z axis, and afterwards replaces all
// instances of this position with the requested height. Only moves that do
// not travel in X/Y at the old safety height are rewritten.
// Returns an error when the new height would collide with the next-highest
// (feed) Z level.
func (vm *Machine) SetSafetyHeight(height float64) error {
	// Ensure we detected the highest point in the script - we don't want any collisions
	maxz := vm.FindSafetyHeight()
	// nextz ends up as the highest Z strictly below the current safety
	// height; the new height must stay above it.
	nextz := 0.0
	for _, m := range vm.Positions {
		if m.Z < maxz && m.Z > nextz {
			nextz = m.Z
		}
	}
	if height <= nextz {
		// NOTE(review): fmt.Errorf would be more idiomatic here, but the
		// change would leave the "errors" import unused in this file.
		return errors.New(fmt.Sprintf("New safety height collides with lower feed height of %g", nextz))
	}
	// Apply the changes
	var lastx, lasty float64
	for idx, m := range vm.Positions {
		// Only rewrite pure clearance moves: same X/Y as the previous
		// position and sitting exactly at the old safety height.
		if lastx == m.X && lasty == m.Y && m.Z == maxz {
			vm.Positions[idx].Z = height
		}
		lastx, lasty = m.X, m.Y
	}
	return nil
}
// Return appends whatever moves are needed to end the program back at
// X0 Y0 Z0, optionally switching off spindle and coolant on the final
// position. The retract travels at the highest Z found in the program so
// the tool clears the work before moving home.
// (Fix: the spindle/coolant shutdown logic was duplicated in all four
// branches; it is now applied once to the final position.)
func (vm *Machine) Return(disableSpindle, disableCoolant bool) {
	if len(vm.Positions) == 0 {
		return
	}
	// Highest Z in the program, used as the clearance height.
	var maxz float64
	for _, m := range vm.Positions {
		if m.Z > maxz {
			maxz = m.Z
		}
	}
	lastPos := vm.Positions[len(vm.Positions)-1]
	switch {
	case lastPos.X == 0 && lastPos.Y == 0 && lastPos.Z == 0:
		// Already home - nothing to append.
	case lastPos.X == 0 && lastPos.Y == 0:
		// Directly above home - just plunge down.
		lastPos.Z = 0
		lastPos.State.MoveMode = MoveModeRapid
		vm.Positions = append(vm.Positions, lastPos)
	case lastPos.Z == maxz:
		// Already at clearance height - travel home, then plunge.
		move1 := lastPos
		move1.X = 0
		move1.Y = 0
		move1.State.MoveMode = MoveModeRapid
		move2 := move1
		move2.Z = 0
		vm.Positions = append(vm.Positions, move1, move2)
	default:
		// Retract to clearance height, travel home, then plunge.
		move1 := lastPos
		move1.Z = maxz
		move1.State.MoveMode = MoveModeRapid
		move2 := move1
		move2.X = 0
		move2.Y = 0
		move3 := move2
		move3.Z = 0
		vm.Positions = append(vm.Positions, move1, move2, move3)
	}
	// The requested stops always apply to the final position of the program.
	final := &vm.Positions[len(vm.Positions)-1]
	if disableSpindle {
		final.State.SpindleEnabled = false
	}
	if disableCoolant {
		final.State.MistCoolant = false
		final.State.FloodCoolant = false
	}
}
// Info scans the program and reports the bounding box of all moves plus
// the list of distinct feedrates in order of first appearance.
// Note: the bounding box implicitly includes the origin because the
// minima/maxima start at zero - presumably intentional, as the machine
// starts at the origin.
// (Fix: feedrate de-duplication was O(n^2); it now uses a set.)
func (vm *Machine) Info() (minx, miny, minz, maxx, maxy, maxz float64, feedrates []float64) {
	seen := make(map[float64]bool)
	for _, pos := range vm.Positions {
		if pos.X < minx {
			minx = pos.X
		} else if pos.X > maxx {
			maxx = pos.X
		}
		if pos.Y < miny {
			miny = pos.Y
		} else if pos.Y > maxy {
			maxy = pos.Y
		}
		if pos.Z < minz {
			minz = pos.Z
		} else if pos.Z > maxz {
			maxz = pos.Z
		}
		// Collect each distinct feedrate once, preserving first-seen order.
		if !seen[pos.State.Feedrate] {
			seen[pos.State.Feedrate] = true
			feedrates = append(feedrates, pos.State.Feedrate)
		}
	}
	return
}
// Estimate runtime for job
func (m *Machine) ETA() time.Duration {
lastTool := -1
lastToolSuggestion := -1
var eta time.Duration
var lx, ly, lz float64
for _, pos := range m.Positions {
if pos.State.ToolIndex != lastTool {
if pos.State.ToolIndex == lastToolSuggestion {
eta += 5 * time.Second
} else {
eta += 10 * time.Second
}
}
lastTool = pos.State.ToolIndex
lastToolSuggestion = pos.State.NextToolIndex
feed := pos.State.Feedrate
if feed <= 0 {
// Just to use something...
feed = 300
}
// Convert from minutes to microseconds
feed /= 60000000
switch pos.State.MoveMode {
case MoveModeNone:
continue
case MoveModeRapid:
// This is silly, but it gives something to calculate with
feed *= 8
case MoveModeDwell:
eta += time.Duration(pos.State.DwellTime) * time.Second
continue
}
dx, dy, dz := pos.X-lx, pos.Y-ly, pos.Z-lz
lx, ly, lz = pos.X, pos.Y, pos.Z
dist := math.Sqrt(math.Pow(dx, 2) + math.Pow(dy, 2) + math.Pow(dz, 2))
eta += time.Duration(dist/feed) * time.Microsecond
}
return eta
} | vm/utils.go | 0.697197 | 0.40539 | utils.go | starcoder |
package three
import "github.com/gopherjs/gopherjs/js"
// Matrix4 - represents a Matrix4.
// It wraps the underlying three.js Matrix4 JavaScript object; all methods
// delegate to that object through gopherjs.
type Matrix4 struct {
	*js.Object
	// A column-major list of matrix values.
	A *js.Object `js:"A"`
}
// NewMatrix4 creates a new three.js Matrix4 (identity-initialised by
// three.js) and wraps it. "three" is the package-level handle to the
// three.js module.
func NewMatrix4() *Matrix4 {
	return &Matrix4{
		Object: three.Get("Matrix4").New(),
	}
}
// Compose sets this matrix to the transformation composed of position,
// quaternion and scale. Returns the receiver for chaining.
func (m *Matrix4) Compose(position Vector3, q Quaternion, scale Vector3) (this *Matrix4) {
	m.Object.Call("compose", position, q, scale)
	return m
}
// Decompose splits this matrix into its position, quaternion and scale
// components, writing them into the provided arguments.
// Note: Not all matrices are decomposable in this way. For example, if an
// object has a non-uniformly scaled parent, then the object's world matrix
// may not be decomposable, and this method may not be appropriate.
func (m *Matrix4) Decompose(position Vector3, q Quaternion, scale Vector3) {
	m.Object.Call("decompose", position, q, scale)
}
// Determinant computes the determinant of this matrix; see
// http://www.euclideanspace.com/maths/algebra/matrix/functions/inverse/fourD/index.htm
func (m *Matrix4) Determinant() float64 {
	return m.Object.Call("determinant").Float()
}
// ExtractBasis extracts the basis of this matrix into the three axis
// vectors provided. If this matrix is:
// a, b, c, d,
// e, f, g, h,
// i, j, k, l,
// m, n, o, p
// then x,y,z will be set:
// x = (a, e, i)
// y = (b, f, j)
// z = (c, g, k)
func (m *Matrix4) ExtractBasis(x, y, z Vector3) (this *Matrix4) {
	m.Object.Call("extractBasis", x, y, z)
	return m
}
// ExtractRotation sets this matrix to the rotation component of the given
// matrix a. Returns the receiver for chaining.
func (m *Matrix4) ExtractRotation(a *Matrix4) (this *Matrix4) {
	m.Object.Call("extractRotation", a)
	return m
}
// MakeRotationFromQuaternion sets the rotation component of this matrix to
// the rotation specified by q.
// The rest of the matrix is set to the identity. So, given q = w + xi + yj + zk,
// the resulting matrix will be:
// 1-2y²-2z² 2xy-2zw 2xz+2yw 0
// 2xy+2zw 1-2x²-2z² 2yz-2xw 0
// 2xz-2yw 2yz+2xw 1-2x²-2y² 0
// 0 0 0 1
func (m *Matrix4) MakeRotationFromQuaternion(q Quaternion) (this *Matrix4) {
	m.Object.Call("makeRotationFromQuaternion", q)
	return m
}
// MakeTranslation sets this matrix as a translation transform:
// 1, 0, 0, x,
// 0, 1, 0, y,
// 0, 0, 1, z,
// 0, 0, 0, 1
func (m *Matrix4) MakeTranslation(x, y, z float64) (this *Matrix4) {
	m.Object.Call("makeTranslation", x, y, z)
	return m
}
// MakeBasis sets this to the basis matrix consisting of the three provided
// basis vectors:
// xAxis.x, yAxis.x, zAxis.x, 0,
// xAxis.y, yAxis.y, zAxis.y, 0,
// xAxis.z, yAxis.z, zAxis.z, 0,
// 0, 0, 0, 1
func (m *Matrix4) MakeBasis(x, y, z Vector3) (this *Matrix4) {
	m.Object.Call("makeBasis", x, y, z)
	return m
}
// Invert inverts this matrix in place, using the analytic method. You can
// not invert with a determinant of zero; if you attempt this, three.js
// produces a zero matrix instead.
func (m *Matrix4) Invert() (this *Matrix4) {
	m.Object.Call("invert")
	return m
}
// Resets this matrix to the identity matrix.
func (m *Matrix4) Identity() (this *Matrix4) {
m.Object.Call("identity")
return m
} | math_matrix4.go | 0.918242 | 0.679275 | math_matrix4.go | starcoder |
package redblack
// Element represents the constituent of any Node.
type Element interface {
	// Key returns the ordering key used to place the element in the tree.
	Key() int
	// Merge combines this element with another element carrying the same
	// key and returns the result.
	Merge(Element) Element
}
// color is the red/black node color used to keep the tree balanced.
type color bool
const (
	red color = false
	black color = true
)
// Node is what the tree is made of. A nil *Node acts as an (implicitly
// black) empty leaf.
type Node struct {
	color color
	element Element
	left *Node
	right *Node
}
// Root returns (the root of) a fresh Red-Black Tree.
// NOTE(review): the node is created red; Insert re-blackens the root
// after every insertion, so a standalone Root call leaves a red root -
// presumably intentional, but worth confirming.
func Root(e Element) *Node {
	return &Node{red, e, nil, nil}
}
// isRedL reports whether n is a red node whose left child is also red -
// a left-leaning red-red violation under a black parent (the receiver is
// the parent's color). A nil child counts as black.
// (Fix: guard against a nil left child - a freshly inserted red node has
// nil children, and dereferencing n.left.color panicked.)
func (color color) isRedL(n *Node) bool {
	if color == black && n != nil && n.color == red {
		return n.left != nil && red == n.left.color
	}
	return false
}
// isRedR reports whether n is a red node whose right child is also red -
// a right-leaning red-red violation under a black parent (the receiver is
// the parent's color). A nil child counts as black.
// (Fix: guard against a nil right child; dereferencing n.right.color
// panicked for red leaf nodes.)
func (color color) isRedR(n *Node) bool {
	if color == black && n != nil && n.color == red {
		return n.right != nil && red == n.right.color
	}
	return false
}
// tree assembles the balanced replacement subtree used by balance: a red
// root holding y with two black children {x: a,b} and {z: c,d}.
func tree(a *Node, x Element, b *Node, y Element, c *Node, z Element, d *Node) *Node {
	return &Node{red, y, &Node{black, x, a, b}, &Node{black, z, c, d}}
}
// balance performs the classic Okasaki red-black rebalancing: whenever a
// black node ends up with a red child that itself has a red child, the
// three top elements are rearranged into a red root with two black
// children. The x parameter (the freshly inserted element) is kept only
// for signature compatibility with existing callers.
// (Fix: the two right-leaning cases wrongly substituted x for the subtree
// root element y in the rebuilt subtree, losing y and duplicating x -
// e.g. inserting 10, 20, 15 produced {15, {15}, {20}} instead of
// {15, {10}, {20}}.)
func balance(x Element, color color, a *Node, y Element, b *Node) *Node {
	switch {
	case color.isRedL(a):
		// left child red, its left child red
		l := a.left
		return tree(l.left, l.element, l.right, a.element, a.right, y, b)
	case color.isRedR(a):
		// left child red, its right child red
		r := a.right
		return tree(a.left, a.element, r.left, r.element, r.right, y, b)
	case color.isRedL(b):
		// right child red, its left child red
		l := b.left
		return tree(a, y, l.left, l.element, l.right, b.element, b.right)
	case color.isRedR(b):
		// right child red, its right child red
		r := b.right
		return tree(a, y, b.left, b.element, r.left, r.element, r.right)
	default:
		return &Node{color, y, a, b}
	}
}
// ins inserts x into the subtree and returns the (possibly rebalanced)
// replacement subtree. An element with an equal key is combined with the
// existing one via Merge instead of being duplicated.
func (tree *Node) ins(x Element) (here *Node) {
	if tree == nil {
		// Empty leaf: new nodes start out red.
		here = Root(x)
	} else {
		color := tree.color
		y, a, b := tree.element, tree.left, tree.right
		x_key, y_key := x.Key(), y.Key()
		if y_key > x_key {
			here = balance(x, color, a.ins(x), y, b)
		} else if x_key == y_key {
			return &Node{color, y.Merge(x), a, b}
		} else {
			here = balance(x, color, a, y, b.ins(x))
		}
	}
	return
}
// make_black returns s with a black root, copying the node only when a
// recoloring is actually needed.
// (Fix: the original condition was inverted - it returned red roots
// unchanged and needlessly copied already-black ones, so Insert never
// actually re-blackened the root.)
func make_black(s *Node) *Node {
	if s.color == black {
		return s
	}
	return &Node{black, s.element, s.left, s.right}
}
// Insert an Element, returning the new tree root (re-colored black to
// restore the red-black root invariant).
func (tree *Node) Insert(e Element) *Node {
	return make_black(tree.ins(e))
}
// Locate an Element given it's Key()
func (tree *Node) Locate(n int) (Element, bool) {
for {
if tree == nil {
return nil, false
} else {
x, l, r := tree.element, tree.left, tree.right
if x.Key() == n {
return x, true
} else if x.Key() > n {
tree = l
} else {
tree = r
}
}
}
} | internal/redblack/redblack.go | 0.855625 | 0.513485 | redblack.go | starcoder |
package pattern_matching_lcci
/*
面试题 16.18. 模式匹配 https://leetcode-cn.com/problems/pattern-matching-lcci/
你有两个字符串,即pattern和value。
pattern字符串由字母"a"和"b"组成,用于描述字符串中的模式。
例如,字符串"catcatgocatgo"匹配模式"aabab"(其中"cat"是"a","go"是"b"),该字符串也匹配像"a"、"ab"和"b"这样的模式。
但需注意"a"和"b"不能同时表示相同的字符串。编写一个方法判断value字符串是否匹配pattern字符串。
示例 1:
输入: pattern = "abba", value = "dogcatcatdog"
输出: true
示例 2:
输入: pattern = "abba", value = "dogcatcatfish"
输出: false
示例 3:
输入: pattern = "aaaa", value = "dogcatcatdog"
输出: false
示例 4:
输入: pattern = "abba", value = "dogdogdogdog"
输出: true
解释: "a"="dogdog",b="",反之也符合规则
提示:
0 <= len(pattern) <= 1000
0 <= len(value) <= 1000
你可以假设pattern只包含字母"a"和"b",value仅包含小写字母。
*/
/*
Many edge cases here. Main idea: count the occurrences of 'a' and 'b' in
the pattern, then enumerate the possible lengths of the strings that each
letter matches.
*/
// patternMatching reports whether value can be produced from pattern by
// substituting one fixed string for every 'a' and another for every 'b'.
func patternMatching(pattern string, value string) bool {
	if len(pattern) == 0 {
		return len(value) == 0
	}
	if len(value) == 0 {
		// NOTE(review): only single-letter patterns match the empty
		// string here (e.g. "aa" with "" yields false) - presumably what
		// the judge expects; verify against the problem's edge cases.
		return len(pattern) == 1
	}
	return check(pattern, value)
}
// check dispatches on the shape of the pattern: when only one letter is
// present the value must simply be k equal repetitions of one substring;
// otherwise every feasible (lenA, lenB) length split is tried.
func check(pattern, value string) bool {
	nA, nB := countAB(pattern)
	if nA == 0 {
		return canRepeat(value, nB)
	}
	if nB == 0 {
		return canRepeat(value, nA)
	}
	// Covers the cases where one of the two substitutions is empty.
	if canRepeat(value, nA) || canRepeat(value, nB) {
		return true
	}
	return rangeABLensToCheck(pattern, value, nA, nB)
}
// countAB returns how many times 'a' and how many times any other rune
// (expected: 'b') occur in pattern.
func countAB(pattern string) (int, int) {
	var as, bs int
	for _, r := range pattern {
		if r == 'a' {
			as++
			continue
		}
		bs++
	}
	return as, bs
}
// canRepeat reports whether value is exactly k back-to-back copies of one
// substring, i.e. len(value) is divisible by k and all k chunks are equal.
func canRepeat(value string, k int) bool {
	if len(value)%k != 0 {
		return false
	}
	chunk := len(value) / k
	first := value[:chunk]
	for at := chunk; at < len(value); at += chunk {
		if value[at:at+chunk] != first {
			return false
		}
	}
	return true
}
// rangeABLensToCheck enumerates every substitution length lenA >= 1 for
// 'a' (the matching lenB >= 1 follows from lenA*nA + lenB*nB == len(value))
// and tests whether the pattern matches under that split. The lenA == 0 and
// lenB == 0 cases are handled earlier by check via canRepeat.
func rangeABLensToCheck(pattern, value string, nA, nB int) bool {
	m := len(value)
	// lenA*nA + lenB*nB = m
	for lenA := 1; lenA*nA+nB <= m; lenA++ {
		if (m-lenA*nA)%nB != 0 {
			// This lenA leaves no integral lenB.
			continue
		}
		lenB := (m - lenA*nA) / nB
		if canMatch(pattern, value, lenA, lenB) {
			return true
		}
	}
	return false
}
// canMatch reports whether value decomposes along pattern with every 'a'
// consuming lenA bytes and every 'b' consuming lenB bytes, all 'a' chunks
// being identical and all 'b' chunks being identical.
func canMatch(pattern, value string, lenA, lenB int) bool {
	var wordA, wordB string
	pos := 0
	for _, p := range pattern {
		// Select the word slot and chunk size for this pattern letter.
		word, size := &wordB, lenB
		if p == 'a' {
			word, size = &wordA, lenA
		}
		chunk := value[pos : pos+size]
		pos += size
		if *word == "" {
			*word = chunk
		} else if *word != chunk {
			return false
		}
	}
	return true
}
// canMatch's cyclomatic complexity is somewhat high and its two branches
// duplicate logic; the following two variants reduce that complexity.
// canMatch0 delegates the shared compare-or-record step for each chunk
// to judge.
func canMatch0(pattern, value string, lenA, lenB int) bool {
	matchedA, matchedB := "", ""
	j := 0
	ok := false
	for _, v := range pattern {
		if v == 'a' {
			ok, j, matchedA = judge(value, matchedA, j, lenA)
		} else {
			ok, j, matchedB = judge(value, matchedB, j, lenB)
		}
		if !ok {
			return false
		}
	}
	return true
}
// judge consumes length bytes of value starting at j and checks them
// against the previously recorded chunk, recording them when none exists
// yet. It returns success, the advanced offset and the (possibly updated)
// recorded chunk.
func judge(value, matched string, j, length int) (bool, int, string) {
	chunk := value[j : j+length]
	if matched != "" && matched != chunk {
		return false, j, matched
	}
	if matched == "" {
		matched = chunk
	}
	return true, j + length, matched
}
// canMatch1 is a table-driven variant of canMatch: index 0 tracks the 'a'
// word and index 1 the 'b' word, removing the duplicated branch bodies.
// (Fix: removed dataset residue that was fused onto the closing brace.)
func canMatch1(pattern, value string, lenA, lenB int) bool {
	j := 0
	matched := []string{"", ""}
	lens := []int{lenA, lenB}
	index := 0
	for _, v := range pattern {
		// 'a' -> 0, 'b' -> 1
		index = int(v - 'a')
		sub := value[j : j+lens[index]]
		if matched[index] == "" {
			matched[index] = sub
		} else if matched[index] != sub {
			return false
		}
		j += lens[index]
	}
	return true
}
package wavelet
import (
"errors"
"jpeg2000/data"
"strconv"
"strings"
)
// Wavelet is a two-dimensional wavelet transform: WaveletTransform encodes
// a layer and WaveletInverse decodes it back; ToProtobuf serialises the
// configuration so the matching inverse can later be rebuilt with
// FromProtobuf.
type Wavelet interface {
	WaveletTransform(l data.Layer) data.Layer
	WaveletInverse(l data.Layer) data.Layer
	ToProtobuf() *data.WaveletConfig
}
// scaleX interleaves the sum and difference of two equally sized layers
// along the X axis, producing a layer twice as wide: column 2i holds
// f1+f2 and column 2i+1 holds f1-f2. Panics when the inputs differ in size.
// (Fix: the result local was named "data", shadowing the imported data
// package; renamed to out.)
func scaleX(f1, f2 data.Layer) data.Layer {
	sizeX1, sizeY1 := f1.GetDimensions()
	sizeX2, sizeY2 := f2.GetDimensions()
	if sizeX1 != sizeX2 || sizeY1 != sizeY2 {
		panic("Image dimensions aren't equal")
	}
	sizeX := sizeX1
	sizeY := sizeY1
	out := data.NewLayer(2*sizeX, sizeY)
	for j := 0; j < sizeY; j++ {
		for i := 0; i < sizeX; i++ {
			out[j][2*i+0] = f1[j][i] + f2[j][i]
			out[j][2*i+1] = f1[j][i] - f2[j][i]
		}
	}
	return out
}
// scaleY interleaves the sum and difference of two equally sized layers
// along the Y axis, producing a layer twice as tall: row 2j holds f1+f2
// and row 2j+1 holds f1-f2. Panics when the inputs differ in size.
// (Fix: the result local was named "data", shadowing the imported data
// package; renamed to out.)
func scaleY(f1, f2 data.Layer) data.Layer {
	sizeX1, sizeY1 := f1.GetDimensions()
	sizeX2, sizeY2 := f2.GetDimensions()
	if sizeX1 != sizeX2 || sizeY1 != sizeY2 {
		panic("Image dimensions aren't equal")
	}
	sizeX := sizeX1
	sizeY := sizeY1
	out := data.NewLayer(sizeX, 2*sizeY)
	for j := 0; j < sizeY; j++ {
		for i := 0; i < sizeX; i++ {
			out[2*j+0][i] = f1[j][i] + f2[j][i]
			out[2*j+1][i] = f1[j][i] - f2[j][i]
		}
	}
	return out
}
// copyIntoQuadrant writes the whole of from into one quadrant of into
// (1 = top-right, 2 = top-left, 3 = bottom-left, 4 = bottom-right).
// into must be at least twice as large as from in both dimensions.
func copyIntoQuadrant(from, into data.Layer, quadrant int) {
	fw, fh := from.GetDimensions()
	iw, ih := into.GetDimensions()
	if 2*fw > iw {
		panic("Invalid X size for copying from")
	}
	if 2*fh > ih {
		panic("Invalid Y size for copying from")
	}
	// Translate the quadrant number into an (x, y) offset.
	var ox, oy int
	switch quadrant {
	case 1:
		ox = fw
	case 2:
		// top-left: no offset
	case 3:
		oy = fh
	case 4:
		ox, oy = fw, fh
	default:
		panic("Invalid quadrant selected for copying")
	}
	for j := 0; j < fh; j++ {
		for i := 0; i < fw; i++ {
			into[j+oy][i+ox] = from[j][i]
		}
	}
}
// copyFromQuadrant fills into with the content of one quadrant of from
// (1 = top-right, 2 = top-left, 3 = bottom-left, 4 = bottom-right).
// from must be at least twice as large as into in both dimensions.
func copyFromQuadrant(from, into data.Layer, quadrant int) {
	fw, fh := from.GetDimensions()
	iw, ih := into.GetDimensions()
	if 2*iw > fw {
		panic("Invalid X size for copying into")
	}
	if 2*ih > fh {
		panic("Invalid Y size for copying into")
	}
	// Translate the quadrant number into an (x, y) offset.
	var ox, oy int
	switch quadrant {
	case 1:
		ox = iw
	case 2:
		// top-left: no offset
	case 3:
		oy = ih
	case 4:
		ox, oy = iw, ih
	default:
		panic("Invalid quadrant selected for copying")
	}
	for j := 0; j < ih; j++ {
		for i := 0; i < iw; i++ {
			into[j][i] = from[j+oy][i+ox]
		}
	}
}
// FromCommandLine parses a wavelet specification of the form
// "haar:<level>", "daubechies:<level>:<coefficient>" or "dummy" into the
// corresponding Wavelet implementation.
// (Fixes: removed the unreachable len<1 branch - strings.Split always
// yields at least one element - renamed the misspelled local, and fixed
// the grammar of the error messages.)
func FromCommandLine(arg string) (Wavelet, error) {
	parts := strings.Split(arg, ":")
	switch parts[0] {
	case "haar":
		if len(parts) != 2 {
			return nil, errors.New("Invalid number of arguments for parsing haar wavelet")
		}
		level, err := strconv.ParseInt(parts[1], 0, 32)
		if err != nil {
			return nil, err
		}
		return NewHaarWavelet(level)
	case "daubechies":
		if len(parts) != 3 {
			return nil, errors.New("Invalid number of arguments for parsing daubechies wavelet")
		}
		level, err := strconv.ParseInt(parts[1], 0, 32)
		if err != nil {
			return nil, err
		}
		coefficient, err := strconv.ParseInt(parts[2], 0, 32)
		if err != nil {
			return nil, err
		}
		return NewDaubechiesWavelet(level, coefficient)
	case "dummy":
		if len(parts) != 1 {
			return nil, errors.New("Invalid number of arguments for parsing dummy wavelet")
		}
		return NewDummyWavelet(), nil
	default:
		return nil, errors.New("Unrecognized wavelet type")
	}
}
func FromProtobuf(d *data.WaveletConfig) (Wavelet, error) {
switch d.Data.(type) {
case *data.WaveletConfig_Haar:
haar := HaarWavelet{}
if err := haar.FromProtobuf(d); err != nil {
return nil, err
} else {
return &haar, nil
}
case *data.WaveletConfig_Daubechies:
daubechies := DaubechiesWavelet{}
if err := daubechies.FromProtobuf(d); err != nil {
return nil, err
} else {
return &daubechies, nil
}
case *data.WaveletConfig_Dummy:
dummy := DummyWavelet{}
if err := dummy.FromProtobuf(d); err != nil {
return nil, err
} else {
return &dummy, nil
}
case nil:
return nil, errors.New("Wavelet configuration not found in protobuf data")
default:
return nil, errors.New("Unexpected wavelet configuration in protobuf data")
}
} | labo-2/jpeg2000/wavelet/wavelet.go | 0.503906 | 0.530419 | wavelet.go | starcoder |
package expectations
import (
"fmt"
"reflect"
"runtime"
"strings"
)
// FailFunction is normally an instance of testing.T; only its Fail method
// is needed to mark the current test as failed.
type FailFunction interface {
	Fail()
}
// Logger prints out the validation failures.
type Logger interface {
	Log(message string)
}
// defaultLogger is the Logger used when none is supplied; it writes to
// stdout.
type defaultLogger struct{}
// Log writes a message to stdout.
func (defaultLogger) Log(message string) {
	fmt.Println(message)
}
// Et is a component containing the testing.T of go.
// To create it use the NewT() function.
type Et struct {
	T FailFunction
	Logger Logger
}
// NewT creates a struct containing a reference to the testing.T and a
// default (stdout) Logger.
func NewT(t FailFunction) Et {
	return Et{T: t, Logger: defaultLogger{}}
}
// NewTWithLogger creates a struct containing a reference to the testing.T
// and a custom Logger.
func NewTWithLogger(t FailFunction, l Logger) Et {
	return Et{T: t, Logger: l}
}
// Expectation holds the actual value and is linked to methods allowing to
// compare it with the expected value. Once a comparison fails, the failed
// flag short-circuits all subsequently chained checks until Reset is
// called.
type Expectation struct {
	T FailFunction
	Logger Logger
	Value interface{}
	failed bool
}
// ExpectThat builds an Expectation which allows to compare the value to
// expected values.
func (aEt *Et) ExpectThat(value interface{}) *Expectation {
	return &Expectation{aEt.T, aEt.Logger, value, false}
}
// ExpectThatString builds a StringExpectation which allows to compare the
// string value to expected values.
func (aEt *Et) ExpectThatString(value string) *StringExpectation {
	return &StringExpectation{&Expectation{aEt.T, aEt.Logger, value, false}}
}
// ExpectThatSlice builds an Expectation for slices which allows to compare
// the value to expected values.
func (aEt *Et) ExpectThatSlice(value interface{}) *SliceExpectation {
	return &SliceExpectation{&Expectation{aEt.T, aEt.Logger, value, false}}
}
// Reset sets the failed flag to false so that further expectations can be
// executed.
func (e *Expectation) Reset() {
	e.failed = false
}
// createMessageOnTypeMismatch returns a diagnostic message when expected
// and actual have different dynamic types, and the empty string when the
// types match or either value is nil.
func createMessageOnTypeMismatch(expected, actual interface{}) string {
	if expected == nil || actual == nil {
		return ""
	}
	ta, te := reflect.TypeOf(actual), reflect.TypeOf(expected)
	if ta == te {
		return ""
	}
	return fmt.Sprintf("You try to compare different types %v(%v) - %v(%v)", actual, ta, expected, te)
}
// Equals fails the test if expected is not equal to the value (Go ==
// after a dynamic-type check). No-op once a previous chained check failed.
func (e *Expectation) Equals(expected interface{}) *Expectation {
	if e.failed {
		return e
	}
	if msg := createMessageOnTypeMismatch(expected, e.Value); msg != "" {
		e.failed = true
		fail(e.T, e.Logger, msg)
	} else if e.Value != expected {
		e.failed = true
		fail(e.T, e.Logger, fmt.Sprintf("Expect %v to equal %v", e.Value, expected))
	}
	return e
}
// DoesNotEqual fails the test if expected is equal to the value.
// No-op once a previous chained check failed.
func (e *Expectation) DoesNotEqual(expected interface{}) *Expectation {
	if e.failed {
		return e
	}
	if msg := createMessageOnTypeMismatch(expected, e.Value); msg != "" {
		e.failed = true
		fail(e.T, e.Logger, msg)
	} else if e.Value == expected {
		e.failed = true
		fail(e.T, e.Logger, fmt.Sprintf("Expect %v to not equal %v", e.Value, expected))
	}
	return e
}
// IsGreater fails the test if the value is not greater than
// referencedValue. Comparison is delegated to doCompare (defined elsewhere
// in this file); buildFailMessage's second argument toggles type info in
// the failure text when the values were not comparable.
func (e *Expectation) IsGreater(referencedValue interface{}) *Expectation {
	if e.failed {
		return e
	}
	if msg := createMessageOnTypeMismatch(referencedValue, e.Value); msg != "" {
		e.failed = true
		fail(e.T, e.Logger, msg)
	} else if result := doCompare(referencedValue, e.Value); result != greater {
		e.failed = true
		fail(e.T, e.Logger, buildFailMessage("Expect %v to be greater than %v", result == notComparable, e.Value, referencedValue))
	}
	return e
}
// IsGreaterOrEqual fails the test if the value is not greater than or
// equal to referencedValue.
func (e *Expectation) IsGreaterOrEqual(referencedValue interface{}) *Expectation {
	if e.failed {
		return e
	}
	if msg := createMessageOnTypeMismatch(referencedValue, e.Value); msg != "" {
		e.failed = true
		fail(e.T, e.Logger, msg)
	} else if result := doCompare(referencedValue, e.Value); result != greater && result != equal {
		e.failed = true
		fail(e.T, e.Logger, buildFailMessage("Expect %v to be greater than or equal to %v", result == notComparable, e.Value, referencedValue))
	}
	return e
}
// IsLower fails the test if the value is not lower than referencedValue.
func (e *Expectation) IsLower(referencedValue interface{}) *Expectation {
	if e.failed {
		return e
	}
	if msg := createMessageOnTypeMismatch(referencedValue, e.Value); msg != "" {
		e.failed = true
		fail(e.T, e.Logger, msg)
	} else if result := doCompare(referencedValue, e.Value); result != lower {
		e.failed = true
		fail(e.T, e.Logger, buildFailMessage("Expect %v to be lower than %v", result == notComparable, e.Value, referencedValue))
	}
	return e
}
// IsLowerOrEqual fails the test if the value is not lower than or equal to
// referencedValue.
func (e *Expectation) IsLowerOrEqual(referencedValue interface{}) *Expectation {
	if e.failed {
		return e
	}
	if msg := createMessageOnTypeMismatch(referencedValue, e.Value); msg != "" {
		e.failed = true
		fail(e.T, e.Logger, msg)
	} else if result := doCompare(referencedValue, e.Value); result != lower && result != equal {
		e.failed = true
		fail(e.T, e.Logger, buildFailMessage("Expect %v to be lower than or equal to %v", result == notComparable, e.Value, referencedValue))
	}
	return e
}
// IsNil reports whether value is nil, either as an untyped nil interface
// or as a typed nil carried inside the interface (nil pointer, map,
// channel, slice or func).
// (Fix: the original switch omitted reflect.Func, so a nil func value was
// reported as non-nil.)
func IsNil(value interface{}) bool {
	if value == nil {
		return true
	}
	switch reflect.TypeOf(value).Kind() {
	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Slice, reflect.Func:
		return reflect.ValueOf(value).IsNil()
	}
	return false
}
// IsNil fails the test if the value is not nil (typed nils included, via
// the package-level IsNil helper).
func (e *Expectation) IsNil() *Expectation {
	if e.failed {
		return e
	}
	if !IsNil(e.Value) {
		e.failed = true
		fail(e.T, e.Logger, buildFailMessage("Expect %v to be nil", true, e.Value))
	}
	return e
}
// IsNotNil fails the test if the value is nil.
func (e *Expectation) IsNotNil() *Expectation {
	if e.failed {
		return e
	}
	if IsNil((e.Value)) {
		e.failed = true
		fail(e.T, e.Logger, buildFailMessage("Expect %v not to be nil", true, e.Value))
	}
	return e
}
// ===================== Strings ==============================
// StringExpectation allows to express expectations on strings.
type StringExpectation struct {
	E *Expectation
}
// String_ builds a StringExpectation from an Expectation.
// NOTE(review): unlike the other checks, a non-string value logs a failure
// but does not set the failed flag - confirm whether that is intended.
// Deprecated: Use ExpectThatString instead
func (e *Expectation) String_() *StringExpectation {
	_, valueOk := e.Value.(string)
	if !valueOk {
		fail(e.T, e.Logger, buildFailMessage("Expect %v to be a string", true, e.Value))
	}
	return &StringExpectation{e}
}
// Reset sets the failed flag to false so that further expectations can be
// executed.
func (e *StringExpectation) Reset() {
	e.E.failed = false
}
// IsNil fails the test if the value is not nil.
func (e *StringExpectation) IsNil() *StringExpectation {
	e.E.IsNil()
	return e
}
// IsNotNil fails the test if the value is nil.
func (e *StringExpectation) IsNotNil() *StringExpectation {
	e.E.IsNotNil()
	return e
}
// Equals fails the test if expected is not equal to the value.
// Comparison is delegated to compareEquality (defined elsewhere in this
// file); buildFailMessage's second argument toggles type info in the
// failure text when the values were not comparable.
func (e *StringExpectation) Equals(expected interface{}) *StringExpectation {
	if e.E.failed {
		return e
	}
	result := compareEquality(expected, e.E.Value)
	if result != equal {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to equal %v", result == notComparable, e.E.Value, expected))
	}
	return e
}
// EqualsIgnoringCase fails the test if expected does not equal the value
// under Unicode case folding. Both operands must be strings.
// (Fix: use strings.EqualFold instead of comparing two ToLower copies -
// avoids two allocations per call; staticcheck SA6005.)
func (e *StringExpectation) EqualsIgnoringCase(expected interface{}) *StringExpectation {
	if e.E.failed {
		return e
	}
	valueString, valueOk := e.E.Value.(string)
	expectedString, expectedOk := expected.(string)
	if !(valueOk && expectedOk) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to equal ignoring case %v", showTypeInfos, e.E.Value, expected))
	} else if !strings.EqualFold(valueString, expectedString) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to equal ignoring case %v", hideTypeInfos, e.E.Value, expected))
	}
	return e
}
// DoesNotEqual fails the test if expected is equal to the value.
// NOTE(review): compares interfaces directly with ==, without the type
// check the other methods perform - confirm that is intended.
func (e *StringExpectation) DoesNotEqual(expected interface{}) *StringExpectation {
	if e.E.failed {
		return e
	}
	if expected == e.E.Value {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to not equal %v", hideTypeInfos, e.E.Value, expected))
	}
	return e
}
// StartsWith fails the test unless the string value starts with prefix.
// A non-string operand fails with type information in the message.
func (e *StringExpectation) StartsWith(prefix interface{}) *StringExpectation {
	if e.E.failed {
		return e
	}
	valueString, valueOk := e.E.Value.(string)
	prefixString, prefixOk := prefix.(string)
	if !(valueOk && prefixOk) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to start with %v", showTypeInfos, e.E.Value, prefix))
	} else if !strings.HasPrefix(valueString, prefixString) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to start with %v", hideTypeInfos, e.E.Value, prefix))
	}
	return e
}
// EndsWith fails the test unless the string value ends with suffix.
// A non-string operand fails with type information in the message.
func (e *StringExpectation) EndsWith(suffix interface{}) *StringExpectation {
	if e.E.failed {
		return e
	}
	valueString, valueOk := e.E.Value.(string)
	suffixString, suffixOk := suffix.(string)
	if !(valueOk && suffixOk) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to end with %v", showTypeInfos, e.E.Value, suffix))
	} else if !strings.HasSuffix(valueString, suffixString) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to end with %v", hideTypeInfos, e.E.Value, suffix))
	}
	return e
}
// Contains checks that the string value contains every expected substring;
// any that are missing are reported in the failure message.
func (e *StringExpectation) Contains(expectedValues ...string) *StringExpectation {
	if e.E.failed {
		return e
	}
	valueString, valueOk := e.E.Value.(string)
	if !(valueOk) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to contain %v", showTypeInfos, e.E.Value, expectedValues))
		return e
	}
	var lackingValues []string
	var failed bool
	for _, expectedValue := range expectedValues {
		if !strings.Contains(valueString, expectedValue) {
			failed = true
			lackingValues = append(lackingValues, expectedValue)
		}
	}
	if failed {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to contain %v but was missing %v", hideTypeInfos, e.E.Value, expectedValues, lackingValues))
	}
	return e
}
// DoesNotContain checks that the string value contains none of the
// expected substrings; any that are found are reported in the failure
// message.
// (Fix: the type-mismatch path logged a failure but forgot to set
// e.E.failed, unlike Contains, so later chained checks kept running.)
func (e *StringExpectation) DoesNotContain(expectedValues ...string) *StringExpectation {
	if e.E.failed {
		return e
	}
	valueString, valueOk := e.E.Value.(string)
	if !(valueOk) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to not contain %v", showTypeInfos, e.E.Value, expectedValues))
		return e
	}
	var foundValues []string
	for _, expectedValue := range expectedValues {
		if strings.Contains(valueString, expectedValue) {
			foundValues = append(foundValues, expectedValue)
		}
	}
	if len(foundValues) > 0 {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to not contain %v but it includes %v", hideTypeInfos, e.E.Value, expectedValues, foundValues))
	}
	return e
}
// ===================== Slices ==============================
// SliceExpectation allows to express expectations on slices.
type SliceExpectation struct {
	E *Expectation
}
// Slice builds a SliceExpectation from an Expectation.
// Deprecated: Use ExpectThatSlice instead
func (e *Expectation) Slice() *SliceExpectation {
	return &SliceExpectation{e}
}
// Reset sets the failed flag to false, so that further checks can be
// executed.
func (e *SliceExpectation) Reset() {
	e.E.failed = false
}
// Contains checks that the slice (or array) value contains every expected
// value; any that are missing are reported in the failure message.
func (e *SliceExpectation) Contains(expectedValues ...interface{}) *SliceExpectation {
	if e.E.failed {
		return e
	}
	kind := reflect.TypeOf(e.E.Value).Kind()
	if !(kind == reflect.Slice || kind == reflect.Array) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be a slice", e.E.Value, e.E.Value))
		return e
	}
	// typesMatch toggles type info in the failure message when the
	// expected values have a different element type.
	typesMatch := checkTypesMatch(toSlice(e.E.Value), expectedValues)
	var lackingValues []interface{}
	for _, expectedValue := range expectedValues {
		if !doContain(e.E.Value, expectedValue) {
			lackingValues = append(lackingValues, expectedValue)
		}
	}
	if len(lackingValues) > 0 {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to contain %v but was missing %v", typesMatch, e.E.Value, expectedValues, lackingValues))
	}
	return e
}
// DoesNotContain checks that the slice (or array) value contains none of
// the expected values; any that are found are reported in the failure
// message.
// (Fix: accepts arrays as well as slices, consistent with Contains - the
// original rejected arrays in this method only.)
func (e *SliceExpectation) DoesNotContain(expectedValues ...interface{}) *SliceExpectation {
	if e.E.failed {
		return e
	}
	kind := reflect.TypeOf(e.E.Value).Kind()
	if !(kind == reflect.Slice || kind == reflect.Array) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be a slice", e.E.Value, e.E.Value))
		return e
	}
	typesMatch := checkTypesMatch(toSlice(e.E.Value), expectedValues)
	var additionalValues []interface{}
	for _, expectedValue := range expectedValues {
		if doContain(e.E.Value, expectedValue) {
			additionalValues = append(additionalValues, expectedValue)
		}
	}
	if len(additionalValues) > 0 {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, buildFailMessage("Expect %v to not contain %v but it includes %v", typesMatch, e.E.Value, expectedValues, additionalValues))
	}
	return e
}
// IsEmpty fails the test unless the slice value has no elements.
// NOTE(review): the variadic parameter is never used; it is kept so that
// existing callers passing arguments keep compiling.
func (e *SliceExpectation) IsEmpty(expectedValues ...interface{}) *SliceExpectation {
	if e.E.failed {
		return e
	}
	if reflect.TypeOf(e.E.Value).Kind() != reflect.Slice {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be a slice", e.E.Value, e.E.Value))
		return e
	}
	if len(toSlice(e.E.Value)) > 0 {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be empty", e.E.Value, e.E.Value))
		return e
	}
	return e
}
// IsNotEmpty fails the test unless the slice value has at least one
// element. The unused variadic parameter is kept for compatibility.
func (e *SliceExpectation) IsNotEmpty(expectedValues ...interface{}) *SliceExpectation {
	if e.E.failed {
		return e
	}
	if reflect.TypeOf(e.E.Value).Kind() != reflect.Slice {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be a slice", e.E.Value, e.E.Value))
		return e
	}
	if len(toSlice(e.E.Value)) == 0 {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be not empty", e.E.Value, e.E.Value))
		return e
	}
	return e
}
// HasSize asserts that the slice under test has exactly expectedValue
// elements.
func (e *SliceExpectation) HasSize(expectedValue uint) *SliceExpectation {
	if e.E.failed {
		return e
	}
	if reflect.TypeOf(e.E.Value).Kind() != reflect.Slice {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be a slice", e.E.Value, e.E.Value))
		return e
	}
	actual := len(toSlice(e.E.Value))
	if actual != int(expectedValue) {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect len of %v %T to be %v and not %v", e.E.Value, e.E.Value, expectedValue, actual))
	}
	return e
}
// First returns an Expectation for the slice element at index 0.
func (e *SliceExpectation) First() *Expectation {
	return e.Nth(0)
}

// Second returns an Expectation for the slice element at index 1.
func (e *SliceExpectation) Second() *Expectation {
	return e.Nth(1)
}

// Third returns an Expectation for the slice element at index 2.
func (e *SliceExpectation) Third() *Expectation {
	return e.Nth(2)
}
// Nth returns a fresh Expectation focused on the element at the 0-based
// index nthElement. If the value is not a slice or is too short, the
// expectation is failed and the original (now failed) Expectation is
// returned instead.
func (e *SliceExpectation) Nth(nthElement int) *Expectation {
	if e.E.failed {
		return e.E
	}
	if reflect.TypeOf(e.E.Value).Kind() != reflect.Slice {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to be a slice", e.E.Value, e.E.Value))
		return e.E
	}
	valueAsSlice := toSlice(e.E.Value)
	if len(valueAsSlice) <= nthElement {
		e.E.failed = true
		fail(e.E.T, e.E.Logger, fmt.Sprintf("Expect %v %T to have at least %v elements", e.E.Value, e.E.Value, nthElement+1))
		return e.E
	}
	// Wrap the selected element in a new, not-yet-failed Expectation.
	return &Expectation{e.E.T, e.E.Logger, valueAsSlice[nthElement], false}
}
// toSlice copies the elements of an arbitrary slice value into a
// []interface{} using reflection. It panics (inside reflect) when value
// is not a slice.
func toSlice(value interface{}) []interface{} {
	src := reflect.ValueOf(value)
	result := make([]interface{}, src.Len(), src.Cap())
	for i := range result {
		result[i] = src.Index(i).Interface()
	}
	return result
}
// doContain reports whether expectedValue compares equal (==) to any
// element of sliceValue.
func doContain(sliceValue, expectedValue interface{}) bool {
	items := toSlice(sliceValue)
	for i := range items {
		if items[i] == expectedValue {
			return true
		}
	}
	return false
}
// checkTypesMatch reports whether every expected value shares the dynamic
// type of the first actual value. An empty actual slice matches anything.
func checkTypesMatch(values, expectedValues []interface{}) bool {
	if len(values) == 0 {
		return true
	}
	want := reflect.TypeOf(values[0])
	for _, candidate := range expectedValues {
		if reflect.TypeOf(candidate) != want {
			return false
		}
	}
	return true
}
// Ordinal outcomes returned by the compare* helpers below.
const (
	greater = iota // actual is greater than expected
	lower = iota // actual is less than expected
	equal = iota // operands compare equal
	notComparable = iota // operands cannot be compared (type mismatch or nil)
	comparable = iota // operands share a comparable type
	notEqual = iota // operands are comparable but differ
)
// compareEquality classifies two values as equal, notEqual, or
// notComparable (when their dynamic types differ).
func compareEquality(expected interface{}, actual interface{}) uint {
	switch {
	case expected == actual:
		return equal
	case reflect.TypeOf(expected) != reflect.TypeOf(actual):
		return notComparable
	default:
		return notEqual
	}
}
// compareInt orders actual relative to expected for signed integers.
func compareInt(expected int64, actual int64) uint {
	switch {
	case actual > expected:
		return greater
	case actual < expected:
		return lower
	default:
		return equal
	}
}

// compareUint orders actual relative to expected for unsigned integers.
func compareUint(expected uint64, actual uint64) uint {
	switch {
	case actual > expected:
		return greater
	case actual < expected:
		return lower
	default:
		return equal
	}
}

// compareFloat orders actual relative to expected for floating-point
// values. Note: NaN operands fall through to equal here, since all
// comparisons with NaN are false.
func compareFloat(expected float64, actual float64) uint {
	switch {
	case actual > expected:
		return greater
	case actual < expected:
		return lower
	default:
		return equal
	}
}

// compareString orders actual relative to expected lexicographically
// (byte-wise string comparison).
func compareString(expected string, actual string) uint {
	switch {
	case actual > expected:
		return greater
	case actual < expected:
		return lower
	default:
		return equal
	}
}
// doCompare orders actual relative to expected, returning greater, lower,
// equal, or notComparable. Operands are comparable only when both are
// non-nil, share the same dynamic type, and that type is one of the
// numeric or string kinds listed below.
func doCompare(expected interface{}, actual interface{}) uint {
	if expected == actual {
		return equal
	}
	if expected == nil || actual == nil {
		return notComparable
	}
	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
		return notComparable
	}
	// Types are known to match here, so the actual.(T) assertions below
	// cannot panic. Each case widens to the largest type of its family.
	switch expected.(type) {
	case int:
		return compareInt(int64(expected.(int)), int64(actual.(int)))
	case int8:
		return compareInt(int64(expected.(int8)), int64(actual.(int8)))
	case int16:
		return compareInt(int64(expected.(int16)), int64(actual.(int16)))
	case int32:
		return compareInt(int64(expected.(int32)), int64(actual.(int32)))
	case int64:
		return compareInt(expected.(int64), actual.(int64))
	case uint:
		return compareUint(uint64(expected.(uint)), uint64(actual.(uint)))
	case uint8:
		return compareUint(uint64(expected.(uint8)), uint64(actual.(uint8)))
	case uint16:
		return compareUint(uint64(expected.(uint16)), uint64(actual.(uint16)))
	case uint32:
		return compareUint(uint64(expected.(uint32)), uint64(actual.(uint32)))
	case uint64:
		return compareUint(expected.(uint64), actual.(uint64))
	case uintptr:
		return compareUint(uint64(expected.(uintptr)), uint64(actual.(uintptr)))
	case float32:
		return compareFloat(float64(expected.(float32)), float64(actual.(float32)))
	case float64:
		return compareFloat(expected.(float64), actual.(float64))
	case string:
		return compareString(expected.(string), actual.(string))
	}
	return notComparable
}
// doMap returns a new slice holding fn applied to each element of source.
func doMap(source []interface{}, fn func(interface{}) interface{}) []interface{} {
	result := make([]interface{}, len(source))
	for i, item := range source {
		result[i] = fn(item)
	}
	return result
}
// Flags for the showTypeInfos parameter of buildFailMessage:
// showTypeInfos requests "value (type)" annotation of each argument,
// hideTypeInfos requests plain %v rendering.
// Bug fix: hideTypeInfos was previously declared as true, making it
// indistinguishable from showTypeInfos and always printing type info.
const showTypeInfos = true
const hideTypeInfos = false
// buildFailMessage formats message with args, optionally annotating each
// argument with its dynamic type (via addTypes) when showTypeInfos is
// true. Note: the bool parameter shadows the package-level showTypeInfos
// constant of the same name.
func buildFailMessage(message string, showTypeInfos bool, args ...interface{}) string {
	formattedArgs := doMap(args, func(value interface{}) interface{} {
		if showTypeInfos {
			return addTypes(value)
		}
		return fmt.Sprintf("%v", value)
	})
	return fmt.Sprintf(message, formattedArgs...)
}
// addTypes renders value as "value (type)". Slices are rendered element by
// element as "[v1 (T1), v2 (T2), ...]".
func addTypes(value interface{}) string {
	if value == nil || reflect.TypeOf(value).Kind() != reflect.Slice {
		return fmt.Sprintf("%v (%T)", value, value)
	}
	parts := make([]string, 0)
	for _, item := range toSlice(value) {
		parts = append(parts, fmt.Sprintf("%v (%T)", item, item))
	}
	return fmt.Sprintf("[%v]", strings.Join(parts, ", "))
}
// lastFileName remembers the most recently logged test file so fail only
// prints a file-name header when the file changes between failures.
// NOTE(review): package-level mutable state — not safe for parallel tests;
// confirm this library is only used serially.
var lastFileName string

// fail logs a formatted failure message (prefixed by a header naming the
// calling test file the first time that file fails) and then marks the
// test as failed via f.Fail().
func fail(f FailFunction, l Logger, message string) {
	fileName, methodName, line := determineCodeLocation()
	if lastFileName != fileName {
		l.Log(fileName)
		l.Log(strings.Repeat("-", len(fileName)))
		lastFileName = fileName
	}
	l.Log(fmt.Sprintf("--- %v in line %v: %v\n", methodName, line, message))
	f.Fail()
}
// determineCodeLocation resolves the short file name (directories
// stripped), the bare method name (package qualifier stripped), and the
// line number of the stack frame three levels above this function — the
// assertion call site. The Go runtime reports method values with a "-fm"
// suffix; in that case the real caller sits one frame further up.
//
// Improvement: the previous version walked the whole call stack four
// times via repeated getFrame(3) calls; the frame is now fetched once.
// The stack depth at the getFrame call site is unchanged, so the same
// frame is selected. The redundant Contains+manual-suffix check is
// replaced by the equivalent strings.HasSuffix.
func determineCodeLocation() (string, string, int) {
	frame := getFrame(3)
	fileName := frame.File[strings.LastIndex(frame.File, "/")+1:]
	methodName := frame.Function[strings.LastIndex(frame.Function, ".")+1:]
	line := frame.Line
	if strings.HasSuffix(methodName, "-fm") {
		outer := getFrame(4)
		methodName = outer.Function[strings.LastIndex(outer.Function, ".")+1:]
	}
	return fileName, methodName, line
}
// getFrame returns the stack frame skipFrames levels above the caller of
// getFrame, or a placeholder frame with Function "unknown" when the stack
// is not that deep. runtime.Callers is invoked with skip 0, so index 0 is
// runtime.Callers itself and index 1 is getFrame — hence the +2 below.
func getFrame(skipFrames int) runtime.Frame {
	// We need the frame at index skipFrames+2, since we never want runtime.Callers and getFrame
	targetFrameIndex := skipFrames + 2
	// Set size to targetFrameIndex+2 to ensure we have room for one more caller than we need
	programCounters := make([]uintptr, targetFrameIndex+2)
	n := runtime.Callers(0, programCounters)
	frame := runtime.Frame{Function: "unknown"}
	if n > 0 {
		frames := runtime.CallersFrames(programCounters[:n])
		// Walk with CallersFrames (not raw PCs) so inlined calls are
		// expanded correctly, stopping at the requested index.
		for more, frameIndex := true, 0; more && frameIndex <= targetFrameIndex; frameIndex++ {
			var frameCandidate runtime.Frame
			frameCandidate, more = frames.Next()
			if frameIndex == targetFrameIndex {
				frame = frameCandidate
			}
		}
	}
	return frame
}
package dendrolog
// renderedNode pairs a rendered text block with the inclusive horizontal
// span [start, end] (x coordinates inside the block) that a parent node
// may attach connector lines to.
type renderedNode struct {
	block stringBlock
	// Defines at which x coord in this block on which the zone we can connect lines to starts
	start int
	// Defines at which x coord in this block on which the zone we can connect lines to end
	end int
}

// baseSpacing is the minimum number of columns kept between sibling subtrees.
const baseSpacing = 3

//const debugConnections = false

// lineRow and connectRow are row offsets below a node's own block: the
// connector characters ("/", "|", "\") go on lineRow, the children's
// blocks start on connectRow.
const lineRow = 0

//const debugRow = 0

const connectRow = 1
// renderSingleChild stacks a node's own block above its only child and
// draws a single "|" connector between them. Whichever block is wider
// stays at x 0 and the other is centered relative to it; the returned
// connection span always covers the parent block's width.
func renderSingleChild(
	child renderedNode,
	self stringBlock,
) renderedNode {
	renderer := stringRenderer{}
	into := renderer.createBlockFromString("")
	start := 0
	end := 0
	// Offset that centers the narrower block against the wider one
	// (negative when the child is wider).
	x := (self.width - child.block.width) / 2
	if self.width > child.block.width {
		// Parent is wider: center the child beneath it and drop the
		// connector at the middle of the child's connectable zone.
		self.renderInto(0, 0, &into)
		child.block.renderInto(x, self.height+connectRow, &into)
		into = into.appendString(x+(child.end-child.start)/2, self.height+lineRow, "|")
		start = 0
		end = self.width
		/*if debugConnections {
			into = into.appendString(x, self.height+debugRow, repeat(child.end-child.start+1, debugChar))
		}*/
	} else {
		// Child is at least as wide: center the parent above it.
		self.renderInto(-x, 0, &into)
		child.block.renderInto(0, self.height+connectRow, &into)
		into = into.appendString(-x, self.height+lineRow, "|")
		start = -x
		end = -x + self.width
		/*if debugConnections {
			into = into.appendString(0, self.height+debugRow, repeat(child.end-child.start+1, debugChar))
		}*/
	}
	return renderedNode{
		block: into,
		start: start,
		end: end - 1,
	}
}
// renderBinaryChildren places two child subtrees side by side, connects
// them with "/" and "\" diagonals, bridges the gap between the diagonals
// with "_" characters, and centers the parent block on that bridge.
func renderBinaryChildren(
	left renderedNode,
	right renderedNode,
	self stringBlock,
) renderedNode {
	renderer := stringRenderer{}
	into := renderer.createBlockFromString("")
	// Spacing between the children: at least baseSpacing, wide enough for
	// the parent block, and wide enough that the parent fits between the
	// two connectable zones.
	spacing := intMax(intMax(baseSpacing, self.width-(left.block.width-left.end-1+right.start-1)), self.width+2)
	left.block.renderInto(0, self.height+connectRow, &into)
	right.block.renderInto(left.block.width+spacing, self.height+connectRow, &into)
	/*if debugConnections {
		into = into.appendString(left.start, self.height+debugRow, repeat(left.end-left.start+1, debugChar))
		into = into.appendString(left.block.width+spacing+right.start, self.height+debugRow, repeat(right.end-right.start+1, debugChar))
	}*/
	into = into.appendString(left.end+1, self.height+lineRow, "/")
	into = into.appendString(left.block.width+spacing+right.start-1, self.height+lineRow, "\\")
	// leftMost/rightMost are the columns of the two diagonals; the parent
	// is centered in between and the remainder is filled with "_".
	leftMost := left.end + 1
	rightMost := left.block.width + spacing + right.start - 1
	selfStart := leftMost + intMax(0, (rightMost-leftMost-self.width)/2) + 1
	into = into.appendString(leftMost+1, 0, repeat(rightMost-leftMost-1, "_"))
	self.renderInto(selfStart, 0, &into)
	return renderedNode{
		block: into,
		start: selfStart,
		end: selfStart + self.width - 1,
	}
}
// renderLines connects a node block to its already-rendered children.
// One child and two children are delegated to the specialised helpers;
// the general n-ary case lays children out left to right with uniform
// spacing, connecting the outermost children with "/" and "\" diagonals
// and every inner child with a vertical "|".
func renderLines(
	children []renderedNode,
	self stringBlock,
) renderedNode {
	childCount := len(children)
	switch childCount {
	case 1:
		return renderSingleChild(children[0], self)
	case 2:
		return renderBinaryChildren(children[0], children[1], self)
	}
	renderer := stringRenderer{}
	into := renderer.createBlockFromString("")
	// Width of the inner children (excluding first and last), used to
	// decide how much extra spacing the parent block requires.
	innerWidth := 0
	for _, child := range children[1 : len(children)-1] {
		innerWidth = innerWidth + child.block.width
	}
	freeSpace := intMax(0, self.width+2-innerWidth)
	childSpacing := intMax(baseSpacing, freeSpace/(len(children)-1))
	// leftAngle/rightAngle track the columns of the outer "/" and "\"
	// connectors; the parent is centered between them afterwards.
	leftAngle := 0
	rightAngle := into.width
	offset := 0
	for i, child := range children {
		if i == 0 {
			leftAngle = offset + child.end + 2
			into = into.appendString(offset+child.end+1, self.height+lineRow, "/")
			child.block.renderInto(0, self.height+connectRow, &into)
			/*if debugConnections {
				into = into.appendString(child.start, self.height+debugRow, repeat(child.end-child.start+1, debugChar))
			}*/
		} else if i == len(children)-1 {
			rightAngle = offset + child.start
			into = into.appendString(rightAngle, self.height+lineRow, "\\")
			child.block.renderInto(rightAngle-child.start+1, self.height+connectRow, &into)
			/*if debugConnections {
				into = into.appendString(rightAngle+1, self.height+debugRow, repeat(child.end-child.start+1, debugChar))
			}*/
		} else {
			// Inner child: plain vertical connector at the middle of its
			// connectable zone.
			into = into.appendString(offset+child.start+(child.end-child.start)/2, self.height+lineRow, "|")
			child.block.renderInto(offset, self.height+connectRow, &into)
			/*if debugConnections {
				into = into.appendString(offset+child.start, self.height+debugRow, repeat(child.end-child.start+1, debugChar))
			}*/
		}
		offset = offset + childSpacing + child.block.width
	}
	// Center the parent between the outer connectors and fill the
	// remaining space on either side with "_".
	x := (leftAngle + rightAngle - self.width) / 2
	self.renderInto(x, 0, &into)
	into = into.appendString(leftAngle, 0, repeat(x-leftAngle, "_"))
	into = into.appendString(x+self.width, 0, repeat(rightAngle-(x+self.width), "_"))
	return renderedNode{
		block: into,
		start: x,
		end: x + self.width - 1,
	}
}
//go:generate ./genCode.sh
// Package align provide basic sequence alignment types and helpers.
package align
import (
"github.com/biogo/biogo/alphabet"
"github.com/biogo/biogo/feat"
"github.com/biogo/biogo/seq"
"errors"
"fmt"
)
// AlphabetSlicer is the minimal view of a sequence required for
// alignment: its alphabet plus the raw letter data.
type AlphabetSlicer interface {
	Alphabet() alphabet.Alphabet
	Slice() alphabet.Slice
}

// An Aligner aligns the sequence data of two type-matching Slicers, returning an ordered
// slice of features describing matching and mismatching segments. The sequences to be aligned
// must have a valid gap letter in the first position of their alphabet; the alphabets
// {DNA,RNA}{gapped,redundant} and Protein provided by the alphabet package satisfy this.
type Aligner interface {
	Align(reference, query AlphabetSlicer) ([]feat.Pair, error)
}

// A Linear is a basic linear gap penalty alignment description.
// It is a square scoring matrix with the first column and first row specifying gap penalties.
type Linear [][]int

// An Affine is a basic affine gap penalty alignment description.
type Affine struct {
	// Matrix is the square scoring matrix (see Linear).
	Matrix Linear
	// GapOpen is the additional penalty applied when opening a gap.
	GapOpen int
}

// Compile-time assertions that the concrete aligners satisfy Aligner.
var (
	_ Aligner = SW{}
	_ Aligner = NW{}
)

const (
	// Traceback directions for the dynamic programming tables.
	diag = iota
	up
	left

	// gap is the alphabet index of the gap letter (position 0 by the
	// Aligner contract).
	gap = 0

	// minInt is the minimum representable int, used as -infinity when
	// scoring; see add, which saturates on it.
	minInt = -int(^uint(0)>>1) - 1
)

// Sentinel errors reported for inputs that cannot be aligned.
var (
	ErrMismatchedTypes = errors.New("align: mismatched sequence types")
	ErrMismatchedAlphabets = errors.New("align: mismatched alphabets")
	ErrNoAlphabet = errors.New("align: no alphabet")
	ErrNotGappedAlphabet = errors.New("align: alphabet does not have gap at position 0")
	ErrTypeNotHandled = errors.New("align: sequence type not handled")
	ErrMatrixNotSquare = errors.New("align: scoring matrix is not square")
)

// ErrMatrixWrongSize describes a scoring matrix whose size does not match
// the length of the alphabet it is used with.
type ErrMatrixWrongSize struct {
	Size int // size of the matrix
	Len int // length of the alphabet
}

// Error satisfies the error interface.
func (e ErrMatrixWrongSize) Error() string {
	return fmt.Sprintf("align: scoring matrix size %d does not match alphabet length %d", e.Size, e.Len)
}
// max3 returns the largest of a, b and c.
func max3(a, b, c int) int {
	m := a
	if b > m {
		m = b
	}
	if c > m {
		m = c
	}
	return m
}
// max2 returns the larger of a and b.
func max2(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// add sums a and b, treating minInt as negative infinity: if either
// operand is minInt, the result saturates to minInt instead of wrapping.
func add(a, b int) int {
	switch {
	case a == minInt, b == minInt:
		return minInt
	default:
		return a + b
	}
}
// feature is a lightweight feat.Feature describing the half-open interval
// [start, end) within an optional parent location.
type feature struct {
	start, end int
	loc feat.Feature
}

// Name returns the parent location's name, or "" when there is none.
func (f feature) Name() string {
	if f.loc != nil {
		return f.loc.Name()
	}
	return ""
}

// Description returns the parent location's description, or "" when there
// is none.
func (f feature) Description() string {
	if f.loc != nil {
		return f.loc.Description()
	}
	return ""
}

// Location, Start, End and Len satisfy the feat.Feature interface.
func (f feature) Location() feat.Feature { return f.loc }
func (f feature) Start() int { return f.start }
func (f feature) End() int { return f.end }
func (f feature) Len() int { return f.end - f.start }
// featPair is a feat.Pair linking an aligned segment of sequence a with
// the corresponding segment of sequence b, plus that segment's score.
type featPair struct {
	a, b feature
	score int
}

// Features returns the two paired features in [a, b] order.
func (fp *featPair) Features() [2]feat.Feature { return [2]feat.Feature{fp.a, fp.b} }

// Score returns the segment's alignment score.
func (fp *featPair) Score() int { return fp.score }

// Invert swaps the a and b features in place.
func (fp *featPair) Invert() { fp.a, fp.b = fp.b, fp.a }

// String renders the pair as "a[start,end)/b[start,end)=score", with "-"
// standing in for a zero-length (gap) segment on either side.
func (fp *featPair) String() string {
	switch {
	case fp.a.start == fp.a.end:
		return fmt.Sprintf("-/%s[%d,%d)=%d",
			fp.b.Name(), fp.b.start, fp.b.end,
			fp.score)
	case fp.b.start == fp.b.end:
		return fmt.Sprintf("%s[%d,%d)/-=%d",
			fp.a.Name(), fp.a.start, fp.a.end,
			fp.score)
	}
	return fmt.Sprintf("%s[%d,%d)/%s[%d,%d)=%d",
		fp.a.Name(), fp.a.start, fp.a.end,
		fp.b.Name(), fp.b.start, fp.b.end,
		fp.score)
}
// Format returns a [2]alphabet.Slice representing the formatted alignment of a and b described by the
// list of feature pairs in f, with gap used to fill gaps in the alignment.
func Format(a, b seq.Slicer, f []feat.Pair, gap alphabet.Letter) [2]alphabet.Slice {
	var as, aln [2]alphabet.Slice
	// as holds the raw sequence data; aln accumulates the gapped output.
	for i, s := range [2]seq.Slicer{a, b} {
		as[i] = s.Slice()
		aln[i] = as[i].Make(0, 0)
	}
	for _, fs := range f {
		fc := fs.Features()
		for i := range aln {
			if fc[i].Len() == 0 {
				// This side of the pair is a gap: pad with the gap letter
				// for the length of the opposite side's segment (fc[1-i]).
				switch aln[i].(type) {
				case alphabet.Letters:
					aln[i] = aln[i].Append(alphabet.Letters(gap.Repeat(fc[1-i].Len())))
				case alphabet.QLetters:
					aln[i] = aln[i].Append(alphabet.QLetters(alphabet.QLetter{L: gap}.Repeat(fc[1-i].Len())))
				}
			} else {
				// Aligned segment: copy the sequence data verbatim.
				aln[i] = aln[i].Append(as[i].Slice(fc[i].Start(), fc[i].End()))
			}
		}
	}
	return aln
}
package condition
import (
"encoding/json"
"errors"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/util/text"
)
//------------------------------------------------------------------------------
// init registers the check_interpolation condition type together with its
// documentation and config-sanitisation hook.
func init() {
	Constructors[TypeCheckInterpolation] = TypeSpec{
		constructor: NewCheckInterpolation,
		description: `
Resolves a string containing
[function interpolations](../config_interpolation.md#functions) and then tests
the result against a child condition.

For example, you could use this to test against the size of a message batch:

` + "``` yaml" + `
check_interpolation:
  value: ${!batch_size}
  condition:
    number:
      operator: greater_than
      arg: 1
` + "```" + ``,
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			// Render a missing child condition as an empty object rather
			// than null in sanitised config output.
			var condConf interface{} = struct{}{}
			if conf.CheckInterpolation.Condition != nil {
				var err error
				if condConf, err = SanitiseConfig(*conf.CheckInterpolation.Condition); err != nil {
					return nil, err
				}
			}
			return map[string]interface{}{
				"value": conf.CheckInterpolation.Value,
				"condition": condConf,
			}, nil
		},
	}
}
//------------------------------------------------------------------------------
// CheckInterpolationConfig contains configuration fields for the CheckInterpolation condition.
type CheckInterpolationConfig struct {
	// Value is the interpolated string to resolve per message.
	Value string `json:"value" yaml:"value"`
	// Condition is the child condition applied to the resolved value.
	Condition *Config `json:"condition" yaml:"condition"`
}

// NewCheckInterpolationConfig returns a CheckInterpolationConfig with default values.
func NewCheckInterpolationConfig() CheckInterpolationConfig {
	return CheckInterpolationConfig{
		Value: "",
		Condition: nil,
	}
}

//------------------------------------------------------------------------------

// dummyCheckInterpolationConfig mirrors CheckInterpolationConfig but with an
// untyped Condition, so marshalling can substitute {} for a nil child.
type dummyCheckInterpolationConfig struct {
	Value string `json:"value" yaml:"value"`
	Condition interface{} `json:"condition" yaml:"condition"`
}

// MarshalJSON prints an empty object instead of nil.
func (c CheckInterpolationConfig) MarshalJSON() ([]byte, error) {
	dummy := dummyCheckInterpolationConfig{
		Value: c.Value,
		Condition: c.Condition,
	}
	if c.Condition == nil {
		dummy.Condition = struct{}{}
	}
	return json.Marshal(dummy)
}

// MarshalYAML prints an empty object instead of nil.
func (c CheckInterpolationConfig) MarshalYAML() (interface{}, error) {
	dummy := dummyCheckInterpolationConfig{
		Value: c.Value,
		Condition: c.Condition,
	}
	if c.Condition == nil {
		dummy.Condition = struct{}{}
	}
	return dummy, nil
}
//------------------------------------------------------------------------------
// CheckInterpolation is a condition that resolves an interpolated string field
// and checks the contents against a child condition.
type CheckInterpolation struct {
	conf CheckInterpolationConfig
	log log.Modular
	stats metrics.Type

	// child is the condition the resolved value is tested against.
	child Type
	// value is the pre-parsed interpolated expression.
	value *text.InterpolatedBytes

	// Metrics: total checks, and how many returned true/false.
	mCount metrics.StatCounter
	mTrue metrics.StatCounter
	mFalse metrics.StatCounter
}

// NewCheckInterpolation returns a CheckInterpolation condition. It errors
// when no child condition is configured or the child fails to construct.
func NewCheckInterpolation(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	if conf.CheckInterpolation.Condition == nil {
		return nil, errors.New("cannot create check_interpolation condition without a child")
	}
	child, err := New(*conf.CheckInterpolation.Condition, mgr, log, stats)
	if err != nil {
		return nil, err
	}
	return &CheckInterpolation{
		conf: conf.CheckInterpolation,
		log: log,
		stats: stats,
		child: child,
		value: text.NewInterpolatedBytes([]byte(conf.CheckInterpolation.Value)),

		mCount: stats.GetCounter("count"),
		mTrue: stats.GetCounter("true"),
		mFalse: stats.GetCounter("false"),
	}, nil
}
//------------------------------------------------------------------------------
// Check attempts to check a message part against a configured condition.
// It resolves the interpolated value against the whole message, wraps the
// result in a single-part message, and delegates to the child condition.
func (c *CheckInterpolation) Check(msg types.Message) bool {
	c.mCount.Incr(1)

	// Build a one-part payload whose content is the resolved interpolation;
	// the first part is copied so metadata is preserved.
	payload := message.New(nil)
	payload.Append(msg.Get(0).Copy().Set(c.value.Get(msg)))

	res := c.child.Check(payload)
	if res {
		c.mTrue.Incr(1)
	} else {
		c.mFalse.Incr(1)
	}
	return res
}

//------------------------------------------------------------------------------
package matrix
import "errors"
// Matrix is a dense rows×columns matrix of float64 values, stored in
// row-major order in cells (index = column + row*columns).
type Matrix struct {
	rows int
	columns int
	cells []float64
}
// New returns a zero-initialized matrix with the given dimensions.
func New(rows, columns int) Matrix {
	cells := make([]float64, rows*columns)
	return Matrix{rows: rows, columns: columns, cells: cells}
}
// NewWithValues builds a rows×columns matrix initialized from vals, given
// in row-major order. It returns an error when the number of values does
// not match the requested dimensions.
//
// Improvement: the values are now copied into the matrix's own backing
// slice. The previous implementation aliased the caller's vals slice
// (discarding the slice New had just allocated), so later mutation of
// vals by the caller would silently alter the matrix.
func NewWithValues(rows, columns int, vals ...float64) (Matrix, error) {
	if len(vals) != rows*columns {
		return Matrix{}, errors.New("Must provide rows*columns values")
	}
	m := New(rows, columns)
	copy(m.cells, vals)
	return m, nil
}
// Dimensions returns the matrix's row and column counts.
func (m Matrix) Dimensions() (int, int) {
	return m.rows, m.columns
}

// Set writes v into the cell at row x, column y, returning an error when
// the coordinate is out of bounds.
func (m Matrix) Set(x, y int, v float64) error {
	ix, err := m.findCell(x, y)
	if err != nil {
		return err
	}
	m.cells[ix] = v
	return nil
}

// Get reads the cell at row x, column y, returning an error when the
// coordinate is out of bounds.
func (m Matrix) Get(x, y int) (float64, error) {
	ix, err := m.findCell(x, y)
	if err != nil {
		return 0, err
	}
	return m.cells[ix], nil
}
// Equals reports whether m and n have identical dimensions and identical
// cell values.
func (m Matrix) Equals(n Matrix) bool {
	if m.rows != n.rows || m.columns != n.columns {
		return false
	}
	for i := range m.cells {
		if m.cells[i] != n.cells[i] {
			return false
		}
	}
	return true
}
// Add returns the element-wise sum m + n, or an error when the dimensions
// differ.
func (m Matrix) Add(n Matrix) (Matrix, error) {
	if m.rows != n.rows || m.columns != n.columns {
		return Matrix{}, errors.New("matrix dimensions must match")
	}
	out := New(m.rows, m.columns)
	for i := range out.cells {
		out.cells[i] = m.cells[i] + n.cells[i]
	}
	return out, nil
}
// Subtract returns the element-wise difference m - n, or an error when
// the dimensions differ.
func (m Matrix) Subtract(n Matrix) (Matrix, error) {
	if m.rows != n.rows || m.columns != n.columns {
		return Matrix{}, errors.New("matrix dimensions must match")
	}
	out := New(m.rows, m.columns)
	for i := range out.cells {
		out.cells[i] = m.cells[i] - n.cells[i]
	}
	return out, nil
}
// ScalarMultiply returns a copy of m with every cell multiplied by x.
func (m Matrix) ScalarMultiply(x float64) Matrix {
	out := New(m.rows, m.columns)
	for i, v := range m.cells {
		out.cells[i] = v * x
	}
	return out
}
// Multiply returns the matrix product m×n. The left operand's column
// count must equal the right operand's row count; otherwise an error is
// returned.
func (m Matrix) Multiply(n Matrix) (Matrix, error) {
	if m.columns != n.rows {
		return Matrix{}, errors.New("left matrix must have same number of columns as right matrix has rows")
	}
	product := New(m.rows, n.columns)
	for i := 0; i < m.rows; i++ {
		for j := 0; j < n.columns; j++ {
			// Dot product of row i of m with column j of n, indexing the
			// row-major cell slices directly.
			var v float64 = 0
			for k := 0; k < m.columns; k++ {
				v += m.cells[k+i*m.columns] * n.cells[j+k*n.columns]
			}
			// Set cannot fail here: i and j are in range by construction,
			// so its error return is deliberately ignored.
			product.Set(i, j, v)
		}
	}
	return product, nil
}
func (m Matrix) findCell(r, c int) (int, error) {
if r >= m.rows || r < 0 {
return 0, errors.New("row out of bounds")
}
if c >= m.columns || r < 0 {
return 0, errors.New("column out of bounds")
}
return c + (r * m.columns), nil
} | matrix.go | 0.80112 | 0.63273 | matrix.go | starcoder |
package bes
import (
"github.com/OpenWhiteBox/primitives/gfmatrix"
"github.com/OpenWhiteBox/primitives/number"
)
// Powers of x mod M(x). powx[i] supplies the round-constant byte used in
// key expansion, matching AES's Rcon values.
var powx = [16]byte{0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f}

// Construction implements the BES block cipher over a 128-component state
// of GF(2^8) field elements.
type Construction struct {
	// A 128-byte BES key.
	Key gfmatrix.Row
}

// BlockSize returns the block size of BES. (Necessary to implement cipher.Block.)
func (constr Construction) BlockSize() int { return 128 }
// Encrypt encrypts the first block in src into dst. Dst and src may point at the same memory.
// Each input byte is lifted into a GF(2^8) field element, the core cipher
// runs on the 128-element row, and the result is lowered back to bytes.
func (constr Construction) Encrypt(dst, src []byte) {
	state := gfmatrix.NewRow(128)
	for pos := 0; pos < 128; pos++ {
		state[pos] = number.ByteFieldElem(src[pos])
	}

	state = constr.encrypt(state)

	for pos := 0; pos < 128; pos++ {
		dst[pos] = byte(state[pos])
	}
}

// Decrypt decrypts the first block in src into dst. Dst and src may point at the same memory.
func (constr Construction) Decrypt(dst, src []byte) {
	state := gfmatrix.NewRow(128)
	for pos := 0; pos < 128; pos++ {
		state[pos] = number.ByteFieldElem(src[pos])
	}

	state = constr.decrypt(state)

	for pos := 0; pos < 128; pos++ {
		dst[pos] = byte(state[pos])
	}
}
// encrypt runs the forward cipher on a 128-element state: initial key
// addition, nine full rounds (S-box, linear round matrix, round constant
// and round key), then a final round using lastRound. The matrices
// round, lastRound and roundConst are package-level values defined
// elsewhere in the package.
func (constr *Construction) encrypt(in gfmatrix.Row) gfmatrix.Row {
	roundKeys := constr.StretchedKey()

	state := in.Add(roundKeys[0])
	for i := 1; i <= 9; i++ {
		state = constr.subBytes(state)
		state = round.Mul(state)
		state = state.Add(roundConst).Add(roundKeys[i])
	}

	state = constr.subBytes(state)
	state = lastRound.Mul(state)
	state = state.Add(roundConst).Add(roundKeys[10])

	return state
}

// decrypt runs the inverse cipher, undoing encrypt's steps in reverse
// order using firstRound/unRound (the inverse linear layers). subBytes is
// its own inverse, so the same helper is used in both directions.
func (constr *Construction) decrypt(in gfmatrix.Row) gfmatrix.Row {
	roundKeys := constr.StretchedKey()

	state := in.Add(roundConst).Add(roundKeys[10])
	state = firstRound.Mul(state)
	state = constr.subBytes(state)

	for i := 9; i >= 1; i-- {
		state = state.Add(roundConst).Add(roundKeys[i])
		state = unRound.Mul(state)
		state = constr.subBytes(state)
	}

	state = state.Add(roundKeys[0])

	return state
}
// StretchedKey implements BES' key schedule. It returns the 11 round keys derived from the master key.
// The 128-element master key is treated as four 32-element words; every
// fourth word is rotated, passed through the S-box and the wordSubBytes
// linear layer, and XORed with a round constant from powx — mirroring the
// AES key schedule over the BES state.
func (constr *Construction) StretchedKey() [11]gfmatrix.Row {
	var (
		i int = 0
		stretched [4 * 11]gfmatrix.Row
		split [11]gfmatrix.Row
	)

	// The first four words are the master key itself.
	for ; i < 4; i++ {
		stretched[i] = constr.Key[32*i : 32*(i+1)]
	}

	for ; i < (4 * 11); i++ {
		temp := stretched[i-1]

		if (i % 4) == 0 {
			// Rotate by 8 components (one byte's worth of state), apply
			// the S-box and linear layer, then add the round constant.
			temp = append(temp[8:], temp[:8]...)
			temp = constr.subBytes(temp)
			temp = wordSubBytes.Mul(temp).Add(wordSubBytesConst)
			temp = temp.Add(Expand([]byte{powx[i/4-1], 0, 0, 0}))
		}

		stretched[i] = stretched[i-4].Add(temp)
	}

	// Regroup the 44 words into 11 round keys of 128 components each.
	for j := 0; j < 11; j++ {
		split[j] = gfmatrix.NewRow(128)

		for k := 0; k < 4; k++ {
			copy(split[j][32*k:32*(k+1)], stretched[4*j+k])
		}
	}

	return split
}
// subBytes rewrites each byte of the state with the S-Box. unSubBytes and subBytes are the same.
// The BES S-box is componentwise inversion in GF(2^8), which is an
// involution, so one helper serves both encryption and decryption.
func (constr *Construction) subBytes(in gfmatrix.Row) gfmatrix.Row {
	out := gfmatrix.NewRow(in.Size())

	for pos := 0; pos < in.Size(); pos++ {
		out[pos] = in[pos].Invert()
	}

	return out
}
package symbol
import (
"github.com/kocircuit/kocircuit/lang/circuit/model"
"github.com/kocircuit/kocircuit/lang/go/kit/tree"
)
// Unify unifies two types within the given span, returning the smallest
// type that can represent both.
func Unify(span *model.Span, x, y Type) (Type, error) {
	ctx := &typingCtx{Span: span}
	return ctx.Unify(x, y)
}

// UnifyTypes unifies a slice of types within the given span.
func UnifyTypes(span *model.Span, tt []Type) (Type, error) {
	ctx := &typingCtx{Span: span}
	return ctx.UnifyTypes(tt)
}

// UnifyTypes left-folds pairwise unification across tt; an empty slice
// unifies to EmptyType.
func (ctx *typingCtx) UnifyTypes(tt []Type) (unified Type, err error) {
	if len(tt) == 0 {
		return EmptyType{}, nil
	}
	unified = tt[0]
	for i := 1; i < len(tt); i++ {
		if unified, err = ctx.Unify(unified, tt[i]); err != nil {
			return nil, err
		}
	}
	return
}
// Unify(x, y) = Unify(y, x)
// Unify(x, Unify(y, z)) = Unify(Unify(x, y), z)
//
// Unify dispatches on the kinds of x and y in a fixed order (Optional,
// Series, Basic, Opaque, Map, Struct, Variety, Named). Each case only
// handles partners at the same or a later position in that order; earlier
// combinations are resolved by swapping the arguments ("symmetry").
// Empty types are absorbed up front, making the other operand optional.
func (ctx *typingCtx) Unify(x, y Type) (Type, error) {
	switch {
	case IsEmptyType(x) && IsEmptyType(y):
		return EmptyType{}, nil
	case !IsEmptyType(x) && IsEmptyType(y):
		return Optionally(x), nil
	case IsEmptyType(x) && !IsEmptyType(y):
		return Optionally(y), nil
	}
	switch xt := x.(type) {
	case *OptionalType:
		// Optional absorbs: unify the element and re-wrap.
		if elem, err := ctx.Unify(xt.Elem, y); err != nil {
			return nil, err
		} else {
			return Optionally(elem), nil
		}
	case *SeriesType:
		switch yt := y.(type) {
		case *OptionalType:
			return ctx.Unify(y, x) // symmetry
		case *SeriesType:
			return ctx.UnifySeries(xt, yt)
		case BasicType, *OpaqueType, *MapType, *StructType, VarietyType, NamedType:
			// A scalar unifies with a series by lifting it into the
			// series' element type.
			if elem, err := ctx.Unify(xt.Elem, y); err != nil {
				return nil, err
			} else {
				return &SeriesType{elem}, nil
			}
		}
	case BasicType:
		switch yt := y.(type) {
		case *OptionalType, *SeriesType:
			return ctx.Unify(y, x) // symmetry
		case BasicType:
			return ctx.UnifyBasic(xt, yt)
		}
	case *OpaqueType:
		switch yt := y.(type) {
		case *OptionalType, *SeriesType, BasicType:
			return ctx.Unify(y, x) // symmetry
		case *OpaqueType:
			return ctx.UnifyOpaque(xt, yt)
		}
	case *MapType:
		switch yt := y.(type) {
		case *OptionalType, *SeriesType, BasicType, *OpaqueType:
			return ctx.Unify(y, x) // symmetry
		case *MapType:
			return ctx.UnifyMap(xt, yt)
		}
	case *StructType:
		switch yt := y.(type) {
		case *OptionalType, *SeriesType, BasicType, *OpaqueType, *MapType:
			return ctx.Unify(y, x) // symmetry
		case *StructType:
			return ctx.UnifyStruct(xt, yt)
		}
	case VarietyType:
		switch y.(type) {
		case *OptionalType, *SeriesType, BasicType, *OpaqueType, *MapType, *StructType:
			return ctx.Unify(y, x) // symmetry
		case VarietyType:
			return VarietyType{}, nil
		}
	case NamedType:
		switch yt := y.(type) {
		case *OptionalType, *SeriesType, BasicType, *OpaqueType, *MapType, *StructType, VarietyType:
			return ctx.Unify(y, x) // symmetry
		case NamedType:
			return ctx.UnifyNamed(xt, yt)
		}
	}
	return nil, ctx.Errorf(nil, "%s and %s cannot be unified", tree.Sprint(x), tree.Sprint(y))
}
// UnifyBasic unifies two basic types via the unifyBasic table.
func (ctx *typingCtx) UnifyBasic(x, y BasicType) (Type, error) {
	if unified, ok := unifyBasic(x, y); ok {
		return unified, nil
	} else {
		return nil, ctx.Errorf(nil, "basic types %s and %s cannot be unified", tree.Sprint(x), tree.Sprint(y))
	}
}

// UnifyOpaque requires the two opaque types to be identical.
func (ctx *typingCtx) UnifyOpaque(x, y *OpaqueType) (Type, error) {
	if x.Type == y.Type {
		return x, nil
	} else {
		return nil, ctx.Errorf(nil, "opaque types %s and %s cannot be unified", tree.Sprint(x), tree.Sprint(y))
	}
}

// UnifySeries unifies two series by unifying their element types within a
// refined context.
func (ctx *typingCtx) UnifySeries(x, y *SeriesType) (*SeriesType, error) {
	if xyElem, err := ctx.Refine("()").Unify(x.Elem, y.Elem); err != nil {
		return nil, ctx.Errorf(nil, "cannot unify sequences %s and %s", tree.Sprint(x), tree.Sprint(y))
	} else {
		return &SeriesType{Elem: xyElem}, nil
	}
}

// UnifyNamed requires the two named types to be identical.
func (ctx *typingCtx) UnifyNamed(x, y NamedType) (Type, error) {
	if x.Type == y.Type {
		return x, nil
	} else {
		return nil, ctx.Errorf(nil, "named types %s and %s cannot be unified", tree.Sprint(x), tree.Sprint(y))
	}
}

// UnifyMap unifies two maps by unifying their value types.
// NOTE(review): the underlying unification error is discarded here and
// replaced with a generic message — consider wrapping it instead.
func (ctx *typingCtx) UnifyMap(x, y *MapType) (Type, error) {
	if unified, err := ctx.Unify(x.Value, y.Value); err == nil {
		return &MapType{Value: unified}, nil
	} else {
		return nil, ctx.Errorf(nil, "map types %s and %s cannot be unified", tree.Sprint(x), tree.Sprint(y))
	}
}
package layers
import (
"fmt"
"github.com/nathanleary/reticulum/volume"
)
// NewRegressionLayer creates a new regression layer.
// It panics when def.Type is not Regression or def.LayerConfig is not a
// *regressionLayerConfig. The output is the flattened input (1×1×n).
func NewRegressionLayer(def LayerDef) Layer {
	if def.Type != Regression {
		panic(fmt.Errorf("Invalid layer type: %s != regression", def.Type))
	}

	// Get config
	conf, ok := def.LayerConfig.(*regressionLayerConfig)
	if !ok {
		panic("invalid LayerConfig for regressionLayerConfig")
	}

	n := def.Input.Size()
	return &regressionLayer{conf, def.Input, volume.NewDimensions(1, 1, n), nil, nil}
}

// NewRegressionLayerConfig creates a new LayerConfig config with the given options.
// It panics when neurons is not positive or when any option returns an
// error.
func NewRegressionLayerConfig(neurons int, opts ...LayerOptionFunc) LayerConfig {
	if neurons <= 0 {
		panic("neuron count must be greater than 0")
	}

	conf := &regressionLayerConfig{
		Neurons: neurons,
	}

	for i := 0; i < len(opts); i++ {
		err := opts[i](conf)
		if err != nil {
			panic(err)
		}
	}

	return conf
}

// regressionLayerConfig stores the config info for regression layers
type regressionLayerConfig struct {
	// Neurons is the number of output neurons (must be > 0).
	Neurons int
}

// regressionLayer is a loss layer: it passes its input through unchanged
// on the forward pass and only contributes gradients via the *Loss
// methods.
type regressionLayer struct {
	conf *regressionLayerConfig
	inDim volume.Dimensions
	outDim volume.Dimensions
	inVol *volume.Volume
	outVol *volume.Volume
}
// Type identifies this layer as a Regression layer.
func (l *regressionLayer) Type() LayerType {
	return Regression
}

// Forward is the identity: the input volume is recorded and returned
// unchanged (this layer only contributes loss/gradients).
func (l *regressionLayer) Forward(vol *volume.Volume, training bool) *volume.Volume {
	l.inVol = vol
	l.outVol = vol
	return vol
}

// MultiDimensionalLoss computes the squared-error loss 0.5*Σ(x_i - y_i)²
// against the target vector y and writes the per-dimension gradients
// (x_i - y_i) into the input volume. It panics when len(y) does not match
// the layer's output size.
func (l *regressionLayer) MultiDimensionalLoss(y []float64) float64 {
	if len(y) != l.outDim.Size() {
		panic(fmt.Errorf("Invalid input length: %d != %d", len(y), l.outDim.Size()))
	}

	// compute and accumulate gradient wrt weights and bias of this layer
	// zero out the gradient of input Vol
	l.inVol.ZeroGrad()

	var loss float64
	for i := 0; i < l.outDim.Size(); i++ {
		dY := l.inVol.GetByIndex(i) - y[i]
		l.inVol.SetGradByIndex(i, dY)
		loss += 0.5 * dY * dY
	}

	return loss
}

// DimensionalLoss computes the squared-error loss for a single dimension:
// gradient flows only along index, towards target value. It panics when
// index is out of range.
func (l *regressionLayer) DimensionalLoss(index int, value float64) float64 {
	if index < 0 || index >= l.outDim.Size() {
		panic(fmt.Errorf("Invalid dimension index: %d", index))
	}

	// compute and accumulate gradient wrt weights and bias of this layer
	// zero out the gradient of input Vol
	l.inVol.ZeroGrad()

	// assume it is a struct with entries .dim and .val
	// and we pass gradient only along dimension dim to be equal to val
	var loss float64
	dY := l.inVol.GetByIndex(index) - value
	l.inVol.SetGradByIndex(index, dY)
	loss += 0.5 * dY * dY

	return loss
}

// Backward is not supported for regression layers; gradients are produced
// by the *Loss methods instead. Calling it always panics.
func (l *regressionLayer) Backward() {
	panic(fmt.Errorf("Unsupported operation"))
}

// GetResponse reports no trainable parameters for this layer.
func (l *regressionLayer) GetResponse() []LayerResponse {
	return []LayerResponse{}
}
package game
import (
"strings"
"strconv"
"regexp"
"errors"
"constants"
)
// Board models a 6×6 game board as four 3×3 quadrants of cell strings,
// indexed top-left (0), top-right (1), bottom-left (2), bottom-right (3).
type Board struct {
	Quadrants [4][3][3]string
}

// TwoDimensionArrayBoard is the wire representation of a board: a 6×6
// integer grid plus the player whose turn it is.
type TwoDimensionArrayBoard struct {
	Board [][]int `json:"board"`
	CurrentPlayer int `json:"currentPlayer"`
}

// DeserializeTwoDimensionArrayBoard converts the 6×6 wire representation
// into a quadrant-based Board, stringifying each cell value.
//
// Fix: the right-hand quadrant cases previously tested y >= 2 instead of
// y >= 3; the code only behaved correctly because the y < 3 cases matched
// first. The boundary now states the actual intent, and the `switch true`
// anti-idiom is replaced by a bare switch.
func DeserializeTwoDimensionArrayBoard(arr TwoDimensionArrayBoard) Board {
	var board Board
	for x, line := range arr.Board {
		for y, value := range line {
			cell := strconv.Itoa(value)
			switch {
			case x < 3 && y < 3:
				board.Quadrants[0][x][y] = cell
			case x < 3 && y >= 3:
				board.Quadrants[1][x][y-3] = cell
			case x >= 3 && y < 3:
				board.Quadrants[2][x-3][y] = cell
			case x >= 3 && y >= 3:
				board.Quadrants[3][x-3][y-3] = cell
			}
		}
	}
	return board
}
// boardLineRe extracts the runs of digits in a serialized board line.
// Compiled once at package load instead of on every call.
var boardLineRe = regexp.MustCompile("[0-9]+")

// DeserializeBoard parses a 9-line textual board (top border, three
// rows, middle separator, three rows, bottom border) into a Board.
// Each content line must contain exactly 6 numeric values.
// Fix: the second case previously read `y >= 2`, which only behaved
// correctly because the `y < 3` case was evaluated first; it now states
// the intended `y >= 3` explicitly.
func DeserializeBoard(str string) (Board, error) {
	var board Board
	// Split string into array based on whitespace/newlines.
	strArr := strings.Fields(str)
	if len(strArr) != 9 {
		return board, errors.New("Given string is not correctly formatted and have to contain 9 lines.")
	}
	// Drop the first, middle and last lines, which are board delimiters.
	arrayWithoutBorders := append(make([]string, 0, 6), strArr[1:4]...)
	arrayWithoutBorders = append(arrayWithoutBorders, strArr[5:8]...)
	for x, line := range arrayWithoutBorders {
		// Keep only the digit groups of each line.
		lineStrings := boardLineRe.FindAllString(line, -1)
		if len(lineStrings) != 6 {
			return board, errors.New("All line should contains 6 values.")
		}
		for y, value := range lineStrings {
			switch {
			case x < 3 && y < 3:
				board.Quadrants[0][x][y] = value
			case x < 3 && y >= 3:
				board.Quadrants[1][x][y-3] = value
			case x >= 3 && y < 3:
				board.Quadrants[2][x-3][y] = value
			case x >= 3 && y >= 3:
				board.Quadrants[3][x-3][y-3] = value
			}
		}
	}
	return board, nil
}
// GetLineOfBoard returns row `line` (0-5) of the full board as a string
// by concatenating the matching rows of the left and right quadrants.
func GetLineOfBoard(board Board, line int) string {
	quadrantsLine := line % 3
	// Rows 0-2 come from quadrants 0/1, rows 3-5 from quadrants 2/3.
	quadrantNumber := 0
	if line >= 3 {
		quadrantNumber = 2
	}
	return strings.Join(board.Quadrants[quadrantNumber][quadrantsLine][:], "") +
		strings.Join(board.Quadrants[quadrantNumber+1][quadrantsLine][:], "")
}

// ToStringBoard serializes the whole board as the concatenation of its
// six rows, without any separators.
func ToStringBoard(board Board) string {
	boardStringified := ""
	for i := 0; i < 6; i++ {
		boardStringified = boardStringified + GetLineOfBoard(board, i)
	}
	return boardStringified
}
// ConvertQuadrantPositionIntoBoardPosition maps a [quadrantIndex, x, y]
// position onto [row, column] coordinates of the full 6x6 board.
// e.g. [2, 0, 2] yields [3, 2].
// It returns an error for any quadrant index outside 0-3.
func ConvertQuadrantPositionIntoBoardPosition(position [3]int) ([2]int, error) {
	quadrant, x, y := position[0], position[1], position[2]
	if quadrant < 0 || quadrant > 3 {
		// Unknown quadrant index.
		return [2]int{0, 0}, errors.New("Quadrant doesn't exist")
	}
	// Quadrants 2 and 3 sit in the bottom half (+3 rows);
	// quadrants 1 and 3 sit in the right half (+3 columns).
	rowOffset := (quadrant / 2) * 3
	colOffset := (quadrant % 2) * 3
	return [2]int{x + rowOffset, y + colOffset}, nil
}
// Transpose returns the 3x3 quadrant mirrored across its main diagonal
// (rows become columns). The input is left untouched.
func Transpose(slice [3][3]string) [3][3]string {
	var out [3][3]string
	for row := range out {
		for col := range out[row] {
			out[row][col] = slice[col][row]
		}
	}
	return out
}
// Reverse mirrors each row of the quadrant horizontally by swapping the
// first and last columns. The array parameter is a copy, so the
// caller's quadrant is unchanged.
func Reverse(quadrant [3][3]string) [3][3]string {
	for i := range quadrant {
		quadrant[i][0], quadrant[i][2] = quadrant[i][2], quadrant[i][0]
	}
	return quadrant
}
// Rotate returns the quadrant turned a quarter turn in the given
// direction. A clockwise rotation is a transpose followed by a
// horizontal mirror; applying them in the opposite order yields the
// counter-clockwise rotation.
func Rotate(quadrant [3][3]string, direction string) [3][3]string {
	if direction == constants.ROTATE_CLOCKWISE {
		return Reverse(Transpose(quadrant))
	}
	return Transpose(Reverse(quadrant))
} | src/game/game.go | 0.727685 | 0.483648 | game.go | starcoder |
package validator
import "regexp"
var postCodePatternDict = map[string]string{
"GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`,
"JE": `^JE\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
"GG": `^GY\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
"IM": `^IM\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
"US": `^\d{5}([ \-]\d{4})?$`,
"CA": `^[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJ-NPRSTV-Z][ ]?\d[ABCEGHJ-NPRSTV-Z]\d$`,
"DE": `^\d{5}$`,
"JP": `^\d{3}-\d{4}$`,
"FR": `^\d{2}[ ]?\d{3}$`,
"AU": `^\d{4}$`,
"IT": `^\d{5}$`,
"CH": `^\d{4}$`,
"AT": `^\d{4}$`,
"ES": `^\d{5}$`,
"NL": `^\d{4}[ ]?[A-Z]{2}$`,
"BE": `^\d{4}$`,
"DK": `^\d{4}$`,
"SE": `^\d{3}[ ]?\d{2}$`,
"NO": `^\d{4}$`,
"BR": `^\d{5}[\-]?\d{3}$`,
"PT": `^\d{4}([\-]\d{3})?$`,
"FI": `^\d{5}$`,
"AX": `^22\d{3}$`,
"KR": `^\d{3}[\-]\d{3}$`,
"CN": `^\d{6}$`,
"TW": `^\d{3}(\d{2})?$`,
"SG": `^\d{6}$`,
"DZ": `^\d{5}$`,
"AD": `^AD\d{3}$`,
"AR": `^([A-HJ-NP-Z])?\d{4}([A-Z]{3})?$`,
"AM": `^(37)?\d{4}$`,
"AZ": `^\d{4}$`,
"BH": `^((1[0-2]|[2-9])\d{2})?$`,
"BD": `^\d{4}$`,
"BB": `^(BB\d{5})?$`,
"BY": `^\d{6}$`,
"BM": `^[A-Z]{2}[ ]?[A-Z0-9]{2}$`,
"BA": `^\d{5}$`,
"IO": `^BBND 1ZZ$`,
"BN": `^[A-Z]{2}[ ]?\d{4}$`,
"BG": `^\d{4}$`,
"KH": `^\d{5}$`,
"CV": `^\d{4}$`,
"CL": `^\d{7}$`,
"CR": `^\d{4,5}|\d{3}-\d{4}$`,
"HR": `^\d{5}$`,
"CY": `^\d{4}$`,
"CZ": `^\d{3}[ ]?\d{2}$`,
"DO": `^\d{5}$`,
"EC": `^([A-Z]\d{4}[A-Z]|(?:[A-Z]{2})?\d{6})?$`,
"EG": `^\d{5}$`,
"EE": `^\d{5}$`,
"FO": `^\d{3}$`,
"GE": `^\d{4}$`,
"GR": `^\d{3}[ ]?\d{2}$`,
"GL": `^39\d{2}$`,
"GT": `^\d{5}$`,
"HT": `^\d{4}$`,
"HN": `^(?:\d{5})?$`,
"HU": `^\d{4}$`,
"IS": `^\d{3}$`,
"IN": `^\d{6}$`,
"ID": `^\d{5}$`,
"IL": `^\d{5}$`,
"JO": `^\d{5}$`,
"KZ": `^\d{6}$`,
"KE": `^\d{5}$`,
"KW": `^\d{5}$`,
"LA": `^\d{5}$`,
"LV": `^\d{4}$`,
"LB": `^(\d{4}([ ]?\d{4})?)?$`,
"LI": `^(948[5-9])|(949[0-7])$`,
"LT": `^\d{5}$`,
"LU": `^\d{4}$`,
"MK": `^\d{4}$`,
"MY": `^\d{5}$`,
"MV": `^\d{5}$`,
"MT": `^[A-Z]{3}[ ]?\d{2,4}$`,
"MU": `^(\d{3}[A-Z]{2}\d{3})?$`,
"MX": `^\d{5}$`,
"MD": `^\d{4}$`,
"MC": `^980\d{2}$`,
"MA": `^\d{5}$`,
"NP": `^\d{5}$`,
"NZ": `^\d{4}$`,
"NI": `^((\d{4}-)?\d{3}-\d{3}(-\d{1})?)?$`,
"NG": `^(\d{6})?$`,
"OM": `^(PC )?\d{3}$`,
"PK": `^\d{5}$`,
"PY": `^\d{4}$`,
"PH": `^\d{4}$`,
"PL": `^\d{2}-\d{3}$`,
"PR": `^00[679]\d{2}([ \-]\d{4})?$`,
"RO": `^\d{6}$`,
"RU": `^\d{6}$`,
"SM": `^4789\d$`,
"SA": `^\d{5}$`,
"SN": `^\d{5}$`,
"SK": `^\d{3}[ ]?\d{2}$`,
"SI": `^\d{4}$`,
"ZA": `^\d{4}$`,
"LK": `^\d{5}$`,
"TJ": `^\d{6}$`,
"TH": `^\d{5}$`,
"TN": `^\d{4}$`,
"TR": `^\d{5}$`,
"TM": `^\d{6}$`,
"UA": `^\d{5}$`,
"UY": `^\d{5}$`,
"UZ": `^\d{6}$`,
"VA": `^00120$`,
"VE": `^\d{4}$`,
"ZM": `^\d{5}$`,
"AS": `^96799$`,
"CC": `^6799$`,
"CK": `^\d{4}$`,
"RS": `^\d{6}$`,
"ME": `^8\d{4}$`,
"CS": `^\d{5}$`,
"YU": `^\d{5}$`,
"CX": `^6798$`,
"ET": `^\d{4}$`,
"FK": `^FIQQ 1ZZ$`,
"NF": `^2899$`,
"FM": `^(9694[1-4])([ \-]\d{4})?$`,
"GF": `^9[78]3\d{2}$`,
"GN": `^\d{3}$`,
"GP": `^9[78][01]\d{2}$`,
"GS": `^SIQQ 1ZZ$`,
"GU": `^969[123]\d([ \-]\d{4})?$`,
"GW": `^\d{4}$`,
"HM": `^\d{4}$`,
"IQ": `^\d{5}$`,
"KG": `^\d{6}$`,
"LR": `^\d{4}$`,
"LS": `^\d{3}$`,
"MG": `^\d{3}$`,
"MH": `^969[67]\d([ \-]\d{4})?$`,
"MN": `^\d{6}$`,
"MP": `^9695[012]([ \-]\d{4})?$`,
"MQ": `^9[78]2\d{2}$`,
"NC": `^988\d{2}$`,
"NE": `^\d{4}$`,
"VI": `^008(([0-4]\d)|(5[01]))([ \-]\d{4})?$`,
"VN": `^[0-9]{1,6}$`,
"PF": `^987\d{2}$`,
"PG": `^\d{3}$`,
"PM": `^9[78]5\d{2}$`,
"PN": `^PCRN 1ZZ$`,
"PW": `^96940$`,
"RE": `^9[78]4\d{2}$`,
"SH": `^(ASCN|STHL) 1ZZ$`,
"SJ": `^\d{4}$`,
"SO": `^\d{5}$`,
"SZ": `^[HLMS]\d{3}$`,
"TC": `^TKCA 1ZZ$`,
"WF": `^986\d{2}$`,
"XK": `^\d{5}$`,
"YT": `^976\d{2}$`,
}
// postCodeRegexDict caches one compiled regexp per country code,
// built from postCodePatternDict at package load.
var postCodeRegexDict = map[string]*regexp.Regexp{}

// init compiles every postcode pattern once; an invalid pattern would
// panic here (MustCompile) rather than at validation time.
func init() {
	for countryCode, pattern := range postCodePatternDict {
		postCodeRegexDict[countryCode] = regexp.MustCompile(pattern)
	}
} | vendor/github.com/go-playground/validator/v10/postcode_regexes.go | 0.512693 | 0.68431 | postcode_regexes.go | starcoder |
package nav
import (
"fmt"
"math"
"github.com/golang/geo/s1"
"github.com/golang/geo/s2"
)
// A Speed represents the change in position
// as a float64 meters per second count.
type Speed float64

// A Distance is a length stored as a float64 number of meters.
type Distance float64

// Distance units, expressed in meters.
const (
	Meters     Distance = 1
	Kilometers          = Meters * 1000
)

// Meters reports the distance as a number of meters.
func (d Distance) Meters() float64 {
	return float64(d)
}

// Kilometers reports the distance as a number of kilometers.
func (d Distance) Kilometers() float64 {
	return float64(d / Kilometers)
}

// String renders the distance with two decimals, in meters below one
// kilometer and in kilometers otherwise.
func (d Distance) String() string {
	if d >= Kilometers {
		return fmt.Sprintf("%0.2f km", d.Kilometers())
	}
	return fmt.Sprintf("%0.2f m", d)
}

// Speed units, expressed in meters per second.
const (
	MetersPerSecond   Speed = 1
	KilometersPerHour       = MetersPerSecond / 3.6
	MilesPerHour            = 0.44704
	Knots                   = 0.51444444444
)

// MetersPerSecond reports the speed as a number of meters per second.
func (s Speed) MetersPerSecond() float64 {
	return float64(s)
}
// R is the mean Earth radius used for great-circle conversions.
const R = 6371 * Kilometers

// DistanceBetweenPositions returns the great-circle distance between
// two lat/lng positions by scaling the angular separation reported by
// s2 with the Earth radius R.
func DistanceBetweenPositions(p1, p2 s2.LatLng) Distance {
	c := p1.Distance(p2)
	return Distance(R.Meters() * c.Radians())
}
// RelativeBearing returns the shortest difference between a given
// heading h, and bearing b.
// It will return a zero angle if the difference does not exceed
// a tolerance t.
// The raw difference is wrapped into (-π, π] so the result is always
// the shorter way around the circle; its sign gives the turn direction.
// See https://stackoverflow.com/a/7869457
func RelativeBearing(h, b, t s1.Angle) s1.Angle {
	r := h - b
	if r > math.Pi {
		r = r - (2 * math.Pi)
	} else if r < (-1 * math.Pi) {
		r = r + (2 * math.Pi)
	}
	// Differences within the tolerance count as "already on course".
	if r.Abs() < t {
		return 0
	}
	return r
}

/* Proud of this, but not as good as above
r := h - b
if r.Abs() > math.Pi {
	if r < 0 {
		return (h + 2*math.Pi) - b
	} else {
		return ((b + 2*math.Pi) - h) * -1
	}
}
*/
// sourced from https://github.com/google/s2geometry/blob/master/src/s2/s2earth.cc
// sourced from https://www.movable-type.co.uk/scripts
// Formula: θ = atan2( sin Δλ ⋅ cos φ2 , cos φ1 ⋅ sin φ2 − sin φ1 ⋅ cos φ2 ⋅ cos Δλ )
// where φ1,λ1 is the start point, φ2,λ2 the end point (Δλ is the difference in longitude)
//
// InitialBearing returns the initial great-circle bearing from a to b,
// normalized to [0, 2π). The x term below is an algebraically
// equivalent, numerically stabler form of the formula's denominator:
// cosφ1·sinφ2 − sinφ1·cosφ2·cosΔλ == sin(Δφ) + sinφ1·cosφ2·(1 − cosΔλ),
// using the identity 1 − cosθ = 2·hav(θ).
func InitialBearing(a, b s2.LatLng) s1.Angle {
	var (
		lat1    = a.Lat.Radians()
		cosLat2 = math.Cos(b.Lat.Radians())
		latDiff = b.Lat.Radians() - a.Lat.Radians()
		lngDiff = b.Lng.Radians() - a.Lng.Radians()
		x       = math.Sin(latDiff) + math.Sin(lat1)*cosLat2*2*haversine(lngDiff)
		y       = math.Sin(lngDiff) * cosLat2
		theta   = math.Atan2(y, x)
	)
	// Atan2 yields (-π, π]; shift negative results into [0, 2π).
	if theta < 0 {
		return s1.Angle(theta + 2*math.Pi)
	}
	return s1.Angle(theta)
}
// http://en.wikipedia.org/wiki/Haversine_formula
// Haversine(x) has very good numerical stability around zero.
// Haversine(x) == (1-cos(x))/2 == sin(x/2)^2; must be implemented with the
// second form to reap the numerical benefits.
// haversine returns hav(radians) via the sin(x/2)^2 form.
func haversine(radians float64) float64 {
	sinHalf := math.Sin(radians / 2)
	return sinHalf * sinHalf
} | pkg/nav/nav.go | 0.89662 | 0.58673 | nav.go | starcoder |
package geom

// MultiPolygon is a collection of two-dimensional geometries representing polygons
type MultiPolygon []Polygon

// MultiPolygonZ is a collection of three-dimensional geometries representing polygons
type MultiPolygonZ []PolygonZ

// MultiPolygonM is a collection of two-dimensional geometries representing polygons, with an additional value defined on each vertex
type MultiPolygonM []PolygonM

// MultiPolygonZM is a collection of three-dimensional geometries representing polygons, with an additional value defined on each vertex
type MultiPolygonZM []PolygonZM
//Envelope returns an envelope around the GeometryCollection
func (c MultiPolygon) Envelope() *Envelope {
e := NewEnvelope()
for _, g := range c {
e.Extend(g.Envelope())
}
return e
}
//Envelope returns an envelope around the GeometryCollection
func (c MultiPolygonZ) Envelope() *Envelope {
e := NewEnvelope()
for _, g := range c {
e.Extend(g.Envelope())
}
return e
}
//EnvelopeZ returns an envelope around the GeometryCollection
func (c MultiPolygonZ) EnvelopeZ() *EnvelopeZ {
e := NewEnvelopeZ()
for _, g := range c {
e.Extend(g.EnvelopeZ())
}
return e
}
//Envelope returns an envelope around the GeometryCollection
func (c MultiPolygonM) Envelope() *Envelope {
e := NewEnvelope()
for _, g := range c {
e.Extend(g.Envelope())
}
return e
}
//EnvelopeM returns an envelope around the GeometryCollection
func (c MultiPolygonM) EnvelopeM() *EnvelopeM {
e := NewEnvelopeM()
for _, g := range c {
e.Extend(g.EnvelopeM())
}
return e
}
//Envelope returns an envelope around the GeometryCollection
func (c MultiPolygonZM) Envelope() *Envelope {
e := NewEnvelope()
for _, g := range c {
e.Extend(g.Envelope())
}
return e
}
//EnvelopeZ returns an envelope around the GeometryCollection
func (c MultiPolygonZM) EnvelopeZ() *EnvelopeZ {
e := NewEnvelopeZ()
for _, g := range c {
e.Extend(g.EnvelopeZ())
}
return e
}
//EnvelopeM returns an envelope around the GeometryCollection
func (c MultiPolygonZM) EnvelopeM() *EnvelopeM {
e := NewEnvelopeM()
for _, g := range c {
e.Extend(g.EnvelopeM())
}
return e
}
//EnvelopeZM returns an envelope around the GeometryCollection
func (c MultiPolygonZM) EnvelopeZM() *EnvelopeZM {
e := NewEnvelopeZM()
for _, g := range c {
e.Extend(g.EnvelopeZM())
}
return e
}
// Clone returns a copy of the multi-polygon.
// NOTE(review): despite the original "deep copy" wording, `&c` only
// copies the outer slice header (c is the receiver copy); the
// underlying polygon data is still shared with the original — confirm
// whether callers require deep-copy semantics.
func (c MultiPolygon) Clone() Geometry {
	return &c
}

// Clone returns a copy of the multi-polygon (slice header only; the
// underlying polygon data is shared — see note on MultiPolygon.Clone).
func (c MultiPolygonZ) Clone() Geometry {
	return &c
}

// Clone returns a copy of the multi-polygon (slice header only; the
// underlying polygon data is shared — see note on MultiPolygon.Clone).
func (c MultiPolygonM) Clone() Geometry {
	return &c
}

// Clone returns a copy of the multi-polygon (slice header only; the
// underlying polygon data is shared — see note on MultiPolygon.Clone).
func (c MultiPolygonZM) Clone() Geometry {
	return &c
}
//Iterate walks over the points (and can modify in situ) the multi-polygon
func (c MultiPolygon) Iterate(f func([]Point) error) error {
for i := range c {
if err := c[i].Iterate(f); err != nil {
return err
}
}
return nil
}
//Iterate walks over the points (and can modify in situ) the multi-polygon
func (c MultiPolygonZ) Iterate(f func([]Point) error) error {
for i := range c {
if err := c[i].Iterate(f); err != nil {
return err
}
}
return nil
}
//Iterate walks over the points (and can modify in situ) the multi-polygon
func (c MultiPolygonM) Iterate(f func([]Point) error) error {
for i := range c {
if err := c[i].Iterate(f); err != nil {
return err
}
}
return nil
}
//Iterate walks over the points (and can modify in situ) the multi-polygon
func (c MultiPolygonZM) Iterate(f func([]Point) error) error {
for i := range c {
if err := c[i].Iterate(f); err != nil {
return err
}
}
return nil
} | multipolygon.go | 0.899105 | 0.734048 | multipolygon.go | starcoder |
package mat
// MulGaxpy is a General AXPY algorithm to multiply 2 matrices: for each
// row of a it accumulates scaled rows of b into the matching row of c.
// Author: @james-bowman
// NOTE(review): c is accumulated into (`+=`), not overwritten — callers
// presumably pass a zeroed c; confirm against the other kernels, some
// of which assign instead.
func MulGaxpy(c, a, b *Dense) error {
	aCols := a.Columns()
	aRows := a.Rows()
	bCols := b.Columns()
	var data = c.data
	// IKJ loop order: row of a, shared dim, column of b.
	for i := 0; i < aRows; i++ {
		for k := 0; k < aCols; k++ {
			s := a.data[i*aCols+k]
			ib := i * bCols
			kb := k * bCols
			for j := 0; j < bCols; j++ {
				data[ib+j] += s * b.data[kb+j]
			}
		}
	}
	return nil
}
// MulGonumNaivePrefetch multiples dense matrices.
// Author: @james-bowman
func MulGonumNaivePrefetch(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
row := make([]float64, aCols)
var data = c.data
for ar := 0; ar < aRows; ar++ {
for i := range row {
row[i] = a.At(ar, i)
}
for bc := 0; bc < bCols; bc++ {
var sum float64
for i, e := range row {
sum += e * b.At(i, bc)
}
di := ar*bCols + bc
data[di] = sum
}
}
return nil
}
// MulGonumNaive multiples matrices.
func MulGonumNaive(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
for ar := 0; ar < aRows; ar++ {
for bc := 0; bc < bCols; bc++ {
var sum float64
for ac := 0; ac < aCols; ac++ {
sum += a.At(ar, ac) * b.At(ac, bc)
}
c.Set(ar, bc, sum)
}
}
return nil
}
// MulMultiplePrefetch2 uses simple iteration to create the product of two matrices.
// Author: @james-bowman
func MulMultiplePrefetch2(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var data = c.data
if aRows%4 != 0 {
return MulNaiveIKJ(c, a, b)
}
var sum0 float64
var sum1 float64
var sum2 float64
var sum3 float64
buffer0 := make([]float64, aCols)
buffer1 := make([]float64, aCols)
buffer2 := make([]float64, aCols)
buffer3 := make([]float64, aCols)
for i := 0; i < aRows; i += 4 {
for j := 0; j < aCols; j++ {
buffer0[j] = a.data[i*aCols+j]
buffer1[j] = a.data[(i+1)*aCols+j]
buffer2[j] = a.data[(i+2)*aCols+j]
buffer3[j] = a.data[(i+3)*aCols+j]
}
for j := 0; j < bCols; j++ {
sum0 = 0
sum1 = 0
sum2 = 0
sum3 = 0
for k := 0; k < aCols; k++ {
bi := k*bCols + j
sum0 += buffer0[k] * b.data[bi]
sum1 += buffer1[k] * b.data[bi]
sum2 += buffer2[k] * b.data[bi]
sum3 += buffer3[k] * b.data[bi]
}
data[i*bCols+j] = sum0
data[(i+1)*bCols+j] = sum1
data[(i+2)*bCols+j] = sum2
data[(i+3)*bCols+j] = sum3
}
}
return nil
}
// MulGonumUnroll multiplies a matrix using the gonum At interface with 8 wide unrolled loop.
func MulGonumUnroll(c, a, b *Dense) error {
const stride = 8
aCols := a.Columns()
bCols := b.Columns()
var data = c.data
bounded := aCols
remainder := aCols % stride
if remainder != 0 {
bounded = aCols / stride * stride
}
for ar := 0; ar < a.Rows(); ar++ {
for bc := 0; bc < bCols; bc++ {
var sum float64
di := ar*bCols + bc
for ac := 0; ac < bounded; ac += stride {
ac0 := ac
ac1 := ac0 + 1
ac2 := ac0 + 2
ac3 := ac0 + 3
ac4 := ac0 + 4
ac5 := ac0 + 5
ac6 := ac0 + 6
ac7 := ac0 + 7
sum += a.At(ar, ac0) * b.At(ac0, bc)
sum += a.At(ar, ac1) * b.At(ac1, bc)
sum += a.At(ar, ac2) * b.At(ac2, bc)
sum += a.At(ar, ac3) * b.At(ac3, bc)
sum += a.At(ar, ac4) * b.At(ac4, bc)
sum += a.At(ar, ac5) * b.At(ac5, bc)
sum += a.At(ar, ac6) * b.At(ac6, bc)
sum += a.At(ar, ac7) * b.At(ac7, bc)
}
for ac := bounded; ac < aCols; ac++ {
sum += a.At(ar, ac) * b.At(ac, bc)
}
data[di] = sum
}
}
return nil
}
// MulNaiveIKJ uses simple iteration to create the product of two matrices.
func MulNaiveIKJ(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var data = c.data
for ar := 0; ar < aRows; ar++ {
for bc := 0; bc < bCols; bc++ {
di := ar*bCols + bc
for ac := 0; ac < aCols; ac++ {
ai := ar*aCols + ac
bi := ac*bCols + bc
data[di] += a.data[ai] * b.data[bi]
}
}
}
return nil
}
// MulNaiveIJK multiplies matrices.
func MulNaiveIJK(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var data = c.data
for ar := 0; ar < aRows; ar++ {
for ac := 0; ac < aCols; ac++ {
ai := ar*aCols + ac
for bc := 0; bc < bCols; bc++ {
di := ar*bCols + bc
bi := ac*bCols + bc
data[di] += a.data[ai] * b.data[bi]
}
}
}
return nil
}
// MulSimdIJK is lazily implemented if any of the dims aren't divisible by block size it defers to naive IJK.
func MulSimdIJK(c, a, b *Dense) error {
const blockSize = 32
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var cdata = c.data
var bdata = b.data
if aCols%blockSize != 0 || aRows%blockSize != 0 || bCols%blockSize != 0 {
return MulNaiveIJK(c, a, b)
}
for ar := 0; ar < aRows; ar += blockSize {
for ac := 0; ac < aCols; ac += blockSize {
for bc := 0; bc < bCols; bc += blockSize {
for arb := ar; arb < ar+blockSize; arb++ {
cib := arb * bCols
aib := arb * aCols
for acb := ac; acb < ac+blockSize; acb++ {
bib := acb * bCols
ai := aib + acb
s := a.data[ai]
for bcb := bc; bcb < bc+blockSize; bcb += blockSize {
ci := cib + bcb
bi := bib + bcb
WideAxpy(cdata, bdata, s, ci, bi)
//AxpyLoop(cdata, bdata, s, bc, cib, bib, blockSize)
}
}
}
}
}
}
return nil
}
// MulNaiveKIJ multiplies matrices, accumulating into c.
// NOTE(review): with i = row of a (ar), j = column of b (bc) and
// k = shared dimension (ac), the nesting below is j, then i, then k —
// i.e. JIK order despite the KIJ name. Confirm the intended naming
// convention; MulNaiveJIK shows the mirrored issue.
func MulNaiveKIJ(c, a, b *Dense) error {
	aCols := a.Columns()
	aRows := a.Rows()
	bCols := b.Columns()
	var data = c.data
	for bc := 0; bc < bCols; bc++ {
		for ar := 0; ar < aRows; ar++ {
			di := ar*bCols + bc
			for ac := 0; ac < aCols; ac++ {
				ai := ar*aCols + ac
				bi := ac*bCols + bc
				data[di] += a.data[ai] * b.data[bi]
			}
		}
	}
	return nil
}
// MulNaiveJIK multiplies matrices.
func MulNaiveJIK(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var data = c.data
for ac := 0; ac < aCols; ac++ {
for ar := 0; ar < aRows; ar++ {
ai := ar*aCols + ac
for bc := 0; bc < bCols; bc++ {
di := ar*bCols + bc
bi := ac*bCols + bc
data[di] += a.data[ai] * b.data[bi]
}
}
}
return nil
}
// MulNaiveJKI multiplies matrices.
func MulNaiveJKI(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var data = c.data
for ac := 0; ac < aCols; ac++ {
for bc := 0; bc < bCols; bc++ {
bi := ac*bCols + bc
for ar := 0; ar < aRows; ar++ {
ai := ar*aCols + ac
di := ar*bCols + bc
data[di] += a.data[ai] * b.data[bi]
}
}
}
return nil
}
// MulNaiveKJI multiplies matrices.
func MulNaiveKJI(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var data = c.data
for bc := 0; bc < bCols; bc++ {
for ac := 0; ac < aCols; ac++ {
bi := ac*bCols + bc
for ar := 0; ar < aRows; ar++ {
ai := ar*aCols + ac
di := ar*bCols + bc
data[di] += a.data[ai] * b.data[bi]
}
}
}
return nil
}
// MulBlockIJK is lazily implemented if any of the dims aren't divisible by block size it defers to naive IJK.
func MulBlockIJK(blockSize int) func(c, a, b *Dense) error {
return func(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
var data = c.data
if aCols%blockSize != 0 || aRows%blockSize != 0 || bCols%blockSize != 0 {
return MulNaiveIJK(c, a, b)
}
for ar := 0; ar < aRows; ar += blockSize {
for ac := 0; ac < aCols; ac += blockSize {
for bc := 0; bc < bCols; bc += blockSize {
for arb := ar; arb < ar+blockSize; arb++ {
dib := arb * bCols
aib := arb * aCols
for acb := ac; acb < ac+blockSize; acb++ {
bib := acb * bCols
ai := aib + acb
s := a.data[ai]
for bcb := bc; bcb < bc+blockSize; bcb++ {
di := dib + bcb
bi := bib + bcb
data[di] += s * b.data[bi]
}
}
}
}
}
}
return nil
}
}
// MulBlockUnrollIJK returns a cache-blocked IJK multiply kernel whose
// innermost loop is unrolled by a factor of 4, accumulating into c.
// It is lazily implemented: if any of the dims aren't divisible by the
// block size — or the block size is not a multiple of the unroll
// factor, which would make the unrolled loop index past the block —
// it defers to the naive IJK kernel.
// Fix: the original guard did not check blockSize%4, so an odd block
// size would read/write outside the current tile.
func MulBlockUnrollIJK(blockSize int) func(c, a, b *Dense) error {
	return func(c, a, b *Dense) error {
		aCols := a.Columns()
		aRows := a.Rows()
		bCols := b.Columns()
		var data = c.data
		// Fall back when blocking (or the 4-wide unroll) cannot tile exactly.
		if blockSize%4 != 0 || aCols%blockSize != 0 || aRows%blockSize != 0 || bCols%blockSize != 0 {
			return MulNaiveIJK(c, a, b)
		}
		for ar := 0; ar < aRows; ar += blockSize {
			for ac := 0; ac < aCols; ac += blockSize {
				for bc := 0; bc < bCols; bc += blockSize {
					for arb := ar; arb < ar+blockSize; arb++ {
						dib := arb * bCols
						aib := arb * aCols
						for acb := ac; acb < ac+blockSize; acb++ {
							bib := acb * bCols
							s := a.data[aib+acb]
							// 4-wide unrolled AXPY along the row of b.
							for bcb := bc; bcb < bc+blockSize; bcb += 4 {
								di := dib + bcb
								bi := bib + bcb
								data[di] += s * b.data[bi]
								data[di+1] += s * b.data[bi+1]
								data[di+2] += s * b.data[bi+2]
								data[di+3] += s * b.data[bi+3]
							}
						}
					}
				}
			}
		}
		return nil
	}
}
// MulBlockFetchIJK returns a cache-blocked IJK multiply kernel that
// copies the current block-row segment of a into a scratch buffer
// before the inner products, improving locality. It accumulates into c.
// It is lazily implemented: if any of the dims aren't divisible by the
// block size it defers to the naive IJK kernel.
// Fix: the row fetch previously used `ar` (the first row of the block)
// instead of `arb` (the row being computed), so every row of a block
// was multiplied with the block's first row of a.
func MulBlockFetchIJK(blockSize int) func(c, a, b *Dense) error {
	return func(c, a, b *Dense) error {
		aCols := a.Columns()
		aRows := a.Rows()
		bCols := b.Columns()
		row := make([]float64, blockSize)
		var data = c.data
		if aCols%blockSize != 0 || aRows%blockSize != 0 || bCols%blockSize != 0 {
			return MulNaiveIJK(c, a, b)
		}
		for ar := 0; ar < aRows; ar += blockSize {
			for ac := 0; ac < aCols; ac += blockSize {
				for bc := 0; bc < bCols; bc += blockSize {
					for arb := ar; arb < ar+blockSize; arb++ {
						dib := arb * bCols
						// Fetch the segment of row arb covering this block.
						aib := arb * aCols
						for i := range row {
							row[i] = a.data[aib+ac+i]
						}
						for bcb := bc; bcb < bc+blockSize; bcb++ {
							var sum float64
							for i, e := range row {
								sum += e * b.data[(ac+i)*bCols+bcb]
							}
							data[dib+bcb] += sum
						}
					}
				}
			}
		}
		return nil
	}
}
// MulUnroll uses unrolled loops to create the product of two matrices.
func MulUnroll(c, a, b *Dense) error {
aCols := a.Columns()
aRows := a.Rows()
bCols := b.Columns()
data := c.data
const stride = 4
bounded := aCols
remainder := aCols % stride
if remainder != 0 {
bounded = aCols / stride * stride
}
var sum0 float64
var sum1 float64
var sum2 float64
var sum3 float64
for ar := 0; ar < aRows; ar++ {
for bc := 0; bc < bCols; bc++ {
var sum float64
di := ar*bCols + bc
for ac := 0; ac < bounded; ac += stride {
b0 := ac*bCols + bc
b1 := b0 + bCols
b2 := b1 + bCols
b3 := b2 + bCols
a0 := ar*aCols + ac
a1 := a0 + 1
a2 := a1 + 1
a3 := a2 + 1
av0 := a.data[a0]
av1 := a.data[a1]
av2 := a.data[a2]
av3 := a.data[a3]
bv0 := b.data[b0]
bv1 := b.data[b1]
bv2 := b.data[b2]
bv3 := b.data[b3]
sum0 = av0 * bv0
sum1 = av1 * bv1
sum2 = av2 * bv2
sum3 = av3 * bv3
sum += sum0 + sum1 + sum2 + sum3
}
for ac := bounded; ac < aCols; ac++ {
ai := ar*aCols + ac
bi := ac*bCols + bc
sum += a.data[ai] * b.data[bi]
}
data[di] = sum
}
}
return nil
} | mat/product.go | 0.828592 | 0.449574 | product.go | starcoder |
package benefit
/*******************************************************************************
* The ssa package is an implementation of the algorithms that are used to
* calculate or estimate the Social Security benefits for a retiree.
*
* Here's a simple description of how it all works:
* - The Average Wage Index (AWI) Table contains the AWI for each year, going
* back to 1951.
* - The AWI is used to calculate the "indexing factor" for each year that a
* person worked and earned wages.
* - The "indexing factor" for year xxxx is calculated by dividing the AWI for
* the year the worker turned 60 by the AWI for year xxxx.
* - There is a maximum "Nominal Earnings" each year. You can see the maximum
* values in the maxWages table (max_earnings.go). If the person earned more
* than the max in a given year, then the max is used.
* - The "indexing factor" is multiplied by their nominal earnings for year xxxx
* to calculate their "indexed earnings" for that year.
* - The highest 35 "indexed earnings" are added together to produce the
* "highest-35 total".
* - The "highest-35 total" is divided by 420 to produce their "Average
* Indexed Monthly Earnings" (AIME). FYI, there are 420 months in 35 years.
*
* Here is a simple example:
* - The worker was born in 1954. We need to calculate the "indexing factor"
* for this person for the year 1976.
* - They were born in 1954, which means they turn 60 in 2014.
* - AWI(2014) = 46,481.52
* - AWI(1976) = 9,226.48
* - Indexing Factor = 46,481.52 / 9,226.48 = 5.0378.
* - Indexed Earnings for 1976 for this worker = Nominal_Earnings * 5.0378
*
******************************************************************************/
import ( "github.com/joeriggs/ssa/wages" )
// min returns the smaller of two integers.
func min(val1, val2 int) int {
	if val2 < val1 {
		return val2
	}
	return val1
}
// Benefit computes the monthly Primary Insurance Amount (PIA) for a
// worker born in year dob from their career earnings. The highest 35
// indexed annual earnings are summed and divided by 420 months to form
// the AIME, which is then run through the bend points: 90% of the
// portion up to bend1, 32% between bend1 and bend2, 15% above bend2.
func Benefit(dob int, career_earnings wages.List) int {
	var highest_earnings IndexedWages = IndexedWagesCreate()
	IndexedWagesHighestIndexedEarnings(dob, career_earnings, highest_earnings)
	var total float32 = IndexedWagesTotalIndexedEarnings(highest_earnings)
	// Average Indexed Monthly Earnings: 35 years * 12 months = 420.
	var AIME int = int(total) / (35 * 12)
	//fmt.Printf("total %9.2f : AIME %d\n", total, AIME)
	var bend1, bend2 int
	bend1, bend2, _, _, _ = BendPoints(dob)
	//fmt.Printf("bend1 = %d : bend2 = %d\n", bend1, bend2)
	var tempAIME int = AIME
	// 90% of the AIME portion below the first bend point.
	var bend1Amt int = min(bend1, tempAIME)
	var bend1BenefitFloat float32 = float32(bend1Amt) * 0.90
	var bend1Benefit int = int(bend1BenefitFloat)
	tempAIME -= bend1Amt
	//fmt.Printf("bend1Amt = %d : bend1Benefit = %d\n", bend1Amt, bend1Benefit)
	// 32% of the portion between the first and second bend points.
	var bend2Amt int = min((bend2 - bend1), tempAIME)
	var bend2BenefitFloat float32 = float32(bend2Amt) * 0.32
	var bend2Benefit int = int(bend2BenefitFloat)
	tempAIME -= bend2Amt
	//fmt.Printf("bend2Amt = %d : bend2Benefit = %d\n", bend2Amt, bend2Benefit)
	// 15% of whatever AIME remains above the second bend point.
	var moreAmt int = tempAIME
	var moreBenefitFloat float32 = float32(moreAmt) * 0.15
	var moreBenefit int = int(moreBenefitFloat)
	//fmt.Printf("moreAmt = %d : moreBenefit = %d\n", moreAmt, moreBenefit)
	var PIA int = bend1Benefit + bend2Benefit + moreBenefit
	//fmt.Printf("PIA = %d\n", PIA)
	return PIA
} | benefit/calc.go | 0.694199 | 0.481088 | calc.go | starcoder |
package fmom
import (
"fmt"
"math"
)
// Equal returns true if p1==p2, component-wise, to within a fixed
// absolute tolerance of 1e-14.
func Equal(p1, p2 P4) bool {
	return p4equal(p1, p2, 1e-14)
}

// p4equal reports whether every component (E, Px, Py, Pz) of the two
// four-momenta agrees to within epsilon.
func p4equal(p1, p2 P4, epsilon float64) bool {
	if cmpeq(p1.E(), p2.E(), epsilon) &&
		cmpeq(p1.Px(), p2.Px(), epsilon) &&
		cmpeq(p1.Py(), p2.Py(), epsilon) &&
		cmpeq(p1.Pz(), p2.Pz(), epsilon) {
		return true
	}
	return false
}
// cmpeq reports whether x and y are exactly equal or differ by less
// than epsilon. The exact comparison also lets identical infinities
// match, where the subtraction would produce NaN.
func cmpeq(x, y, epsilon float64) bool {
	return x == y || math.Abs(x-y) < epsilon
}
// Add returns the sum p1+p2.
func Add(p1, p2 P4) P4 {
// FIXME(sbinet):
// dispatch most efficient/less-lossy addition
// based on type(dst) (and, optionally, type(src))
var sum P4
switch p1 := p1.(type) {
case *PxPyPzE:
p := NewPxPyPzE(p1.Px()+p2.Px(), p1.Py()+p2.Py(), p1.Pz()+p2.Pz(), p1.E()+p2.E())
sum = &p
case *EEtaPhiM:
p := NewPxPyPzE(p1.Px()+p2.Px(), p1.Py()+p2.Py(), p1.Pz()+p2.Pz(), p1.E()+p2.E())
var pp EEtaPhiM
pp.Set(&p)
sum = &pp
case *EtEtaPhiM:
p := NewPxPyPzE(p1.Px()+p2.Px(), p1.Py()+p2.Py(), p1.Pz()+p2.Pz(), p1.E()+p2.E())
var pp EtEtaPhiM
pp.Set(&p)
sum = &pp
case *PtEtaPhiM:
p := NewPxPyPzE(p1.Px()+p2.Px(), p1.Py()+p2.Py(), p1.Pz()+p2.Pz(), p1.E()+p2.E())
var pp PtEtaPhiM
pp.Set(&p)
sum = &pp
case *IPtCotThPhiM:
p := NewPxPyPzE(p1.Px()+p2.Px(), p1.Py()+p2.Py(), p1.Pz()+p2.Pz(), p1.E()+p2.E())
var pp IPtCotThPhiM
pp.Set(&p)
sum = &pp
default:
panic(fmt.Errorf("fmom: invalid P4 concrete value: %#v", p1))
}
return sum
}
// IAdd adds src into dst, and returns dst
func IAdd(dst, src P4) P4 {
// FIXME(sbinet):
// dispatch most efficient/less-lossy addition
// based on type(dst) (and, optionally, type(src))
var sum P4
var p4 *PxPyPzE = nil
switch p1 := dst.(type) {
case *PxPyPzE:
p4 = p1
sum = dst
case *EEtaPhiM:
p := NewPxPyPzE(p1.Px(), p1.Py(), p1.Pz(), p1.E())
p4 = &p
sum = dst
case *EtEtaPhiM:
p := NewPxPyPzE(p1.Px(), p1.Py(), p1.Pz(), p1.E())
p4 = &p
sum = dst
case *PtEtaPhiM:
p := NewPxPyPzE(p1.Px(), p1.Py(), p1.Pz(), p1.E())
p4 = &p
sum = dst
case *IPtCotThPhiM:
p := NewPxPyPzE(p1.Px(), p1.Py(), p1.Pz(), p1.E())
p4 = &p
sum = dst
default:
panic(fmt.Errorf("fmom: invalid P4 concrete value: %#v", dst))
}
p4[0] += src.Px()
p4[1] += src.Py()
p4[2] += src.Pz()
p4[3] += src.E()
sum.Set(p4)
return sum
}
// Scale returns a*p
func Scale(a float64, p P4) P4 {
// FIXME(sbinet):
// dispatch most efficient/less-lossy operation
// based on type(dst) (and, optionally, type(src))
var out P4
switch p := p.(type) {
case *PxPyPzE:
dst := NewPxPyPzE(a*p.Px(), a*p.Py(), a*p.Pz(), a*p.E())
out = &dst
case *EEtaPhiM:
dst := NewPxPyPzE(a*p.Px(), a*p.Py(), a*p.Pz(), a*p.E())
var pp EEtaPhiM
pp.Set(&dst)
out = &pp
case *EtEtaPhiM:
dst := NewPxPyPzE(a*p.Px(), a*p.Py(), a*p.Pz(), a*p.E())
var pp EtEtaPhiM
pp.Set(&dst)
out = &pp
case *PtEtaPhiM:
dst := NewPxPyPzE(a*p.Px(), a*p.Py(), a*p.Pz(), a*p.E())
var pp PtEtaPhiM
pp.Set(&dst)
out = &pp
case *IPtCotThPhiM:
dst := NewPxPyPzE(a*p.Px(), a*p.Py(), a*p.Pz(), a*p.E())
var pp IPtCotThPhiM
pp.Set(&dst)
out = &pp
default:
panic(fmt.Errorf("fmom: invalid P4 concrete value: %#v", p))
}
return out
}
// InvMass computes the invariant mass of two incoming 4-vectors p1 and p2,
// i.e. the mass of their summed four-momentum.
func InvMass(p1, p2 P4) float64 {
	p := Add(p1, p2)
	return p.M()
} | fmom/ops.go | 0.640748 | 0.539529 | ops.go | starcoder |
package schema
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strconv"
)
// Formats specific to GeoPoint field type.
const (
	GeoPointArrayFormat  = "array"
	GeoPointObjectFormat = "object"
)

// GeoPoint represents a "geopoint" cell.
// Lon and Lat are the longitude and latitude as decimal numbers.
// More at: https://specs.frictionlessdata.io/table-schema/#geopoint
type GeoPoint struct {
	Lon float64 `json:"lon,omitempty"`
	Lat float64 `json:"lat,omitempty"`
}
// UnmarshalJSON sets *f to a copy of data. It will respect the default values
func (p *GeoPoint) UnmarshalJSON(data []byte) error {
type geoPointAlias struct {
Lon *float64 `json:"lon,omitempty"`
Lat *float64 `json:"lat,omitempty"`
}
var a geoPointAlias
if err := json.Unmarshal(data, &a); err != nil {
return err
}
if a.Lon == nil || a.Lat == nil {
return fmt.Errorf("Invalid geopoint:\"%s\"", string(data))
}
p.Lon = *a.Lon
p.Lat = *a.Lat
return nil
}
var (
geoPointDefaultRegexp = regexp.MustCompile(`^([-+]?[0-9]*\.?[0-9]*), ?([-+]?[0-9]*\.?[0-9]*)$`)
geoPointArrayRegexp = regexp.MustCompile(`^\[([-+]?[0-9]*\.?[0-9]+), ?([-+]?[0-9]*\.?[0-9]+)\]$`)
)
func castGeoPoint(format, value string) (GeoPoint, error) {
switch format {
case "", defaultFieldFormat:
return applyGeoPointRegexp(geoPointDefaultRegexp, value)
case GeoPointArrayFormat:
return applyGeoPointRegexp(geoPointArrayRegexp, value)
case GeoPointObjectFormat:
var p GeoPoint
if err := json.Unmarshal([]byte(value), &p); err != nil {
return GeoPoint{}, err
}
return p, nil
}
return GeoPoint{}, fmt.Errorf("invalid geopoint format:%s", format)
}
// applyGeoPointRegexp matches value against r and parses the two
// captured groups as the longitude and latitude of a GeoPoint.
// Fix: ParseFloat errors were previously discarded, so inputs such as
// "." that satisfy the regexp but are not valid numbers silently
// produced a zero coordinate; they now return an error.
func applyGeoPointRegexp(r *regexp.Regexp, value string) (GeoPoint, error) {
	matches := r.FindStringSubmatch(value)
	if len(matches) == 0 || len(matches[1]) == 0 || len(matches[2]) == 0 {
		return GeoPoint{}, fmt.Errorf("Invalid geopoint:\"%s\"", value)
	}
	lon, err := strconv.ParseFloat(matches[1], 64)
	if err != nil {
		return GeoPoint{}, fmt.Errorf("Invalid geopoint:\"%s\"", value)
	}
	lat, err := strconv.ParseFloat(matches[2], 64)
	if err != nil {
		return GeoPoint{}, fmt.Errorf("Invalid geopoint:\"%s\"", value)
	}
	return GeoPoint{lon, lat}, nil
}
// uncastGeoPoint converts a raw cell value back into its string
// representation for the given field format. For the default and array
// formats the input must already be a string and is validated against the
// corresponding regexp; for the object format it must be a schema.GeoPoint.
// Fixes: "dfault" and "Geopoint" typos in error messages, and the final
// error previously printed the value under the "type:" label and the type
// under the "value:" label (arguments were swapped).
func uncastGeoPoint(format string, gp interface{}) (string, error) {
	switch format {
	case "", defaultFieldFormat:
		value, ok := gp.(string)
		if ok {
			_, err := applyGeoPointRegexp(geoPointDefaultRegexp, value)
			if err != nil {
				return "", err
			}
			return value, nil
		}
		return "", fmt.Errorf("invalid object type to uncast to geopoint default format. want:string got:%v", reflect.TypeOf(gp).String())
	case GeoPointArrayFormat:
		value, ok := gp.(string)
		if ok {
			_, err := applyGeoPointRegexp(geoPointArrayRegexp, value)
			if err != nil {
				return "", err
			}
			return value, nil
		}
		return "", fmt.Errorf("invalid object type to uncast to geopoint %s format. want:string got:%v", GeoPointArrayFormat, reflect.TypeOf(gp).String())
	case GeoPointObjectFormat:
		value, ok := gp.(GeoPoint)
		if ok {
			return fmt.Sprintf("%+v", value), nil
		}
		return "", fmt.Errorf("invalid object type to uncast to geopoint %s format. want:schema.GeoPoint got:%v", GeoPointObjectFormat, reflect.TypeOf(gp).String())
	}
	return "", fmt.Errorf("invalid geopoint - type:%v value:\"%v\" format:%s", reflect.TypeOf(gp), gp, format)
} | schema/geopoint.go | 0.699973 | 0.404949 | geopoint.go | starcoder
package vmath
import (
"fmt"
"math"
"github.com/maja42/vmath/mathi"
)
// Vec3i is a 3-component integer vector, stored as [x, y, z].
type Vec3i [3]int
// String returns a human-readable representation like "Vec3i[1 x 2 x 3]".
func (v Vec3i) String() string {
	return fmt.Sprintf("Vec3i[%d x %d x %d]", v[0], v[1], v[2])
}
// Format the vector to a string.
// The format string must consume three integer (%d) verbs: x, y, z.
func (v Vec3i) Format(format string) string {
	return fmt.Sprintf(format, v[0], v[1], v[2])
}
// Abs returns a vector with the components turned into absolute values.
func (v Vec3i) Abs() Vec3i {
	return Vec3i{mathi.Abs(v[0]), mathi.Abs(v[1]), mathi.Abs(v[2])}
}
// Vec3f returns a float32 representation of the vector.
// (The previous comment incorrectly said "Vec2f".)
func (v Vec3i) Vec3f() Vec3f {
	return Vec3f{float32(v[0]), float32(v[1]), float32(v[2])}
}
// Vec4i creates a 4D vector with the given w component appended.
func (v Vec3i) Vec4i(w int) Vec4i {
	return Vec4i{v[0], v[1], v[2], w}
}
// Split returns the vector's components.
func (v Vec3i) Split() (x, y, z int) {
	return v[0], v[1], v[2]
}
// X returns the vector's first component.
// Performance is equivalent to using v[0].
func (v Vec3i) X() int {
	return v[0]
}
// Y returns the vector's second component.
// Performance is equivalent to using v[1].
func (v Vec3i) Y() int {
	return v[1]
}
// Z returns the vector's third component.
// Performance is equivalent to using v[2].
func (v Vec3i) Z() int {
	return v[2]
}
// XY returns a 2D vector with the X and Y components.
func (v Vec3i) XY() Vec2i {
	return Vec2i{v[0], v[1]}
}
// IsOrthogonal returns true if at least one component is zero, i.e. the
// vector is orthogonal to at least one of the X, Y or Z axes.
func (v Vec3i) IsOrthogonal() bool {
	return v[0] == 0 || v[1] == 0 || v[2] == 0
}
// Add performs component-wise addition between two vectors.
func (v Vec3i) Add(other Vec3i) Vec3i {
	return Vec3i{v[0] + other[0], v[1] + other[1], v[2] + other[2]}
}
// AddScalar performs a component-wise scalar addition.
func (v Vec3i) AddScalar(s int) Vec3i {
	return Vec3i{v[0] + s, v[1] + s, v[2] + s}
}
// AddScalarf performs a scalar addition, returning a float vector.
func (v Vec3i) AddScalarf(s float32) Vec3f {
	return Vec3f{float32(v[0]) + s, float32(v[1]) + s, float32(v[2]) + s}
}
// Sub performs component-wise subtraction between two vectors.
func (v Vec3i) Sub(other Vec3i) Vec3i {
	return Vec3i{v[0] - other[0], v[1] - other[1], v[2] - other[2]}
}
// SubScalar performs a component-wise scalar subtraction.
func (v Vec3i) SubScalar(s int) Vec3i {
	return Vec3i{v[0] - s, v[1] - s, v[2] - s}
}
// SubScalarf performs a scalar subtraction, returning a float vector.
func (v Vec3i) SubScalarf(s float32) Vec3f {
	return Vec3f{float32(v[0]) - s, float32(v[1]) - s, float32(v[2]) - s}
}
// Mul performs a component-wise multiplication.
func (v Vec3i) Mul(other Vec3i) Vec3i {
	return Vec3i{v[0] * other[0], v[1] * other[1], v[2] * other[2]}
}
// MulScalar performs a scalar multiplication.
func (v Vec3i) MulScalar(s int) Vec3i {
	return Vec3i{v[0] * s, v[1] * s, v[2] * s}
}
// MulScalarf performs a scalar multiplication, returning a float vector.
// (The previous comment duplicated "MulScalar".)
func (v Vec3i) MulScalarf(s float32) Vec3f {
	return Vec3f{float32(v[0]) * s, float32(v[1]) * s, float32(v[2]) * s}
}
// Div performs a component-wise (integer, truncating) division.
func (v Vec3i) Div(other Vec3i) Vec3i {
	return Vec3i{v[0] / other[0], v[1] / other[1], v[2] / other[2]}
}
// DivScalar performs a scalar (integer, truncating) division.
func (v Vec3i) DivScalar(s int) Vec3i {
	return Vec3i{v[0] / s, v[1] / s, v[2] / s}
}
// DivScalarf performs a scalar division, returning a float vector.
func (v Vec3i) DivScalarf(s float32) Vec3f {
	return Vec3f{float32(v[0]) / s, float32(v[1]) / s, float32(v[2]) / s}
}
// Length returns the vector's length.
// NOTE(review): the squared length is computed in int before the float
// conversion, so very large components can overflow — confirm expected
// input ranges.
func (v Vec3i) Length() float32 {
	return float32(math.Sqrt(float64(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])))
}
// SquareLength returns the vector's squared length.
func (v Vec3i) SquareLength() int {
	return v[0]*v[0] + v[1]*v[1] + v[2]*v[2]
}
// IsZero returns true if all components are zero.
func (v Vec3i) IsZero() bool {
	return v[0] == 0 && v[1] == 0 && v[2] == 0
}
// Equal compares two vectors component-wise.
func (v Vec3i) Equal(other Vec3i) bool {
	return v[0] == other[0] && v[1] == other[1] && v[2] == other[2]
}
// Clamp clamps each component to the range of [min, max].
func (v Vec3i) Clamp(min, max int) Vec3i {
	return Vec3i{
		Clampi(v[0], min, max),
		Clampi(v[1], min, max),
		Clampi(v[2], min, max),
	}
}
// Negate inverts all components.
func (v Vec3i) Negate() Vec3i {
	return Vec3i{-v[0], -v[1], -v[2]}
}
// Dot performs a dot product with another vector.
func (v Vec3i) Dot(other Vec3i) int {
	return v[0]*other[0] + v[1]*other[1] + v[2]*other[2]
}
// Cross performs a cross product with another vector.
func (v Vec3i) Cross(other Vec3i) Vec3i {
	return Vec3i{
		v[1]*other[2] - v[2]*other[1],
		v[2]*other[0] - v[0]*other[2],
		v[0]*other[1] - v[1]*other[0],
	}
}
// IsParallel returns true if the given vector is parallel (zero cross
// product). Vectors that point in opposite directions are also parallel
// (but not collinear).
func (v Vec3i) IsParallel(other Vec3i) bool {
	return v.Cross(other).SquareLength() == 0
}
// IsCollinear returns true if the given vector is collinear (parallel and
// pointing in the same direction). The integer comparison is exact; no
// tolerance is involved. (A previous comment mentioning an Epsilon was a
// leftover from the float implementation.)
func (v Vec3i) IsCollinear(other Vec3i) bool {
	return v.IsParallel(other) &&
		(v[0] >= 0) == (other[0] >= 0) && // same x direction
		(v[1] >= 0) == (other[1] >= 0) && // same y direction
		(v[2] >= 0) == (other[2] >= 0) // same z direction
}
// Distance returns the euclidean distance to another position.
func (v Vec3i) Distance(other Vec3i) float32 {
	return other.Sub(v).Length()
}
// SquareDistance returns the squared euclidean distance to another position.
func (v Vec3i) SquareDistance(other Vec3i) int {
	return other.Sub(v).SquareLength()
} | vec3i.go | 0.913061 | 0.683043 | vec3i.go | starcoder
package verzettler
// Page represents a set of items and a grid of Rows and Cols.
type Page struct {
Items []string
Rows int
Cols int
}
// NewPage creats a Page with an empty Items slice of size Rows*Cols.
func NewPage(rows, cols int) Page {
n := rows * cols
items := make([]string, n)
return Page{items, rows, cols}
}
// Sheet represents a physical sheet of paper with a Front and a Back page,
// as produced by a duplex printout.
type Sheet struct {
	Front Page
	Back Page
}
// Distribute distributes the string pairs over a sheet. The order on the front
// and back page are reversed within a row, so that a pair is correctly placed
// on a duplex printout on the resulting sheets.
//
// NOTE(review): pairs is a map, so iteration order — and therefore which
// pair lands in which cell and on which sheet — is unspecified and can
// differ between runs.
func Distribute(pairs map[string]string, rows, cols int) []Sheet {
	perPage := rows * cols
	sheets := make([]Sheet, 0)
	// Precomputed cell orders: identity for the front page, row-wise
	// mirrored for the back page (long-edge duplex flip).
	frontSeq := buildFrontPageIndexSequence(rows, cols)
	backSeq := buildBackPageIndexSequence(rows, cols)
	i := 0
	front := NewPage(rows, cols)
	back := NewPage(rows, cols)
	for key, value := range pairs {
		front.Items[frontSeq[i]] = key
		back.Items[backSeq[i]] = value
		i++
		if i == perPage {
			// Current sheet is full; flush it and start a fresh one.
			sheets = append(sheets, Sheet{front, back})
			front = NewPage(rows, cols)
			back = NewPage(rows, cols)
			i = 0
		}
	}
	if i > 0 {
		// Flush the final, partially filled sheet.
		sheets = append(sheets, Sheet{front, back})
	}
	return sheets
}
}
// front: enumerate by column, then by row
// |-------|
// | 0 | 1 |
// |---|---|
// | 2 | 3 |
// |---|---|
// | 4 | 5 |
// |---|---|
// | 6 | 7 |
// |---|---|
func buildFrontPageIndexSequence(rows, cols int) []int {
cells := rows * cols
indexSequence := make([]int, cells)
for i := 0; i < cells; i++ {
indexSequence[i] = i
}
return indexSequence
}
// back: flip on long edge
// |-------|
// | 1 | 0 |
// |---|---|
// | 3 | 2 |
// |---|---|
// | 5 | 4 |
// |---|---|
// | 7 | 6 |
// |---|---|
// buildBackPageIndexSequence returns, for each fill step, the target cell
// index on the back page: columns are mirrored within each row so that a
// long-edge duplex print aligns front and back cells.
func buildBackPageIndexSequence(rows, cols int) []int {
	cells := rows * cols
	indexSequence := make([]int, cells)
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			// Mirror the column within row r: c -> cols-1-c.
			i := cols - 1 - c + r*cols
			indexSequence[r*cols+c] = i
		}
	}
	return indexSequence
} | paging.go | 0.756447 | 0.427098 | paging.go | starcoder
package mesh
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"github.com/weqqr/panorama/lm"
)
// parseVector3 parses the first three whitespace-split fields as float32
// components of an lm.Vector3. Extra fields are ignored.
func parseVector3(fields []string) (lm.Vector3, error) {
	if len(fields) < 3 {
		return lm.Vector3{}, fmt.Errorf("expected at least 3 vector elements, found %d", len(fields))
	}
	var comps [3]float64
	for i := 0; i < 3; i++ {
		f, err := strconv.ParseFloat(fields[i], 32)
		if err != nil {
			return lm.Vector3{}, err
		}
		comps[i] = f
	}
	return lm.Vec3(float32(comps[0]), float32(comps[1]), float32(comps[2])), nil
}
// parseVector2 parses the first two whitespace-split fields as float32
// components of an lm.Vector2. Extra fields are ignored.
// Fix: the error message previously claimed "at least 3 vector elements"
// although only 2 are required here.
func parseVector2(fields []string) (lm.Vector2, error) {
	if len(fields) < 2 {
		return lm.Vector2{}, fmt.Errorf("expected at least 2 vector elements, found %d", len(fields))
	}
	x, err := strconv.ParseFloat(fields[0], 32)
	if err != nil {
		return lm.Vector2{}, err
	}
	y, err := strconv.ParseFloat(fields[1], 32)
	if err != nil {
		return lm.Vector2{}, err
	}
	return lm.Vec2(float32(x), float32(y)), nil
}
// Triplet is one face corner from an OBJ "f" statement ("v", "v/vt" or
// "v/vt/vn"): a 1-based position index plus optional texture-coordinate and
// normal indices (nil when absent).
type Triplet struct {
	positionIndex int
	texcoordIndex *int
	normalIndex   *int
}

// parseFace parses the corner fields of an "f" statement into Triplets.
// At least three corners are required to form a polygon.
func parseFace(fields []string) ([]Triplet, error) {
	if len(fields) < 3 {
		return []Triplet{}, fmt.Errorf("face needs at least 3 fields, got %v", len(fields))
	}
	corners := []Triplet{}
	for _, corner := range fields {
		indices := strings.SplitN(corner, "/", 3)
		var t Triplet
		pos, err := strconv.Atoi(indices[0])
		if err != nil {
			return []Triplet{}, err
		}
		t.positionIndex = pos
		if len(indices) > 1 && indices[1] != "" {
			tex, err := strconv.Atoi(indices[1])
			if err != nil {
				return []Triplet{}, err
			}
			t.texcoordIndex = &tex
		}
		if len(indices) > 2 && indices[2] != "" {
			norm, err := strconv.Atoi(indices[2])
			if err != nil {
				return []Triplet{}, err
			}
			t.normalIndex = &norm
		}
		corners = append(corners, t)
	}
	return corners, nil
}
// objParser accumulates vertex data while OBJ lines are processed and
// builds the resulting mesh.
type objParser struct {
	positions []lm.Vector3
	texcoords []lm.Vector2
	normals []lm.Vector3
	mesh Mesh
}
// vertexAt resolves a face corner (Triplet) into a concrete Vertex.
// OBJ indices are 1-based, hence the -1 offsets. Missing texcoord/normal
// indices yield zero values.
func (o *objParser) vertexAt(triplet Triplet) Vertex {
	texcoord := lm.Vector2{}
	normal := lm.Vector3{}
	if triplet.texcoordIndex != nil {
		texcoord = o.texcoords[*triplet.texcoordIndex-1]
	}
	if triplet.normalIndex != nil {
		normal = o.normals[*triplet.normalIndex-1]
	}
	return Vertex{
		Position: o.positions[triplet.positionIndex-1],
		Texcoord: texcoord,
		Normal: normal,
	}
}
// triangulatePolygon converts a polygon into a triangle fan anchored at the
// first corner: (v0, v1, v2), (v0, v2, v3), ... This is valid for convex
// polygons.
func (o *objParser) triangulatePolygon(triplets []Triplet) []Vertex {
	vertices := []Vertex{}
	origin := o.vertexAt(triplets[0])
	for i := 2; i < len(triplets); i++ {
		vertices = append(vertices, origin, o.vertexAt(triplets[i-1]), o.vertexAt(triplets[i]))
	}
	return vertices
}
// processLine handles a single OBJ line: vertex data ("v", "vt", "vn") is
// accumulated, faces ("f") are triangulated and appended to the mesh;
// comments, blank lines and unknown attributes are ignored.
func (o *objParser) processLine(line string) error {
	// Skip comments
	if strings.HasPrefix(line, "#") {
		return nil
	}
	fields := strings.Fields(line)
	// Skip empty lines
	if len(fields) == 0 {
		return nil
	}
	switch fields[0] {
	case "v":
		position, err := parseVector3(fields[1:])
		if err != nil {
			return err
		}
		o.positions = append(o.positions, position)
	case "vt":
		texcoord, err := parseVector2(fields[1:])
		if err != nil {
			return err
		}
		o.texcoords = append(o.texcoords, texcoord)
	case "vn":
		normal, err := parseVector3(fields[1:])
		if err != nil {
			return err
		}
		o.normals = append(o.normals, normal)
	case "f":
		triplets, err := parseFace(fields[1:])
		if err != nil {
			return err
		}
		vertices := o.triangulatePolygon(triplets)
		o.mesh.Vertices = append(o.mesh.Vertices, vertices...)
	default:
		// Unknown attributes (o, g, s, mtllib, ...) are intentionally ignored.
		// log.Printf("unknown attribute %v; ignoring\n", fields[0])
	}
	return nil
}
// LoadOBJ reads a Wavefront OBJ file at path and returns a Model holding a
// single mesh with all parsed faces.
// Fix: the line counter previously started at 1 and was incremented before
// processing (off by one) and was never actually used; it now annotates
// parse errors with the file name and the correct 1-based line number.
func LoadOBJ(path string) (Model, error) {
	file, err := os.Open(path)
	if err != nil {
		return Model{}, err
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	parser := objParser{
		positions: []lm.Vector3{},
		texcoords: []lm.Vector2{},
		normals: []lm.Vector3{},
		mesh: NewMesh(),
	}
	lineNumber := 0
	for scanner.Scan() {
		lineNumber++
		if err := parser.processLine(scanner.Text()); err != nil {
			return Model{}, fmt.Errorf("%s:%d: %w", path, lineNumber, err)
		}
	}
	if err := scanner.Err(); err != nil {
		return Model{}, err
	}
	model := NewModel()
	model.Meshes = append(model.Meshes, parser.mesh)
	return model, nil
} | mesh/obj.go | 0.584745 | 0.522324 | obj.go | starcoder
package util
import (
"math"
"sort"
"strconv"
"github.com/zvandehy/DataTrain/nba_graphql/graph/model"
)
//SimilarPlayers uses the euclidean distance formula to calculate the similarity between the given list of players and the target player
// and returns the 10 closest players to the target player "toPlayer"
//
// NOTE(review): candidates are stored in a map keyed by (nudged) distance,
// so when two candidates collide at the same distance, which one ends up
// closer depends on map iteration order and can vary between runs.
func SimilarPlayers(players []model.PlayerAverage, toPlayer model.PlayerAverage) []*model.Player {
	// map an adjusted euclidean distance to the player at that distance
	var playerDistances map[float64]model.Player = make(map[float64]model.Player, len(players))
	for _, player := range players {
		// Never compare the target player against themselves.
		if player.Player.PlayerID == toPlayer.Player.PlayerID {
			continue
		}
		if toPlayer.GamesPlayed >= 30 {
			// For established players, only consider candidates with a
			// comparable sample size and playing time.
			if player.GamesPlayed < 30 || player.Minutes < toPlayer.Minutes-5 {
				continue
			}
		}
		distance := EuclideanDistance(player, toPlayer)
		// Nudge the distance in tiny increments until it is unique, since
		// the map can hold only one player per distance value.
		_, found := playerDistances[distance]
		for found {
			distance += 0.00001
			_, found = playerDistances[distance]
		}
		playerDistances[distance] = player.Player
	}
	var distances []float64
	for k := range playerDistances {
		distances = append(distances, k)
	}
	sort.Float64s(distances)
	// Collect the players belonging to the 10 smallest distances.
	var closestPlayers []*model.Player = make([]*model.Player, 0, 10)
	for i, distance := range distances {
		player := playerDistances[distance]
		closestPlayers = append(closestPlayers, &player)
		if i == 9 {
			break
		}
	}
	return closestPlayers
}
// EuclideanDistance returns the euclidean distance between two players'
// stat lines: the square root of the summed squared differences over the
// per-game averages plus height (in inches) and weight.
func EuclideanDistance(player1 model.PlayerAverage, player2 model.PlayerAverage) float64 {
	sumDistance := distance(player1.Points, player2.Points) +
		distance(player1.Assists, player2.Assists) +
		distance(player1.Rebounds, player2.Rebounds) +
		distance(player1.Steals, player2.Steals) +
		distance(player1.Blocks, player2.Blocks) +
		distance(player1.Turnovers, player2.Turnovers) +
		distance(player1.Minutes, player2.Minutes) +
		distance(player1.FieldGoalsMade, player2.FieldGoalsMade) +
		distance(player1.FieldGoalsAttempted, player2.FieldGoalsAttempted) +
		distance(player1.ThreePointersMade, player2.ThreePointersMade) +
		distance(player1.ThreePointersAttempted, player2.ThreePointersAttempted) +
		distance(player1.FreeThrowsMade, player2.FreeThrowsMade) +
		distance(player1.FreeThrowsAttempted, player2.FreeThrowsAttempted) +
		distance(player1.Usage, player2.Usage) +
		distance(heightInches(player1.Player.Height), heightInches(player2.Player.Height)) +
		distance(float64(player1.Player.Weight), float64(player2.Player.Weight))
	return math.Sqrt(sumDistance)
}
// distance returns the squared difference (p-q)^2.
// Direct multiplication replaces the previous math.Pow(p-q, 2) call, which
// is both slower and less clear for squaring.
func distance(p float64, q float64) float64 {
	d := p - q
	return d * d
}
// heightInches converts a height string of the form "F-I" (feet, separator,
// inches — e.g. "6-10") into total inches. Malformed or too-short input
// yields 0.
// Fixes: the previous version read exactly one inches character, so "6-10"
// parsed as 6'1" (73 instead of 82), and it panicked (index out of range)
// on strings shorter than 3 characters.
func heightInches(height string) float64 {
	if len(height) < 3 {
		return 0
	}
	feet, err := strconv.ParseFloat(height[:1], 64)
	if err != nil {
		return 0
	}
	inches, err := strconv.ParseFloat(height[2:], 64)
	if err != nil {
		return 0
	}
	return feet*12 + inches
} | nba_graphql/util/similarity.go | 0.750918 | 0.442094 | similarity.go | starcoder
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_bayesian_linear_regression
#include <capi/bayesian_linear_regression.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// BayesianLinearRegressionOptionalParam holds the optional parameters for
// BayesianLinearRegression(). A zero value means "not passed" to the
// underlying mlpack binding.
// NOTE(review): this file follows mlpack's generated Go-binding pattern —
// confirm before hand-editing field order or defaults.
type BayesianLinearRegressionOptionalParam struct {
	Center bool
	Input *mat.Dense
	InputModel *bayesianLinearRegression
	Responses *mat.Dense
	Scale bool
	Test *mat.Dense
	Verbose bool
}
// BayesianLinearRegressionOptions returns a parameter struct with every
// option explicitly set to its default (unset) value.
func BayesianLinearRegressionOptions() *BayesianLinearRegressionOptionalParam {
	return &BayesianLinearRegressionOptionalParam{
		Center: false,
		Input: nil,
		InputModel: nil,
		Responses: nil,
		Scale: false,
		Test: nil,
		Verbose: false,
	}
}
/*
An implementation of the bayesian linear regression.
This model is a probabilistic view and implementation of the linear
regression. The final solution is obtained by computing a posterior
distribution from gaussian likelihood and a zero mean gaussian isotropic
prior distribution on the solution.
Optimization is AUTOMATIC and does not require cross validation. The
optimization is performed by maximization of the evidence function. Parameters
are tuned during the maximization of the marginal likelihood. This procedure
includes the Ockham's razor that penalizes over complex solutions.
This program is able to train a Bayesian linear regression model or load a
model from file, output regression predictions for a test set, and save the
trained model to a file.
To train a BayesianLinearRegression model, the "Input" and
"Responses"parameters must be given. The "Center"and "Scale" parameters
control the centering and the normalizing options. A trained model can be
saved with the "OutputModel". If no training is desired at all, a model can be
passed via the "InputModel" parameter.
The program can also provide predictions for test data using either the
trained model or the given input model. Test points can be specified with the
"Test" parameter. Predicted responses to the test points can be saved with
the "Predictions" output parameter. The corresponding standard deviation can
be save by precising the "Stds" parameter.
For example, the following command trains a model on the data data and
responses responseswith center set to true and scale set to false (so,
Bayesian linear regression is being solved, and then the model is saved to
blr_model:
// Initialize optional parameters for BayesianLinearRegression().
param := mlpack.BayesianLinearRegressionOptions()
param.Input = data
param.Responses = responses
param.Center = 1
param.Scale = 0
blr_model, _, _ := mlpack.BayesianLinearRegression(param)
The following command uses the blr_model to provide predicted responses for
the data test and save those responses to test_predictions:
// Initialize optional parameters for BayesianLinearRegression().
param := mlpack.BayesianLinearRegressionOptions()
param.InputModel = &blr_model
param.Test = test
_, test_predictions, _ := mlpack.BayesianLinearRegression(param)
Because the estimator computes a predictive distribution instead of a simple
point estimate, the "Stds" parameter allows one to save the prediction
uncertainties:
// Initialize optional parameters for BayesianLinearRegression().
param := mlpack.BayesianLinearRegressionOptions()
param.InputModel = &blr_model
param.Test = test
_, test_predictions, stds := mlpack.BayesianLinearRegression(param)
Input parameters:
- Center (bool): Center the data and fit the intercept if enabled.
- Input (mat.Dense): Matrix of covariates (X).
- InputModel (bayesianLinearRegression): Trained
BayesianLinearRegression model to use.
- Responses (mat.Dense): Matrix of responses/observations (y).
- Scale (bool): Scale each feature by their standard deviations if
enabled.
- Test (mat.Dense): Matrix containing points to regress on (test
points).
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
Output parameters:
- outputModel (bayesianLinearRegression): Output
BayesianLinearRegression model.
- predictions (mat.Dense): If --test_file is specified, this file is
where the predicted responses will be saved.
- stds (mat.Dense): If specified, this is where the standard deviations
of the predictive distribution will be saved.
*/
// BayesianLinearRegression marshals the given optional parameters into the
// mlpack parameter store, invokes the C binding, and unmarshals the
// outputs. The sequence (reset/restore settings, set params, run, read
// outputs, clear settings) mirrors mlpack's generated-binding pattern and
// its order must be preserved.
func BayesianLinearRegression(param *BayesianLinearRegressionOptionalParam) (bayesianLinearRegression, *mat.Dense, *mat.Dense) {
	resetTimers()
	enableTimers()
	disableBacktrace()
	disableVerbose()
	restoreSettings("BayesianLinearRegression")
	// Detect if the parameter was passed; set if so.
	if param.Center != false {
		setParamBool("center", param.Center)
		setPassed("center")
	}
	// Detect if the parameter was passed; set if so.
	if param.Input != nil {
		gonumToArmaMat("input", param.Input)
		setPassed("input")
	}
	// Detect if the parameter was passed; set if so.
	if param.InputModel != nil {
		setBayesianLinearRegression("input_model", param.InputModel)
		setPassed("input_model")
	}
	// Detect if the parameter was passed; set if so.
	if param.Responses != nil {
		gonumToArmaRow("responses", param.Responses)
		setPassed("responses")
	}
	// Detect if the parameter was passed; set if so.
	if param.Scale != false {
		setParamBool("scale", param.Scale)
		setPassed("scale")
	}
	// Detect if the parameter was passed; set if so.
	if param.Test != nil {
		gonumToArmaMat("test", param.Test)
		setPassed("test")
	}
	// Detect if the parameter was passed; set if so.
	if param.Verbose != false {
		setParamBool("verbose", param.Verbose)
		setPassed("verbose")
		enableVerbose()
	}
	// Mark all output options as passed.
	setPassed("output_model")
	setPassed("predictions")
	setPassed("stds")
	// Call the mlpack program.
	C.mlpackBayesianLinearRegression()
	// Initialize result variable and get output.
	var outputModel bayesianLinearRegression
	outputModel.getBayesianLinearRegression("output_model")
	var predictionsPtr mlpackArma
	predictions := predictionsPtr.armaToGonumMat("predictions")
	var stdsPtr mlpackArma
	stds := stdsPtr.armaToGonumMat("stds")
	// Clear settings.
	clearSettings()
	// Return output(s).
	return outputModel, predictions, stds
} | bayesian_linear_regression.go | 0.722918 | 0.606906 | bayesian_linear_regression.go | starcoder
package cornellbox
import (
"math"
"github.com/peterstace/grayt/scene"
"github.com/peterstace/grayt/scene/dsl"
"github.com/peterstace/grayt/xmath"
)
// CornellCam returns a camera placed at (0.5, 0.5, d), looking at the
// centre of the box at (0.5, 0.5, -0.5).
// The field of view is chosen so the box's unit-wide front face (at z=0)
// exactly spans the view: the half-angle from the camera to the face's edge
// is asin(0.5 / sqrt(0.25 + d*d)), i.e. half-width 0.5 over the hypotenuse
// at distance d.
func CornellCam(d float64) scene.Camera {
	c := dsl.DefaultCamera()
	c.Location = xmath.Vect(0.5, 0.5, d)
	c.LookingAt = xmath.Vect(0.5, 0.5, -0.5)
	c.FieldOfViewInRadians = 2 * math.Asin(0.5/math.Sqrt(0.25+d*d))
	return c
}
// Axis-aligned surfaces forming the Cornell box interior: x and y span
// [0, 1] and z spans [-1, 0].
var (
	CornellFloor = dsl.AlignedSquare(xmath.Vect(0, 0, 0), xmath.Vect(1, 0, -1))
	CornellCeiling = dsl.AlignedSquare(xmath.Vect(0, 1, 0), xmath.Vect(1, 1, -1))
	CornellBackWall = dsl.AlignedSquare(xmath.Vect(0, 0, -1), xmath.Vect(1, 1, -1))
	CornellLeftWall = dsl.AlignedSquare(xmath.Vect(0, 0, 0), xmath.Vect(0, 1, -1))
	CornellRightWall = dsl.AlignedSquare(xmath.Vect(1, 0, 0), xmath.Vect(1, 1, -1))
)
// CornellCeilingLight returns a thin box hanging just below the ceiling
// (y from 0.999 to 1.0), intended as the light source; the emissive
// material is presumably assigned elsewhere. size is the corner coordinate,
// so the panel spans [1-size, size] in x and mirrors that in z — with
// size=0.9 it covers most of the ceiling.
func CornellCeilingLight() scene.Surface {
	const size = 0.9
	return scene.Surface{
		AlignedBoxes: []scene.AlignedBox{{
			CornerA: xmath.Vect(size, 1.0, -size),
			CornerB: xmath.Vect(1.0-size, 0.999, -1.0+size),
		}},
	}
}
// CornellShortBlock returns the short (0.3 high) box of the classic Cornell
// scene as five quads: the top face plus the four sides. The bottom face
// (y=0) is omitted, as the block stands directly on the floor.
func CornellShortBlock() scene.Surface {
	var (
		// Left/Right, Top/Bottom, Front/Back.
		LBF = xmath.Vect(0.76, 0.00, -0.12)
		LBB = xmath.Vect(0.85, 0.00, -0.41)
		RBF = xmath.Vect(0.47, 0.00, -0.21)
		RBB = xmath.Vect(0.56, 0.00, -0.49)
		LTF = xmath.Vect(0.76, 0.30, -0.12)
		LTB = xmath.Vect(0.85, 0.30, -0.41)
		RTF = xmath.Vect(0.47, 0.30, -0.21)
		RTB = xmath.Vect(0.56, 0.30, -0.49)
	)
	return dsl.MergeSurfaces(
		dsl.Square(LTF, LTB, RTB, RTF), // top
		dsl.Square(LBF, RBF, RTF, LTF), // front
		dsl.Square(LBB, RBB, RTB, LTB), // back
		dsl.Square(LBF, LBB, LTB, LTF), // left
		dsl.Square(RBF, RBB, RTB, RTF), // right
	)
}
// CornellTallBlock returns the tall (0.6 high) box of the classic Cornell
// scene, built the same way as CornellShortBlock: top plus four sides, no
// bottom face.
func CornellTallBlock() scene.Surface {
	var (
		// Left/Right, Top/Bottom, Front/Back.
		LBF = xmath.Vect(0.52, 0.00, -0.54)
		LBB = xmath.Vect(0.43, 0.00, -0.83)
		RBF = xmath.Vect(0.23, 0.00, -0.45)
		RBB = xmath.Vect(0.14, 0.00, -0.74)
		LTF = xmath.Vect(0.52, 0.60, -0.54)
		LTB = xmath.Vect(0.43, 0.60, -0.83)
		RTF = xmath.Vect(0.23, 0.60, -0.45)
		RTB = xmath.Vect(0.14, 0.60, -0.74)
	)
	return dsl.MergeSurfaces(
		dsl.Square(LTF, LTB, RTB, RTF), // top
		dsl.Square(LBF, RBF, RTF, LTF), // front
		dsl.Square(LBB, RBB, RTB, LTB), // back
		dsl.Square(LBF, LBB, LTB, LTF), // left
		dsl.Square(RBF, RBB, RTB, RTF), // right
	)
} | scene/cornellbox/shared.go | 0.571288 | 0.532182 | shared.go | starcoder
package main
import "fmt"
// tag::ops[]
const (
	// Ops: the ALU instructions of the AoC day 24 "MONAD" program.
	opInp = 'i'
	opAdd = 'a'
	opMul = 'm'
	opDiv = 'd'
	opMod = 'o'
	opEql = 'e'
	// Registers.
	inReg = 'w' // the only register input is allowed to target
	regW = 'w'
	regX = 'x'
	regY = 'y'
	regZ = 'z'
)
// op is a single ALU instruction: act is the operation, reg the target
// register, and dat the operand — an int literal, a rune naming a source
// register, or nil (no operand / wildcard in comparisons).
type op struct {
	act rune
	reg rune
	dat interface{}
}
// String returns a string representation such as "a -> x : 5" (int
// operand), "a -> x : w" (register operand) or "i -> w" (no operand).
func (o op) String() string {
	switch dat := o.dat.(type) {
	case int:
		return fmt.Sprintf("%c -> %c : %d", o.act, o.reg, dat)
	case rune:
		return fmt.Sprintf("%c -> %c : %c", o.act, o.reg, dat)
	default:
		return fmt.Sprintf("%c -> %c", o.act, o.reg)
	}
}
// Compare two ops. If the data is nil, it is excluded from the comparison —
// i.e. a nil dat on the receiver acts as a wildcard matching any operand.
func (o op) cmp(other op) bool {
	if o.act != other.act {
		return false
	}
	if o.reg != other.reg {
		return false
	}
	if o.dat != nil && o.dat != other.dat {
		return false
	}
	return true
}
// This is an example block of operations that shows the expected order of operations:
// mul x 0
// add x z
// mod x 26
// div z 26 => 26 is called "divVal" below, the value "nil" in the expected ops signifies this.
// add x -9 => -9 is called "addX" below, the value "nil" in the expected ops signifies this.
// eql x w
// eql x 0
// mul y 0
// add y 25
// mul y x
// add y 1
// mul z y
// mul y 0
// add y w
// add y 5 => 5 is called "addY" below, the value "nil" in the expected ops signifies this.
// mul y x
// add z y
// Convert a set of ops between input operations into a neat function.
// (Fix: the "unequal number of ops" error message was misspelled "unqeual".)
//nolint:gomnd,funlen
func getFunc(ops []op) (fn, error) {
	var f fn
	vals := []int{}
	// Sanity check the expected order of operations. Also extract the values we need.
	expectedOps := []op{
		op{act: opMul, reg: regX, dat: 0},
		op{act: opAdd, reg: regX, dat: regZ},
		op{act: opMod, reg: regX, dat: 26},
		op{act: opDiv, reg: regZ, dat: nil},
		op{act: opAdd, reg: regX, dat: nil},
		op{act: opEql, reg: regX, dat: regW},
		op{act: opEql, reg: regX, dat: 0},
		op{act: opMul, reg: regY, dat: 0},
		op{act: opAdd, reg: regY, dat: 25},
		op{act: opMul, reg: regY, dat: regX},
		op{act: opAdd, reg: regY, dat: 1},
		op{act: opMul, reg: regZ, dat: regY},
		op{act: opMul, reg: regY, dat: 0},
		op{act: opAdd, reg: regY, dat: regW},
		op{act: opAdd, reg: regY, dat: nil},
		op{act: opMul, reg: regY, dat: regX},
		op{act: opAdd, reg: regZ, dat: regY},
	}
	if len(ops) != len(expectedOps) {
		return nil, fmt.Errorf("unequal number of ops, cannot process")
	}
	for opIdx, inOp := range ops {
		expectedOp := expectedOps[opIdx]
		if !expectedOp.cmp(inOp) {
			return nil, fmt.Errorf(
				"unexpected op '%s' at line %d, wanted '%s'",
				inOp, opIdx+1, expectedOp,
			)
		}
		if expectedOp.dat == nil {
			// A nil expectation is a wildcard: this is one of the three
			// per-digit constants we need to extract.
			converted, ok := inOp.dat.(int)
			if !ok {
				return nil, fmt.Errorf("failure in integer conversion")
			}
			vals = append(vals, converted)
		}
	}
	if len(vals) != 3 {
		return nil, fmt.Errorf("cannot extract enough variables from op input")
	}
	divVal := vals[0]
	addX := vals[1]
	addY := vals[2]
	// There are basically two different kinds of functions. Ones that divide z by 26 and ones that
	// divide z by 1 as per divVal. The first kind is a bit more complex and will involve comparing
	// the value of "z mod 26 + addX" with the input digit. The second type is a lot simpler. Using
	// some math, it can easily be deduced that those functions do not use the addX value at all.
	// The value will simply be consumed by the modulo or integer division operation, I can't recall
	// which one.
	if divVal == 1 {
		f = func(s acl, dig int) acl {
			// This function does not need the addX value.
			return acl{z: s.z*26 + dig + addY}
		}
	} else {
		f = func(s acl, dig int) acl {
			var val int
			if s.z%26+addX != dig {
				val = 1
			}
			return acl{z: s.z/26*(25*val+1) + (dig+addY)*val}
		}
	}
	return f, nil
}
// opsToFuncs partitions the op list at each input instruction and converts
// every partition (the ops between two inputs) into a closure via getFunc.
// All input must target the w register.
//nolint:nestif
func opsToFuncs(ops []op) ([]fn, error) {
	funcs := []fn{}
	// Separate ops into partitions that start with an input operation. Furthermore, sanity check
	// whether all input happens to the w register.
	partition := []op{}
	for _, o := range ops {
		if o.act == opInp {
			if o.reg != inReg {
				return []fn{}, fmt.Errorf("input to a non-w register detected")
			}
			if len(partition) > 0 {
				f, err := getFunc(partition)
				if err != nil {
					return []fn{}, err
				}
				funcs = append(funcs, f)
				partition = []op{}
			}
		} else {
			partition = append(partition, o)
		}
	}
	// Convert the trailing partition following the final input op.
	f, err := getFunc(partition)
	if err != nil {
		return []fn{}, err
	}
	funcs = append(funcs, f)
	return funcs, nil
}
// end::ops[] | day24/go/razziel89/ops.go | 0.536313 | 0.582432 | ops.go | starcoder |
package enigmamachine
import (
"fmt"
"strings"
)
// MachineSetup describes the setup of an Enigma Machine. This describes the
// Reflector, Rotors and Plugboard setup. This passed to New to describe the
// setup of a new Machine instance. RingPositions must contain one entry per
// rotor.
type MachineSetup struct {
	Reflector ReflectorSpec
	Rotors []RotorSpec
	RingPositions []int
	Plugboard PlugboardSpec
}
// Machine represents an Enigma Machine: the rotor chain plus the plugboard
// (which holds the entry point of the signal path).
type Machine struct {
	rotors []*Rotor
	plugboard Plugboard
}
// New constructs an enigma machine with given MachineSetup. err will be
// non-nil if the given configuration is invalid (fewer than 3 rotors, or a
// rotor/ring-position count mismatch). The translators are chained so that
// rotors[0] feeds the reflector and the plugboard feeds the last rotor.
// Fix: errors are now wrapped with %w instead of being flattened via
// err.Error(), so callers can inspect the underlying error with
// errors.Is/errors.As.
func New(s MachineSetup) (m *Machine, err error) {
	m = &Machine{}
	t, err := NewReflector(s.Reflector)
	if err != nil {
		return nil, fmt.Errorf("Reflector error: %w", err)
	}
	var next LetterTranslator = t
	if len(s.Rotors) < 3 {
		return nil, fmt.Errorf("Minimum 3 rotors required, have %d", len(s.Rotors))
	}
	if len(s.Rotors) != len(s.RingPositions) {
		return nil, fmt.Errorf("RingPosition mismatch: %d rotors and %d ring positions", len(s.Rotors), len(s.RingPositions))
	}
	for i, rs := range s.Rotors {
		r, err := NewRotor(rs, s.RingPositions[i], next)
		if err != nil {
			return nil, fmt.Errorf("Rotor error: %w", err)
		}
		m.rotors = append(m.rotors, r)
		next = r
	}
	m.plugboard, err = NewPlugboard(s.Plugboard, next)
	if err != nil {
		return nil, fmt.Errorf("Plugboard error: %w", err)
	}
	return m, nil
}
// SetPositions sets each rotor's position to the corresponding letter.
// Positions beyond the number of rotors are ignored; if fewer positions
// than rotors are given, the remaining rotors are left unchanged.
func (m *Machine) SetPositions(positions []rune) {
	n := len(positions)
	if len(m.rotors) < n {
		n = len(m.rotors)
	}
	for i := 0; i < n; i++ {
		m.rotors[i].SetPosition(positions[i])
	}
}
// advanceRotors steps the rotors as happens before each key press: the last
// (fastest) rotor always advances; the middle rotor advances when the fast
// rotor is at its notch OR when it is at its own notch — the latter is the
// Enigma "double-stepping" behaviour; the third rotor from the end advances
// when the middle rotor is at its notch. Only the last three rotors take
// part in stepping.
func (m *Machine) advanceRotors() {
	l := len(m.rotors)
	if m.rotors[l-2].AtNotch() {
		m.rotors[l-3].AdvancePosition()
	}
	if m.rotors[l-1].AtNotch() || m.rotors[l-2].AtNotch() {
		m.rotors[l-2].AdvancePosition()
	}
	m.rotors[l-1].AdvancePosition()
}
// TranslateLetter performs a translation (encryption or decryption) of a
// single letter. This effectively simulates the pressing of a single key on
// the Enigma Machine: the rotors advance first, then the signal passes
// through the plugboard chain. Runes outside 'A'-'Z' are returned unchanged
// and do not advance the rotors.
func (m *Machine) TranslateLetter(input rune) rune {
	if input < 'A' || input > 'Z' {
		return input
	}
	m.advanceRotors()
	return m.plugboard.TranslateLetter(input)
}
// TranslateString runs the given string through the EnigmaMachine and
// returns the result. Any characters in the input that are not the
// uppercase letters 'A' - 'Z' are returned in the output unchanged.
// The error result is always nil and is retained only for backward
// compatibility: strings.Builder's WriteRune is documented to always
// return a nil error, so the previous per-rune error check was dead code.
func (m *Machine) TranslateString(input string) (string, error) {
	var out strings.Builder
	for _, ch := range input {
		out.WriteRune(m.TranslateLetter(ch))
	}
	return out.String(), nil
} | machine.go | 0.704058 | 0.431824 | machine.go | starcoder
package matrix
import (
"math"
"github.com/anolson/rtc/tuple"
)
// Transform returns the result of applying a transformation to a Tuple
func Transform(m *Matrix, t *tuple.Tuple) *tuple.Tuple {
return MultiplyByTuple(m, t)
}
// Chain returns the result of applying a multiple transformations to a Tuple
func Chain(t *tuple.Tuple, transforms ...*Matrix) *tuple.Tuple {
transformed := t
for _, transform := range transforms {
transformed = Transform(transform, transformed)
}
return transformed
}
// Identity returns the identiry Matrix
func Identity() *Matrix {
return New(4, 4, []float64{
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
})
}
// Translation returns a Matrix for moving a point
func Translation(x, y, z float64) *Matrix {
return New(4, 4, []float64{
1, 0, 0, x,
0, 1, 0, y,
0, 0, 1, z,
0, 0, 0, 1,
})
}
// Scaling returns a Matrix for scaling a vector or point
func Scaling(x, y, z float64) *Matrix {
return New(4, 4, []float64{
x, 0, 0, 0,
0, y, 0, 0,
0, 0, z, 0,
0, 0, 0, 1,
})
}
// RotationX returns a Matrix for rotating a point around the x axis by
// the given angle in radians.
func RotationX(radians float64) *Matrix {
	cos := math.Cos(radians)
	sin := math.Sin(radians)
	return New(4, 4, []float64{
		1, 0, 0, 0,
		0, cos, -sin, 0,
		0, sin, cos, 0,
		0, 0, 0, 1,
	})
}
// RotationY returns a Matrix for rotating a point around the y axis by
// the given angle in radians.
func RotationY(radians float64) *Matrix {
	cos := math.Cos(radians)
	sin := math.Sin(radians)
	return New(4, 4, []float64{
		cos, 0, sin, 0,
		0, 1, 0, 0,
		-sin, 0, cos, 0,
		0, 0, 0, 1,
	})
}
// RotationZ returns a Matrix for rotating a point around the z axis by
// the given angle in radians.
func RotationZ(radians float64) *Matrix {
	cos := math.Cos(radians)
	sin := math.Sin(radians)
	return New(4, 4, []float64{
		cos, -sin, 0, 0,
		sin, cos, 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1,
	})
}
// Shearing returns a Matrix that skews a point: each parameter gives the
// proportion in which the first-named coordinate moves relative to the
// second (e.g. xy shifts x in proportion to y).
func Shearing(xy, xz, yx, yz, zx, zy float64) *Matrix {
	return New(4, 4, []float64{
		1, xy, xz, 0,
		yx, 1, yz, 0,
		zx, zy, 1, 0,
		0, 0, 0, 1,
	})
} | matrix/transformation.go | 0.893242 | 0.705798 | transformation.go | starcoder |
package conditions
import (
"math"
"sort"
"github.com/smartems/smartems/pkg/components/null"
"github.com/smartems/smartems/pkg/tsdb"
)
// queryReducer reduces a time series to a single nullable float.
type queryReducer struct {
	// Type is the reduction to apply to the time series.
	// Ex: avg, sum, max, min, count, last, median, diff, percent_diff,
	// count_non_null
	Type string
}
// Reduce collapses series into a single nullable float according to
// s.Type. Null points are skipped; if the series is empty, every point
// is null, or the reducer type is unknown, a null float is returned.
func (s *queryReducer) Reduce(series *tsdb.TimeSeries) null.Float {
	if len(series.Points) == 0 {
		return null.FloatFromPtr(nil)
	}
	value := float64(0)
	allNull := true
	switch s.Type {
	case "avg":
		// Arithmetic mean over the valid (non-null) points only.
		validPointsCount := 0
		for _, point := range series.Points {
			if point[0].Valid {
				value += point[0].Float64
				validPointsCount++
				allNull = false
			}
		}
		if validPointsCount > 0 {
			value = value / float64(validPointsCount)
		}
	case "sum":
		for _, point := range series.Points {
			if point[0].Valid {
				value += point[0].Float64
				allNull = false
			}
		}
	case "min":
		value = math.MaxFloat64
		for _, point := range series.Points {
			if point[0].Valid {
				allNull = false
				if value > point[0].Float64 {
					value = point[0].Float64
				}
			}
		}
	case "max":
		value = -math.MaxFloat64
		for _, point := range series.Points {
			if point[0].Valid {
				allNull = false
				if value < point[0].Float64 {
					value = point[0].Float64
				}
			}
		}
	case "count":
		// Counts every point, including null ones.
		value = float64(len(series.Points))
		allNull = false
	case "last":
		// Newest valid value: points are scanned from the end.
		points := series.Points
		for i := len(points) - 1; i >= 0; i-- {
			if points[i][0].Valid {
				value = points[i][0].Float64
				allNull = false
				break
			}
		}
	case "median":
		var values []float64
		for _, v := range series.Points {
			if v[0].Valid {
				allNull = false
				values = append(values, v[0].Float64)
			}
		}
		if len(values) >= 1 {
			sort.Float64s(values)
			length := len(values)
			if length%2 == 1 {
				value = values[(length-1)/2]
			} else {
				// Even count: mean of the two middle values.
				value = (values[(length/2)-1] + values[length/2]) / 2
			}
		}
	case "diff":
		// Absolute difference between the newest and oldest valid values.
		allNull, value = calculateDiff(series, allNull, value, diff)
	case "percent_diff":
		// Absolute percentage change between newest and oldest values.
		allNull, value = calculateDiff(series, allNull, value, percentDiff)
	case "count_non_null":
		for _, v := range series.Points {
			if v[0].Valid {
				value++
			}
		}
		if value > 0 {
			allNull = false
		}
	}
	if allNull {
		return null.FloatFromPtr(nil)
	}
	return null.FloatFrom(value)
}
// newSimpleReducer returns a queryReducer for the given reduction type.
func newSimpleReducer(t string) *queryReducer {
	r := queryReducer{Type: t}
	return &r
}
// calculateDiff locates the newest and the oldest valid points of series
// and returns the absolute value of fn(newest, oldest), together with the
// updated allNull flag. If fewer than two valid points exist, value is
// returned unchanged.
func calculateDiff(series *tsdb.TimeSeries, allNull bool, value float64, fn func(float64, float64) float64) (bool, float64) {
	var (
		points = series.Points
		first  float64
		i      int
	)
	// get the newest point (scan from the end for the first valid one)
	for i = len(points) - 1; i >= 0; i-- {
		if points[i][0].Valid {
			allNull = false
			first = points[i][0].Float64
			break
		}
	}
	// i >= 1 means the newest valid point is not the very first point,
	// so an older valid point may still exist before it.
	if i >= 1 {
		// get the oldest point (scan forward, excluding the newest)
		points = points[0:i]
		for i := 0; i < len(points); i++ {
			if points[i][0].Valid {
				allNull = false
				val := fn(first, points[i][0].Float64)
				value = math.Abs(val)
				break
			}
		}
	}
	return allNull, value
}
// diff returns the difference between the newest and oldest values.
var diff = func(newest, oldest float64) float64 {
	delta := newest - oldest
	return delta
}
// percentDiff returns the change from oldest to newest as a percentage
// of the oldest value.
var percentDiff = func(newest, oldest float64) float64 {
	delta := newest - oldest
	return delta / oldest * 100
} | pkg/services/alerting/conditions/reducer.go | 0.606382 | 0.421076 | reducer.go | starcoder |
package geogoth
// MultiPolygon is a multi-polygon feature. Coords is indexed as
// [polygon][ring][point][lon, lat].
type MultiPolygon struct {
	Coords [][][][]float64
}
// NewMultiPolygon builds a MultiPolygon around the given coordinate array.
func NewMultiPolygon(coords [][][][]float64) MultiPolygon {
	mp := MultiPolygon{Coords: coords}
	return mp
}
// Coordinates returns the raw coordinate array of the MultiPolygon,
// indexed as [polygon][ring][point][lon, lat].
func (m MultiPolygon) Coordinates() interface{} {
	return m.Coords
}
// GetCoordinates returns the longitude and latitude of a single point:
// setsnum selects the polygon, setnum the ring, coordnum the point.
// Indexes are not bounds-checked, so out-of-range values panic.
func (m MultiPolygon) GetCoordinates(setsnum, setnum, coordnum int) (float64, float64) {
	coords := (m.Coordinates()).([][][][]float64)
	lon := coords[setsnum][setnum][coordnum][0]
	lat := coords[setsnum][setnum][coordnum][1]
	return lon, lat // longitude (Y), latitude (X)
}
// Type returns the feature type name of the MultiPolygon ("MultiPolygon").
func (m MultiPolygon) Type() string {
	return "MultiPolygon"
}
// Length returns the length of the MultiPolygon, as computed by
// MultipolygonLength (defined elsewhere in the package).
func (m MultiPolygon) Length() float64 {
	return MultipolygonLength(m)
}
// DistanceTo returns the distance between the MultiPolygon and the given
// Feature. Unsupported feature types yield a distance of 0.
//
// Uses a single type switch instead of dispatching on f.Type() and then
// performing an unchecked type assertion, which panicked if a feature's
// Type() string did not match its concrete type.
func (m MultiPolygon) DistanceTo(f Feature) float64 {
	switch other := f.(type) {
	case *Point:
		return DistancePointMultiPolygon(other, &m)
	case *MultiPoint:
		return DistanceMultiPointMultiPolygon(other, &m)
	case *LineString:
		return DistanceLineStringMultiPolygon(other, &m)
	case *MultiLineString:
		return DistanceMultiLineStringMultiPolygon(other, &m)
	case *Polygon:
		return DistancePolygonMultiPolygon(other, &m)
	case *MultiPolygon:
		return DistanceMultiPolygonMultiPolygon(&m, other)
	default:
		return 0
	}
}
// IntersectsWith returns true if geoObject intersects with Feature.
// NOTE(review): none of the intersection checks are implemented yet —
// every case body is commented out, so this always returns false.
func (m MultiPolygon) IntersectsWith(f Feature) bool {
	var intersection bool
	switch f.Type() {
	case "Point":
		// point := f.(*Point)
	case "MultiPoint":
		// mpoint := f.(*MultiPoint)
	case "LineString":
		// lstr := f.(*LineString)
	case "MultiLineString":
		// mlinestr := f.(*MultiLineString)
	case "Polygon":
		// polygon := f.(*Polygon)
	case "MultiPolygon":
		// mpolyg := f.(*MultiPolygon)
	}
	return intersection
} | multipolygon.go | 0.890526 | 0.749294 | multipolygon.go | starcoder |
package bulletproofs
import (
"fmt"
"github.com/incognitochain/go-incognito-sdk-v2/crypto"
"github.com/incognitochain/go-incognito-sdk-v2/privacy/utils"
)
// ConvertUint64ToBinary returns the n least-significant bits of number as
// a little-endian slice of scalars (binary[0] is the least-significant
// bit; bits beyond the 64th are zero).
//
// The previous special case for number == 0 was redundant: the general
// loop already produces all-zero scalars when number is 0.
func ConvertUint64ToBinary(number uint64, n int) []*crypto.Scalar {
	binary := make([]*crypto.Scalar, n)
	for i := 0; i < n; i++ {
		binary[i] = new(crypto.Scalar).FromUint64(number % 2)
		number = number / 2
	}
	return binary
}
// computeHPrime returns H'[i] = H[i] * y^(-i) for i in [0, N): each
// generator scaled by a decreasing power of the inverse of y.
func computeHPrime(y *crypto.Scalar, N int, H []*crypto.Point) []*crypto.Point {
	yInverse := new(crypto.Scalar).Invert(y)
	HPrime := make([]*crypto.Point, N)
	// expYInverse tracks y^(-i), starting at y^0 = 1.
	expYInverse := new(crypto.Scalar).FromUint64(1)
	for i := 0; i < N; i++ {
		HPrime[i] = new(crypto.Point).ScalarMult(H[i], expYInverse)
		expYInverse.Mul(expYInverse, yInverse)
	}
	return HPrime
}
// computeDeltaYZ computes the delta(y, z) term of the aggregated range
// proof: (z - z^2) * <1^N, y^N> minus the sum over each of the N/MaxExp
// aggregated values of z^(j+3) * <1^n, 2^n>.
// An error is returned when the inner-product operands differ in length.
//
// Rewritten from an if/else-if/else error chain into early returns
// (idiomatic Go); the computation itself is unchanged.
func computeDeltaYZ(z, zSquare *crypto.Scalar, yVector []*crypto.Scalar, N int) (*crypto.Scalar, error) {
	oneNumber := new(crypto.Scalar).FromUint64(1)
	twoNumber := new(crypto.Scalar).FromUint64(2)
	oneVectorN := powerVector(oneNumber, utils.MaxExp)
	twoVectorN := powerVector(twoNumber, utils.MaxExp)
	oneVector := powerVector(oneNumber, N)
	deltaYZ := new(crypto.Scalar).Sub(z, zSquare)
	// ip1 = <1^(n*m), y^(n*m)>
	ip1, err := innerProduct(oneVector, yVector)
	if err != nil {
		return nil, err
	}
	// ip2 = <1^n, 2^n>
	ip2, err := innerProduct(oneVectorN, twoVectorN)
	if err != nil {
		return nil, err
	}
	deltaYZ.Mul(deltaYZ, ip1)
	// sum = z^3 + z^4 + ... over the aggregated values.
	sum := new(crypto.Scalar).FromUint64(0)
	zTmp := new(crypto.Scalar).Set(zSquare)
	for j := 0; j < N/utils.MaxExp; j++ {
		zTmp.Mul(zTmp, z)
		sum.Add(sum, zTmp)
	}
	sum.Mul(sum, ip2)
	deltaYZ.Sub(deltaYZ, sum)
	return deltaYZ, nil
}
// innerProduct returns <a, b> = sum(a[i] * b[i]); it errors when the two
// vectors differ in length.
func innerProduct(a []*crypto.Scalar, b []*crypto.Scalar) (*crypto.Scalar, error) {
	if len(a) != len(b) {
		return nil, fmt.Errorf("incompatible sizes of a and b")
	}
	result := new(crypto.Scalar).FromUint64(uint64(0))
	for i := range a {
		// result = a[i]*b[i] + result (fused multiply-add).
		result.MulAdd(a[i], b[i], result)
	}
	return result, nil
}
// vectorAdd returns the element-wise sum of a and b; it errors when the
// two vectors differ in length.
func vectorAdd(a []*crypto.Scalar, b []*crypto.Scalar) ([]*crypto.Scalar, error) {
	if len(a) != len(b) {
		return nil, fmt.Errorf("incompatible sizes of a and b")
	}
	sum := make([]*crypto.Scalar, len(a))
	for i := 0; i < len(sum); i++ {
		sum[i] = new(crypto.Scalar).Add(a[i], b[i])
	}
	return sum, nil
}
// setAggregateParams returns a view of aggParam (declared elsewhere in
// the package) truncated to its first N g and h generators; u and cs are
// shared as-is. The returned struct aliases aggParam's slices.
func setAggregateParams(N int) *bulletproofParams {
	tmpParam := new(bulletproofParams)
	tmpParam.g = aggParam.g[0:N]
	tmpParam.h = aggParam.h[0:N]
	tmpParam.u = aggParam.u
	tmpParam.cs = aggParam.cs
	return tmpParam
}
// roundUpPowTwo returns the smallest power of two >= v (v == 0 maps to
// 1). It smears the highest set bit of v-1 into every lower position and
// then adds one. Behavior for negative v is unchanged from before.
func roundUpPowTwo(v int) int {
	if v == 0 {
		return 1
	}
	v--
	v |= v >> 1
	v |= v >> 2
	v |= v >> 4
	v |= v >> 8
	v |= v >> 16
	// Fold the upper half as well: without this shift the result is
	// wrong for values above 1<<32 on 64-bit ints. On 32-bit platforms
	// the extra shift is a defined no-op (Go shifts >= width yield 0
	// for non-negative values).
	v |= v >> 32
	v++
	return v
}
// hadamardProduct returns the element-wise (Hadamard) product of a and
// b; it errors when the two vectors differ in length.
func hadamardProduct(a []*crypto.Scalar, b []*crypto.Scalar) ([]*crypto.Scalar, error) {
	if len(a) != len(b) {
		return nil, fmt.Errorf("invalid input")
	}
	out := make([]*crypto.Scalar, len(a))
	for i := range out {
		out[i] = new(crypto.Scalar).Mul(a[i], b[i])
	}
	return out, nil
}
// powerVector returns [base^0, base^1, ..., base^(n-1)].
// n must be >= 1; n == 0 panics on the result[0] assignment.
func powerVector(base *crypto.Scalar, n int) []*crypto.Scalar {
	result := make([]*crypto.Scalar, n)
	result[0] = new(crypto.Scalar).FromUint64(1)
	if n > 1 {
		result[1] = new(crypto.Scalar).Set(base)
		for i := 2; i < n; i++ {
			result[i] = new(crypto.Scalar).Mul(result[i-1], base)
		}
	}
	return result
}
// vectorAddScalar returns a new vector whose i-th element is v[i] + s.
func vectorAddScalar(v []*crypto.Scalar, s *crypto.Scalar) []*crypto.Scalar {
	out := make([]*crypto.Scalar, len(v))
	for i := 0; i < len(v); i++ {
		out[i] = new(crypto.Scalar).Add(v[i], s)
	}
	return out
}
// vectorMulScalar returns a new vector whose i-th element is v[i] * s.
func vectorMulScalar(v []*crypto.Scalar, s *crypto.Scalar) []*crypto.Scalar {
	out := make([]*crypto.Scalar, len(v))
	for i := 0; i < len(v); i++ {
		out[i] = new(crypto.Scalar).Mul(v[i], s)
	}
	return out
}
// encodeVectors commits to the vectors l and r under the generator sets
// g and h, returning <l, g> + <r, h> as a single group element.
// All four slices must have the same length.
func encodeVectors(l []*crypto.Scalar, r []*crypto.Scalar, g []*crypto.Point, h []*crypto.Point) (*crypto.Point, error) {
	if len(l) != len(r) || len(g) != len(l) || len(h) != len(g) {
		return nil, fmt.Errorf("invalid input")
	}
	tmp1 := new(crypto.Point).MultiScalarMult(l, g)
	tmp2 := new(crypto.Point).MultiScalarMult(r, h)
	res := new(crypto.Point).Add(tmp1, tmp2)
	return res, nil
}
// newBulletproofParams deterministically derives the generator set used
// by aggregated range proofs over m values: capacity = MaxExp*m pairs
// (g[i], h[i]) hashed from disjoint index ranges, one extra generator u,
// and cs, a point hashed from the bytes of every generated point
// (binding the whole parameter set together).
func newBulletproofParams(m int) *bulletproofParams {
	maxExp := utils.MaxExp
	numCommitValue := utils.NumBase
	maxOutputCoin := utils.MaxOutputCoin
	capacity := maxExp * m // fixed value
	param := new(bulletproofParams)
	param.g = make([]*crypto.Point, capacity)
	param.h = make([]*crypto.Point, capacity)
	csByte := make([]byte, 0)
	for i := 0; i < capacity; i++ {
		// g and h use index ranges offset by maxOutputCoin*maxExp so
		// the two generator families never collide.
		param.g[i] = crypto.HashToPointFromIndex(int64(numCommitValue+i), crypto.CStringBulletProof)
		param.h[i] = crypto.HashToPointFromIndex(int64(numCommitValue+i+maxOutputCoin*maxExp), crypto.CStringBulletProof)
		csByte = append(csByte, param.g[i].ToBytesS()...)
		csByte = append(csByte, param.h[i].ToBytesS()...)
	}
	param.u = new(crypto.Point)
	param.u = crypto.HashToPointFromIndex(int64(numCommitValue+2*maxOutputCoin*maxExp), crypto.CStringBulletProof)
	csByte = append(csByte, param.u.ToBytesS()...)
	param.cs = crypto.HashToPoint(csByte)
	return param
}
// generateChallenge derives a Fiat-Shamir challenge scalar by hashing
// hashCache followed by the serialized bytes of each point in values.
func generateChallenge(hashCache []byte, values []*crypto.Point) *crypto.Scalar {
	bytes := make([]byte, 0)
	bytes = append(bytes, hashCache...)
	for i := 0; i < len(values); i++ {
		bytes = append(bytes, values[i].ToBytesS()...)
	}
	hash := crypto.HashToScalar(bytes)
	return hash
} | privacy/v1/zkp/bulletproofs/bulletproofs_helper.go | 0.655557 | 0.541045 | bulletproofs_helper.go | starcoder |
package geometry
import (
"math"
)
// Vector is a two-dimensional vector (or point) with float64 components.
type Vector struct {
	X, Y float64
}

// New returns the vector (x, y).
func New(x float64, y float64) Vector {
	return Vector{X: x, Y: y}
}

// Hashcode returns a simple additive hash of the vector.
// NOTE(review): converting a negative or non-integral float64 to uint64
// is implementation-defined in Go, so the hash is only stable for
// non-negative integral components — confirm intended usage.
func (v *Vector) Hashcode() (hash uint64) {
	x, y := uint64(v.X), uint64(v.Y)
	hash = x + y
	return
}

// Equals reports whether oi is a Vector or *Vector equal to v.
func (v *Vector) Equals(oi interface{}) (equals bool) {
	o, equals := oi.(*Vector)
	if !equals {
		var ov Vector
		ov, equals = oi.(Vector)
		equals = equals && v.EqualsVector(ov)
		return
	}
	equals = v.EqualsVector(*o)
	return
}

// EqualsVector reports whether v and q have identical components.
func (v Vector) EqualsVector(q Vector) bool { return v.X == q.X && v.Y == q.Y }

// Clone returns a copy of v.
func (v Vector) Clone() Vector {
	return New(v.X, v.Y)
}

// Magnitude returns the Euclidean length of v.
func (v Vector) Magnitude() float64 {
	return math.Sqrt((v.X * v.X) + (v.Y * v.Y))
}

// MagnitudeSquared returns the squared length of v (avoids the sqrt).
func (v Vector) MagnitudeSquared() float64 {
	return (v.X * v.X) + (v.Y * v.Y)
}

// RotateLeft returns v rotated a quarter turn, mapping (x, y) to (-y, x).
func (v Vector) RotateLeft() Vector {
	return Vector{
		X: -v.Y,
		Y: v.X,
	}
}

// RotateRight returns v rotated a quarter turn the other way, mapping
// (x, y) to (y, -x).
func (v Vector) RotateRight() Vector {
	return Vector{
		X: v.Y,
		Y: -v.X,
	}
}

// Rotate returns v rotated about the origin by angle (radians), using
// the standard rotation matrix:
//
//	x' = x*cos - y*sin
//	y' = x*sin + y*cos
func (v Vector) Rotate(angle float64) Vector {
	cos := math.Cos(angle)
	sin := math.Sin(angle)
	return Vector{
		X: v.X*cos - v.Y*sin,
		// Bug fix: this was v.X*sin - v.Y*cos, which is not a rotation
		// (Rotate(0) flipped the sign of Y). RotateAboutPoint already
		// used the correct '+' form.
		Y: v.X*sin + v.Y*cos,
	}
}
// RotateAboutPoint returns v rotated by angle (radians) around point,
// by translating to the origin, applying the rotation matrix, and
// translating back:
//
//	x' = px + (x-px)*cos - (y-py)*sin
//	y' = py + (x-px)*sin + (y-py)*cos
func (v Vector) RotateAboutPoint(angle float64, point Vector) Vector {
	cos := math.Cos(angle)
	sin := math.Sin(angle)
	x := point.X + ((v.X-point.X)*cos - (v.Y-point.Y)*sin)
	y := point.Y + ((v.X-point.X)*sin + (v.Y-point.Y)*cos)
	return Vector{
		X: x,
		Y: y,
	}
}
// Normalize returns v scaled to unit length; the zero vector is
// returned unchanged (avoiding a division by zero).
func (v Vector) Normalize() Vector {
	length := v.Magnitude()
	if length == 0 {
		return Vector{}
	}
	return Vector{X: v.X / length, Y: v.Y / length}
}
// DotProduct returns the scalar (dot) product of v and o.
func (v Vector) DotProduct(o Vector) float64 {
	return v.X*o.X + v.Y*o.Y
}

// CrossProduct returns the z component of the 3D cross product of v and o.
func (v Vector) CrossProduct(o Vector) float64 {
	return v.X*o.Y - v.Y*o.X
}

// Add returns the component-wise sum v + o.
func (v Vector) Add(o Vector) Vector {
	return Vector{X: v.X + o.X, Y: v.Y + o.Y}
}

// Subtract returns the component-wise difference v - o.
func (v Vector) Subtract(o Vector) Vector {
	return Vector{X: v.X - o.X, Y: v.Y - o.Y}
}

// Multiply returns v scaled by s.
func (v Vector) Multiply(s float64) Vector {
	return Vector{X: v.X * s, Y: v.Y * s}
}
// Perpendicular returns a vector perpendicular to v: (-Y, X), or the
// opposite direction (Y, -X) when negate is true.
//
// Bug fix: the Y component was previously n * -v.X, which yielded
// (-Y, -X) — not perpendicular to v (its dot product with v is -2*X*Y,
// not zero). NOTE(review): confirm no caller compensated for the old,
// incorrect result.
func (v Vector) Perpendicular(negate bool) Vector {
	n := 1.0
	if negate {
		n = -1
	}
	return Vector{
		X: n * -v.Y,
		Y: n * v.X,
	}
}
// Negative returns the vector pointing opposite to v.
func (v Vector) Negative() Vector {
	return Vector{X: -v.X, Y: -v.Y}
}

// Angle returns the angle (radians) of the line from v to o.
func (v Vector) Angle(o Vector) float64 {
	return math.Atan2(o.Y-v.Y, o.X-v.X)
}

// Divide returns v scaled by 1/s.
func (v Vector) Divide(s float64) Vector {
	return Vector{X: v.X / s, Y: v.Y / s}
}

// Distance returns the Euclidean distance between v and o.
func (v Vector) Distance(o Vector) float64 {
	return math.Sqrt(v.DistanceSquared(o))
}

// DistanceSquared returns the squared Euclidean distance between v and o.
func (v Vector) DistanceSquared(o Vector) float64 {
	dx, dy := o.X-v.X, o.Y-v.Y
	return dx*dx + dy*dy
} | geometry/vector.go | 0.885996 | 0.746924 | vector.go | starcoder |
package raytrace
import (
"errors"
"image/color"
"math"
)
// Element pairs an intersectable shape with its surface albedo.
type Element struct {
	element Intersectable
	albedo float32
}
// Sphere is a sphere defined by its center point, radius and color.
type Sphere struct {
	center Point
	radius float64
	color color.Color
}
// Plane is an infinite plane through origin with the given normal.
type Plane struct {
	origin Point
	normal Vector
	color color.Color
}
// Intersectable is a shape that can be tested against a ray.
type Intersectable interface {
	// Intersect returns the hit distance along the ray; implementations
	// in this file return -1 on a miss.
	Intersect(ray *Ray) float64
	// SurfaceNormal returns the surface normal at hitPoint.
	SurfaceNormal(hitPoint Point) *Vector
}
// Color returns the wrapped shape's color, or an error when the shape
// type is not supported.
func (e *Element) Color() (color.Color, error) {
	switch el := e.element.(type) {
	case *Sphere:
		return el.color, nil
	case *Plane:
		return el.color, nil
	}
	return nil, errors.New("Element type is not supported")
}

// Intersect forwards the intersection test to the wrapped shape, or
// returns (-1, error) when the shape type is not supported.
func (e *Element) Intersect(ray *Ray) (float64, error) {
	switch el := e.element.(type) {
	case *Sphere:
		return el.Intersect(ray), nil
	case *Plane:
		return el.Intersect(ray), nil
	}
	return -1, errors.New("Element type is not supported")
}

// SurfaceNormal forwards the normal computation to the wrapped shape,
// or returns an error when the shape type is not supported.
func (e *Element) SurfaceNormal(hitPoint *Point) (*Vector, error) {
	switch el := e.element.(type) {
	case *Sphere:
		return el.SurfaceNormal(*hitPoint), nil
	case *Plane:
		return el.SurfaceNormal(*hitPoint), nil
	}
	return nil, errors.New("Element type is not supported")
}
// Intersect returns the distance along ray at which it hits the sphere,
// or -1 when there is no hit.
// NOTE(review): when the ray origin is inside the sphere, t0 is negative
// while t1 is positive, and the negative t0 is returned; callers that
// treat negative results as a miss will never hit from inside — confirm
// whether that is intended.
func (s *Sphere) Intersect(ray *Ray) float64 {
	// Vector between the sphere center and the ray origin (direction
	// depends on MakeVector's argument order, defined elsewhere).
	line := MakeVector(s.center, ray.origin)
	// Projection of that vector onto the ray direction.
	adjacent := float64(line.Dot(&ray.direction))
	// Squared distance from the sphere center to the ray's line
	// (Pythagoras: |line|^2 - adjacent^2).
	distance := float64(line.Dot(line)) - adjacent*adjacent
	scope := s.radius * s.radius
	if distance > scope {
		return -1
	}
	// Half-chord: distance from the closest approach point to either
	// intersection point.
	thc := math.Sqrt(scope - distance)
	t0 := adjacent - thc
	t1 := adjacent + thc
	if t0 < 0.0 && t1 < 0.0 {
		return -1
	} else if t0 < t1 {
		return t0
	} else {
		return t1
	}
}
// SurfaceNormal returns the unit normal of the sphere at hitPoint.
func (s *Sphere) SurfaceNormal(hitPoint Point) *Vector {
	n := VectorFromPoints(&hitPoint, &s.center)
	return n.Normalize()
}

// CreateSphere builds a sphere from its center, radius and color.
func CreateSphere(center Point, radius float64, color color.Color) *Sphere {
	s := Sphere{center: center, radius: radius, color: color}
	return &s
}
// Intersect returns the distance along ray at which it hits the plane,
// or -1 when there is no hit. Only rays whose direction has a positive
// component along the plane normal (denom > 1e-6) can hit; the epsilon
// also guards against dividing by (near-)zero for parallel rays.
func (p *Plane) Intersect(ray *Ray) float64 {
	denom := p.normal.Dot(&ray.direction)
	if denom > 1e-6 {
		// Vector between the plane origin and the ray origin (order per
		// VectorFromPoints, defined elsewhere).
		v := VectorFromPoints(&p.origin, &ray.origin)
		distance := v.Dot(&p.normal) / denom
		if distance >= 0.0 {
			return float64(distance)
		}
	}
	return -1.0
}
// SurfaceNormal returns the plane's normal with all components negated;
// hitPoint is ignored since a plane's normal is the same everywhere.
// NOTE(review): the negation pairs with the denom > 1e-6 test above
// (only back-facing normals can be hit) — confirm the sign convention.
func (p *Plane) SurfaceNormal(hitPoint Point) *Vector {
	return &Vector{
		x: -p.normal.x,
		y: -p.normal.y,
		z: -p.normal.z,
	}
}
// CreateColor builds a fully opaque RGBA color from 8-bit components.
// Bug fix: alpha was previously 1, which on color.RGBA's 0-255 scale is
// almost fully transparent (and invalid for alpha-premultiplied RGBA,
// where A must be >= each of R, G, B); an opaque color needs A == 255.
func CreateColor(red, green, blue uint8) *color.RGBA {
	return &color.RGBA{R: red, G: green, B: blue, A: 255}
} | raytrace/objects.go | 0.850096 | 0.432303 | objects.go | starcoder |
package testutil
import (
"flag"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// Command-line flags controlling golden-file behavior; consulted by
// AssertGoldenOutput below.
var updateGolden = flag.Bool("update-golden", false, "Update golden test files")
var dryGolden = flag.Bool("dry-golden", false, "Used together with -update-golden. If set, write to separate, ignored files to compare manually.")
// AssertGoldenOutput reads the expected output of a test from goldenFile
// and checks that the actual output matches. With -update-golden the
// golden file is (re)written instead; adding -dry-golden writes to a
// ".tmp" sibling file for manual comparison.
func AssertGoldenOutput(t *testing.T, testName string, goldenFile string, output string) {
	if !*updateGolden {
		expectedOutput := ReadTestdata(t, testName, goldenFile)
		assert.Equal(t, expectedOutput, output, "Testcase: %s", testName)
		return
	}
	outputFile := goldenFile
	if *dryGolden {
		outputFile += ".tmp"
	}
	WriteTestdata(t, testName, outputFile, output)
}
// ReadTestdata reads a file from the current package's testdata/ folder,
// stripping carriage returns. On failure the test is marked failed via
// assert.Failf (which does not abort execution) and "" is returned.
func ReadTestdata(t *testing.T, testName string, path string) string {
	data, err := readFile(FullTestdataPath(path))
	if err != nil {
		assert.Failf(t, "Could not load testdata", "Testcase: %s, file: %s, error: %s", testName, path, err)
	}
	return strings.ReplaceAll(data, "\r", "")
}
// WriteTestdata writes a file to the current package's testdata/ folder,
// marking the test failed (without aborting) when the write fails.
func WriteTestdata(t *testing.T, testName string, path string, data string) {
	err := writeFile(FullTestdataPath(path), data)
	if err != nil {
		assert.Failf(t, "Could not write testdata", "Testcase: %s, file: %s, error: %s", testName, path, err)
	}
}
// FullTestdataPath returns the path of a testdata file given a partial
// path (usually just the filename).
// E.g.: FullTestdataPath("my_testdata.txt") -> testdata/my_testdata.txt
func FullTestdataPath(filenameOrPartialPath string) string {
	full := filepath.Join("testdata", filenameOrPartialPath)
	return full
}
// readFile returns the whole contents of filePath as a string
// ("" when the read fails, alongside the error).
func readFile(filePath string) (string, error) {
	data, err := os.ReadFile(filePath)
	return string(data), err
}
// writeFile writes str to filePath (mode 0644), appending a trailing
// newline when str does not already end with one.
// Bug fix: the previous str[len(str)-1] check panicked with an index
// out of range when str was empty; strings.HasSuffix handles "" safely
// (an empty string is written out as a single newline).
func writeFile(filePath string, str string) error {
	if !strings.HasSuffix(str, "\n") {
		str += "\n"
	}
	return os.WriteFile(filePath, []byte(str), 0644)
} | testutil/golden_util.go | 0.514888 | 0.403009 | golden_util.go | starcoder |
package idemix
import (
"github.com/sinochem-tech/fabric-amcl/amcl"
"github.com/sinochem-tech/fabric-amcl/amcl/FP256BN"
"github.com/pkg/errors"
)
// NewNymSignature creates a new idemix pseudonym signature: a proof of
// knowledge of the secret key sk and the randomness RNym underlying the
// pseudonym Nym, bound to msg. All inputs except msg must be non-nil.
// (The doc comment previously named the wrong function, "NewSignature".)
func NewNymSignature(sk *FP256BN.BIG, Nym *FP256BN.ECP, RNym *FP256BN.BIG, ipk *IssuerPublicKey, msg []byte, rng *amcl.RAND) (*NymSignature, error) {
	if sk == nil || Nym == nil || RNym == nil || ipk == nil || rng == nil {
		return nil, errors.Errorf("cannot create NymSignature: received nil input")
	}
	Nonce := RandModOrder(rng)
	HRand := EcpFromProto(ipk.HRand)
	HSk := EcpFromProto(ipk.HSk)
	// The rest of this function constructs the non-interactive zero knowledge proof proving that
	// the signer 'owns' this pseudonym, i.e., it knows the secret key and randomness on which it is based.
	// take the randomness used to compute the commitment values (aka t-values) for the ZKP
	rSk := RandModOrder(rng)
	rRNym := RandModOrder(rng)
	// Compute the commitment (aka t-value)
	t := HSk.Mul2(rSk, HRand, rRNym)
	// Next, we compute the Fiat-Shamir hash, forming the challenge of the ZKP.
	// proofData will hold the data being hashed, it consists of:
	// - the signature label
	// - 2 elements of G1 each taking 2*FieldBytes+1 bytes
	// - one bigint (hash of the issuer public key) of length FieldBytes
	// - disclosed attributes
	// - message being signed
	proofData := make([]byte, len([]byte(signLabel))+2*(2*FieldBytes+1)+FieldBytes+len(msg))
	index := 0
	index = appendBytesString(proofData, index, signLabel)
	index = appendBytesG1(proofData, index, t)
	index = appendBytesG1(proofData, index, Nym)
	copy(proofData[index:], ipk.Hash)
	index = index + FieldBytes
	copy(proofData[index:], msg)
	c := HashModOrder(proofData)
	// combine the previous hash and the nonce and hash again to compute the final Fiat-Shamir value 'ProofC'
	index = 0
	proofData = proofData[:2*FieldBytes]
	index = appendBytesBig(proofData, index, c)
	index = appendBytesBig(proofData, index, Nonce)
	ProofC := HashModOrder(proofData)
	// Finally, we compute the s-values, which form the response answering challenge c
	ProofSSk := Modadd(rSk, FP256BN.Modmul(ProofC, sk, GroupOrder), GroupOrder)
	ProofSRNym := Modadd(rRNym, FP256BN.Modmul(ProofC, RNym, GroupOrder), GroupOrder)
	// The signature consists of the Fiat-Shamir hash (ProofC), the s-values (ProofSSk, ProofSRNym), and the nonce.
	return &NymSignature{
		ProofC: BigToBytes(ProofC),
		ProofSSk: BigToBytes(ProofSSk),
		ProofSRNym: BigToBytes(ProofSRNym),
		Nonce: BigToBytes(Nonce)}, nil
}
// Ver verifies an idemix pseudonym signature sig on msg against the
// pseudonym nym and the issuer public key ipk. It recomputes the
// commitment t-value from the proof's s-values and the challenge, then
// re-derives the Fiat-Shamir challenge and checks that it matches the
// one carried in the signature.
func (sig *NymSignature) Ver(nym *FP256BN.ECP, ipk *IssuerPublicKey, msg []byte) error {
	ProofC := FP256BN.FromBytes(sig.GetProofC())
	ProofSSk := FP256BN.FromBytes(sig.GetProofSSk())
	ProofSRNym := FP256BN.FromBytes(sig.GetProofSRNym())
	Nonce := FP256BN.FromBytes(sig.GetNonce())
	HRand := EcpFromProto(ipk.HRand)
	HSk := EcpFromProto(ipk.HSk)
	// Recompute the commitment (t-value) from the s-values, subtracting
	// the challenge contribution of the pseudonym.
	t := HSk.Mul2(ProofSSk, HRand, ProofSRNym)
	t.Sub(nym.Mul(ProofC))
	// proofData is the data being hashed, it consists of:
	// the signature label
	// 2 elements of G1 each taking 2*FieldBytes+1 bytes
	// one bigint (hash of the issuer public key) of length FieldBytes
	// disclosed attributes
	// message being signed
	proofData := make([]byte, len([]byte(signLabel))+2*(2*FieldBytes+1)+FieldBytes+len(msg))
	index := 0
	index = appendBytesString(proofData, index, signLabel)
	index = appendBytesG1(proofData, index, t)
	index = appendBytesG1(proofData, index, nym)
	copy(proofData[index:], ipk.Hash)
	index = index + FieldBytes
	copy(proofData[index:], msg)
	c := HashModOrder(proofData)
	// Fold in the nonce and hash again, exactly as done at signing time.
	index = 0
	proofData = proofData[:2*FieldBytes]
	index = appendBytesBig(proofData, index, c)
	index = appendBytesBig(proofData, index, Nonce)
	if *ProofC != *HashModOrder(proofData) {
		return errors.Errorf("pseudonym signature invalid: zero-knowledge proof is invalid")
	}
	return nil
} | idemix/nymsignature.go | 0.696887 | 0.446434 | nymsignature.go | starcoder |
package cimg
import (
"errors"
"image"
)
// Image is the concrete image type that is used by all functions inside
// cimg. Pixels are stored row-major; Stride is the number of bytes per
// row and NChan the number of channels per pixel.
type Image struct {
	Width  int
	Height int
	Stride int
	NChan  int
	Pixels []byte
}

// NewImage allocates a new 8-bit image of the given dimensions with
// nchan channels per pixel.
func NewImage(width, height, nchan int) *Image {
	img := Image{
		Width:  width,
		Height: height,
		Stride: width * nchan,
		NChan:  nchan,
	}
	img.Pixels = make([]byte, height*width*nchan)
	return &img
}

// WrapImage wraps an existing pixel buffer in an Image object without
// copying the pixels.
func WrapImage(width, height, nchan int, pixels []byte) *Image {
	return &Image{
		Width:  width,
		Height: height,
		Stride: width * nchan,
		NChan:  nchan,
		Pixels: pixels,
	}
}
// FromImage converts a Go image.Image into a cimg Image. Supported
// source types are *image.Gray, *image.RGBA and *image.NRGBA.
// If allowDeepClone is true, the returned Image points directly at the
// source image's pixel buffer; otherwise the pixels are copied.
// NOTE(review): the source image's own Stride is ignored, so sub-images
// whose stride differs from width*nchan are not handled — confirm
// callers only pass full images.
//
// The three supported types previously had copy-pasted identical case
// bodies; they differ only in channel count and pixel buffer, so the
// clone/copy logic is now shared.
func FromImage(src image.Image, allowDeepClone bool) (*Image, error) {
	dst := &Image{
		Width:  src.Bounds().Dx(),
		Height: src.Bounds().Dy(),
	}
	var pix []byte
	switch v := src.(type) {
	case *image.Gray:
		dst.NChan = 1
		pix = v.Pix
	case *image.RGBA:
		dst.NChan = 4
		pix = v.Pix
	case *image.NRGBA:
		dst.NChan = 4
		pix = v.Pix
	default:
		return nil, errors.New("Unsupported source image type")
	}
	dst.Stride = dst.NChan * dst.Width
	if allowDeepClone {
		dst.Pixels = pix
	} else {
		dst.Pixels = make([]byte, dst.Stride*dst.Height)
		copy(dst.Pixels, pix)
	}
	return dst, nil
}
// ToImage returns an image from the Go standard library 'image' package:
// 1 channel -> *image.Gray, 3 channels -> *image.RGBA (alpha forced to
// 255), 4 channels -> *image.NRGBA. Any other channel count returns nil.
func (img *Image) ToImage() image.Image {
	if img.NChan == 1 {
		dst := image.NewGray(image.Rect(0, 0, img.Width, img.Height))
		srcBuf := img.Pixels
		dstBuf := dst.Pix
		// Copy row by row, since source and destination strides may differ.
		for y := 0; y < img.Height; y++ {
			srcP := img.Stride * y
			dstP := dst.Stride * y
			copy(dstBuf[dstP:dstP+dst.Stride], srcBuf[srcP:srcP+img.Stride])
		}
		return dst
	} else if img.NChan == 3 {
		dst := image.NewRGBA(image.Rect(0, 0, img.Width, img.Height))
		srcBuf := img.Pixels
		dstBuf := dst.Pix
		width := img.Width
		// Expand 3-byte RGB pixels to 4-byte RGBA, fully opaque.
		for y := 0; y < img.Height; y++ {
			srcP := img.Stride * y
			dstP := dst.Stride * y
			for x := 0; x < width; x++ {
				dstBuf[dstP] = srcBuf[srcP]
				dstBuf[dstP+1] = srcBuf[srcP+1]
				dstBuf[dstP+2] = srcBuf[srcP+2]
				dstBuf[dstP+3] = 255
				srcP += 3
				dstP += 4
			}
		}
		return dst
	} else if img.NChan == 4 {
		dst := image.NewNRGBA(image.Rect(0, 0, img.Width, img.Height))
		srcBuf := img.Pixels
		dstBuf := dst.Pix
		for y := 0; y < img.Height; y++ {
			srcP := img.Stride * y
			dstP := dst.Stride * y
			copy(dstBuf[dstP:dstP+dst.Stride], srcBuf[srcP:srcP+img.Stride])
		}
		return dst
	} else {
		return nil
	}
}
// Clone returns a deep clone of the image.
func (img *Image) Clone() *Image {
	// The local was renamed from "copy", which shadowed the copy builtin.
	dup := NewImage(img.Width, img.Height, img.NChan)
	dup.CopyImage(img, 0, 0)
	return dup
} | image.go | 0.714628 | 0.474936 | image.go | starcoder |
package main
import (
"encoding/xml"
"strings"
"golang.org/x/net/html/charset"
)
/*RSSV2 - What is RSS?
RSS is a Web content syndication format.
Its name is an acronym for Really Simple Syndication.
RSS is a dialect of XML. All RSS files must conform to the XML 1.0 specification, as published on the World Wide Web Consortium (W3C) website.
At the top level, a RSS document is a <rss> element, with a mandatory attribute called version, that specifies the version of RSS that the document conforms to. If it conforms to this specification, the version attribute must be 2.0.
Subordinate to the <rss> element is a single <channel> element, which contains information about the channel (metadata) and its contents.
Sample files
Here are sample files for: RSS 0.91, 0.92 and 2.0.
Note that the sample files may point to documents and services that no longer exist. The 0.91 sample was created when the 0.91 docs were written. Maintaining a trail of samples seems like a good idea.
About this document
This document represents the status of RSS as of the Fall of 2002, version 2.0.1.
It incorporates all changes and additions, starting with the basic spec for RSS 0.91 (June 2000) and includes new features introduced in RSS 0.92 (December 2000) and RSS 0.94 (August 2002).
Change notes are here.
First we document the required and optional sub-elements of <channel>; and then document the sub-elements of <item>. The final sections answer frequently asked questions, and provide a roadmap for future evolution, and guidelines for extending RSS.*/
type RSSV2 struct {
	XMLName xml.Name `xml:"rss"` // the <rss> root element
	Version string `xml:"version,attr"` // mandatory version attribute ("2.0" for this spec)
	Channel Channel `xml:"channel"` // the single subordinate <channel> element
}
/*Channel - Required channel elements
Here's a list of the required channel elements, each with a brief description, an example, and where available, a pointer to a more complete description.
Element Description Example
title The name of the channel. It's how people refer to your service. If you have an HTML website that contains the same information as your RSS file, the title of your channel should be the same as the title of your website. GoUpstate.com News Headlines
link The URL to the HTML website corresponding to the channel. http://www.goupstate.com/
description Phrase or sentence describing the channel. The latest news from GoUpstate.com, a Spartanburg Herald-Journal Web site.
Optional channel elements
Here's a list of optional channel elements.
Element Description Example
language The language the channel is written in. This allows aggregators to group all Italian language sites, for example, on a single page. A list of allowable values for this element, as provided by Netscape, is here. You may also use values defined by the W3C. en-us
copyright Copyright notice for content in the channel. Copyright 2002, Spartanburg Herald-Journal
managingEditor Email address for person responsible for editorial content. <EMAIL> (<NAME>)
webMaster Email address for person responsible for technical issues relating to channel. <EMAIL> (<NAME>)
pubDate The publication date for the content in the channel. For example, the New York Times publishes on a daily basis, the publication date flips once every 24 hours. That's when the pubDate of the channel changes. All date-times in RSS conform to the Date and Time Specification of RFC 822, with the exception that the year may be expressed with two characters or four characters (four preferred). Sat, 07 Sep 2002 0:00:01 GMT
lastBuildDate The last time the content of the channel changed. Sat, 07 Sep 2002 9:42:31 GMT
category Specify one or more categories that the channel belongs to. Follows the same rules as the <item>-level category element. More info. <category>Newspapers</category>
generator A string indicating the program used to generate the channel. MightyInHouse Content System v2.3
docs A URL that points to the documentation for the format used in the RSS file. It's probably a pointer to this page. It's for people who might stumble across an RSS file on a Web server 25 years from now and wonder what it is. http://backend.userland.com/rss
cloud Allows processes to register with a cloud to be notified of updates to the channel, implementing a lightweight publish-subscribe protocol for RSS feeds. More info here. <cloud domain="rpc.sys.com" port="80" path="/RPC2" registerProcedure="pingMe" protocol="soap"/>
ttl ttl stands for time to live. It's a number of minutes that indicates how long a channel can be cached before refreshing from the source. More info here. <ttl>60</ttl>
image Specifies a GIF, JPEG or PNG image that can be displayed with the channel. More info here.
textInput Specifies a text input box that can be displayed with the channel. More info here.
skipHours A hint for aggregators telling them which hours they can skip. More info here.
skipDays A hint for aggregators telling them which days they can skip. More info here.*/
type Channel struct {
	// Required channel elements (per the RSS 2.0 spec quoted above).
	Title string `xml:"title"`
	Link string `xml:"link"`
	Description string `xml:"description"`
	// Optional channel elements.
	Language string `xml:"language"`
	Copyright string `xml:"copyright"`
	ManagingEditor string `xml:"managingEditor"`
	WebMaster string `xml:"webMaster"`
	PubDate string `xml:"pubDate"`
	LastBuildDate string `xml:"lastBuildDate"`
	Category string `xml:"category"`
	Generator string `xml:"generator"`
	Docs string `xml:"docs"`
	Image Image `xml:"image"`
	Cloud Cloud `xml:"cloud"`
	TTL string `xml:"ttl"`
	// ItemList collects the channel's <item> entries.
	ItemList []Item `xml:"item"`
	TextInput TextInput `xml:"textInput"`
	SkipHours []Hour `xml:"skipHours"`
	SkipDays []Day `xml:"skipDays"`
}
/*Image - <image> sub-element of <channel>
<image> is an optional sub-element of <channel>, which contains three required and three optional sub-elements.
<url> is the URL of a GIF, JPEG or PNG image that represents the channel.
<title> describes the image, it's used in the ALT attribute of the HTML <img> tag when the channel is rendered in HTML.
<link> is the URL of the site, when the channel is rendered, the image is a link to the site. (Note, in practice the image <title> and <link> should have the same value as the channel's <title> and <link>.
Optional elements include <width> and <height>, numbers, indicating the width and height of the image in pixels. <description> contains text that is included in the TITLE attribute of the link formed around the image in the HTML rendering.
Maximum value for width is 144, default value is 88.
Maximum value for height is 400, default value is 31.*/
type Image struct {
	URL         string `xml:"url"`         // required: location of the GIF/JPEG/PNG image
	Title       string `xml:"title"`       // required: used as the ALT text when rendered in HTML
	Link        string `xml:"link"`        // required: site URL the image links to
	Width       string `xml:"width"`       // optional: pixels, max 144, default 88; kept as raw text
	Height      string `xml:"height"`      // optional: pixels, max 400, default 31; kept as raw text
	Description string `xml:"description"` // optional: TITLE attribute of the rendered link
}
/*Cloud - <cloud> sub-element of <channel>
<cloud> is an optional sub-element of <channel>.
It specifies a web service that supports the rssCloud interface which can be implemented in HTTP-POST, XML-RPC or SOAP 1.1.
Its purpose is to allow processes to register with a cloud to be notified of updates to the channel, implementing a lightweight publish-subscribe protocol for RSS feeds.
<cloud domain="radio.xmlstoragesystem.com" port="80" path="/RPC2" registerProcedure="xmlStorageSystem.rssPleaseNotify" protocol="xml-rpc" />
In this example, to request notification on the channel it appears in, you would send an XML-RPC message to radio.xmlstoragesystem.com on port 80, with a path of /RPC2. The procedure to call is xmlStorageSystem.rssPleaseNotify.
A full explanation of this element and the rssCloud interface is here.*/
type Cloud struct {
	// Intentionally empty: the <cloud> element is recognized during decoding,
	// but none of its attributes (domain, port, path, registerProcedure,
	// protocol) are currently captured.
}
/*Item - Elements of <item>
A channel may contain any number of <item>s. An item may represent a "story" -- much like a story in a newspaper or magazine; if so its description is a synopsis of the story, and the link points to the full story. An item may also be complete in itself, if so, the description contains the text (entity-encoded HTML is allowed), and the link and title may be omitted. All elements of an item are optional, however at least one of title or description must be present.
Element Description Example
title The title of the item. Venice Film Festival Tries to Quit Sinking
link The URL of the item. http://www.nytimes.com/2002/09/07/movies/07FEST.html
description The item synopsis. Some of the most heated chatter at the Venice Film Festival this week was about the way that the arrival of the stars at the Palazzo del Cinema was being staged.
author Email address of the author of the item. More. <EMAIL>
category Includes the item in one or more categories. More. Simpsons Characters
comments URL of a page for comments relating to the item. More. http://www.myblog.org/cgi-local/mt/mt-comments.cgi?entry_id=290
enclosure Describes a media object that is attached to the item. More. <enclosure url="http://live.curry.com/mp3/celebritySCms.mp3" length="1069871" type="audio/mpeg"/>
guid A string that uniquely identifies the item. More. <guid isPermaLink="true">http://inessential.com/2002/09/01.php#a2</guid>
pubDate Indicates when the item was published. More. Sun, 19 May 2002 15:21:36 GMT
source The RSS channel that the item came from. More. <source url="http://www.quotationspage.com/data/qotd.rss">Quotes of the Day</source>
*/
type Item struct {
	Title       string    `xml:"title"`       // item headline (optional if Description is present)
	Author      string    `xml:"author"`      // email address of the item's author
	Description string    `xml:"description"` // synopsis, or full entity-encoded content
	Link        string    `xml:"link"`        // URL of the full item
	PubDate     string    `xml:"pubDate"`     // publication date, kept as raw feed text
	GUID        string    `xml:"guid"`        // unique identifier; used for feed de-duplication
	Category    string    `xml:"category"`
	Comments    string    `xml:"comments"` // URL of a comments page for the item
	Enclosure   Enclosure `xml:"enclosure"` // attached media object, if any
	Source      string    `xml:"source"`   // RSS channel the item came from
}
/*TextInput - <textInput> sub-element of <channel>
A channel may optionally contain a <textInput> sub-element, which contains four required sub-elements.
<title> -- The label of the Submit button in the text input area.
<description> -- Explains the text input area.
<name> -- The name of the text object in the text input area.
<link> -- The URL of the CGI script that processes text input requests.
The purpose of the <textInput> element is something of a mystery. You can use it to specify a search engine box. Or to allow a reader to provide feedback. Most aggregators ignore it.
*/
type TextInput struct {
	Title       string `xml:"title"`       // label of the Submit button
	Description string `xml:"description"` // explains the text input area
	Name        string `xml:"name"`        // name of the text object
	Link        string `xml:"link"`        // URL of the CGI script that processes input
}
/*Hour -
An XML element that contains up to 24 <hour> sub-elements whose value is a number between 0 and 23,
representing a time in GMT, when aggregators, if they support the feature,
may not read the channel on hours listed in the skipHours element.
The hour beginning at midnight is hour zero.
http://backend.userland.com/skipHoursDays#skiphours
*/
type Hour struct {
	Hour string `xml:"hour"` // hour in GMT, 0-23 (0 = midnight), kept as raw text
}
/*Day -
skipDays
An XML element that contains up to seven <day> sub-elements whose value is Monday,
Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday. Aggregators may not read
the channel during days listed in the skipDays element.
http://backend.userland.com/skipHoursDays#skiphours
*/
type Day struct {
	Day string `xml:"day"` // weekday name, e.g. "Monday"
}
/*Enclosure -
<enclosure> sub-element of <item>
<enclosure> is an optional sub-element of <item>.
It has three required attributes. url says where the enclosure is located, length says how big it is in bytes, and type says what its type is, a standard MIME type.
The url must be an http url.
<enclosure url="http://www.scripting.com/mp3s/weatherReportSuite.mp3" length="12216320" type="audio/mpeg" />
A use-case narrative for this element is here
*/
type Enclosure struct {
	URL    string `xml:"url,attr"`    // required: http URL of the media object
	Length string `xml:"length,attr"` // required: size in bytes, kept as raw text
	Type   string `xml:"type,attr"`   // required: standard MIME type
}
// RSSV2ParseString parses an RSS 2.0 XML document from s and returns the
// resulting RSSV2 object. An empty input yields an empty (zero-value) feed
// rather than an error. Non-UTF-8 documents are handled via a charset-aware
// reader.
func RSSV2ParseString(s string) (*RSSV2, error) {
	var rss RSSV2
	if s == "" {
		return &rss, nil
	}
	decoder := xml.NewDecoder(strings.NewReader(s))
	decoder.CharsetReader = charset.NewReaderLabel
	if err := decoder.Decode(&rss); err != nil {
		return nil, err
	}
	return &rss, nil
}
// RSSV2CompareItemsBetweenOldAndNew - This function will used to compare 2 RSS xml item objects
// and will return a list of items that are specifically in the newer feed but not in
// the older feed
func RSSV2CompareItemsBetweenOldAndNew(oldRSS *RSSV2, newRSS *RSSV2) []Item {
itemList := []Item{}
for _, item1 := range newRSS.Channel.ItemList {
exists := false
for _, item2 := range oldRSS.Channel.ItemList {
if len(item1.GUID) > 0 && item1.GUID == item2.GUID {
exists = true
break
} else if item1.PubDate == item2.PubDate && item1.Title == item2.Title {
exists = true
break
}
}
if !exists {
itemList = append(itemList, item1)
}
}
return itemList
} | server/rss-v2-parser.go | 0.684897 | 0.410166 | rss-v2-parser.go | starcoder |
package typeutil
import (
"errors"
"fmt"
"strconv"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"go.uber.org/zap"
)
// GetMaxLengthOfVarLengthField returns the value of the "max_length_per_row"
// type parameter of a variable-length field. It fails if the field is not a
// variable-length type, or if the parameter is missing or not numeric.
func GetMaxLengthOfVarLengthField(fieldSchema *schemapb.FieldSchema) (int, error) {
	const maxLengthPerRowKey = "max_length_per_row"

	params := make(map[string]string, len(fieldSchema.TypeParams))
	for _, p := range fieldSchema.TypeParams {
		params[p.Key] = p.Value
	}

	// Currently VarChar is the only supported variable-length type.
	if fieldSchema.DataType != schemapb.DataType_VarChar {
		return 0, fmt.Errorf("field %s is not a variable-length type", fieldSchema.DataType.String())
	}
	value, ok := params[maxLengthPerRowKey]
	if !ok {
		return 0, fmt.Errorf("the max_length_per_row was not specified, field type is %s", fieldSchema.DataType.String())
	}
	maxLength, err := strconv.Atoi(value)
	if err != nil {
		return 0, err
	}
	return maxLength, nil
}
// EstimateSizePerRecord returns the estimated size, in bytes, of a single
// record in a collection described by schema. Fixed-width types use their
// byte width; VarChar fields use the "max_length_per_row" type parameter;
// vector fields use the "dim" type parameter (dim/8 bytes for binary
// vectors, 4*dim for float vectors). Fields of any other type contribute
// nothing. On error the returned size is 0 and must be ignored (previously
// some paths returned -1 and others 0; error returns are now consistent).
func EstimateSizePerRecord(schema *schemapb.CollectionSchema) (int, error) {
	res := 0
	for _, fs := range schema.Fields {
		switch fs.DataType {
		case schemapb.DataType_Bool, schemapb.DataType_Int8:
			res++
		case schemapb.DataType_Int16:
			res += 2
		case schemapb.DataType_Int32, schemapb.DataType_Float:
			res += 4
		case schemapb.DataType_Int64, schemapb.DataType_Double:
			res += 8
		case schemapb.DataType_VarChar:
			maxLengthPerRow, err := GetMaxLengthOfVarLengthField(fs)
			if err != nil {
				return 0, err
			}
			res += maxLengthPerRow
		case schemapb.DataType_BinaryVector:
			for _, kv := range fs.TypeParams {
				if kv.Key == "dim" {
					v, err := strconv.Atoi(kv.Value)
					if err != nil {
						return 0, err
					}
					// Binary vectors pack 8 dimensions per byte.
					res += v / 8
					break
				}
			}
		case schemapb.DataType_FloatVector:
			for _, kv := range fs.TypeParams {
				if kv.Key == "dim" {
					v, err := strconv.Atoi(kv.Value)
					if err != nil {
						return 0, err
					}
					// 4 bytes per float32 dimension.
					res += v * 4
					break
				}
			}
		}
	}
	return res, nil
}
// SchemaHelper provides methods to get the schema of fields by name or ID.
type SchemaHelper struct {
	schema           *schemapb.CollectionSchema
	nameOffset       map[string]int // field name -> position in schema.Fields
	idOffset         map[int64]int  // field ID -> position in schema.Fields
	primaryKeyOffset int            // position of the primary key field; -1 if none
}
// CreateSchemaHelper builds a SchemaHelper indexing the fields of schema by
// name and by ID. It fails on a nil schema, duplicated field names or IDs,
// or more than one primary key.
func CreateSchemaHelper(schema *schemapb.CollectionSchema) (*SchemaHelper, error) {
	if schema == nil {
		return nil, errors.New("schema is nil")
	}
	helper := &SchemaHelper{
		schema:           schema,
		nameOffset:       make(map[string]int),
		idOffset:         make(map[int64]int),
		primaryKeyOffset: -1,
	}
	for offset, field := range schema.Fields {
		if _, dup := helper.nameOffset[field.Name]; dup {
			return nil, errors.New("duplicated fieldName: " + field.Name)
		}
		if _, dup := helper.idOffset[field.FieldID]; dup {
			return nil, errors.New("duplicated fieldID: " + strconv.FormatInt(field.FieldID, 10))
		}
		helper.nameOffset[field.Name] = offset
		helper.idOffset[field.FieldID] = offset
		if !field.IsPrimaryKey {
			continue
		}
		if helper.primaryKeyOffset != -1 {
			return nil, errors.New("primary key is not unique")
		}
		helper.primaryKeyOffset = offset
	}
	return helper, nil
}
// GetPrimaryKeyField returns the schema of the primary key field, or an
// error if the collection schema declares none.
func (helper *SchemaHelper) GetPrimaryKeyField() (*schemapb.FieldSchema, error) {
	if helper.primaryKeyOffset < 0 {
		return nil, fmt.Errorf("failed to get primary key field: no primary in schema")
	}
	return helper.schema.Fields[helper.primaryKeyOffset], nil
}
// GetFieldFromName looks up a field schema by its name.
func (helper *SchemaHelper) GetFieldFromName(fieldName string) (*schemapb.FieldSchema, error) {
	offset, found := helper.nameOffset[fieldName]
	if !found {
		return nil, fmt.Errorf("failed to get field schema by name: fieldName(%s) not found", fieldName)
	}
	return helper.schema.Fields[offset], nil
}
// GetFieldFromID looks up a field schema by its field ID.
func (helper *SchemaHelper) GetFieldFromID(fieldID int64) (*schemapb.FieldSchema, error) {
	offset, found := helper.idOffset[fieldID]
	if !found {
		return nil, fmt.Errorf("fieldID(%d) not found", fieldID)
	}
	return helper.schema.Fields[offset], nil
}
// GetVectorDimFromID returns the "dim" type parameter of the vector field
// with the given ID. It fails if the field does not exist, is not a vector
// type, or lacks a numeric "dim" parameter.
func (helper *SchemaHelper) GetVectorDimFromID(fieldID int64) (int, error) {
	sch, err := helper.GetFieldFromID(fieldID)
	if err != nil {
		return 0, err
	}
	if !IsVectorType(sch.DataType) {
		return 0, fmt.Errorf("field type = %s not has dim", schemapb.DataType_name[int32(sch.DataType)])
	}
	for _, kv := range sch.TypeParams {
		if kv.Key != "dim" {
			continue
		}
		dim, convErr := strconv.Atoi(kv.Value)
		if convErr != nil {
			return 0, convErr
		}
		return dim, nil
	}
	return 0, fmt.Errorf("fieldID(%d) not has dim", fieldID)
}
// IsVectorType reports whether dataType is one of the vector types.
func IsVectorType(dataType schemapb.DataType) bool {
	return dataType == schemapb.DataType_FloatVector ||
		dataType == schemapb.DataType_BinaryVector
}
// IsIntegerType reports whether dataType is one of the signed integer types.
func IsIntegerType(dataType schemapb.DataType) bool {
	return dataType == schemapb.DataType_Int8 ||
		dataType == schemapb.DataType_Int16 ||
		dataType == schemapb.DataType_Int32 ||
		dataType == schemapb.DataType_Int64
}
// IsFloatingType reports whether dataType is one of the floating-point types.
func IsFloatingType(dataType schemapb.DataType) bool {
	return dataType == schemapb.DataType_Float ||
		dataType == schemapb.DataType_Double
}
// IsBoolType reports whether dataType is the boolean type.
func IsBoolType(dataType schemapb.DataType) bool {
	return dataType == schemapb.DataType_Bool
}
// AppendFieldData appends, for every field in src, the value at row idx onto
// the corresponding entry of dst. dst and src must be parallel slices:
// dst[i] accumulates rows for src[i]. A nil or type-less dst[i] is lazily
// allocated on first use, copying the field metadata from src[i]. Binary
// vectors copy dim/8 bytes per row; float vectors copy dim float32s per row.
// Unsupported scalar/vector payload types are logged and skipped.
func AppendFieldData(dst []*schemapb.FieldData, src []*schemapb.FieldData, idx int64) {
	for i, fieldData := range src {
		switch fieldType := fieldData.Field.(type) {
		case *schemapb.FieldData_Scalars:
			// Lazily create the destination scalar column on first append.
			if dst[i] == nil || dst[i].GetScalars() == nil {
				dst[i] = &schemapb.FieldData{
					Type:      fieldData.Type,
					FieldName: fieldData.FieldName,
					FieldId:   fieldData.FieldId,
					Field: &schemapb.FieldData_Scalars{
						Scalars: &schemapb.ScalarField{},
					},
				}
			}
			dstScalar := dst[i].GetScalars()
			// Each case seeds a fresh typed array on first use, then appends.
			switch srcScalar := fieldType.Scalars.Data.(type) {
			case *schemapb.ScalarField_BoolData:
				if dstScalar.GetBoolData() == nil {
					dstScalar.Data = &schemapb.ScalarField_BoolData{
						BoolData: &schemapb.BoolArray{
							Data: []bool{srcScalar.BoolData.Data[idx]},
						},
					}
				} else {
					dstScalar.GetBoolData().Data = append(dstScalar.GetBoolData().Data, srcScalar.BoolData.Data[idx])
				}
			case *schemapb.ScalarField_IntData:
				if dstScalar.GetIntData() == nil {
					dstScalar.Data = &schemapb.ScalarField_IntData{
						IntData: &schemapb.IntArray{
							Data: []int32{srcScalar.IntData.Data[idx]},
						},
					}
				} else {
					dstScalar.GetIntData().Data = append(dstScalar.GetIntData().Data, srcScalar.IntData.Data[idx])
				}
			case *schemapb.ScalarField_LongData:
				if dstScalar.GetLongData() == nil {
					dstScalar.Data = &schemapb.ScalarField_LongData{
						LongData: &schemapb.LongArray{
							Data: []int64{srcScalar.LongData.Data[idx]},
						},
					}
				} else {
					dstScalar.GetLongData().Data = append(dstScalar.GetLongData().Data, srcScalar.LongData.Data[idx])
				}
			case *schemapb.ScalarField_FloatData:
				if dstScalar.GetFloatData() == nil {
					dstScalar.Data = &schemapb.ScalarField_FloatData{
						FloatData: &schemapb.FloatArray{
							Data: []float32{srcScalar.FloatData.Data[idx]},
						},
					}
				} else {
					dstScalar.GetFloatData().Data = append(dstScalar.GetFloatData().Data, srcScalar.FloatData.Data[idx])
				}
			case *schemapb.ScalarField_DoubleData:
				if dstScalar.GetDoubleData() == nil {
					dstScalar.Data = &schemapb.ScalarField_DoubleData{
						DoubleData: &schemapb.DoubleArray{
							Data: []float64{srcScalar.DoubleData.Data[idx]},
						},
					}
				} else {
					dstScalar.GetDoubleData().Data = append(dstScalar.GetDoubleData().Data, srcScalar.DoubleData.Data[idx])
				}
			default:
				log.Error("Not supported field type", zap.String("field type", fieldData.Type.String()))
			}
		case *schemapb.FieldData_Vectors:
			dim := fieldType.Vectors.Dim
			// Lazily create the destination vector column on first append.
			if dst[i] == nil || dst[i].GetVectors() == nil {
				dst[i] = &schemapb.FieldData{
					Type:      fieldData.Type,
					FieldName: fieldData.FieldName,
					FieldId:   fieldData.FieldId,
					Field: &schemapb.FieldData_Vectors{
						Vectors: &schemapb.VectorField{
							Dim: dim,
						},
					},
				}
			}
			dstVector := dst[i].GetVectors()
			switch srcVector := fieldType.Vectors.Data.(type) {
			case *schemapb.VectorField_BinaryVector:
				if dstVector.GetBinaryVector() == nil {
					// Copy rather than alias: the source row slice shares
					// backing storage with the whole source column.
					srcToCopy := srcVector.BinaryVector[idx*(dim/8) : (idx+1)*(dim/8)]
					dstVector.Data = &schemapb.VectorField_BinaryVector{
						BinaryVector: make([]byte, len(srcToCopy)),
					}
					copy(dstVector.Data.(*schemapb.VectorField_BinaryVector).BinaryVector, srcToCopy)
				} else {
					dstBinaryVector := dstVector.Data.(*schemapb.VectorField_BinaryVector)
					dstBinaryVector.BinaryVector = append(dstBinaryVector.BinaryVector, srcVector.BinaryVector[idx*(dim/8):(idx+1)*(dim/8)]...)
				}
			case *schemapb.VectorField_FloatVector:
				if dstVector.GetFloatVector() == nil {
					srcToCopy := srcVector.FloatVector.Data[idx*dim : (idx+1)*dim]
					dstVector.Data = &schemapb.VectorField_FloatVector{
						FloatVector: &schemapb.FloatArray{
							Data: make([]float32, len(srcToCopy)),
						},
					}
					copy(dstVector.Data.(*schemapb.VectorField_FloatVector).FloatVector.Data, srcToCopy)
				} else {
					dstVector.GetFloatVector().Data = append(dstVector.GetFloatVector().Data, srcVector.FloatVector.Data[idx*dim:(idx+1)*dim]...)
				}
			default:
				log.Error("Not supported field type", zap.String("field type", fieldData.Type.String()))
			}
		}
	}
}
func FillFieldBySchema(columns []*schemapb.FieldData, schema *schemapb.CollectionSchema) error {
if len(columns) != len(schema.GetFields()) {
return fmt.Errorf("len(columns) mismatch the len(fields), len(columns): %d, len(fields): %d",
len(columns), len(schema.GetFields()))
}
for idx, f := range schema.GetFields() {
columns[idx].FieldName = f.Name
columns[idx].Type = f.DataType
columns[idx].FieldId = f.FieldID
}
return nil
} | internal/util/typeutil/schema.go | 0.658637 | 0.441432 | schema.go | starcoder |
package match
import (
"go/types"
"github.com/yndd/ndd-tools/internal/comments"
"github.com/yndd/ndd-tools/internal/fields"
)
// An Object matcher is a function that returns true if the supplied object
// matches. Matchers can be composed with AllOf and AnyOf.
type Object func(o types.Object) bool
// Managed returns an Object matcher that returns true if the supplied Object
// has the shape of a ndd managed resource: embedded TypeMeta and ObjectMeta,
// a Spec embedding a resource spec, and a Status embedding a resource status.
func Managed() Object {
	specMatcher := fields.IsSpec().And(fields.HasFieldThat(
		fields.IsResourceSpec().And(fields.IsEmbedded()),
	))
	statusMatcher := fields.IsStatus().And(fields.HasFieldThat(
		fields.IsResourceStatus().And(fields.IsEmbedded()),
	))
	return func(o types.Object) bool {
		return fields.Has(o,
			fields.IsTypeMeta().And(fields.IsEmbedded()),
			fields.IsObjectMeta().And(fields.IsEmbedded()),
			specMatcher,
			statusMatcher,
		)
	}
}
// ManagedList returns an Object matcher that returns true if the supplied
// Object is a list of ndd managed resources: embedded TypeMeta plus an Items
// slice whose elements look like managed resources.
func ManagedList() Object {
	itemMatcher := fields.IsItems().And(fields.IsSlice()).And(fields.HasFieldThat(
		fields.IsTypeMeta().And(fields.IsEmbedded()),
		fields.IsObjectMeta().And(fields.IsEmbedded()),
		fields.IsSpec().And(fields.HasFieldThat(
			fields.IsResourceSpec().And(fields.IsEmbedded()),
		)),
		fields.IsStatus().And(fields.HasFieldThat(
			fields.IsResourceStatus().And(fields.IsEmbedded()),
		)),
	))
	return func(o types.Object) bool {
		return fields.Has(o,
			fields.IsTypeMeta().And(fields.IsEmbedded()),
			itemMatcher,
		)
	}
}
// NetworkNode returns an Object matcher that returns true if the supplied
// Object has the shape of a NetworkNode: embedded TypeMeta and ObjectMeta, a
// Spec, and a Status embedding a network node status.
func NetworkNode() Object {
	statusMatcher := fields.IsStatus().And(fields.HasFieldThat(
		fields.IsNetworkNodeStatus().And(fields.IsEmbedded()),
	))
	return func(o types.Object) bool {
		return fields.Has(o,
			fields.IsTypeMeta().And(fields.IsEmbedded()),
			fields.IsObjectMeta().And(fields.IsEmbedded()),
			fields.IsSpec(),
			statusMatcher,
		)
	}
}
// NetworkNodeUsage returns an Object matcher that returns true if the
// supplied Object is a NetworkNodeUsage.
func NetworkNodeUsage() Object {
	typeMeta := fields.IsTypeMeta().And(fields.IsEmbedded())
	objectMeta := fields.IsObjectMeta().And(fields.IsEmbedded())
	usage := fields.IsNetworkNodeUsage().And(fields.IsEmbedded())
	return func(o types.Object) bool {
		return fields.Has(o, typeMeta, objectMeta, usage)
	}
}
// NetworkNodeUsageList returns an Object matcher that returns true if the
// supplied Object is a list of NetworkNode usages: embedded TypeMeta plus an
// Items slice whose elements look like NetworkNodeUsage objects.
func NetworkNodeUsageList() Object {
	itemMatcher := fields.IsItems().And(fields.IsSlice()).And(fields.HasFieldThat(
		fields.IsTypeMeta().And(fields.IsEmbedded()),
		fields.IsObjectMeta().And(fields.IsEmbedded()),
		fields.IsNetworkNodeUsage().And(fields.IsEmbedded()),
	))
	return func(o types.Object) bool {
		return fields.Has(o,
			fields.IsTypeMeta().And(fields.IsEmbedded()),
			itemMatcher,
		)
	}
}
// HasMarker returns an Object matcher that returns true if the supplied
// Object has a comment marker k with the value v. Markers attached to the
// object itself are consulted first, then markers preceding it. Comment
// markers are read from the supplied Comments.
func HasMarker(c comments.Comments, k, v string) Object {
	containsV := func(vals []string) bool {
		for _, val := range vals {
			if val == v {
				return true
			}
		}
		return false
	}
	return func(o types.Object) bool {
		if containsV(comments.ParseMarkers(c.For(o))[k]) {
			return true
		}
		return containsV(comments.ParseMarkers(c.Before(o))[k])
	}
}
// DoesNotHaveMarker returns an Object matcher that is the negation of
// HasMarker: it returns true if the supplied Object does not have a comment
// marker k with the value v.
func DoesNotHaveMarker(c comments.Comments, k, v string) Object {
	has := HasMarker(c, k, v)
	return func(o types.Object) bool {
		return !has(o)
	}
}
// AllOf returns an Object matcher that is satisfied only when every one of
// the supplied matchers is satisfied; evaluation stops at the first failure.
// With no matchers it always matches.
func AllOf(match ...Object) Object {
	return func(o types.Object) bool {
		for _, matches := range match {
			if !matches(o) {
				return false
			}
		}
		return true
	}
}
// AnyOf returns an Object matcher that returns true if any of the supplied
// Object matchers return true.
func AnyOf(match ...Object) Object {
return func(o types.Object) bool {
for _, fn := range match {
if fn(o) {
return true
}
}
return false
}
} | internal/match/match.go | 0.732687 | 0.454714 | match.go | starcoder |
package taipei
// As defined by the bittorrent protocol, this bitset is big-endian, such
// that the high bit of the first byte is block 0.
type Bitset struct {
	b        []byte // backing storage, (n+7)/8 bytes
	n        int    // number of valid bits
	endIndex int    // index of the final, partially used byte; -1 if the last byte is fully used
	endMask  byte   // Which bits of the last byte are valid
}

// NewBitset returns an empty bitset capable of holding n bits.
func NewBitset(n int) *Bitset {
	endIndex, endOffset := n>>3, n&7
	endMask := ^byte(255 >> byte(endOffset))
	if endOffset == 0 {
		endIndex = -1
	}
	return &Bitset{make([]byte, (n+7)>>3), n, endIndex, endMask}
}

// NewBitsetFromBytes creates a new bitset from a given byte stream. Returns
// nil if the data is invalid in some way: wrong length, or stray bits set
// past bit n-1 in the final byte.
func NewBitsetFromBytes(n int, data []byte) *Bitset {
	bitset := NewBitset(n)
	if len(bitset.b) != len(data) {
		return nil
	}
	copy(bitset.b, data)
	if bitset.endIndex >= 0 && bitset.b[bitset.endIndex]&(^bitset.endMask) != 0 {
		return nil
	}
	return bitset
}

// Set turns bit index on. Panics if index is outside [0, n).
func (b *Bitset) Set(index int) {
	if index < 0 || index >= b.n {
		panic("Index out of range.")
	}
	b.b[index>>3] |= byte(128 >> byte(index&7))
}

// Clear turns bit index off. Panics if index is outside [0, n).
func (b *Bitset) Clear(index int) {
	if index < 0 || index >= b.n {
		panic("Index out of range.")
	}
	b.b[index>>3] &= ^byte(128 >> byte(index&7))
}

// IsSet reports whether bit index is on. Panics if index is outside [0, n).
func (b *Bitset) IsSet(index int) bool {
	if index < 0 || index >= b.n {
		panic("Index out of range.")
	}
	return (b.b[index>>3] & byte(128>>byte(index&7))) != 0
}

// AndNot clears in b every bit that is set in b2. Panics if the bitsets
// differ in size.
func (b *Bitset) AndNot(b2 *Bitset) {
	if b.n != b2.n {
		panic("Unequal bitset sizes")
	}
	for i := 0; i < len(b.b); i++ {
		b.b[i] = b.b[i] & ^b2.b[i]
	}
	b.clearEnd()
}

// clearEnd zeroes the unused trailing bits of the final byte, restoring the
// invariant that only the first n bits may be set.
func (b *Bitset) clearEnd() {
	if b.endIndex >= 0 {
		b.b[b.endIndex] &= b.endMask
	}
}

// IsEndValid reports whether none of the valid bits of the final byte are
// set.
//
// NOTE(review): this masks with endMask (the VALID bits), whereas
// NewBitsetFromBytes validates with ^endMask (the spill-over bits) — confirm
// the intended semantics with the callers before changing either.
func (b *Bitset) IsEndValid() bool {
	if b.endIndex >= 0 {
		return (b.b[b.endIndex] & b.endMask) == 0
	}
	return true
}

// FindNextSet returns the index of the first set bit at or after index, or
// -1 if there is none. Whole zero bytes are skipped, resolving the old
// "make this fast" TODO while keeping the bit-exact semantics of the
// previous linear scan.
func (b *Bitset) FindNextSet(index int) int {
	for i := index; i < b.n; {
		byteIndex := i >> 3
		// Mask off the bits that precede i within this byte.
		word := b.b[byteIndex] & byte(255>>byte(i&7))
		if word == 0 {
			// Nothing here; jump to the start of the next byte.
			i = (byteIndex + 1) << 3
			continue
		}
		// Locate the most significant set bit (bit 0 is the high bit).
		j := byteIndex << 3
		for mask := byte(128); word&mask == 0; mask >>= 1 {
			j++
		}
		if j >= b.n {
			return -1
		}
		return j
	}
	return -1
}

// FindNextClear returns the index of the first clear bit at or after index,
// or -1 if there is none. Fully set bytes are skipped (same TODO as above).
func (b *Bitset) FindNextClear(index int) int {
	for i := index; i < b.n; {
		byteIndex := i >> 3
		// Treat the bits that precede i within this byte as set.
		word := b.b[byteIndex] | ^byte(255>>byte(i&7))
		if word == 255 {
			i = (byteIndex + 1) << 3
			continue
		}
		// Locate the most significant clear bit.
		j := byteIndex << 3
		for mask := byte(128); word&mask != 0; mask >>= 1 {
			j++
		}
		if j >= b.n {
			return -1
		}
		return j
	}
	return -1
}

// Bytes returns the backing storage without copying; callers must not
// modify it.
func (b *Bitset) Bytes() []byte {
	return b.b
}
package optimus
import "sync"
// Table is a representation of a table of data.
type Table interface {
	// Rows returns a channel that provides the Rows in the table. The
	// channel is closed when no more Rows will be sent.
	Rows() <-chan Row
	// Err returns the first non-EOF error that was encountered by the Table.
	// It is only meaningful once the Rows channel has been closed.
	Err() error
	// Stop signifies that a Table should stop sending Rows down its channel.
	// A Table is also responsible for calling Stop on any upstream Tables it knows about.
	// Stop should be idempotent. It's expected that Stop will never be called by a consumer of a
	// Table unless that consumer is also a Table. It can be used to Stop all upstream Tables in
	// the event of an error that needs to halt the pipeline.
	Stop()
}
// A Sink function takes a Table and consumes all of its Rows, returning any
// error encountered while doing so.
type Sink func(Table) error

// Row is a representation of a line of data in a Table.
type Row map[string]interface{}

// TransformFunc is a function that can be applied to a Table to transform it. It should receive the
// Rows from in and may send any number of Rows to out. It should not return until it has finished
// all work (received all the Rows it's going to receive, sent all the Rows it's going to send).
// A non-nil return value is surfaced as the transformed Table's Err.
type TransformFunc func(in <-chan Row, out chan<- Row) error
// Transform returns a new Table that provides all the Rows of the input Table
// transformed with the TransformFunc. The transformation runs in a background
// goroutine; consume the result via Rows() and check Err() afterwards.
func Transform(source Table, transform TransformFunc) Table {
	return newTransformedTable(source, transform)
}
// transformedTable is the Table implementation returned by Transform. It
// reads Rows from an upstream source, runs them through a TransformFunc in a
// background goroutine (see start), and exposes the results via rows.
type transformedTable struct {
	source Table      // upstream table feeding the transform
	err error         // first non-EOF error; set before rows is closed
	rows chan Row     // output channel, consumed via Rows()
	m sync.Mutex      // guards stopped
	stopped bool      // set once Stop has been called
}
// Rows returns the channel on which the transformed Rows are delivered; it
// is closed when the transform finishes or the table is stopped.
func (t *transformedTable) Rows() <-chan Row {
	return t.rows
}
// Err returns the first error recorded by the transform or its source. It is
// only meaningful once the Rows channel has been closed.
func (t *transformedTable) Err() error {
	return t.err
}
// Stop makes the Table stop sending Rows and stops the upstream Table. It is
// idempotent and safe for concurrent use.
//
// Bug fix: the flag was previously read and written in two separate lock
// sections, so two concurrent callers could both observe stopped == false
// and both invoke source.Stop(). The check and the set now happen in a
// single critical section, guaranteeing at most one call to source.Stop().
func (t *transformedTable) Stop() {
	t.m.Lock()
	if t.stopped {
		t.m.Unlock()
		return
	}
	t.stopped = true
	t.m.Unlock()
	t.source.Stop()
}
// drain consumes and discards everything remaining in c, returning once the
// channel is closed. It is used during shutdown to unblock senders.
// (Idiom fix: `for _ = range c` is flagged by gofmt/vet; `for range c` is
// the canonical form.)
func drain(c <-chan Row) {
	for range c {
		// Discard everything left in the channel.
	}
}
// start drives the transform pipeline to completion. It bridges the source
// Table and the TransformFunc through dedicated in/out channels, forwards
// the transform's output to t.rows, and records the first error from either
// the transform or the source. On return it has stopped the source, drained
// any outstanding Rows, and closed t.rows.
func (t *transformedTable) start(transform TransformFunc) {
	// A level of indirection is necessary between the i/o channels and the TransformFunc so that
	// the TransformFunc doesn't need to know about the stop state of any of the Tables.
	in := make(chan Row)
	out := make(chan Row)
	errChan := make(chan error)
	doneChan := make(chan struct{})
	stop := func() {
		t.Stop()
		drain(t.source.Rows())
		drain(out)
		close(t.rows)
	}
	defer stop()
	// Once the transform function has returned, close out and error channels
	go func() {
		defer close(errChan)
		defer close(out)
		if err := transform(in, out); err != nil {
			errChan <- err
		}
	}()
	// Copy from the TransformFunc's out channel to the Table's out channel, then signal done
	go func() {
		defer func() {
			doneChan <- struct{}{}
		}()
		for row := range out {
			t.m.Lock()
			stopped := t.stopped
			t.m.Unlock()
			if stopped {
				continue
			}
			t.rows <- row
		}
	}()
	// Copy from the Table's source to the TransformFunc's in channel, then signal done
	go func() {
		defer func() {
			doneChan <- struct{}{}
		}()
		defer close(in)
		for row := range t.source.Rows() {
			t.m.Lock()
			stopped := t.stopped
			t.m.Unlock()
			if stopped {
				continue
			}
			in <- row
		}
	}()
	// errChan either delivers the transform's error or is closed on success;
	// ranging over it covers both cases.
	for err := range errChan {
		t.err = err
		return
	}
	// Wait for all channels to finish
	<-doneChan // Once to make sure we've consumed the output of the TransformFunc
	<-doneChan // Once to make sure we've consumed the output of the source Table
	if t.source.Err() != nil {
		t.err = t.source.Err()
	}
}
func newTransformedTable(source Table, transform TransformFunc) Table {
table := &transformedTable{
source: source,
rows: make(chan Row),
}
go table.start(transform)
return table
} | plugins/data/transform/optimus/optimus.go | 0.634656 | 0.518059 | optimus.go | starcoder |
package tree
import (
"fmt"
"sync"
)
// TreeNode is a single node in the binary search tree.
type TreeNode struct {
	value int
	left  *TreeNode
	right *TreeNode
}

// BinarySearchTree is a concurrency-safe binary search tree of ints guarded
// by a single RWMutex. Values equal to an existing value go into the right
// subtree.
type BinarySearchTree struct {
	root *TreeNode
	lock sync.RWMutex
}

// InsertElement adds value to the tree.
func (tree *BinarySearchTree) InsertElement(value int) {
	tree.lock.Lock()
	defer tree.lock.Unlock()
	node := &TreeNode{value, nil, nil}
	if tree.root == nil {
		tree.root = node
	} else {
		insertTreeNode(tree.root, node)
	}
}

// insertTreeNode recursively descends from tree (which must be non-nil) and
// links newTree at the correct leaf position. The old nil-guard here only
// assigned to the local parameter and was unreachable from its callers, so
// it has been removed.
func insertTreeNode(tree *TreeNode, newTree *TreeNode) {
	if newTree.value < tree.value {
		if tree.left == nil {
			tree.left = newTree
		} else {
			insertTreeNode(tree.left, newTree)
		}
	} else {
		if tree.right == nil {
			tree.right = newTree
		} else {
			insertTreeNode(tree.right, newTree)
		}
	}
}

// InOrderTraverseTree calls function on every value in ascending order.
func (tree *BinarySearchTree) InOrderTraverseTree(function func(int)) {
	tree.lock.RLock()
	defer tree.lock.RUnlock()
	inOrderTraverseTree(tree.root, function)
}

// inOrderTraverseTree visits the left subtree, the node, then the right subtree.
func inOrderTraverseTree(treeNode *TreeNode, function func(int)) {
	if treeNode != nil {
		inOrderTraverseTree(treeNode.left, function)
		function(treeNode.value)
		inOrderTraverseTree(treeNode.right, function)
	}
}

// PreOrderTraverseTree calls function on the node before its subtrees.
// Consistency fix: the traversal is read-only, so it now takes a read lock
// like InOrderTraverseTree (it previously took the write lock).
func (tree *BinarySearchTree) PreOrderTraverseTree(function func(int)) {
	tree.lock.RLock()
	defer tree.lock.RUnlock()
	preOrderTraverseTree(tree.root, function)
}

// preOrderTraverseTree visits the node, then its left and right subtrees.
func preOrderTraverseTree(treeNode *TreeNode, function func(int)) {
	if treeNode != nil {
		function(treeNode.value)
		preOrderTraverseTree(treeNode.left, function)
		preOrderTraverseTree(treeNode.right, function)
	}
}

// PostOrderTraverseTree calls function on the node after its subtrees
// (read lock, see PreOrderTraverseTree).
func (tree *BinarySearchTree) PostOrderTraverseTree(function func(int)) {
	tree.lock.RLock()
	defer tree.lock.RUnlock()
	postOrderTraverseTree(tree.root, function)
}

// postOrderTraverseTree visits the left and right subtrees, then the node.
func postOrderTraverseTree(treeNode *TreeNode, function func(int)) {
	if treeNode != nil {
		postOrderTraverseTree(treeNode.left, function)
		postOrderTraverseTree(treeNode.right, function)
		function(treeNode.value)
	}
}

// MinNode returns a pointer to the smallest value in the tree, or nil when
// the tree is empty. The pointer aliases the node's storage.
func (tree *BinarySearchTree) MinNode() *int {
	tree.lock.RLock()
	defer tree.lock.RUnlock()
	treeNode := tree.root
	if treeNode == nil {
		return (*int)(nil)
	}
	for {
		if treeNode.left == nil {
			return &treeNode.value
		}
		treeNode = treeNode.left
	}
}

// MaxNode returns a pointer to the largest value in the tree, or nil when
// the tree is empty.
//
// Bug fix: this method previously called lock.Unlock() on a mutex it never
// locked (and deferred a second Unlock), which panicked at runtime. It now
// takes and releases a read lock, mirroring MinNode.
func (tree *BinarySearchTree) MaxNode() *int {
	tree.lock.RLock()
	defer tree.lock.RUnlock()
	treenode := tree.root
	if treenode == nil {
		return (*int)(nil)
	}
	for {
		if treenode.right == nil {
			return &treenode.value
		}
		treenode = treenode.right
	}
}

// SearchNode reports whether key is present in the tree.
func (tree *BinarySearchTree) SearchNode(key int) bool {
	tree.lock.RLock()
	defer tree.lock.RUnlock()
	return search(tree.root, key)
}

// search recursively looks for key in the subtree rooted at tree.
func search(tree *TreeNode, key int) bool {
	if tree == nil {
		return false
	}
	if key < tree.value {
		return search(tree.left, key)
	}
	if key > tree.value {
		return search(tree.right, key)
	}
	return true
}

// RemoveNode deletes key from the tree if present.
//
// Bug fix: the return value of remove was previously discarded, so deleting
// the root node left tree.root pointing at the removed node. The new subtree
// root is now assigned back.
func (tree *BinarySearchTree) RemoveNode(key int) {
	tree.lock.Lock()
	defer tree.lock.Unlock()
	tree.root = remove(tree.root, key)
}

// remove deletes key from the subtree rooted at treeNode and returns the new
// subtree root. A node with two children is replaced by its in-order
// successor (the left-most node of the right subtree).
func remove(treeNode *TreeNode, key int) *TreeNode {
	if treeNode == nil {
		return nil
	}
	if key < treeNode.value {
		treeNode.left = remove(treeNode.left, key)
		return treeNode
	}
	if key > treeNode.value {
		treeNode.right = remove(treeNode.right, key)
		return treeNode
	}
	// Zero or one child: splice the (possibly nil) child into our place.
	if treeNode.left == nil {
		return treeNode.right
	}
	if treeNode.right == nil {
		return treeNode.left
	}
	// Two children: adopt the successor's value, then delete the successor
	// from the right subtree.
	leftMostRight := treeNode.right
	for leftMostRight.left != nil {
		leftMostRight = leftMostRight.left
	}
	treeNode.value = leftMostRight.value
	treeNode.right = remove(treeNode.right, treeNode.value)
	return treeNode
}

// String prints an indented sideways rendering of the tree to stdout.
// It takes a read lock: printing does not mutate the tree.
func (tree *BinarySearchTree) String() {
	tree.lock.RLock()
	defer tree.lock.RUnlock()
	fmt.Println("******************************************")
	stringify(tree.root, 0)
	fmt.Println("******************************************")
}

// stringify prints the subtree rooted at treeNode, indented by level.
func stringify(treeNode *TreeNode, level int) {
	if treeNode == nil {
		return
	}
	format := ""
	for i := 0; i < level; i++ {
		format += " "
	}
	format += "---[ "
	level++
	stringify(treeNode.left, level)
	fmt.Printf(format+"%d\n", treeNode.value)
	stringify(treeNode.right, level)
}
package year2021
import (
"io/ioutil"
"math"
"regexp"
"github.com/lanphiergm/adventofcodego/internal/utils"
)
// TrickShotPart1 computes the highest Y position the probe reaches while
// still hitting the target area described in the input file.
func TrickShotPart1(filename string) interface{} {
	xMin, xMax, yMin, yMax := parseTargetArea(filename)

	// Use a finite sum to determine Vx^2+Vx-2x = 0 then use the quadratic
	// formula, ignoring the negative root.
	vXLow := int((-1.0 + math.Sqrt(1.0+8.0*float64(xMin))) / 2.0)
	vXHigh := int((-1.0+math.Sqrt(1.0+8.0*float64(xMax)))/2.0) + 2

	// There may be a closed-form bound for the vertical velocity, but the
	// brute-force range below is fast enough in practice.
	const vYLow, vYHigh = 0, 200

	best := 0
	for vX := vXLow; vX <= vXHigh; vX++ {
		for vY := vYLow; vY <= vYHigh; vY++ {
			hit, peak := simulateTrickShot(vX, vY, xMin, xMax, yMin, yMax)
			if hit && peak > best {
				best = peak
			}
		}
	}
	return best
}

// simulateTrickShot steps a probe launched from the origin with velocity
// (vX, vY) until it either enters the target rectangle or overshoots it
// (past xMax or below yMin). It reports whether the target was hit and the
// highest Y position reached along the way.
func simulateTrickShot(vX, vY, xMin, xMax, yMin, yMax int) (bool, int) {
	x, y, peak := 0, 0, 0
	for {
		x += vX
		y += vY
		if y > peak {
			peak = y
		}
		// Drag reduces horizontal speed toward zero; gravity always pulls vY down.
		if vX > 0 {
			vX--
		}
		vY--
		if x >= xMin && x <= xMax && y >= yMin && y <= yMax {
			return true, peak
		}
		if x > xMax || y < yMin {
			return false, peak
		}
	}
}
// TrickShotPart2 counts the distinct initial velocities that cause the probe
// to hit the target area described in the input file.
func TrickShotPart2(filename string) interface{} {
	xMin, xMax, yMin, yMax := parseTargetArea(filename)

	// Generous brute-force bounds on the initial velocity.
	const (
		vXLow, vXHigh = 0, 300
		vYLow, vYHigh = -300, 300
	)

	hits := 0
	for vX0 := vXLow; vX0 <= vXHigh; vX0++ {
		for vY0 := vYLow; vY0 <= vYHigh; vY0++ {
			x, y := 0, 0
			vX, vY := vX0, vY0
			for {
				x += vX
				y += vY
				if vX > 0 {
					vX--
				}
				vY--
				if x >= xMin && x <= xMax && y >= yMin && y <= yMax {
					hits++
					break
				}
				// Past the target: no velocity change can bring it back.
				if x > xMax || y < yMin {
					break
				}
			}
		}
	}
	return hits
}
func parseTargetArea(filename string) (int, int, int, int) {
contents, _ := ioutil.ReadFile(filename)
rs := `target area: x=([^\.]*)\.\.([^,]*), y=([^\.]*)\.\.(.*)`
r := regexp.MustCompile(rs)
result := r.FindAllStringSubmatch(string(contents), -1)
return utils.Atoi(result[0][1]), utils.Atoi(result[0][2]),
utils.Atoi(result[0][3]), utils.Atoi(result[0][4])
} | internal/puzzles/year2021/day_17_trick_shot.go | 0.646014 | 0.40987 | day_17_trick_shot.go | starcoder |
package store
import (
"bytes"
"errors"
"fmt"
"math"
"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
)
const (
	// arrayLengthOverhead is extra capacity added on top of the desired bin count.
	arrayLengthOverhead = 64
	// arrayLengthGrowthIncrement is the relative growth factor used when resizing.
	arrayLengthGrowthIncrement = 0.1
	// Grow the bins with an extra growthBuffer bins to prevent growing too often
	growthBuffer = 128
)
// DenseStore is a dynamically growing contiguous (non-sparse) store. The number of bins are
// bound only by the size of the slice that can be allocated.
type DenseStore struct {
	bins []float64 // per-index counts; bins[i] holds the count for index i+offset
	count float64 // running total of all counts in the store
	offset int // logical index represented by bins[0]
	minIndex int // low end of the tracked index range (math.MaxInt32 when empty)
	maxIndex int // high end of the tracked index range (math.MinInt32 when empty)
}
// NewDenseStore returns an empty DenseStore; the sentinel min/max indices
// mark the store as holding no range yet.
func NewDenseStore() *DenseStore {
	return &DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32}
}
// Add increments the count of the bin at the given index by one.
func (s *DenseStore) Add(index int) {
	s.AddWithCount(index, float64(1))
}
// AddBin adds the bin's count at the bin's index; bins with a zero count
// are ignored.
func (s *DenseStore) AddBin(bin Bin) {
	if bin.count == 0 {
		return
	}
	s.AddWithCount(bin.index, bin.count)
}
// AddWithCount adds count at the given index, growing and shifting the
// backing array as needed. A zero count is a no-op.
func (s *DenseStore) AddWithCount(index int, count float64) {
	if count == 0 {
		return
	}
	arrayIndex := s.normalize(index)
	s.bins[arrayIndex] += count
	s.count += count
}
// normalize extends the store's range if necessary so that the counter of
// the specified index can be updated, and returns the index's position in
// the bins slice.
func (s *DenseStore) normalize(index int) int {
	if index < s.minIndex || index > s.maxIndex {
		s.extendRange(index, index)
	}
	return index - s.offset
}
// getNewLength returns the bins-slice length to allocate so that the range
// [newMinIndex, newMaxIndex] fits with headroom: the desired length plus
// arrayLengthOverhead, rounded up to a multiple of growthBuffer so the
// slice does not have to grow again on every small extension.
func (s *DenseStore) getNewLength(newMinIndex, newMaxIndex int) int {
	desiredLength := newMaxIndex - newMinIndex + 1
	// The previous expression divided by arrayLengthGrowthIncrement (0.1),
	// which collapses to desiredLength+arrayLengthOverhead-1 and never adds
	// the growth buffer promised by the constant's comment (growthBuffer was
	// otherwise unused); round up by growthBuffer instead.
	return int((float64(desiredLength+arrayLengthOverhead-1)/growthBuffer + 1) * growthBuffer)
}
// extendRange grows the store so that it can hold indices newMinIndex
// through newMaxIndex (widened to include the current range), allocating,
// growing, or re-centering the bins slice as needed.
func (s *DenseStore) extendRange(newMinIndex, newMaxIndex int) {
	newMinIndex = min(newMinIndex, s.minIndex)
	newMaxIndex = max(newMaxIndex, s.maxIndex)
	if s.IsEmpty() {
		// First allocation: size the slice and center the new range in it.
		initialLength := s.getNewLength(newMinIndex, newMaxIndex)
		s.bins = make([]float64, initialLength)
		s.offset = newMinIndex
		s.minIndex = newMinIndex
		s.maxIndex = newMaxIndex
		s.adjust(newMinIndex, newMaxIndex)
	} else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) {
		// The new range already fits within the allocated slice; just widen
		// the tracked bounds.
		s.minIndex = newMinIndex
		s.maxIndex = newMaxIndex
	} else {
		// To avoid shifting too often when nearing the capacity of the array,
		// we may grow it before we actually reach the capacity.
		newLength := s.getNewLength(newMinIndex, newMaxIndex)
		if newLength > len(s.bins) {
			tmpBins := make([]float64, newLength)
			copy(tmpBins, s.bins)
			s.bins = tmpBins
		}
		s.adjust(newMinIndex, newMaxIndex)
	}
}
// adjust makes the specified range fit within the existing bins slice
// without resizing it, shifting bins, offset, minIndex and maxIndex as
// needed.
func (s *DenseStore) adjust(newMinIndex, newMaxIndex int) {
	s.centerCounts(newMinIndex, newMaxIndex)
}
// centerCounts shifts the stored counts so that the midpoint of
// [newMinIndex, newMaxIndex] sits at the middle of the bins slice, then
// updates the tracked bounds.
func (s *DenseStore) centerCounts(newMinIndex, newMaxIndex int) {
	midIndex := newMinIndex + (newMaxIndex-newMinIndex+1)/2
	s.shiftCounts(s.offset + len(s.bins)/2 - midIndex)
	s.minIndex = newMinIndex
	s.maxIndex = newMaxIndex
}
// shiftCounts moves the populated counts by shift array slots (positive
// shifts toward higher array positions), zeroes the vacated slots, and
// adjusts offset so that logical indices are preserved.
func (s *DenseStore) shiftCounts(shift int) {
	minArrIndex := s.minIndex - s.offset
	maxArrIndex := s.maxIndex - s.offset
	// The built-in copy handles overlapping source/destination correctly.
	copy(s.bins[minArrIndex+shift:], s.bins[minArrIndex:maxArrIndex+1])
	if shift > 0 {
		// Clear the slots vacated at the low end of the range.
		s.resetBins(s.minIndex, s.minIndex+shift-1)
	} else {
		// Clear the slots vacated at the high end of the range.
		s.resetBins(s.maxIndex+shift+1, s.maxIndex)
	}
	s.offset -= shift
}
// resetBins zeroes every bin whose logical index lies in
// [fromIndex, toIndex].
func (s *DenseStore) resetBins(fromIndex, toIndex int) {
	lo, hi := fromIndex-s.offset, toIndex-s.offset
	for i := lo; i <= hi; i++ {
		s.bins[i] = 0
	}
}
// IsEmpty reports whether the store holds no counts.
func (s *DenseStore) IsEmpty() bool {
	return s.count == 0
}
// TotalCount returns the sum of all counts added to the store.
func (s *DenseStore) TotalCount() float64 {
	return s.count
}
// MinIndex returns the lowest index of the store's range, or an error if
// the store is empty.
func (s *DenseStore) MinIndex() (int, error) {
	if s.IsEmpty() {
		// Error strings follow Go convention: lowercase, no trailing period.
		return 0, errors.New("minIndex of empty store is undefined")
	}
	return s.minIndex, nil
}
// MaxIndex returns the highest index of the store's range, or an error if
// the store is empty.
func (s *DenseStore) MaxIndex() (int, error) {
	if s.IsEmpty() {
		// Error strings follow Go convention: lowercase, no trailing period.
		return 0, errors.New("maxIndex of empty store is undefined")
	}
	return s.maxIndex, nil
}
// KeyAtRank returns the index whose cumulative count first exceeds rank,
// scanning bins from the lowest index upward. If rank is greater than or
// equal to the total count, maxIndex is returned.
func (s *DenseStore) KeyAtRank(rank float64) int {
	var n float64
	for i, b := range s.bins {
		n += b
		if n > rank {
			return i + s.offset
		}
	}
	return s.maxIndex
}
// MergeWith adds all of other's counts into s. When other is also a
// DenseStore the bins are merged directly; otherwise the bins are streamed
// through the generic Store interface.
func (s *DenseStore) MergeWith(other Store) {
	if other.IsEmpty() {
		return
	}
	o, ok := other.(*DenseStore)
	if !ok {
		// Fall back to the bin channel for non-dense store implementations.
		for bin := range other.Bins() {
			s.AddBin(bin)
		}
		return
	}
	// Widen our range once up front, then add bin-by-bin.
	if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex {
		s.extendRange(o.minIndex, o.maxIndex)
	}
	for idx := o.minIndex; idx <= o.maxIndex; idx++ {
		s.bins[idx-s.offset] += o.bins[idx-o.offset]
	}
	s.count += o.count
}
// Bins returns a channel that yields every non-empty bin in ascending
// index order.
//
// NOTE(review): the producing goroutine blocks on the unbuffered channel;
// if a consumer abandons the channel early the goroutine leaks — confirm
// all callers drain it fully.
func (s *DenseStore) Bins() <-chan Bin {
	ch := make(chan Bin)
	go func() {
		defer close(ch)
		for idx := s.minIndex; idx <= s.maxIndex; idx++ {
			if s.bins[idx-s.offset] > 0 {
				ch <- Bin{index: idx, count: s.bins[idx-s.offset]}
			}
		}
	}()
	return ch
}
// Copy returns a deep copy of the store; the bins slice is cloned so the
// copy can be mutated independently of the original.
func (s *DenseStore) Copy() Store {
	c := *s
	c.bins = make([]float64, len(s.bins))
	copy(c.bins, s.bins)
	return &c
}
// string renders every bin together with the store's bookkeeping fields,
// for debugging.
func (s *DenseStore) string() string {
	var b bytes.Buffer
	b.WriteString("{")
	for i, v := range s.bins {
		fmt.Fprintf(&b, "%d: %f, ", i+s.offset, v)
	}
	fmt.Fprintf(&b, "count: %v, offset: %d, minIndex: %d, maxIndex: %d}", s.count, s.offset, s.minIndex, s.maxIndex)
	return b.String()
}
// ToProto serializes the store into its protobuf representation. Only the
// populated range [minIndex, maxIndex] is copied into the contiguous
// counts; an empty store serializes with nil counts.
func (s *DenseStore) ToProto() *sketchpb.Store {
	if s.IsEmpty() {
		return &sketchpb.Store{ContiguousBinCounts: nil}
	}
	bins := make([]float64, s.maxIndex-s.minIndex+1)
	copy(bins, s.bins[s.minIndex-s.offset:s.maxIndex-s.offset+1])
	return &sketchpb.Store{
		ContiguousBinCounts: bins,
		ContiguousBinIndexOffset: int32(s.minIndex),
	}
} | ddsketch/store/dense_store.go | 0.721253 | 0.451145 | dense_store.go | starcoder
package gui
import (
"github.com/Glenn-Gray-Labs/g3n/gls"
"github.com/Glenn-Gray-Labs/g3n/math32"
"github.com/Glenn-Gray-Labs/g3n/text"
"github.com/Glenn-Gray-Labs/g3n/texture"
)
// Label is a panel which contains a texture with text.
// The content size of the label panel is the exact size of the texture.
// Use NewLabel, NewIcon or NewLabelWithFont to create one.
type Label struct {
	Panel // Embedded Panel
	font *text.Font // TrueType font face
	tex *texture.Texture2D // Texture with text
	style *LabelStyle // The style of the panel and font attributes
	text string // Text being displayed
}
// LabelStyle contains all the styling attributes of a Label.
// It's essentially a BasicStyle combined with FontAttributes.
type LabelStyle struct {
	PanelStyle
	text.FontAttributes
	FgColor math32.Color4 // text (foreground) color
}
// NewLabel creates and returns a label panel with
// the specified text drawn using the default text font.
func NewLabel(text string) *Label {
	return NewLabelWithFont(text, StyleDefault().Font)
}
// NewIcon creates and returns a label panel with
// the specified icon codepoint drawn using the default icon font.
func NewIcon(icon string) *Label {
	return NewLabelWithFont(icon, StyleDefault().FontIcon)
}
// NewLabelWithFont creates and returns a label panel with
// the specified text drawn using the specified font.
func NewLabelWithFont(msg string, font *text.Font) *Label {
	l := new(Label)
	l.initialize(msg, font)
	return l
}
// initialize initializes this label and is normally used by other
// components which contain a label.
func (l *Label) initialize(msg string, font *text.Font) {
	l.font = font
	l.Panel.Initialize(l, 0, 0)
	// The text texture is drawn over a transparent panel.
	l.Panel.mat.SetTransparent(true)
	// TODO: Remove this hack in an elegant way e.g. set the label style depending of if it's an icon or text label and have two defaults (one for icon labels one for text tabels)
	if font != StyleDefault().FontIcon {
		l.Panel.SetPaddings(2, 0, 2, 0)
	}
	// Copy the style based on the default Label style, so later mutations
	// of this label's style don't affect the shared default.
	styleCopy := StyleDefault().Label
	l.style = &styleCopy
	l.SetText(msg)
}
// SetText sets and draws the label text using the font.
func (l *Label) SetText(text string) {
	// Need at least a character to get dimensions, so an empty string is
	// rendered as a single space (l.text still records the empty string).
	l.text = text
	if text == "" {
		text = " "
	}
	// Set font properties
	l.font.SetAttributes(&l.style.FontAttributes)
	l.font.SetColor(&l.style.FgColor)
	// Create an image with the text
	textImage := l.font.DrawText(text)
	// Create texture if it doesn't exist yet
	if l.tex == nil {
		l.tex = texture.NewTexture2DFromRGBA(textImage)
		l.tex.SetMagFilter(gls.NEAREST)
		l.tex.SetMinFilter(gls.NEAREST)
		l.Panel.Material().AddTexture(l.tex)
		// Otherwise update texture with new image
	} else {
		l.tex.SetFromRGBA(textImage)
	}
	// Update label panel dimensions to exactly fit the rendered text
	l.Panel.SetContentSize(float32(textImage.Rect.Dx()), float32(textImage.Rect.Dy()))
}
// Text returns the current label text (as set by SetText).
func (l *Label) Text() string {
	return l.text
}
// SetColor sets the text color and redraws the text.
// Alpha is set to 1 (opaque).
// It returns the label to allow method chaining.
func (l *Label) SetColor(color *math32.Color) *Label {
	l.style.FgColor.FromColor(color, 1.0)
	l.SetText(l.text)
	return l
}
// SetColor4 sets the text color (including alpha) and redraws the text.
// It returns the label to allow method chaining.
func (l *Label) SetColor4(color4 *math32.Color4) *Label {
	l.style.FgColor = *color4
	l.SetText(l.text)
	return l
}
// Color returns the current text color.
func (l *Label) Color() math32.Color4 {
	return l.style.FgColor
}
// SetBgColor sets the background color and redraws the text.
// The color alpha is set to 1.0.
// It returns the label to allow method chaining.
func (l *Label) SetBgColor(color *math32.Color) *Label {
	l.style.BgColor.FromColor(color, 1.0)
	l.Panel.SetColor4(&l.style.BgColor)
	l.SetText(l.text)
	return l
}
// SetBgColor4 sets the background color (including alpha) and redraws the
// text. It returns the label to allow method chaining.
func (l *Label) SetBgColor4(color *math32.Color4) *Label {
	l.style.BgColor = *color
	l.Panel.SetColor4(&l.style.BgColor)
	l.SetText(l.text)
	return l
}
// BgColor returns the current background color.
func (l *Label) BgColor() math32.Color4 {
	return l.style.BgColor
}
// SetFont sets the font and redraws the text with it.
func (l *Label) SetFont(f *text.Font) {
	l.font = f
	l.SetText(l.text)
}
// Font returns the font currently used to draw the text.
func (l *Label) Font() *text.Font {
	return l.font
}
// SetFontSize sets the point size of the font and redraws the text.
// It returns the label to allow method chaining.
func (l *Label) SetFontSize(size float64) *Label {
	l.style.PointSize = size
	l.SetText(l.text)
	return l
}
// FontSize returns the point size of the font.
func (l *Label) FontSize() float64 {
	return l.style.PointSize
}
// SetFontDPI sets the resolution of the font in dots per inch (DPI) and
// redraws the text. It returns the label to allow method chaining.
func (l *Label) SetFontDPI(dpi float64) *Label {
	l.style.DPI = dpi
	l.SetText(l.text)
	return l
}
// FontDPI returns the resolution of the font in dots per inch (DPI).
func (l *Label) FontDPI() float64 {
	return l.style.DPI
}
// SetLineSpacing sets the spacing between lines and redraws the text.
// It returns the label to allow method chaining.
func (l *Label) SetLineSpacing(spacing float64) *Label {
	l.style.LineSpacing = spacing
	l.SetText(l.text)
	return l
}
// LineSpacing returns the spacing between lines.
func (l *Label) LineSpacing() float64 {
	return l.style.LineSpacing
}
// setTextCaret sets the label text and draws a caret at the
// specified line and column.
// It is normally used by the Edit widget.
func (l *Label) setTextCaret(msg string, mx, width, line, col int) {
	// Set font properties
	l.font.SetAttributes(&l.style.FontAttributes)
	l.font.SetColor(&l.style.FgColor)
	// Create canvas and draw text with the caret
	_, height := l.font.MeasureText(msg)
	canvas := text.NewCanvas(width, height, &l.style.BgColor)
	canvas.DrawTextCaret(mx, 0, msg, l.font, line, col)
	// Create the texture if it doesn't exist yet
	if l.tex == nil {
		l.tex = texture.NewTexture2DFromRGBA(canvas.RGBA)
		l.Panel.Material().AddTexture(l.tex)
		// Otherwise update texture with new image
	} else {
		l.tex.SetFromRGBA(canvas.RGBA)
	}
	// Set texture filtering parameters for text
	l.tex.SetMagFilter(gls.NEAREST)
	l.tex.SetMinFilter(gls.NEAREST)
	// Update the label panel dimensions to match the canvas
	l.Panel.SetContentSize(float32(width), float32(height))
	l.text = msg
} | gui/label.go | 0.734501 | 0.445952 | label.go | starcoder
package breathFirstSearch
// FloodFill performs a 4-directional flood fill on image starting at pixel
// (sr, sc): every pixel connected to the start through pixels of the same
// color is repainted with newColor. The (mutated) image is returned.
//
// Time: O(N) pixels visited; Space: O(N) recursion stack in the worst case.
func FloodFill(image [][]int, sr int, sc int, newColor int) [][]int {
	// If the start already has the target color there is nothing to do
	// (and recursing would never terminate).
	if startColor := image[sr][sc]; startColor != newColor {
		dfs(image, sr, sc, startColor, newColor)
	}
	return image
}
// dfs recursively repaints the pixel at (r, c) from color to newColor and
// continues into its 4-connected neighbors of the same color.
func dfs(image [][]int, r int, c int, color int, newColor int) {
	if image[r][c] != color {
		return
	}
	image[r][c] = newColor
	if r > 0 {
		dfs(image, r-1, c, color, newColor)
	}
	if c > 0 {
		dfs(image, r, c-1, color, newColor)
	}
	if r+1 < len(image) {
		dfs(image, r+1, c, color, newColor)
	}
	if c+1 < len(image[0]) {
		dfs(image, r, c+1, color, newColor)
	}
}
// MaxAreaOfIsland returns the area of the largest group of 1-cells
// connected 4-directionally in grid, or 0 if there is no island.
// The grid is consumed: visited land cells are zeroed by area.
//
// Time: O(R*C); Space: O(R*C) recursion stack in the worst case.
func MaxAreaOfIsland(grid [][]int) int {
	best := 0
	for r := range grid {
		for c := range grid[r] {
			if grid[r][c] == 0 {
				continue
			}
			if a := area(grid, r, c); a > best {
				best = a
			}
		}
	}
	return best
}
// area returns the size of the island containing (r, c), zeroing each
// visited cell so it is counted only once. Out-of-bounds coordinates and
// water cells contribute 0.
func area(grid [][]int, r, c int) int {
	if r < 0 || c < 0 || r >= len(grid) || c >= len(grid[0]) || grid[r][c] == 0 {
		return 0
	}
	grid[r][c] = 0
	return 1 + area(grid, r, c-1) + area(grid, r, c+1) + area(grid, r+1, c) + area(grid, r-1, c)
} | algorithmsI/breathFirstSearch/breathFirstSearch.go | 0.87866 | 0.884987 | breathFirstSearch.go | starcoder
package assert
import (
"reflect"
"strings"
)
// isNil reports whether v is nil, including the "typed nil" case where a
// non-nil interface wraps a nil pointer, map, slice, channel, or function.
func isNil(v interface{}) bool {
	if v == nil {
		return true
	}
	// A non-nil interface can still wrap a nil value for nilable kinds,
	// so those need an explicit reflect check. (The previous else-after-return
	// is dropped per Go style.)
	switch reflect.TypeOf(v).Kind() {
	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice:
		return reflect.ValueOf(v).IsNil()
	default:
		return false
	}
}
// getLength returns the length of the collection c (string, array, slice,
// or map).
// Panics with "not a collection" for any other kind (and, as before, with
// a nil-dereference for an untyped nil argument).
func getLength(c interface{}) int {
	// A switch avoids evaluating reflect.TypeOf(c).Kind() four times.
	switch reflect.TypeOf(c).Kind() {
	case reflect.String, reflect.Array, reflect.Slice, reflect.Map:
		return reflect.ValueOf(c).Len()
	default:
		panic("not a collection")
	}
}
// contains returns true if container c contains element e.
// For arrays, slices and maps, e must have exactly the container's element
// type and elements are compared with reflect.DeepEqual. For strings, e
// may be a string (substring test), a byte, or a rune.
// Panics if c is nil or not a container, or on an element-type mismatch.
func contains(c interface{}, e interface{}) bool {
	if isNil(c) {
		panic("nil container")
	}
	switch reflect.TypeOf(c).Kind() {
	case reflect.Array, reflect.Slice:
		if reflect.TypeOf(c).Elem() != reflect.TypeOf(e) {
			panic("different types")
		}
		// Linear scan with deep equality.
		for i := 0; i < reflect.ValueOf(c).Len(); i++ {
			if reflect.DeepEqual(reflect.ValueOf(c).Index(i).Interface(), e) {
				return true
			}
		}
	case reflect.Map:
		// For maps, membership is tested against the VALUES, not the keys.
		if reflect.TypeOf(c).Elem() != reflect.TypeOf(e) {
			panic("different types")
		}
		for _, k := range reflect.ValueOf(c).MapKeys() {
			if reflect.DeepEqual(reflect.ValueOf(c).MapIndex(k).Interface(), e) {
				return true
			}
		}
	case reflect.String:
		switch reflect.TypeOf(e).Kind() {
		case reflect.String:
			return strings.Contains(reflect.ValueOf(c).String(), reflect.ValueOf(e).String())
		case reflect.Uint8: // byte
			return strings.ContainsRune(reflect.ValueOf(c).String(), rune(reflect.ValueOf(e).Uint()))
		case reflect.Int32: // rune
			return strings.ContainsRune(reflect.ValueOf(c).String(), rune(reflect.ValueOf(e).Int()))
		default:
			panic("different types")
		}
	default:
		panic("not a container")
	}
	return false
}
// hasKey returns true if map m has key k.
// Panics if m is not a map, is a nil map, or if the keys of m are of a
// different type from k.
//
// NOTE(review): the O(n) scan compares keys with reflect.DeepEqual rather
// than using a native MapIndex lookup (==); the two can disagree for keys
// containing pointers — presumably the DeepEqual semantics are intentional.
func hasKey(m interface{}, k interface{}) bool {
	if reflect.TypeOf(m).Kind() != reflect.Map {
		panic("not a map")
	}
	if reflect.ValueOf(m).IsNil() {
		panic("nil map")
	}
	if reflect.TypeOf(m).Key() != reflect.TypeOf(k) {
		panic("different types")
	}
	for _, key := range reflect.ValueOf(m).MapKeys() {
		if reflect.DeepEqual(key.Interface(), k) {
			return true
		}
	}
	return false
}
// containsPair returns true if the map m contains the pair k -> v, where
// the stored value is compared to v with reflect.DeepEqual.
// Panics if m is not a map, is a nil map, or if the keys/values of m are
// of different types from k/v.
func containsPair(m interface{}, k interface{}, v interface{}) bool {
	if reflect.TypeOf(m).Kind() != reflect.Map {
		panic("not a map")
	}
	if reflect.ValueOf(m).IsNil() {
		panic("nil map")
	}
	if reflect.TypeOf(m).Key() != reflect.TypeOf(k) || reflect.TypeOf(m).Elem() != reflect.TypeOf(v) {
		panic("different types")
	}
	// An invalid Value means the key is absent.
	if !reflect.ValueOf(m).MapIndex(reflect.ValueOf(k)).IsValid() {
		return false
	}
	return reflect.DeepEqual(reflect.ValueOf(m).MapIndex(reflect.ValueOf(k)).Interface(), v)
}
// panics reports whether calling f causes a panic (the panic is swallowed).
// Panics itself if f is nil.
func panics(f func()) (p bool) {
	if f == nil {
		panic("nil function")
	}
	// If f panics, the final assignment never runs and p stays true;
	// the deferred recover then swallows the panic.
	defer func() { recover() }()
	p = true
	f()
	return false
}
// panicsWithValue returns true if the function f causes a panic whose
// value deep-equals e. Panics itself if f is nil.
func panicsWithValue(f func(), e interface{}) (v bool) {
	if f == nil {
		panic("nil function")
	}
	v = true
	defer func() {
		// On the panic path v is still true, so the result becomes whether
		// the recovered value matches e (and the panic is swallowed). On
		// normal return v is already false and && short-circuits.
		v = v && reflect.DeepEqual(recover(), e)
	}()
	f()
	v = false
	return v
} | assert/util.go | 0.804943 | 0.600423 | util.go | starcoder
package hunter
import (
"errors"
"github.com/eaglerock1337/gobat/pkg/board"
)
// These two variables allow for conversion of each square status to
// the status string and vice-versa. This allows for statuses to be stored
// as integers for faster lookup and comparison.
var (
	// values maps a status string to its integer code.
	values = map[string]int{
		"Empty": 0, "Miss": 1, "Destroyer": 2, "Submarine": 3,
		"Cruiser": 4, "Battleship": 5, "Carrier": 6, "Hit": 7,
	}
	// status maps an integer code back to its status string.
	status = [8]string{
		"Empty", "Miss", "Destroyer", "Submarine",
		"Cruiser", "Battleship", "Carrier", "Hit",
	}
)
// The four directions (up, down, left, and right) for finding adjacent
// squares, expressed as {letter delta, number delta} pairs.
var directions = [4][2]int{{0, -1}, {0, 1}, {-1, 0}, {1, 0}}
// Hunter is a struct that holds all data necessary to determine
// the optimal gameplay of Battleship.
type Hunter struct {
	Turns int // How many turns the Hunter has used
	Ships []board.Ship // The list of active unsunk ships
	Data map[int]*PieceData // The list of possible ship positiions, keyed by ship length
	Board board.Board // The Battleship board with known data
	HeatMap HeatMap // The heat map popupated from the existing piece data
	SeekMode bool // Whether the hunter is in Seek or Destroy mode
	Shots []board.Square // The current turn's list of best squares to play
	HitStack []board.Square // The current number of outstanding hits
}
// NewHunter initializes a Hunter struct with the full list of ships,
// all possible ship locations, an empty board, and a heat map.
func NewHunter() Hunter {
	var newHunter Hunter
	newHunter.Ships = board.ShipTypes()
	newHunter.SeekMode = true
	newHunter.Shots = make([]board.Square, 0, 5)
	// The map must be allocated before use: assigning to an entry of a nil
	// map panics at runtime (the previous code never initialized Data).
	newHunter.Data = make(map[int]*PieceData)
	for _, ship := range newHunter.Ships {
		// The submarine shares its length (3) with another ship, so its
		// piece data would duplicate that map entry; skip it.
		if ship.GetType() != "Submarine" {
			data := GenPieceData(ship)
			newHunter.Data[ship.GetLength()] = &data
		}
	}
	return newHunter
}
// DeleteShip removes a ship (matched by type) from the list of active
// ships, returning an error if it is not present. The removal swaps the
// last element into place, so ship order is not preserved.
func (h *Hunter) DeleteShip(s board.Ship) error {
	for i, ship := range h.Ships {
		if ship.GetType() == s.GetType() {
			h.Ships[i] = h.Ships[len(h.Ships)-1]
			h.Ships = h.Ships[:len(h.Ships)-1]
			return nil
		}
	}
	return errors.New("Ship not found")
}
// GetValidLengths returns the lengths of all active ships. Since two ships
// share length 3 (and therefore share piece data), only the first length-3
// entry is kept.
func (h Hunter) GetValidLengths() []int {
	var lengths []int
	foundThrees := false
	for _, ship := range h.Ships {
		length := ship.GetLength()
		if length == 3 {
			if foundThrees {
				continue
			}
			foundThrees = true
		}
		lengths = append(lengths, length)
	}
	return lengths
}
// AddHitStack adds a given Square to the hit stack of unresolved hits.
// This probably requires error checking to ensure duplicates don't enter the stack.
func (h *Hunter) AddHitStack(s board.Square) {
	h.HitStack = append(h.HitStack, s)
}
// DelHitStack removes a given Square from the hit stack via swap-remove
// (order is not preserved). A square that is not found is silently ignored;
// this probably should return an error instead.
func (h *Hunter) DelHitStack(s board.Square) {
	for i, square := range h.HitStack {
		if square.Letter == s.Letter && square.Number == s.Number {
			length := len(h.HitStack) - 1
			h.HitStack[i] = h.HitStack[length]
			h.HitStack = h.HitStack[:length]
			return
		}
	}
}
// InHitStack reports whether the given Square (matched by letter and
// number) is present in the hit stack.
func (h Hunter) InHitStack(s board.Square) bool {
	for _, square := range h.HitStack {
		if square.Letter == s.Letter && square.Number == s.Number {
			return true
		}
	}
	return false
}
// SearchPiece searches the PieceData for the given ship for all
// possible orientations, then intersect with the current hit stack.
// If the function succeeds in retrieving one result, it will return
// the piece with the location of the ship. Otherwise, the function
// will return with an error.
func (h Hunter) SearchPiece(sq board.Square, sh board.Ship) (board.Piece, error) {
	var hits []board.Piece
	length := sh.GetLength()
	for _, direction := range directions {
		let, num := direction[0], direction[1]
		// Check if the piece is in the stack
		// NOTE(review): skipping the direction when err == nil (a VALID
		// adjacent square) looks inverted compared to the err != nil check
		// below — confirm against board.SquareByValue's error semantics.
		nextSquare, err := board.SquareByValue(sq.Letter+let, sq.Number+num)
		if err == nil {
			continue
		}
		lastSquare, err := board.SquareByValue(sq.Letter+let*(length-1), sq.Number+num*(length-1))
		if err != nil {
			continue
		}
		for i := 1; i < length; i++ {
			square, _ := board.SquareByValue(sq.Letter+let*(i), sq.Number+num*(i))
			// NOTE(review): this continue only skips one cell of the
			// candidate piece; a square missing from the hit stack probably
			// should reject the whole direction instead.
			if !h.InHitStack(square) {
				continue
			}
		}
		// Create the piece to add to the list of hits. For negative
		// directions the piece starts at the far end.
		var startSquare board.Square
		if let < 0 || num < 0 {
			startSquare = lastSquare
		} else {
			startSquare = nextSquare
		}
		var horizontal bool
		if let != 0 {
			horizontal = true
		} else {
			horizontal = false
		}
		piece, _ := board.NewPiece(sh, startSquare, horizontal)
		hits = append(hits, piece)
	}
	if len(hits) == 0 {
		return board.Piece{}, errors.New("No valid piece found in hit stack")
	}
	if len(hits) > 1 {
		return board.Piece{}, errors.New("Duplicate pieces found, algorithm failed")
	}
	return hits[0], nil
}
// SinkShip will use the active hit stack, the sinking square, and the
// type of ship sunk to find the exact location of the ship, update the
// board and piece data, as well as delete the ship from the ship list.
func (h *Hunter) SinkShip(sq board.Square, sh board.Ship) error {
	piece, err := h.SearchPiece(sq, sh)
	if err != nil {
		return errors.New("SinkShip failed due to SearchPiece not finding a piece")
	}
	// Resolve every hit covered by the located piece.
	for _, square := range piece.Coords {
		h.DelHitStack(square)
	}
	// Prune the located piece from every remaining ship's piece data.
	for _, length := range h.GetValidLengths() {
		h.Data[length].DeletePiece(piece)
	}
	err = h.DeleteShip(sh)
	if err != nil {
		return errors.New("SinkShip failed due to DeleteShip returning an error")
	}
	h.Board.SetPiece(piece)
	return nil
}
// Refresh rebuilds the HeatMap from scratch using the current piece data
// of every remaining ship.
func (h *Hunter) Refresh() {
	h.HeatMap.Initialize()
	for _, ship := range h.Ships {
		h.HeatMap.PopulateMap(*h.Data[ship.GetLength()], false)
	}
}
// ClearShots empties the current shot list, keeping capacity for the top 5.
func (h *Hunter) ClearShots() {
	h.Shots = make([]board.Square, 0, 5)
}
// AddShot will attempt to add the given square to the Shots array, which
// will only get accepted if in the top 5 Shots (ranked by heat-map score).
func (h *Hunter) AddShot(s board.Square) {
	score := h.HeatMap[s.Letter][s.Number]
	// Only try to add the value if it actually registered any hits
	if score > 0 {
		length := len(h.Shots)
		// Only add if the score is high enough or if the list isn't full yet
		if length < 5 || score > h.HeatMap.GetSquare(h.Shots[length-1]) {
			// Find the insertion point that keeps Shots ordered by score.
			target := length
			if length > 0 {
				for k := length - 1; k >= 0; k-- {
					if score >= h.HeatMap.GetSquare(h.Shots[k]) {
						target = k
						break
					}
				}
			}
			h.Shots = append(h.Shots, s) // Add to empty array or make space
			if length > 0 {
				// NOTE(review): copy into h.Shots[target+1:length-1] drops the
				// element at length-1, and the list is never truncated back to
				// 5 — verify the intended slice bounds and truncation.
				copy(h.Shots[target+1:length-1], h.Shots[target:])
				h.Shots[target] = s
			}
		}
	}
}
// Seek is the main hunting routine where the HeatMap is populated with
// all possible ship positions from the PieceData, and the top positions
// are populated in the Shots slice by scanning every board square.
func (h *Hunter) Seek() {
	h.ClearShots()
	for i := 0; i < 10; i++ {
		for j := 0; j < 10; j++ {
			square, _ := board.SquareByValue(i, j)
			h.AddShot(square)
		}
	}
}
// Destroy is the routine for sinking a ship that has been detected. Based
// on the squares in the HitStack, all available adjacent squares are
// checked in the HeatMap and ranked by total occurrences.
func (h *Hunter) Destroy() {
	h.ClearShots()
	for _, hit := range h.HitStack {
	Adjacent:
		for _, direction := range directions {
			let, num := direction[0], direction[1]
			square, err := board.SquareByValue(hit.Letter+let, hit.Number+num)
			// Only consider squares that exist on the board. The previous
			// condition (err != nil) queued only INVALID squares, opposite
			// to how SquareByValue errors are treated elsewhere in this file.
			if err != nil {
				continue
			}
			// Skip squares already queued as candidate shots.
			for _, shot := range h.Shots {
				if shot == square {
					continue Adjacent
				}
			}
			h.AddShot(square)
		}
	}
}
// Turn processes a single turn in the simulator based on the given
// square and result. The data is pruned, heatmap updated, and ideal moves
// given based on the mode the Hunter is currently in.
func (h *Hunter) Turn(s board.Square, result string) error {
	err := h.Board.SetString(s, result)
	if err != nil {
		return errors.New("Turn failed as the result was invalid")
	}
	// NOTE(review): NewShip's error is discarded; for non-ship results this
	// presumably yields a zero Ship — confirm SinkShip never receives one.
	ship, _ := board.NewShip(result)
	if h.Board.IsEmpty(s) {
		return errors.New("Turn failed as it was given an empty result")
	}
	if h.Board.IsSunk(s) {
		// NOTE(review): SinkShip's error is ignored here.
		h.SinkShip(s, ship)
		// Return to seek mode once no unresolved hits remain.
		h.SeekMode = len(h.HitStack) == 0
	}
	if h.Board.IsHit(s) {
		h.AddHitStack(s)
		h.SeekMode = false
	}
	if h.Board.IsMiss(s) {
		// A miss removes this square from every ship's candidate pieces.
		for _, length := range h.GetValidLengths() {
			h.Data[length].DeleteSquare(s)
		}
	}
	if h.SeekMode {
		h.Seek()
	} else {
		h.Destroy()
	}
	h.Turns++
	return nil
} | pkg/hunter/hunter.go | 0.739516 | 0.469155 | hunter.go | starcoder
package list
// Iterator defines a bidirectional list iterator positioned on one element.
type Iterator interface {
	// Next iterates to the next element in the list and returns the iterator, or nil if there is no next element
	Next() Iterator
	// Previous iterates to the previous element in the list and returns the iterator, or nil if there is no previous element
	Previous() Iterator
	// Get returns the value of the iterator's current element
	Get() interface{}
	// Set sets the value of the iterator's current element
	Set(v interface{})
}
// List defines a general-purpose list of interface{} values with indexed
// access, stack/queue helpers, and iteration support.
type List interface {
	// Clear resets the list to zero elements and resets the list's meta data
	Clear()
	// Len returns the current list length
	Len() int
	// Empty returns true if the current list length is zero
	Empty() bool
	// Chan returns a channel which iterates from the front to the back of the list
	// (n is presumably the channel buffer size — confirm with implementations)
	Chan(n int) <-chan interface{}
	// ChanBack returns a channel which iterates from the back to the front of the list
	// (n is presumably the channel buffer size — confirm with implementations)
	ChanBack(n int) <-chan interface{}
	// Iter returns an iterator which starts at the front of the list, or nil if there are no elements in the list
	Iter() Iterator
	// IterBack returns an iterator which starts at the back of the list, or nil if there are no elements in the list
	IterBack() Iterator
	// First returns the first value of the list and true, or false if there is no value
	First() (interface{}, bool)
	// Last returns the last value of the list and true, or false if there is no value
	Last() (interface{}, bool)
	// Get returns the value of the given index and nil, or an out of bound error if the index is incorrect
	Get(i int) (interface{}, error)
	// GetFunc returns the value of the first element selected by the given function and true, or false if there is no such element
	GetFunc(m func(v interface{}) bool) (interface{}, bool)
	// Set sets the value of the given index and returns nil, or an out of bound error if the index is incorrect
	Set(i int, v interface{}) error
	// SetFunc sets the value of the first element selected by the given function and returns true, or false if there is no such element
	SetFunc(m func(v interface{}) bool, v interface{}) bool
	// Swap swaps the value of index i with the value of index j
	Swap(i, j int)
	// Contains returns true if the value exists in the list, or false if it does not
	Contains(v interface{}) bool
	// IndexOf returns the first index of the given value and true, or false if it does not exists
	IndexOf(v interface{}) (int, bool)
	// LastIndexOf returns the last index of the given value and true, or false if it does not exists
	LastIndexOf(v interface{}) (int, bool)
	// Copy returns an exact copy of the list
	Copy() List
	// Slice returns a copy of the list as a slice
	Slice() []interface{}
	// Insert inserts a value into the list and returns nil, or an out of bound error if the index is incorrect
	Insert(i int, v interface{}) error
	// Remove removes and returns the value with the given index and nil, or an out of bound error if the index is incorrect
	Remove(i int) (interface{}, error)
	// RemoveFirstOccurrence removes the first occurrence of the given value in the list and returns true, or false if there is no such element
	RemoveFirstOccurrence(v interface{}) bool
	// RemoveLastOccurrence removes the last occurrence of the given value in the list and returns true, or false if there is no such element
	RemoveLastOccurrence(v interface{}) bool
	// Pop removes and returns the last element and true, or false if there is no such element
	Pop() (interface{}, bool)
	// Push inserts the given value at the end of the list
	Push(v interface{})
	// PushList pushes the given list
	PushList(l2 List)
	// Shift removes and returns the first element and true, or false if there is no such element
	Shift() (interface{}, bool)
	// Unshift inserts the given value at the beginning of the list
	Unshift(v interface{})
	// UnshiftList unshifts the given list
	UnshiftList(l2 List)
	// MoveAfter moves the element at index i after the element at index m and returns nil, or an out of bound error if an index is incorrect
	MoveAfter(i, m int) error
	// MoveToBack moves the element at index i to the back of the list and returns nil, or an out of bound error if the index is incorrect
	MoveToBack(i int) error
	// MoveBefore moves the element at index i before the element at index m and returns nil, or an out of bound error if an index is incorrect
	MoveBefore(i, m int) error
	// MoveToFront moves the element at index i to the front of the list and returns nil, or an out of bound error if the index is incorrect
	MoveToFront(i int) error
} | list/list.go | 0.725746 | 0.512815 | list.go | starcoder
package imap
// Imap is an interval map: it maps half-open ranges [low, high) of uint64
// keys to values, backed by an AVL tree keyed on each interval's low bound.
type Imap struct {
	tree avlTree
}
// avlNode is a node of the interval tree; each node holds one interval
// [key, high) and its mapped value.
type avlNode struct {
	key uint64 // Interval low
	left, right *avlNode // child subtrees
	parent *avlNode // parent node (nil at the root)
	heightCache int // cached height used by the AVL balancing
	high uint64 // interval high (exclusive)
	value interface{} // value mapped over [key, high)
}
// interval returns the node's range as an Interval value.
func (n *avlNode) interval() Interval {
	return Interval{n.key, n.high}
}
// Insert maps the interval key to value. Existing intervals that intersect
// key are truncated, split, or removed to make room, and the new interval
// is merged with adjacent intervals that already carry an equal value.
// An empty key is a no-op.
func (m *Imap) Insert(key Interval, value interface{}) {
	if key.Empty() {
		return
	}
	low, high := key.Low, key.High
	// Find the node that overlaps or just abuts the new range. If an
	// existing range abuts the new range, we'll extend the existing
	// range.
	n := m.tree.Search(func(n *avlNode) bool {
		return low <= n.high
	})
	pred := n
	// Split intervals that intersect low or high (one interval could do
	// both) and delete fully overlapping intervals.
	for n != nil && n.key < high {
		// Fetch the next node in case we delete this node.
		nNext := n.Next()
		// Make room for our new interval.
		l, h := n.interval().Subtract(Interval{low, high})
		lok := !l.Empty()
		hok := !h.Empty()
		if lok && !hok {
			// n overlaps the low end of the new interval. Adjust n's
			// high. Order doesn't change.
			n.high = l.High
		} else if !lok && hok {
			// n overlaps the high end of the new interval. Adjust n's
			// low. Order doesn't change.
			n.key = h.Low
			break
		} else if lok && hok {
			// The new interval falls in the middle of an existing
			// interval. Split the existing interval.
			if n.value == value {
				// Nothing needs to be done.
				return
			}
			n.high = l.High
			n2 := m.tree.Insert(h.Low)
			n2.high, n2.value = h.High, n.value
			n = n2
			break
		} else {
			// The new interval covers this interval. Delete it.
			m.tree.Delete(n)
		}
		n = nNext
	}
	// Merge with existing intervals if possible. We already handled the
	// completely overlapping case above.
	if pred != nil && pred.high == low && pred.value == value {
		// Extend the predecessor over the new range.
		pred.high = high
		if n != nil && n.key == high && n.value == value {
			// We merged right into the successor. Extend the
			// predecessor and delete the successor.
			pred.high = n.high
			m.tree.Delete(n)
		}
		return
	}
	if n != nil && n.key == high && n.value == value {
		// Extend the successor over the new range.
		n.key = low
		return
	}
	// We should now have space for the new interval.
	n = m.tree.Insert(low)
	n.high, n.value = high, value
}
// Find returns the value at addr and the interval over which value is
// the same (which may be smaller than the interval originally
// inserted). If no interval contains value, it returns Interval{}, nil.
func (m *Imap) Find(addr uint64) (key Interval, value interface{}) {
n := m.tree.Search(func(n *avlNode) bool {
return addr < n.high
})
if n != nil && n.key <= addr {
return n.interval(), n.value
}
return Interval{}, nil
} | internal/imap/imap.go | 0.710025 | 0.437403 | imap.go | starcoder |
package binary
import (
"math"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/executor/transform"
)
const (
	// EqType checks that lhs is equal to rhs
	EqType = "=="
	// NotEqType checks that lhs is not equal to rhs
	NotEqType = "!="
	// GreaterType checks that lhs is greater than rhs
	GreaterType = ">"
	// LesserType checks that lhs is less than rhs
	LesserType = "<"
	// GreaterEqType checks that lhs is greater than or equal to rhs
	GreaterEqType = ">="
	// LesserEqType checks that lhs is less than or equal to rhs
	LesserEqType = "<="
	// suffix appended to an operator type to select the variant that
	// returns bool values (1/0) instead of lhs values
	returnBoolSuffix = "BOOL"
)
// toFloat converts a boolean to its numeric form: 1 for true, 0 for false.
func toFloat(b bool) float64 {
	var result float64
	if b {
		result = 1
	}
	return result
}
// toComparisonValue returns x when b is true, and NaN otherwise.
func toComparisonValue(b bool, x float64) float64 {
	if !b {
		return math.NaN()
	}
	return x
}
var (
	// comparisonFuncs maps an operator type (optionally suffixed with
	// returnBoolSuffix) to its element-wise implementation. The plain
	// variants return x when the comparison holds and NaN otherwise;
	// the BOOL variants return 1 or 0.
	comparisonFuncs = map[string]Function{
		EqType: func(x, y float64) float64 { return toComparisonValue(x == y, x) },
		NotEqType: func(x, y float64) float64 { return toComparisonValue(x != y, x) },
		GreaterType: func(x, y float64) float64 { return toComparisonValue(x > y, x) },
		LesserType: func(x, y float64) float64 { return toComparisonValue(x < y, x) },
		GreaterEqType: func(x, y float64) float64 { return toComparisonValue(x >= y, x) },
		LesserEqType: func(x, y float64) float64 { return toComparisonValue(x <= y, x) },
		EqType + returnBoolSuffix: func(x, y float64) float64 { return toFloat(x == y) },
		NotEqType + returnBoolSuffix: func(x, y float64) float64 { return toFloat(x != y) },
		GreaterType + returnBoolSuffix: func(x, y float64) float64 { return toFloat(x > y) },
		LesserType + returnBoolSuffix: func(x, y float64) float64 { return toFloat(x < y) },
		GreaterEqType + returnBoolSuffix: func(x, y float64) float64 { return toFloat(x >= y) },
		LesserEqType + returnBoolSuffix: func(x, y float64) float64 { return toFloat(x <= y) },
	}
)
// Builds a comparison processing function if able. If wrong opType supplied,
// returns no function and false
func buildComparisonFunction(
opType string,
params NodeParams,
) (processFunc, bool) {
if params.ReturnBool {
opType += returnBoolSuffix
}
fn, ok := comparisonFuncs[opType]
if !ok {
return nil, false
}
return func(lhs, rhs block.Block, controller *transform.Controller) (block.Block, error) {
return processBinary(lhs, rhs, params, controller, true, fn)
}, true
} | src/query/functions/binary/comparison.go | 0.725746 | 0.436142 | comparison.go | starcoder |
package helpers
import (
"fmt"
"log"
"runtime/debug"
"github.com/DimensionDataResearch/go-dd-cloud-compute/compute"
"github.com/DimensionDataResearch/packer-plugins-ddcloud/artifacts"
"github.com/mitchellh/multistep"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/packer"
)
// ForStateBag creates a new `State` helper for the specified multistep.StateBag.
func ForStateBag(stateBag multistep.StateBag) State {
	helper := State{
		Data: stateBag,
	}
	return helper
}
// State is the helper for working with `multistep` state data. It wraps a
// multistep.StateBag and exposes typed getters/setters for the values that
// the build steps share.
type State struct {
	// The state data.
	Data multistep.StateBag
}
// Get retrieves the state data with the specified key.
func (state State) Get(key string) (value interface{}) {
	return state.Data.Get(key)
}
// GetOk retrieves the state data with the specified key, if it exists.
func (state State) GetOk(key string) (value interface{}, exists bool) {
	return state.Data.GetOk(key)
}
// Set updates the state data with the specified key and value.
func (state State) Set(key string, value interface{}) {
	state.Data.Put(key, value)
}
// GetLastError retrieves the last error (if any) from the state data.
// Returns nil when no error has been recorded.
func (state State) GetLastError() error {
	value, ok := state.Data.GetOk("error")
	if !ok || value == nil {
		return nil
	}
	return value.(error)
}
// SetLastError updates the last error (if any) in the state data.
func (state State) SetLastError(err error) {
	state.Data.Put("error", err)
}
// GetBuilderID gets the Id of the current builder plugin (if any) in the state data.
// Returns the empty string when unset.
func (state State) GetBuilderID() string {
	value, ok := state.Data.GetOk("builder_id")
	if !ok || value == nil {
		return ""
	}
	return value.(string)
}
// SetBuilderID updates the Id of the current builder plugin (if any) in the state data.
func (state State) SetBuilderID(builderID string) {
	state.Data.Put("builder_id", builderID)
}
// GetUI gets a reference to the Packer UI from the state data.
// Logs a warning (with stack trace) and returns nil when the UI is absent.
func (state State) GetUI() packer.Ui {
	value, ok := state.Data.GetOk("ui")
	if !ok || value == nil {
		log.Printf("helpers.State.GetUI: Warning - UI not available.\n%s",
			debug.Stack(),
		)
		return nil
	}
	return value.(packer.Ui)
}
// SetUI updates the reference to the Packer UI in the state data.
func (state State) SetUI(ui packer.Ui) {
	state.Data.Put("ui", ui)
}
// GetHook gets a reference to the Packer extensibility hook from the state data.
// Logs a warning (with stack trace) and returns nil when the hook is absent.
func (state State) GetHook() packer.Hook {
	value, ok := state.Data.GetOk("hook")
	if !ok || value == nil {
		log.Printf("helpers.State.GetHook: Warning - Hook not available.\n%s",
			debug.Stack(),
		)
		return nil
	}
	return value.(packer.Hook)
}
// SetHook updates the reference to the Packer extensibility hook in the state data.
func (state State) SetHook(hook packer.Hook) {
	state.Data.Put("hook", hook)
}
// Typed accessors for the objects shared between build steps. Each getter
// returns nil when the corresponding value has not been stored yet.

// GetPackerConfig gets the Packer configuration from the state data.
func (state State) GetPackerConfig() *common.PackerConfig {
	value, ok := state.Data.GetOk("config")
	if !ok || value == nil {
		return nil
	}
	return value.(*common.PackerConfig)
}
// SetPackerConfig updates the Packer configuration in the state data.
func (state State) SetPackerConfig(config *common.PackerConfig) {
	state.Data.Put("config", config)
}
// GetSettings gets the plugin settings from the state data.
func (state State) GetSettings() PluginConfig {
	value, ok := state.Data.GetOk("settings")
	if !ok || value == nil {
		return nil
	}
	return value.(PluginConfig)
}
// SetSettings updates the plugin settings in the state data.
func (state State) SetSettings(config PluginConfig) {
	state.Data.Put("settings", config)
}
// GetClient gets the CloudControl API client from the state data.
func (state State) GetClient() *compute.Client {
	value, ok := state.Data.GetOk("client")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.Client)
}
// SetClient updates the CloudControl API client in the state data.
func (state State) SetClient(client *compute.Client) {
	state.Data.Put("client", client)
}
// GetTargetDatacenter gets the target datacenter from the state data.
func (state State) GetTargetDatacenter() *compute.Datacenter {
	value, ok := state.Data.GetOk("target_datacenter")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.Datacenter)
}
// SetTargetDatacenter updates the target datacenter in the state data.
func (state State) SetTargetDatacenter(datacenter *compute.Datacenter) {
	state.Data.Put("target_datacenter", datacenter)
}
// GetNetworkDomain gets the target network domain from the state data.
func (state State) GetNetworkDomain() *compute.NetworkDomain {
	value, ok := state.Data.GetOk("network_domain")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.NetworkDomain)
}
// SetNetworkDomain updates the target network domain in the state data.
func (state State) SetNetworkDomain(networkDomain *compute.NetworkDomain) {
	state.Data.Put("network_domain", networkDomain)
}
// GetVLAN gets the target VLAN from the state data.
func (state State) GetVLAN() *compute.VLAN {
	value, ok := state.Data.GetOk("vlan")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.VLAN)
}
// SetVLAN updates the target VLAN in the state data.
func (state State) SetVLAN(vlan *compute.VLAN) {
	state.Data.Put("vlan", vlan)
}
// GetServer gets the target server from the state data.
func (state State) GetServer() *compute.Server {
	value, ok := state.Data.GetOk("server")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.Server)
}
// SetServer updates the target server in the state data.
func (state State) SetServer(server *compute.Server) {
	state.Data.Put("server", server)
}
// GetNATRule gets the NAT rule from the state data.
func (state State) GetNATRule() *compute.NATRule {
	value, ok := state.Data.GetOk("nat_rule")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.NATRule)
}
// SetNATRule updates the NAT rule in the state data.
func (state State) SetNATRule(natRule *compute.NATRule) {
	state.Data.Put("nat_rule", natRule)
}
// GetFirewallRule gets the firewall rule from the state data.
func (state State) GetFirewallRule() *compute.FirewallRule {
	value, ok := state.Data.GetOk("firewall_rule")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.FirewallRule)
}
// SetFirewallRule updates the firewall rule in the state data.
func (state State) SetFirewallRule(firewallRule *compute.FirewallRule) {
	state.Data.Put("firewall_rule", firewallRule)
}
// GetSourceImage gets the source image from the state data.
func (state State) GetSourceImage() compute.Image {
	value, ok := state.Data.GetOk("source_image")
	if !ok || value == nil {
		return nil
	}
	return value.(compute.Image)
}
// SetSourceImage updates the source image in the state data.
func (state State) SetSourceImage(image compute.Image) {
	state.Data.Put("source_image", image)
}
// GetSourceImageArtifact gets the source image artifact from the state data.
func (state State) GetSourceImageArtifact() *artifacts.Image {
	value, ok := state.Data.GetOk("source_image_artifact")
	if !ok || value == nil {
		return nil
	}
	return value.(*artifacts.Image)
}
// SetSourceImageArtifact updates the source image artifact in the state data.
func (state State) SetSourceImageArtifact(sourceArtifact *artifacts.Image) {
	state.Data.Put("source_image_artifact", sourceArtifact)
}
// GetTargetImage gets the target image from the state data.
func (state State) GetTargetImage() *compute.CustomerImage {
	value, ok := state.Data.GetOk("target_image")
	if !ok || value == nil {
		return nil
	}
	return value.(*compute.CustomerImage)
}
// SetTargetImage updates the target image in the state data.
func (state State) SetTargetImage(image *compute.CustomerImage) {
	state.Data.Put("target_image", image)
}
// GetTargetImageArtifact gets the target image artifact from the state data.
func (state State) GetTargetImageArtifact() *artifacts.Image {
	value, ok := state.Data.GetOk("target_image_artifact")
	if !ok || value == nil {
		return nil
	}
	return value.(*artifacts.Image)
}
// SetTargetImageArtifact updates the target image artifact in the state data.
// NOTE(review): the parameter holds the target artifact despite being named
// sourceArtifact — looks like a copy-paste from SetSourceImageArtifact.
func (state State) SetTargetImageArtifact(sourceArtifact *artifacts.Image) {
	state.Data.Put("target_image_artifact", sourceArtifact)
}
// GetRemoteOVFPackageArtifact gets the remote OVF package artifact from the state data.
func (state State) GetRemoteOVFPackageArtifact() *artifacts.RemoteOVFPackage {
	value, ok := state.Data.GetOk("remote_ovf_package_artifact")
	if !ok || value == nil {
		return nil
	}
	return value.(*artifacts.RemoteOVFPackage)
}
// SetRemoteOVFPackageArtifact updates the remote OVF package artifact in the state data.
func (state State) SetRemoteOVFPackageArtifact(packageArtifact *artifacts.RemoteOVFPackage) {
	state.Data.Put("remote_ovf_package_artifact", packageArtifact)
}
// GetSourceArtifact gets the source artifact from the state data.
func (state State) GetSourceArtifact() packer.Artifact {
	value, ok := state.Data.GetOk("source_artifact")
	if !ok || value == nil {
		return nil
	}
	return value.(packer.Artifact)
}
// SetSourceArtifact updates the source artifact in the state data.
func (state State) SetSourceArtifact(sourceArtifact packer.Artifact) {
	state.Data.Put("source_artifact", sourceArtifact)
}
// GetTargetArtifact gets the target artifact from the state data.
func (state State) GetTargetArtifact() packer.Artifact {
	value, ok := state.Data.GetOk("target_artifact")
	if !ok || value == nil {
		return nil
	}
	return value.(packer.Artifact)
}
// SetTargetArtifact updates the target artifact in the state data.
func (state State) SetTargetArtifact(targetArtifact packer.Artifact) {
	state.Data.Put("target_artifact", targetArtifact)
}
// ShowMessage displays the specified message via the UI (if available, otherwise via log.Printf).
// formatArgs are applied Printf-style in either case.
func (state State) ShowMessage(message string, formatArgs ...interface{}) {
	ui := state.GetUI()
	if ui != nil {
		ui.Message(fmt.Sprintf(message, formatArgs...))
	} else {
		log.Printf(message, formatArgs...)
	}
}
// ShowError displays the specified error via the UI, and persists it using
// SetLastError. When no UI is available it falls back to the standard logger
// (mirroring ShowMessage) so the error text is never silently dropped.
func (state State) ShowError(err error) {
	state.SetLastError(err)
	ui := state.GetUI()
	if ui != nil {
		ui.Error(err.Error())
	} else {
		// Fall back to log output; previously the error text was lost here.
		log.Print(err)
	}
}
// ShowErrorMessage displays the specified error message via the UI, and persists it using SetLastError.
func (state State) ShowErrorMessage(errorMessage string, formatArgs ...interface{}) {
state.ShowError(
fmt.Errorf(errorMessage, formatArgs...),
)
} | helpers/state.go | 0.666497 | 0.448185 | state.go | starcoder |
package advent
import (
. "github.com/davidparks11/advent2021/internal/advent/day18"
)
// Compile-time check that *snailFish satisfies the Problem interface.
var _ Problem = &snailFish{}
// snailFish implements the day-18 puzzle (snailfish number arithmetic).
type snailFish struct {
	dailyProblem
}
// NewSnailFish returns the Problem implementation for day 18.
func NewSnailFish() Problem {
	problem := snailFish{
		dailyProblem{day: 18},
	}
	return &problem
}
// Solve reads the day's input and returns both parts' answers as a []int.
func (s *snailFish) Solve() interface{} {
	input := s.GetInputLines()
	return []int{
		s.part1(input),
		s.part2(input),
	}
}
/*
You descend into the ocean trench and encounter some snailfish. They say they saw the sleigh keys! They'll even tell you which direction the keys went if you help one of the smaller snailfish with his math homework.
Snailfish numbers aren't like regular numbers. Instead, every snailfish number is a pair - an ordered list of two elements. Each element of the pair can be either a regular number or another pair.
Pairs are written as [x,y], where x and y are the elements within the pair. Here are some example snailfish numbers, one snailfish number per line:
[1,2]
[[1,2],3]
[9,[8,7]]
[[1,9],[8,5]]
[[[[1,2],[3,4]],[[5,6],[7,8]]],9]
[[[9,[3,8]],[[0,9],6]],[[[3,7],[4,9]],3]]
[[[[1,3],[5,3]],[[1,3],[8,7]]],[[[4,9],[6,9]],[[8,2],[7,3]]]]
This snailfish homework is about addition. To add two snailfish numbers, form a pair from the left and right parameters of the addition operator. For example, [1,2] + [[3,4],5] becomes [[1,2],[[3,4],5]].
There's only one problem: snailfish numbers must always be reduced, and the process of adding two snailfish numbers can result in snailfish numbers that need to be reduced.
To reduce a snailfish number, you must repeatedly do the first action in this list that applies to the snailfish number:
If any pair is nested inside four pairs, the leftmost such pair explodes.
If any regular number is 10 or greater, the leftmost such regular number splits.
Once no action in the above list applies, the snailfish number is reduced.
During reduction, at most one action applies, after which the process returns to the top of the list of actions. For example, if split produces a pair that meets the explode criteria, that pair explodes before other splits occur.
To explode a pair, the pair's left value is added to the first regular number to the left of the exploding pair (if any), and the pair's right value is added to the first regular number to the right of the exploding pair (if any). Exploding pairs will always consist of two regular numbers. Then, the entire exploding pair is replaced with the regular number 0.
Here are some examples of a single explode action:
[[[[[9,8],1],2],3],4] becomes [[[[0,9],2],3],4] (the 9 has no regular number to its left, so it is not added to any regular number).
[7,[6,[5,[4,[3,2]]]]] becomes [7,[6,[5,[7,0]]]] (the 2 has no regular number to its right, and so it is not added to any regular number).
[[6,[5,[4,[3,2]]]],1] becomes [[6,[5,[7,0]]],3].
[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]] becomes [[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]] (the pair [3,2] is unaffected because the pair [7,3] is further to the left; [3,2] would explode on the next action).
[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]] becomes [[3,[2,[8,0]]],[9,[5,[7,0]]]].
To split a regular number, replace it with a pair; the left element of the pair should be the regular number divided by two and rounded down, while the right element of the pair should be the regular number divided by two and rounded up. For example, 10 becomes [5,5], 11 becomes [5,6], 12 becomes [6,6], and so on.
Here is the process of finding the reduced result of [[[[4,3],4],4],[7,[[8,4],9]]] + [1,1]:
after addition: [[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]
after explode: [[[[0,7],4],[7,[[8,4],9]]],[1,1]]
after explode: [[[[0,7],4],[15,[0,13]]],[1,1]]
after split: [[[[0,7],4],[[7,8],[0,13]]],[1,1]]
after split: [[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]
after explode: [[[[0,7],4],[[7,8],[6,0]]],[8,1]]
Once no reduce actions apply, the snailfish number that remains is the actual result of the addition operation: [[[[0,7],4],[[7,8],[6,0]]],[8,1]].
The homework assignment involves adding up a list of snailfish numbers (your puzzle input). The snailfish numbers are each listed on a separate line. Add the first snailfish number and the second, then add that result and the third, then add that result and the fourth, and so on until all numbers in the list have been used once.
For example, the final sum of this list is [[[[1,1],[2,2]],[3,3]],[4,4]]:
[1,1]
[2,2]
[3,3]
[4,4]
The final sum of this list is [[[[3,0],[5,3]],[4,4]],[5,5]]:
[1,1]
[2,2]
[3,3]
[4,4]
[5,5]
The final sum of this list is [[[[5,0],[7,4]],[5,5]],[6,6]]:
[1,1]
[2,2]
[3,3]
[4,4]
[5,5]
[6,6]
Here's a slightly larger example:
[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
[7,[5,[[3,8],[1,4]]]]
[[2,[2,2]],[8,[8,1]]]
[2,9]
[1,[[[9,3],9],[[9,0],[0,7]]]]
[[[5,[7,4]],7],1]
[[[[4,2],2],6],[8,7]]
The final sum [[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]] is found after adding up the above snailfish numbers:
[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
+ [7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
= [[[[4,0],[5,4]],[[7,7],[6,0]]],[[8,[7,7]],[[7,9],[5,0]]]]
[[[[4,0],[5,4]],[[7,7],[6,0]]],[[8,[7,7]],[[7,9],[5,0]]]]
+ [[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
= [[[[6,7],[6,7]],[[7,7],[0,7]]],[[[8,7],[7,7]],[[8,8],[8,0]]]]
[[[[6,7],[6,7]],[[7,7],[0,7]]],[[[8,7],[7,7]],[[8,8],[8,0]]]]
+ [[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
= [[[[7,0],[7,7]],[[7,7],[7,8]]],[[[7,7],[8,8]],[[7,7],[8,7]]]]
[[[[7,0],[7,7]],[[7,7],[7,8]]],[[[7,7],[8,8]],[[7,7],[8,7]]]]
+ [7,[5,[[3,8],[1,4]]]]
= [[[[7,7],[7,8]],[[9,5],[8,7]]],[[[6,8],[0,8]],[[9,9],[9,0]]]]
[[[[7,7],[7,8]],[[9,5],[8,7]]],[[[6,8],[0,8]],[[9,9],[9,0]]]]
+ [[2,[2,2]],[8,[8,1]]]
= [[[[6,6],[6,6]],[[6,0],[6,7]]],[[[7,7],[8,9]],[8,[8,1]]]]
[[[[6,6],[6,6]],[[6,0],[6,7]]],[[[7,7],[8,9]],[8,[8,1]]]]
+ [2,9]
= [[[[6,6],[7,7]],[[0,7],[7,7]]],[[[5,5],[5,6]],9]]
[[[[6,6],[7,7]],[[0,7],[7,7]]],[[[5,5],[5,6]],9]]
+ [1,[[[9,3],9],[[9,0],[0,7]]]]
= [[[[7,8],[6,7]],[[6,8],[0,8]]],[[[7,7],[5,0]],[[5,5],[5,6]]]]
[[[[7,8],[6,7]],[[6,8],[0,8]]],[[[7,7],[5,0]],[[5,5],[5,6]]]]
+ [[[5,[7,4]],7],1]
= [[[[7,7],[7,7]],[[8,7],[8,7]]],[[[7,0],[7,7]],9]]
[[[[7,7],[7,7]],[[8,7],[8,7]]],[[[7,0],[7,7]],9]]
+ [[[[4,2],2],6],[8,7]]
= [[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]
To check whether it's the right answer, the snailfish teacher only checks the magnitude of the final sum. The magnitude of a pair is 3 times the magnitude of its left element plus 2 times the magnitude of its right element. The magnitude of a regular number is just that number.
For example, the magnitude of [9,1] is 3*9 + 2*1 = 29; the magnitude of [1,9] is 3*1 + 2*9 = 21. Magnitude calculations are recursive: the magnitude of [[9,1],[1,9]] is 3*29 + 2*21 = 129.
Here are a few more magnitude examples:
[[1,2],[[3,4],5]] becomes 143.
[[[[0,7],4],[[7,8],[6,0]]],[8,1]] becomes 1384.
[[[[1,1],[2,2]],[3,3]],[4,4]] becomes 445.
[[[[3,0],[5,3]],[4,4]],[5,5]] becomes 791.
[[[[5,0],[7,4]],[5,5]],[6,6]] becomes 1137.
[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]] becomes 3488.
So, given this example homework assignment:
[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
The final sum is:
[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]
The magnitude of this final sum is 4140.
Add up all of the snailfish numbers from the homework assignment in the order they appear. What is the magnitude of the final sum?
*/
// part1 adds all snailfish numbers together in input order and returns the
// magnitude of the final sum. Returns 0 for empty input (the previous
// version panicked on nodes[0]).
func (s *snailFish) part1(input []string) int {
	nodes := ParseInput(input)
	if len(nodes) == 0 {
		return 0
	}
	sum := nodes[0]
	for _, node := range nodes[1:] {
		sum = AddNode(sum, node)
	}
	return CalcMagnitude(sum)
}
/*
You notice a second question on the back of the homework assignment:
What is the largest magnitude you can get from adding only two of the snailfish numbers?
Note that snailfish addition is not commutative - that is, x + y and y + x can produce different results.
Again considering the last example homework assignment above:
[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
The largest magnitude of the sum of any two snailfish numbers in this list is 3993. This is the magnitude of [[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]] + [[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]], which reduces to [[[[7,8],[6,6]],[[6,0],[7,7]]],[[[7,8],[8,8]],[[7,9],[0,6]]]].
What is the largest magnitude of any sum of two different snailfish numbers from the homework assignment?
*/
func (s *snailFish) part2(input []string) int {
list := ParseInput(input)
return MaxMagnitude(list)
} | internal/advent/day18.go | 0.53048 | 0.506286 | day18.go | starcoder |
package smartagentreceiver
import (
"fmt"
"time"
sfx "github.com/signalfx/golib/v3/datapoint"
"go.opentelemetry.io/collector/consumer/pdata"
"go.uber.org/zap"
)
var (
	// errUnsupportedMetricTypeTimestamp is returned for sfx.Timestamp
	// datapoints, which have no pdata mapping here.
	errUnsupportedMetricTypeTimestamp = fmt.Errorf("unsupported metric type timestamp")
	// errNoIntValue is returned when a datapoint mapped to an int metric
	// does not actually carry an sfx.IntValue.
	errNoIntValue = fmt.Errorf("no valid value for expected IntValue")
	// errNoFloatValue is returned when a datapoint mapped to a double metric
	// does not actually carry an sfx.FloatValue.
	errNoFloatValue = fmt.Errorf("no valid value for expected FloatValue")
)
// Converter translates SignalFx golib datapoints into pdata metrics.
type Converter struct {
	logger *zap.Logger // used to debug-log dropped/unconvertible datapoints
}
// Based on https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.15.0/receiver/signalfxreceiver/signalfxv2_to_metricdata.go
// toMetrics() will respect the timestamp of any datapoint that isn't the zero value for time.Time,
// using timeReceived otherwise.
// It returns the converted pdata.Metrics plus the number of datapoints that
// were dropped because their type or value could not be converted.
func (c *Converter) toMetrics(datapoints []*sfx.Datapoint, timeReceived time.Time) (pdata.Metrics, int) {
	numDropped := 0
	// Build a single ResourceMetrics / InstrumentationLibraryMetrics pair
	// holding all converted metrics.
	md := pdata.NewMetrics()
	md.ResourceMetrics().Resize(1)
	rm := md.ResourceMetrics().At(0)
	rm.InstrumentationLibraryMetrics().Resize(1)
	ilm := rm.InstrumentationLibraryMetrics().At(0)
	metrics := ilm.Metrics()
	// Over-allocate to the max possible size; trimmed to i at the end.
	metrics.Resize(len(datapoints))
	// i counts successfully converted datapoints.
	i := 0
	for _, datapoint := range datapoints {
		if datapoint == nil {
			continue
		}
		m := metrics.At(i)
		err := setDataType(datapoint, m)
		if err != nil {
			numDropped++
			c.logger.Debug("SignalFx datapoint type conversion error",
				zap.Error(err),
				zap.String("metric", datapoint.String()))
			continue
		}
		m.SetName(datapoint.Metric)
		// Fill the single datapoint slot according to the data type chosen
		// by setDataType above.
		switch m.DataType() {
		case pdata.MetricDataTypeIntGauge:
			err = fillIntDatapoint(datapoint, m.IntGauge().DataPoints(), timeReceived)
		case pdata.MetricDataTypeIntSum:
			err = fillIntDatapoint(datapoint, m.IntSum().DataPoints(), timeReceived)
		case pdata.MetricDataTypeDoubleGauge:
			err = fillDoubleDatapoint(datapoint, m.DoubleGauge().DataPoints(), timeReceived)
		case pdata.MetricDataTypeDoubleSum:
			err = fillDoubleDatapoint(datapoint, m.DoubleSum().DataPoints(), timeReceived)
		}
		if err != nil {
			numDropped++
			c.logger.Debug("SignalFx datapoint datum conversion error",
				zap.Error(err),
				zap.String("metric", datapoint.Metric))
			continue
		}
		i++
	}
	// Trim the metric slice back down to the converted count.
	metrics.Resize(i)
	return md, numDropped
}
// setDataType maps the SignalFx metric type plus value kind (int vs float)
// onto the pdata metric data type on m, configuring sum temporality and
// monotonicity where applicable. sfx.Timestamp and unknown types/values are
// rejected with an error.
func setDataType(datapoint *sfx.Datapoint, m pdata.Metric) error {
	sfxMetricType := datapoint.MetricType
	if sfxMetricType == sfx.Timestamp {
		return errUnsupportedMetricTypeTimestamp
	}
	var isFloat bool
	switch datapoint.Value.(type) {
	case sfx.IntValue:
		// int is the default; isFloat stays false.
	case sfx.FloatValue:
		isFloat = true
	default:
		return fmt.Errorf("unsupported value type %T: %v", datapoint.Value, datapoint.Value)
	}
	switch sfxMetricType {
	case sfx.Gauge, sfx.Enum, sfx.Rate:
		// Point-in-time values map to gauges.
		if isFloat {
			m.SetDataType(pdata.MetricDataTypeDoubleGauge)
		} else {
			m.SetDataType(pdata.MetricDataTypeIntGauge)
		}
	case sfx.Count:
		// Count is a delta sum.
		if isFloat {
			m.SetDataType(pdata.MetricDataTypeDoubleSum)
			m.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityDelta)
			m.DoubleSum().SetIsMonotonic(true)
		} else {
			m.SetDataType(pdata.MetricDataTypeIntSum)
			m.IntSum().SetAggregationTemporality(pdata.AggregationTemporalityDelta)
			m.IntSum().SetIsMonotonic(true)
		}
	case sfx.Counter:
		// Counter is a cumulative sum.
		if isFloat {
			m.SetDataType(pdata.MetricDataTypeDoubleSum)
			m.DoubleSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
			m.DoubleSum().SetIsMonotonic(true)
		} else {
			m.SetDataType(pdata.MetricDataTypeIntSum)
			m.IntSum().SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
			m.IntSum().SetIsMonotonic(true)
		}
	default:
		return fmt.Errorf("unsupported metric type %T: %v", sfxMetricType, sfxMetricType)
	}
	return nil
}
// fillIntDatapoint writes the datapoint's int value, timestamp, and
// dimensions into the single slot of dps. Datapoints without a timestamp
// (zero time.Time) use timeReceived. Returns errNoIntValue when the
// datapoint does not carry an sfx.IntValue.
func fillIntDatapoint(datapoint *sfx.Datapoint, dps pdata.IntDataPointSlice, timeReceived time.Time) error {
	intValue, ok := datapoint.Value.(sfx.IntValue)
	if !ok {
		return errNoIntValue
	}
	ts := datapoint.Timestamp
	if ts.IsZero() {
		ts = timeReceived
	}
	dps.Resize(1)
	point := dps.At(0)
	point.SetTimestamp(pdata.TimestampUnixNano(uint64(ts.UnixNano())))
	point.SetValue(intValue.Int())
	fillInLabels(datapoint.Dimensions, point.LabelsMap())
	return nil
}
// fillDoubleDatapoint writes the datapoint's float value, timestamp, and
// dimensions into the single slot of dps. Datapoints without a timestamp
// (zero time.Time) use timeReceived. Returns errNoFloatValue when the
// datapoint does not carry an sfx.FloatValue.
func fillDoubleDatapoint(datapoint *sfx.Datapoint, dps pdata.DoubleDataPointSlice, timeReceived time.Time) error {
	floatValue, ok := datapoint.Value.(sfx.FloatValue)
	if !ok {
		return errNoFloatValue
	}
	ts := datapoint.Timestamp
	if ts.IsZero() {
		ts = timeReceived
	}
	dps.Resize(1)
	point := dps.At(0)
	point.SetTimestamp(pdata.TimestampUnixNano(uint64(ts.UnixNano())))
	point.SetValue(floatValue.Float())
	fillInLabels(datapoint.Dimensions, point.LabelsMap())
	return nil
}
func fillInLabels(dimensions map[string]string, labels pdata.StringMap) {
labels.InitEmptyWithCapacity(len(dimensions))
for k, v := range dimensions {
labels.Insert(k, v)
}
} | internal/receiver/smartagentreceiver/convert.go | 0.715026 | 0.403978 | convert.go | starcoder |
package dataframe
import (
"fmt"
"strings"
"github.com/isuruceanu/gota/series"
)
// combineFuncType reports whether a column from the left frame and one from
// the right frame should be folded into a single result column.
type combineFuncType func(a, b series.Series) bool
// combineHeaderBuilderFuncType produces the header name (and, presumably, the
// content — confirm against combineColumns) of a combined column; the bool
// result appears to signal whether the override applies.
type combineHeaderBuilderFuncType func(a, b series.Series) (string, interface{}, bool)
// Merge holds the configuration of a pending join between two DataFrames.
type Merge struct {
	a DataFrame // left side of the join
	b DataFrame // right side of the join
	keys []string // column names to join on
	combine bool // when true, fold matching columns from both sides into one
	combineCompareFn combineFuncType // decides whether two columns match
	combineResultHeaderFn combineHeaderBuilderFuncType // builds the combined column header
}
// Merge returns a Merge value describing a join of df with b on the given
// key columns; call one of the *Join methods to execute it.
func (df DataFrame) Merge(b DataFrame, keys ...string) Merge {
	m := Merge{
		a:    df,
		b:    b,
		keys: keys,
	}
	return m
}
// WithCombine marks the merge to fold equal columns from both sides into a
// single column, using fn to decide whether two series match.
func (m Merge) WithCombine(fn func(aSerie, bSerie series.Series) bool) Merge {
	m.combineCompareFn = fn
	m.combine = true
	return m
}
// WithResultHeader sets the builder used to produce the header (and,
// presumably, content — confirm against combineColumns) of a combined
// column. It only takes effect when combining is enabled via WithCombine.
func (m Merge) WithResultHeader(fn func(a, b series.Series) (string, interface{}, bool)) Merge {
	m.combineResultHeaderFn = fn
	return m
}
// OuterJoin executes a full outer join on the configured keys.
func (m Merge) OuterJoin() DataFrame {
	if !m.combine {
		return m.a.outerJoinHashWithCombine(m.b, nil, nil, m.keys...)
	}
	return m.a.outerJoinHashWithCombine(m.b, m.combineCompareFn, m.combineResultHeaderFn, m.keys...)
}

// RightJoin executes a right join on the configured keys.
func (m Merge) RightJoin() DataFrame {
	if !m.combine {
		return m.a.rightJoinHashWithCombine(m.b, nil, nil, m.keys...)
	}
	return m.a.rightJoinHashWithCombine(m.b, m.combineCompareFn, m.combineResultHeaderFn, m.keys...)
}

// InnerJoin executes an inner join on the configured keys.
func (m Merge) InnerJoin() DataFrame {
	if !m.combine {
		return m.a.innerJoinHashWithCombine(m.b, nil, nil, m.keys...)
	}
	return m.a.innerJoinHashWithCombine(m.b, m.combineCompareFn, m.combineResultHeaderFn, m.keys...)
}

// LeftJoin executes a left join on the configured keys.
func (m Merge) LeftJoin() DataFrame {
	if !m.combine {
		return m.a.leftJoinHashWithCombine(m.b, nil, nil, m.keys...)
	}
	return m.a.leftJoinHashWithCombine(m.b, m.combineCompareFn, m.combineResultHeaderFn, m.keys...)
}
// tuple records the pairing of column index aIdx in the left frame with
// bIdx in the right frame, plus their eventual positions (rAIdx, rBIdx)
// in the result column list (-1 while unknown).
type tuple struct {
	aIdx int
	bIdx int
	rAIdx int
	rBIdx int
}

// tupleArr is a searchable list of column pairings.
type tupleArr []tuple

// findTuple returns the index of the first tuple for which fn(val, tuple)
// is true, or (-1, false) when no tuple matches.
func (t tupleArr) findTuple(val int, fn func(int, tuple) bool) (int, bool) {
	for i := range t {
		if fn(val, t[i]) {
			return i, true
		}
	}
	return -1, false
}
// outerJoinWithCombine performs a full outer join of df with b on the key
// columns. When compareFn is non-nil, columns it reports as equal across the
// two frames are recorded and folded into a single result column at the end
// via combineColumns/combineHeaderBuilder. (Idiom fix: `for _ = range`
// replaced with `for range`.)
func (df DataFrame) outerJoinWithCombine(b DataFrame,
	compareFn combineFuncType,
	combineHeaderBuilder combineHeaderBuilderFuncType,
	keys ...string) DataFrame {
	iKeysA, iKeysB, errorArr := checkDataframesForJoins(df, b, keys...)
	if len(errorArr) != 0 {
		return DataFrame{Err: fmt.Errorf(strings.Join(errorArr, "\n"))}
	}
	aCols := df.columns
	bCols := b.columns
	// Initialize newCols: key columns first, then non-key columns of a, then b.
	var newCols []series.Series
	for _, i := range iKeysA {
		newCols = append(newCols, aCols[i].Empty())
	}
	// Record which (a, b) non-key column pairs should be combined.
	var iCombinedCols tupleArr
	if compareFn != nil {
		for i := 0; i < df.ncols; i++ {
			if !inIntSlice(i, iKeysA) {
				for j := 0; j < b.ncols; j++ {
					if !inIntSlice(j, iKeysB) {
						if compareFn(aCols[i], bCols[j]) {
							iCombinedCols = append(iCombinedCols, tuple{i, j, -1, -1})
						}
					}
				}
			}
		}
	}
	var iNotKeysA []int
	for i := 0; i < df.ncols; i++ {
		if !inIntSlice(i, iKeysA) {
			iNotKeysA = append(iNotKeysA, i)
			newCols = append(newCols, aCols[i].Empty())
			// Remember where this combined column landed in the result.
			if cIdx, cf := iCombinedCols.findTuple(i, findInA); cf {
				iCombinedCols[cIdx].rAIdx = len(newCols) - 1
			}
		}
	}
	var iNotKeysB []int
	for i := 0; i < b.ncols; i++ {
		if !inIntSlice(i, iKeysB) {
			iNotKeysB = append(iNotKeysB, i)
			newCols = append(newCols, bCols[i].Empty())
			if cIdx, cf := iCombinedCols.findTuple(i, findInB); cf {
				iCombinedCols[cIdx].rBIdx = len(newCols) - 1
			}
		}
	}
	// Fill newCols: rows of a (matched rows joined with b, unmatched padded
	// with nil on b's side).
	for i := 0; i < df.nrows; i++ {
		matched := false
		for j := 0; j < b.nrows; j++ {
			match := true
			for k := range keys {
				aElem := aCols[iKeysA[k]].Elem(i)
				bElem := bCols[iKeysB[k]].Elem(j)
				match = match && aElem.Eq(bElem)
			}
			if match {
				matched = true
				ii := 0
				for _, k := range iKeysA {
					elem := aCols[k].Elem(i)
					newCols[ii].Append(elem)
					ii++
				}
				for _, k := range iNotKeysA {
					elem := aCols[k].Elem(i)
					newCols[ii].Append(elem)
					ii++
				}
				for _, k := range iNotKeysB {
					elem := bCols[k].Elem(j)
					newCols[ii].Append(elem)
					ii++
				}
			}
		}
		if !matched {
			ii := 0
			for _, k := range iKeysA {
				elem := aCols[k].Elem(i)
				newCols[ii].Append(elem)
				ii++
			}
			for _, k := range iNotKeysA {
				elem := aCols[k].Elem(i)
				newCols[ii].Append(elem)
				ii++
			}
			for range iNotKeysB {
				newCols[ii].Append(nil)
				ii++
			}
		}
	}
	// Append rows of b that matched no row of a, padded with nil on a's side.
	for j := 0; j < b.nrows; j++ {
		matched := false
		for i := 0; i < df.nrows; i++ {
			match := true
			for k := range keys {
				aElem := aCols[iKeysA[k]].Elem(i)
				bElem := bCols[iKeysB[k]].Elem(j)
				match = match && aElem.Eq(bElem)
			}
			if match {
				matched = true
			}
		}
		if !matched {
			ii := 0
			for _, k := range iKeysB {
				elem := bCols[k].Elem(j)
				newCols[ii].Append(elem)
				ii++
			}
			for range iNotKeysA {
				newCols[ii].Append(nil)
				ii++
			}
			for _, k := range iNotKeysB {
				elem := bCols[k].Elem(j)
				newCols[ii].Append(elem)
				ii++
			}
		}
	}
	newCols = combineColumns(iCombinedCols, newCols, combineHeaderBuilder)
	return New(newCols...)
}
// rightJoinWithCombine returns a DataFrame containing the right join of df and
// b on the given keys. When compareFn is non-nil, pairs of non-key columns for
// which it returns true are combined after the join; combineHeaderBuilder,
// when non-nil, names the combined columns.
func (df DataFrame) rightJoinWithCombine(b DataFrame, compareFn combineFuncType,
	combineHeaderBuilder combineHeaderBuilderFuncType,
	keys ...string) DataFrame {
	iKeysA, iKeysB, errorArr := checkDataframesForJoins(df, b, keys...)
	if len(errorArr) != 0 {
		// Use an explicit verb: passing the joined messages directly as the
		// format string would mangle any '%' they contain (go vet: printf).
		return DataFrame{Err: fmt.Errorf("%v", strings.Join(errorArr, "\n"))}
	}
	aCols := df.columns
	bCols := b.columns
	// Initialize newCols: key columns first, then df's non-key columns, then
	// b's non-key columns — the fill loops below rely on this layout.
	var newCols []series.Series
	for _, i := range iKeysA {
		newCols = append(newCols, aCols[i].Empty())
	}
	// Record which (df column, b column) pairs should later be combined.
	var iCombinedCols tupleArr
	if compareFn != nil {
		for i := 0; i < df.ncols; i++ {
			if !inIntSlice(i, iKeysA) {
				for j := 0; j < b.ncols; j++ {
					if !inIntSlice(j, iKeysB) {
						if compareFn(aCols[i], bCols[j]) {
							iCombinedCols = append(iCombinedCols, tuple{i, j, -1, -1})
						}
					}
				}
			}
		}
	}
	// Non-key columns of df; remember where each lands in newCols so the
	// combine step can find it.
	var iNotKeysA []int
	for i := 0; i < df.ncols; i++ {
		if !inIntSlice(i, iKeysA) {
			iNotKeysA = append(iNotKeysA, i)
			newCols = append(newCols, aCols[i].Empty())
			if cIdx, cf := iCombinedCols.findTuple(i, findInA); cf {
				iCombinedCols[cIdx].rAIdx = len(newCols) - 1
			}
		}
	}
	// Non-key columns of b.
	var iNotKeysB []int
	for i := 0; i < b.ncols; i++ {
		if !inIntSlice(i, iKeysB) {
			iNotKeysB = append(iNotKeysB, i)
			newCols = append(newCols, bCols[i].Empty())
			if cIdx, cf := iCombinedCols.findTuple(i, findInB); cf {
				iCombinedCols[cIdx].rBIdx = len(newCols) - 1
			}
		}
	}
	// Fill newCols: first collect the matching row pairs and the unmatched
	// rows of b, then append them to the result columns.
	var yesmatched []struct{ i, j int }
	var nonmatched []int
	for j := 0; j < b.nrows; j++ {
		matched := false
		for i := 0; i < df.nrows; i++ {
			match := true
			for k := range keys {
				aElem := aCols[iKeysA[k]].Elem(i)
				bElem := bCols[iKeysB[k]].Elem(j)
				match = match && aElem.Eq(bElem)
			}
			if match {
				matched = true
				yesmatched = append(yesmatched, struct{ i, j int }{i, j})
			}
		}
		if !matched {
			nonmatched = append(nonmatched, j)
		}
	}
	for _, v := range yesmatched {
		i := v.i
		j := v.j
		ii := 0
		for _, k := range iKeysA {
			elem := aCols[k].Elem(i)
			newCols[ii].Append(elem)
			ii++
		}
		for _, k := range iNotKeysA {
			elem := aCols[k].Elem(i)
			newCols[ii].Append(elem)
			ii++
		}
		for _, k := range iNotKeysB {
			elem := bCols[k].Elem(j)
			newCols[ii].Append(elem)
			ii++
		}
	}
	for _, j := range nonmatched {
		ii := 0
		for _, k := range iKeysB {
			elem := bCols[k].Elem(j)
			newCols[ii].Append(elem)
			ii++
		}
		// df contributes no values for unmatched b rows: pad with nil.
		for range iNotKeysA {
			newCols[ii].Append(nil)
			ii++
		}
		for _, k := range iNotKeysB {
			elem := bCols[k].Elem(j)
			newCols[ii].Append(elem)
			ii++
		}
	}
	newCols = combineColumns(iCombinedCols, newCols, combineHeaderBuilder)
	return New(newCols...)
}
// innerJoinWithCombine returns a DataFrame containing the inner join of df
// and b on the given keys. When compareFn is non-nil, pairs of non-key
// columns for which it returns true are combined after the join;
// combineHeaderBuilder, when non-nil, names the combined columns.
func (df DataFrame) innerJoinWithCombine(b DataFrame, compareFn combineFuncType,
	combineHeaderBuilder combineHeaderBuilderFuncType,
	keys ...string) DataFrame {
	// Resolve the join keys to column indexes on both sides.
	iKeysA, iKeysB, errorArr := checkDataframesForJoins(df, b, keys...)
	if len(errorArr) != 0 {
		return DataFrame{Err: fmt.Errorf("%v", strings.Join(errorArr, "\n"))}
	}
	aCols := df.columns
	bCols := b.columns
	// Initialize newCols: key columns first, then df's non-key columns, then
	// b's non-key columns — the fill loop below relies on this layout.
	var newCols []series.Series
	for _, i := range iKeysA {
		newCols = append(newCols, aCols[i].Empty())
	}
	// Record which (df column, b column) pairs should later be combined.
	var iCombinedCols tupleArr
	if compareFn != nil {
		for i := 0; i < df.ncols; i++ {
			if !inIntSlice(i, iKeysA) {
				for j := 0; j < b.ncols; j++ {
					if !inIntSlice(j, iKeysB) {
						if compareFn(aCols[i], bCols[j]) {
							iCombinedCols = append(iCombinedCols, tuple{i, j, -1, -1})
						}
					}
				}
			}
		}
	}
	// Non-key columns of df; remember where each lands in newCols so the
	// combine step can find it.
	var iNotKeysA []int
	for i := 0; i < df.ncols; i++ {
		if !inIntSlice(i, iKeysA) {
			iNotKeysA = append(iNotKeysA, i)
			newCols = append(newCols, aCols[i].Empty())
			if cIdx, cf := iCombinedCols.findTuple(i, findInA); cf {
				iCombinedCols[cIdx].rAIdx = len(newCols) - 1
			}
		}
	}
	// Non-key columns of b.
	var iNotKeysB []int
	for i := 0; i < b.ncols; i++ {
		if !inIntSlice(i, iKeysB) {
			iNotKeysB = append(iNotKeysB, i)
			newCols = append(newCols, bCols[i].Empty())
			if cIdx, cf := iCombinedCols.findTuple(i, findInB); cf {
				iCombinedCols[cIdx].rBIdx = len(newCols) - 1
			}
		}
	}
	// Fill newCols: O(nrows*nrows) scan appending one result row per pair of
	// rows whose key elements all match.
	for i := 0; i < df.nrows; i++ {
		for j := 0; j < b.nrows; j++ {
			match := true
			for k := range keys {
				aElem := aCols[iKeysA[k]].Elem(i)
				bElem := bCols[iKeysB[k]].Elem(j)
				match = match && aElem.Eq(bElem)
			}
			if match {
				ii := 0
				for _, k := range iKeysA {
					elem := aCols[k].Elem(i)
					newCols[ii].Append(elem)
					ii++
				}
				for _, k := range iNotKeysA {
					elem := aCols[k].Elem(i)
					newCols[ii].Append(elem)
					ii++
				}
				for _, k := range iNotKeysB {
					elem := bCols[k].Elem(j)
					newCols[ii].Append(elem)
					ii++
				}
			}
		}
	}
	newCols = combineColumns(iCombinedCols, newCols, combineHeaderBuilder)
	return New(newCols...)
}
// leftJoinWithCombine returns a DataFrame containing the left join of df and
// b on the given keys. When compareFn is non-nil, pairs of non-key columns
// for which it returns true are combined after the join; combineHeaderBuilder,
// when non-nil, names the combined columns.
func (df DataFrame) leftJoinWithCombine(b DataFrame, compareFn combineFuncType,
	combineHeaderBuilder combineHeaderBuilderFuncType,
	keys ...string) DataFrame {
	iKeysA, iKeysB, errorArr := checkDataframesForJoins(df, b, keys...)
	if len(errorArr) != 0 {
		// Use an explicit verb: passing the joined messages directly as the
		// format string would mangle any '%' they contain (go vet: printf).
		return DataFrame{Err: fmt.Errorf("%v", strings.Join(errorArr, "\n"))}
	}
	aCols := df.columns
	bCols := b.columns
	// Initialize newCols: key columns first, then df's non-key columns, then
	// b's non-key columns — the fill loop below relies on this layout.
	var newCols []series.Series
	for _, i := range iKeysA {
		newCols = append(newCols, aCols[i].Empty())
	}
	// Record which (df column, b column) pairs should later be combined.
	var iCombinedCols tupleArr
	if compareFn != nil {
		for i := 0; i < df.ncols; i++ {
			if !inIntSlice(i, iKeysA) {
				for j := 0; j < b.ncols; j++ {
					if !inIntSlice(j, iKeysB) {
						if compareFn(aCols[i], bCols[j]) {
							iCombinedCols = append(iCombinedCols, tuple{i, j, -1, -1})
						}
					}
				}
			}
		}
	}
	// Non-key columns of df; remember where each lands in newCols so the
	// combine step can find it.
	var iNotKeysA []int
	for i := 0; i < df.ncols; i++ {
		if !inIntSlice(i, iKeysA) {
			iNotKeysA = append(iNotKeysA, i)
			newCols = append(newCols, aCols[i].Empty())
			if cIdx, cf := iCombinedCols.findTuple(i, findInA); cf {
				iCombinedCols[cIdx].rAIdx = len(newCols) - 1
			}
		}
	}
	// Non-key columns of b.
	var iNotKeysB []int
	for i := 0; i < b.ncols; i++ {
		if !inIntSlice(i, iKeysB) {
			iNotKeysB = append(iNotKeysB, i)
			newCols = append(newCols, bCols[i].Empty())
			if cIdx, cf := iCombinedCols.findTuple(i, findInB); cf {
				iCombinedCols[cIdx].rBIdx = len(newCols) - 1
			}
		}
	}
	// Fill newCols: every df row appears at least once; matched b rows
	// contribute their non-key values, unmatched rows are padded with nil.
	for i := 0; i < df.nrows; i++ {
		matched := false
		for j := 0; j < b.nrows; j++ {
			match := true
			for k := range keys {
				aElem := aCols[iKeysA[k]].Elem(i)
				bElem := bCols[iKeysB[k]].Elem(j)
				match = match && aElem.Eq(bElem)
			}
			if match {
				matched = true
				ii := 0
				for _, k := range iKeysA {
					elem := aCols[k].Elem(i)
					newCols[ii].Append(elem)
					ii++
				}
				for _, k := range iNotKeysA {
					elem := aCols[k].Elem(i)
					newCols[ii].Append(elem)
					ii++
				}
				for _, k := range iNotKeysB {
					elem := bCols[k].Elem(j)
					newCols[ii].Append(elem)
					ii++
				}
			}
		}
		if !matched {
			ii := 0
			for _, k := range iKeysA {
				elem := aCols[k].Elem(i)
				newCols[ii].Append(elem)
				ii++
			}
			for _, k := range iNotKeysA {
				elem := aCols[k].Elem(i)
				newCols[ii].Append(elem)
				ii++
			}
			// b contributes no values for unmatched df rows: pad with nil.
			for range iNotKeysB {
				newCols[ii].Append(nil)
				ii++
			}
		}
	}
	newCols = combineColumns(iCombinedCols, newCols, combineHeaderBuilder)
	return New(newCols...)
}
// combineColumns merges each recorded column pair (rAIdx, rBIdx) in newCols
// by calling Combine, optionally renaming the result via headerBuilderFn, and
// then drops the now-redundant right-hand columns from the returned slice.
func combineColumns(iCombinedCols tupleArr, newCols []series.Series, headerBuilderFn combineHeaderBuilderFuncType) []series.Series {
	for _, c := range iCombinedCols {
		// Pairs that never resolved to result positions are skipped.
		if c.rAIdx == -1 || c.rBIdx == -1 {
			continue
		}
		cobCol := newCols[c.rAIdx].Combine(newCols[c.rBIdx])
		// NOTE(review): a Combine error is silently ignored and the original
		// left column kept — presumably intentional best-effort behavior.
		if cobCol.Err == nil {
			if headerBuilderFn != nil {
				name, otherInfo, ignore := headerBuilderFn(newCols[c.rAIdx], newCols[c.rBIdx])
				if !ignore {
					cobCol.Name = name
					cobCol.OtherInfo = otherInfo
				}
			}
			newCols[c.rAIdx] = cobCol
		}
	}
	// Drop every column that served as the right-hand side of a combine.
	result := []series.Series{}
	for idx, s := range newCols {
		if _, ok := iCombinedCols.findTuple(idx, findInRB); ok {
			continue
		}
		result = append(result, s)
	}
	return result
}
// checkDataframesForJoins resolves every join key to its column index in both
// DataFrames a and b. It returns the two index slices plus a list of error
// messages; the list is empty when all keys were found on both sides.
func checkDataframesForJoins(a, b DataFrame, keys ...string) ([]int, []int, []string) {
	if len(keys) == 0 {
		return nil, nil, []string{"join keys not specified"}
	}
	// A missing key is recorded as a message; its (negative) index is still
	// appended so both slices stay aligned with keys.
	errorArr := []string{}
	var iKeysA, iKeysB []int
	for _, key := range keys {
		idxA := a.ColIndex(key)
		if idxA < 0 {
			errorArr = append(errorArr, fmt.Sprint("can't find key \"", key, "\" on left DataFrame"))
		}
		iKeysA = append(iKeysA, idxA)
		idxB := b.ColIndex(key)
		if idxB < 0 {
			errorArr = append(errorArr, fmt.Sprint("can't find key \"", key, "\" on right DataFrame"))
		}
		iKeysB = append(iKeysB, idxB)
	}
	return iKeysA, iKeysB, errorArr
}
// Predicates used with tupleArr.findTuple to locate a tuple by one of its
// index fields.
var (
	// findInA matches a tuple by its source column index in the left DataFrame.
	findInA = func(val int, t tuple) bool {
		return val == t.aIdx
	}
	// findInB matches a tuple by its source column index in the right DataFrame.
	findInB = func(val int, t tuple) bool {
		return val == t.bIdx
	}
	// findInRB matches a tuple by the result position of its right-hand column.
	findInRB = func(val int, t tuple) bool {
		return val == t.rBIdx
	}
)
// innerJoinHashWithCombine is the hash-based variant of innerJoinWithCombine:
// it prepares the shared join input, builds the inner-join columns, then
// applies the column-combine step.
func (df DataFrame) innerJoinHashWithCombine(b DataFrame, compareFn combineFuncType, combineHeaderBuilder combineHeaderBuilderFuncType, keys ...string) DataFrame {
	joinInput, err := prepareJoin(df, b, compareFn, keys...)
	if err != nil {
		return DataFrame{Err: err}
	}
	combineColumnsInput := prepareInnerJoinHashForCombineColumns(joinInput)
	newCols := combineColumns(joinInput.iCombinedCols, combineColumnsInput.newCols, combineHeaderBuilder)
	return New(newCols...)
}

// outerJoinHashWithCombine is the hash-based variant of the outer join with
// column combining.
func (df DataFrame) outerJoinHashWithCombine(b DataFrame, compareFn combineFuncType, combineHeaderBuilder combineHeaderBuilderFuncType, keys ...string) DataFrame {
	joinInput, err := prepareJoin(df, b, compareFn, keys...)
	if err != nil {
		return DataFrame{Err: err}
	}
	combineColumnsInput := prepareOuterJoinHashForCombineColumns(joinInput)
	newCols := combineColumns(joinInput.iCombinedCols, combineColumnsInput.newCols, combineHeaderBuilder)
	return New(newCols...)
}

// leftJoinHashWithCombine is the hash-based variant of leftJoinWithCombine.
func (df DataFrame) leftJoinHashWithCombine(b DataFrame, compareFn combineFuncType, combineHeaderBuilder combineHeaderBuilderFuncType, keys ...string) DataFrame {
	joinInput, err := prepareJoin(df, b, compareFn, keys...)
	if err != nil {
		return DataFrame{Err: err}
	}
	combineColumnsInput := prepareLeftJoinHashForCombineColumns(joinInput)
	newCols := combineColumns(joinInput.iCombinedCols, combineColumnsInput.newCols, combineHeaderBuilder)
	return New(newCols...)
}

// rightJoinHashWithCombine is the hash-based variant of rightJoinWithCombine.
func (df DataFrame) rightJoinHashWithCombine(b DataFrame, compareFn combineFuncType, combineHeaderBuilder combineHeaderBuilderFuncType, keys ...string) DataFrame {
	joinInput, err := prepareJoin(df, b, compareFn, keys...)
	if err != nil {
		return DataFrame{Err: err}
	}
	combineColumnsInput := prepareRightJoinHashForCombineColumns(joinInput)
	newCols := combineColumns(joinInput.iCombinedCols, combineColumnsInput.newCols, combineHeaderBuilder)
	return New(newCols...)
}
package services
import "fmt"
// Vertex is a node of a Graph. Data doubles as the vertex identifier, and
// Vertices holds the adjacent vertices keyed by their Data.
type Vertex struct {
	Data     string
	Vertices map[string]*Vertex
}

// NewVertex returns a vertex holding data, with no neighbours yet.
func NewVertex(data string) *Vertex {
	v := &Vertex{Data: data}
	v.Vertices = make(map[string]*Vertex)
	return v
}

// Graph is a collection of vertices keyed by their Data. The directed flag
// selects whether AddEdge/RemoveEdge treat edges as one-way or two-way.
type Graph struct {
	Vertices map[string]*Vertex
	directed bool
}

// NewDirectedGraph returns an empty graph whose edges are one-way.
func NewDirectedGraph() *Graph {
	g := &Graph{directed: true}
	g.Vertices = make(map[string]*Vertex)
	return g
}

// NewUndirectedGraph returns an empty graph whose edges are two-way.
func NewUndirectedGraph() *Graph {
	return &Graph{Vertices: make(map[string]*Vertex)}
}
// AddVertex inserts a new, edgeless vertex for data into the graph.
// An existing vertex with the same data is silently replaced, dropping its
// adjacency map (edges from other vertices to it keep pointing at the old
// vertex).
func (g *Graph) AddVertex(data string) {
	v := NewVertex(data)
	g.Vertices[data] = v
}
// AddEdge connects the vertices identified by from and to. For undirected
// graphs the reverse edge is added as well; self-loops are stored once.
// It returns an error when either vertex is missing or the edge already
// exists.
func (g *Graph) AddEdge(from, to string) error {
	v1 := g.Vertices[from]
	v2 := g.Vertices[to]
	if v1 == nil || v2 == nil {
		return fmt.Errorf("Vertex not found")
	}
	if _, ok := v1.Vertices[v2.Data]; ok {
		return fmt.Errorf("Edge already exists")
	}
	v1.Vertices[v2.Data] = v2
	if !g.directed && v1.Data != v2.Data {
		v2.Vertices[v1.Data] = v1
	}
	// Re-inserting v1/v2 into g.Vertices (as the original did) was redundant:
	// both pointers were fetched from that very map above.
	return nil
}
// RemoveEdge deletes the edge between from and to; for undirected graphs the
// reverse edge is removed as well. It returns an error when either vertex or
// the edge is missing.
func (g *Graph) RemoveEdge(from, to string) error {
	v1 := g.Vertices[from]
	v2 := g.Vertices[to]
	if v1 == nil || v2 == nil {
		return fmt.Errorf("Vertex not found")
	}
	if _, ok := v1.Vertices[v2.Data]; !ok {
		return fmt.Errorf("Edge not found")
	}
	delete(v1.Vertices, v2.Data)
	if !g.directed && v1.Data != v2.Data {
		delete(v2.Vertices, v1.Data)
	}
	return nil
}
// IsAncestor reports whether ancestor relates to descendant in g, dispatching
// on whether the graph is directed.
func IsAncestor(g *Graph, ancestor, descendant string) bool {
	if g.directed {
		return isAncestorDirected(g, ancestor, descendant)
	}
	return isAncestorUndirected(g, ancestor, descendant)
}
// isAncestorDirected reports whether descendant is reachable from ancestor in
// at most one step: both vertices must exist, and either ancestor equals
// descendant or descendant is a direct neighbour of ancestor.
// NOTE(review): despite the name this does NOT follow edges transitively —
// grandchildren are never found. Confirm whether that is intended.
func isAncestorDirected(g *Graph, ancestor, descendant string) bool {
	if g.Vertices[ancestor] == nil || g.Vertices[descendant] == nil {
		return false
	}
	// A vertex's Data is its map key, so this is the ancestor == descendant case.
	if g.Vertices[ancestor].Data == descendant {
		return true
	}
	for _, v := range g.Vertices[ancestor].Vertices {
		if v.Data == descendant {
			return true
		}
	}
	return false
}
// isAncestorUndirected is the undirected counterpart of isAncestorDirected:
// it additionally checks descendant's adjacency for ancestor, so the relation
// holds in either direction.
// NOTE(review): like the directed variant, only depth-1 adjacency is checked,
// not transitive reachability.
func isAncestorUndirected(g *Graph, ancestor, descendant string) bool {
	if g.Vertices[ancestor] == nil || g.Vertices[descendant] == nil {
		return false
	}
	if g.Vertices[ancestor].Data == descendant {
		return true
	}
	for _, v := range g.Vertices[ancestor].Vertices {
		if v.Data == descendant {
			return true
		}
	}
	// Also look at the reverse direction, since edges are two-way.
	for _, v := range g.Vertices[descendant].Vertices {
		if v.Data == ancestor {
			return true
		}
	}
	return false
}
package ical
import (
"encoding/base64"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// MIME type and file extension for iCal, defined in RFC 5545 section 8.1.
const (
	// MIMEType is the media type to use e.g. in Content-Type headers.
	MIMEType = "text/calendar"
	// Extension is the conventional file extension, without the leading dot.
	Extension = "ics"
)
// Params is a set of property parameters, keyed by upper-cased parameter name.
type Params map[string][]string

// Get returns the first value recorded for name, or "" when the parameter is
// absent. Lookup is case-insensitive.
func (params Params) Get(name string) string {
	values := params[strings.ToUpper(name)]
	if len(values) == 0 {
		return ""
	}
	return values[0]
}

// Set replaces every value of name with the single given value.
func (params Params) Set(name, value string) {
	params[strings.ToUpper(name)] = []string{value}
}

// Add appends value to the list of values recorded for name.
func (params Params) Add(name, value string) {
	key := strings.ToUpper(name)
	params[key] = append(params[key], value)
}

// Del removes every value recorded for name.
func (params Params) Del(name string) {
	delete(params, strings.ToUpper(name))
}

// Prop is a component property.
type Prop struct {
	Name   string
	Params Params
	Value  string
}

// NewProp creates a new property with the specified name, normalized to
// upper case, and an empty parameter set.
func NewProp(name string) *Prop {
	p := &Prop{Params: make(Params)}
	p.Name = strings.ToUpper(name)
	return p
}
// ValueType returns the type of the property value: the explicit VALUE
// parameter if present, otherwise the default type registered for this
// property name (ValueDefault when neither is known).
func (prop *Prop) ValueType() ValueType {
	t := ValueType(prop.Params.Get(ParamValue))
	if t == ValueDefault {
		t = defaultValueTypes[prop.Name]
	}
	return t
}

// SetValueType records t as the property's value type. The VALUE parameter is
// omitted when t is ValueDefault or already the implicit default for this
// property name.
func (prop *Prop) SetValueType(t ValueType) {
	dt, ok := defaultValueTypes[prop.Name]
	if t == ValueDefault || (ok && t == dt) {
		prop.Params.Del(ParamValue)
	} else {
		prop.Params.Set(ParamValue, string(t))
	}
}

// expectValueType returns an error unless the property's value type is want
// or unspecified.
func (prop *Prop) expectValueType(want ValueType) error {
	t := prop.ValueType()
	if t != ValueDefault && t != want {
		return fmt.Errorf("ical: property %q: expected type %q, got %q", prop.Name, want, t)
	}
	return nil
}
// Binary decodes the property value as base64-encoded binary data.
func (prop *Prop) Binary() ([]byte, error) {
	err := prop.expectValueType(ValueBinary)
	if err != nil {
		return nil, err
	}
	data, err := base64.StdEncoding.DecodeString(prop.Value)
	return data, err
}

// SetBinary stores b as a base64-encoded BINARY value and records the
// ENCODING parameter accordingly.
func (prop *Prop) SetBinary(b []byte) {
	prop.SetValueType(ValueBinary)
	prop.Params.Set("ENCODING", "BASE64")
	prop.Value = base64.StdEncoding.EncodeToString(b)
}
// Bool parses the property value as a BOOLEAN ("TRUE"/"FALSE",
// case-insensitive).
func (prop *Prop) Bool() (bool, error) {
	if err := prop.expectValueType(ValueBool); err != nil {
		return false, err
	}
	v := strings.ToUpper(prop.Value)
	if v == "TRUE" {
		return true, nil
	}
	if v == "FALSE" {
		return false, nil
	}
	return false, fmt.Errorf("ical: invalid boolean: %q", prop.Value)
}
// DateTime parses the property value as a date-time or a date.
//
// The location is chosen in this order: the property's TZID parameter, then
// the given loc, then UTC. The concrete layout is picked from the VALUE type
// when explicitly set, otherwise inferred from the value's length.
func (prop *Prop) DateTime(loc *time.Location) (time.Time, error) {
	// Use the TZID location, if available, otherwise the given location.
	// Default to UTC, if there is no TZID or given location.
	if tzid := prop.Params.Get(PropTimezoneID); tzid != "" {
		tzLoc, err := time.LoadLocation(tzid)
		if err != nil {
			return time.Time{}, err
		}
		loc = tzLoc
	}
	if loc == nil {
		loc = time.UTC
	}
	const (
		dateFormat        = "20060102"
		datetimeFormat    = "20060102T150405"
		datetimeUTCFormat = "20060102T150405Z"
	)
	valueType := prop.ValueType()
	valueLength := len(prop.Value)
	switch valueType {
	case ValueDate:
		return time.ParseInLocation(dateFormat, prop.Value, loc)
	case ValueDateTime:
		// The longer, trailing-'Z' form always denotes UTC.
		if valueLength == len(datetimeFormat) {
			return time.ParseInLocation(datetimeFormat, prop.Value, loc)
		}
		return time.ParseInLocation(datetimeUTCFormat, prop.Value, time.UTC)
	case ValueDefault:
		// No explicit type: infer the layout from the string length.
		switch valueLength {
		case len(dateFormat):
			return time.ParseInLocation(dateFormat, prop.Value, loc)
		case len(datetimeFormat):
			return time.ParseInLocation(datetimeFormat, prop.Value, loc)
		case len(datetimeUTCFormat):
			return time.ParseInLocation(datetimeUTCFormat, prop.Value, time.UTC)
		}
	}
	return time.Time{}, fmt.Errorf("ical: cannot process: (%q) %s", valueType, prop.Value)
}

// SetDateTime stores t as a DATE-TIME value. UTC (or nil-location) times are
// written in the trailing-'Z' form; any other location is recorded via the
// TZID parameter.
func (prop *Prop) SetDateTime(t time.Time) {
	prop.SetValueType(ValueDateTime)
	switch t.Location() {
	case nil, time.UTC:
		prop.Value = t.Format("20060102T150405Z")
	default:
		prop.Params.Set(PropTimezoneID, t.Location().String())
		prop.Value = t.Format("20060102T150405")
	}
}
type durationParser struct {
s string
}
func (p *durationParser) consume(c byte) bool {
if len(p.s) == 0 || p.s[0] != c {
return false
}
p.s = p.s[1:]
return true
}
func (p *durationParser) parseCount() (time.Duration, error) {
// Find the first non-digit
i := strings.IndexFunc(p.s, func(r rune) bool {
return r < '0' || r > '9'
})
if i == 0 {
return 0, fmt.Errorf("ical: invalid duration: expected a digit")
}
if i < 0 {
i = len(p.s)
}
n, err := strconv.ParseUint(p.s[:i], 10, 64)
if err != nil {
return 0, fmt.Errorf("ical: invalid duration: %v", err)
}
p.s = p.s[i:]
return time.Duration(n), nil
}
func (p *durationParser) parseDuration() (time.Duration, error) {
neg := p.consume('-')
if !neg {
_ = p.consume('+')
}
if !p.consume('P') {
return 0, fmt.Errorf("ical: invalid duration: expected 'P'")
}
var dur time.Duration
isTime := false
for len(p.s) > 0 {
if p.consume('T') {
isTime = true
}
n, err := p.parseCount()
if err != nil {
return 0, err
}
if !isTime {
if p.consume('D') {
dur += n * 24 * time.Hour
} else if p.consume('W') {
dur += n * 7 * 24 * time.Hour
} else {
return 0, fmt.Errorf("ical: invalid duration: expected 'D' or 'W'")
}
} else {
if p.consume('H') {
dur += n * time.Hour
} else if p.consume('M') {
dur += n * time.Minute
} else if p.consume('S') {
dur += n * time.Second
} else {
return 0, fmt.Errorf("ical: invalid duration: expected 'H', 'M' or 'S'")
}
}
}
if neg {
dur = -dur
}
return dur, nil
}
// Duration parses the property value as an RFC 5545 DURATION.
func (prop *Prop) Duration() (time.Duration, error) {
	err := prop.expectValueType(ValueDuration)
	if err != nil {
		return 0, err
	}
	parser := durationParser{strings.ToUpper(prop.Value)}
	return parser.parseDuration()
}
// SetDuration stores dur as a DURATION value with whole-second precision;
// sub-second components are truncated toward zero.
func (prop *Prop) SetDuration(dur time.Duration) {
	prop.SetValueType(ValueDuration)
	sec := dur.Milliseconds() / 1000
	var sb strings.Builder
	if sec < 0 {
		sb.WriteByte('-')
		sec = -sec
	}
	sb.WriteString("PT")
	sb.WriteString(strconv.FormatInt(sec, 10))
	sb.WriteByte('S')
	prop.Value = sb.String()
}
// Float parses the property value as a FLOAT.
func (prop *Prop) Float() (float64, error) {
	err := prop.expectValueType(ValueFloat)
	if err != nil {
		return 0, err
	}
	return strconv.ParseFloat(prop.Value, 64)
}

// Int parses the property value as an INTEGER.
func (prop *Prop) Int() (int, error) {
	err := prop.expectValueType(ValueInt)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(prop.Value)
}
// TextList parses the property value as a comma-separated TEXT list, undoing
// the RFC 5545 escaping of backslash, semicolon, comma and newline.
func (prop *Prop) TextList() ([]string, error) {
	if err := prop.expectValueType(ValueText); err != nil {
		return nil, err
	}
	var l []string
	var sb strings.Builder
	for i := 0; i < len(prop.Value); i++ {
		switch c := prop.Value[i]; c {
		case '\\':
			// Escape sequence: the next byte decides the decoded character.
			i++
			if i >= len(prop.Value) {
				return nil, fmt.Errorf("ical: malformed text: antislash at end of text")
			}
			switch c := prop.Value[i]; c {
			case '\\', ';', ',':
				sb.WriteByte(c)
			case 'n', 'N':
				sb.WriteByte('\n')
			default:
				return nil, fmt.Errorf("ical: malformed text: invalid escape sequence '\\%v'", c)
			}
		case ',':
			// Unescaped comma: list separator.
			l = append(l, sb.String())
			sb.Reset()
		default:
			sb.WriteByte(c)
		}
	}
	// The final element (possibly empty) is always appended.
	l = append(l, sb.String())
	return l, nil
}
// SetTextList encodes l as a comma-separated TEXT value, escaping backslash,
// semicolon, comma and newline as required by RFC 5545.
func (prop *Prop) SetTextList(l []string) {
	prop.SetValueType(ValueText)
	var b strings.Builder
	for i, item := range l {
		if i > 0 {
			b.WriteByte(',')
		}
		b.Grow(len(item))
		for _, r := range item {
			switch r {
			case '\n':
				b.WriteString("\\n")
			case '\\', ';', ',':
				b.WriteByte('\\')
				b.WriteRune(r)
			default:
				b.WriteRune(r)
			}
		}
	}
	prop.Value = b.String()
}
// Text returns the first entry of the TEXT value, or "" when the list is
// empty or parsing fails.
func (prop *Prop) Text() (string, error) {
	list, err := prop.TextList()
	if err != nil || len(list) == 0 {
		return "", err
	}
	return list[0], nil
}

// SetText stores text as a single-element TEXT value.
func (prop *Prop) SetText(text string) {
	prop.SetTextList([]string{text})
}
// URI parses the property value as a URI or binary. If the value is binary, a
// data URI is returned, carrying the FMTTYPE parameter (when present) as the
// media type.
func (prop *Prop) URI() (*url.URL, error) {
	switch t := prop.ValueType(); t {
	case ValueDefault, ValueURI:
		return url.Parse(prop.Value)
	case ValueBinary:
		mediaType := prop.Params.Get(ParamFormatType)
		// The value is already base64, so it can be embedded verbatim.
		return &url.URL{
			Scheme: "data",
			Opaque: mediaType + ";base64," + prop.Value,
		}, nil
	default:
		return nil, fmt.Errorf("ical: expected URI or BINARY, got %q", t)
	}
}

// SetURI stores u as a URI value.
func (prop *Prop) SetURI(u *url.URL) {
	prop.SetValueType(ValueURI)
	prop.Value = u.String()
}
// TODO: Period, RecurrenceRule, Time, URI, UTCOffset
// Props is a set of component properties, keyed by upper-cased property name.
type Props map[string][]Prop

// Get returns the first property with the given name, or nil when absent.
// Lookup is case-insensitive.
func (props Props) Get(name string) *Prop {
	if l := props[strings.ToUpper(name)]; len(l) > 0 {
		return &l[0]
	}
	return nil
}

// Set replaces all properties sharing prop's name with prop.
func (props Props) Set(prop *Prop) {
	props[prop.Name] = []Prop{*prop}
}

// Add appends prop to the properties sharing its name.
func (props Props) Add(prop *Prop) {
	props[prop.Name] = append(props[prop.Name], *prop)
}

// Del removes all properties with the given name. The name is upper-cased
// before the lookup, matching Get/Set/Add: previously a lower-case name would
// silently fail to delete anything.
func (props Props) Del(name string) {
	delete(props, strings.ToUpper(name))
}
// Text returns the decoded TEXT value of the named property, or "" when the
// property is absent.
func (props Props) Text(name string) (string, error) {
	if prop := props.Get(name); prop != nil {
		return prop.Text()
	}
	return "", nil
}

// SetText replaces the named property with a single TEXT value.
func (props Props) SetText(name, text string) {
	prop := NewProp(name)
	prop.SetText(text)
	props.Set(prop)
}

// DateTime parses the named property as a date or date-time (see
// Prop.DateTime). The zero time is returned when the property is absent.
func (props Props) DateTime(name string, loc *time.Location) (time.Time, error) {
	if prop := props.Get(name); prop != nil {
		return prop.DateTime(loc)
	}
	return time.Time{}, nil
}

// SetDateTime replaces the named property with a DATE-TIME value.
func (props Props) SetDateTime(name string, t time.Time) {
	prop := NewProp(name)
	prop.SetDateTime(t)
	props.Set(prop)
}

// SetURI replaces the named property with a URI value.
func (props Props) SetURI(name string, u *url.URL) {
	prop := NewProp(name)
	prop.SetURI(u)
	props.Set(prop)
}

// URI parses the named property as a URI (see Prop.URI); both results are
// nil when the property is absent.
func (props Props) URI(name string) (*url.URL, error) {
	if prop := props.Get(name); prop != nil {
		return prop.URI()
	}
	return nil, nil
}
// Component is an iCalendar component: collections of properties that express
// a particular calendar semantic. A components can be an events, a to-do, a
// journal entry, timezone information, free/busy time information, or an
// alarm.
type Component struct {
Name string
Props Props
Children []*Component
}
// NewComponent creates a new component with the specified name.
func NewComponent(name string) *Component {
return &Component{
Name: strings.ToUpper(name),
Props: make(Props),
}
} | ical.go | 0.634543 | 0.426262 | ical.go | starcoder |
package evolution
import "fmt"
// DualTreeNode represents a tree node with a maximum of two children.
// It is not technically a binary tree node as it does NOT place any ordering
// on left and right children as binary trees prototypically do.
type DualTreeNode struct {
	key   string
	value string
	left  *DualTreeNode // left child
	right *DualTreeNode // right child
	arity int           // number of child slots this node supports
}

// IsEqual reports whether the receiver and t agree on key, value and arity.
// Child pointers are deliberately not compared. The receiver is named d for
// consistency with the other DualTreeNode methods, and the cascaded ifs are
// collapsed into a single expression.
func (d *DualTreeNode) IsEqual(t DualTreeNode) bool {
	return d.key == t.key && d.value == t.value && d.arity == t.arity
}
// IsValEqual reports whether the two nodes carry the same value string.
func (d *DualTreeNode) IsValEqual(t DualTreeNode) bool {
	return d.value == t.value
}
// IsLeaf reports whether the node is a leaf: it must have arity 0 and at
// least one unset child pointer.
func (d *DualTreeNode) IsLeaf() bool {
	if d.arity != 0 {
		return false
	}
	return d.right == nil || d.left == nil
}
// ArityRemainder reports how many child slots of the node are still free,
// based on its arity: binary nodes (arity 2) count both children, unary nodes
// (arity 1) only the left child, and anything else has no free slots. This is
// used to balance NonTerminals and Terminals depending on their requirements.
func (d *DualTreeNode) ArityRemainder() int {
	switch d.arity {
	case 2:
		free := 2
		if d.right != nil {
			free--
		}
		if d.left != nil {
			free--
		}
		return free
	case 1:
		if d.left != nil {
			return 0
		}
		return 1
	default:
		return 0
	}
}
// ToSymbolicExpression converts the node into a SymbolicExpression carrying
// the same value and arity; nodes with arity 2 are kind 1 (non-terminal),
// everything else kind 0. (The original comment was copy-pasted from IsLeaf.)
func (d *DualTreeNode) ToSymbolicExpression() (SymbolicExpression, error) {
	err := d.isValid()
	if err != nil {
		return SymbolicExpression{}, err
	}
	kind := 0
	if d.arity == 2 {
		kind = 1
	}
	return SymbolicExpression{
		arity: d.arity,
		value: d.value,
		kind:  kind,
	}, err
}
// ToDualTree wraps the node as the root of a DualTree, after checking that
// the node has a non-empty key and value.
func (d *DualTreeNode) ToDualTree() (DualTree, error) {
	err := d.isValid()
	if err != nil {
		return DualTree{}, err
	}
	return DualTree{
		root: d,
	}, err
}
// isValid returns an error when the node's key or value is empty.
func (d *DualTreeNode) isValid() error {
	switch {
	case d.key == "":
		return fmt.Errorf("ToDualTree | key is empty")
	case d.value == "":
		return fmt.Errorf("ToDualTree | value is empty")
	}
	return nil
}
// Clone returns a copy of the node with a fresh random 5-character key.
// NOTE(review): the original comment called this an O(N) deep clone, but the
// value receiver only copies the node itself — the left/right pointers still
// alias the original subtrees. Confirm whether callers rely on a true deep
// copy.
func (d DualTreeNode) Clone() DualTreeNode {
	d.key = RandString(5)
	return d
}
package feedforward
// layer represents one layer of a feedforward neural network.
type layer interface {
	// initialize fills the layer's weights using the given Initializer.
	initialize(Initializer)
	// processInput computes, caches and returns the layer's output.
	processInput([]float64) []float64
	// getOutputCache returns the output of the last processInput call.
	getOutputCache() []float64
	// processError turns the following layer's delta into this layer's error.
	processError([]float64) []float64
	// getWeights returns the underlying weight matrix.
	getWeights() [][]float64
	// getBiases returns the underlying bias slice.
	getBiases() []float64
}
// baseLayer carries the state shared by every layer implementation.
type baseLayer struct {
	weights          [][]float64        // weights[j][i]: previous-layer neuron j -> neuron i
	biases           []float64          // one bias per neuron
	activation       ActivationFunction // activation applied to each net value
	prevLayerNeurons int                // size of the expected input vector
	neurons          int                // number of neurons in this layer
	outputCache      []float64          // output of the most recent processInput call
}
// initialize fills the layer's weight matrix via the provided Initializer and
// zeroes every bias (biases are not delegated to the initializer).
func (l *baseLayer) initialize(initializer Initializer) {
	initializer.Initialize(l.weights)
	for i := 0; i < l.neurons; i++ {
		l.biases[i] = 0
	}
}
// processInput computes the output of the entire layer for the given input,
// caches it, and returns it to the caller.
// NOTE(review): an earlier comment claimed the neurons were processed
// concurrently through goroutines synchronized by a WaitGroup; the
// implementation below is sequential.
func (l *baseLayer) processInput(input []float64) []float64 {
	output := make([]float64, l.neurons)
	for i := 0; i < l.neurons; i++ {
		output[i] = l.activation.Value(l.net(i, input))
	}
	l.outputCache = output
	return output
}
// net computes the weighted input (net value) of the i-th neuron: its bias
// plus the dot product of input with the neuron's incoming weights.
func (l *baseLayer) net(i int, input []float64) float64 {
	sum := l.biases[i]
	for j := 0; j < l.prevLayerNeurons; j++ {
		sum += l.weights[j][i] * input[j]
	}
	return sum
}
// getOutputCache returns the output cached by the last processInput call
// (nil before the first call).
func (l *baseLayer) getOutputCache() []float64 {
	return l.outputCache
}

// getWeights returns the underlying weight matrix (not a copy).
func (l *baseLayer) getWeights() [][]float64 {
	return l.weights
}

// getBiases returns the underlying bias slice (not a copy).
func (l *baseLayer) getBiases() []float64 {
	return l.biases
}
// hiddenLayer is a hidden layer of the network. It extends baseLayer and
// additionally holds the outgoing (next-layer) weights, which are needed to
// compute the weighted layer error during backpropagation.
type hiddenLayer struct {
	baseLayer
	nextLayerNeurons int
	nextLayerWeights [][]float64
}

// newHiddenLayer builds a hidden layer from its incoming weights and biases,
// the weights of the following layer, and its activation function. Sizes are
// derived from the slice lengths.
func newHiddenLayer(weights [][]float64, biases []float64, nextLayerWeights [][]float64, activation ActivationFunction) layer {
	return &hiddenLayer{
		baseLayer:        baseLayer{weights: weights, biases: biases, activation: activation, prevLayerNeurons: len(weights), neurons: len(biases)},
		nextLayerWeights: nextLayerWeights,
		nextLayerNeurons: len(nextLayerWeights[0]),
	}
}
// processError computes the weighted error of this hidden layer: for each
// neuron, the activation gradient at its cached output times the next layer's
// delta weighted by the outgoing weights.
// NOTE(review): an earlier comment claimed per-neuron concurrency; the
// implementation below is sequential.
func (h *hiddenLayer) processError(delta []float64) []float64 {
	layerError := make([]float64, h.neurons)
	output := h.outputCache
	for i := 0; i < h.neurons; i++ {
		sum := 0.
		for j := 0; j < h.nextLayerNeurons; j++ {
			sum += delta[j] * h.nextLayerWeights[i][j]
		}
		layerError[i] = h.activation.Gradient(output[i]) * sum
	}
	return layerError
}
// outputLayer is the output layer of the network. It extends baseLayer and
// provides the output-layer form of processError. (The original comment
// mislabelled it a hidden layer.)
type outputLayer struct {
	baseLayer
}

// newOutputLayer builds an output layer from its incoming weights, biases and
// activation function. Sizes are derived from the slice lengths.
func newOutputLayer(weights [][]float64, biases []float64, activation ActivationFunction) layer {
	return &outputLayer{
		baseLayer: baseLayer{weights: weights, biases: biases, activation: activation, prevLayerNeurons: len(weights), neurons: len(biases)},
	}
}
// processError computes the error of the output layer: for each neuron, the
// activation gradient at its cached output times the corresponding delta.
func (o *outputLayer) processError(delta []float64) []float64 {
	layerError := make([]float64, o.neurons)
	output := o.outputCache
	for i := 0; i < o.neurons; i++ {
		layerError[i] = o.activation.Gradient(output[i]) * delta[i]
	}
	return layerError
}
package accounting
import (
"encoding/json"
)
// TaxType represents a tax in the external accounting system.
type TaxType struct {
	// The code/ID of the tax in the external accounting system.
	Code string `json:"code"`
	// The display name of the tax.
	Name *string `json:"name,omitempty"`
}

// NewTaxType instantiates a new TaxType object.
// This constructor assigns default values to properties that define one and
// ensures the properties required by the API are set; its argument list will
// change if the set of required properties changes.
func NewTaxType(code string) *TaxType {
	t := TaxType{Code: code}
	return &t
}

// NewTaxTypeWithDefaults instantiates a new TaxType object.
// This constructor only assigns default values to properties that define one;
// it does not guarantee that properties required by the API are set.
func NewTaxTypeWithDefaults() *TaxType {
	return &TaxType{}
}
// GetCode returns the Code field value. Safe to call on a nil receiver, in
// which case the zero string is returned.
func (o *TaxType) GetCode() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Code
}

// GetCodeOk returns a tuple with the Code field value
// and a boolean to check if the value has been set.
// Safe to call on a nil receiver.
func (o *TaxType) GetCodeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Code, true
}

// SetCode sets field value.
func (o *TaxType) SetCode(v string) {
	o.Code = v
}

// GetName returns the Name field value if set, zero value otherwise.
// Safe to call on a nil receiver.
func (o *TaxType) GetName() string {
	if o == nil || o.Name == nil {
		var ret string
		return ret
	}
	return *o.Name
}

// GetNameOk returns a tuple with the Name field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TaxType) GetNameOk() (*string, bool) {
	if o == nil || o.Name == nil {
		return nil, false
	}
	return o.Name, true
}

// HasName returns a boolean if a field has been set.
func (o *TaxType) HasName() bool {
	if o != nil && o.Name != nil {
		return true
	}
	return false
}

// SetName gets a reference to the given string and assigns it to the Name field.
func (o *TaxType) SetName(v string) {
	o.Name = &v
}
// MarshalJSON serializes a TaxType: the required Code field is always
// emitted, Name only when set.
func (o TaxType) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	// Code is required, so it is serialized unconditionally (the generated
	// "if true" guard was vestigial and has been removed).
	toSerialize["code"] = o.Code
	if o.Name != nil {
		toSerialize["name"] = o.Name
	}
	return json.Marshal(toSerialize)
}
// NullableTaxType wraps a TaxType pointer together with an explicit "was set"
// flag, so that JSON null can be distinguished from an absent value.
type NullableTaxType struct {
	value *TaxType
	isSet bool
}

// Get returns the wrapped value (possibly nil).
func (v NullableTaxType) Get() *TaxType {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableTaxType) Set(val *TaxType) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableTaxType) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableTaxType) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableTaxType returns a wrapper around val that is already marked set.
func NewNullableTaxType(val *TaxType) *NullableTaxType {
	return &NullableTaxType{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value (JSON null when unset or nil).
func (v NullableTaxType) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks it as set.
func (v *NullableTaxType) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package cast
import (
"fmt"
"time"
)
// ToDuration casts an interface{} to a time.Duration, discarding any
// conversion error (the zero Duration is returned on failure).
func ToDuration(i interface{}, unit ...string) time.Duration {
	d, _ := ToDurationE(i, unit...)
	return d
}
// ToDurationE casts an interface{} to a time.Duration, reporting an error for
// unsupported types. Integer and float inputs are interpreted in the optional
// unit (nanoseconds by default); strings are parsed with time.ParseDuration;
// nil yields the zero Duration.
// NOTE(review): pointer arguments are dereferenced without a nil check, so a
// typed nil pointer (e.g. (*int)(nil)) will panic — confirm this is intended.
func ToDurationE(i interface{}, unit ...string) (time.Duration, error) {
	switch s := i.(type) {
	case nil:
		return 0, nil
	// Signed integers and pointers to them.
	case int:
		return parseIntDuration(int64(s), unit...), nil
	case int8:
		return parseIntDuration(int64(s), unit...), nil
	case int16:
		return parseIntDuration(int64(s), unit...), nil
	case int32:
		return parseIntDuration(int64(s), unit...), nil
	case int64:
		return parseIntDuration(s, unit...), nil
	case *int:
		return parseIntDuration(int64(*s), unit...), nil
	case *int8:
		return parseIntDuration(int64(*s), unit...), nil
	case *int16:
		return parseIntDuration(int64(*s), unit...), nil
	case *int32:
		return parseIntDuration(int64(*s), unit...), nil
	case *int64:
		return parseIntDuration(*s, unit...), nil
	// Unsigned integers and pointers to them (converted through int64, so
	// values above math.MaxInt64 wrap).
	case uint:
		return parseIntDuration(int64(s), unit...), nil
	case uint8:
		return parseIntDuration(int64(s), unit...), nil
	case uint16:
		return parseIntDuration(int64(s), unit...), nil
	case uint32:
		return parseIntDuration(int64(s), unit...), nil
	case uint64:
		return parseIntDuration(int64(s), unit...), nil
	case *uint:
		return parseIntDuration(int64(*s), unit...), nil
	case *uint8:
		return parseIntDuration(int64(*s), unit...), nil
	case *uint16:
		return parseIntDuration(int64(*s), unit...), nil
	case *uint32:
		return parseIntDuration(int64(*s), unit...), nil
	case *uint64:
		return parseIntDuration(int64(*s), unit...), nil
	// Floats and pointers to them.
	case float32:
		return parseFloatDuration(float64(s), unit...), nil
	case float64:
		return parseFloatDuration(s, unit...), nil
	case *float32:
		return parseFloatDuration(float64(*s), unit...), nil
	case *float64:
		return parseFloatDuration(*s, unit...), nil
	// Strings are parsed as Go duration literals; the unit argument is ignored.
	case string:
		return time.ParseDuration(s)
	case *string:
		return time.ParseDuration(*s)
	case time.Duration:
		return s, nil
	default:
		return 0, fmt.Errorf("unable to cast %#v of type %T to time.Duration", i, i)
	}
}
// parseIntDuration scales v by the named unit from unitMap, defaulting to
// nanoseconds when no unit is given.
// (Fix: an unknown unit name previously zeroed the multiplier via the
// discarded `ok`, silently producing a 0 duration; it now falls back to
// nanoseconds.)
func parseIntDuration(v int64, unit ...string) time.Duration {
	unitN := int64(time.Nanosecond)
	if len(unit) > 0 {
		if n, ok := unitMap[unit[0]]; ok {
			unitN = n
		}
	}
	return time.Duration(v * unitN)
}
func parseFloatDuration(v float64, unit ...string) time.Duration {
unitN := int64(time.Nanosecond)
if len(unit) > 0 {
unitN, _ = unitMap[unit[0]]
}
return time.Duration(v * float64(unitN))
} | cast/duration.go | 0.758689 | 0.515193 | duration.go | starcoder |
package runematcher
import (
"fmt"
"strings"
"unicode/utf8"
)
func negate(m Matcher) Matcher {
return negatedMatcher{m}
}
func and(first, second Matcher) Matcher {
return andMatcher{first, second}
}
func or(first, second Matcher) Matcher {
return orMatcher{first, second}
}
func matchesAnyOf(m Matcher, str string) bool {
return !m.MatchesNoneOf(str)
}
// matchesAllOf reports whether every rune of str satisfies m.
// The empty string vacuously matches.
func matchesAllOf(m Matcher, str string) bool {
	for _, r := range str {
		if !m.Matches(r) {
			return false
		}
	}
	return true
}
// matchesNoneOf reports whether no rune of str satisfies m, i.e. the first
// matching index does not exist.
func matchesNoneOf(m Matcher, str string) bool {
	return m.IndexIn(str, 0) == -1
}
// indexInRunes returns the index of the first rune at or after start that
// satisfies m, or -1 when there is none or start is out of range.
func indexInRunes(m Matcher, runes []rune, start int) int {
	if start < 0 || start >= len(runes) {
		return -1
	}
	for i, r := range runes[start:] {
		if m.Matches(r) {
			return start + i
		}
	}
	return -1
}
// indexIn returns the rune index of the first match in str at or after
// start, or -1; it decodes str once and delegates to IndexInRunes.
func indexIn(m Matcher, str string, start int) int {
	return m.IndexInRunes([]rune(str), start)
}
// lastIndexIn returns the rune index of the last match in str, or -1.
func lastIndexIn(m Matcher, str string) int {
	runes := []rune(str)
	for i := len(runes); i > 0; {
		i--
		if m.Matches(runes[i]) {
			return i
		}
	}
	return -1
}
// countIn returns how many runes of str satisfy m.
func countIn(m Matcher, str string) (count int) {
	for _, r := range str {
		if m.Matches(r) {
			count++
		}
	}
	return
}
// removeFrom returns str with every matching rune deleted. The original
// string is returned untouched when nothing matches.
//
// The deletion is an in-place compaction of the rune slice: `spread` counts
// how many runes have been dropped so far, and each surviving rune is
// shifted left by that amount. The outer loop advances once per dropped
// rune; the inner loop copies survivors until the next match or the end.
func removeFrom(m Matcher, str string) string {
	runes := []rune(str)
	pos := m.IndexInRunes(runes, 0)
	if pos == -1 {
		return str
	}
	spread := 1
	for {
		pos++
		for {
			if pos == len(runes) {
				// End of input: everything before pos-spread is compacted.
				return string(runes[0 : pos-spread])
			}
			if m.Matches(runes[pos]) {
				break
			}
			runes[pos-spread] = runes[pos]
			pos++
		}
		spread++
	}
}

// retainFrom returns str with only the matching runes kept; it is the
// complement of RemoveFrom via the negated matcher.
func retainFrom(m Matcher, str string) string {
	return m.Negate().RemoveFrom(str)
}
// replaceFromRune returns str with every matching rune replaced by
// replacement. The input is returned unchanged when nothing matches.
func replaceFromRune(m Matcher, str string, replacement rune) string {
	runes := []rune(str)
	pos := m.IndexInRunes(runes, 0)
	if pos == -1 {
		return str
	}
	runes[pos] = replacement
	for i := pos + 1; i < len(runes); i++ {
		if m.Matches(runes[i]) {
			runes[i] = replacement
		}
	}
	return string(runes)
}

// replaceFrom returns str with every matching rune replaced by the
// replacement string. An empty replacement degenerates to RemoveFrom; a
// one-byte replacement takes the cheaper single-rune path.
// NOTE(review): replacementLen is measured in BYTES, so a single multi-byte
// rune goes through the general builder path — still correct, just slower;
// confirm that is intended.
func replaceFrom(m Matcher, str string, replacement string) string {
	replacementLen := len(replacement)
	if replacementLen == 0 {
		return m.RemoveFrom(str)
	}
	if replacementLen == 1 {
		r, _ := utf8.DecodeRuneInString(replacement)
		return m.ReplaceFromRune(str, r)
	}
	runes := []rune(str)
	pos := m.IndexInRunes(runes, 0)
	if pos == -1 {
		return str
	}
	buf := strings.Builder{}
	// Pre-size for modest growth: 1.5x the input plus a little slack.
	buf.Grow((len(str) * 3 / 2) + 16)
	oldpos := 0
	for {
		// Copy the unmatched span, then the replacement for the match at pos.
		buf.WriteString(string(runes[oldpos:pos]))
		buf.WriteString(replacement)
		oldpos = pos + 1
		pos = m.IndexInRunes(runes, oldpos)
		if pos == -1 {
			break
		}
	}
	buf.WriteString(string(runes[oldpos:]))
	return buf.String()
}
// trimFrom returns str with matching runes removed from both ends;
// matching runes in the interior are kept.
func trimFrom(m Matcher, str string) string {
	runes := []rune(str)
	first := 0
	for first < len(runes) && m.Matches(runes[first]) {
		first++
	}
	last := len(runes) - 1
	for last > first && m.Matches(runes[last]) {
		last--
	}
	return string(runes[first : last+1])
}
// trimLeadingFrom returns str with the leading run of matching runes removed.
func trimLeadingFrom(m Matcher, str string) string {
	runes := []rune(str)
	first := 0
	for first < len(runes) && m.Matches(runes[first]) {
		first++
	}
	return string(runes[first:])
}
// trimTrailingFrom returns str with the trailing run of matching runes removed.
func trimTrailingFrom(m Matcher, str string) string {
	runes := []rune(str)
	end := len(runes)
	for end > 0 && m.Matches(runes[end-1]) {
		end--
	}
	return string(runes[:end])
}
// collapseFrom replaces each maximal run of matching runes with a single
// `replacement` rune. When every run already collapses to itself (the run is
// exactly one occurrence of `replacement`), the input is returned unchanged
// without allocating.
func collapseFrom(m Matcher, str string, replacement rune) string {
	runes := []rune(str)
	length := len(runes)
	for i := 0; i < length; i++ {
		c := runes[i]
		if m.Matches(c) {
			if c == replacement && (i == length-1 || !m.Matches(runes[i+1])) {
				// a no-op replacement
				i++
			} else {
				// First real change found: copy the untouched prefix and
				// hand the rest to finishCollapseFrom.
				builder := &strings.Builder{}
				builder.Grow(length)
				builder.WriteString(string(runes[0:i]))
				builder.WriteRune(replacement)
				return finishCollapseFrom(m, runes, i+1, length, replacement, builder, true)
			}
		}
	}
	// no replacement needed
	return str
}

// trimAndCollapseFrom trims matching runes from both ends, then collapses
// interior runs of matches to a single `replacement`. When nothing was
// trimmed it falls back to collapseFrom to keep the no-alloc fast path.
func trimAndCollapseFrom(m Matcher, str string, replacement rune) string {
	runes := []rune(str)
	length := len(runes)
	first := 0
	last := length - 1
	for first < length && m.Matches(runes[first]) {
		first++
	}
	for last > first && m.Matches(runes[last]) {
		last--
	}
	if first == 0 && last == length-1 {
		return collapseFrom(m, str, replacement)
	}
	builder := &strings.Builder{}
	builder.Grow(last + 1 - first)
	return finishCollapseFrom(m, runes, first, last+1, replacement, builder, false)
}

// finishCollapseFrom appends runes[start:end) to builder, emitting repl
// exactly once per run of matching runes. inMatchingGroup seeds whether the
// copy begins inside an already-emitted run.
func finishCollapseFrom(m Matcher, runes []rune, start, end int, repl rune, builder *strings.Builder, inMatchingGroup bool) string {
	for i := start; i < end; i++ {
		c := runes[i]
		if m.Matches(c) {
			if !inMatchingGroup {
				builder.WriteRune(repl)
				inMatchingGroup = true
			}
		} else {
			builder.WriteRune(c)
			inMatchingGroup = false
		}
	}
	return builder.String()
}
// showCharacter renders r as a Java-style escape such as "\u0041"
// (upper-case hex, at least four digits).
func showCharacter(r rune) string {
	return fmt.Sprintf("\\u%04X", r)
}
package main
/*
--- Day 2: Dive! ---
Now, you need to figure out how to pilot this thing.
It seems like the submarine can take a series of commands like forward 1, down 2, or up 3:
forward X increases the horizontal position by X units.
down X increases the depth by X units.
up X decreases the depth by X units.
Note that since you're on a submarine, down and up affect your depth, and so they have the opposite result of what you might expect.
The submarine seems to already have a planned course (your puzzle input). You should probably figure out where it's going. For example:
forward 5
down 5
forward 8
up 3
down 8
forward 2
Your horizontal position and depth both start at 0. The steps above would then modify them as follows:
forward 5 adds 5 to your horizontal position, a total of 5.
down 5 adds 5 to your depth, resulting in a value of 5.
forward 8 adds 8 to your horizontal position, a total of 13.
up 3 decreases your depth by 3, resulting in a value of 2.
down 8 adds 8 to your depth, resulting in a value of 10.
forward 2 adds 2 to your horizontal position, a total of 15.
After following these instructions, you would have a horizontal position of 15 and a depth of 10. (Multiplying these together produces 150.)
Calculate the horizontal position and depth you would have after following the planned course. What do you get if you multiply your final horizontal position by your final depth?
*/
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// Movement codes used by Direction.Direction.
const (
	Forward int = 0
	Down        = 1
	Up          = 2
)

// Direction pairs one movement code (Forward/Down/Up) with its magnitude.
type Direction struct {
	Direction int
	Spaces    int
}

// Position is the submarine state; Aim is used only for part 2.
type Position struct {
	X   int
	Y   int
	Aim int
}
// ReadInput parses "<direction> <spaces>" lines from r and sends one
// Direction per line on c, closing c when the input is exhausted. Malformed
// lines cause a panic, keeping the original strictness.
// (Fixes: removed the leftover debug Println of every input line, and
// surfaced scanner.Err() instead of silently truncating on a read error.)
func ReadInput(c chan<- Direction, r io.Reader) {
	scanner := bufio.NewScanner(r)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		tokens := strings.Split(scanner.Text(), " ")
		if len(tokens) != 2 {
			panic("failed to parse line")
		}
		var dir int
		switch tokens[0] {
		case "forward":
			dir = Forward
		case "down":
			dir = Down
		case "up":
			dir = Up
		default:
			panic("bad line")
		}
		spaces, err := strconv.Atoi(tokens[1])
		if err != nil {
			panic(err)
		}
		c <- Direction{dir, spaces}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	close(c)
}
// ProcessInput folds part-1 course commands from c into a final Position:
// Forward advances the horizontal position, Down/Up adjust the depth.
// Aim is left at zero.
func ProcessInput(c <-chan Direction) Position {
	var pos Position
	for cmd := range c {
		switch cmd.Direction {
		case Forward:
			pos.X += cmd.Spaces
		case Down:
			pos.Y += cmd.Spaces
		case Up:
			pos.Y -= cmd.Spaces
		}
	}
	return pos
}
// ProcessInput2 folds part-2 course commands from c: Down/Up change Aim,
// and Forward advances X while changing depth by Aim*Spaces.
func ProcessInput2(c <-chan Direction) Position {
	var pos Position
	for cmd := range c {
		switch cmd.Direction {
		case Down:
			pos.Aim += cmd.Spaces
		case Up:
			pos.Aim -= cmd.Spaces
		case Forward:
			pos.X += cmd.Spaces
			pos.Y += pos.Aim * cmd.Spaces
		}
	}
	return pos
}
// ScanFile reads the course from file and prints the final position and the
// product of range and depth. When aim is true the part-2 rules
// (ProcessInput2) are applied, otherwise part 1 (ProcessInput).
// NOTE(review): the opened file is never closed — acceptable for a
// short-lived CLI run, but worth a defer f.Close().
func ScanFile(file string, aim bool) {
	c := make(chan Direction)
	var pos Position
	f, err := os.Open(file)
	if err != nil {
		panic(err)
	}
	// Producer runs concurrently; Process* drains c until ReadInput closes it.
	go ReadInput(c, f)
	if aim {
		pos = ProcessInput2(c)
	} else {
		pos = ProcessInput(c)
	}
	fmt.Printf("range: %d depth: %d\n", pos.X, pos.Y)
	fmt.Printf("multiplicand: %d\n", pos.X*pos.Y)
}
// main answers both puzzle parts for the sample and the real input.
func main() {
	ScanFile("sample.txt", false)
	fmt.Println()
	ScanFile("input.txt", false)
	fmt.Println()
	// part 2 (aim rules)
	ScanFile("sample.txt", true)
	fmt.Println()
	ScanFile("input.txt", true)
}
// Package list holds text/template bodies used to generate typed list helpers.
package list

// WithParamFunctions is a code-generation template (rendered with fields such
// as .TName, .PName and .TypeParameter) producing fold, group-by, sum/mean
// and min/max-by helpers for a generated {{.TName}}List type. The string is
// template source, not compiled Go — it must be kept byte-for-byte intact.
const WithParamFunctions = `
//-------------------------------------------------------------------------------------------------
// List:With[{{.TypeParameter}}]
// FoldLeft{{.TypeParameter.LongName}} applies a binary operator to a start value and all elements of this list, going left to right.
func (list {{.TName}}List) FoldLeft{{.TypeParameter.LongName}}(zero {{.TypeParameter}}, fn func({{.TypeParameter}}, {{.PName}}) {{.TypeParameter}}) {{.TypeParameter}} {
	sum := zero
	for _, v := range list {
		sum = fn(sum, v)
	}
	return sum
}
// FoldRight{{.TypeParameter.LongName}} applies a binary operator to a start value and all elements of this list, going right to left.
func (list {{.TName}}List) FoldRight{{.TypeParameter.LongName}}(zero {{.TypeParameter}}, fn func({{.TypeParameter}}, {{.PName}}) {{.TypeParameter}}) {{.TypeParameter}} {
	sum := zero
	for i := len(list) - 1; i >= 0; i-- {
		sum = fn(sum, list[i])
	}
	return sum
}
{{if .TypeParameter.Comparable}}
// This method requires {{.TypeParameter}} be comparable.
// GroupBy{{.TypeParameter.LongName}} groups elements into a map keyed by {{.TypeParameter}}.
func (list {{.TName}}List) GroupBy{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) map[{{.TypeParameter}}]{{.TName}}List {
	result := make(map[{{.TypeParameter}}]{{.TName}}List)
	for _, v := range list {
		key := fn(v)
		result[key] = append(result[key], v)
	}
	return result
}
{{end}}
{{if .TypeParameter.Numeric}}
// These methods require {{.TypeParameter}} be numeric.
// Sum{{.TypeParameter.LongName}} sums {{.PName}} over elements in {{.TName}}List.
func (list {{.TName}}List) Sum{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.TypeParameter}}) {
	for _, v := range list {
		result += fn(v)
	}
	return
}
// Mean{{.TypeParameter.LongName}} sums {{.TypeParameter}} over all elements and divides by len({{.TName}}List).
// Panics if there are no elements.
func (list {{.TName}}List) Mean{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.TypeParameter}}) {
	l := len(list)
	if l == 0 {
		panic("Cannot determine the mean of an empty list.")
	}
	for _, v := range list {
		result += fn(v)
	}
	result = result / {{.TypeParameter}}(l)
	return
}
{{end}}
{{if .TypeParameter.Ordered}}
// These methods require {{.TypeParameter}} be ordered.
// MinBy{{.TypeParameter.LongName}} finds the first element which yields the smallest value measured by function fn.
// fn is usually called a projection or measuring function.
// Panics if there are no elements.
func (list {{.TName}}List) MinBy{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.PName}}) {
	l := len(list)
	if l == 0 {
		panic("Cannot determine the minimum of an empty list.")
		return
	}
	result = list[0]
	if l > 1 {
		min := fn(result)
		for i := 1; i < l; i++ {
			v := list[i]
			f := fn(v)
			if min > f {
				min = f
				result = v
			}
		}
	}
	return
}
// MaxBy{{.TypeParameter.LongName}} finds the first element which yields the largest value measured by function fn.
// fn is usually called a projection or measuring function.
// Panics if there are no elements.
func (list {{.TName}}List) MaxBy{{.TypeParameter.LongName}}(fn func({{.PName}}) {{.TypeParameter}}) (result {{.PName}}) {
	l := len(list)
	if l == 0 {
		panic("Cannot determine the maximum of an empty list.")
	}
	result = list[0]
	if l > 1 {
		max := fn(result)
		for i := 1; i < l; i++ {
			v := list[i]
			f := fn(v)
			if max < f {
				max = f
				result = v
			}
		}
	}
	return
}
{{end}}
`
package vp8
// filter2 modifies a 2-pixel wide or 2-pixel high band along an edge.
//
// It visits 16 positions along the edge (index steps by iStep) and, where
// the simple-filter threshold passes, nudges the two pixels straddling the
// edge (offsets ±1*jStep) toward each other. p1/p0 are on one side of the
// edge, q0/q1 on the other.
func filter2(pix []byte, level, index, iStep, jStep int) {
	for n := 16; n > 0; n, index = n-1, index+iStep {
		p1 := int(pix[index-2*jStep])
		p0 := int(pix[index-1*jStep])
		q0 := int(pix[index+0*jStep])
		q1 := int(pix[index+1*jStep])
		// Skip unless 2*|p0-q0| + |p1-q1|/2 <= level (shift binds tighter
		// than +, so this is the spec's edge-difference test).
		if abs(p0-q0)<<1+abs(p1-q1)>>1 > level {
			continue
		}
		a := 3*(q0-p0) + clamp127(p1-q1)
		a1 := clamp15((a + 4) >> 3)
		a2 := clamp15((a + 3) >> 3)
		pix[index-1*jStep] = clamp255(p0 + a2)
		pix[index+0*jStep] = clamp255(q0 - a1)
	}
}
// filter246 modifies a 2-, 4- or 6-pixel wide or high band along an edge.
//
// For each of the n positions it examines four pixels on each side of the
// edge and applies progressively wider smoothing depending on the edge
// (level), interior (ilevel) and high-edge-variance (hlevel) thresholds.
// fourNotSix selects the 4-pixel variant over the 6-pixel one.
func filter246(pix []byte, n, level, ilevel, hlevel, index, iStep, jStep int, fourNotSix bool) {
	for ; n > 0; n, index = n-1, index+iStep {
		p3 := int(pix[index-4*jStep])
		p2 := int(pix[index-3*jStep])
		p1 := int(pix[index-2*jStep])
		p0 := int(pix[index-1*jStep])
		q0 := int(pix[index+0*jStep])
		q1 := int(pix[index+1*jStep])
		q2 := int(pix[index+2*jStep])
		q3 := int(pix[index+3*jStep])
		// Edge-strength test, as in filter2.
		if abs(p0-q0)<<1+abs(p1-q1)>>1 > level {
			continue
		}
		// All interior steps must be gentle enough to filter at all.
		if abs(p3-p2) > ilevel ||
			abs(p2-p1) > ilevel ||
			abs(p1-p0) > ilevel ||
			abs(q1-q0) > ilevel ||
			abs(q2-q1) > ilevel ||
			abs(q3-q2) > ilevel {
			continue
		}
		if abs(p1-p0) > hlevel || abs(q1-q0) > hlevel {
			// Filter 2 pixels.
			a := 3*(q0-p0) + clamp127(p1-q1)
			a1 := clamp15((a + 4) >> 3)
			a2 := clamp15((a + 3) >> 3)
			pix[index-1*jStep] = clamp255(p0 + a2)
			pix[index+0*jStep] = clamp255(q0 - a1)
		} else if fourNotSix {
			// Filter 4 pixels.
			a := 3 * (q0 - p0)
			a1 := clamp15((a + 4) >> 3)
			a2 := clamp15((a + 3) >> 3)
			a3 := (a1 + 1) >> 1
			pix[index-2*jStep] = clamp255(p1 + a3)
			pix[index-1*jStep] = clamp255(p0 + a2)
			pix[index+0*jStep] = clamp255(q0 - a1)
			pix[index+1*jStep] = clamp255(q1 - a3)
		} else {
			// Filter 6 pixels.
			a := clamp127(3*(q0-p0) + clamp127(p1-q1))
			a1 := (27*a + 63) >> 7
			a2 := (18*a + 63) >> 7
			a3 := (9*a + 63) >> 7
			pix[index-3*jStep] = clamp255(p2 + a3)
			pix[index-2*jStep] = clamp255(p1 + a2)
			pix[index-1*jStep] = clamp255(p0 + a1)
			pix[index+0*jStep] = clamp255(q0 - a1)
			pix[index+1*jStep] = clamp255(q1 - a2)
			pix[index+2*jStep] = clamp255(q2 - a3)
		}
	}
}
// simpleFilter implements the simple filter, as specified in section 15.2.
// It runs filter2 on each macroblock's left/top edges (level+4) and, when
// the inner flag is set, on the three interior 4-pixel boundaries (level).
// NOTE(review): this reads d.perMBFilterParams while computeFilterParams
// below fills d.filterParams — confirm both refer to the same storage.
func (d *Decoder) simpleFilter() {
	for mby := 0; mby < d.mbh; mby++ {
		for mbx := 0; mbx < d.mbw; mbx++ {
			f := d.perMBFilterParams[d.mbw*mby+mbx]
			if f.level == 0 {
				continue
			}
			l := int(f.level)
			yIndex := (mby*d.img.YStride + mbx) * 16
			if mbx > 0 {
				filter2(d.img.Y, l+4, yIndex, d.img.YStride, 1)
			}
			if f.inner {
				filter2(d.img.Y, l, yIndex+0x4, d.img.YStride, 1)
				filter2(d.img.Y, l, yIndex+0x8, d.img.YStride, 1)
				filter2(d.img.Y, l, yIndex+0xc, d.img.YStride, 1)
			}
			if mby > 0 {
				filter2(d.img.Y, l+4, yIndex, 1, d.img.YStride)
			}
			if f.inner {
				filter2(d.img.Y, l, yIndex+d.img.YStride*0x4, 1, d.img.YStride)
				filter2(d.img.Y, l, yIndex+d.img.YStride*0x8, 1, d.img.YStride)
				filter2(d.img.Y, l, yIndex+d.img.YStride*0xc, 1, d.img.YStride)
			}
		}
	}
}
// normalFilter implements the normal filter, as specified in section 15.3.
// Like simpleFilter but using filter246 with interior/high-edge-variance
// thresholds, and also filtering the chroma planes (Cb/Cr, 8x8 blocks).
func (d *Decoder) normalFilter() {
	for mby := 0; mby < d.mbh; mby++ {
		for mbx := 0; mbx < d.mbw; mbx++ {
			f := d.perMBFilterParams[d.mbw*mby+mbx]
			if f.level == 0 {
				continue
			}
			l, il, hl := int(f.level), int(f.ilevel), int(f.hlevel)
			yIndex := (mby*d.img.YStride + mbx) * 16
			cIndex := (mby*d.img.CStride + mbx) * 8
			if mbx > 0 {
				filter246(d.img.Y, 16, l+4, il, hl, yIndex, d.img.YStride, 1, false)
				filter246(d.img.Cb, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false)
				filter246(d.img.Cr, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false)
			}
			if f.inner {
				filter246(d.img.Y, 16, l, il, hl, yIndex+0x4, d.img.YStride, 1, true)
				filter246(d.img.Y, 16, l, il, hl, yIndex+0x8, d.img.YStride, 1, true)
				filter246(d.img.Y, 16, l, il, hl, yIndex+0xc, d.img.YStride, 1, true)
				filter246(d.img.Cb, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true)
				filter246(d.img.Cr, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true)
			}
			if mby > 0 {
				filter246(d.img.Y, 16, l+4, il, hl, yIndex, 1, d.img.YStride, false)
				filter246(d.img.Cb, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false)
				filter246(d.img.Cr, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false)
			}
			if f.inner {
				filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x4, 1, d.img.YStride, true)
				filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x8, 1, d.img.YStride, true)
				filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0xc, 1, d.img.YStride, true)
				filter246(d.img.Cb, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true)
				filter246(d.img.Cr, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true)
			}
		}
	}
}
// filterParam holds the loop filter parameters for a macroblock.
type filterParam struct {
	// The first three fields are thresholds used by the loop filter to smooth
	// over the edges and interior of a macroblock. level is used by both the
	// simple and normal filters. The inner level and high edge variance level
	// are only used by the normal filter.
	level, ilevel, hlevel uint8
	// inner is whether the inner loop filter cannot be optimized out as a
	// no-op for this particular macroblock.
	inner bool
}
// computeFilterParams computes the loop filter parameters, as specified in
// section 15.4: a base level per segment, optional per-reference/mode
// deltas, then the derived interior and high-edge-variance thresholds.
func (d *Decoder) computeFilterParams() {
	for i := range d.filterParams {
		baseLevel := d.filterHeader.level
		if d.segmentHeader.useSegment {
			baseLevel = d.segmentHeader.filterStrength[i]
			if d.segmentHeader.relativeDelta {
				baseLevel += d.filterHeader.level
			}
		}
		for j := range d.filterParams[i] {
			p := &d.filterParams[i][j]
			p.inner = j != 0
			level := baseLevel
			if d.filterHeader.useLFDelta {
				// The libwebp C code has a "TODO: only CURRENT is handled for now."
				level += d.filterHeader.refLFDelta[0]
				if j != 0 {
					level += d.filterHeader.modeLFDelta[0]
				}
			}
			if level <= 0 {
				p.level = 0
				continue
			}
			// Clamp to the 6-bit range used by the bitstream.
			if level > 63 {
				level = 63
			}
			ilevel := level
			if d.filterHeader.sharpness > 0 {
				if d.filterHeader.sharpness > 4 {
					ilevel >>= 2
				} else {
					ilevel >>= 1
				}
				if x := int8(9 - d.filterHeader.sharpness); ilevel > x {
					ilevel = x
				}
			}
			if ilevel < 1 {
				ilevel = 1
			}
			p.ilevel = uint8(ilevel)
			p.level = uint8(2*level + ilevel)
			// hlevel buckets differ between key frames and inter frames.
			if d.frameHeader.KeyFrame {
				if level < 15 {
					p.hlevel = 0
				} else if level < 40 {
					p.hlevel = 1
				} else {
					p.hlevel = 2
				}
			} else {
				if level < 15 {
					p.hlevel = 0
				} else if level < 20 {
					p.hlevel = 1
				} else if level < 40 {
					p.hlevel = 2
				} else {
					p.hlevel = 3
				}
			}
		}
	}
}
// intSize is the width of int in bits: either 32 or 64.
const intSize = 32 << (^uint(0) >> 63)

// abs returns the absolute value of x. Like the branch-free original, the
// minimum int wraps to itself (two's-complement negation overflow).
func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}
// clamp15 clamps x to the signed 5-bit range [-16, 15].
func clamp15(x int) int {
	switch {
	case x < -16:
		return -16
	case x > 15:
		return 15
	default:
		return x
	}
}
// clamp127 clamps x to the signed 8-bit range [-128, 127].
func clamp127(x int) int {
	switch {
	case x < -128:
		return -128
	case x > 127:
		return 127
	default:
		return x
	}
}
// clamp255 clamps x to the unsigned 8-bit range [0, 255].
func clamp255(x int) uint8 {
	switch {
	case x < 0:
		return 0
	case x > 255:
		return 255
	}
	return uint8(x)
}
package cmd
import (
"fmt"
"image"
"image/draw"
"os"
"github.com/spf13/cobra"
i "github.com/davidwarshaw/tiletool/cmd/internal"
)
// extrudeCmd is the cobra command registered for "extrude"; built in init.
var extrudeCmd *cobra.Command

// thickness is the extrusion width in pixels, bound to the --thickness flag.
var thickness int
// extrudeTile returns a copy of tileImage grown by `thickness` pixels on
// every side, with the border pixels replicated outward (clamp-to-edge).
// The returned image's bounds are the tile's bounds inset by -thickness.
//
// BUG FIX: the right-hand corner loops previously sampled
// tileImage.Bounds().Min.X (the LEFT edge) instead of Max.X-1, so both
// right corners were filled with the left corners' colors.
func extrudeTile(tileImage *image.NRGBA, thickness int) (extruded *image.NRGBA) {
	tb := tileImage.Bounds()
	extruded = image.NewNRGBA(tb.Inset(-thickness))
	eb := extruded.Bounds()
	// Copy the tile itself into the center.
	draw.Draw(extruded, tb, tileImage, tb.Min, draw.Src)
	// Horizontal bands: replicate the top and bottom rows upward/downward.
	for x := tb.Min.X; x < tb.Max.X; x++ {
		for y := eb.Min.Y; y < tb.Min.Y; y++ {
			extruded.Set(x, y, tileImage.At(x, tb.Min.Y))
		}
		for y := eb.Max.Y - 1; y > tb.Max.Y-1; y-- {
			extruded.Set(x, y, tileImage.At(x, tb.Max.Y-1))
		}
	}
	// Vertical bands: replicate the left and right columns outward.
	for y := tb.Min.Y; y < tb.Max.Y; y++ {
		for x := eb.Min.X; x < tb.Min.X; x++ {
			extruded.Set(x, y, tileImage.At(tb.Min.X, y))
		}
		for x := eb.Max.X - 1; x > tb.Max.X-1; x-- {
			extruded.Set(x, y, tileImage.At(tb.Max.X-1, y))
		}
	}
	// Left corners replicate the tile's left corner pixels.
	for x := eb.Min.X; x < eb.Min.X+thickness; x++ {
		for y := eb.Min.Y; y < eb.Min.Y+thickness; y++ {
			extruded.Set(x, y, tileImage.At(tb.Min.X, tb.Min.Y))
		}
		for y := eb.Max.Y - 1; y > eb.Max.Y-thickness-1; y-- {
			extruded.Set(x, y, tileImage.At(tb.Min.X, tb.Max.Y-1))
		}
	}
	// Right corners replicate the tile's right corner pixels (was Min.X).
	for x := eb.Max.X - 1; x > eb.Max.X-thickness-1; x-- {
		for y := eb.Min.Y; y < eb.Min.Y+thickness; y++ {
			extruded.Set(x, y, tileImage.At(tb.Max.X-1, tb.Min.Y))
		}
		for y := eb.Max.Y - 1; y > eb.Max.Y-thickness-1; y-- {
			extruded.Set(x, y, tileImage.At(tb.Max.X-1, tb.Max.Y-1))
		}
	}
	return
}
// init builds the "extrude" cobra command: validates the single filename
// argument and the --thickness value, then reads the tileset, extrudes every
// tile and writes the result to the configured output.
func init() {
	extrudeCmd = &cobra.Command{
		Use:   "extrude <filename>",
		Short: "Extrude the tiles of a tileset.",
		Long:  "The extrude command copies tile content into the margin around, and spacing between, tiles. Extrusion mitigates texture bleeding or tearing during tileset map scrolling. The extrude command will increase the tileset margin by the amount of extrusion thickness and increase the tileset spacing by twice the extrusion thickness.",
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) != 1 {
				fmt.Fprintln(os.Stderr, "One arg required: <filename>")
				fmt.Fprintln(os.Stderr, "Use \"tiletool extrude --help\" for more information.")
				os.Exit(1)
			}
			return nil
		},
		PreRun: func(cmd *cobra.Command, args []string) {
			if err := i.ValidatePixelValue(thickness); err != nil {
				fmt.Fprintf(os.Stderr, "Invalid thickness: %s\n", err.Error())
				os.Exit(1)
			}
		},
		Run: func(cmd *cobra.Command, args []string) {
			filename := args[0]
			img := i.Open(filename, Verbose)
			tc.ReadImage(img)
			// Each extruded tile grows by `thickness` on every side.
			outTc := tc
			outTc.TileWidth += 2 * thickness
			outTc.TileHeight += 2 * thickness
			etis := make([]*image.NRGBA, len(outTc.TileImages))
			for i, tileImage := range outTc.TileImages {
				extruded := extrudeTile(tileImage, thickness)
				etis[i] = extruded
			}
			outTc.TileImages = etis
			if Verbose {
				fmt.Printf("Extruding with thickness: %d\n", thickness)
			}
			fmt.Printf("Extruded tileset has margin: %d and spacing: %d\n", thickness, 2*thickness)
			tilesetImage := outTc.ToImage()
			i.Save(tilesetImage, Output, Verbose)
		},
	}
	// NOTE(review): cobra appends the default to the usage text automatically,
	// so "(default 1)" in the description is shown twice — confirm and drop one.
	extrudeCmd.Flags().IntVar(&thickness, "thickness", 1, "extrusion thickness in pixels (default 1)")
}
package cross_validation
import (
"fmt"
mat "github.com/gonum/matrix/mat64"
"math/rand"
"sync"
"time"
)
// shuffleMatrix writes a shuffled copy of dataset, split into training and
// test parts, into returnDatasets[0] and returnDatasets[1], then signals wg.
//
// TrainTestSplit passes the SAME seed for the data and label matrices, so
// both goroutines produce the same permutation and rows stay aligned.
func shuffleMatrix(returnDatasets []*mat.Dense, dataset mat.Matrix, testSize int, seed int64, wg *sync.WaitGroup) {
	numGen := rand.New(rand.NewSource(seed))
	// We don't want to alter the original dataset.
	shuffledSet := mat.DenseCopyOf(dataset)
	rowCount, colCount := shuffledSet.Dims()
	temp := make([]float64, colCount)
	// Fisher–Yates shuffle (forward variant: swap row i with a random j <= i).
	for i := 0; i < rowCount; i++ {
		j := numGen.Intn(i + 1)
		if j != i {
			// Make a "hard" copy to avoid pointer craziness.
			copy(temp, shuffledSet.RowView(i))
			shuffledSet.SetRow(i, shuffledSet.RowView(j))
			shuffledSet.SetRow(j, temp)
		}
	}
	// The first trainSize rows form the training split; the rest are test.
	trainSize := rowCount - testSize
	returnDatasets[0] = mat.NewDense(trainSize, colCount, shuffledSet.RawMatrix().Data[:trainSize*colCount])
	returnDatasets[1] = mat.NewDense(testSize, colCount, shuffledSet.RawMatrix().Data[trainSize*colCount:])
	wg.Done()
}
// TrainTestSplit splits input DenseMatrix into subsets for testing.
// The function expects a test size number (int) or percentage (float64), and a random state or nil to get "random" shuffle.
// It returns a list containing the train-test split and an error status.
// With two datasets (data and labels) the result order is:
// [dataTrain, dataTest, labelTrain, labelTest].
// NOTE(review): only a plain int is accepted as randomState; an int64 falls
// through to the time-based seed — confirm that is intended.
func TrainTestSplit(size interface{}, randomState interface{}, datasets ...*mat.Dense) ([]*mat.Dense, error) {
	// Get number of instances (rows).
	instanceCount, _ := datasets[0].Dims()
	// Input should be one or two matrices.
	dataCount := len(datasets)
	if dataCount > 2 {
		return nil, fmt.Errorf("Expected 1 or 2 datasets, got %d\n", dataCount)
	}
	if dataCount == 2 {
		// Test for consistency.
		labelCount, labelFeatures := datasets[1].Dims()
		if labelCount != instanceCount {
			return nil, fmt.Errorf("Data and labels must have the same number of instances")
		} else if labelFeatures != 1 {
			return nil, fmt.Errorf("Label matrix must have single feature")
		}
	}
	var testSize int
	switch size := size.(type) {
	// If size is an integer, treat it as the test data instance count.
	case int:
		testSize = size
	case float64:
		// If size is a float, treat it as a percentage of the instances to be allocated to the test set.
		testSize = int(float64(instanceCount)*size + 0.5)
	default:
		return nil, fmt.Errorf("Expected a test instance count (int) or percentage (float64)")
	}
	var randSeed int64
	// Create a deterministic shuffle, or a "random" one based on current time.
	if seed, ok := randomState.(int); ok {
		randSeed = int64(seed)
	} else {
		// Use seconds since epoch as seed
		randSeed = time.Now().Unix()
	}
	// Wait group for goroutine syncronization.
	wg := new(sync.WaitGroup)
	wg.Add(dataCount)
	// Return slice will hold training and test data and optional labels matrix.
	returnDatasets := make([]*mat.Dense, 2*dataCount)
	for i, dataset := range datasets {
		// Send proper returnDataset slice.
		// This is needed so goroutine doesn't mess up the expected return order.
		// Perhaps returning a map is a better solution...
		// All goroutines share randSeed so data and labels shuffle identically.
		go shuffleMatrix(returnDatasets[i:i+2], dataset, testSize, randSeed, wg)
	}
	wg.Wait()
	return returnDatasets, nil
}
package main
import (
"regexp"
"strconv"
s "strings"
)
/**
--- Day 3: Crossed Wires ---
The gravity assist was successful, and you're well on your way to the Venus refuelling station. During the rush back on Earth, the fuel management system wasn't completely installed, so that's next on the priority list.
Opening the front panel reveals a jumble of wires. Specifically, two wires are connected to a central port and extend outward on a grid. You trace the path each wire takes as it leaves the central port, one wire per line of text (your puzzle input).
The wires twist and turn, but the two wires occasionally cross paths. To fix the circuit, you need to find the intersection point closest to the central port. Because the wires are on a grid, use the Manhattan distance for this measurement. While the wires do technically cross right at the central port where they both start, this point does not count, nor does a wire count as crossing with itself.
For example, if the first wire's path is R8,U5,L5,D3, then starting from the central port (o), it goes right 8, up 5, left 5, and finally down 3:
...........
...........
...........
....+----+.
....|....|.
....|....|.
....|....|.
.........|.
.o-------+.
...........
Then, if the second wire's path is U7,R6,D4,L4, it goes up 7, right 6, down 4, and left 4:
...........
.+-----+...
.|.....|...
.|..+--X-+.
.|..|..|.|.
.|.-X--+.|.
.|..|....|.
.|.......|.
.o-------+.
...........
These wires cross at two locations (marked X), but the lower-left one is closer to the central port: its distance is 3 + 3 = 6.
Here are a few more examples:
R75,D30,R83,U83,L12,D49,R71,U7,L72
U62,R66,U55,R34,D71,R55,D58,R83 = distance 159
R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7 = distance 135
What is the Manhattan distance from the central port to the closest intersection?
*/
// Direction codes for wire segments (up, down, left, right).
const (
	dup = iota
	ddown
	dleft
	dright
)

// instruction is one parsed wire step: a direction code and a distance.
type instruction struct {
	Direction    int
	Displacement int
}

// vector is a 2-D grid coordinate (currently unused by the code below).
type vector struct {
	x, y int
}
// wireInsRe matches one course token: a direction letter and a distance.
// Compiled once at package scope to keep it out of the per-token hot path.
var wireInsRe = regexp.MustCompile(`^([UDLR])(\d+)$`)

// parseWireInstruction parses a token such as "R8" or "U13" into an
// instruction; it returns nil for malformed tokens.
//
// BUG FIX: the original called regexp.Split, which splits the input BY the
// pattern — for "R8" that yields ["", ""], so the direction was always ""
// and the displacement always 0. FindStringSubmatch extracts the captured
// direction letter and distance correctly.
func parseWireInstruction(input string) *instruction {
	m := wireInsRe.FindStringSubmatch(input)
	if m == nil {
		return nil
	}
	dis, err := strconv.Atoi(m[2])
	if err != nil {
		dis = 0
	}
	ins := &instruction{
		Displacement: dis,
	}
	switch m[1] {
	case "R":
		ins.Direction = dright
	case "L":
		ins.Direction = dleft
	case "U":
		ins.Direction = dup
	case "D":
		ins.Direction = ddown
	}
	return ins
}
// fnmap applies f to every element of data and collects the dereferenced
// results into a new slice.
//
// BUG FIX: the parameter was declared func(string) string, which neither
// type-checked against the []instruction result nor matched the only caller
// (generateInstructions passes parseWireInstruction, a func(string)
// *instruction). The signature now matches the parser shape.
func fnmap(data []string, f func(string) *instruction) []instruction {
	ins := make([]instruction, len(data))
	for i, d := range data {
		ins[i] = *f(d)
	}
	return ins
}
// generateInstructions parses every comma-separated token of every line into
// instructions and returns them in order. Unparseable tokens are skipped.
//
// BUG FIXES: the original overwrote `instructions` on each line (keeping only
// the last line's result), discarded the final value entirely (no return),
// and depended on a fnmap signature that did not type-check. Tokens are now
// parsed directly and accumulated; callers that invoked this as a statement
// remain valid.
func generateInstructions(lines []string) []instruction {
	var instructions []instruction
	for _, line := range lines {
		for _, token := range s.Split(line, ",") {
			if ins := parseWireInstruction(token); ins != nil {
				instructions = append(instructions, *ins)
			}
		}
	}
	return instructions
}
// main is still a stub; the parsing helpers above are not yet wired up.
func main() {
	// TODO: read the puzzle input, split into lines, and feed
	// generateInstructions.
}
// Package mat provides small integer 2-D vector helpers.
package mat

// Point is a vector type with X and Y coordinates.
type Point struct {
	X, Y int
}

// ZP is the zero value Point.
var ZP Point

// P returns a new vector with the given coordinates.
func P(x, y int) Point {
	return Point{x, y}
}
// XY returns the components of the vector in two return values.
func (v Point) XY() (x, y int) {
	return v.X, v.Y
}
// Add returns the sum of vectors v and u.
func (v Point) Add(u Point) Point {
	return Point{
		v.X + u.X,
		v.Y + u.Y,
	}
}
// Sub subtracts u from v and returns the result.
func (v Point) Sub(u Point) Point {
	return Point{
		v.X - u.X,
		v.Y - u.Y,
	}
}
// Div returns the vector v divided by the vector u component-wise.
func (v Point) Div(u Point) Point {
	return Point{v.X / u.X, v.Y / u.Y}
}
// Mul returns the vector v multiplied by the vector u component-wise.
func (v Point) Mul(u Point) Point {
	return Point{v.X * u.X, v.Y * u.Y}
}
// To returns the vector from v to u. Equivalent to u.Sub(v).
func (v Point) To(u Point) Point {
	return Point{
		u.X - v.X,
		u.Y - v.Y,
	}
}
// AddE adds u to v in place; same as v = v.Add(u).
func (v *Point) AddE(u Point) {
	v.X += u.X
	v.Y += u.Y
}
// SubE subtracts u from v in place; same as v = v.Sub(u).
func (v *Point) SubE(u Point) {
	v.X -= u.X
	v.Y -= u.Y
}
// MulE multiplies v by u component-wise in place; same as v = v.Mul(u).
func (v *Point) MulE(u Point) {
	v.X *= u.X
	v.Y *= u.Y
}
// DivE divides v by u component-wise in place; same as v = v.Div(u).
func (v *Point) DivE(u Point) {
	v.X /= u.X
	v.Y /= u.Y
}
// Scaled returns the vector v multiplied by c.
func (v Point) Scaled(c int) Point {
	return Point{v.X * c, v.Y * c}
}
// Divided returns the vector v divided by c.
func (v Point) Divided(c int) Point {
	return Point{v.X / c, v.Y / c}
}
// Inv returns v with both components negated.
func (v Point) Inv() Point {
	v.X = -v.X
	v.Y = -v.Y
	return v
}
// Flatten flattens the Point into an array; values are
// ordered as they would be on the stack.
func (v Point) Flatten() [2]int {
	return [...]int{v.X, v.Y}
}
// Mutator, similar to Flatten, returns an array with the vector components,
// though these are pointers to the components instead.
func (v *Point) Mutator() [2]*int {
	return [...]*int{&v.X, &v.Y}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.