code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package main
// Animations, matching the animation interface, are added to the animator.
// The animator ensures regular callbacks to Animate() ending with a call
// to Wrap().

// animation provides regular callbacks to motion updaters.
// An animation is expected to run for a bit and then finish as opposed
// to the continuous animations run from the application loop.
type animation interface {
	// Animate is called regularly to control motion. Animate returns
	// true as long as it is running. By convention the first call
	// to Animate is for initialization purposes and deltaTime is 0.
	Animate(deltaTime float64) bool

	// Wrap is called to stop an animation and skip to the end state.
	// Generally expected to be used so the user can skip longer or repeated
	// animations.
	Wrap()
}
// animation
// ===========================================================================
// animator

// animator runs animations. It keeps track of animations, runs the active
// ones, and discards completed animations.
// NOTE(review): no locking — assumed to be driven from a single goroutine.
type animator struct {
	animations []animation // currently active animations, in insertion order
}
// addAnimation adds a new animation to the list of active animations.
// It also calls Animate(0) as the initialization convention, so the
// animation starts immediately rather than waiting for the update loop.
func (a *animator) addAnimation(ani animation) {
	// append handles a nil slice, so no explicit initialization is needed.
	a.animations = append(a.animations, ani)
	ani.Animate(0)
}
// animate runs each of the active animations one step, keeping only those
// that report they are still running. It is expected to be called each
// update loop.
func (a *animator) animate(deltaTime float64) {
	before := len(a.animations)
	stillRunning := []animation{}
	for _, ani := range a.animations {
		if ani.Animate(deltaTime) {
			stillRunning = append(stillRunning, ani)
		}
	}
	// Only replace the list if no animations were added during the
	// Animate() callbacks; otherwise the additions would be lost.
	if len(a.animations) == before {
		a.animations = stillRunning
	}
}
// skip wraps up any current animations and discards
// the list of active animations.
func (a *animator) skip() {
	for i := range a.animations {
		a.animations[i].Wrap()
	}
	a.animations = []animation{}
}
// animator
// ===========================================================================
// transitionAnimation

// transitionAnimation runs an action in-between two animations. Generally
// used for transitioning between two screens. It is a composite animation
// that acts like a single Animation.
type transitionAnimation struct {
	firstA animation // First animation (may be nil).
	mid    func()    // The function to run between the animations (may be nil).
	lastA  animation // Second animation (may be nil).
	state  int       // Track which animation is running: runFirst or runLast.
}

// state constants for transitionAnimation
const (
	runFirst = iota // Running the first animation.
	runLast         // Running the last animation.
)
// newTransitionAnimation creates a composite animation using two animations
// and an action that is run between the two animations.
func newTransitionAnimation(firstA, lastA animation, mid func()) animation {
	return &transitionAnimation{
		firstA: firstA,
		mid:    mid,
		lastA:  lastA,
		state:  runFirst,
	}
}
// Animate runs the animations and the transition action in sequence:
// the first animation, then the mid action, then the last animation.
// It returns true while the composite is still running.
func (ta *transitionAnimation) Animate(dt float64) bool {
	switch ta.state {
	case runFirst:
		// A nil first animation, or one that has finished, triggers the
		// mid action and advances to the last animation.
		if ta.firstA == nil || !ta.firstA.Animate(dt) {
			if ta.mid != nil {
				ta.mid()
			}
			ta.state = runLast
		}
	case runLast:
		if ta.lastA != nil {
			return ta.lastA.Animate(dt)
		}
		return false // finished animation.
	}
	return true // keep running.
}
// Wrap forces the animation to the end. This ensures that both animations
// are wrapped and that the action has been run.
func (ta *transitionAnimation) Wrap() {
if ta.state == runFirst {
if ta.firstA != nil {
ta.firstA.Wrap()
}
if ta.mid != nil {
ta.mid()
}
ta.state = runLast
}
if ta.state == runLast {
if ta.lastA != nil {
ta.lastA.Wrap()
}
}
} | animate.go | 0.799951 | 0.530662 | animate.go | starcoder |
// Package arraylist implements the array list.
// Structure is not concurrent safe.
// Reference: https://en.wikipedia.org/wiki/Dynamic_array
package arraylist

import (
	"errors"
)

var (
	// ErrIndex is returned when the index is out of the list
	ErrIndex = errors.New("index is out of the list")
	// ErrIndexOf is returned when the index of value can not found
	ErrIndexOf = errors.New("index of value can not found")
)

const (
	growthFactor = float32(2.0)  // growth factor by 100%
	shrinkFactor = float32(0.25) // shrink factor by 25%; 0.0 would disable shrinking
)
// List represents a array list structure.
// The zero value is an empty list ready to use.
type List struct {
	elements []interface{} // backing storage; len(elements) == cap and may exceed size
	size     int           // number of elements actually stored
	caps     int           // capacity of array list (mirrors cap(elements))
}
// Growth the list capacity.
// If adding n elements would reach the current capacity,
// expand the capacity by the growth factor.
func (list *List) growth(n int) {
	currCap := cap(list.elements)
	if list.size+n < currCap {
		return
	}
	list.resize(int(float32(currCap+n) * growthFactor))
}
// Shrink the list capacity.
// If the number of elements is at or below shrinkFactor of the current
// capacity, reduce the capacity to exactly the number of elements.
func (list *List) shrink() {
	// A shrinkFactor of 0.0 means never shrink.
	if shrinkFactor == 0.0 {
		return
	}
	threshold := int(float32(cap(list.elements)) * shrinkFactor)
	if list.size <= threshold {
		list.resize(list.size)
	}
}
// Resize the list capacity by reallocating the backing array and
// copying the existing elements across.
func (list *List) resize(caps int) {
	grown := make([]interface{}, caps)
	copy(grown, list.elements)
	list.elements = grown
	list.caps = caps
}
// New array list, optionally seeded with initial values.
func New(values ...interface{}) *List {
	list := new(List)
	if len(values) > 0 {
		list.Append(values...)
	}
	return list
}
// List Interface

// Append values (one or more than one) to list.
func (list *List) Append(values ...interface{}) {
	list.growth(len(values))
	for i, v := range values {
		list.elements[list.size+i] = v
	}
	list.size += len(values)
}
// Check if the index is within the length of the list.
func (list *List) indexInRange(index int) bool {
	return index >= 0 && index < list.size
}
// Get value by index, returning ErrIndex when out of range.
func (list *List) Get(index int) (interface{}, error) {
	if list.indexInRange(index) {
		return list.elements[index], nil
	}
	return nil, ErrIndex
}
// Remove element by index, shifting later elements left.
// Returns ErrIndex when the index is out of range.
func (list *List) Remove(index int) error {
	if !list.indexInRange(index) {
		return ErrIndex
	}
	// Shift the tail left over the removed slot. copy is memmove-like,
	// so the overlapping ranges are safe.
	copy(list.elements[index:], list.elements[index+1:list.size])
	// Clear the vacated tail slot: the original code nilled the removed
	// slot before the shift, which left a stale duplicate of the last
	// element beyond size (a memory leak and a source of ghost matches
	// when the backing array is scanned).
	list.elements[list.size-1] = nil
	list.size--
	list.shrink()
	return nil
}
// Contains returns true if list contains all the given values, false
// otherwise. An empty query is trivially contained.
func (list *List) Contains(values ...interface{}) bool {
	if len(values) == 0 {
		return true
	}
	if list.size == 0 {
		return false
	}
	for _, value := range values {
		found := false
		// Scan only the live portion of the backing array: slots beyond
		// size hold nil or stale values and must not produce matches
		// (the original scanned the full capacity, so Contains(nil)
		// could return true for a list that never held nil).
		for _, element := range list.elements[:list.size] {
			if element == value {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// Swap value by index, returning ErrIndex when either index is invalid.
func (list *List) Swap(i, j int) error {
	switch {
	case !list.indexInRange(i), !list.indexInRange(j):
		return ErrIndex
	case i == j:
		return nil
	}
	list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
	return nil
}
// Insert value (one or more than one) after index.
// Returns ErrIndex when the index is out of range.
func (list *List) Insert(index int, values ...interface{}) error {
	if !list.indexInRange(index) {
		return ErrIndex
	}
	if len(values) == 0 {
		return nil
	}
	if index == list.size-1 {
		// Inserting after the last element is a plain append.
		list.Append(values...)
		return nil
	}
	// insert: grow first, then shift the tail right by len(values) and
	// copy the new values into the gap. copy is memmove-like, so the
	// overlapping shift is safe.
	l := len(values)
	list.growth(l)
	list.size += l
	copy(list.elements[index+1+l:], list.elements[index+1:])
	copy(list.elements[index+1:], values)
	return nil
}
// Set element by index, returning ErrIndex when out of range.
func (list *List) Set(index int, value interface{}) error {
	if list.indexInRange(index) {
		list.elements[index] = value
		return nil
	}
	return ErrIndex
}
// IndexOf get index by value. Returns -1 and ErrIndexOf when the value
// is not present.
func (list *List) IndexOf(value interface{}) (int, error) {
	for i, element := range list.elements[:list.size] {
		if element == value {
			return i, nil
		}
	}
	return -1, ErrIndexOf
}
// Container Interface

// Empty returns true if the list is empty, otherwise returns false.
func (list *List) Empty() bool {
	return list.size == 0
}

// Size returns the size of the list.
func (list *List) Size() int {
	return list.size
}
// Clear the list, releasing the old backing array for garbage collection.
func (list *List) Clear() {
	list.size = 0
	list.elements = []interface{}{}
}
// Values returns the values of list.
func (list *List) Values() []interface{} {
newElements := make([]interface{}, list.size, list.size)
copy(newElements, list.elements)
return newElements
} | list/arraylist/arraylist.go | 0.759671 | 0.452596 | arraylist.go | starcoder |
package sync
import (
"github.com/pkg/errors"
)
// EnsureValid ensures that Cache's invariants are respected.
// A nil cache, a nil entry, or an entry without a modification time is
// invalid.
func (c *Cache) EnsureValid() error {
	// A nil cache is invalid. (The previous comment claimed the opposite
	// while the code returned an error; the code's behavior is kept.)
	if c == nil {
		return errors.New("nil cache")
	}

	// Technically we could validate each path, but that's error prone,
	// expensive, and not really needed for memory safety. Also note that an
	// empty path is valid when the synchronization root is a file.

	// Nil cache entries are invalid, as are entries missing their
	// modification time.
	for _, e := range c.Entries {
		if e == nil {
			return errors.New("nil cache entry detected")
		} else if e.ModificationTime == nil {
			return errors.New("cache entry with nil modification time detected")
		}
	}

	// Success.
	return nil
}
// ReverseLookupMap provides facilities for doing reverse (digest → path)
// lookups to avoid expensive staging operations in the case of renames
// and copies.
type ReverseLookupMap struct {
	// map20 provides mappings for 20-byte (SHA-1 sized) hashes.
	map20 map[[20]byte]string
}
// Lookup attempts a lookup in the map. It returns the path for the digest
// and whether a match was found. Only 20-byte digests can match.
func (m *ReverseLookupMap) Lookup(digest []byte) (string, bool) {
	// Digests of any other length simply miss — no harm done.
	if len(digest) != 20 {
		return "", false
	}
	var key [20]byte
	copy(key[:], digest)
	result, ok := m.map20[key]
	return result, ok
}
// GenerateReverseLookupMap creates a reverse lookup map from a cache.
func (c *Cache) GenerateReverseLookupMap() (*ReverseLookupMap, error) {
// Create the map.
result := &ReverseLookupMap{}
// Track the digest size and ensure it's consistent.
digestSize := -1
// Loop over entries.
for p, e := range c.Entries {
// Compute and validate the digest size and allocate the map.
if digestSize == -1 {
digestSize = len(e.Digest)
if digestSize == 20 {
result.map20 = make(map[[20]byte]string, len(c.Entries))
} else {
return nil, errors.New("unsupported digest size")
}
} else if len(e.Digest) != digestSize {
return nil, errors.New("inconsistent digest sizes")
}
// Handle the entry based on digest size.
if digestSize == 20 {
var key [20]byte
copy(key[:], e.Digest)
result.map20[key] = p
} else {
panic("invalid digest size allowed")
}
}
// Success.
return result, nil
} | pkg/sync/cache.go | 0.713032 | 0.456834 | cache.go | starcoder |
package main
import "math"
// defaultSurvivalLevelOne is the score needed for the first level;
// defaultSurvivalLevelBase is the exponential growth base applied per level.
const defaultSurvivalLevelOne = 30
const defaultSurvivalLevelBase = 1.5
// SurvivalDifficulty controls how the game difficulty scales with the player's score.
type SurvivalDifficulty interface {
	// NumBugInit returns the number of bugs to initialize the game with.
	NumBugInit() int
	// BugRateInit returns a constant amount of time between bug spawns during
	// initialization.
	BugRateInit() float64
	// BugDistribution returns the distribution of bugs and colors.
	BugDistribution(lvl int) BugDistribution
	// NextLevel returns the total score required to achieve the next level.
	NextLevel(lvl int) int64
	// BugRate returns the expected number of seconds between individual bug
	// spawns for the current level. A normal distribution will determine the
	// actual duration between each spawn.
	BugRate(lvl int) float64
	// ItemRate returns the expected number of seconds between individual item
	// spawns for the current level and the expected number of seconds for a
	// spawned item to despawn. A normal distribution will determine the
	// actual duration between each spawn. Another normal distribution
	// determines the duration each spawn exists. Multiple items may exist at
	// the same time.
	ItemRate(lvl int) (spawn, despawn float64)
	// ItemDistribution returns the distribution of item types seen on the
	// given level.
	ItemDistribution(lvl int) ItemDistribution
}
// simpleSurvivalDifficulty is the default SurvivalDifficulty implementation.
type simpleSurvivalDifficulty struct{}

// NextLevel returns the score threshold for the next level, growing
// exponentially: levelOne * base^lvl.
func (s *simpleSurvivalDifficulty) NextLevel(lvl int) int64 {
	return int64(float64(defaultSurvivalLevelOne) * math.Pow(defaultSurvivalLevelBase, float64(lvl)))
}
// NumBugInit returns the number of bugs the game starts with.
func (s *simpleSurvivalDifficulty) NumBugInit() int {
	return 12
}

// BugRateInit returns the fixed delay between spawns while initializing.
func (s *simpleSurvivalDifficulty) BugRateInit() float64 {
	return 0.3
}

// BugRate returns the expected seconds between bug spawns, decaying
// geometrically as the level rises so bugs spawn faster over time.
func (s *simpleSurvivalDifficulty) BugRate(lvl int) float64 {
	const initialRate = 7 // about every 5 seconds
	const baseReduction = 0.99
	return initialRate * math.Pow(baseReduction, float64(lvl))
}
// ItemRate returns the expected seconds between item spawns and the
// expected lifetime of a spawned item, both shrinking as the level rises.
func (s *simpleSurvivalDifficulty) ItemRate(lvl int) (spawn, despawn float64) {
	const (
		initialSpawnRate     = 15 // about every 10 seconds
		initialDespawnRate   = 10 // about 5 seconds
		baseSpawnReduction   = 0.90
		baseDespawnReduction = 0.96
	)
	level := float64(lvl)
	spawn = initialSpawnRate * math.Pow(baseSpawnReduction, level)
	despawn = initialDespawnRate * math.Pow(baseDespawnReduction, level)
	return spawn, despawn
}
// ItemDistribution returns a uniform distribution over all item types,
// independent of the level.
func (s *simpleSurvivalDifficulty) ItemDistribution(lvl int) ItemDistribution {
	return itemTypeDistn{
		ItemRowClear: 10,
		ItemPushUp:   10,
		ItemBullet:   10,
		ItemScramble: 10,
		ItemRecolor:  10,
	}
}
func (s *simpleSurvivalDifficulty) BugDistribution(lvl int) BugDistribution {
if lvl < 3 {
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 500,
BugLarge: 400,
BugGnat: 200,
BugMagic: 0,
BugBomb: 0,
BugLightning: 0,
BugRock: 0,
BugMultiChain: 0,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1},
BugLarge: {ColorBug + 2: 1},
},
}
}
if lvl < 5 {
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 390,
BugLarge: 385,
BugGnat: 195,
BugMagic: 0,
BugBomb: 30,
BugLightning: 0,
BugRock: 0,
BugMultiChain: 0,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1, ColorBug + 1: 1},
BugLarge: {ColorBug + 2: 1, ColorBug + 3: 0},
},
}
}
if lvl == 6 {
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 380,
BugLarge: 375,
BugGnat: 192,
BugMagic: 0,
BugBomb: 15,
BugLightning: 15,
BugRock: 10,
BugMultiChain: 10,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1, ColorBug + 1: 1},
BugLarge: {ColorBug + 2: 1, ColorBug + 3: 1},
},
}
}
if lvl == 7 {
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 373,
BugLarge: 363,
BugGnat: 190,
BugMagic: 10,
BugBomb: 15,
BugLightning: 15,
BugRock: 15,
BugMultiChain: 10,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1, ColorBug + 1: 1},
BugLarge: {ColorBug + 2: 1, ColorBug + 3: 1},
},
}
}
if lvl == 8 {
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 378,
BugLarge: 358,
BugGnat: 180,
BugMagic: 15,
BugBomb: 15,
BugLightning: 15,
BugRock: 15,
BugMultiChain: 15,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1, ColorBug + 1: 1},
BugLarge: {ColorBug + 2: 1, ColorBug + 3: 1},
},
}
}
if lvl == 9 {
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 380,
BugLarge: 350,
BugGnat: 170,
BugMagic: 20,
BugBomb: 20,
BugLightning: 20,
BugRock: 20,
BugMultiChain: 20,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1, ColorBug + 1: 1},
BugLarge: {ColorBug + 2: 1, ColorBug + 3: 1},
},
}
}
if lvl == 10 {
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 383,
BugLarge: 343,
BugGnat: 160,
BugMagic: 25,
BugBomb: 25,
BugLightning: 25,
BugRock: 25,
BugMultiChain: 25,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1, ColorBug + 1: 1},
BugLarge: {ColorBug + 2: 1, ColorBug + 3: 1},
},
}
}
return &simpleDistribution{
&bugTypeDistn{
BugSmall: 390,
BugLarge: 340,
BugGnat: 150,
BugMagic: 30,
BugBomb: 30,
BugLightning: 30,
BugRock: 30,
BugMultiChain: 30,
},
&bugColorCondDistn{
BugSmall: {ColorBug + 0: 1, ColorBug + 1: 1},
BugLarge: {ColorBug + 2: 1, ColorBug + 3: 1},
},
}
} | survival.go | 0.576542 | 0.439928 | survival.go | starcoder |
package forGraphBLASGo
// MatrixReduceBinaryOp reduces matrix A into vector w (of length nrows)
// using the binary operator op, honoring the mask, accumulator and
// descriptor. The Inp0/Tran descriptor setting reduces the transpose
// instead. The result is built lazily as a computed vector.
func MatrixReduceBinaryOp[D any](w *Vector[D], mask *Vector[bool], accum, op BinaryOp[D, D, D], A *Matrix[D], desc Descriptor) error {
	nrows, ncols, err := A.Size()
	if err != nil {
		return err
	}
	isTran, err := desc.Is(Inp0, Tran)
	if err != nil {
		// Descriptor inspection is not expected to fail here.
		panic(err)
	}
	if isTran {
		nrows, ncols = ncols, nrows
	}
	if err = w.expectSize(nrows); err != nil {
		return err
	}
	maskAsStructure, err := vectorMask(mask, nrows)
	if err != nil {
		return err
	}
	w.ref = newVectorReference[D](newComputedVector[D](
		nrows, w.ref, maskAsStructure, accum,
		newMatrixReduceBinaryOp[D](op, maybeTran(A.ref, isTran)),
		desc,
	), -1)
	return nil
}
// MatrixReduceMonoid is MatrixReduceBinaryOp with the monoid's operator.
func MatrixReduceMonoid[D any](w *Vector[D], mask *Vector[bool], accum BinaryOp[D, D, D], op Monoid[D], A *Matrix[D], desc Descriptor) error {
	return MatrixReduceBinaryOp(w, mask, accum, op.operator(), A, desc)
}
// VectorReduceBinaryOpScalar lazily reduces vector u into scalar s with
// op, combining with s's current value through accum.
func VectorReduceBinaryOpScalar[D any](s *Scalar[D], accum, op BinaryOp[D, D, D], u *Vector[D], _ Descriptor) error {
	if s == nil || s.ref == nil || u == nil || u.ref == nil {
		return UninitializedObject
	}
	s.ref = newScalarReference[D](newComputedScalar[D](s.ref, accum, newVectorReduceBinaryOpScalar[D](op, u.ref)))
	return nil
}
// VectorReduceMonoidScalar is VectorReduceBinaryOpScalar with the
// monoid's operator.
func VectorReduceMonoidScalar[D any](s *Scalar[D], accum BinaryOp[D, D, D], op Monoid[D], u *Vector[D], desc Descriptor) error {
	return VectorReduceBinaryOpScalar(s, accum, op.operator(), u, desc)
}
// VectorReduce eagerly reduces vector u into *value using monoid op.
// A vector with no stored elements yields the monoid identity. When accum
// is non-nil the reduction is combined with the existing *value via accum.
func VectorReduce[D any](value *D, accum BinaryOp[D, D, D], op Monoid[D], u *Vector[D], _ Descriptor) error {
	if u == nil || u.ref == nil {
		return UninitializedObject
	}
	result, ok := vectorPipelineReduce(u.ref.getPipeline(), op.operator())
	if !ok {
		// No stored elements: fall back to the monoid identity.
		result = op.identity()
	}
	if accum == nil {
		*value = result
		return nil
	}
	*value = accum(*value, result)
	return nil
}
// MatrixReduceBinaryOpScalar lazily reduces matrix A into scalar s with
// op, combining with s's current value through accum.
func MatrixReduceBinaryOpScalar[D any](s *Scalar[D], accum, op BinaryOp[D, D, D], A *Matrix[D], _ Descriptor) error {
	if s == nil || s.ref == nil || A == nil || A.ref == nil {
		return UninitializedObject
	}
	s.ref = newScalarReference[D](newComputedScalar[D](s.ref, accum, newMatrixReduceBinaryOpScalar[D](op, A.ref)))
	return nil
}
// MatrixReduceMonoidScalar is MatrixReduceBinaryOpScalar with the
// monoid's operator.
func MatrixReduceMonoidScalar[D any](s *Scalar[D], accum BinaryOp[D, D, D], op Monoid[D], A *Matrix[D], desc Descriptor) error {
	return MatrixReduceBinaryOpScalar(s, accum, op.operator(), A, desc)
}
func MatrixReduce[D any](value *D, accum BinaryOp[D, D, D], op Monoid[D], A *Matrix[D], _ Descriptor) error {
if A == nil || A.ref == nil {
return UninitializedObject
}
result, ok := matrixPipelineReduce(A.ref.getPipeline(), op.operator())
if !ok {
result = op.identity()
}
if accum == nil {
*value = result
return nil
}
*value = accum(*value, result)
return nil
} | api_Reduce.go | 0.689515 | 0.667629 | api_Reduce.go | starcoder |
package packets
import (
"encoding/binary"
"fmt"
)
// BodyMatrix is a rigid-body sample: a position, a residual, and nine
// rotation-matrix values (3x3 flattened; element ordering per the wire
// protocol — confirm against the producer).
type BodyMatrix struct {
	Point    Point
	Residual float32
	Rotation [9]float32
}

// String renders the body as position, residual and the rotation values
// grouped in threes.
func (b BodyMatrix) String() string {
	return fmt.Sprintf(
		"x:%v y:%v z:%v r:%v [[%v %v %v][%v %v %v][%v %v %v]]",
		b.Point.X, b.Point.Y, b.Point.Z, b.Residual,
		b.Rotation[0], b.Rotation[1], b.Rotation[2],
		b.Rotation[3], b.Rotation[4], b.Rotation[5],
		b.Rotation[6], b.Rotation[7], b.Rotation[8],
	)
}
// Component6D is a 6DOF component: stream health counters plus the
// decoded rigid bodies.
type Component6D struct {
	Droprate      uint16
	OutOfSyncRate uint16
	Bodies        []BodyMatrix
}

// String renders the counters and all bodies.
func (c Component6D) String() string {
	return fmt.Sprintf("Droprate: %v OutOfSyncRate: %v Bodies: %v\n", c.Droprate, c.OutOfSyncRate, c.Bodies)
}
// UnmarshalBinary decodes a 6DOF component from its little-endian wire
// format: a uint32 body count, two uint16 health counters, then 48 bytes
// per body (3 position floats + 9 rotation floats). Empty input is
// treated as "no data".
// NOTE(review): the input is assumed well-formed; short packets will
// panic on the slice indexing.
func (c *Component6D) UnmarshalBinary(data []byte) error {
	if len(data) == 0 {
		return nil
	}
	numberOfBodies := binary.LittleEndian.Uint32(data[0:4])
	c.Droprate = binary.LittleEndian.Uint16(data[4:6])
	c.OutOfSyncRate = binary.LittleEndian.Uint16(data[6:8])
	pos := 8
	c.Bodies = make([]BodyMatrix, 0, numberOfBodies)
	for m := uint32(0); m < numberOfBodies; m++ {
		body := BodyMatrix{
			Point: Point{
				X: Float32frombytes(data[pos : pos+4]),
				Y: Float32frombytes(data[pos+4 : pos+8]),
				Z: Float32frombytes(data[pos+8 : pos+12]),
			},
		}
		// The 9 rotation floats follow the position.
		for i := 0; i < 9; i++ {
			body.Rotation[i] = Float32frombytes(data[pos+12+i*4 : pos+16+i*4])
		}
		c.Bodies = append(c.Bodies, body)
		pos += 48
	}
	return nil
}
// Component6DResidual is a 6DOF component whose wire format additionally
// carries a per-body residual.
type Component6DResidual Component6D

// String delegates to Component6D's rendering.
func (c Component6DResidual) String() string {
	return Component6D(c).String()
}
// UnmarshalBinary decodes a 6DOF-with-residual component: a uint32 body
// count, two uint16 health counters, then 52 bytes per body (position,
// rotation, residual), all little-endian.
func (c *Component6DResidual) UnmarshalBinary(data []byte) error {
	// Treat an empty payload as "no data", consistent with
	// Component6D.UnmarshalBinary, instead of panicking on the header read.
	if len(data) == 0 {
		return nil
	}
	numberOfBodies := binary.LittleEndian.Uint32(data[0:4])
	c.Droprate = binary.LittleEndian.Uint16(data[4:6])
	c.OutOfSyncRate = binary.LittleEndian.Uint16(data[6:8])
	pos := 8
	c.Bodies = make([]BodyMatrix, 0, numberOfBodies)
	for m := uint32(0); m < numberOfBodies; m++ {
		body := BodyMatrix{
			Point: Point{
				X: Float32frombytes(data[pos : pos+4]),
				Y: Float32frombytes(data[pos+4 : pos+8]),
				Z: Float32frombytes(data[pos+8 : pos+12]),
			},
		}
		for i := 0; i < 9; i++ {
			body.Rotation[i] = Float32frombytes(data[pos+12+i*4 : pos+16+i*4])
		}
		body.Residual = Float32frombytes(data[pos+48 : pos+52])
		c.Bodies = append(c.Bodies, body)
		pos += 52
	}
	return nil
}
// BodyEuler is a rigid-body sample: a position, a residual, and three
// Euler angles.
type BodyEuler struct {
	Point    Point
	Residual float32
	Angles   [3]float32
}

// String renders the body as position, residual and angles.
func (b BodyEuler) String() string {
	return fmt.Sprintf(
		"x:%v y:%v z:%v r:%v [%v %v %v]",
		b.Point.X, b.Point.Y, b.Point.Z, b.Residual,
		b.Angles[0], b.Angles[1], b.Angles[2],
	)
}

// Component6DEuler is a 6DOF-Euler component: stream health counters plus
// the decoded rigid bodies.
type Component6DEuler struct {
	Droprate      uint16
	OutOfSyncRate uint16
	Bodies        []BodyEuler
}

// String renders the counters and all bodies.
func (c Component6DEuler) String() string {
	return fmt.Sprintf("Droprate: %v OutOfSyncRate: %v Bodies: %v\n", c.Droprate, c.OutOfSyncRate, c.Bodies)
}
// UnmarshalBinary decodes a 6DOF-Euler component: a uint32 body count,
// two uint16 health counters, then 24 bytes per body (3 position floats +
// 3 angle floats), all little-endian.
func (c *Component6DEuler) UnmarshalBinary(data []byte) error {
	// Treat an empty payload as "no data", consistent with
	// Component6D.UnmarshalBinary, instead of panicking on the header read.
	if len(data) == 0 {
		return nil
	}
	numberOfBodies := binary.LittleEndian.Uint32(data[0:4])
	c.Droprate = binary.LittleEndian.Uint16(data[4:6])
	c.OutOfSyncRate = binary.LittleEndian.Uint16(data[6:8])
	pos := 8
	c.Bodies = make([]BodyEuler, 0, numberOfBodies)
	for m := uint32(0); m < numberOfBodies; m++ {
		body := BodyEuler{
			Point: Point{
				X: Float32frombytes(data[pos : pos+4]),
				Y: Float32frombytes(data[pos+4 : pos+8]),
				Z: Float32frombytes(data[pos+8 : pos+12]),
			},
		}
		for i := 0; i < 3; i++ {
			body.Angles[i] = Float32frombytes(data[pos+12+i*4 : pos+16+i*4])
		}
		c.Bodies = append(c.Bodies, body)
		pos += 24
	}
	return nil
}
// Component6DEulerResidual is a 6DOF-Euler component whose wire format
// additionally carries a per-body residual.
type Component6DEulerResidual Component6DEuler

// String delegates to Component6DEuler's rendering.
func (c Component6DEulerResidual) String() string {
	return Component6DEuler(c).String()
}
func (c *Component6DEulerResidual) UnmarshalBinary(data []byte) error {
numberOfBodies := binary.LittleEndian.Uint32(data[0:4])
c.Droprate = binary.LittleEndian.Uint16(data[4:6])
c.OutOfSyncRate = binary.LittleEndian.Uint16(data[6:8])
pos := 8
c.Bodies = make([]BodyEuler, 0, numberOfBodies)
for m := uint32(0); m < numberOfBodies; m++ {
body := BodyEuler{
Point: Point{
X: Float32frombytes(data[pos : pos+4]),
Y: Float32frombytes(data[pos+4 : pos+8]),
Z: Float32frombytes(data[pos+8 : pos+12]),
},
}
for i := 0; i < 3; i++ {
body.Angles[i] = Float32frombytes(data[pos+12+i*4 : pos+16+i*4])
}
body.Residual = Float32frombytes(data[pos+24 : pos+28])
c.Bodies = append(c.Bodies, body)
pos += 28
}
return nil
} | pkg/packets/objects.go | 0.566738 | 0.44903 | objects.go | starcoder |
// Package day07 solves AoC 2021 day 7.
package day07
import (
"math"
"sort"
"github.com/fis/aoc/glue"
"github.com/fis/aoc/util"
)
// init registers this package's solver with the AoC glue framework.
func init() {
	glue.RegisterSolver(2021, 7, glue.IntSolver(solve))
}
// solve computes both puzzle answers: part 1 aligns at the median
// (linear cost), part 2 near the mean (triangular cost).
func solve(input []int) ([]string, error) {
	_, p1 := align1MedianQS(input)
	_, p2 := align2Mean(input)
	return glue.Ints(p1, p2), nil
}
// align finds the alignment position minimizing total cost, where f gives
// the cost of moving one element from n to x. Every position in the
// input's value range is evaluated.
func align(input []int, f func(n, x int) int) (x, cost int) {
	lo, hi := bounds(input)
	costs := make([]int, hi-lo+1)
	for i := range costs {
		for _, n := range input {
			costs[i] += f(n, lo+i)
		}
	}
	idx, best := argmin(costs)
	return lo + idx, best
}
// cost1 is the part-1 fuel cost: the distance between n and x.
func cost1(n, x int) int {
	if n < x {
		return x - n
	}
	return n - x
}
// cost2 is the part-2 fuel cost: the triangular number of the distance.
func cost2(n, x int) int {
	d := n - x
	if d < 0 {
		d = -d
	}
	return d * (d + 1) / 2
}
// align1Points is like align with cost1, but only evaluates candidate
// positions that appear in the input (the L1 optimum lies at the median,
// which is always an input value). Unevaluated positions keep a MaxInt
// sentinel so argmin ignores them.
func align1Points(input []int) (x, cost int) {
	lo, hi := bounds(input)
	costs := make([]int, hi-lo+1)
	for i := range costs {
		costs[i] = math.MaxInt
	}
	for _, candidate := range input {
		if costs[candidate-lo] != math.MaxInt {
			continue // this position was already evaluated
		}
		total := 0
		for _, n := range input {
			total += abs(n - candidate)
		}
		costs[candidate-lo] = total
	}
	idx, best := argmin(costs)
	return lo + idx, best
}
// align1MedianSort aligns to the median found by sorting a copy of the
// input, then sums the part-1 (distance) cost around it.
func align1MedianSort(input []int) (x, cost int) {
	sorted := make([]int, len(input))
	copy(sorted, input)
	sort.Ints(sorted)
	x = sorted[len(sorted)/2]
	for _, n := range input {
		if n < x {
			cost += x - n
		} else {
			cost += n - x
		}
	}
	return x, cost
}
// align1MedianQS aligns to the median found via quickselect (expected
// linear time) and sums the part-1 (distance) cost around it.
// NOTE(review): util.QuickSelect may reorder input in place — confirm.
func align1MedianQS(input []int) (x, cost int) {
	x = util.QuickSelect(input, len(input)/2)
	for _, n := range input {
		cost += abs(n - x)
	}
	return x, cost
}
// align2Mean exploits the fact that the part-2 optimum lies near the
// arithmetic mean: it evaluates a small window around the rounded mean
// and keeps the cheapest position.
func align2Mean(input []int) (x, cost int) {
	sum := 0
	for _, n := range input {
		sum += n
	}
	mean := (sum + len(input)/2) / len(input)
	cost = math.MaxInt
	for estX := mean - 2; estX <= mean+2; estX++ {
		total := 0
		for _, n := range input {
			d := n - estX
			if d < 0 {
				d = -d
			}
			total += d * (d + 1) / 2
		}
		if total < cost {
			cost = total
			x = estX
		}
	}
	return x, cost
}
// bounds returns the minimum and maximum values of a non-empty slice.
func bounds(input []int) (min, max int) {
	min, max = input[0], input[0]
	for _, n := range input[1:] {
		switch {
		case n < min:
			min = n
		case n > max:
			max = n
		}
	}
	return min, max
}
// argmin returns the index and value of the smallest element of a
// non-empty slice; the first occurrence wins.
func argmin(input []int) (minI, minN int) {
	minN = input[0]
	for i := 1; i < len(input); i++ {
		if input[i] < minN {
			minI, minN = i, input[i]
		}
	}
	return minI, minN
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}
package util
import (
"compress/gzip"
"github.com/itchio/go-brotli/enc"
"io"
"strings"
)
// Recompression describes a transcoding decision: which encoding to add
// to a response and which to remove from it.
type Recompression struct {
	Add    CompressionType
	Remove CompressionType
}

// CompressionType enumerates the supported response encodings.
type CompressionType int

const (
	CompressionTypeNone   CompressionType = 0
	CompressionTypeGzip   CompressionType = 1
	CompressionTypeBrotli CompressionType = 2
)
func NewGzipDecodingReader(body io.ReadCloser) (io.ReadCloser, error) {
gzReader, err := gzip.NewReader(body)
if err != nil {
return nil, err
}
return gzReader, nil
}
// NewBrotliEncodingWriter wraps w in a brotli compressor at the given
// quality; out-of-range qualities (valid: 0-11) fall back to 0.
func NewBrotliEncodingWriter(w io.Writer, compressionLevel int) io.WriteCloser {
	quality := compressionLevel
	if quality < 0 || quality > 11 {
		quality = 0
	}
	return enc.NewBrotliWriter(w, &enc.BrotliWriterOptions{
		Quality: quality,
		LGWin:   0,
	})
}
func NewGzipEncodingWriter(w io.Writer, compressionLevel int) (io.WriteCloser, error) {
if compressionLevel < gzip.BestSpeed || compressionLevel > gzip.BestCompression {
compressionLevel = gzip.DefaultCompression
}
w, err := gzip.NewWriterLevel(w, compressionLevel)
if err != nil {
return nil, err
}
wCloser, ok := w.(io.WriteCloser)
if !ok {
panic("Gzip writer is not an io.Closer")
}
return wCloser, nil
}
// ContentEncodingFromCompressionType maps a CompressionType to its
// Content-Encoding header token ("" for none/unknown).
func ContentEncodingFromCompressionType(compressionType CompressionType) string {
	if compressionType == CompressionTypeGzip {
		return "gzip"
	}
	if compressionType == CompressionTypeBrotli {
		return "br"
	}
	return ""
}
// GetRecompression decides, from the client's Accept-Encoding and the
// upstream response's Content-Encoding/Content-Type, which encoding to
// strip from and which to apply to the response before forwarding it.
func GetRecompression(acceptEncoding string, contentEncoding string, contentType string) Recompression {
	switch acceptsEncodingFromString(acceptEncoding) {
	case acceptsBrotli:
		switch contentEncoding {
		case "br":
			// Already in the best encoding the client accepts.
			return Recompression{Add: CompressionTypeNone, Remove: CompressionTypeNone}
		case "gzip":
			// Upgrade gzip to brotli.
			return Recompression{Add: CompressionTypeBrotli, Remove: CompressionTypeGzip}
		default:
			return fallbackCompressionWithDefault(contentEncoding, contentType, CompressionTypeBrotli)
		}
	case acceptsGzip:
		switch contentEncoding {
		case "gzip":
			return Recompression{Add: CompressionTypeNone, Remove: CompressionTypeNone}
		case "br":
			// NOTE(review): the client only advertised gzip yet this adds
			// brotli on top of an already-brotli body — confirm intended.
			return Recompression{Add: CompressionTypeBrotli, Remove: CompressionTypeNone}
		default:
			return fallbackCompressionWithDefault(contentEncoding, contentType, CompressionTypeGzip)
		}
	case acceptsBrokenClient:
		if contentEncoding == "gzip" {
			return Recompression{Add: CompressionTypeNone, Remove: CompressionTypeGzip}
		}
		return Recompression{Add: CompressionTypeNone, Remove: CompressionTypeNone} // Handle this differently from default, as default might change. Broken client does not change.
	case acceptsOther:
		break
	}
	return fallbackCompressionWithDefault(contentEncoding, contentType, CompressionTypeNone)
}
// acceptsEncoding classifies a client's Accept-Encoding header value.
type acceptsEncoding int

const (
	acceptsOther        acceptsEncoding = 0
	acceptsGzip         acceptsEncoding = 1
	acceptsBrotli       acceptsEncoding = 2
	acceptsBrokenClient acceptsEncoding = 3
)

// acceptsEncodingFromString classifies an Accept-Encoding header value.
// Any parameterized value (containing ';') counts as a broken client;
// otherwise "br" wins over "gzip", which wins over everything else.
func acceptsEncodingFromString(s string) acceptsEncoding {
	switch {
	case strings.Contains(s, ";"):
		return acceptsBrokenClient
	case strings.Contains(s, "br"):
		return acceptsBrotli
	case strings.Contains(s, "gzip"):
		return acceptsGzip
	default:
		return acceptsOther
	}
}
func fallbackCompressionWithDefault(contentEncoding string, contentType string, def CompressionType) Recompression {
if (contentEncoding == "" || contentEncoding == "identity") && (contentType == "application/json" || strings.HasPrefix(contentType, "text/")) {
return Recompression{Add: def, Remove: CompressionTypeNone}
}
return Recompression{Add: CompressionTypeNone, Remove: CompressionTypeNone}
} | util/compress.go | 0.598899 | 0.446253 | compress.go | starcoder |
package spell
import (
"fmt"
"strings"
"unicode"
"github.com/PaulioRandall/scarlet-go/scarlet/value"
)
type (
	// Spell represents a builtin function.
	Spell func(env Runtime, in []value.Value, out *Output)

	// Output is a container for spell return arguments.
	Output struct {
		size int           // number of declared return slots
		out  []value.Value // the slots themselves; nil until Set
	}

	// Book represents a collections of named spells, keyed by lowercased
	// spell name.
	Book map[string]Inscription

	// Inscription represents a spell inscribed within a spell book.
	Inscription struct {
		Spell
		Name    string // original (case-preserved) spell name
		Outputs int    // number of output values the spell produces
	}

	// Scope represents a mapping of declared identifiers, with their current
	// values, available within the current scope.
	Scope map[value.Ident]value.Value

	// Runtime is a handler for performing memory related and context dependent
	// instructions such as access to scope variables and storing exit and error
	// information for the processor. It's a subset of the Runtime used by the
	// Processor that only exposes appropriate functionality for spells.
	Runtime interface {
		// Spellbook returns the book containing all spells available. Changes made
		// will not be reflected within the current environment.
		Spellbook() Book

		// Scope returns a copy of the current scope. Changes made will not be
		// reflected within the current environment.
		Scope() Scope

		// Exists returns true if the specified identifier exists within the
		// current scope.
		Exists(value.Ident) value.Bool

		// Fetch returns the value associated with the specified identifier.
		Fetch(value.Ident) value.Value

		// Fail sets the error and exit status when a non-recoverable error
		// occurs during execution.
		Fail(int, error)

		// Exit causes the program to exit with the specified exit code.
		Exit(int)

		// GetErr returns the error if set else returns nil.
		GetErr() error

		// GetExitCode returns the currently set exit code. Only meaningful if the
		// exit flag has been set.
		GetExitCode() int

		// GetExitFlag returns true if the program should stop execution after
		// finishing any instruction currently being executed.
		GetExitFlag() bool
	}
)
// Inscribe stores a named spell within the Book returning an error if any of
// the arguments are invalid.
func (b Book) Inscribe(name string, outputs int, spell Spell) error {
	// Report an empty name as an error, per the documented contract; the
	// original panicked here while every other invalid argument returned
	// an error.
	if name == "" {
		return fmt.Errorf("Attempted to register a spell with no name")
	}
	if !isSpellIdent(name) {
		return fmt.Errorf("Attempted to register spell with bad name %q", name)
	}
	if spell == nil {
		return fmt.Errorf("Attempted to register nil spell with name %q", name)
	}
	if outputs < 0 {
		return fmt.Errorf("Attempted to register spell"+
			" with variable or negative output parameters %q", name)
	}
	// Spell names are case-insensitive: store under the lowercased key but
	// preserve the original name in the inscription.
	k := strings.ToLower(name)
	b[k] = Inscription{
		Spell:   spell,
		Name:    name,
		Outputs: outputs,
	}
	return nil
}
// Names returns the unsorted names of all spells in the Book.
func (b Book) Names() []string {
	names := make([]string, 0, len(b))
	for name := range b {
		names = append(names, name)
	}
	return names
}
// Lookup returns the inscription for a spell given its (case-insensitive)
// name and whether such a spell exists.
func (b Book) Lookup(name string) (Inscription, bool) {
	ins, ok := b[strings.ToLower(name)]
	return ins, ok
}
// isSpellIdent reports whether id is a valid spell name: one or two
// non-empty, all-letter parts separated by a single dot, e.g. "abc" or
// "abc.xyz". The original accepted empty parts, so "", ".", "x." and
// ".x" all passed; they are now rejected.
func isSpellIdent(id string) bool {
	r := []rune(id)
	size := len(r)
	i := 0

	// parsePart consumes a run of letters and reports whether it was
	// non-empty.
	parsePart := func() bool {
		start := i
		for i < size && unicode.IsLetter(r[i]) {
			i++
		}
		return i > start
	}

	if !parsePart() {
		return false
	}
	if i == size {
		return true // single-part name, e.g. "abc"
	}
	if r[i] != '.' {
		return false
	}
	i++
	// The second part must be non-empty and must reach the end (only one
	// dot is allowed).
	return parsePart() && i == size
}
// NewOutput returns a new initialised output with room for size values.
func NewOutput(size int) *Output {
	return &Output{
		size: size,
		out:  make([]value.Value, size),
	}
}
// Get returns the return value of the index 'i' or nil if it has not been set
// yet. Panics when i is outside [0, size).
func (o *Output) Get(i int) value.Value {
	if i < 0 || i >= o.size {
		panic("Out of range: invalid spell output index")
	}
	return o.out[i]
}
// Set sets the value of a spell return value. Panics when i is outside
// [0, size).
func (o *Output) Set(i int, v value.Value) {
	if i < 0 || i >= o.size {
		panic("Out of range: invalid spell output index")
	}
	o.out[i] = v
}
// Slice returns a slice of all return values. Note that unset slots will be
// nil.
// The returned slice aliases the Output's internal storage, so mutations by
// the caller are visible to subsequent Get calls.
func (o *Output) Slice() []value.Value {
	return o.out
}
package main
import (
"crypto/tls"
"net"
)
// Generate is the third step of the algorithm. Given the
// observed round trips, we generate measurement targets and
// execute those measurements so the probe has a benchmark.
// URLMeasurement is a measurement of a given URL that
// includes connectivity measurement for each endpoint
// implied by the given URL.
type URLMeasurement struct {
	// URL is the URL we're using
	URL string
	// DNS contains the domain names resolved by the helper.
	DNS *DNSMeasurement
	// RoundTrip is the related round trip.
	RoundTrip *RoundTrip
	// Endpoints contains endpoint measurements, one per
	// resolved address of the URL's hostname.
	Endpoints []*HTTPEndpointMeasurement
}
// DNSMeasurement is a DNS measurement.
type DNSMeasurement struct {
	// Domain is the domain we wanted to resolve.
	Domain string
	// Addrs contains the resolved addresses (as returned
	// by net.LookupHost).
	Addrs []string
}
// HTTPEndpointMeasurement is a measurement of a specific HTTP endpoint.
type HTTPEndpointMeasurement struct {
	// Endpoint is the endpoint we're measuring ("host:port").
	Endpoint string
	// TCPConnectMeasurement is the related TCP connect measurement.
	// Nil until the connect attempt has been performed.
	TCPConnectMeasurement *TCPConnectMeasurement
	// TLSHandshakeMeasurement is the related TLS handshake measurement.
	// Nil for plain-http endpoints and when the TCP connect failed.
	TLSHandshakeMeasurement *TLSHandshakeMeasurement
}
// Implementation note: OONI uses nil to indicate no error but here
// it's more convenient to just use an empty string.
// TCPConnectMeasurement is a TCP connect measurement.
type TCPConnectMeasurement struct {
	// Failure is the error that occurred; an empty string means
	// the connect succeeded (see implementation note above).
	Failure string
}
// TLSHandshakeMeasurement is a TLS handshake measurement.
type TLSHandshakeMeasurement struct {
	// Failure is the error that occurred; an empty string means
	// the handshake succeeded.
	Failure string
}
// Generate takes in input a list of round trips and outputs
// a list of connectivity measurements for each of them.
func Generate(rts []*RoundTrip) ([]*URLMeasurement, error) {
var out []*URLMeasurement
for _, rt := range rts {
addrs, err := net.LookupHost(rt.Request.URL.Hostname())
if err != nil {
return nil, err
}
currentURL := &URLMeasurement{
DNS: &DNSMeasurement{
Domain: rt.Request.URL.Hostname(),
Addrs: addrs,
},
RoundTrip: rt,
URL: rt.Request.URL.String(),
}
out = append(out, currentURL)
for _, addr := range addrs {
// simplified algorithm to choose the port.
var endpoint string
switch rt.Request.URL.Scheme {
case "http":
endpoint = net.JoinHostPort(addr, "80")
case "https":
endpoint = net.JoinHostPort(addr, "443")
default:
panic("should not happen")
}
currentEndpoint := &HTTPEndpointMeasurement{
Endpoint: endpoint,
}
currentURL.Endpoints = append(currentURL.Endpoints, currentEndpoint)
tcpConn, err := net.Dial("tcp", endpoint)
if err != nil {
s := err.Error()
currentEndpoint.TCPConnectMeasurement = &TCPConnectMeasurement{
Failure: s,
}
continue
}
defer tcpConn.Close() // suboptimal of course
currentEndpoint.TCPConnectMeasurement = &TCPConnectMeasurement{}
if rt.Request.URL.Scheme == "https" {
tlsConn := tls.Client(tcpConn, &tls.Config{
ServerName: rt.Request.URL.Hostname(),
})
err := tlsConn.Handshake()
if err != nil {
s := err.Error()
currentEndpoint.TLSHandshakeMeasurement = &TLSHandshakeMeasurement{
Failure: s,
}
continue
}
defer tlsConn.Close() // suboptimal of course
currentEndpoint.TLSHandshakeMeasurement = &TLSHandshakeMeasurement{}
}
}
}
return out, nil
} | generate.go | 0.586049 | 0.414425 | generate.go | starcoder |
package circuit
import (
"encoding/json"
"fmt"
"math"
"github.com/heustis/tsp-solver-go/model"
)
// ClosestGreedy is an O(n^2) greedy algorithm that performs the following steps:
// 1. builds a convex hull surrounding the points _(optimum for 2D, an approximation for 3D and graphs)_,
//    a. Compute the midpoint of all the points.
//    b. Finds the point farthest from the midpoint.
//    c. Finds the point farthest from the point in 1a.
//    d. Creates initial edges 1b->1c and 1c->1b _(note: all other points are exterior at this time)_
//    e. Finds the exterior point farthest from its closest edge and attach it to the circuit by splitting its closest edge.
//    f. Find any points that were external to the circuit and are now internal to the circuit, and stop considering them for future iterations.
//    g. Repeat 1e and 1f until all points are attached to the circuit or internal to the circuit.
// 2. tracks each unattached point and its the closest edge,
// 3. selects the point that increases the length of the circuit the least, when attached to its closest edge,
// 4. attaches the point from step 3 to the circuit,
// 5. updates the closest edge for all remaining unattached points, to account for splitting an existing edge into two new edges,
// 6. repeats steps 3-5 until all points are attached to the circuit.
type ClosestGreedy struct {
	// circuitEdges is the ordered list of edges currently forming the circuit.
	circuitEdges []model.CircuitEdge
	// closestEdges is a heap of DistanceToEdge entries for unattached vertices,
	// ordered so the smallest distance increase is popped first.
	closestEdges *model.Heap
	// enableInteriorUpdates selects the update strategy (see Update).
	enableInteriorUpdates bool
	// interiorVertices is only populated when enableInteriorUpdates is true.
	interiorVertices map[model.CircuitVertex]bool
	// length is the running total length of the circuit's edges.
	length float64
	// unattachedVertices tracks vertices not yet attached to the circuit.
	unattachedVertices map[model.CircuitVertex]bool
}
// NewClosestGreedy creates a new ClosestGreedy: it builds the perimeter
// (convex hull) via the supplied builder, then seeds the closestEdges heap
// with every unattached vertex keyed by its distance increase.
func NewClosestGreedy(vertices []model.CircuitVertex, perimeterBuilder model.PerimeterBuilder, enableInteriorUpdates bool) *ClosestGreedy {
	circuitEdges, unattachedVertices := perimeterBuilder(vertices)
	c := &ClosestGreedy{
		circuitEdges:          circuitEdges,
		closestEdges:          model.NewHeap(model.GetDistanceToEdgeForHeap),
		enableInteriorUpdates: enableInteriorUpdates,
		interiorVertices:      make(map[model.CircuitVertex]bool),
		unattachedVertices:    unattachedVertices,
	}
	// Record the closest edge for each interior point, based on distance
	// increase; the heap yields them from closest to farthest.
	for v := range unattachedVertices {
		if enableInteriorUpdates {
			c.interiorVertices[v] = true
		}
		e := model.FindClosestEdge(v, circuitEdges)
		c.closestEdges.PushHeap(&model.DistanceToEdge{
			Vertex:   v,
			Edge:     e,
			Distance: e.DistanceIncrease(v),
		})
	}
	for _, e := range circuitEdges {
		c.length += e.GetLength()
	}
	return c
}
// FindNextVertexAndEdge pops the closest unattached vertex and its closest
// edge from the heap. Returns (nil, nil) when the heap is exhausted.
func (c *ClosestGreedy) FindNextVertexAndEdge() (model.CircuitVertex, model.CircuitEdge) {
	next, okay := c.closestEdges.PopHeap().(*model.DistanceToEdge)
	if !okay {
		return nil, nil
	}
	return next.Vertex, next.Edge
}
// GetAttachedEdges returns the edges currently forming the circuit.
// The returned slice aliases internal state; callers must not mutate it.
func (c *ClosestGreedy) GetAttachedEdges() []model.CircuitEdge {
	return c.circuitEdges
}
// GetAttachedVertices returns the circuit's vertices in traversal order,
// taking the start vertex of each attached edge.
func (c *ClosestGreedy) GetAttachedVertices() []model.CircuitVertex {
	vertices := make([]model.CircuitVertex, 0, len(c.circuitEdges))
	for _, edge := range c.circuitEdges {
		vertices = append(vertices, edge.GetStart())
	}
	return vertices
}
// GetClosestEdges returns the heap of unattached vertices ordered by the
// distance increase of attaching each to its current closest edge.
func (c *ClosestGreedy) GetClosestEdges() *model.Heap {
	return c.closestEdges
}

// GetInteriorVertices returns the interior-vertex tracking map (populated
// only when interior updates are enabled). Not a copy; do not mutate.
func (c *ClosestGreedy) GetInteriorVertices() map[model.CircuitVertex]bool {
	return c.interiorVertices
}

// GetLength returns the current total length of the circuit.
func (c *ClosestGreedy) GetLength() float64 {
	return c.length
}

// GetUnattachedVertices returns the vertices not yet attached to the
// circuit. Not a copy; do not mutate.
func (c *ClosestGreedy) GetUnattachedVertices() map[model.CircuitVertex]bool {
	return c.unattachedVertices
}
// Update attaches vertexToAdd to the circuit by splitting edgeToSplit into
// two edges, then refreshes the bookkeeping for the remaining unattached
// vertices. A nil vertexToAdd is a no-op. Panics if edgeToSplit is not part
// of the circuit.
func (c *ClosestGreedy) Update(vertexToAdd model.CircuitVertex, edgeToSplit model.CircuitEdge) {
	if vertexToAdd == nil {
		return
	}
	var edgeIndex int
	c.circuitEdges, edgeIndex = model.SplitEdge(c.circuitEdges, edgeToSplit, vertexToAdd)
	if edgeIndex < 0 {
		expectedEdgeJson, _ := json.Marshal(edgeToSplit)
		actualCircuitJson, _ := json.Marshal(c.circuitEdges)
		panic(fmt.Errorf("edge not found in circuit=%p, expected=%s, \ncircuit=%s", c, string(expectedEdgeJson), string(actualCircuitJson)))
	}
	delete(c.unattachedVertices, vertexToAdd)
	if c.enableInteriorUpdates {
		c.updateInteriorPoints(edgeToSplit, c.circuitEdges[edgeIndex], c.circuitEdges[edgeIndex+1])
		return
	}
	c.updateClosestEdges(edgeToSplit, c.circuitEdges[edgeIndex], c.circuitEdges[edgeIndex+1])
}
// getClosestEdgeForAttachedPoint returns the edge that would replace the
// two edges adjacent to vertex if vertex were detached (i.e. the edge from
// its predecessor's start to its successor's end), or nil if vertex is not
// attached to the circuit.
func (c *ClosestGreedy) getClosestEdgeForAttachedPoint(vertex model.CircuitVertex) model.CircuitEdge {
	prevStart := c.circuitEdges[len(c.circuitEdges)-1].GetStart()
	for _, edge := range c.circuitEdges {
		if edge.GetStart() == vertex {
			return prevStart.EdgeTo(edge.GetEnd())
		}
		prevStart = edge.GetStart()
	}
	return nil
}
// updateClosestEdges refreshes the heap entries of all unattached vertices
// after removedEdge was split into edgeA and edgeB, and updates the circuit
// length accordingly. Branch order matters: a vertex is reassigned to the
// closer of the two new edges only if that improves on its current distance;
// otherwise, if its recorded edge was the removed one, a full search over
// the circuit finds its new closest edge.
func (c *ClosestGreedy) updateClosestEdges(removedEdge model.CircuitEdge, edgeA model.CircuitEdge, edgeB model.CircuitEdge) {
	c.length += edgeA.GetLength() + edgeB.GetLength() - removedEdge.GetLength()
	for _, x := range c.closestEdges.GetValues() {
		previous := x.(*model.DistanceToEdge)
		distA := edgeA.DistanceIncrease(previous.Vertex)
		distB := edgeB.DistanceIncrease(previous.Vertex)
		// Prefer edgeA on ties (distA <= distB) so behavior is deterministic.
		if distA < previous.Distance && distA <= distB {
			previous.Edge = edgeA
			previous.Distance = distA
		} else if distB < previous.Distance {
			previous.Edge = edgeB
			previous.Distance = distB
		} else if previous.Edge == removedEdge {
			// The recorded edge no longer exists and neither new edge is
			// closer, so rescan the whole circuit for the new closest edge.
			previous.Edge = model.FindClosestEdge(previous.Vertex, c.circuitEdges)
			previous.Distance = previous.Edge.DistanceIncrease(previous.Vertex)
		}
	}
	// Entries were mutated in place, so the heap must be rebuilt.
	c.closestEdges.Heapify()
}
// updateInteriorPoints is the interior-updates variant of the post-split
// refresh: besides updating the circuit length, it detaches any attached
// interior vertex that is now closer to one of the two new edges than to
// its own position in the circuit, then recomputes the closest edge for
// every unattached vertex.
func (c *ClosestGreedy) updateInteriorPoints(removedEdge model.CircuitEdge, edgeA model.CircuitEdge, edgeB model.CircuitEdge) {
	c.length += edgeA.GetLength() + edgeB.GetLength() - removedEdge.GetLength()
	// Detach any interior, attached vertices that are now closer to either created edge than they are to their attached edge.
	for vertex := range c.interiorVertices {
		// Ignore unattached vertices and vertices attached to one of the newly created edges.
		if c.unattachedVertices[vertex] || edgeA.GetStart() == vertex || edgeA.GetEnd() == vertex || edgeB.GetEnd() == vertex {
			continue
		}
		// Compare the vertex's current contribution against the two new edges.
		closestAttached := c.getClosestEdgeForAttachedPoint(vertex)
		previousDistance := closestAttached.DistanceIncrease(vertex)
		if edgeA.DistanceIncrease(vertex) < previousDistance || edgeB.DistanceIncrease(vertex) < previousDistance {
			// Detach: mark unattached, merge its two adjacent edges back
			// into one, and re-enter it into the heap.
			c.unattachedVertices[vertex] = true
			c.circuitEdges, _, _, _ = model.MergeEdgesByVertex(c.circuitEdges, vertex)
			// This will be updated by ReplaceAll in the next step, so the edge value and distance are unimportant.
			c.closestEdges.PushHeap(&model.DistanceToEdge{
				Vertex:   vertex,
				Edge:     nil,
				Distance: math.MaxFloat64,
			})
		}
	}
	// Since multiple edges could have been replaced (due to both the newly attached point and any removed points) recalculate the closest edge for each unattached vertex.
	c.closestEdges.ReplaceAll(func(x interface{}) interface{} {
		previous := x.(*model.DistanceToEdge)
		previous.Edge = model.FindClosestEdge(previous.Vertex, c.circuitEdges)
		previous.Distance = previous.Edge.DistanceIncrease(previous.Vertex)
		return previous
	})
}
// Compile-time check that ClosestGreedy implements model.Circuit.
var _ model.Circuit = (*ClosestGreedy)(nil)
package model
import (
"github.com/alexhans1/certainty_poker/helpers"
)
// MoneyInQuestionRound returns the amount that the player has contributed
// to the pot across all betting rounds of the current question round.
func (p *Player) MoneyInQuestionRound() int {
	total := 0
	for _, bettingRound := range p.Game.CurrentQuestionRound().BettingRounds {
		for _, bet := range bettingRound.Bets {
			if bet.PlayerID == p.ID {
				total += bet.Amount
			}
		}
	}
	return total
}
// MoneyInBettingRound returns the amount that the player has contributed
// to the pot of the current betting round.
func (p *Player) MoneyInBettingRound() int {
	total := 0
	for _, bet := range p.Game.CurrentQuestionRound().CurrentBettingRound().Bets {
		if bet.PlayerID != p.ID {
			continue
		}
		total += bet.Amount
	}
	return total
}
// HasFolded returns true if the player is included in the FoldedPlayerIds
// list of the current question round.
func (p *Player) HasFolded() bool {
	return helpers.ContainsString(p.Game.CurrentQuestionRound().FoldedPlayerIds, p.ID)
}
// IsOutGame returns true if the player has no more money left and
// has no chance of winning some in the current question round.
// NOTE: this method has a side effect — when the player is determined to be
// out, it also sets p.IsDead to true.
func (p *Player) IsOutGame() bool {
	if p.Money > 0 {
		return false
	}
	// Broke but still in contention: money committed to an unfinished
	// question round they haven't folded from (i.e. all-in).
	if p.MoneyInQuestionRound() > 0 && !p.HasFolded() && !p.Game.CurrentQuestionRound().IsOver {
		return false
	}
	p.IsDead = true
	return true
}
// IsActive returns true if the player can win money in current QR.
// Note: calls IsOutGame, which may set p.IsDead as a side effect.
func (p *Player) IsActive() bool {
	return !(p.HasFolded() || p.IsOutGame())
}
// IsActionable returns true if the player can still place bets in current QR.
// Note: calls IsOutGame, which may set p.IsDead as a side effect.
func (p *Player) IsActionable() bool {
	return !(p.HasFolded() || p.IsOutGame() || p.IsAllIn())
}
// IsAllIn returns true if the player has no money left but money in current
// QR and has not folded.
// Note: calls IsOutGame, which may set p.IsDead as a side effect.
func (p *Player) IsAllIn() bool {
	return !(p.Money > 0 || p.IsOutGame() || p.HasFolded())
}
// FindNextActionablePlayer returns the next neighbour that is actionable.
// Implemented iteratively (the previous version recursed once per skipped
// player) so a long run of non-actionable players cannot grow the call stack.
// NOTE(review): like the recursive version, this never terminates if no
// player at the table is actionable — callers must guarantee at least one is.
func (p *Player) FindNextActionablePlayer() *Player {
	next := p.getNextPlayer()
	for !next.IsActionable() {
		next = next.getNextPlayer()
	}
	return next
}
// FindNextInPlayer returns the next neighbour that is still in the game.
// Implemented iteratively (the previous version recursed once per skipped
// player) so a long run of out-of-game players cannot grow the call stack.
// NOTE(review): like the recursive version, this never terminates if every
// player is out of the game — callers must guarantee at least one is not.
func (p *Player) FindNextInPlayer() *Player {
	next := p.getNextPlayer()
	for next.IsOutGame() {
		next = next.getNextPlayer()
	}
	return next
}
// getNextPlayer returns the player seated after p in the game's player
// list, wrapping around at the end. Returns nil if p is not in the list.
func (p *Player) getNextPlayer() *Player {
	players := p.Game.Players
	count := len(players)
	for i := 0; i < count; i++ {
		if players[i].ID == p.ID {
			return players[(i+1)%count]
		}
	}
	return nil
}
// FindPlayer finds player by ID in given player slice
func FindPlayer(slice []*Player, id string) (player *Player) {
for i := range slice {
if slice[i].ID == id {
return slice[i]
}
}
return nil
} | server/graph/model/player.go | 0.699254 | 0.406509 | player.go | starcoder |
package fixtures
// MessageA is type used as a dogma.Message in tests.
type MessageA struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageA) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageA1 is an instance of MessageA with a distinct value.
MessageA1 = MessageA{"A1"}
// MessageA2 is an instance of MessageA with a distinct value.
MessageA2 = MessageA{"A2"}
// MessageA3 is an instance of MessageA with a distinct value.
MessageA3 = MessageA{"A3"}
)
// MessageB is type used as a dogma.Message in tests.
type MessageB struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageB) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageB1 is an instance of MessageB with a distinct value.
MessageB1 = MessageB{"B1"}
// MessageB2 is an instance of MessageB with a distinct value.
MessageB2 = MessageB{"B2"}
// MessageB3 is an instance of MessageB with a distinct value.
MessageB3 = MessageB{"B3"}
)
// MessageC is type used as a dogma.Message in tests.
type MessageC struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageC) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageC1 is an instance of MessageC with a distinct value.
MessageC1 = MessageC{"C1"}
// MessageC2 is an instance of MessageC with a distinct value.
MessageC2 = MessageC{"C2"}
// MessageC3 is an instance of MessageC with a distinct value.
MessageC3 = MessageC{"C3"}
)
// MessageD is type used as a dogma.Message in tests.
type MessageD struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageD) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageD1 is an instance of MessageD with a distinct value.
MessageD1 = MessageD{"D1"}
// MessageD2 is an instance of MessageD with a distinct value.
MessageD2 = MessageD{"D2"}
// MessageD3 is an instance of MessageD with a distinct value.
MessageD3 = MessageD{"D3"}
)
// MessageE is type used as a dogma.Message in tests.
type MessageE struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageE) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageE1 is an instance of MessageE with a distinct value.
MessageE1 = MessageE{"E1"}
// MessageE2 is an instance of MessageE with a distinct value.
MessageE2 = MessageE{"E2"}
// MessageE3 is an instance of MessageE with a distinct value.
MessageE3 = MessageE{"E3"}
)
// MessageF is type used as a dogma.Message in tests.
type MessageF struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageF) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageF1 is an instance of MessageF with a distinct value.
MessageF1 = MessageF{"F1"}
// MessageF2 is an instance of MessageF with a distinct value.
MessageF2 = MessageF{"F2"}
// MessageF3 is an instance of MessageF with a distinct value.
MessageF3 = MessageF{"F3"}
)
// MessageG is type used as a dogma.Message in tests.
type MessageG struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageG) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageG1 is an instance of MessageG with a distinct value.
MessageG1 = MessageG{"G1"}
// MessageG2 is an instance of MessageG with a distinct value.
MessageG2 = MessageG{"G2"}
// MessageG3 is an instance of MessageG with a distinct value.
MessageG3 = MessageG{"G3"}
)
// MessageH is type used as a dogma.Message in tests.
type MessageH struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageH) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageH1 is an instance of MessageH with a distinct value.
MessageH1 = MessageH{"H1"}
// MessageH2 is an instance of MessageH with a distinct value.
MessageH2 = MessageH{"H2"}
// MessageH3 is an instance of MessageH with a distinct value.
MessageH3 = MessageH{"H3"}
)
// MessageI is type used as a dogma.Message in tests.
type MessageI struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageI) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageI1 is an instance of MessageI with a distinct value.
MessageI1 = MessageI{"I1"}
// MessageI2 is an instance of MessageI with a distinct value.
MessageI2 = MessageI{"I2"}
// MessageI3 is an instance of MessageI with a distinct value.
MessageI3 = MessageI{"I3"}
)
// MessageJ is type used as a dogma.Message in tests.
type MessageJ struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageJ) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageJ1 is an instance of MessageJ with a distinct value.
MessageJ1 = MessageJ{"J1"}
// MessageJ2 is an instance of MessageJ with a distinct value.
MessageJ2 = MessageJ{"J2"}
// MessageJ3 is an instance of MessageJ with a distinct value.
MessageJ3 = MessageJ{"J3"}
)
// MessageK is type used as a dogma.Message in tests.
type MessageK struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageK) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageK1 is an instance of MessageK with a distinct value.
MessageK1 = MessageK{"K1"}
// MessageK2 is an instance of MessageK with a distinct value.
MessageK2 = MessageK{"K2"}
// MessageK3 is an instance of MessageK with a distinct value.
MessageK3 = MessageK{"K3"}
)
// MessageL is type used as a dogma.Message in tests.
type MessageL struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageL) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageL1 is an instance of MessageL with a distinct value.
MessageL1 = MessageL{"L1"}
// MessageL2 is an instance of MessageL with a distinct value.
MessageL2 = MessageL{"L2"}
// MessageL3 is an instance of MessageL with a distinct value.
MessageL3 = MessageL{"L3"}
)
// MessageM is type used as a dogma.Message in tests.
type MessageM struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageM) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageM1 is an instance of MessageM with a distinct value.
MessageM1 = MessageM{"M1"}
// MessageM2 is an instance of MessageM with a distinct value.
MessageM2 = MessageM{"M2"}
// MessageM3 is an instance of MessageM with a distinct value.
MessageM3 = MessageM{"M3"}
)
// MessageN is type used as a dogma.Message in tests.
type MessageN struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageN) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageN1 is an instance of MessageN with a distinct value.
MessageN1 = MessageN{"N1"}
// MessageN2 is an instance of MessageN with a distinct value.
MessageN2 = MessageN{"N2"}
// MessageN3 is an instance of MessageN with a distinct value.
MessageN3 = MessageN{"N3"}
)
// MessageO is type used as a dogma.Message in tests.
type MessageO struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageO) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageO1 is an instance of MessageO with a distinct value.
MessageO1 = MessageO{"O1"}
// MessageO2 is an instance of MessageO with a distinct value.
MessageO2 = MessageO{"O2"}
// MessageO3 is an instance of MessageO with a distinct value.
MessageO3 = MessageO{"O3"}
)
// MessageP is type used as a dogma.Message in tests.
type MessageP struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageP) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageP1 is an instance of MessageP with a distinct value.
MessageP1 = MessageP{"P1"}
// MessageP2 is an instance of MessageP with a distinct value.
MessageP2 = MessageP{"P2"}
// MessageP3 is an instance of MessageP with a distinct value.
MessageP3 = MessageP{"P3"}
)
// MessageQ is type used as a dogma.Message in tests.
type MessageQ struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageQ) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageQ1 is an instance of MessageQ with a distinct value.
MessageQ1 = MessageQ{"Q1"}
// MessageQ2 is an instance of MessageQ with a distinct value.
MessageQ2 = MessageQ{"Q2"}
// MessageQ3 is an instance of MessageQ with a distinct value.
MessageQ3 = MessageQ{"Q3"}
)
// MessageR is type used as a dogma.Message in tests.
type MessageR struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageR) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageR1 is an instance of MessageR with a distinct value.
MessageR1 = MessageR{"R1"}
// MessageR2 is an instance of MessageR with a distinct value.
MessageR2 = MessageR{"R2"}
// MessageR3 is an instance of MessageR with a distinct value.
MessageR3 = MessageR{"R3"}
)
// MessageS is type used as a dogma.Message in tests.
type MessageS struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageS) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageS1 is an instance of MessageS with a distinct value.
MessageS1 = MessageS{"S1"}
// MessageS2 is an instance of MessageS with a distinct value.
MessageS2 = MessageS{"S2"}
// MessageS3 is an instance of MessageS with a distinct value.
MessageS3 = MessageS{"S3"}
)
// MessageT is type used as a dogma.Message in tests.
type MessageT struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageT) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageT1 is an instance of MessageT with a distinct value.
MessageT1 = MessageT{"T1"}
// MessageT2 is an instance of MessageT with a distinct value.
MessageT2 = MessageT{"T2"}
// MessageT3 is an instance of MessageT with a distinct value.
MessageT3 = MessageT{"T3"}
)
// MessageU is type used as a dogma.Message in tests.
type MessageU struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageU) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageU1 is an instance of MessageU with a distinct value.
MessageU1 = MessageU{"U1"}
// MessageU2 is an instance of MessageU with a distinct value.
MessageU2 = MessageU{"U2"}
// MessageU3 is an instance of MessageU with a distinct value.
MessageU3 = MessageU{"U3"}
)
// MessageV is type used as a dogma.Message in tests.
type MessageV struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageV) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageV1 is an instance of MessageV with a distinct value.
MessageV1 = MessageV{"V1"}
// MessageV2 is an instance of MessageV with a distinct value.
MessageV2 = MessageV{"V2"}
// MessageV3 is an instance of MessageV with a distinct value.
MessageV3 = MessageV{"V3"}
)
// MessageW is type used as a dogma.Message in tests.
type MessageW struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageW) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageW1 is an instance of MessageW with a distinct value.
MessageW1 = MessageW{"W1"}
// MessageW2 is an instance of MessageW with a distinct value.
MessageW2 = MessageW{"W2"}
// MessageW3 is an instance of MessageW with a distinct value.
MessageW3 = MessageW{"W3"}
)
// MessageX is type used as a dogma.Message in tests.
type MessageX struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageX) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageX1 is an instance of MessageX with a distinct value.
MessageX1 = MessageX{"X1"}
// MessageX2 is an instance of MessageX with a distinct value.
MessageX2 = MessageX{"X2"}
// MessageX3 is an instance of MessageX with a distinct value.
MessageX3 = MessageX{"X3"}
)
// MessageY is type used as a dogma.Message in tests.
type MessageY struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageY) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageY1 is an instance of MessageY with a distinct value.
MessageY1 = MessageY{"Y1"}
// MessageY2 is an instance of MessageY with a distinct value.
MessageY2 = MessageY{"Y2"}
// MessageY3 is an instance of MessageY with a distinct value.
MessageY3 = MessageY{"Y3"}
)
// MessageZ is type used as a dogma.Message in tests.
type MessageZ struct {
Value interface{}
}
// Validate returns m.Value if it is an error.
func (m MessageZ) Validate() error {
err, _ := m.Value.(error)
return err
}
var (
// MessageZ1 is an instance of MessageZ with a distinct value.
MessageZ1 = MessageZ{"Z1"}
// MessageZ2 is an instance of MessageZ with a distinct value.
MessageZ2 = MessageZ{"Z2"}
// MessageZ3 is an instance of MessageZ with a distinct value.
MessageZ3 = MessageZ{"Z3"}
) | fixtures/message.go | 0.711732 | 0.52208 | message.go | starcoder |
package responses
type Operation struct {
D struct {
Results []struct {
Metadata struct {
ID string `json:"id"`
URI string `json:"uri"`
Type string `json:"type"`
Etag string `json:"etag"`
} `json:"__metadata"`
InspectionPlanGroup string `json:"InspectionPlanGroup"`
BOOOperationInternalID string `json:"BOOOperationInternalID"`
BOOCharacteristic string `json:"BOOCharacteristic"`
BOOCharacteristicVersion string `json:"BOOCharacteristicVersion"`
BOOOpInternalVersionCounter string `json:"BOOOpInternalVersionCounter"`
InspectionPlanInternalVersion string `json:"InspectionPlanInternalVersion"`
InspectionPlan string `json:"InspectionPlan"`
ValidityStartDate string `json:"ValidityStartDate"`
ValidityEndDate string `json:"ValidityEndDate"`
ChangeNumber string `json:"ChangeNumber"`
IsDeleted bool `json:"IsDeleted"`
BOOOperationPRTInternalID string `json:"BOOOperationPRTInternalID"`
InspectionMethod string `json:"InspectionMethod"`
InspectionMethodVersion string `json:"InspectionMethodVersion"`
InspectionMethodPlant string `json:"InspectionMethodPlant"`
InspSpecImportanceCode string `json:"InspSpecImportanceCode"`
InspectorQualification string `json:"InspectorQualification"`
InspectionSpecification string `json:"InspectionSpecification"`
InspectionSpecificationVersion string `json:"InspectionSpecificationVersion"`
InspectionSpecificationPlant string `json:"InspectionSpecificationPlant"`
BOOCharcHasInspSpecReference string `json:"BOOCharcHasInspSpecReference"`
ProdnRsceToolItemNumber string `json:"ProdnRsceToolItemNumber"`
InspSpecControlIndicators string `json:"InspSpecControlIndicators"`
InspSpecIsQuantitative string `json:"InspSpecIsQuantitative"`
InspSpecIsMeasuredValueRqd string `json:"InspSpecIsMeasuredValueRqd"`
InspSpecIsSelectedSetRequired string `json:"InspSpecIsSelectedSetRequired"`
InspSpecIsUpperLimitRequired string `json:"InspSpecIsUpperLimitRequired"`
InspSpecIsLowerLimitRequired string `json:"InspSpecIsLowerLimitRequired"`
InspSpecIsTargetValueInLimit string `json:"InspSpecIsTargetValueInLimit"`
InspectionScope string `json:"InspectionScope"`
InspSpecIsLongTermInspection string `json:"InspSpecIsLongTermInspection"`
InspSpecRecordingType string `json:"InspSpecRecordingType"`
InspResultIsDocumentationRqd string `json:"InspResultIsDocumentationRqd"`
InspSpecCharcCategory string `json:"InspSpecCharcCategory"`
InspSpecIsSampleQtyAdditive string `json:"InspSpecIsSampleQtyAdditive"`
InspSpecIsDestructive string `json:"InspSpecIsDestructive"`
InspSpecResultCalculation string `json:"InspSpecResultCalculation"`
InspSpecIsSamplingProcedRqd string `json:"InspSpecIsSamplingProcedRqd"`
InspSpecIsScrapRelevant string `json:"InspSpecIsScrapRelevant"`
InspSpecHasFixedCtrlIndicators string `json:"InspSpecHasFixedCtrlIndicators"`
InspSpecIsTestEquipmentRqd string `json:"InspSpecIsTestEquipmentRqd"`
InspSpecIsDefectRecordingRqd string `json:"InspSpecIsDefectRecordingRqd"`
InspSpecIsDefectsRecgAutomatic string `json:"InspSpecIsDefectsRecgAutomatic"`
InspSpecIsChgDocRequired string `json:"InspSpecIsChgDocRequired"`
InspSpecIsControlChartUsed string `json:"InspSpecIsControlChartUsed"`
InspSpecPrintControl string `json:"InspSpecPrintControl"`
InspSpecFirstUpperSpecLimit string `json:"InspSpecFirstUpperSpecLimit"`
InspSpecHasFirstUpperSpecLimit string `json:"InspSpecHasFirstUpperSpecLimit"`
InspSpecFirstLowerSpecLimit string `json:"InspSpecFirstLowerSpecLimit"`
InspSpecHasFirstLowerSpecLimit string `json:"InspSpecHasFirstLowerSpecLimit"`
InspSpecSecondUpperSpecLimit string `json:"InspSpecSecondUpperSpecLimit"`
InspSpecHasSecondUprSpecLimit string `json:"InspSpecHasSecondUprSpecLimit"`
InspSpecSecondLowerSpecLimit string `json:"InspSpecSecondLowerSpecLimit"`
InspSpecHasSecondLowrSpecLimit string `json:"InspSpecHasSecondLowrSpecLimit"`
InspSpecInputProcedure string `json:"InspSpecInputProcedure"`
InspSpecHasFormula string `json:"InspSpecHasFormula"`
InspSpecFormula1 string `json:"InspSpecFormula1"`
InspSpecFormula2 string `json:"InspSpecFormula2"`
InspSpecNumberOfClasses int `json:"InspSpecNumberOfClasses"`
InspSpecClassWidthQty string `json:"InspSpecClassWidthQty"`
InspSpecHasClassWidth string `json:"InspSpecHasClassWidth"`
InspSpecClassMidpointQty string `json:"InspSpecClassMidpointQty"`
InspSpecHasClassMidpoint string `json:"InspSpecHasClassMidpoint"`
InspToleranceSpecification string `json:"InspToleranceSpecification"`
InspSpecDecimalPlaces int `json:"InspSpecDecimalPlaces"`
InspectionSpecificationUnit string `json:"InspectionSpecificationUnit"`
InspSpecTargetValue string `json:"InspSpecTargetValue"`
InspSpecHasTargetValue string `json:"InspSpecHasTargetValue"`
InspSpecUpperLimit string `json:"InspSpecUpperLimit"`
InspSpecLowerLimit string `json:"InspSpecLowerLimit"`
InspSpecHasLowerLimit string `json:"InspSpecHasLowerLimit"`
InspSpecHasUpperLimit string `json:"InspSpecHasUpperLimit"`
InspSpecDefectCodeGrpRejection string `json:"InspSpecDefectCodeGrpRejection"`
InspSpecDefectCodeRejection string `json:"InspSpecDefectCodeRejection"`
InspSpecDefectCodeGrpRjcnUpper string `json:"InspSpecDefectCodeGrpRjcnUpper"`
InspSpecDefectCodeRjcnUpper string `json:"InspSpecDefectCodeRjcnUpper"`
InspSpecDefectCodeGrpRjcnLower string `json:"InspSpecDefectCodeGrpRjcnLower"`
InspSpecDefectCodeRjcnLower string `json:"InspSpecDefectCodeRjcnLower"`
SelectedCodeSet string `json:"SelectedCodeSet"`
SelectedCodeSetPlant string `json:"SelectedCodeSetPlant"`
InspSpecAdditionalCatalog2 string `json:"InspSpecAdditionalCatalog2"`
InspSpecAdditionalSelectedSet2 string `json:"InspSpecAdditionalSelectedSet2"`
InspSpecAdditionalCodeGroup2 string `json:"InspSpecAdditionalCodeGroup2"`
InspSpecAddlSeldCodeSetPlant2 string `json:"InspSpecAddlSeldCodeSetPlant2"`
InspSpecAdditionalCatalog3 string `json:"InspSpecAdditionalCatalog3"`
InspSpecAdditionalSelectedSet3 string `json:"InspSpecAdditionalSelectedSet3"`
InspSpecAdditionalCodeGroup3 string `json:"InspSpecAdditionalCodeGroup3"`
InspSpecAddlSeldCodeSetPlant3 string `json:"InspSpecAddlSeldCodeSetPlant3"`
InspSpecAdditionalCatalog4 string `json:"InspSpecAdditionalCatalog4"`
InspSpecAdditionalSelectedSet4 string `json:"InspSpecAdditionalSelectedSet4"`
InspSpecAdditionalCodeGroup4 string `json:"InspSpecAdditionalCodeGroup4"`
InspSpecAddlSeldCodeSetPlant4 string `json:"InspSpecAddlSeldCodeSetPlant4"`
InspSpecAdditionalCatalog5 string `json:"InspSpecAdditionalCatalog5"`
InspSpecAdditionalSelectedSet5 string `json:"InspSpecAdditionalSelectedSet5"`
InspSpecAdditionalCodeGroup5 string `json:"InspSpecAdditionalCodeGroup5"`
InspSpecAddlSeldCodeSetPlant5 string `json:"InspSpecAddlSeldCodeSetPlant5"`
SamplingProcedure string `json:"SamplingProcedure"`
InspCharacteristicSampleUnit string `json:"InspCharacteristicSampleUnit"`
BOOCharcSampleQuantity string `json:"BOOCharcSampleQuantity"`
InspSpecInformationField1 string `json:"InspSpecInformationField1"`
InspSpecInformationField2 string `json:"InspSpecInformationField2"`
InspSpecInformationField3 string `json:"InspSpecInformationField3"`
InspectionSpecificationText string `json:"InspectionSpecificationText"`
CreationDate string `json:"CreationDate"`
LastChangeDate string `json:"LastChangeDate"`
BillOfOperationsVersion string `json:"BillOfOperationsVersion"`
ChangedDateTime string `json:"ChangedDateTime"`
} `json:"results"`
} `json:"d"`
} | SAP_API_Caller/responses/operation.go | 0.510252 | 0.412353 | operation.go | starcoder |
package graph
import "time"
// DataMap maps a port or parameter name to its associated value.
type DataMap map[string]interface{}
/*
NodeFunc is the user-supplied function that defines the computational work of a node.
The node function is called each time a complete set of node inputs is received.
Args:
	in contains a mapping of the input port name to the actual data.
	params contains a mapping of the static parameter name and value.
Returns:
	a mapping where the key represents the output port name and the value contains
	the output data to send to the respective port.
*/
type NodeFunc func(in DataMap, params DataMap) DataMap
/*
Node represents a node in a computational graph. Each node of the graph can do
work on inputs provided to it and output the resulting values to other nodes
connected to it via the output ports. Each node has a set of parameters
that represents constant settings (e.g. threshold value, model parameters, etc).
*/
type Node struct {
	In map[string]*Port // named input ports; one value is read from each per invocation of fn
	Out map[string]*Port // named output ports; fn results are sent to matching names
	Params DataMap // static parameters handed to every fn call
	fn NodeFunc // user-supplied computation
	quit chan bool // signals the processing goroutine (and blocked reads) to stop
	stats map[string]float64 // "count", "last_runtime", "avg_runtime" (milliseconds)
}
/*
Start this node and kickoff all go routines to handle computation for this node.
Starts the output ports first, then launches one goroutine that repeatedly
gathers a full input set, runs fn, and forwards the outputs.
Returns this node for chaining.
*/
func (n *Node) Start() *Node {
	for _, p := range n.Out {
		p.Start()
	}
	go func() {
		for {
			// Needed in case no inputs to this node: gives quit a chance to
			// be observed even when the input-gathering loop below is empty.
			select {
			case <-n.quit:
				return
			default:
			}
			// Collect one value from every input port. Map iteration order is
			// nondeterministic, so ports are drained in arbitrary order; a quit
			// received mid-collection abandons the partial input set.
			inputs := make(DataMap)
			for k, p := range n.In {
				select {
				case inputs[k] = <-p.Data:
				case <-n.quit:
					return
				}
			}
			// Input complete. Trigger function and record timing stats.
			n.stats["count"] += 1
			startTime := time.Now()
			outputs := n.fn(inputs, n.Params)
			elapsedMs := time.Since(startTime).Seconds() * 1000.0
			n.stats["last_runtime"] = elapsedMs
			// Incremental (running) average: weight of the newest sample is 1/count.
			w := 1.0 / n.stats["count"]
			n.stats["avg_runtime"] = ((1.0 - w) * n.stats["avg_runtime"]) + (w * elapsedMs)
			// Forward results; outputs naming unknown ports are silently dropped.
			// NOTE(review): stats is only written from this goroutine; confirm any
			// external readers synchronize before exposing it.
			for k, v := range outputs {
				if p, ok := n.Out[k]; ok {
					p.SendAsync(v)
				}
			}
		}
	}()
	return n
}
/*
Shutdown the node. This is a blocking call. It signals the processing
goroutine to stop and then shuts down all output ports.
Returns this node for chaining.
*/
func (n *Node) Shutdown() *Node {
	// Keep feeding the quit channel until nobody is left to receive it
	// (the send times out), which means the processing loop has exited.
signal:
	for {
		select {
		case n.quit <- true:
		case <-time.After(100 * time.Millisecond):
			break signal
		}
	}
	for _, p := range n.Out {
		p.Shutdown()
	}
	return n
}
/*
Connect input ports to the specified ports. The bindings map associates each
of this node's input port names with the output port that should feed it.
Returns this node for chaining.
*/
func (n *Node) SetInputs(bindings map[string]*Port) *Node {
	for name, source := range bindings {
		source.BindTo(n.In[name])
	}
	return n
}
/*
Construct a node.
Example usage:
fn := func(in DataMap, params DataMap) DataMap {
// These would need to fetch synchronously since function computation depends on these values
bytes1 := in["img1"].([]byte)
bytes2 := in["img2"].([]byte)
result1 := ... // some computation here
result2 := ... // some computation here
return DataMap {
"diff": result1,
"mask": result2,
}
}
graph.CreateNode([]string{"img1", "img2"}, []string{"diff", "mask"}, fn)
*/
func CreateNode(in []string, out []string, fn NodeFunc) *Node {
inputPorts := make(map[string]*Port)
for _, name := range in {
inputPorts[name] = CreatePort(name)
}
outputPorts := make(map[string]*Port)
for _, name := range out {
outputPorts[name] = CreatePort(name)
}
return &Node{
In: inputPorts,
Out: outputPorts,
Params: make(DataMap),
fn: fn,
quit: make(chan bool),
stats: make(map[string]float64),
}
} | node.go | 0.803019 | 0.659007 | node.go | starcoder |
package docs
import (
"github.com/swaggo/swag"
)
var doc = `{
"swagger": "2.0",
"info": {
"description": "This is a temporarily centralized directory/PKI/metrics API to allow us to get the other Nym node types running. Its functionality will eventually be folded into other parts of Nym.",
"title": "Nym Directory API",
"termsOfService": "http://swagger.io/terms/",
"contact": {},
"license": {
"name": "Apache 2.0",
"url": "https://github.com/nymtech/nym-directory/license"
},
"version": "0.0.4"
},
"paths": {
"/api/healthcheck": {
"get": {
"description": "Returns a 200 if the directory server is available. Good route to use for automated monitoring.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"healthcheck"
],
"summary": "Lets the directory server tell the world it's alive.",
"operationId": "healthCheck",
"responses": {
"200": {}
}
}
},
"/api/metrics/mixes": {
"get": {
"description": "For demo and debug purposes it gives us the ability to generate useful visualisations of network traffic.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"metrics"
],
"summary": "Lists mixnode activity in the past 3 seconds",
"operationId": "listMixMetrics",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/models.MixMetric"
}
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
},
"post": {
"description": "For demo and debug purposes it gives us the ability to generate useful visualisations of network traffic.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"metrics"
],
"summary": "Create a metric detailing how many messages a given mixnode sent and received",
"operationId": "createMixMetric",
"parameters": [
{
"description": "object",
"name": "object",
"in": "body",
"required": true,
"schema": {
"type": "object",
"$ref": "#/definitions/models.MixMetric"
}
}
],
"responses": {
"201": {},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
},
"/api/presence/allow": {
"post": {
"description": "Sometimes when a node isn't working we need to temporarily remove it. This allows us to re-enable it once it's working again.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"presence"
],
"summary": "Removes a disallowed node from the disallowed nodes list",
"operationId": "allow",
"parameters": [
{
"description": "object",
"name": "object",
"in": "body",
"required": true,
"schema": {
"type": "object",
"$ref": "#/definitions/models.MixNodeID"
}
}
],
"responses": {
"200": {},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
},
"/api/presence/coconodes": {
"post": {
"description": "Nym Coconut nodes can ping this method to let the directory server know they're up. We can then use this info to create topologies of the overall Nym network.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"presence"
],
"summary": "Lets a coconut node tell the directory server it's alive",
"operationId": "addCocoNode",
"parameters": [
{
"description": "object",
"name": "object",
"in": "body",
"required": true,
"schema": {
"type": "object",
"$ref": "#/definitions/models.CocoHostInfo"
}
}
],
"responses": {
"201": {},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
},
"/api/presence/disallow": {
"post": {
"description": "Sometimes when a node isn't working we need to temporarily remove it from use so that it doesn't mess up QoS for the whole network.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"presence"
],
"summary": "Takes a node out of the regular topology and puts it in the disallowed nodes list",
"operationId": "disallow",
"parameters": [
{
"description": "object",
"name": "object",
"in": "body",
"required": true,
"schema": {
"type": "object",
"$ref": "#/definitions/models.MixNodeID"
}
}
],
"responses": {
"201": {},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
},
"/api/presence/disallowed": {
"get": {
"description": "Sometimes we need to take mixnodes out of the network for repair. This shows which ones are currently disallowed.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"presence"
],
"summary": "Lists Nym mixnodes that are currently disallowed",
"operationId": "disallowed",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/models.MixNodePresence"
}
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
},
"/api/presence/gateways": {
"post": {
"description": "Nym mix gateways can ping this method to let the directory server know they're up. We can then use this info to create topologies of the overall Nym network.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"presence"
],
"summary": "Lets a gateway tell the directory server it's alive",
"operationId": "addGateway",
"parameters": [
{
"description": "object",
"name": "object",
"in": "body",
"required": true,
"schema": {
"type": "object",
"$ref": "#/definitions/models.GatewayHostInfo"
}
}
],
"responses": {
"201": {},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
},
"/api/presence/mixnodes": {
"post": {
"description": "Nym mixnodes can ping this method to let the directory server know they're up. We can then use this info to create topologies of the overall Nym network.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"presence"
],
"summary": "Lets mixnode a node tell the directory server it's alive",
"operationId": "addMixNode",
"parameters": [
{
"description": "object",
"name": "object",
"in": "body",
"required": true,
"schema": {
"type": "object",
"$ref": "#/definitions/models.MixHostInfo"
}
}
],
"responses": {
"201": {},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
},
"/api/presence/topology": {
"get": {
"description": "Nym nodes periodically ping the directory server to register that they're alive. This method provides a list of nodes which have been most recently seen.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"presence"
],
"summary": "Lists which Nym mixnodes, providers, gateways, and coconodes are alive",
"operationId": "topology",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Topology"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"404": {
"description": "Not Found",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"$ref": "#/definitions/models.Error"
}
}
}
}
}
},
"definitions": {
"models.CocoHostInfo": {
"type": "object",
"required": [
"type",
"pubKey",
"version"
],
"properties": {
"host": {
"type": "string"
},
"location": {
"type": "string"
},
"pubKey": {
"type": "string"
},
"type": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"models.CocoPresence": {
"type": "object",
"required": [
"pubKey",
"version",
"type",
"lastSeen"
],
"properties": {
"host": {
"type": "string"
},
"lastSeen": {
"type": "integer"
},
"location": {
"type": "string"
},
"pubKey": {
"type": "string"
},
"type": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"models.Error": {
"type": "object",
"properties": {
"error": {
"type": "string"
}
}
},
"models.GatewayHostInfo": {
"type": "object",
"required": [
"pubKey",
"version"
],
"properties": {
"clientListener": {
"type": "string"
},
"location": {
"type": "string"
},
"mixnetListener": {
"type": "string"
},
"pubKey": {
"type": "string"
},
"registeredClients": {
"type": "array",
"items": {
"$ref": "#/definitions/models.RegisteredClient"
}
},
"version": {
"type": "string"
}
}
},
"models.GatewayPresence": {
"type": "object",
"required": [
"lastSeen",
"pubKey",
"version"
],
"properties": {
"clientListener": {
"type": "string"
},
"lastSeen": {
"type": "integer"
},
"location": {
"type": "string"
},
"mixnetListener": {
"type": "string"
},
"pubKey": {
"type": "string"
},
"registeredClients": {
"type": "array",
"items": {
"$ref": "#/definitions/models.RegisteredClient"
}
},
"version": {
"type": "string"
}
}
},
"models.MixHostInfo": {
"type": "object",
"required": [
"pubKey",
"version",
"layer"
],
"properties": {
"host": {
"type": "string"
},
"layer": {
"type": "integer"
},
"location": {
"type": "string"
},
"pubKey": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"models.MixMetric": {
"type": "object",
"required": [
"received",
"pubKey"
],
"properties": {
"pubKey": {
"type": "string"
},
"received": {
"type": "integer"
},
"sent": {
"type": "object",
"required": [
"sent"
]
}
}
},
"models.MixNodeID": {
"type": "object",
"properties": {
"pubKey": {
"type": "string"
}
}
},
"models.MixNodePresence": {
"type": "object",
"required": [
"layer",
"lastSeen",
"pubKey",
"version"
],
"properties": {
"host": {
"type": "string"
},
"lastSeen": {
"type": "integer"
},
"layer": {
"type": "integer"
},
"location": {
"type": "string"
},
"pubKey": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"models.MixProviderPresence": {
"type": "object",
"required": [
"pubKey",
"version",
"lastSeen"
],
"properties": {
"clientListener": {
"type": "string"
},
"lastSeen": {
"type": "integer"
},
"location": {
"type": "string"
},
"mixnetListener": {
"type": "string"
},
"pubKey": {
"type": "string"
},
"registeredClients": {
"type": "array",
"items": {
"$ref": "#/definitions/models.RegisteredClient"
}
},
"version": {
"type": "string"
}
}
},
"models.RegisteredClient": {
"type": "object",
"required": [
"pubKey"
],
"properties": {
"pubKey": {
"type": "string"
}
}
},
"models.Topology": {
"type": "object",
"properties": {
"cocoNodes": {
"type": "array",
"items": {
"$ref": "#/definitions/models.CocoPresence"
}
},
"gatewayNodes": {
"type": "array",
"items": {
"$ref": "#/definitions/models.GatewayPresence"
}
},
"mixNodes": {
"type": "array",
"items": {
"$ref": "#/definitions/models.MixNodePresence"
}
},
"mixProviderNodes": {
"type": "array",
"items": {
"$ref": "#/definitions/models.MixProviderPresence"
}
}
}
}
}
}`
// s implements the swag doc-provider contract by returning the generated spec.
type s struct{}
// ReadDoc returns the generated Swagger specification document as a string.
func (s *s) ReadDoc() string {
	return doc
}
func init() {
swag.Register(swag.Name, &s{})
} | docs/docs.go | 0.672009 | 0.437643 | docs.go | starcoder |
package palettegen
import (
"fmt"
"math"
"math/rand"
"github.com/shasderias/ilysa/colorful"
)
// The algorithm works in L*a*b* color space and converts to RGB in the end.
// L* in [0..1], a* and b* in [-1..1]
// lab_t is one sample point in L*a*b* space.
type lab_t struct {
	L, A, B float64
}
// SoftPaletteSettings configures SoftPaletteEx.
type SoftPaletteSettings struct {
	// A function which can be used to restrict the allowed color-space.
	// A nil value imposes no restriction beyond RGB validity.
	CheckColor func(l, a, b float64) bool
	// The higher, the better quality but the slower. Usually two figures.
	Iterations int
	// Use up to 160000 or 8000 samples of the L*a*b* space (and thus calls to CheckColor).
	// Set this to true only if your CheckColor shapes the LAB space weirdly.
	ManySamples bool
}
// SoftPaletteEx uses K-means to cluster the color-space and returns the means
// of the clusters as a new palette of distinctive colors. It falls back to
// K-medoid (picking an actual sample) whenever a computed mean falls outside
// the allowed color-space, which can happen when a CheckColor function is set.
// Returns an error when the constrained space yields fewer samples than the
// requested color count.
func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]colorful.Color, error) {
	// Checks whether it's a valid RGB and also fulfills the potentially provided constraint.
	check := func(col lab_t) bool {
		c := colorful.LAB(col.L, col.A, col.B)
		return c.IsValid() && (settings.CheckColor == nil || settings.CheckColor(col.L, col.A, col.B))
	}
	// Sample the color space. These will be the points k-means is run on.
	dl := 0.05
	dab := 0.1
	if settings.ManySamples {
		dl = 0.01
		dab = 0.05
	}
	samples := make([]lab_t, 0, int(1.0/dl*2.0/dab*2.0/dab))
	for l := 0.0; l <= 1.0; l += dl {
		for a := -1.0; a <= 1.0; a += dab {
			for b := -1.0; b <= 1.0; b += dab {
				if check(lab_t{l, a, b}) {
					samples = append(samples, lab_t{l, a, b})
				}
			}
		}
	}
	// Too few samples would cause infinite loops in the medoid selection below.
	if len(samples) < colorsCount {
		return nil, fmt.Errorf("palettegen: more colors requested (%v) than samples available (%v). Your requested color count may be wrong, you might want to use many samples or your constraint function makes the valid color space too small", colorsCount, len(samples))
	} else if len(samples) == colorsCount {
		return labs2cols(samples), nil // Oops?
	}
	// We take the initial means out of the samples, so they are in fact medoids.
	// This helps us avoid infinite loops or arbitrary cutoffs with too restrictive constraints.
	means := make([]lab_t, colorsCount)
	for i := 0; i < colorsCount; i++ {
		// Re-draw until this mean differs from all previously chosen ones.
		for means[i] = samples[rand.Intn(len(samples))]; in(means, i, means[i]); means[i] = samples[rand.Intn(len(samples))] {
		}
	}
	clusters := make([]int, len(samples))
	samples_used := make([]bool, len(samples))
	// The actual k-means/medoid iterations
	for i := 0; i < settings.Iterations; i++ {
		// Reassign the samples to clusters, i.e. to their closest mean.
		// By the way, also check if any sample is used as a medoid and if so, mark that.
		for isample, sample := range samples {
			samples_used[isample] = false
			mindist := math.Inf(+1)
			for imean, mean := range means {
				dist := lab_dist(sample, mean)
				if dist < mindist {
					mindist = dist
					clusters[isample] = imean
				}
				// Mark samples which are used as a medoid.
				if lab_eq(sample, mean) {
					samples_used[isample] = true
				}
			}
		}
		// Compute new means according to the samples.
		for imean := range means {
			// The new mean is the average of all samples belonging to it.
			nsamples := 0
			newmean := lab_t{0.0, 0.0, 0.0}
			for isample, sample := range samples {
				if clusters[isample] == imean {
					nsamples++
					newmean.L += sample.L
					newmean.A += sample.A
					newmean.B += sample.B
				}
			}
			if nsamples > 0 {
				newmean.L /= float64(nsamples)
				newmean.A /= float64(nsamples)
				newmean.B /= float64(nsamples)
			} else {
				// That mean doesn't have any samples? Get a new unused sample from the sample list!
				var inewmean int
				for inewmean = rand.Intn(len(samples_used)); samples_used[inewmean]; inewmean = rand.Intn(len(samples_used)) {
				}
				newmean = samples[inewmean]
				samples_used[inewmean] = true
			}
			// But now we still need to check whether the new mean is an allowed color.
			if nsamples > 0 && check(newmean) {
				// It is; keep the averaged mean.
				means[imean] = newmean
			} else {
				// New mean isn't an allowed color or doesn't have any samples!
				// Switch to medoid mode and pick the closest (unused) sample.
				// This should always find something thanks to len(samples) >= colorsCount
				mindist := math.Inf(+1)
				for isample, sample := range samples {
					if !samples_used[isample] {
						dist := lab_dist(sample, newmean)
						if dist < mindist {
							mindist = dist
							newmean = sample
						}
					}
				}
				// BUGFIX: the chosen medoid was previously discarded, leaving the
				// old (invalid/empty) mean in place. Store it like upstream does.
				means[imean] = newmean
			}
		}
	}
	return labs2cols(means), nil
}
// SoftPalette is a convenience wrapper around SoftPaletteEx that uses common
// default settings: no color constraint, 50 iterations, coarse sampling.
func SoftPalette(colorsCount int) ([]colorful.Color, error) {
	defaults := SoftPaletteSettings{
		CheckColor:  nil,
		Iterations:  50,
		ManySamples: false,
	}
	return SoftPaletteEx(colorsCount, defaults)
}
// in reports whether needle occurs among the first upto entries of haystack.
func in(haystack []lab_t, upto int, needle lab_t) bool {
	for i, candidate := range haystack {
		if i >= upto {
			return false
		}
		if candidate == needle {
			return true
		}
	}
	return false
}
// LAB_DELTA is the per-component tolerance used when comparing L*a*b* samples.
const LAB_DELTA = 1e-6

// lab_eq reports whether two L*a*b* samples match within LAB_DELTA on every
// component.
func lab_eq(lab1, lab2 lab_t) bool {
	dL := math.Abs(lab1.L - lab2.L)
	dA := math.Abs(lab1.A - lab2.A)
	dB := math.Abs(lab1.B - lab2.B)
	return dL < LAB_DELTA && dA < LAB_DELTA && dB < LAB_DELTA
}
// lab_dist returns the euclidean distance between two samples directly in
// L*a*b* space. This is faster than colorful's DistanceLAB since no
// conversion to and from colorful.Color is needed.
func lab_dist(lab1, lab2 lab_t) float64 {
	dL, dA, dB := lab1.L-lab2.L, lab1.A-lab2.A, lab1.B-lab2.B
	return math.Sqrt(dL*dL + dA*dA + dB*dB)
}
// labs2cols converts a slice of L*a*b* samples into colorful.Color values.
func labs2cols(labs []lab_t) (cols []colorful.Color) {
	cols = make([]colorful.Color, 0, len(labs))
	for _, lab := range labs {
		cols = append(cols, colorful.LAB(lab.L, lab.A, lab.B))
	}
	return cols
}
// sq returns v squared.
func sq(v float64) float64 {
	return v * v
}
package pulse
import (
"fmt"
"strings"
"time"
"github.com/insolar/insolar/network/consensus/common/longbits"
)
// InvalidPulseEpoch marks pulse data carrying no usable epoch.
const InvalidPulseEpoch uint32 = 0
// EphemeralPulseEpoch tags pulses generated locally rather than by a pulsar.
const EphemeralPulseEpoch = InvalidPulseEpoch + 1
// Compile-time assertion that *Data implements DataReader.
var _ DataReader = &Data{}
// Data is the content of a single pulse: its number plus the shared
// DataExt fields.
type Data struct {
	PulseNumber Number
	DataExt
}
// DataExt carries the pulse fields other than the pulse number.
type DataExt struct {
	// ByteSize=44
	PulseEpoch uint32 // pulse number of the epoch start; EphemeralPulseEpoch for local pulses
	PulseEntropy longbits.Bits256 // randomness attached to the pulse
	NextPulseDelta uint16 // distance to the next pulse; 0 for an expected (not yet issued) pulse
	PrevPulseDelta uint16 // distance to the previous pulse; 0 for the first pulse
	Timestamp uint32 // creation time in unix seconds; 0 for ephemeral pulses
}
// DataReader is a read-only view of pulse data.
type DataReader interface {
	// GetPulseNumber returns the pulse number.
	GetPulseNumber() Number
	// GetStartOfEpoch returns the pulse number at which this pulse's epoch started.
	GetStartOfEpoch() Number
	// GetPulseEntropy() [4]uint64
	// GetNextPulseDelta returns the distance to the next pulse (0 when unknown).
	GetNextPulseDelta() uint16
	// GetPrevPulseDelta returns the distance to the previous pulse (0 for the first pulse).
	GetPrevPulseDelta() uint16
	// GetTimestamp returns the pulse creation timestamp.
	GetTimestamp() uint64
	// IsExpectedPulse reports whether this is a placeholder for a pulse not yet issued.
	IsExpectedPulse() bool
	// IsFromEphemeral reports whether the pulse was generated locally.
	IsFromEphemeral() bool
}
// NewFirstPulsarData builds pulsar pulse data for the current moment with no
// predecessor (PrevPulseDelta remains 0).
func NewFirstPulsarData(delta uint16, entropy longbits.Bits256) *Data {
	return newPulsarData(OfNow(), delta, entropy)
}
// NewPulsarData builds pulsar pulse data for pn with explicit distances to
// the next and previous pulses.
func NewPulsarData(pn Number, deltaNext uint16, deltaPrev uint16, entropy longbits.Bits256) *Data {
	r := newPulsarData(pn, deltaNext, entropy)
	r.PrevPulseDelta = deltaPrev
	return r
}
// NewFirstEphemeralData builds the first locally generated (ephemeral) pulse
// at MinTimePulse.
func NewFirstEphemeralData() *Data {
	return newEphemeralData(MinTimePulse)
}
// EntropyFunc supplies entropy for newly generated pulsar pulses.
type EntropyFunc func() longbits.Bits256
// String renders the pulse as "<number>[@epoch][,±delta | ,+next ,-prev]"
// for logs and debugging.
func (r Data) String() string {
	buf := strings.Builder{}
	buf.WriteString(fmt.Sprint(r.PulseNumber))
	ep := OfUint32(r.PulseEpoch)
	// Show the epoch only when it is set and differs from the pulse number.
	if ep != r.PulseNumber && ep != 0 {
		buf.WriteString(fmt.Sprintf("@%d", ep))
	}
	// Equal deltas collapse into a single "±" form; otherwise each nonzero
	// delta is printed with its own sign.
	if r.NextPulseDelta == r.PrevPulseDelta {
		buf.WriteString(fmt.Sprintf(",±%d", r.NextPulseDelta))
	} else {
		if r.NextPulseDelta > 0 {
			buf.WriteString(fmt.Sprintf(",+%d", r.NextPulseDelta))
		}
		if r.PrevPulseDelta > 0 {
			buf.WriteString(fmt.Sprintf(",-%d", r.PrevPulseDelta))
		}
	}
	return buf.String()
}
// newPulsarData builds pulse data as issued by a pulsar: the epoch is the
// pulse number itself and the timestamp comes from the wall clock.
// Panics when delta is zero — a pulsar pulse must announce its successor.
func newPulsarData(pn Number, delta uint16, entropy longbits.Bits256) *Data {
	if delta == 0 {
		panic("delta cant be zero")
	}
	s := Data{
		PulseNumber: pn,
		DataExt: DataExt{
			PulseEpoch: pn.AsUint32(),
			PulseEntropy: entropy,
			Timestamp: uint32(time.Now().Unix()),
			NextPulseDelta: delta,
			PrevPulseDelta: 0,
		},
	}
	return &s
}
// newEphemeralData builds locally generated pulse data: the epoch is
// EphemeralPulseEpoch, the timestamp is zero and the entropy is derived
// deterministically from the pulse number.
func newEphemeralData(pn Number) *Data {
	s := Data{
		PulseNumber: pn,
		DataExt: DataExt{
			PulseEpoch: EphemeralPulseEpoch,
			Timestamp: 0,
			NextPulseDelta: 1,
			PrevPulseDelta: 0,
		},
	}
	fixedPulseEntropy(&s.PulseEntropy, s.PulseNumber)
	return &s
}
/* This function has a fixed implementation and MUST remain unchanged as some elements of Consensus rely on identical behavior of this functions. */
func fixedPulseEntropy(v *longbits.Bits256, pn Number) {
	// Deterministic: the entropy depends only on the pulse number.
	longbits.FillBitsWithStaticNoise(uint32(pn), (*v)[:])
}
// EnsurePulseData panics unless this value satisfies the common pulse
// invariants: a time pulse number, a special-or-time epoch, and a nonzero
// distance to the next pulse (mirrors IsValidPulseData).
func (r *Data) EnsurePulseData() {
	if !r.PulseNumber.IsTimePulse() {
		panic("incorrect pulse number")
	}
	if !OfUint32(r.PulseEpoch).IsSpecialOrTimePulse() {
		panic("incorrect pulse epoch")
	}
	if r.NextPulseDelta == 0 {
		panic("next delta can't be zero")
	}
}
// IsValidPulseData reports whether this describes a well-formed issued
// pulse: a time pulse number, a special-or-time epoch, and a nonzero
// distance to the next pulse.
func (r *Data) IsValidPulseData() bool {
	return r.PulseNumber.IsTimePulse() &&
		OfUint32(r.PulseEpoch).IsSpecialOrTimePulse() &&
		r.NextPulseDelta != 0
}
// IsEmpty reports whether no pulse number has been assigned yet.
func (r *Data) IsEmpty() bool {
	return r.PulseNumber.IsUnknown()
}
// IsValidExpectedPulseData reports whether this describes a well-formed
// expected (not yet issued) pulse: a time pulse number, a special-or-time
// epoch, and no predecessor delta recorded.
func (r *Data) IsValidExpectedPulseData() bool {
	return r.PulseNumber.IsTimePulse() &&
		OfUint32(r.PulseEpoch).IsSpecialOrTimePulse() &&
		r.PrevPulseDelta == 0
}
// EnsurePulsarData panics unless this is well-formed pulsar-issued pulse
// data: the epoch must itself be a time pulse, plus the common invariants.
func (r *Data) EnsurePulsarData() {
	if !OfUint32(r.PulseEpoch).IsTimePulse() {
		panic("incorrect pulse epoch by pulsar")
	}
	r.EnsurePulseData()
}
// IsValidPulsarData reports whether this is well-formed pulsar-issued data.
func (r *Data) IsValidPulsarData() bool {
	if !OfUint32(r.PulseEpoch).IsTimePulse() {
		return false
	}
	return r.IsValidPulseData()
}
// EnsureEphemeralData panics unless this is well-formed locally generated
// (ephemeral) pulse data.
func (r *Data) EnsureEphemeralData() {
	if r.PulseEpoch != EphemeralPulseEpoch {
		panic("incorrect pulse epoch")
	}
	r.EnsurePulseData()
}
// IsValidEphemeralData reports whether this is well-formed ephemeral data.
func (r *Data) IsValidEphemeralData() bool {
	if r.PulseEpoch != EphemeralPulseEpoch {
		return false
	}
	return r.IsValidPulseData()
}
// IsFromPulsar reports whether the pulse was issued by a pulsar: both the
// pulse number and the epoch are time pulses.
func (r *Data) IsFromPulsar() bool {
	return r.PulseNumber.IsTimePulse() && OfUint32(r.PulseEpoch).IsTimePulse()
}
// IsFromEphemeral reports whether the pulse was generated locally.
func (r *Data) IsFromEphemeral() bool {
	return r.PulseNumber.IsTimePulse() && r.PulseEpoch == EphemeralPulseEpoch
}
// GetStartOfEpoch returns the epoch as a pulse number when this is a time
// pulse, and the pulse number itself otherwise.
// NOTE(review): for ephemeral pulses this returns EphemeralPulseEpoch, a
// special (non-time) number — confirm callers expect that.
func (r *Data) GetStartOfEpoch() Number {
	ep := OfUint32(r.PulseEpoch)
	if r.PulseNumber.IsTimePulse() {
		return ep
	}
	return r.PulseNumber
}
// CreateNextPulse produces the pulse following this one, preserving its
// kind: ephemeral stays ephemeral, otherwise a pulsar pulse is generated
// with entropy from entropyGen and this pulse's NextPulseDelta.
func (r *Data) CreateNextPulse(entropyGen EntropyFunc) *Data {
	if r.IsFromEphemeral() {
		return r.createNextEphemeralPulse()
	}
	return r.createNextPulsarPulse(r.NextPulseDelta, entropyGen)
}
// IsValidNext reports whether n is a well-formed direct successor of this
// pulse: the numbers and deltas must chain, and n must be valid for this
// pulse's kind (pulsar / ephemeral / generic).
func (r *Data) IsValidNext(n *Data) bool {
	if r.IsExpectedPulse() || r.GetNextPulseNumber() != n.PulseNumber || r.NextPulseDelta != n.PrevPulseDelta {
		return false
	}
	switch {
	case r.IsFromPulsar():
		return n.IsValidPulsarData()
	case r.IsFromEphemeral():
		return n.IsValidEphemeralData()
	}
	return n.IsValidPulseData()
}
// IsValidPrev reports whether p is a well-formed direct predecessor of this
// pulse, using the same chaining and kind checks as IsValidNext.
func (r *Data) IsValidPrev(p *Data) bool {
	switch {
	case r.IsFirstPulse() || p.IsExpectedPulse() || p.GetNextPulseNumber() != r.PulseNumber || p.NextPulseDelta != r.PrevPulseDelta:
		return false
	case r.IsFromPulsar():
		return p.IsValidPulsarData()
	case r.IsFromEphemeral():
		return p.IsValidEphemeralData()
	default:
		return p.IsValidPulseData()
	}
}
// GetNextPulseNumber returns the number of the announced next pulse.
// Panics when no successor is announced (this is an expected pulse).
func (r *Data) GetNextPulseNumber() Number {
	if r.IsExpectedPulse() {
		panic("illegal state")
	}
	return r.PulseNumber.Next(r.NextPulseDelta)
}
// GetPrevPulseNumber returns the number of the preceding pulse.
// Panics when this is the first pulse (no predecessor recorded).
func (r *Data) GetPrevPulseNumber() Number {
	if r.IsFirstPulse() {
		panic("illegal state")
	}
	return r.PulseNumber.Prev(r.PrevPulseDelta)
}
// CreateNextExpected builds a placeholder for the pulse that should follow
// this one: number and PrevPulseDelta are chained, while NextPulseDelta is 0
// (marking it as expected). Ephemeral pulses propagate their epoch.
func (r *Data) CreateNextExpected() *Data {
	s := Data{
		PulseNumber: r.GetNextPulseNumber(),
		DataExt: DataExt{
			PrevPulseDelta: r.NextPulseDelta,
			NextPulseDelta: 0,
		},
	}
	if r.IsFromEphemeral() {
		s.PulseEpoch = r.PulseEpoch
	}
	return &s
}
// CreateNextEphemeralPulse builds the next ephemeral pulse.
// Panics when this pulse is not ephemeral itself.
func (r *Data) CreateNextEphemeralPulse() *Data {
	if !r.IsFromEphemeral() {
		panic("prev is not ephemeral")
	}
	return r.createNextEphemeralPulse()
}
// createNextEphemeralPulse chains number and deltas without kind checks.
func (r *Data) createNextEphemeralPulse() *Data {
	s := newEphemeralData(r.GetNextPulseNumber())
	s.PrevPulseDelta = r.NextPulseDelta
	return s
}
// CreateNextPulsarPulse builds the next pulsar pulse with the given delta.
// Panics when this pulse is ephemeral.
func (r *Data) CreateNextPulsarPulse(delta uint16, entropyGen EntropyFunc) *Data {
	if r.IsFromEphemeral() {
		panic("prev is ephemeral")
	}
	return r.createNextPulsarPulse(delta, entropyGen)
}
// createNextPulsarPulse chains number and deltas without kind checks.
func (r *Data) createNextPulsarPulse(delta uint16, entropyGen EntropyFunc) *Data {
	s := newPulsarData(r.GetNextPulseNumber(), delta, entropyGen())
	s.PrevPulseDelta = r.NextPulseDelta
	return s
}
// GetPulseNumber returns the pulse number.
func (r *Data) GetPulseNumber() Number {
	return r.PulseNumber
}
// GetNextPulseDelta returns the distance to the next pulse (0 when this is
// an expected pulse).
func (r *Data) GetNextPulseDelta() uint16 {
	return r.NextPulseDelta
}
// GetPrevPulseDelta returns the distance to the previous pulse (0 for the
// first pulse).
func (r *Data) GetPrevPulseDelta() uint16 {
	return r.PrevPulseDelta
}
// GetTimestamp returns the creation time in unix seconds (0 for ephemeral
// pulses).
func (r *Data) GetTimestamp() uint64 {
	return uint64(r.Timestamp)
}
// IsExpectedPulse reports whether this is a placeholder for a pulse that has
// not been issued yet (no successor announced).
func (r *Data) IsExpectedPulse() bool {
	return r.PulseNumber.IsTimePulse() && r.NextPulseDelta == 0
}
func (r *Data) IsFirstPulse() bool {
return r.PulseNumber.IsTimePulse() && r.PrevPulseDelta == 0
} | network/consensus/common/pulse/pulse_data.go | 0.665737 | 0.4575 | pulse_data.go | starcoder |
package synth
// Z-order curve utility methods. A z-order number represents a single value
// of a global x,y positioning. The number of bits in the z-order number
// indicate the zoom level. Also known as Morton codes.
// https://en.wikipedia.org/wiki/Z-order_curve
// https://fgiesen.wordpress.com/2009/12/13/decoding-morton-codes
// http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN
// ZMerge interleaves the bits of a and b into a single z-order encoded
// number. The bits of b land on the odd positions, one above the matching
// a-bit, so b supplies the higher-order bit of each pair.
func ZMerge(a, b uint32) (m uint64) {
	return expand(a) | (expand(b) << 1)
}

// ZSplit de-interleaves a z-order encoded number back into its two
// components; b is recovered from the higher-order (odd) bit positions.
func ZSplit(m uint64) (a, b uint32) {
	return compact(m), compact(m >> 1)
}

// expand spreads the 32 bits of n across 64 bits by inserting a zero before
// each original bit, readying the value to be OR-ed with a shifted partner.
// Each step doubles the gap between groups using a shift-xor-mask trick.
func expand(n uint32) uint64 {
	x := uint64(n) & 0x00000000ffffffff
	x = (x ^ (x << 16)) & 0x0000ffff0000ffff
	x = (x ^ (x << 8)) & 0x00ff00ff00ff00ff
	x = (x ^ (x << 4)) & 0x0f0f0f0f0f0f0f0f
	x = (x ^ (x << 2)) & 0x3333333333333333
	x = (x ^ (x << 1)) & 0x5555555555555555
	return x
}

// compact is the inverse of expand: it keeps only the even-position bits
// and packs them back together into a 32-bit value.
func compact(n uint64) uint32 {
	x := n & 0x5555555555555555
	x = (x ^ (x >> 1)) & 0x3333333333333333
	x = (x ^ (x >> 2)) & 0x0f0f0f0f0f0f0f0f
	x = (x ^ (x >> 4)) & 0x00ff00ff00ff00ff
	x = (x ^ (x >> 8)) & 0x0000ffff0000ffff
	x = (x ^ (x >> 16)) & 0x00000000ffffffff
	return uint32(x)
}
// ZLabel returns the quadrant-digit label ('0'-'3' per zoom level) for a
// z-order merge value; the most significant quadrant digit comes first.
func ZLabel(zoom uint, merge uint64) (key string) {
	buff := make([]byte, zoom)
	for z := zoom; z > 0; z-- {
		// Extract the 2-bit quadrant for this level and map it to '0'..'3'.
		quadrant := (merge >> ((z - 1) * 2)) & 3
		buff[zoom-z] = byte('0' + quadrant)
	}
	return string(buff)
}
package rtp
/** This class is used by the transmission component to store the incoming RTP and RTCP data in. */
type RawPacket struct {
	packetdata []byte // raw packet bytes; the slice is referenced, not copied
	receivetime *RTPTime // time at which the packet was received
	senderaddress Address // address the packet originated from
	isrtp bool // true for RTP data, false for RTCP data
}
/** NewRawPacket creates an instance which stores the given data.
 * Only the slice reference is stored, no actual copy is made! The address
 * from which this packet originated is set to address and the time at which
 * the packet was received is set to recvtime. The rtp flag indicates whether
 * this is RTP data (true) or RTCP data (false).
 */
func NewRawPacket(data []byte,
	address Address,
	recvtime *RTPTime,
	rtp bool) *RawPacket {
	return &RawPacket{
		packetdata:    data,
		receivetime:   recvtime,
		senderaddress: address,
		isrtp:         rtp,
	}
}
// GetData returns the payload bytes contained in this packet.
func (p *RawPacket) GetData() []byte {
	return p.packetdata
}

// GetDataLength returns the length in bytes of the stored payload.
func (p *RawPacket) GetDataLength() int {
	return len(p.packetdata)
}

// GetReceiveTime returns the time at which this packet was received.
func (p *RawPacket) GetReceiveTime() *RTPTime {
	return p.receivetime
}

// GetSenderAddress returns the address the packet originated from.
func (p *RawPacket) GetSenderAddress() Address {
	return p.senderaddress
}

// IsRTP reports whether the payload is RTP data (false means RTCP).
func (p *RawPacket) IsRTP() bool {
	return p.isrtp
}
/** Sets the pointer to the data stored in this packet to zero.
* Sets the pointer to the data stored in this packet to zero. This will prevent
* a \c delete call for the actual data when the destructor of RTPRawPacket is called.
* This function is used by the RTPPacket and RTCPCompoundPacket classes to obtain
* the packet data (without having to copy it) and to make sure the data isn't deleted
* when the destructor of RTPRawPacket is called.
*/
func (this *RawPacket) ZeroData() {
this.packetdata = nil
} | rtp/rawpacket.go | 0.712332 | 0.440409 | rawpacket.go | starcoder |
package physics
import (
"encoding/gob"
"io"
"math"
"github.com/egonelbre/exp/bit"
)
const (
	// HistorySize is the number of frames kept in the State ring buffer.
	HistorySize = 16
	// Fixed-point quantization scales for positions and rotations.
	UnitsPerMeter = 512
	UnitsPerQuat = 1024
	// Quantization step sizes, used as comparison tolerances in Equals.
	PositionUnit = 1.0 / UnitsPerMeter
	QuatUnit = 1.0 / UnitsPerQuat
)

// State holds a ring buffer of the last HistorySize frames.
type State struct {
	History [HistorySize]*Frame
	FrameIndex int // index of the current frame; -1 before the first frame
}

// Frame is a snapshot of all cube states at one point in time.
type Frame struct {
	Cubes []Cube
}

// Rotation is an orientation with quaternion-style X, Y, Z, W components.
type Rotation struct{ X, Y, Z, W float32 }
// same32 reports whether delta is no larger in magnitude than precision.
func same32(delta, precision float32) bool {
	mag := delta
	if mag < 0 {
		mag = -mag
	}
	return mag <= precision
}
// Equals reports whether a and b match to within one rotation
// quantization step (QuatUnit) per component.
func (a Rotation) Equals(b Rotation) bool {
	return same32(a.X-b.X, QuatUnit) &&
		same32(a.Y-b.Y, QuatUnit) &&
		same32(a.Z-b.Z, QuatUnit) &&
		same32(a.W-b.W, QuatUnit)
}
// Position is a point or vector in 3D space.
type Position struct{ X, Y, Z float32 }

// Sub returns the component-wise difference a - b.
func (a Position) Sub(b Position) Position {
	return Position{X: a.X - b.X, Y: a.Y - b.Y, Z: a.Z - b.Z}
}

// Len returns the Euclidean length of the vector.
func (p Position) Len() float32 {
	x := float64(p.X)
	y := float64(p.Y)
	z := float64(p.Z)
	return float32(math.Sqrt(x*x + y*y + z*z))
}
// Equals reports whether a and b match to within one position
// quantization step (PositionUnit) per component.
func (a Position) Equals(b Position) bool {
	return same32(a.X-b.X, PositionUnit) &&
		same32(a.Y-b.Y, PositionUnit) &&
		same32(a.Z-b.Z, PositionUnit)
}

// Cube is the physics state of a single simulated cube.
type Cube struct {
	Pos Position
	Rot Rotation
	Interacting bool // whether the cube is currently being interacted with
}
// NewFrame returns a Frame holding size zero-valued cubes.
func NewFrame(size int) *Frame {
	return &Frame{Cubes: make([]Cube, size)}
}

// Assign replaces frame's cubes with an independent copy of other's cubes.
func (frame *Frame) Assign(other *Frame) {
	cubes := make([]Cube, 0, len(other.Cubes))
	cubes = append(cubes, other.Cubes...)
	frame.Cubes = cubes
}
// ReadFrom decodes every cube in the frame from r, in slice order,
// stopping at the first error.
func (frame *Frame) ReadFrom(r io.Reader) error {
	for i := range frame.Cubes {
		if err := frame.Cubes[i].ReadFrom(r); err != nil {
			return err
		}
	}
	return nil
}

// WriteTo encodes every cube in the frame to w, in slice order,
// stopping at the first error.
func (frame *Frame) WriteTo(w io.Writer) error {
	for i := range frame.Cubes {
		if err := frame.Cubes[i].WriteTo(w); err != nil {
			return err
		}
	}
	return nil
}
// Equals reports whether a and b hold the same cubes, comparing
// positions and rotations with the quantization tolerances.
func (a *Frame) Equals(b *Frame) bool {
	if len(a.Cubes) != len(b.Cubes) {
		return false
	}
	for i := range a.Cubes {
		x := a.Cubes[i]
		y := b.Cubes[i]
		if x.Interacting != y.Interacting || !x.Pos.Equals(y.Pos) || !x.Rot.Equals(y.Rot) {
			return false
		}
	}
	return true
}
// Cube utilities

// ReadFrom decodes the cube's rotation, position and interaction flag
// from r, in the same field order WriteTo produces.
func (cube *Cube) ReadFrom(r io.Reader) error {
	// The flag travels as an int32 (1 = interacting, 0 = not).
	interacting := int32(0)
	err := bit.Read(r,
		&cube.Rot.X, &cube.Rot.Y, &cube.Rot.Z, &cube.Rot.W,
		&cube.Pos.X, &cube.Pos.Y, &cube.Pos.Z,
		&interacting,
	)
	cube.Interacting = interacting == 1
	return err
}

// WriteTo encodes the cube to w in the fixed field order ReadFrom expects.
func (cube *Cube) WriteTo(w io.Writer) error {
	interacting := int32(0)
	if cube.Interacting {
		interacting = 1
	}
	return bit.Write(w,
		&cube.Rot.X, &cube.Rot.Y, &cube.Rot.Z, &cube.Rot.W,
		&cube.Pos.X, &cube.Pos.Y, &cube.Pos.Z,
		&interacting,
	)
}
// NewState returns a State with HistorySize empty frames of the given size.
func NewState(size int) *State {
	s := &State{}
	for i := range s.History {
		s.History[i] = NewFrame(size)
	}
	// Start before the first frame; the first ReadNext/IncFrame moves to 0.
	s.FrameIndex = -1
	return s
}

// IncFrame advances to the next frame slot in the ring.
func (s *State) IncFrame() {
	s.FrameIndex += 1
}

// ReadNext advances the ring and fills the new current frame from r.
func (s *State) ReadNext(r io.Reader) error {
	s.FrameIndex += 1
	return s.Current().ReadFrom(r)
}

// Current returns the most recently written frame.
func (s *State) Current() *Frame { return s.Prev(0) }

// Baseline returns the frame 6 steps back — presumably the
// delta-compression baseline; confirm against callers.
func (s *State) Baseline() *Frame { return s.Prev(6) }

// Historic returns the frame 8 steps back.
func (s *State) Historic() *Frame { return s.Prev(8) }

// Prev returns the frame i steps before the current one, wrapping
// around the ring. The +HistorySize keeps the modulo argument
// non-negative for i <= FrameIndex+HistorySize.
func (s *State) Prev(i int) *Frame {
	return s.History[(s.FrameIndex-i+HistorySize)%HistorySize]
}

// Register []Cube so frames can be gob-encoded.
func init() {
	gob.Register([]Cube{})
}
package hbook
import (
"errors"
"sort"
)
// Indices for the under- and over-flow 1-dim bins.
const (
	UnderflowBin1D = -1
	OverflowBin1D = -2
)

// Sentinel errors used as panic values by the binning constructors.
var (
	errInvalidXAxis = errors.New("hbook: invalid X-axis limits")
	errEmptyXAxis = errors.New("hbook: X-axis with zero bins")
	errShortXAxis = errors.New("hbook: too few 1-dim X-bins")
	errOverlapXAxis = errors.New("hbook: invalid X-binning (overlap)")
	errNotSortedXAxis = errors.New("hbook: X-edges slice not sorted")
	errDupEdgesXAxis = errors.New("hbook: duplicates in X-edge values")
	errInvalidYAxis = errors.New("hbook: invalid Y-axis limits")
	errEmptyYAxis = errors.New("hbook: Y-axis with zero bins")
	errShortYAxis = errors.New("hbook: too few 1-dim Y-bins")
	errOverlapYAxis = errors.New("hbook: invalid Y-binning (overlap)")
	errNotSortedYAxis = errors.New("hbook: Y-edges slice not sorted")
	errDupEdgesYAxis = errors.New("hbook: duplicates in Y-edge values")
)

// Binning1D is a 1-dim binning of the x-axis.
type Binning1D struct {
	Bins []Bin1D // in-range bins, sorted by ascending x
	Dist Dist1D // distribution over all fills, including out-of-range ones
	Outflows [2]Dist1D // [0] underflow, [1] overflow distributions
	XRange Range // axis range; x in [Min, Max) is in range (see coordToIndex)
}
// newBinning1D builds a uniform binning of n bins over [xmin, xmax).
// It panics when the limits are inverted or equal, or n is not positive.
func newBinning1D(n int, xmin, xmax float64) Binning1D {
	switch {
	case xmin >= xmax:
		panic(errInvalidXAxis)
	case n <= 0:
		panic(errEmptyXAxis)
	}
	out := Binning1D{
		Bins:   make([]Bin1D, n),
		XRange: Range{Min: xmin, Max: xmax},
	}
	width := out.XRange.Width() / float64(n)
	for i := range out.Bins {
		out.Bins[i].Range.Min = xmin + float64(i)*width
		out.Bins[i].Range.Max = xmin + float64(i+1)*width
	}
	return out
}
// newBinning1DFromBins builds a binning from explicit bin ranges.
// Bins are sorted by ascending x and must not overlap; gaps between
// consecutive bins are allowed.
func newBinning1DFromBins(xbins []Range) Binning1D {
	if len(xbins) < 1 {
		panic(errShortXAxis)
	}
	n := len(xbins)
	bng := Binning1D{
		Bins: make([]Bin1D, n),
	}
	for i, xbin := range xbins {
		bin := &bng.Bins[i]
		bin.Range = xbin
	}
	sort.Sort(Bin1Ds(bng.Bins))
	// After sorting, adjacent bins may touch but not overlap.
	for i := 0; i < len(bng.Bins)-1; i++ {
		b0 := bng.Bins[i]
		b1 := bng.Bins[i+1]
		if b0.Range.Max > b1.Range.Min {
			panic(errOverlapXAxis)
		}
	}
	// The axis spans from the first bin's low edge to the last bin's high edge.
	bng.XRange = Range{Min: bng.Bins[0].XMin(), Max: bng.Bins[n-1].XMax()}
	return bng
}

// newBinning1DFromEdges builds a contiguous binning from n+1 sorted,
// distinct edge values.
func newBinning1DFromEdges(edges []float64) Binning1D {
	if len(edges) <= 1 {
		panic(errShortXAxis)
	}
	if !sort.IsSorted(sort.Float64Slice(edges)) {
		panic(errNotSortedXAxis)
	}
	n := len(edges) - 1
	bng := Binning1D{
		Bins: make([]Bin1D, n),
		XRange: Range{Min: edges[0], Max: edges[n]},
	}
	for i := range bng.Bins {
		bin := &bng.Bins[i]
		xmin := edges[i]
		xmax := edges[i+1]
		// Edges are sorted, so equal neighbours are the only duplicates possible.
		if xmin == xmax {
			panic(errDupEdgesXAxis)
		}
		bin.Range.Min = xmin
		bin.Range.Max = xmax
	}
	return bng
}
// clone returns an independent deep copy of the binning.
func (bng *Binning1D) clone() Binning1D {
	bins := make([]Bin1D, len(bng.Bins))
	for i := range bng.Bins {
		bins[i] = bng.Bins[i].clone()
	}
	return Binning1D{
		Bins: bins,
		Dist: bng.Dist.clone(),
		Outflows: [2]Dist1D{
			bng.Outflows[0].clone(),
			bng.Outflows[1].clone(),
		},
		XRange: bng.XRange.clone(),
	}
}
// entries returns the number of entries recorded by the global distribution.
func (bng *Binning1D) entries() int64 {
	return bng.Dist.Entries()
}

// effEntries returns the effective number of entries, as reported by
// the underlying distribution.
func (bng *Binning1D) effEntries() float64 {
	return bng.Dist.EffEntries()
}

// xMin returns the low edge of the X-axis
func (bng *Binning1D) xMin() float64 {
	return bng.XRange.Min
}

// xMax returns the high edge of the X-axis
func (bng *Binning1D) xMax() float64 {
	return bng.XRange.Max
}
// fill records (x, w) in the global distribution and then in the
// matching bin, underflow, or overflow distribution.
func (bng *Binning1D) fill(x, w float64) {
	idx := bng.coordToIndex(x)
	bng.Dist.fill(x, w)
	if idx < 0 {
		// UnderflowBin1D (-1) maps to Outflows[0], OverflowBin1D (-2) to Outflows[1].
		bng.Outflows[-idx-1].fill(x, w)
		return
	}
	if idx == len(bng.Bins) {
		// gap bin: x is inside the axis range but between non-contiguous bins.
		return
	}
	bng.Bins[idx].fill(x, w)
}

// coordToIndex returns the bin index corresponding to the coordinate x.
// Coordinates below XRange.Min map to UnderflowBin1D and coordinates at
// or above XRange.Max map to OverflowBin1D.
func (bng *Binning1D) coordToIndex(x float64) int {
	switch {
	case x < bng.XRange.Min:
		return UnderflowBin1D
	case x >= bng.XRange.Max:
		return OverflowBin1D
	}
	return Bin1Ds(bng.Bins).IndexOf(x)
}
// scaleW multiplies every stored weight — global distribution,
// both outflows, and each bin — by f.
func (bng *Binning1D) scaleW(f float64) {
	bng.Dist.scaleW(f)
	bng.Outflows[0].scaleW(f)
	bng.Outflows[1].scaleW(f)
	for i := range bng.Bins {
		bin := &bng.Bins[i]
		bin.scaleW(f)
	}
}

// Underflow returns the distribution of fills below the axis range.
func (bng *Binning1D) Underflow() *Dist1D {
	return &bng.Outflows[0]
}

// Overflow returns the distribution of fills above the axis range.
func (bng *Binning1D) Overflow() *Dist1D {
	return &bng.Outflows[1]
}
package assertions
import (
"fmt"
"github.com/particle-iot/particle-cli-wrapper/Godeps/_workspace/src/github.com/smartystreets/assertions/internal/oglematchers"
)
// ShouldBeGreaterThan receives exactly two parameters and ensures that the first is greater than the second.
func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string {
	// need validates the arity of the expected arguments.
	if fail := need(1, expected); fail != success {
		return fail
	}
	if matchError := oglematchers.GreaterThan(expected[0]).Matches(actual); matchError != nil {
		return fmt.Sprintf(shouldHaveBeenGreater, actual, expected[0])
	}
	return success
}
// ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that the first is greater than or equal to the second.
func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	// else-after-return removed for idiomatic early-return style.
	if matchError := oglematchers.GreaterOrEqual(expected[0]).Matches(actual); matchError != nil {
		return fmt.Sprintf(shouldHaveBeenGreaterOrEqual, actual, expected[0])
	}
	return success
}
// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than the second.
func ShouldBeLessThan(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	// else-after-return removed for idiomatic early-return style.
	if matchError := oglematchers.LessThan(expected[0]).Matches(actual); matchError != nil {
		return fmt.Sprintf(shouldHaveBeenLess, actual, expected[0])
	}
	return success
}
// ShouldBeLessThanOrEqualTo receives exactly two parameters and ensures that the first is less than or equal to the second.
// (The doc comment previously named the wrong function.)
func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string {
	if fail := need(1, expected); fail != success {
		return fail
	}
	if matchError := oglematchers.LessOrEqual(expected[0]).Matches(actual); matchError != nil {
		// NOTE(review): this reuses the strict "less than" failure message;
		// confirm whether a dedicated "less or equal" message constant exists.
		return fmt.Sprintf(shouldHaveBeenLess, actual, expected[0])
	}
	return success
}
// ShouldBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound.
// It ensures that the actual value is between both bounds (but not equal to either of them).
func ShouldBeBetween(actual interface{}, expected ...interface{}) string {
	if fail := need(2, expected); fail != success {
		return fail
	}
	lower, upper, fail := deriveBounds(expected)
	if fail != success {
		return fail
	}
	// else-after-return removed for idiomatic early-return style.
	if !isBetween(actual, lower, upper) {
		return fmt.Sprintf(shouldHaveBeenBetween, actual, lower, upper)
	}
	return success
}
// ShouldNotBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound.
// It ensures that the actual value is NOT between both bounds.
func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string {
	if fail := need(2, expected); fail != success {
		return fail
	}
	lower, upper, fail := deriveBounds(expected)
	if fail != success {
		return fail
	}
	// else-after-return removed for idiomatic early-return style.
	if isBetween(actual, lower, upper) {
		return fmt.Sprintf(shouldNotHaveBeenBetween, actual, lower, upper)
	}
	return success
}
// deriveBounds validates and orders the two expected bounds, returning
// them as (lower, upper). It fails when the bounds are equal; bounds
// given in descending order are swapped.
func deriveBounds(values []interface{}) (lower interface{}, upper interface{}, fail string) {
	lower = values[0]
	upper = values[1]
	if ShouldNotEqual(lower, upper) != success {
		return nil, nil, fmt.Sprintf(shouldHaveDifferentUpperAndLower, lower)
	}
	if ShouldBeLessThan(lower, upper) != success {
		lower, upper = upper, lower
	}
	return lower, upper, success
}
// isBetween reports whether value lies strictly between lower and upper.
func isBetween(value, lower, upper interface{}) bool {
	return ShouldBeGreaterThan(value, lower) == success &&
		ShouldBeLessThan(value, upper) == success
}
// ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound.
// It ensures that the actual value is between both bounds or equal to one of them.
func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string {
	if fail := need(2, expected); fail != success {
		return fail
	}
	lower, upper, fail := deriveBounds(expected)
	if fail != success {
		return fail
	}
	// else-after-return removed for idiomatic early-return style.
	if !isBetweenOrEqual(actual, lower, upper) {
		return fmt.Sprintf(shouldHaveBeenBetweenOrEqual, actual, lower, upper)
	}
	return success
}

// ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound.
// It ensures that the actual value is not between the bounds nor equal to either of them.
func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string {
	if fail := need(2, expected); fail != success {
		return fail
	}
	lower, upper, fail := deriveBounds(expected)
	if fail != success {
		return fail
	}
	if isBetweenOrEqual(actual, lower, upper) {
		return fmt.Sprintf(shouldNotHaveBeenBetweenOrEqual, actual, lower, upper)
	}
	return success
}
func isBetweenOrEqual(value, lower, upper interface{}) bool {
if ShouldBeGreaterThanOrEqualTo(value, lower) != success {
return false
} else if ShouldBeLessThanOrEqualTo(value, upper) != success {
return false
}
return true
} | Godeps/_workspace/src/github.com/smartystreets/assertions/quantity.go | 0.832577 | 0.748076 | quantity.go | starcoder |
package learning
import (
"github.com/amirblum/SynergyAI/model"
)
// DeltaCalcer converts a raw score difference into a learning delta.
// The bool result reports whether an update should be applied.
type DeltaCalcer interface {
	CalcDelta(float64) (float64, bool)
}
// SimpleDelta scales every difference by a fixed learning rate and
// always applies the update.
type SimpleDelta struct {
	eta float64
}

// CreateSimpleDelta returns a SimpleDelta with learning rate eta.
func CreateSimpleDelta(eta float64) *SimpleDelta {
	return &SimpleDelta{eta: eta}
}

// CalcDelta returns eta*diff; the second result is always true.
func (delta *SimpleDelta) CalcDelta(diff float64) (float64, bool) {
	scaled := diff * delta.eta
	return scaled, true
}
// AverageDelta buffers differences and emits a scaled average once
// every `frequency` observations.
type AverageDelta struct {
	eta float64
	lastDifferences []float64
	frequency int
	count int
}

// CreateAverageDelta returns an AverageDelta that averages over
// frequency samples with learning rate eta.
func CreateAverageDelta(eta float64, frequency int) *AverageDelta {
	return &AverageDelta{eta, make([]float64, frequency), frequency, 0}
}

// CalcDelta buffers diff. On every frequency-th call it returns
// eta * average(buffered diffs) and true; otherwise 0 and false.
func (delta *AverageDelta) CalcDelta(diff float64) (float64, bool) {
	delta.lastDifferences[delta.count] = diff
	delta.count++
	if delta.count < delta.frequency {
		return 0, false
	}
	sum := 0.
	for _, d := range delta.lastDifferences {
		sum += d
	}
	// Reset only the counter: the buffer is fully overwritten before the
	// next average is taken, so reallocating it (as before) was wasted work.
	delta.count = 0
	return delta.eta * (sum / float64(delta.frequency)), true
}
// TemporalDifferenceAlgorithm learns the synergy matrix from the gap
// between predicted and observed team scores.
type TemporalDifferenceAlgorithm struct {
	deltaCalcer DeltaCalcer
}

// CreateTemporalDifferenceAlgorithm returns an algorithm that uses
// calcer to turn score differences into matrix updates.
func CreateTemporalDifferenceAlgorithm(calcer DeltaCalcer) *TemporalDifferenceAlgorithm {
	return &TemporalDifferenceAlgorithm{calcer}
}
// LearnSynergy nudges world's synergy matrix toward explaining the gap
// between world's predicted score and realWorld's observed score for
// team on task. Teams of fewer than 2 workers carry no synergy
// information and are skipped.
func (alg TemporalDifferenceAlgorithm) LearnSynergy(world, realWorld *model.World, team *model.Team, task model.Task) {
	// Nothing to learn from teams smaller than 2
	if team.Length() < 2 {
		return
	}
	// Create a "boring world", where no-one affects anyone elses work. This gives us a normalizing factor.
	boringWorld := model.CreateWorld(world.Workers, false)
	normalizingFactor, _, _ := boringWorld.ScoreTeam(team, task)
	// Cover our asses in case of bad team
	if normalizingFactor == 0 {
		return
	}
	myScore, _, _ := world.ScoreTeam(team, task)
	realScore, _, _ := realWorld.ScoreTeam(team, task)
	// We normalize the scores to reduce influence from the scale of the ability.
	// This way, the resulting difference is on a similar scale to the synergies, and can be used to
	// learn the matrix.
	myScore /= normalizingFactor
	realScore /= normalizingFactor
	difference := (realScore - myScore)
	// In addition, we further reduce the difference because bigger teams give a bigger score.
	difference /= float64(team.Length())
	if delta, toChange := alg.deltaCalcer.CalcDelta(difference); toChange {
		// Update the matrix. Only the lower triangle
		// (worker.ID > otherWorker.ID) is stored/updated, and entries are
		// clamped at zero.
		for _, worker := range team.Workers {
			for _, otherWorker := range team.Workers {
				if worker.ID > otherWorker.ID {
					world.Synergy[worker.ID][otherWorker.ID] += delta
					if world.Synergy[worker.ID][otherWorker.ID] < 0 {
						world.Synergy[worker.ID][otherWorker.ID] = 0
					}
				}
			}
		}
	}
}
package flat
import (
"go-simulate-a-city/common/commonopengl"
"time"
"go-simulate-a-city/sim/config"
"go-simulate-a-city/sim/core/gamegrid"
"go-simulate-a-city/sim/input"
"github.com/go-gl/glfw/v3.2/glfw"
"github.com/go-gl/mathgl/mgl32"
)
// Camera pans and zooms in response to input events and converts
// between pixel, board, and screen coordinates. Event handling runs in
// the run() goroutine; StepUpdate applies per-frame keyboard motion.
type Camera struct {
	mouseMoves chan mgl32.Vec2 // pixel positions from the mouse dispatcher
	mouseScrolls chan float32 // scroll wheel deltas
	keyPresses chan glfw.Key
	keyReleases chan glfw.Key
	highResTicks chan time.Time // NOTE(review): never read in the visible code — confirm use
	ControlChannel chan int // any value sent stops the run() goroutine
	offsetChangeRegs []chan mgl32.Vec2 // subscribers notified when Offset changes
	OffsetChangeRegChannel chan chan mgl32.Vec2
	scaleChangeRegs []chan float32 // subscribers notified when Scale changes
	ScaleChangeRegChannel chan chan float32
	// Current pressed state of the four pan keys.
	isLeftPressed bool
	isRightPressed bool
	isUpPressed bool
	isDownPressed bool
	boardPosRegs []chan mgl32.Vec2 // subscribers to mouse board-space positions
	BoardPosRegChannel chan chan mgl32.Vec2
	Scale float32 // zoom factor (1.0 = default)
	Offset mgl32.Vec2 // pan offset in board coordinates
	lastUpdateTicks uint
	keyMotionAmount float32
}
// NewCamera builds a Camera with default scale/offset, registers its
// event channels with the four input dispatchers, and starts the
// event-loop goroutine (stopped via ControlChannel).
func NewCamera(
	mouseMoveRegChannel chan chan mgl32.Vec2,
	mouseScrollRegChannel chan chan float32,
	keyPressedRegChannel chan chan glfw.Key,
	keyReleasedRegChannel chan chan glfw.Key) *Camera {
	camera := Camera{
		Scale: 1.0,
		Offset: mgl32.Vec2{0, 0},
		mouseMoves: make(chan mgl32.Vec2, 2),
		mouseScrolls: make(chan float32, 2),
		keyPresses: make(chan glfw.Key, 2),
		keyReleases: make(chan glfw.Key, 2),
		ControlChannel: make(chan int),
		lastUpdateTicks: 0,
		offsetChangeRegs: make([]chan mgl32.Vec2, 0),
		OffsetChangeRegChannel: make(chan chan mgl32.Vec2),
		scaleChangeRegs: make([]chan float32, 0),
		ScaleChangeRegChannel: make(chan chan float32),
		boardPosRegs: make([]chan mgl32.Vec2, 0),
		BoardPosRegChannel: make(chan chan mgl32.Vec2)}
	// Subscribe the camera's own channels to the input dispatchers.
	mouseMoveRegChannel <- camera.mouseMoves
	mouseScrollRegChannel <- camera.mouseScrolls
	keyPressedRegChannel <- camera.keyPresses
	keyReleasedRegChannel <- camera.keyReleases
	go camera.run()
	return &camera
}
// parseKeyCode records the pressed (true) / released (false) state of
// the four camera movement keys; other keys are ignored.
// (The redundant `break` statements were removed — Go switch cases
// never fall through.)
func (c *Camera) parseKeyCode(keyCode glfw.Key, pressed bool) {
	switch keyCode {
	case input.GetKeyCode(input.MoveUpKey):
		c.isUpPressed = pressed
	case input.GetKeyCode(input.MoveRightKey):
		c.isRightPressed = pressed
	case input.GetKeyCode(input.MoveDownKey):
		c.isDownPressed = pressed
	case input.GetKeyCode(input.MoveLeftKey):
		c.isLeftPressed = pressed
	}
}
// StepUpdate applies keyboard panning for one frame of length interval,
// notifying offset subscribers if the offset changed. Motion speed is
// inversely proportional to the current zoom.
func (c *Camera) StepUpdate(interval float32) {
	step := interval * config.Config.Ui.Camera.KeyMotionFactor * (1.0 / c.Scale)
	moves := [4]struct {
		pressed bool
		axis    int
		sign    float32
	}{
		{c.isLeftPressed, 0, -1},
		{c.isRightPressed, 0, 1},
		{c.isUpPressed, 1, -1},
		{c.isDownPressed, 1, 1},
	}
	moved := false
	for _, m := range moves {
		if m.pressed {
			c.Offset[m.axis] += m.sign * step
			moved = true
		}
	}
	if moved {
		for _, reg := range c.offsetChangeRegs {
			reg <- c.Offset
		}
	}
}
// run is the camera's event loop: it accepts subscriber registrations,
// reacts to mouse and keyboard events, and exits when anything is sent
// on ControlChannel. (The `case _ = <-ch` was replaced with the
// idiomatic `case <-ch`.)
func (c *Camera) run() {
	for {
		select {
		case reg := <-c.BoardPosRegChannel:
			c.boardPosRegs = append(c.boardPosRegs, reg)
		case reg := <-c.OffsetChangeRegChannel:
			c.offsetChangeRegs = append(c.offsetChangeRegs, reg)
		case reg := <-c.ScaleChangeRegChannel:
			c.scaleChangeRegs = append(c.scaleChangeRegs, reg)
		case mousePos := <-c.mouseMoves:
			// Broadcast the board-space position of the mouse.
			boardPos := c.MapPixelPosToBoard(mousePos)
			for _, reg := range c.boardPosRegs {
				reg <- boardPos
			}
		case scrollAmount := <-c.mouseScrolls:
			c.Scale *= (1.0 + scrollAmount*config.Config.Ui.Camera.MouseScrollFactor)
			for _, reg := range c.scaleChangeRegs {
				reg <- c.Scale
			}
		case keyCode := <-c.keyPresses:
			c.parseKeyCode(keyCode, true)
		case keyCode := <-c.keyReleases:
			c.parseKeyCode(keyCode, false)
		case <-c.ControlChannel:
			return
		}
	}
}
// MapPixelPosToBoard maps a window position in pixels to board
// coordinates, normalizing by the window size and accounting for the
// current pan offset and zoom scale.
func (c *Camera) MapPixelPosToBoard(pixelPos mgl32.Vec2) mgl32.Vec2 {
	windowSize := commonOpenGl.GetWindowSize()
	return gamegrid.MapToBoard(
		mgl32.Vec2{pixelPos.X() / windowSize.X(), pixelPos.Y() / windowSize.Y()},
		c.Offset,
		c.Scale)
}

// MapEngineLineToScreen maps both endpoints of an engine-space line
// segment into screen space using the current scale and offset.
func (c *Camera) MapEngineLineToScreen(line [2]mgl32.Vec2) [2]mgl32.Vec2 {
	return [2]mgl32.Vec2{
		gamegrid.MapPositionToScreen(line[0], c.Scale, c.Offset),
		gamegrid.MapPositionToScreen(line[1], c.Scale, c.Offset)}
}
// Package ristretto wraps "github.com/gtank/ristretto255" and exposes a simple prime-order group API with hash-to-curve.
package ristretto
import (
"crypto"
"github.com/gtank/ristretto255"
"github.com/bytemare/crypto/group/hash2curve"
"github.com/bytemare/crypto/group/internal"
)
const (
	// ristrettoInputLength is the uniform-byte length required by
	// FromUniformBytes for both scalars and elements.
	ristrettoInputLength = 64
	// H2C represents the hash-to-curve string identifier.
	H2C = "ristretto255_XMD:SHA-512_R255MAP_RO_"
)

// Group represents the Ristretto255 group. It exposes a prime-order group API with hash-to-curve operations.
type Group struct{}

// NewScalar returns a new, empty, scalar.
func (r Group) NewScalar() internal.Scalar {
	return &Scalar{ristretto255.NewScalar()}
}

// ElementLength returns the byte size of an encoded element.
func (r Group) ElementLength() int {
	return canonicalEncodingLength
}

// NewElement returns a new, empty, element.
func (r Group) NewElement() internal.Point {
	return &Point{ristretto255.NewElement()}
}

// Identity returns the group's identity element.
func (r Group) Identity() internal.Point {
	return &Point{ristretto255.NewElement().Zero()}
}

// HashToGroup allows arbitrary input to be safely mapped to the curve of the group.
func (r Group) HashToGroup(input, dst []byte) internal.Point {
	uniform := hash2curve.ExpandXMD(crypto.SHA512, input, dst, ristrettoInputLength)
	return &Point{ristretto255.NewElement().FromUniformBytes(uniform)}
}

// EncodeToGroup allows arbitrary input to be mapped non-uniformly to points in the Group.
// For ristretto255 this is the same operation as HashToGroup.
func (r Group) EncodeToGroup(input, dst []byte) internal.Point {
	return r.HashToGroup(input, dst)
}

// HashToScalar allows arbitrary input to be safely mapped to the field.
func (r Group) HashToScalar(input, dst []byte) internal.Scalar {
	uniform := hash2curve.ExpandXMD(crypto.SHA512, input, dst, ristrettoInputLength)
	return &Scalar{ristretto255.NewScalar().FromUniformBytes(uniform)}
}

// Base returns group's base point a.k.a. canonical generator.
func (r Group) Base() internal.Point {
	return &Point{ristretto255.NewElement().Base()}
}

// MultBytes allows []byte encodings of a scalar and an element of the group to be multiplied.
// It returns an error when either encoding fails to decode.
func (r Group) MultBytes(s, e []byte) (internal.Point, error) {
	sc, err := r.NewScalar().Decode(s)
	if err != nil {
		return nil, err
	}
	el, err := r.NewElement().Decode(e)
	if err != nil {
		return nil, err
	}
	return el.Mult(sc), nil
}
package bls
import (
"crypto/cipher"
"crypto/sha256"
"encoding/hex"
"io"
"github.com/drand/kyber"
"github.com/drand/kyber/group/mod"
)
// domainG2 is the 8-byte domain-separation tag used when hashing to G2.
var domainG2 = [8]byte{2, 2, 2, 2, 2, 2, 2, 2}

// KyberG2 is a kyber.Point holding a G2 point on BLS12-381 curve
type KyberG2 struct {
	p *PointG2
}

// nullKyberG2 returns a KyberG2 wrapping a zero-valued G2 point.
func nullKyberG2() *KyberG2 {
	var p PointG2
	return newKyberG2(&p)
}

// newKyberG2 wraps an existing G2 point.
func newKyberG2(p *PointG2) *KyberG2 {
	return &KyberG2{p: p}
}

// Equal reports whether k and k2 represent the same G2 point.
func (k *KyberG2) Equal(k2 kyber.Point) bool {
	return NewG2(nil).Equal(k.p, k2.(*KyberG2).p)
}

// Null returns the identity (zero) point of G2.
func (k *KyberG2) Null() kyber.Point {
	return newKyberG2(NewG2(nil).Zero())
}

// Base returns the canonical generator of G2.
func (k *KyberG2) Base() kyber.Point {
	return newKyberG2(NewG2(nil).One())
}

// Pick derives a point from 32 random bytes drawn from rand, via Hash.
func (k *KyberG2) Pick(rand cipher.Stream) kyber.Point {
	var dst, src [32]byte
	rand.XORKeyStream(dst[:], src[:])
	return k.Hash(dst[:])
}

// Set copies q's value into k and returns k.
func (k *KyberG2) Set(q kyber.Point) kyber.Point {
	k.p.Set(q.(*KyberG2).p)
	return k
}

// Clone returns an independent copy of the point.
func (k *KyberG2) Clone() kyber.Point {
	var p PointG2
	p.Set(k.p)
	return newKyberG2(&p)
}

// EmbedLen is not supported for BLS12-381 G2 and always panics.
func (k *KyberG2) EmbedLen() int {
	panic("bls12-381: unsupported operation")
}

// Embed is not supported for BLS12-381 G2 and always panics.
func (k *KyberG2) Embed(data []byte, rand cipher.Stream) kyber.Point {
	panic("bls12-381: unsupported operation")
}

// Data is not supported for BLS12-381 G2 and always panics.
func (k *KyberG2) Data() ([]byte, error) {
	panic("bls12-381: unsupported operation")
}
// Add sets k = a + b and returns k.
func (k *KyberG2) Add(a, b kyber.Point) kyber.Point {
	aa := a.(*KyberG2)
	bb := b.(*KyberG2)
	NewG2(nil).Add(k.p, aa.p, bb.p)
	return k
}

// Sub sets k = a - b and returns k.
func (k *KyberG2) Sub(a, b kyber.Point) kyber.Point {
	aa := a.(*KyberG2)
	bb := b.(*KyberG2)
	NewG2(nil).Sub(k.p, aa.p, bb.p)
	return k
}

// Neg sets k = -a and returns k.
func (k *KyberG2) Neg(a kyber.Point) kyber.Point {
	aa := a.(*KyberG2)
	NewG2(nil).Neg(k.p, aa.p)
	return k
}

// Mul sets k = s * q and returns k. A nil q defaults to the G2 base point.
func (k *KyberG2) Mul(s kyber.Scalar, q kyber.Point) kyber.Point {
	if q == nil {
		q = nullKyberG2().Base()
	}
	NewG2(nil).MulScalar(k.p, q.(*KyberG2).p, &s.(*mod.Int).V)
	return k
}

// MarshalBinary returns the compressed encoding of the point.
func (k *KyberG2) MarshalBinary() ([]byte, error) {
	return NewG2(nil).ToCompressed(k.p), nil
}

// UnmarshalBinary decodes a compressed G2 point from buff.
func (k *KyberG2) UnmarshalBinary(buff []byte) error {
	var err error
	k.p, err = NewG2(nil).FromCompressed(buff)
	return err
}

// MarshalTo writes the compressed encoding to w.
func (k *KyberG2) MarshalTo(w io.Writer) (int, error) {
	buf, err := k.MarshalBinary()
	if err != nil {
		return 0, err
	}
	return w.Write(buf)
}

// UnmarshalFrom reads MarshalSize bytes from r and decodes them.
func (k *KyberG2) UnmarshalFrom(r io.Reader) (int, error) {
	buf := make([]byte, k.MarshalSize())
	n, err := io.ReadFull(r, buf)
	if err != nil {
		return n, err
	}
	return n, k.UnmarshalBinary(buf)
}

// MarshalSize returns the compressed G2 encoding length in bytes.
func (k *KyberG2) MarshalSize() int {
	return 96
}
// String returns a human-readable description of the point.
func (k *KyberG2) String() string {
	b, _ := k.MarshalBinary()
	// Bug fix: this type wraps a G2 point (96-byte compressed encoding);
	// the label previously said "G1", copied from the G1 implementation.
	return "bls12-381.G2: " + hex.EncodeToString(b)
}
// Hash maps an arbitrary message m to a G2 point using the domainG2
// tag. Messages that are not exactly 32 bytes are first reduced with
// SHA-256.
func (k *KyberG2) Hash(m []byte) kyber.Point {
	if len(m) != 32 {
		m = sha256Hash(m)
	}
	var s [32]byte
	copy(s[:], m)
	pg2 := hashWithDomainG2(NewG2(nil), s, domainG2)
	k.p = pg2
	return k
}
// sha256Hash returns the 32-byte SHA-256 digest of in.
func sha256Hash(in []byte) []byte {
	sum := sha256.Sum256(in)
	return sum[:]
}
package generator
// pairIndexes lists the index pairs of cells within a 3x3 box that
// share a row or a column of the box (the candidate base-cell pairs
// for the exocet pattern — see exocet below).
var pairIndexes = [][2]uint8{
	{0, 1},
	{0, 2},
	{0, 3},
	{0, 6},
	{1, 2},
	{1, 4},
	{1, 7},
	{2, 5},
	{2, 8},
	{3, 4},
	{3, 5},
	{3, 6},
	{4, 5},
	{4, 7},
	{5, 8},
	{6, 7},
	{6, 8},
	{7, 8},
}
// exocet removes candidates. When 2 of the 3 cells in a box-line intersection together contain 3 or 4 candidates, then in each of the two boxes in the same band but in different lines, if there are cells with the same 3 or 4 candidates, any others can be removed. See https://www.sudokuwiki.org/Exocet for explanation/discussion.
// It returns true when any candidate was removed.
func (g *Grid) exocet(verbose uint) (res bool) {
	for _, b := range box.unit {
		for _, pi := range pairIndexes {
			// Find base cells.
			p1 := b[pi[0]]
			p2 := b[pi[1]]
			cell1 := *g.pt(p1)
			cell2 := *g.pt(p2)
			common := cell1 | cell2
			if bitCount[cell1] < 2 || bitCount[cell2] < 2 {
				continue
			}
			// The union of the two base cells must hold 3 or 4 candidates.
			if bc := bitCount[common]; bc < 3 || bc > 4 {
				continue
			}
			b1 := boxOfPoint(p1)
			// Pattern rule 1 satisfied. Now find target cells in the two
			// other lines of the base cells' chute, outside the base box.
			var targets []point
			if p1.c == p2.c {
				c1 := (p1.c+1)%3 + p1.c/3*3
				c2 := (p1.c+2)%3 + p1.c/3*3
				findTargets(b1, c1, c2, &col, &targets)
			} else { // p1.r == p2.r
				r1 := (p1.r+1)%3 + p1.r/3*3
				r2 := (p1.r+2)%3 + p1.r/3*3
				findTargets(b1, r1, r2, &row, &targets)
			}
			// Pattern rule 2: targets must lie in different boxes and
			// different lines, and each must cover all base candidates.
			targetPairs := make(map[pair]bool)
			for _, t1 := range targets {
				for _, t2 := range targets {
					if boxOfPoint(t1) == boxOfPoint(t2) || t1.c == t2.c || t1.r == t2.r {
						continue
					}
					t1cell := *g.pt(t1)
					t2cell := *g.pt(t2)
					if t1cell&common != common || t2cell&common != common {
						continue
					}
					// Pattern rule 3: the companion cells of the target cell must not contain the base candidates.
					var c1, c2 point
					if p1.c == p2.c {
						c1 = point{t1.r, t2.c}
						c2 = point{t2.r, t1.c}
					} else { // p1.r == p2.r
						c1 = point{t2.r, t1.c}
						c2 = point{t1.r, t2.c}
					}
					if common&*g.pt(c1) != 0 || common&*g.pt(c2) != 0 {
						continue
					}
					// Record each unordered pair only once.
					if !targetPairs[pair{t2, t1}] {
						targetPairs[pair{t1, t2}] = true
					}
				}
			}
			if len(targetPairs) == 0 {
				continue
			}
		outer:
			for pair := range targetPairs {
				t1 := pair.left
				t2 := pair.right
				// Find the cross-lines.
				var crossLines [3][]point
				if p1.c == p2.c {
					crossLines = findCrossLines(p1, p2, t1, t2, &row, func(p point) uint8 { return p.r })
				} else { // p1.r == p2.r
					crossLines = findCrossLines(p1, p2, t1, t2, &col, func(p point) uint8 { return p.c })
				}
				// Pattern rule 4. Cross lines must not contain more than 2 instances of each base candidate. Same for cover lines (lines that run perpendicular to cross lines).
				for i := 0; i < 6; i++ {
					var digits [10]uint8
					for c := 0; c < 3; c++ {
						cell := *g.pt(crossLines[c][i]) & common
						for d := 1; d <= 9; d++ {
							if cell&(1<<d) != 0 {
								digits[d]++
							}
						}
					}
					for d := 1; d <= 9; d++ {
						if digits[d] > 2 {
							continue outer
						}
					}
				}
				for i := 0; i < 3; i++ {
					var digits [10]uint8
					for _, p := range crossLines[i] {
						cell := *g.pt(p) & common
						for d := 1; d <= 9; d++ {
							if cell&(1<<d) != 0 {
								digits[d]++
							}
						}
					}
					for d := 1; d <= 9; d++ {
						if digits[d] > 2 {
							continue outer
						}
					}
				}
				// Elimination rule 1: candidates in the target cells that are not in the base cells can be removed.
				e1 := *g.pt(t1) &^ common
				e2 := *g.pt(t2) &^ common
				if g.pt(t1).andNot(e1) {
					g.cellChange(&res, verbose, "exocet: in %s, remove %s\n", t1, e1)
				}
				if g.pt(t2).andNot(e2) {
					g.cellChange(&res, verbose, "exocet: in %s, remove %s\n", t2, e2)
				}
			}
		}
	}
	return
}
// findCrossLines returns the cells of the three cross-lines for an
// exocet pattern: the remaining line of the base cells' mini-line, and
// the lines of the two targets (selected by sel, which projects a
// point onto the line coordinate). Cells inside the base box and the
// two target boxes are excluded.
func findCrossLines(p1, p2, t1, t2 point, gr *group, sel func(point) uint8) (crossLines [3][]point) {
	b1 := boxOfPoint(p1)
	bt1 := boxOfPoint(t1)
	bt2 := boxOfPoint(t2)
	var indexes [3]uint8
	// indexes[0] is the third line of the base cells' band segment —
	// the one of the three that neither base cell occupies.
	if sel(p1)%3 == 0 && sel(p2)%3 == 1 {
		indexes[0] = sel(p1)/3*3 + 2
	} else if sel(p1)%3 == 0 && sel(p2)%3 == 2 {
		indexes[0] = sel(p1)/3*3 + 1
	} else { // sel(p1)%3 == 1 && sel(p2)%3 == 2 {
		indexes[0] = sel(p1) / 3 * 3
	}
	indexes[1] = sel(t1)
	indexes[2] = sel(t2)
	for i, ind := range indexes {
		for _, p := range gr.unit[ind] {
			b := boxOfPoint(p)
			if b1 == b || bt1 == b || bt2 == b {
				continue
			}
			crossLines[i] = append(crossLines[i], p)
		}
	}
	return
}
func findTargets(excludeBox, line1, line2 uint8, gr *group, targets *[]point) {
for _, p := range gr.unit[line1] {
if excludeBox == boxOfPoint(p) {
continue
}
*targets = append(*targets, p)
}
for _, p := range gr.unit[line2] {
if excludeBox == boxOfPoint(p) {
continue
}
*targets = append(*targets, p)
}
} | generator/exocet.go | 0.59749 | 0.561034 | exocet.go | starcoder |
package indicators
import (
"errors"
"github.com/thetruetrade/gotrade"
)
// A Plus Directional Indicator (PlusDi), no storage, for use in other indicators
type PlusDiWithoutStorage struct {
	*baseIndicatorWithFloatBounds
	// private variables
	periodCounter int // counts bars processed; -1 before the first bar
	previousHigh float64
	previousLow float64
	previousPlusDM float64
	previousTrueRange float64
	currentTrueRange float64 // latest value pushed by the embedded TrueRange
	trueRange *TrueRange
	timePeriod int
}
// NewPlusDiWithoutStorage creates a Plus Directional Indicator (PlusDi)
// without storage. valueAvailableAction must be non-nil; timePeriod
// must be within [1, MaximumLookbackPeriod].
func NewPlusDiWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *PlusDiWithoutStorage, err error) {
	// an indicator without storage MUST have a value available action
	if valueAvailableAction == nil {
		return nil, ErrValueAvailableActionIsNil
	}
	// the minimum timeperiod for this indicator is 1
	if timePeriod < 1 {
		return nil, errors.New("timePeriod is less than the minimum (1)")
	}
	// check the maximum timeperiod
	if timePeriod > MaximumLookbackPeriod {
		return nil, errors.New("timePeriod is greater than the maximum (100000)")
	}
	lookback := 1
	if timePeriod > 1 {
		lookback = timePeriod
	}
	ind := PlusDiWithoutStorage{
		baseIndicatorWithFloatBounds: newBaseIndicatorWithFloatBounds(lookback, valueAvailableAction),
		periodCounter:                -1,
		previousPlusDM:               0.0,
		previousTrueRange:            0.0,
		currentTrueRange:             0.0,
		timePeriod:                   timePeriod,
	}
	ind.trueRange, err = NewTrueRange()
	if err != nil {
		// Bug fix: the error was previously ignored and ind.trueRange was
		// dereferenced below, panicking whenever NewTrueRange failed.
		return nil, err
	}
	// Capture each true-range value as it becomes available.
	ind.trueRange.valueAvailableAction = func(dataItem float64, streamBarIndex int) {
		ind.currentTrueRange = dataItem
	}
	return &ind, nil
}
// A Plus Directional Indicator (PlusDi)
type PlusDi struct {
	*PlusDiWithoutStorage
	// public variables
	Data []float64 // computed indicator values, appended as they become available
}

// NewPlusDi creates a Plus Directional Indicator (PlusDi) for online usage.
// Callers must check err: on failure the embedded PlusDiWithoutStorage
// pointer is nil and the returned value is unusable.
func NewPlusDi(timePeriod int) (indicator *PlusDi, err error) {
	ind := PlusDi{}
	ind.PlusDiWithoutStorage, err = NewPlusDiWithoutStorage(timePeriod, func(dataItem float64, streamBarIndex int) {
		ind.Data = append(ind.Data, dataItem)
	})
	return &ind, err
}

// NewDefaultPlusDi creates a Plus Directional Indicator (PlusDi) for online usage with default parameters
// - timePeriod: 14
func NewDefaultPlusDi() (indicator *PlusDi, err error) {
	timePeriod := 14
	return NewPlusDi(timePeriod)
}
// NewPlusDiWithSrcLen creates a Plus Directional Indicator (PlusDi) for offline usage.
// sourceLength is the expected number of source bars; it is used to pre-size
// the Data storage so appends do not reallocate.
func NewPlusDiWithSrcLen(sourceLength uint, timePeriod int) (indicator *PlusDi, err error) {
	ind, err := NewPlusDi(timePeriod)
	if err != nil {
		// previously the error was ignored and GetLookbackPeriod was called
		// on a nil embedded indicator.
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it.
	// Compare before subtracting: sourceLength and lookback are unsigned, so
	// the old `sourceLength-lookback > 1` underflowed (wrapped to a huge
	// value) whenever the source was shorter than the lookback period.
	lookback := uint(ind.GetLookbackPeriod())
	if sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}

	return ind, nil
}
// NewDefaultPlusDiWithSrcLen creates a Plus Directional Indicator (PlusDi)
// for offline usage with default parameters (timePeriod: 14).
// sourceLength pre-sizes the Data storage.
func NewDefaultPlusDiWithSrcLen(sourceLength uint) (indicator *PlusDi, err error) {
	ind, err := NewDefaultPlusDi()
	if err != nil {
		// previously the error was ignored and GetLookbackPeriod was called
		// on a nil embedded indicator.
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it.
	// Compare before subtracting to avoid unsigned underflow when the source
	// is shorter than the lookback period.
	lookback := uint(ind.GetLookbackPeriod())
	if sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}

	return ind, nil
}
// NewPlusDiForStream creates a Plus Directional Indicator (PlusDi) for online
// usage and subscribes it to the given source data stream.
func NewPlusDiForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *PlusDi, err error) {
	ind, err := NewPlusDi(timePeriod)
	if err != nil {
		// do not subscribe a half-constructed indicator to the stream;
		// previously ticks would have hit a nil embedded indicator.
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// NewDefaultPlusDiForStream creates a Plus Directional Indicator (PlusDi)
// with default parameters and subscribes it to the given source data stream.
func NewDefaultPlusDiForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *PlusDi, err error) {
	ind, err := NewDefaultPlusDi()
	if err != nil {
		// do not subscribe a half-constructed indicator to the stream.
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// NewPlusDiForStreamWithSrcLen creates a Plus Directional Indicator (PlusDi)
// for offline usage with pre-sized storage and subscribes it to the stream.
func NewPlusDiForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *PlusDi, err error) {
	ind, err := NewPlusDiWithSrcLen(sourceLength, timePeriod)
	if err != nil {
		// do not subscribe a half-constructed indicator to the stream.
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// NewDefaultPlusDiForStreamWithSrcLen creates a Plus Directional Indicator
// (PlusDi) with default parameters and pre-sized storage, and subscribes it
// to the given source data stream.
func NewDefaultPlusDiForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *PlusDi, err error) {
	ind, err := NewDefaultPlusDiWithSrcLen(sourceLength)
	if err != nil {
		// do not subscribe a half-constructed indicator to the stream.
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}
// ReceiveDOHLCVTick consumes a source data DOHLCV price tick
func (ind *PlusDiWithoutStorage) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
// forward to the true range indicator first using previous data
ind.trueRange.ReceiveDOHLCVTick(tickData, streamBarIndex)
ind.periodCounter += 1
high := tickData.H()
low := tickData.L()
diffP := high - ind.previousHigh
diffM := ind.previousLow - low
if ind.lookbackPeriod == 1 {
if ind.periodCounter > 0 {
// forward to the true range indicator first using previous data
ind.trueRange.ReceiveDOHLCVTick(tickData, streamBarIndex)
var result float64
if (diffP > 0) && (diffP > diffM) && ind.currentTrueRange != 0.0 {
result = diffP / ind.currentTrueRange
} else {
result = 0
}
ind.UpdateIndicatorWithNewValue(result, streamBarIndex)
}
} else {
if ind.periodCounter > 0 {
if ind.periodCounter < ind.timePeriod {
if (diffP > 0) && (diffP > diffM) {
ind.previousPlusDM += diffP
}
ind.previousTrueRange += ind.currentTrueRange
} else {
var result float64
ind.previousTrueRange = ind.previousTrueRange - (ind.previousTrueRange / float64(ind.timePeriod)) + ind.currentTrueRange
if (diffP > 0) && (diffP > diffM) {
ind.previousPlusDM = ind.previousPlusDM - (ind.previousPlusDM / float64(ind.timePeriod)) + diffP
} else {
ind.previousPlusDM = ind.previousPlusDM - (ind.previousPlusDM / float64(ind.timePeriod))
}
if ind.previousTrueRange != 0.0 {
result = float64(100.0) * ind.previousPlusDM / ind.previousTrueRange
} else {
result = 0.0
}
ind.UpdateIndicatorWithNewValue(result, streamBarIndex)
}
}
}
ind.previousHigh = high
ind.previousLow = low
} | indicators/plusdi.go | 0.669529 | 0.457076 | plusdi.go | starcoder |
package fiat
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/shopspring/decimal"
)
const (
	// coinCapHistoryAPI is the endpoint we hit for historical price data.
	coinCapHistoryAPI = "https://api.coincap.io/v2/assets/bitcoin/history"
	// coinCapDefaultCurrency is the currency that the price data returned
	// by the Coin Cap API is quoted in.
	coinCapDefaultCurrency = "USD"
)
// ErrQueryTooLong is returned when we cannot get a granularity level for a
// period of time because it is too long. Callers should compare with
// errors.Is and retry with a shorter period.
var ErrQueryTooLong = errors.New("period too long for coincap api, " +
	"please reduce")
// Granularity indicates the level of aggregation price information will be
// provided at. Instances are created via newGranularity; the predefined
// package-level values cover every level the coincap API supports.
type Granularity struct {
	// aggregation is the level of aggregation at which prices are provided.
	aggregation time.Duration
	// label is the string that we send to the API to select granularity.
	label string
	// maximumQuery is the maximum time range that prices can be queried
	// at this level of granularity.
	maximumQuery time.Duration
}
// newGranularity bundles an aggregation interval, the maximum queryable
// range and the coincap API label into a Granularity value.
func newGranularity(aggregation, maxQuery time.Duration,
	label string) Granularity {

	g := Granularity{label: label}
	g.aggregation = aggregation
	g.maximumQuery = maxQuery
	return g
}
// The granularity levels below mirror the intervals coincap supports,
// each paired with the longest range a single query may cover.
var (
	// GranularityMinute aggregates the bitcoin price over 1 minute.
	GranularityMinute = newGranularity(time.Minute, time.Hour*24, "m1")
	// Granularity5Minute aggregates the bitcoin price over 5 minute.
	Granularity5Minute = newGranularity(
		time.Minute*5, time.Hour*24*5, "m5",
	)
	// Granularity15Minute aggregates the bitcoin price over 15 minutes.
	Granularity15Minute = newGranularity(
		time.Minute*15, time.Hour*24*7, "m15",
	)
	// Granularity30Minute aggregates the bitcoin price over 30 minutes.
	Granularity30Minute = newGranularity(
		time.Minute*30, time.Hour*24*14, "m30",
	)
	// GranularityHour aggregates the bitcoin price over 1 hour.
	GranularityHour = newGranularity(
		time.Hour, time.Hour*24*30, "h1",
	)
	// Granularity6Hour aggregates the bitcoin price over 6 hours.
	Granularity6Hour = newGranularity(
		time.Hour*6, time.Hour*24*183, "h6",
	)
	// Granularity12Hour aggregates the bitcoin price over 12 hours.
	Granularity12Hour = newGranularity(
		time.Hour*12, time.Hour*24*365, "h12",
	)
	// GranularityDay aggregates the bitcoin price over one day.
	GranularityDay = newGranularity(
		time.Hour*24, time.Hour*24*7305, "d1",
	)
)
// ascendingGranularity stores all the levels of granularity that coincap
// allows in ascending order so that we can get the best value for a query
// duration. We require this list because we cannot iterate through maps in
// order. BestGranularity relies on this ordering being ascending by
// maximumQuery.
var ascendingGranularity = []Granularity{
	GranularityMinute, Granularity5Minute, Granularity15Minute,
	Granularity30Minute, GranularityHour, Granularity6Hour,
	Granularity12Hour, GranularityDay,
}
// BestGranularity takes a period of time and returns the lowest granularity
// that we can query the coincap api in a single query. This helper is used
// to provide default granularity periods when they are not provided by
// requests. It returns ErrQueryTooLong when even the coarsest level cannot
// cover the duration.
func BestGranularity(duration time.Duration) (Granularity, error) {
	for _, level := range ascendingGranularity {
		if duration > level.maximumQuery {
			continue
		}
		// This is the finest level whose single-query limit covers duration.
		return level, nil
	}

	// Our duration exceeds every maximum query period; ask the caller to
	// shorten the query.
	return Granularity{}, ErrQueryTooLong
}
// coinCapAPI implements the fiatApi interface, getting historical Bitcoin
// prices from coincap. The query and convert functions are injected so
// tests can stub out the network call and the parsing step independently.
type coinCapAPI struct {
	// Coincap's api allows us to request prices at varying levels of
	// granularity. This field represents the granularity requested.
	granularity Granularity
	// query is the function that makes the http call out to coincap's api.
	// It is set within the struct so that it can be mocked for testing.
	query func(start, end time.Time, g Granularity) ([]byte, error)
	// convert produces usd prices from the output of the query function.
	// It is set within the struct so that it can be mocked for testing.
	convert func([]byte) ([]*Price, error)
}
// newCoinCapAPI returns a coin cap api struct which can be used to query
// historical prices at the given granularity, wired to the real http
// query and parser implementations.
func newCoinCapAPI(granularity Granularity) *coinCapAPI {
	api := &coinCapAPI{granularity: granularity}
	api.query = queryCoinCap
	api.convert = parseCoinCapData
	return api
}
// queryCoinCap queries coincap for historical prices between start and end
// at the given granularity and returns the raw response body.
func queryCoinCap(start, end time.Time, granularity Granularity) ([]byte,
	error) {

	// The coincap api requires milliseconds.
	startMs := start.Unix() * 1000
	endMs := end.Unix() * 1000

	url := fmt.Sprintf("%v?interval=%v&start=%v&end=%v",
		coinCapHistoryAPI, granularity.label, startMs,
		endMs)

	log.Debugf("coincap url: %v", url)

	// Query the http endpoint with the url provided
	// #nosec G107
	response, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	// Fail early on non-success responses rather than handing an error body
	// (rate-limit or HTML error page) to the json parser downstream.
	if response.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("coincap api returned status: %v",
			response.Status)
	}

	return ioutil.ReadAll(response.Body)
}
// coinCapResponse mirrors the top-level JSON envelope returned by the
// coincap history endpoint.
type coinCapResponse struct {
	Data []*coinCapDataPoint `json:"data"`
}
// coinCapDataPoint is a single price observation: the USD price as a
// decimal string and the observation time in unix milliseconds.
type coinCapDataPoint struct {
	Price     string `json:"priceUsd"`
	Timestamp int64  `json:"time"`
}
// parseCoinCapData parses raw http response data into usd Price structs,
// going through intermediary structs to handle the json layout.
func parseCoinCapData(data []byte) ([]*Price, error) {
	var resp coinCapResponse
	if err := json.Unmarshal(data, &resp); err != nil {
		return nil, err
	}

	records := make([]*Price, len(resp.Data))

	// Convert each api entry into a usable record with a parsed price and
	// a timestamp converted from unix milliseconds.
	for i, point := range resp.Data {
		price, err := decimal.NewFromString(point.Price)
		if err != nil {
			return nil, err
		}

		elapsed := time.Duration(point.Timestamp) * time.Millisecond
		records[i] = &Price{
			Timestamp: time.Unix(0, elapsed.Nanoseconds()),
			Price:     price,
			Currency:  coinCapDefaultCurrency,
		}
	}

	return records, nil
}
// rawPriceData retrieves price information from coincap's api. If the range
// requested is more than coincap will serve us in a single request, we break
// our queries up into multiple chunks.
func (c *coinCapAPI) rawPriceData(ctx context.Context, startTime,
endTime time.Time) ([]*Price, error) {
// When we query prices over a range, it is likely that the first data
// point we get is after our starting point, since we have discrete
// points in time. To make sure that the first price point we get is
// before our starting time, we add a buffer (equal to our granularity
// level) to our start time so that the first timestamp in our data
// will definitely be before our start time. We only do this once off,
// so that we do not have overlapping data across queries.
startTime = startTime.Add(c.granularity.aggregation * -1)
var historicalRecords []*Price
// Create start and end vars to query one maximum length at a time.
maxPeriod := c.granularity.maximumQuery
start, end := startTime, startTime.Add(maxPeriod)
// Make chunked queries of size max duration until we reach our end
// time. We can check equality because we cut our query end back to our
// target end time if it surpasses it.
for start.Before(endTime) {
query := func() ([]byte, error) {
return c.query(start, end, c.granularity)
}
// Query the api for this page of data. We allow retries at this
// stage in case the api experiences a temporary limit.
records, err := retryQuery(ctx, query, c.convert)
if err != nil {
return nil, err
}
historicalRecords = append(historicalRecords, records...)
// Progress our start time to the end of the period we just
// queried for, and increase our end time by the maximum
// queryable period.
start, end = end, end.Add(maxPeriod)
// If our end time is after the period we need, we cut it off.
if end.After(endTime) {
end = endTime
}
}
return historicalRecords, nil
} | fiat/coincap_api.go | 0.664758 | 0.422207 | coincap_api.go | starcoder |
package rapid
type (
	// Pointer is a 1-based index into Rapid.Buckets; 0 is the sentinel
	// meaning "no slot".
	Pointer uint32

	// EntryPoint identifies one list stored inside a Rapid: the bucket
	// indices of its first and last elements.
	EntryPoint struct {
		Head Pointer
		Tail Pointer
	}

	// Iterator is one bucket slot: doubly linked via PrevPtr/NextPtr and
	// carrying the element payload in Data. Ptr == 0 marks an unused slot.
	Iterator[T any] struct {
		Ptr     Pointer
		PrevPtr Pointer
		NextPtr Pointer
		Data    T
	}
)
// Reset returns the iterator slot to its zero state so it can be recycled.
// All link fields are cleared and Data is zeroed; previously PrevPtr and
// Data survived a reset, which left stale links in recycled slots and kept
// references in Data alive for the garbage collector.
func (this *Iterator[T]) Reset() {
	var zero T
	this.Ptr = 0
	this.PrevPtr = 0
	this.NextPtr = 0
	this.Data = zero
}
// Rapid is a slab-allocated collection of doubly linked lists: all lists
// share the Buckets slice and are addressed by EntryPoint values.
type Rapid[T any] struct {
	// Length is the total number of elements across all lists.
	Length int
	// Serial is the next never-used bucket index to hand out.
	Serial uint32
	// Recyclable holds freed bucket indices for reuse.
	Recyclable array_stack // do not recycle head
	// Buckets is the shared backing storage; index 0 is the sentinel.
	Buckets []Iterator[T]
	// Equal reports whether two payloads represent the same element.
	Equal func(a, b *T) bool
}
// New creates a Rapid with room for size elements (plus the sentinel slot
// at index 0) and the given payload equality function.
func New[T any](size uint32, eq func(a, b *T) bool) *Rapid[T] {
	container := &Rapid[T]{
		Length:     0,
		Serial:     1,
		Recyclable: []Pointer{},
		Equal:      eq,
	}
	// slot 0 is reserved as the sentinel, hence size+1 buckets
	container.Buckets = make([]Iterator[T], size+1)
	return container
}
// Begin returns the head iterator of the list identified by entrypoint.
// The value receiver copies only the Rapid header; the returned pointer
// aliases the shared Buckets backing array.
func (this Rapid[T]) Begin(entrypoint EntryPoint) *Iterator[T] {
	return &this.Buckets[entrypoint.Head]
}
// Next returns the iterator following iter; NextPtr == 0 leads to the
// sentinel bucket, which terminates iteration.
func (this Rapid[T]) Next(iter *Iterator[T]) *Iterator[T] {
	return &this.Buckets[iter.NextPtr]
}
// End reports whether iter is the sentinel, i.e. iteration has finished.
func (this Rapid[T]) End(iter *Iterator[T]) bool {
	return iter.Ptr == 0
}
// NextID hands out a bucket index, preferring recycled slots and growing
// the bucket storage when a fresh serial number is out of range.
func (this *Rapid[T]) NextID() Pointer {
	// Reuse a freed slot when one is available.
	if this.Recyclable.Len() > 0 {
		return this.Recyclable.Pop()
	}

	id := this.Serial
	// Grow the backing storage if the fresh serial does not fit yet.
	if id >= uint32(len(this.Buckets)) {
		this.Buckets = append(this.Buckets, Iterator[T]{})
	}
	this.Serial++
	return Pointer(id)
}
// Push append an element with unique check: if an element equal to data
// already exists its payload is overwritten and replaced=true is returned;
// otherwise the element is linked after the tail.
func (this *Rapid[T]) Push(entrypoint *EntryPoint, data *T) (replaced bool) {
	var head = &this.Buckets[entrypoint.Head]
	// An unused head slot (Ptr == 0) means the list is empty: store in place.
	if head.Ptr == 0 {
		head.Ptr = entrypoint.Head
		head.Data = *data
		this.Length++
		return false
	}
	// Scan for an existing equal element and overwrite its payload.
	for i := head; !this.End(i); i = this.Next(i) {
		if this.Equal(&i.Data, data) {
			i.Data = *data
			return true
		}
	}
	// Not found: allocate a slot and link it after the current tail.
	var cursor = this.NextID()
	var tail = &this.Buckets[entrypoint.Tail]
	tail.NextPtr = cursor
	entrypoint.Tail = cursor
	var target = &this.Buckets[cursor]
	target.Ptr = cursor
	target.Data = *data
	target.PrevPtr = tail.Ptr
	this.Length++
	return false
}
// Append append an element without unique check: the element is always
// linked after the current tail (or stored in the head slot if the list
// is empty), even if an equal element already exists.
func (this *Rapid[T]) Append(entrypoint *EntryPoint, data *T) {
	var head = &this.Buckets[entrypoint.Head]
	// An unused head slot (Ptr == 0) means the list is empty: store in place.
	if head.Ptr == 0 {
		head.Ptr = entrypoint.Head
		head.Data = *data
		this.Length++
		return
	}
	// Allocate a slot and link it after the current tail.
	var cursor = this.NextID()
	var tail = &this.Buckets[entrypoint.Tail]
	tail.NextPtr = cursor
	entrypoint.Tail = cursor
	var target = &this.Buckets[cursor]
	target.Ptr = cursor
	target.Data = *data
	target.PrevPtr = tail.Ptr
	this.Length++
}
// Delete do not delete in loop if no break: the successor's payload is
// moved into target's slot (swap-delete), so continuing iteration from
// target after a delete would revisit or skip elements.
func (this *Rapid[T]) Delete(entrypoint *EntryPoint, target *Iterator[T]) (deleted bool) {
	var head = this.Buckets[entrypoint.Head]
	// Nothing to do for an empty list or an unused/nil target slot.
	if head.Ptr == 0 || target == nil || target.Ptr == 0 {
		return false
	}
	this.Length--
	// Case 1: target is the tail (no successor).
	if target.NextPtr == 0 {
		if target.PrevPtr != 0 {
			// Unlink from the predecessor, move the tail back and recycle
			// the slot. A head slot (PrevPtr == 0) is never recycled.
			var prev = &this.Buckets[target.PrevPtr]
			prev.NextPtr = 0
			entrypoint.Tail = prev.Ptr
			this.Recyclable.Push(target.Ptr)
		}
		target.Reset()
		return true
	}
	// Case 2: target has a successor. Copy the successor into target's
	// slot and recycle the successor's slot instead (swap-delete).
	var next = &this.Buckets[target.NextPtr]
	this.Recyclable.Push(next.Ptr)
	next.Ptr = target.Ptr
	next.PrevPtr = target.PrevPtr
	*target = *next
	next.Reset()
	// If the moved element was the tail, the tail pointer must follow it.
	if target.NextPtr == 0 {
		entrypoint.Tail = target.Ptr
	}
	return true
}
func (this *Rapid[T]) Find(entrypoint EntryPoint, data *T) (result *Iterator[T], exist bool) {
if entrypoint.Head == 0 {
return nil, false
}
for i := this.Begin(entrypoint); !this.End(i); i = this.Next(i) {
if this.Equal(&i.Data, data) {
return i, true
}
}
return nil, false
} | rapid/rapid.go | 0.50952 | 0.5169 | rapid.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// PlannerRoster models a Planner roster resource: a lightweight container
// for plans and their members.
type PlannerRoster struct {
    Entity
    // Retrieves the members of the plannerRoster.
    members []PlannerRosterMemberable
    // Retrieves the plans contained by the plannerRoster.
    plans []PlannerPlanable
}
// NewPlannerRoster instantiates a new plannerRoster and sets the default values.
func NewPlannerRoster()(*PlannerRoster) {
    roster := &PlannerRoster{}
    roster.Entity = *NewEntity()
    return roster
}
// CreatePlannerRosterFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreatePlannerRosterFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    // plannerRoster has no subtypes, so the parse node is not consulted.
    instance := NewPlannerRoster()
    return instance, nil
}
// GetFieldDeserializers the deserialization information for the current model:
// the base Entity deserializers extended with this type's "members" and
// "plans" collection fields.
func (m *PlannerRoster) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    // Start from the base Entity field handlers and add our own.
    res := m.Entity.GetFieldDeserializers()
    // "members": collection of plannerRosterMember objects.
    res["members"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerRosterMemberFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            // Downcast each parsed node to the concrete collection type.
            res := make([]PlannerRosterMemberable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerRosterMemberable)
            }
            m.SetMembers(res)
        }
        return nil
    }
    // "plans": collection of plannerPlan objects.
    res["plans"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreatePlannerPlanFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            res := make([]PlannerPlanable, len(val))
            for i, v := range val {
                res[i] = v.(PlannerPlanable)
            }
            m.SetPlans(res)
        }
        return nil
    }
    return res
}
// GetMembers gets the members property value. Retrieves the members of the plannerRoster.
func (m *PlannerRoster) GetMembers()([]PlannerRosterMemberable) {
    // Nil-receiver guard, per the generated accessor contract.
    if m == nil {
        return nil
    }
    return m.members
}
// GetPlans gets the plans property value. Retrieves the plans contained by the plannerRoster.
func (m *PlannerRoster) GetPlans()([]PlannerPlanable) {
    // Nil-receiver guard, per the generated accessor contract.
    if m == nil {
        return nil
    }
    return m.plans
}
// Serialize serializes information the current object: base Entity fields
// first, then the "members" and "plans" collections (skipped when nil).
func (m *PlannerRoster) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    err := m.Entity.Serialize(writer)
    if err != nil {
        return err
    }
    if m.GetMembers() != nil {
        // Upcast the typed collection to Parsable for the writer.
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetMembers()))
        for i, v := range m.GetMembers() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("members", cast)
        if err != nil {
            return err
        }
    }
    if m.GetPlans() != nil {
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetPlans()))
        for i, v := range m.GetPlans() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err = writer.WriteCollectionOfObjectValues("plans", cast)
        if err != nil {
            return err
        }
    }
    return nil
}
// SetMembers sets the members property value. Retrieves the members of the plannerRoster.
func (m *PlannerRoster) SetMembers(value []PlannerRosterMemberable)() {
    // Nil-receiver guard, per the generated accessor contract.
    if m == nil {
        return
    }
    m.members = value
}
// SetPlans sets the plans property value. Retrieves the plans contained by the plannerRoster.
func (m *PlannerRoster) SetPlans(value []PlannerPlanable)() {
if m != nil {
m.plans = value
}
} | models/planner_roster.go | 0.648911 | 0.431285 | planner_roster.go | starcoder |
// this file was take from https://github.com/prometheus/alertmanager/pull/2393
package timeinterval
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"time"
)
// TimeInterval describes intervals of time. ContainsTime will tell you if a golang time is contained
// within the interval. A nil field places no restriction on that component;
// all non-nil fields must match for a time to be contained.
type TimeInterval struct {
	Times       []TimeRange       `yaml:"times,omitempty"`
	Weekdays    []WeekdayRange    `yaml:"weekdays,flow,omitempty"`
	DaysOfMonth []DayOfMonthRange `yaml:"days_of_month,flow,omitempty"`
	Months      []MonthRange      `yaml:"months,flow,omitempty"`
	Years       []YearRange       `yaml:"years,flow,omitempty"`
}
// TimeRange represents a range of minutes within a 1440 minute day, exclusive of the End minute. A day consists of 1440 minutes.
// For example, 4:00PM to End of the day would Begin at 1020 and End at 1440.
type TimeRange struct {
	// StartMinute is the inclusive lower bound, minutes since midnight.
	StartMinute int
	// EndMinute is the exclusive upper bound, minutes since midnight.
	EndMinute int
}
// InclusiveRange is used to hold the Beginning and End values of many time interval components.
// Both bounds are inclusive.
type InclusiveRange struct {
	Begin int
	End int
}
// A WeekdayRange is an inclusive range between [0, 6] where 0 = Sunday.
type WeekdayRange struct {
	InclusiveRange
}
// A DayOfMonthRange is an inclusive range that may have negative Beginning/End values that represent distance from the End of the month Beginning at -1.
type DayOfMonthRange struct {
	InclusiveRange
}
// A MonthRange is an inclusive range between [1, 12] where 1 = January.
type MonthRange struct {
	InclusiveRange
}
// A YearRange is a positive inclusive range.
type YearRange struct {
	InclusiveRange
}
// yamlTimeRange is the wire representation of a TimeRange: both fields are
// "HH:MM" strings.
type yamlTimeRange struct {
	StartTime string `yaml:"start_time"`
	EndTime string `yaml:"end_time"`
}
// A range with a Beginning and End that can be represented as strings.
// InclusiveRange provides the default implementation; weekday and month
// ranges override memberFromString to accept named members.
type stringableRange interface {
	setBegin(int)
	setEnd(int)
	// Try to map a member of the range into an integer.
	memberFromString(string) (int, error)
}
// setBegin records the parsed lower bound of the range.
func (ir *InclusiveRange) setBegin(n int) {
	ir.Begin = n
}
// setEnd records the parsed upper bound of the range.
func (ir *InclusiveRange) setEnd(n int) {
	ir.End = n
}
// memberFromString parses a plain integer range member; -1 is returned
// alongside the error on failure.
func (ir *InclusiveRange) memberFromString(in string) (out int, err error) {
	out, err = strconv.Atoi(in)
	if err != nil {
		return -1, err
	}
	return out, nil
}
// memberFromString maps a lowercase weekday name ("sunday".."saturday")
// onto its index, 0 = Sunday.
func (r *WeekdayRange) memberFromString(in string) (out int, err error) {
	if day, ok := daysOfWeek[in]; ok {
		return day, nil
	}
	return -1, fmt.Errorf("%s is not a valid weekday", in)
}
// memberFromString maps a lowercase month name or a numeric month string
// onto its 1-based month number.
func (r *MonthRange) memberFromString(in string) (out int, err error) {
	if month, ok := months[in]; ok {
		return month, nil
	}
	// Fall back to a plain numeric month, e.g. "3" for march.
	month, convErr := strconv.Atoi(in)
	if convErr != nil {
		return -1, fmt.Errorf("%s is not a valid month", in)
	}
	return month, nil
}
// daysOfWeek maps lowercase weekday names to their index (0 = Sunday);
// daysOfWeekInv is its inverse, used when marshalling back to YAML.
var daysOfWeek = map[string]int{
	"sunday":    0,
	"monday":    1,
	"tuesday":   2,
	"wednesday": 3,
	"thursday":  4,
	"friday":    5,
	"saturday":  6,
}
var daysOfWeekInv = map[int]string{
	0: "sunday",
	1: "monday",
	2: "tuesday",
	3: "wednesday",
	4: "thursday",
	5: "friday",
	6: "saturday",
}
// months maps lowercase month names to their 1-based number; monthsInv is
// its inverse, used when formatting error messages and marshalling.
var months = map[string]int{
	"january":   1,
	"february":  2,
	"march":     3,
	"april":     4,
	"may":       5,
	"june":      6,
	"july":      7,
	"august":    8,
	"september": 9,
	"october":   10,
	"november":  11,
	"december":  12,
}
var monthsInv = map[int]string{
	1:  "january",
	2:  "february",
	3:  "march",
	4:  "april",
	5:  "may",
	6:  "june",
	7:  "july",
	8:  "august",
	9:  "september",
	10: "october",
	11: "november",
	12: "december",
}
// UnmarshalYAML implements the Unmarshaller interface for WeekdayRange.
// Accepts a single weekday ("monday") or a range ("monday:friday") and
// validates that both bounds are in [0, 6] and correctly ordered.
func (r *WeekdayRange) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var str string
	if err := unmarshal(&str); err != nil {
		return err
	}
	if err := stringableRangeFromString(str, r); err != nil {
		return err
	}
	if r.Begin > r.End {
		// FIX: the condition fires when the end day precedes the start day;
		// the previous message stated the opposite ("start day cannot be
		// before end day").
		return errors.New("end day cannot be before start day")
	}
	if r.Begin < 0 || r.Begin > 6 {
		return fmt.Errorf("%s is not a valid day of the week: out of range", str)
	}
	if r.End < 0 || r.End > 6 {
		return fmt.Errorf("%s is not a valid day of the week: out of range", str)
	}
	return nil
}
// UnmarshalYAML implements the Unmarshaller interface for DayOfMonthRange.
// Accepts positive days (1..31) and negative days counted from the end of
// the month (-1 = last day), validating ordering as far as possible without
// knowing the month's length.
func (r *DayOfMonthRange) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var str string
	if err := unmarshal(&str); err != nil {
		return err
	}
	if err := stringableRangeFromString(str, r); err != nil {
		return err
	}
	// Check beginning <= end accounting for negatives day of month indices as well.
	// Months != 31 days can't be addressed here and are clamped, but at least we can catch blatant errors.
	if r.Begin == 0 || r.Begin < -31 || r.Begin > 31 {
		return fmt.Errorf("%d is not a valid day of the month: out of range", r.Begin)
	}
	if r.End == 0 || r.End < -31 || r.End > 31 {
		return fmt.Errorf("%d is not a valid day of the month: out of range", r.End)
	}
	// Restricting here prevents errors where begin > end in longer months but not shorter months.
	if r.Begin < 0 && r.End > 0 {
		return fmt.Errorf("end day must be negative if start day is negative")
	}
	// Check begin <= end. We can't know this for sure when using negative indices
	// but we can prevent cases where its always invalid (using 28 day minimum length).
	// Negative indices are normalised against the shortest possible month.
	checkBegin := r.Begin
	checkEnd := r.End
	if r.Begin < 0 {
		checkBegin = 28 + r.Begin
	}
	if r.End < 0 {
		checkEnd = 28 + r.End
	}
	if checkBegin > checkEnd {
		return fmt.Errorf("end day %d is always before start day %d", r.End, r.Begin)
	}
	return nil
}
// UnmarshalYAML implements the Unmarshaller interface for MonthRange.
// Accepts month names or numbers, singly or as a "begin:end" range, and
// rejects ranges whose end precedes their start.
func (r *MonthRange) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var str string
	if err := unmarshal(&str); err != nil {
		return err
	}
	if err := stringableRangeFromString(str, r); err != nil {
		return err
	}
	if r.Begin <= r.End {
		return nil
	}
	// Report the offending months by name for readability.
	begin := monthsInv[r.Begin]
	end := monthsInv[r.End]
	return fmt.Errorf("end month %s is before start month %s", end, begin)
}
// UnmarshalYAML implements the Unmarshaller interface for YearRange,
// rejecting ranges whose end year precedes their start year.
func (r *YearRange) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var str string
	if err := unmarshal(&str); err != nil {
		return err
	}
	if err := stringableRangeFromString(str, r); err != nil {
		return err
	}
	if r.Begin <= r.End {
		return nil
	}
	return fmt.Errorf("end year %d is before start year %d", r.End, r.Begin)
}
// UnmarshalYAML implements the Unmarshaller interface for TimeRanges.
// Both "HH:MM" bounds are required and the start must be strictly before
// the end.
func (tr *TimeRange) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw yamlTimeRange
	if err := unmarshal(&raw); err != nil {
		return err
	}
	if raw.EndTime == "" || raw.StartTime == "" {
		return errors.New("both start and end times must be provided")
	}
	startMins, err := parseTime(raw.StartTime)
	if err != nil {
		return err
	}
	endMins, err := parseTime(raw.EndTime)
	if err != nil {
		return err
	}
	if startMins >= endMins {
		return errors.New("start time cannot be equal or greater than end time")
	}
	tr.StartMinute, tr.EndMinute = startMins, endMins
	return nil
}
// MarshalYAML implements the yaml.Marshaler interface for WeekdayRange.
// A degenerate range (Begin == End) marshals as the bare weekday name,
// otherwise as "begin:end".
func (r WeekdayRange) MarshalYAML() (interface{}, error) {
	beginStr, ok := daysOfWeekInv[r.Begin]
	if !ok {
		return nil, fmt.Errorf("unable to convert %d into weekday string", r.Begin)
	}
	if r.Begin == r.End {
		return interface{}(beginStr), nil
	}
	endStr, ok := daysOfWeekInv[r.End]
	if !ok {
		return nil, fmt.Errorf("unable to convert %d into weekday string", r.End)
	}
	return interface{}(beginStr + ":" + endStr), nil
}
// MarshalYAML implements the yaml.Marshaler interface for TimeRange,
// rendering both bounds as zero-padded "HH:MM" strings.
func (tr TimeRange) MarshalYAML() (out interface{}, err error) {
	startStr := fmt.Sprintf("%02d:%02d", tr.StartMinute/60, tr.StartMinute%60)
	endStr := fmt.Sprintf("%02d:%02d", tr.EndMinute/60, tr.EndMinute%60)
	return interface{}(yamlTimeRange{startStr, endStr}), err
}
// MarshalYAML implements the yaml.Marshaler interface for InclusiveRange.
// A degenerate range marshals as a single number, otherwise as "begin:end".
func (ir InclusiveRange) MarshalYAML() (interface{}, error) {
	if ir.Begin == ir.End {
		return strconv.Itoa(ir.Begin), nil
	}
	return interface{}(strconv.Itoa(ir.Begin) + ":" + strconv.Itoa(ir.End)), nil
}
// TimeLayout specifies the layout to be used in time.Parse() calls for time intervals.
const TimeLayout = "15:04"
// validTime matches "HH:MM" with HH in 00-23 plus the special value "24:00";
// validTimeRE is its compiled form, shared by parseTime.
var validTime string = "^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$)"
var validTimeRE *regexp.Regexp = regexp.MustCompile(validTime)
// Given a time, determines the number of days in the month that time occurs in.
func daysInMonth(t time.Time) int {
monthStart := time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
monthEnd := monthStart.AddDate(0, 1, 0)
diff := monthEnd.Sub(monthStart)
return int(diff.Hours() / 24)
}
// clamp restricts n to the inclusive interval [min, max].
func clamp(n, min, max int) int {
	switch {
	case n <= min:
		return min
	case n >= max:
		return max
	default:
		return n
	}
}
// ContainsTime returns true if the TimeInterval contains the given time, otherwise returns false.
// Each non-nil component (times, days of month, months, weekdays, years) is
// checked in turn; within a component it suffices to match any one range,
// but every non-nil component must match.
func (tp TimeInterval) ContainsTime(t time.Time) bool {
	if tp.Times != nil {
		in := false
		// t matches if it falls in any [StartMinute, EndMinute) range.
		for _, validMinutes := range tp.Times {
			if (t.Hour()*60+t.Minute()) >= validMinutes.StartMinute && (t.Hour()*60+t.Minute()) < validMinutes.EndMinute {
				in = true
				break
			}
		}
		if !in {
			return false
		}
	}
	if tp.DaysOfMonth != nil {
		in := false
		for _, validDates := range tp.DaysOfMonth {
			var begin, end int
			daysInMonth := daysInMonth(t)
			// Negative indices count back from the end of the month
			// (-1 = last day).
			if validDates.Begin < 0 {
				begin = daysInMonth + validDates.Begin + 1
			} else {
				begin = validDates.Begin
			}
			if validDates.End < 0 {
				end = daysInMonth + validDates.End + 1
			} else {
				end = validDates.End
			}
			// Skip clamping if the beginning date is after the end of the month.
			if begin > daysInMonth {
				continue
			}
			// Clamp to the boundaries of the month to prevent crossing into other months.
			begin = clamp(begin, -1*daysInMonth, daysInMonth)
			end = clamp(end, -1*daysInMonth, daysInMonth)
			if t.Day() >= begin && t.Day() <= end {
				in = true
				break
			}
		}
		if !in {
			return false
		}
	}
	if tp.Months != nil {
		in := false
		for _, validMonths := range tp.Months {
			if t.Month() >= time.Month(validMonths.Begin) && t.Month() <= time.Month(validMonths.End) {
				in = true
				break
			}
		}
		if !in {
			return false
		}
	}
	if tp.Weekdays != nil {
		in := false
		for _, validDays := range tp.Weekdays {
			if t.Weekday() >= time.Weekday(validDays.Begin) && t.Weekday() <= time.Weekday(validDays.End) {
				in = true
				break
			}
		}
		if !in {
			return false
		}
	}
	if tp.Years != nil {
		in := false
		for _, validYears := range tp.Years {
			if t.Year() >= validYears.Begin && t.Year() <= validYears.End {
				in = true
				break
			}
		}
		if !in {
			return false
		}
	}
	// All non-nil components matched.
	return true
}
// parseTime converts a string of the form "HH:MM" into the number of
// minutes elapsed in the day ("24:00" yields 1440).
func parseTime(in string) (mins int, err error) {
	// The regexp is the primary validation; the checks below are defensive.
	if !validTimeRE.MatchString(in) {
		return 0, fmt.Errorf("couldn't parse timestamp %s, invalid format", in)
	}
	parts := strings.Split(in, ":")
	if len(parts) != 2 {
		return 0, fmt.Errorf("invalid timestamp format: %s", in)
	}
	hours, err := strconv.Atoi(parts[0])
	if err != nil {
		return 0, err
	}
	minutes, err := strconv.Atoi(parts[1])
	if err != nil {
		return 0, err
	}
	if hours < 0 || hours > 24 || minutes < 0 || minutes > 60 {
		return 0, fmt.Errorf("timestamp %s out of range", in)
	}
	// Timestamps are stored as minutes elapsed in the day.
	return hours*60 + minutes, nil
}
// Converts a range that can be represented as strings (e.g. monday:wednesday) into an equivalent integer-represented range.
func stringableRangeFromString(in string, r stringableRange) (err error) {
in = strings.ToLower(in)
if strings.ContainsRune(in, ':') {
components := strings.Split(in, ":")
if len(components) != 2 {
return fmt.Errorf("couldn't parse range %s, invalid format", in)
}
start, err := r.memberFromString(components[0])
if err != nil {
return err
}
End, err := r.memberFromString(components[1])
if err != nil {
return err
}
r.setBegin(start)
r.setEnd(End)
return nil
}
val, err := r.memberFromString(in)
if err != nil {
return err
}
r.setBegin(val)
r.setEnd(val)
return nil
} | timeinterval/timeinterval.go | 0.788502 | 0.468426 | timeinterval.go | starcoder |
package extract
import (
"bufio"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/gvso/ddjj/parser/declaration"
)
// stateTwoLines lists real-state labels whose values span two lines in the
// source document. NOTE(review): not referenced within this chunk —
// presumably consumed by getStateLine; confirm before removing.
var stateTwoLines = []string{
	"EXPLOTACION",
	"TERRENO SIN",
	"EDIFICACIONES",
	"EDIFICACION PARA",
	"ADJUDICACION SEGUN",
}
// totalState accumulates the declared total used for cross-checking;
// package-level mutable state reset by RealStates on completion.
var totalState int64
// stateItemNumber tracks the current item index while scanning the table.
var stateItemNumber int
// skipState lists header/label lines that must be ignored while scanning
// the "1.4 INMUEBLES" section; RealStates temporarily appends the current
// item number to it.
var skipState = []string{
	"#",
	"Nº FINCA",
	"DATOS PROTEGIDOS",
	"PAÍS:",
	"CTA. CTE. CTRAL. O PADRON",
	"USO",
	"DISTRITO:",
	"SUP. M2",
	"AÑO DE ADQ.",
	"VALOR CONST. G.",
	"CONST.",
	"VALOR TERRENO G.",
	"TIPO DE ADQ.:",
	"IMPORTE",
}
// RealStates returns the real states owned by the official.
// It scans the "1.4 INMUEBLES" section eleven values at a time, maps each
// group onto one or more RealState records, and cross-checks the summed
// amounts against the declared total.
func RealStates(scanner *bufio.Scanner) ([]*declaration.RealState, error) {
	scanner = MoveUntil(scanner, "1.4 INMUEBLES", true)

	var states []*declaration.RealState
	values := [11]string{}
	index := 0
	stateItemNumber = 1

	// Also wants to skip item number
	skipState = append(skipState, strconv.Itoa(stateItemNumber))

	// FIX: restore the package-level state on every exit path. Previously
	// the appended item number was never removed from skipState (so the
	// slice grew on each call) and the counters were not reset when the
	// totals did not match.
	defer func() {
		skipState = skipState[:len(skipState)-1]
		totalState = 0
		stateItemNumber = 0
	}()

	line, _ := getStateLine(scanner)
	for line != "" {
		values[index] = line

		// After reading all the possible values for a single item.
		if index == 10 {
			state := getState(scanner, values)
			states = append(states, state...)

			// Skip the next item number.
			stateItemNumber++
			skipState[len(skipState)-1] = strconv.Itoa(stateItemNumber)

			index = -1
		}

		index++
		line, _ = getStateLine(scanner)
	}

	// Cross-check the parsed records against the declared total.
	total := addRealState(states)
	if total != totalState {
		return nil, errors.New("real states do not match")
	}

	return states, nil
}
// getState inspects the 11 collected values, decides which of the five item
// layouts they follow, and dispatches to the matching parser. Layouts 4 and 5
// interleave two items and require one extra line from the scanner to be
// told apart.
func getState(scanner *bufio.Scanner, values [11]string) []*declaration.RealState {
	// Items whose first value is not a country follow layouts 2 or 3.
	if !isCountry(values[0]) {
		if isNumber(values[3]) {
			return getState2(values)
		}
		return getState3(values)
	}
	// Layout 1 ends with a non-numeric acquisition type in the last slot.
	if !isNumber(values[10]) {
		return getState1(values)
	}
	// Layouts 4 and 5 are distinguished by the nature of the next line.
	extra, _ := getStateLine(scanner)
	if isNumber(extra) {
		return getState4(values, extra, scanner)
	}
	return getState5(values, extra, scanner)
}
// getState1 maps layout 1, where the country leads and the acquisition type
// is the last of the 11 values.
func getState1(values [11]string) []*declaration.RealState {
	item := &declaration.RealState{
		Pais:                   values[0],
		Padron:                 values[1],
		Uso:                    values[2],
		Distrito:               values[3],
		SuperficieTerreno:      stringToInt64(values[4]),
		ValorTerreno:           stringToInt64(values[5]),
		Adquisicion:            stringToYear(values[6]),
		SuperficieConstruccion: stringToInt64(values[7]),
		ValorConstruccion:      stringToInt64(values[8]),
		Importe:                stringToInt64(values[9]),
		TipoAdquisicion:        values[10],
	}
	return []*declaration.RealState{item}
}
// getState2 maps layout 2, where padron and usage lead and the country
// appears as the fifth value.
func getState2(values [11]string) []*declaration.RealState {
	item := &declaration.RealState{
		Padron:                 values[0],
		Uso:                    values[1],
		SuperficieTerreno:      stringToInt64(values[2]),
		ValorTerreno:           stringToInt64(values[3]),
		Pais:                   values[4],
		Distrito:               values[5],
		Adquisicion:            stringToYear(values[6]),
		SuperficieConstruccion: stringToInt64(values[7]),
		ValorConstruccion:      stringToInt64(values[8]),
		Importe:                stringToInt64(values[9]),
		TipoAdquisicion:        values[10],
	}
	return []*declaration.RealState{item}
}
// getState3 maps layout 3, where padron and usage lead and the country
// appears as the third value.
func getState3(values [11]string) []*declaration.RealState {
	item := &declaration.RealState{
		Padron:                 values[0],
		Uso:                    values[1],
		Pais:                   values[2],
		Distrito:               values[3],
		SuperficieTerreno:      stringToInt64(values[4]),
		ValorTerreno:           stringToInt64(values[5]),
		Adquisicion:            stringToYear(values[6]),
		SuperficieConstruccion: stringToInt64(values[7]),
		ValorConstruccion:      stringToInt64(values[8]),
		Importe:                stringToInt64(values[9]),
		TipoAdquisicion:        values[10],
	}
	return []*declaration.RealState{item}
}
// getState4 handles the split layout in which two items interleave across a
// page break: the first item's TipoAdquisicion arrives as the 13th value,
// after fields of the second item have already started. It consumes the 10
// remaining lines of the second item from the scanner and returns both.
//
// NOTE(review): mutates the package-level stateItemNumber/skipState so the
// upcoming item number is skipped by getStateLine; not concurrency-safe.
func getState4(values [11]string, nextImporte string, scanner *bufio.Scanner) []*declaration.RealState {
	state1 := &declaration.RealState{
		Pais:                   values[0],
		Padron:                 values[1],
		Uso:                    values[2],
		Distrito:               values[3],
		SuperficieTerreno:      stringToInt64(values[4]),
		ValorTerreno:           stringToInt64(values[5]),
		Adquisicion:            stringToYear(values[6]),
		SuperficieConstruccion: stringToInt64(values[7]),
		ValorConstruccion:      stringToInt64(values[8]),
		Importe:                stringToInt64(values[9]),
		// TipoAdquisicion is the 13th value.
	}
	// Skip the next item number.
	stateItemNumber++
	skipState[len(skipState)-1] = strconv.Itoa(stateItemNumber)
	// Retrieve the 10 values missing from the next item.
	need := 10
	otherValues := [10]string{}
	for need > 0 {
		line, _ := getStateLine(scanner)
		otherValues[10-need] = line
		need--
	}
	// 11 regular values + 1 extra value. The type is in the 13th value, so index 0.
	state1.TipoAdquisicion = otherValues[0]
	state2 := &declaration.RealState{
		ValorConstruccion:      stringToInt64(values[10]),
		Importe:                stringToInt64(nextImporte),
		Pais:                   otherValues[1],
		Padron:                 otherValues[2],
		Uso:                    otherValues[3],
		Distrito:               otherValues[4],
		SuperficieTerreno:      stringToInt64(otherValues[5]),
		ValorTerreno:           stringToInt64(otherValues[6]),
		Adquisicion:            stringToYear(otherValues[7]),
		SuperficieConstruccion: stringToInt64(otherValues[8]),
		TipoAdquisicion:        otherValues[9],
	}
	return []*declaration.RealState{state1, state2}
}
// getState5 handles the other split layout: the first item's acquisition
// type has already been read (tipoAdq), and values[7] actually belongs to
// the second item (its Adquisicion year). It consumes the 10 remaining
// lines of the second item from the scanner and returns both items.
//
// NOTE(review): mutates the package-level stateItemNumber/skipState so the
// upcoming item number is skipped by getStateLine; not concurrency-safe.
func getState5(values [11]string, tipoAdq string, scanner *bufio.Scanner) []*declaration.RealState {
	state1 := &declaration.RealState{
		Pais:              values[0],
		Padron:            values[1],
		Uso:               values[2],
		Distrito:          values[3],
		SuperficieTerreno: stringToInt64(values[4]),
		ValorTerreno:      stringToInt64(values[5]),
		Adquisicion:       stringToYear(values[6]),
		// Adquisicion of the next item is values[7]
		SuperficieConstruccion: stringToInt64(values[8]),
		ValorConstruccion:      stringToInt64(values[9]),
		Importe:                stringToInt64(values[10]),
		TipoAdquisicion:        tipoAdq,
	}
	// Skip the next item number.
	stateItemNumber++
	skipState[len(skipState)-1] = strconv.Itoa(stateItemNumber)
	// Retrieve the 10 values missing from the next item.
	need := 10
	otherValues := [10]string{}
	for need > 0 {
		line, _ := getStateLine(scanner)
		otherValues[10-need] = line
		need--
	}
	state2 := &declaration.RealState{
		Adquisicion:            stringToYear(values[7]),
		Pais:                   otherValues[0],
		Padron:                 otherValues[1],
		Uso:                    otherValues[2],
		Distrito:               otherValues[3],
		SuperficieTerreno:      stringToInt64(otherValues[4]),
		ValorTerreno:           stringToInt64(otherValues[5]),
		SuperficieConstruccion: stringToInt64(otherValues[6]),
		ValorConstruccion:      stringToInt64(otherValues[7]),
		Importe:                stringToInt64(otherValues[8]),
		TipoAdquisicion:        otherValues[9],
	}
	return []*declaration.RealState{state1, state2}
}
// getStateLine returns the next meaningful line of the real-state section,
// skipping headers, dates, bar codes and the current item number. nextPage
// reports whether the scanner crossed a page boundary while reading.
//
// On "TOTAL INMUEBLES:" it stores the declared section total into the
// package-level totalState, advances to the next page's "Nº FINCA" header,
// and resets the item counter. Labels listed in stateTwoLines are joined
// (recursively) with the line that follows them. Returns "" when the
// scanner is exhausted.
func getStateLine(scanner *bufio.Scanner) (line string, nextPage bool) {
	for scanner.Scan() {
		line = scanner.Text()
		// Stop looking for real state when this is found.
		if line == "TOTAL INMUEBLES:" {
			totalState = getTotalInCategory(scanner)
			// Next page or end.
			scanner = MoveUntil(scanner, "Nº FINCA", true)
			line = scanner.Text()
			nextPage = true
			stateItemNumber = 1
			skipState[len(skipState)-1] = strconv.Itoa(stateItemNumber)
		}
		if contains(stateTwoLines, line) {
			// Two-line label: merge with its continuation line.
			nextLine, _ := getStateLine(scanner)
			line += " " + nextLine
		}
		if strings.Contains(line, "OBS:") || strings.Contains(line, "RECEPCIONADO EL:") {
			continue
		}
		if isDate(line) || isBarCode(line) {
			continue
		}
		if line == "" || contains(skipState, line) {
			continue
		}
		return line, nextPage
	}
	return "", false
}
func addRealState(states []*declaration.RealState) int64 {
var total int64
for _, d := range states {
total += d.Importe
}
return total
} | parser/extract/state.go | 0.514888 | 0.543469 | state.go | starcoder |
package command
import (
"encoding/json"
"fmt"
"testing"
"github.com/infracloudio/botkube/pkg/execute"
"github.com/infracloudio/botkube/test/e2e/env"
"github.com/nlopes/slack"
"github.com/stretchr/testify/assert"
)
// kubectlCommand describes one kubectl e2e test case: the command sent to
// BotKube, the expected reply text, and the Slack channel used.
type kubectlCommand struct {
	command  string
	expected string
	channel  string
}
// context bundles the shared e2e test environment for the command suite.
// NOTE(review): the name shadows the standard library "context" package
// within this package; consider renaming if std context is ever needed here.
type context struct {
	*env.TestEnv
}
// testKubectlCommand sends kubectl commands via Slack messages and checks
// that BotKube replies with the expected text on the expected channel.
// Cases cover the configured channel, an unconfigured channel, and
// forbidden verbs/resources. Assertions only run when Slack is enabled
// in the test environment configuration.
func (c *context) testKubectlCommand(t *testing.T) {
	// Test cases
	tests := map[string]kubectlCommand{
		"BotKube get pods from configured channel": {
			command:  "get pods",
			expected: fmt.Sprintf("```Cluster: %s\n%s```", c.Config.Settings.ClusterName, execute.KubectlResponse["-n default get pods"]),
			channel:  c.Config.Communications.Slack.Channel,
		},
		"BotKube get pods out of configured channel": {
			command: "get pods",
			// BotKube ignores unconfigured channels, so the last message seen
			// is the user's own mention text. Plain literal: the previous
			// fmt.Sprintf had no format verbs (staticcheck S1039).
			expected: "<@U023BECGF> get pods",
			channel:  "dummy",
		},
		"kubectl command on forbidden verb and resource": {
			command:  "config set clusters.test-clustor-1.server https://172.16.17.32",
			expected: "```Command not supported. Please run /botkubehelp to see supported commands.```",
			channel:  c.Config.Communications.Slack.Channel,
		},
		"kubectl command on forbidden resource": {
			command:  "get endpoints",
			expected: "```Command not supported. Please run /botkubehelp to see supported commands.```",
			channel:  c.Config.Communications.Slack.Channel,
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			if c.TestEnv.Config.Communications.Slack.Enabled {
				// Send message to a channel
				c.SlackServer.SendMessageToBot(test.channel, test.command)
				// Get last seen slack message
				lastSeenMsg := c.GetLastSeenSlackMessage()
				// Convert text message into Slack message structure
				m := slack.Message{}
				err := json.Unmarshal([]byte(*lastSeenMsg), &m)
				assert.NoError(t, err, "message should decode properly")
				assert.Equal(t, test.channel, m.Channel)
				assert.Equal(t, test.expected, m.Text)
			}
		})
	}
}
// Run executes the command e2e test suite, one sub-test per command group,
// in a fixed order.
func (c *context) Run(t *testing.T) {
	suites := []struct {
		name string
		fn   func(*testing.T)
	}{
		{"Test Kubectl command", c.testKubectlCommand},
		{"Test BotKube command", c.testBotkubeCommand},
		{"Test disable notifier", c.testNotifierCommand},
	}
	for _, s := range suites {
		t.Run(s.name, s.fn)
	}
}
// E2ETests runs command execution tests
func E2ETests(testEnv *env.TestEnv) env.E2ETest {
return &context{
testEnv,
}
} | test/e2e/command/kubectl.go | 0.624523 | 0.653182 | kubectl.go | starcoder |
package build
import (
"flag"
"os"
"github.com/mmcloughlin/avo/attr"
"github.com/mmcloughlin/avo/buildtags"
"github.com/mmcloughlin/avo/gotypes"
"github.com/mmcloughlin/avo/operand"
"github.com/mmcloughlin/avo/reg"
)
// ctx provides a global build context.
// Every package-level helper in this file delegates to it.
var ctx = NewContext()
// TEXT starts building a new function called name, with attributes a, and sets its signature (see SignatureExpr).
func TEXT(name string, a attr.Attribute, signature string) {
	ctx.Function(name)
	ctx.Attributes(a)
	ctx.SignatureExpr(signature)
}
// GLOBL declares a new static global data section with the given attributes.
// It returns a memory operand referencing the new section.
func GLOBL(name string, a attr.Attribute) operand.Mem {
	// TODO(mbm): should this be static?
	g := ctx.StaticGlobal(name)
	ctx.DataAttributes(a)
	return g
}
// DATA adds a data value to the active data section at the given byte offset.
func DATA(offset int, v operand.Constant) {
	ctx.AddDatum(offset, v)
}
// flags holds configuration options registered on the standard command line.
var flags = NewFlags(flag.CommandLine)
// Generate builds and compiles the avo file built with the global context. This
// should be the final line of any avo program. Configuration is determined from command-line flags.
func Generate() {
	if !flag.Parsed() {
		flag.Parse()
	}
	cfg := flags.Config()
	status := Main(cfg, ctx)
	// To record coverage of integration tests we wrap main() functions in test
	// functions. In this case we need the main function to terminate, therefore we
	// only exit for failure status codes.
	if status != 0 {
		os.Exit(status)
	}
}
// Package sets the package the generated file will belong to. Required to be able to reference types in the package.
func Package(path string) { ctx.Package(path) }
// Constraints sets build constraints for the file.
func Constraints(t buildtags.ConstraintsConvertable) { ctx.Constraints(t) }
// Constraint appends a constraint to the file's build constraints.
func Constraint(t buildtags.ConstraintConvertable) { ctx.Constraint(t) }
// ConstraintExpr appends a constraint to the file's build constraints. The
// constraint to add is parsed from the given expression. The expression should
// look the same as the content following "// +build " in regular build
// constraint comments.
func ConstraintExpr(expr string) { ctx.ConstraintExpr(expr) }
// GP8 allocates and returns a general-purpose 8-bit register.
func GP8() reg.GPVirtual { return ctx.GP8() }
// GP16 allocates and returns a general-purpose 16-bit register.
func GP16() reg.GPVirtual { return ctx.GP16() }
// GP32 allocates and returns a general-purpose 32-bit register.
func GP32() reg.GPVirtual { return ctx.GP32() }
// GP64 allocates and returns a general-purpose 64-bit register.
func GP64() reg.GPVirtual { return ctx.GP64() }
// XMM allocates and returns a 128-bit vector register.
func XMM() reg.VecVirtual { return ctx.XMM() }
// YMM allocates and returns a 256-bit vector register.
func YMM() reg.VecVirtual { return ctx.YMM() }
// ZMM allocates and returns a 512-bit vector register.
func ZMM() reg.VecVirtual { return ctx.ZMM() }
// Param returns the named argument of the active function.
func Param(name string) gotypes.Component { return ctx.Param(name) }
// ParamIndex returns the ith argument of the active function.
func ParamIndex(i int) gotypes.Component { return ctx.ParamIndex(i) }
// Return returns the named return value of the active function.
func Return(name string) gotypes.Component { return ctx.Return(name) }
// ReturnIndex returns the ith return value of the active function.
func ReturnIndex(i int) gotypes.Component { return ctx.ReturnIndex(i) }
// Load the function argument src into register dst. Returns the destination
// register. This is syntactic sugar: it will attempt to select the right MOV
// instruction based on the types involved.
func Load(src gotypes.Component, dst reg.Register) reg.Register { return ctx.Load(src, dst) }
// Store register src into return value dst. This is syntactic sugar: it will
// attempt to select the right MOV instruction based on the types involved.
func Store(src reg.Register, dst gotypes.Component) { ctx.Store(src, dst) }
// Dereference loads a pointer and returns its element type.
func Dereference(ptr gotypes.Component) gotypes.Component { return ctx.Dereference(ptr) }
// Doc sets documentation comment lines for the currently active function.
func Doc(lines ...string) { ctx.Doc(lines...) }
// Pragma adds a compiler directive to the currently active function.
func Pragma(directive string, args ...string) { ctx.Pragma(directive, args...) }
// Attributes sets function attributes for the currently active function.
func Attributes(a attr.Attribute) { ctx.Attributes(a) }
// Implement starts building a function of the given name, whose type is
// specified by a stub in the containing package.
func Implement(name string) { ctx.Implement(name) }
// AllocLocal allocates size bytes in the stack of the currently active function.
// Returns a reference to the base pointer for the newly allocated region.
func AllocLocal(size int) operand.Mem { return ctx.AllocLocal(size) }
// Label adds a label to the active function.
func Label(name string) { ctx.Label(name) }
// Comment adds comment lines to the active function.
func Comment(lines ...string) { ctx.Comment(lines...) }
// Commentf adds a formatted comment line.
func Commentf(format string, a ...interface{}) { ctx.Commentf(format, a...) }
// ConstData builds a static data section containing just the given constant.
func ConstData(name string, v operand.Constant) operand.Mem { return ctx.ConstData(name, v) }
package sarama
import (
"hash"
"hash/fnv"
"math/rand"
"time"
)
// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
// as simple default implementations.
type Partitioner interface {
	// Partition takes a message and partition count and chooses a partition.
	// It returns the chosen partition index, or an error if one could not be
	// determined.
	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
	// RequiresConsistency indicates to the user of the partitioner whether the
	// mapping of key->partition is consistent or not. Specifically, if a
	// partitioner requires consistency then it must be allowed to choose from all
	// partitions (even ones known to be unavailable), and its choice must be
	// respected by the caller. The obvious example is the HashPartitioner.
	RequiresConsistency() bool
}
// PartitionerConstructor is the type for a function capable of constructing
// new Partitioners for a given topic.
type PartitionerConstructor func(topic string) Partitioner
// manualPartitioner trusts the partition already present on each message.
type manualPartitioner struct{}

// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
// ProducerMessage's Partition field as the partition to produce to.
func NewManualPartitioner(topic string) Partitioner {
	return &manualPartitioner{}
}

// Partition returns the partition pre-set on the message itself.
func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	return message.Partition, nil
}

// RequiresConsistency reports true: the manual choice must be honored even
// for unavailable partitions.
func (p *manualPartitioner) RequiresConsistency() bool {
	return true
}
// randomPartitioner spreads messages uniformly at random.
type randomPartitioner struct {
	generator *rand.Rand
}

// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
func NewRandomPartitioner(topic string) Partitioner {
	return &randomPartitioner{
		generator: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
	}
}

// Partition picks a uniformly random partition in [0, numPartitions).
func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	return int32(p.generator.Intn(int(numPartitions))), nil
}

// RequiresConsistency reports false: random placement carries no key mapping.
func (p *randomPartitioner) RequiresConsistency() bool {
	return false
}
// roundRobinPartitioner cycles through partitions in order.
type roundRobinPartitioner struct {
	partition int32
}

// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
func NewRoundRobinPartitioner(topic string) Partitioner {
	return &roundRobinPartitioner{}
}

// Partition returns the next partition in sequence, wrapping back to zero
// after the last one.
func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
	if p.partition >= numPartitions {
		p.partition = 0
	}
	next := p.partition
	p.partition++
	return next, nil
}

// RequiresConsistency reports false: round-robin carries no key mapping.
func (p *roundRobinPartitioner) RequiresConsistency() bool {
	return false
}
type hashPartitioner struct {
random Partitioner
hasher hash.Hash32
}
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
// modulus the number of partitions. This ensures that messages with the same key always end up on the
// same partition.
func NewHashPartitioner(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = fnv.New32a()
return p
}
func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if message.Key == nil {
return p.random.Partition(message, numPartitions)
}
bytes, err := message.Key.Encode()
if err != nil {
return -1, err
}
p.hasher.Reset()
_, err = p.hasher.Write(bytes)
if err != nil {
return -1, err
}
partition := int32(p.hasher.Sum32()) % numPartitions
if partition < 0 {
partition = -partition
}
return partition, nil
}
func (p *hashPartitioner) RequiresConsistency() bool {
return true
} | vendor/github.com/Shopify/sarama/partitioner.go | 0.732305 | 0.415432 | partitioner.go | starcoder |
package constant
import (
"fmt"
"github.com/umaumax/llvm/ir/types"
)
// --- [ Conversion expressions ] ----------------------------------------------
// ~~~ [ trunc ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprTrunc is an LLVM IR trunc expression, truncating an integer constant
// to a smaller integer type.
type ExprTrunc struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewTrunc returns a new trunc expression based on the given source value and
// target type.
func NewTrunc(from Constant, to types.Type) *ExprTrunc {
	e := &ExprTrunc{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprTrunc) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprTrunc) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprTrunc) Ident() string {
	// 'trunc' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("trunc (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of trunc expressions is not yet implemented; calling
// Simplify panics.
func (e *ExprTrunc) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ zext ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprZExt is an LLVM IR zext expression, zero-extending an integer constant
// to a larger integer type.
type ExprZExt struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewZExt returns a new zext expression based on the given source value and
// target type.
func NewZExt(from Constant, to types.Type) *ExprZExt {
	e := &ExprZExt{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprZExt) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprZExt) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprZExt) Ident() string {
	// 'zext' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("zext (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of zext expressions is not yet implemented; calling
// Simplify panics.
func (e *ExprZExt) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ sext ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprSExt is an LLVM IR sext expression, sign-extending an integer constant
// to a larger integer type.
type ExprSExt struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewSExt returns a new sext expression based on the given source value and
// target type.
func NewSExt(from Constant, to types.Type) *ExprSExt {
	e := &ExprSExt{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprSExt) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprSExt) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprSExt) Ident() string {
	// 'sext' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("sext (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of sext expressions is not yet implemented; calling
// Simplify panics.
func (e *ExprSExt) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ fptrunc ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprFPTrunc is an LLVM IR fptrunc expression, truncating a floating-point
// constant to a smaller floating-point type.
type ExprFPTrunc struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewFPTrunc returns a new fptrunc expression based on the given source value
// and target type.
func NewFPTrunc(from Constant, to types.Type) *ExprFPTrunc {
	e := &ExprFPTrunc{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprFPTrunc) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprFPTrunc) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprFPTrunc) Ident() string {
	// 'fptrunc' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("fptrunc (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of fptrunc expressions is not yet implemented;
// calling Simplify panics.
func (e *ExprFPTrunc) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ fpext ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprFPExt is an LLVM IR fpext expression, extending a floating-point
// constant to a larger floating-point type.
type ExprFPExt struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewFPExt returns a new fpext expression based on the given source value and
// target type.
func NewFPExt(from Constant, to types.Type) *ExprFPExt {
	e := &ExprFPExt{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprFPExt) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprFPExt) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprFPExt) Ident() string {
	// 'fpext' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("fpext (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of fpext expressions is not yet implemented; calling
// Simplify panics.
func (e *ExprFPExt) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ fptoui ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprFPToUI is an LLVM IR fptoui expression, converting a floating-point
// constant to an unsigned integer type.
type ExprFPToUI struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewFPToUI returns a new fptoui expression based on the given source value and
// target type.
func NewFPToUI(from Constant, to types.Type) *ExprFPToUI {
	e := &ExprFPToUI{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprFPToUI) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprFPToUI) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprFPToUI) Ident() string {
	// 'fptoui' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("fptoui (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of fptoui expressions is not yet implemented;
// calling Simplify panics.
func (e *ExprFPToUI) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ fptosi ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprFPToSI is an LLVM IR fptosi expression, converting a floating-point
// constant to a signed integer type.
type ExprFPToSI struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewFPToSI returns a new fptosi expression based on the given source value and
// target type.
func NewFPToSI(from Constant, to types.Type) *ExprFPToSI {
	e := &ExprFPToSI{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprFPToSI) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprFPToSI) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprFPToSI) Ident() string {
	// 'fptosi' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("fptosi (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of fptosi expressions is not yet implemented;
// calling Simplify panics.
func (e *ExprFPToSI) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ uitofp ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprUIToFP is an LLVM IR uitofp expression, converting an unsigned integer
// constant to a floating-point type.
type ExprUIToFP struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewUIToFP returns a new uitofp expression based on the given source value and
// target type.
func NewUIToFP(from Constant, to types.Type) *ExprUIToFP {
	e := &ExprUIToFP{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprUIToFP) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprUIToFP) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprUIToFP) Ident() string {
	// 'uitofp' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("uitofp (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of uitofp expressions is not yet implemented;
// calling Simplify panics.
func (e *ExprUIToFP) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ sitofp ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprSIToFP is an LLVM IR sitofp expression, converting a signed integer
// constant to a floating-point type.
type ExprSIToFP struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}
// NewSIToFP returns a new sitofp expression based on the given source value and
// target type.
func NewSIToFP(from Constant, to types.Type) *ExprSIToFP {
	e := &ExprSIToFP{From: from, To: to}
	// Compute type.
	e.Type()
	return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprSIToFP) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprSIToFP) Type() types.Type {
	return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprSIToFP) Ident() string {
	// 'sitofp' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("sitofp (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
//
// Note: constant folding of sitofp expressions is not yet implemented;
// calling Simplify panics.
func (e *ExprSIToFP) Simplify() Constant {
	panic("not yet implemented")
}
// ~~~ [ ptrtoint ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprPtrToInt is an LLVM IR ptrtoint expression.
type ExprPtrToInt struct {
// Value before conversion.
From Constant
// Type after conversion.
To types.Type
}
// NewPtrToInt returns a new ptrtoint expression based on the given source value
// and target type.
func NewPtrToInt(from Constant, to types.Type) *ExprPtrToInt {
e := &ExprPtrToInt{From: from, To: to}
// Compute type.
e.Type()
return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprPtrToInt) String() string {
return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprPtrToInt) Type() types.Type {
return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprPtrToInt) Ident() string {
// 'ptrtoint' '(' From=TypeConst 'to' To=Type ')'
return fmt.Sprintf("ptrtoint (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
func (e *ExprPtrToInt) Simplify() Constant {
panic("not yet implemented")
}
// ~~~ [ inttoptr ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprIntToPtr is an LLVM IR inttoptr expression.
type ExprIntToPtr struct {
// Value before conversion.
From Constant
// Type after conversion.
To types.Type
}
// NewIntToPtr returns a new inttoptr expression based on the given source value
// and target type.
func NewIntToPtr(from Constant, to types.Type) *ExprIntToPtr {
e := &ExprIntToPtr{From: from, To: to}
// Compute type.
e.Type()
return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprIntToPtr) String() string {
return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprIntToPtr) Type() types.Type {
return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprIntToPtr) Ident() string {
// 'inttoptr' '(' From=TypeConst 'to' To=Type ')'
return fmt.Sprintf("inttoptr (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
func (e *ExprIntToPtr) Simplify() Constant {
panic("not yet implemented")
}
// ~~~ [ bitcast ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ExprBitCast is an LLVM IR bitcast expression.
type ExprBitCast struct {
// Value before conversion.
From Constant
// Type after conversion.
To types.Type
}
// NewBitCast returns a new bitcast expression based on the given source value
// and target type.
func NewBitCast(from Constant, to types.Type) *ExprBitCast {
e := &ExprBitCast{From: from, To: to}
// Compute type.
e.Type()
return e
}
// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprBitCast) String() string {
return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}
// Type returns the type of the constant expression.
func (e *ExprBitCast) Type() types.Type {
return e.To
}
// Ident returns the identifier associated with the constant expression.
func (e *ExprBitCast) Ident() string {
// 'bitcast' '(' From=TypeConst 'to' To=Type ')'
return fmt.Sprintf("bitcast (%s to %s)", e.From, e.To)
}
// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression.
func (e *ExprBitCast) Simplify() Constant {
panic("not yet implemented")
}
// ~~~ [ addrspacecast ] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// ExprAddrSpaceCast is an LLVM IR addrspacecast expression.
type ExprAddrSpaceCast struct {
	// Value before conversion.
	From Constant
	// Type after conversion.
	To types.Type
}

// NewAddrSpaceCast returns a new addrspacecast expression based on the given
// source value and target type.
func NewAddrSpaceCast(from Constant, to types.Type) *ExprAddrSpaceCast {
	e := &ExprAddrSpaceCast{From: from, To: to}
	// Compute type. (Type simply returns e.To here; the call mirrors the
	// construction pattern of the other constant expressions in this file.)
	e.Type()
	return e
}

// String returns the LLVM syntax representation of the constant expression as a
// type-value pair.
func (e *ExprAddrSpaceCast) String() string {
	return fmt.Sprintf("%s %s", e.Type(), e.Ident())
}

// Type returns the type of the constant expression, which for an addrspacecast
// is the target type of the conversion.
func (e *ExprAddrSpaceCast) Type() types.Type {
	return e.To
}

// Ident returns the identifier associated with the constant expression.
func (e *ExprAddrSpaceCast) Ident() string {
	// 'addrspacecast' '(' From=TypeConst 'to' To=Type ')'
	return fmt.Sprintf("addrspacecast (%s to %s)", e.From, e.To)
}

// Simplify returns an equivalent (and potentially simplified) constant to the
// constant expression. Constant folding is not yet implemented; calling
// Simplify always panics.
func (e *ExprAddrSpaceCast) Simplify() Constant {
	panic("not yet implemented")
}
package data
import (
"fmt"
"io/ioutil"
)
// A Segment is a block of memory divided into uint8s.
type Segment struct {
	// Mem holds the raw cell values backing the segment.
	Mem []uint8
	// smap, when non-nil, may intercept reads and writes (see Get and Set).
	smap *SoftMap
}
// SegmentReader is implemented by types that accept a Segment to read from.
type SegmentReader interface {
	UseReadSegment(*Segment)
}

// SegmentWriter is implemented by types that accept a Segment to write to.
type SegmentWriter interface {
	UseWriteSegment(*Segment)
}

// A Getter can return a byte from a given address.
type Getter interface {
	Get(int) uint8
}

// A Setter can set the value at a given address to a given byte.
type Setter interface {
	Set(int, uint8)
}
// NewSegment returns a memory segment backed by size zeroed cells and no
// soft map installed.
func NewSegment(size int) *Segment {
	return &Segment{Mem: make([]uint8, size)}
}
// UseSoftMap installs sm as the hook consulted by Get and Set.
func (s *Segment) UseSoftMap(sm *SoftMap) {
	s.smap = sm
}

// Size returns the size of the given segment.
func (s *Segment) Size() int {
	return len(s.Mem)
}
// CopySlice copies the contents of a slice of uint8s into a segment.
// It returns the number of bytes written, or an error when the target
// range [start, start+len(bytes)) falls outside the segment.
func (s *Segment) CopySlice(start int, bytes []uint8) (int, error) {
	end := start + len(bytes)
	if start < 0 || end > len(s.Mem) {
		return 0, fmt.Errorf("destination slice is out of bounds: %v, %v", start, end)
	}
	copy(s.Mem[start:end], bytes)
	return len(bytes), nil
}
// Set will set the value at a given cell. If a write function is
// registered for this cell, then we will call that and exit.
func (s *Segment) Set(addr int, val uint8) {
	if s.smap != nil {
		// Give the soft map first refusal; a true result means the write
		// was fully handled by the hook and must not touch s.Mem.
		ok := s.smap.Write(addr, val)
		if ok {
			return
		}
	}
	s.Mem[addr] = val
}

// DirectSet stores val at addr, always bypassing any registered soft map.
func (s *Segment) DirectSet(addr int, val uint8) {
	s.Mem[addr] = val
}

// Get will get the value from a given cell. If a read function is
// registered, we will return whatever that is; otherwise we will return
// the value directly.
func (s *Segment) Get(addr int) uint8 {
	if s.smap != nil {
		val, ok := s.smap.Read(addr)
		if ok {
			return val
		}
	}
	return s.Mem[addr]
}

// DirectGet reads the value at addr, always bypassing any registered soft map.
func (s *Segment) DirectGet(addr int) uint8 {
	return s.Mem[addr]
}
// WriteFile writes the contents of this segment out to a file.
// The file is created with permissions 0644 if it does not already exist.
func (s *Segment) WriteFile(path string) error {
	// uint8 is an alias for byte, so s.Mem is already a []byte; the previous
	// element-by-element copy was unnecessary. ioutil.WriteFile only reads
	// the slice, so no defensive copy is needed either.
	return ioutil.WriteFile(path, s.Mem, 0644)
}
// ReadFile will read the contents of a given file into the segment
// receiver.
func (s *Segment) ReadFile(path string) error {
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
s.Mem = make([]uint8, len(data))
for i, b := range data {
s.Mem[i] = uint8(b)
}
return nil
} | pkg/data/segment.go | 0.800887 | 0.463748 | segment.go | starcoder |
package nn
import (
"github.com/nlpodyssey/spago/pkg/ml/ag"
"reflect"
)
// ProcessingMode regulates the different usage of some operations (e.g. Dropout, BatchNorm, etc.) inside a Processor,
// depending on whether you're doing training or inference.
// Failing to set the right mode will yield inconsistent inference results.
type ProcessingMode int

const (
	// Training is to be used during the training phase of a model. For example, dropouts are enabled.
	Training ProcessingMode = iota
	// Inference keeps weights fixed while using the model and disables some operations (e.g. skip dropout).
	Inference
)

// Processor performs the operations on the computational graphs using the model's parameters.
type Processor interface {
	// GetModel returns the model the processor belongs to.
	GetModel() Model
	// GetMode returns whether the processor is being used for training or inference.
	GetMode() ProcessingMode
	// SetMode tells the processor to operate in training or inference mode.
	SetMode(mode ProcessingMode)
	// GetGraph returns the computational graph on which the processor operates.
	GetGraph() *ag.Graph
	// RequiresFullSeq returns whether the processor needs the complete sequence to start processing
	// (as in the case of BiRNN and other bidirectional models), or not.
	RequiresFullSeq() bool
	// Forward performs the forward step for each input and returns the result.
	// Recurrent networks treat the input nodes as a sequence.
	// Differently, feed-forward networks are stateless so every computation is independent.
	Forward(xs ...ag.Node) []ag.Node
}
// SetProcessingMode sets the processing mode to a group of processors.
// Nil entries — including interfaces holding typed nil pointers — are
// skipped rather than dereferenced.
func SetProcessingMode(mode ProcessingMode, ps ...Processor) {
	for _, proc := range ps {
		if proc == nil {
			// reflect.ValueOf(nil) yields an invalid Value whose IsNil
			// would panic, so filter untyped nils first.
			continue
		}
		// reflect.Value.IsNil panics for non-nilable kinds (e.g. a
		// Processor implemented by a struct value), so only consult it
		// for kinds where nil is actually possible.
		v := reflect.ValueOf(proc)
		switch v.Kind() {
		case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface:
			if v.IsNil() {
				continue
			}
		}
		proc.SetMode(mode)
	}
}
// BaseProcessor satisfies some methods of the Processor interface.
// It is meant to be embedded in other processors to reduce the amount of boilerplate code.
type BaseProcessor struct {
	Model Model
	Mode ProcessingMode
	Graph *ag.Graph
	// FullSeqProcessing is the value reported by RequiresFullSeq.
	FullSeqProcessing bool
}
// GetModel returns the model the processor belongs to.
func (p *BaseProcessor) GetModel() Model {
	return p.Model
}

// GetMode returns whether the processor is being used for training or inference.
func (p *BaseProcessor) GetMode() ProcessingMode {
	return p.Mode
}

// SetMode tells the processor to operate in training or inference mode.
// It must be overridden whenever the processor includes sub-processors.
func (p *BaseProcessor) SetMode(mode ProcessingMode) {
	p.Mode = mode
}

// GetGraph returns the computational graph on which the processor operates.
func (p *BaseProcessor) GetGraph() *ag.Graph {
	return p.Graph
}

// RequiresFullSeq returns whether the processor needs the complete sequence to start processing
// (as in the case of BiRNN and other bidirectional models), or not.
func (p *BaseProcessor) RequiresFullSeq() bool {
	return p.FullSeqProcessing
}
package showdown
import (
"github.com/JohnnyS318/RoyalAfgInGo/services/poker/models"
)
// rankSpecificHand generates a rank identification number for a 5-card
// array (one candidate hand chosen out of the 7 cards). Higher return
// values denote better hands, so two ranks can be compared with >.
func rankSpecificHand(cards []models.Card) int {
	// 1 Byte Number:
	// 4 MSB: Describe Hand State (0: High Card - 8: Straight Flush)
	// 4 LSB: Describe State Identifier via Highest Card (0: Card Value 2 - 12: Card Value Ace)
	// Hand Identifier ranges from 0b00000000 (Theoretically Highest Card Two) to 0b10011101 (Straight Flush with Ace -> Royal Flush)
	// If a given Hand h1 is better than another h2, h1 > h2 always yields true (minor exceptions apply).
	identifier := cards[0].Value
	startColor := cards[0].Color // Needed for checking flushes (all the same color).
	// lowPair1/lowPair2/overflow track the 2nd/3rd/4th distinct card values
	// seen so far (-1 while unset); identifier always holds the highest.
	lowPair1 := -1
	lowPair2 := -1
	overflow := -1
	y := 0 // Y - Method: +1 when a card equals the first card's value, -1 otherwise.
	firstCard := identifier // Value of the first card
	mostSignificantValue := -1 // relevant for pairs, if set
	secondSignificantValue := -1
	minValue := identifier
	validStates := 0b111111111 // Initially all 9 hand states are valid (bit i = hand state i).
	for i := 1; i < 5; i++ {
		currentValue := cards[i].Value // value of the current card
		// Check for flush: any color mismatch rules out flush and straight flush.
		if (cards[i].Color) != startColor {
			validStates = validStates & 0b011011111
		}
		// Check minimum Value
		if currentValue < minValue {
			minValue = currentValue
		}
		// Update Y-State
		if firstCard == currentValue {
			y++
		} else {
			y--
		}
		if currentValue == identifier || currentValue == lowPair1 || currentValue == lowPair2 || currentValue == overflow {
			// Current was previously visited (is already in hand) (pair).
			// A repeated value excludes high card, straight, flush and
			// straight flush (bits 0, 4, 5, 8).
			validStates = validStates & 0b011001110
			if currentValue < mostSignificantValue {
				secondSignificantValue = currentValue
			} else if currentValue > mostSignificantValue {
				secondSignificantValue = mostSignificantValue
				mostSignificantValue = currentValue
			}
		} else {
			// Current was not seen before (not a pair)
			overflow = lowPair2 // If overflow gets a value => 4 different cards
			if currentValue > identifier {
				// Move all value state vars
				lowPair2 = lowPair1
				lowPair1 = identifier
				identifier = currentValue
			} else if currentValue > lowPair1 {
				// Move lowPair1 to lowPair2
				lowPair2 = lowPair1
				lowPair1 = currentValue
			} else if currentValue > lowPair2 {
				// Nothing to move
				lowPair2 = currentValue
			} else {
				// Current is smaller than the previous cards.
				overflow = currentValue
			}
			// Check if we have 4 or 5 different cards.
			if overflow != -1 {
				validStates = validStates & 0b100100011
			}
		}
	}
	//____further exclusion logic_____
	// This algorithm works by excluding all impossible cases.
	// 2 different cards
	if lowPair2 == -1 {
		// only 2 different cards... full house or 4 pair
		validStates &= 0b011000000
	} else if overflow == -1 {
		// only 3 different cards => only 2 pair and 3 pair
		validStates &= 0b000001100
	}
	if y == 0 || y == -2 {
		// no full house
		validStates &= 0b101111111
	} else if y == 2 || y == 4 {
		// no 4 pair
		validStates &= 0b110111111
	}
	// secondSignificantValue has a value when 4 pair
	if secondSignificantValue == -1 {
		// No 3 pair
		validStates = validStates & 0b111111011
	} else {
		// No 4 pair
		validStates = validStates & 0b111110111
	}
	// Check for range between min value and max value is 4 => (e.g. 2-6)
	if mostSignificantValue == -1 && identifier-minValue == 4 {
		// mostSignificant is high pair.
		validStates = validStates & 0b100100000
	} else if mostSignificantValue == -1 {
		validStates = validStates & 0b000100001
	}
	// correctedState is the valid state from 0-8; 0 is lowest, 8 is royal flush.
	// Pick the highest state bit still marked valid.
	correctedState := 0
	for correctedState = 8; correctedState >= 0; correctedState-- {
		if (validStates & (1 << correctedState)) != 0 {
			break
		}
	}
	if mostSignificantValue == -1 {
		mostSignificantValue = identifier
	}
	if secondSignificantValue == -1 {
		if mostSignificantValue == -1 {
			secondSignificantValue = lowPair1
		} else {
			secondSignificantValue = lowPair2
		}
	}
	//log.Printf("Rank Cards: %v => %v", cards, (correctedState<<8)+(mostSignificantValue<<4)+secondSignificantValue)
	// Pack: hand state in bits 8+, significant card values in the low byte.
	return (correctedState << 8) + (mostSignificantValue << 4) + secondSignificantValue
}
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkgbits
import (
"bytes"
"crypto/md5"
"encoding/binary"
"go/constant"
"io"
"math/big"
"runtime"
)
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
	// elems holds the bitstream for previously encoded elements.
	elems [numRelocs][]string

	// stringsIdx maps previously encoded strings to their index within
	// the RelocString section, to allow deduplication.
	stringsIdx map[string]int

	// syncFrames is the number of caller stack frames to record with
	// each sync marker (see Encoder.Sync); values <= 0 disable frame
	// recording.
	syncFrames int
}

// NewPkgEncoder returns an initialized PkgEncoder that records
// syncFrames caller stack frames per sync marker.
func NewPkgEncoder(syncFrames int) PkgEncoder {
	return PkgEncoder{
		stringsIdx: make(map[string]int),
		syncFrames: syncFrames,
	}
}
// DumpTo writes the package's encoded data to out0 and returns the
// package fingerprint (a truncated MD5 digest of everything written
// before the fingerprint itself).
func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
	h := md5.New()
	// Tee every write into the hash so the fingerprint covers the
	// entire payload.
	out := io.MultiWriter(out0, h)

	writeUint32 := func(x uint32) {
		assert(binary.Write(out, binary.LittleEndian, x) == nil)
	}

	writeUint32(0) // version

	// Write elemEndsEnds: cumulative element counts, one per section.
	var sum uint32
	for _, elems := range &pw.elems {
		sum += uint32(len(elems))
		writeUint32(sum)
	}

	// Write elemEnds: cumulative byte lengths, one per element.
	sum = 0
	for _, elems := range &pw.elems {
		for _, elem := range elems {
			sum += uint32(len(elem))
			writeUint32(sum)
		}
	}

	// Write elemData: the concatenated element bitstreams.
	for _, elems := range &pw.elems {
		for _, elem := range elems {
			_, err := io.WriteString(out, elem)
			assert(err == nil)
		}
	}

	// Write fingerprint (to out0 only; it must not hash itself).
	copy(fingerprint[:], h.Sum(nil))
	_, err := out0.Write(fingerprint[:])
	assert(err == nil)

	return
}
// StringIdx returns the index of s within the RelocString section,
// adding it on first use so each distinct string is stored only once.
func (pw *PkgEncoder) StringIdx(s string) int {
	if idx, ok := pw.stringsIdx[s]; ok {
		assert(pw.elems[RelocString][idx] == s)
		return idx
	}

	idx := len(pw.elems[RelocString])
	pw.elems[RelocString] = append(pw.elems[RelocString], s)
	pw.stringsIdx[s] = idx
	return idx
}

// NewEncoder returns an Encoder for a new element in section k, after
// writing marker as its initial sync marker.
func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
	e := pw.NewEncoderRaw(k)
	e.Sync(marker)
	return e
}

// NewEncoderRaw returns an Encoder for a new element in section k
// without writing an initial sync marker.
func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
	idx := len(pw.elems[k])
	pw.elems[k] = append(pw.elems[k], "") // placeholder; filled in by Flush

	return Encoder{
		p: pw,
		k: k,
		Idx: idx,
	}
}
// An Encoder provides methods for encoding an individual element's
// bitstream data.
type Encoder struct {
	p *PkgEncoder

	// Relocs are the relocation entries referenced by this element.
	Relocs []RelocEnt
	// Data accumulates the element's bitstream.
	Data bytes.Buffer

	// encodingRelocHeader guards against recursive flushes while the
	// relocation header itself is being written (see Flush and Sync).
	encodingRelocHeader bool

	k RelocKind
	Idx int // index of this element within section k
}

// Flush finalizes the element's bitstream and returns its Index.
func (w *Encoder) Flush() int {
	var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved

	// Backup the data so we write the relocations at the front.
	var tmp bytes.Buffer
	io.Copy(&tmp, &w.Data)

	// TODO(mdempsky): Consider writing these out separately so they're
	// easier to strip, along with function bodies, so that we can prune
	// down to just the data that's relevant to go/types.
	if w.encodingRelocHeader {
		panic("encodingRelocHeader already true; recursive flush?")
	}
	w.encodingRelocHeader = true
	w.Sync(SyncRelocs)
	w.Len(len(w.Relocs))
	for _, rent := range w.Relocs {
		w.Sync(SyncReloc)
		w.Len(int(rent.Kind))
		w.Len(rent.Idx)
	}

	// Relocation header first, then the saved bitstream body.
	io.Copy(&sb, &w.Data)
	io.Copy(&sb, &tmp)

	w.p.elems[w.k][w.Idx] = sb.String()

	return w.Idx
}
// checkErr reports any unexpected encoding error via errorf; writes to
// the in-memory buffer are not expected to fail.
func (w *Encoder) checkErr(err error) {
	if err != nil {
		errorf("unexpected encoding error: %v", err)
	}
}

// rawUvarint writes x to the bitstream using unsigned varint encoding.
func (w *Encoder) rawUvarint(x uint64) {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], x)
	_, err := w.Data.Write(buf[:n])
	w.checkErr(err)
}

// rawVarint writes x using zig-zag varint encoding, which keeps small
// negative values small on the wire.
func (w *Encoder) rawVarint(x int64) {
	// Zig-zag encode.
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	w.rawUvarint(ux)
}

// rawReloc returns the index within w.Relocs of the (r, idx) relocation
// entry, appending a new entry on first use.
func (w *Encoder) rawReloc(r RelocKind, idx int) int {
	// TODO(mdempsky): Use map for lookup.
	for i, rent := range w.Relocs {
		if rent.Kind == r && rent.Idx == idx {
			return i
		}
	}

	i := len(w.Relocs)
	w.Relocs = append(w.Relocs, RelocEnt{r, idx})
	return i
}
// Sync writes the sync marker m, plus the calling stack frames when
// frame recording is enabled. It is a no-op when sync markers are
// disabled via EnableSync.
func (w *Encoder) Sync(m SyncMarker) {
	if !EnableSync {
		return
	}

	// Writing out stack frame string references requires working
	// relocations, but writing out the relocations themselves involves
	// sync markers. To prevent infinite recursion, we simply trim the
	// stack frame for sync markers within the relocation header.
	var frames []string
	if !w.encodingRelocHeader && w.p.syncFrames > 0 {
		pcs := make([]uintptr, w.p.syncFrames)
		n := runtime.Callers(2, pcs)
		frames = fmtFrames(pcs[:n]...)
	}

	// TODO(mdempsky): Save space by writing out stack frames as a
	// linked list so we can share common stack frames.
	w.rawUvarint(uint64(m))
	w.rawUvarint(uint64(len(frames)))
	for _, frame := range frames {
		w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
	}
}

// Bool writes b as a single byte (1 or 0) and returns it unchanged,
// which allows it to be used inline in conditionals.
func (w *Encoder) Bool(b bool) bool {
	w.Sync(SyncBool)
	var x byte
	if b {
		x = 1
	}
	err := w.Data.WriteByte(x)
	w.checkErr(err)
	return b
}
// Int64 writes x using zig-zag varint encoding.
func (w *Encoder) Int64(x int64) {
	w.Sync(SyncInt64)
	w.rawVarint(x)
}

// Uint64 writes x using unsigned varint encoding.
func (w *Encoder) Uint64(x uint64) {
	w.Sync(SyncUint64)
	w.rawUvarint(x)
}

// Len writes a non-negative length value.
func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }

// Int writes a signed integer.
func (w *Encoder) Int(x int) { w.Int64(int64(x)) }

// Uint writes an unsigned integer.
func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }

// Reloc writes a reference to the (r, idx) relocation entry.
func (w *Encoder) Reloc(r RelocKind, idx int) {
	w.Sync(SyncUseReloc)
	w.Len(w.rawReloc(r, idx))
}

// Code writes c's value preceded by its kind-specific sync marker.
func (w *Encoder) Code(c Code) {
	w.Sync(c.Marker())
	w.Len(c.Value())
}

// String writes s as a reference into the deduplicated string section.
func (w *Encoder) String(s string) {
	w.Sync(SyncString)
	w.Reloc(RelocString, w.p.StringIdx(s))
}

// Strings writes ss as a length followed by each string.
func (w *Encoder) Strings(ss []string) {
	w.Len(len(ss))
	for _, s := range ss {
		w.String(s)
	}
}

// Value writes the constant val; complex values are written as their
// real and imaginary parts.
func (w *Encoder) Value(val constant.Value) {
	w.Sync(SyncValue)
	if w.Bool(val.Kind() == constant.Complex) {
		w.scalar(constant.Real(val))
		w.scalar(constant.Imag(val))
	} else {
		w.scalar(val)
	}
}
// scalar writes a non-complex constant value tagged with its kind code.
func (w *Encoder) scalar(val constant.Value) {
	switch v := constant.Val(val).(type) {
	default:
		errorf("unhandled %v (%v)", val, val.Kind())
	case bool:
		w.Code(ValBool)
		w.Bool(v)
	case string:
		w.Code(ValString)
		w.String(v)
	case int64:
		w.Code(ValInt64)
		w.Int64(v)
	case *big.Int:
		w.Code(ValBigInt)
		w.bigInt(v)
	case *big.Rat:
		// Rationals are written as numerator then denominator.
		w.Code(ValBigRat)
		w.bigInt(v.Num())
		w.bigInt(v.Denom())
	case *big.Float:
		w.Code(ValBigFloat)
		w.bigFloat(v)
	}
}

// bigInt writes v as its magnitude bytes plus a sign flag.
func (w *Encoder) bigInt(v *big.Int) {
	b := v.Bytes()
	w.String(string(b)) // TODO: More efficient encoding.
	w.Bool(v.Sign() < 0)
}

// bigFloat writes v in its shortest 'p' (binary exponent) text form.
func (w *Encoder) bigFloat(v *big.Float) {
	b := v.Append(nil, 'p', -1)
	w.String(string(b)) // TODO: More efficient encoding.
}
package longbits
import (
"math/bits"
)
// NewBitBuilder returns a BitBuilder whose byte storage is
// pre-allocated with capacity for expectedLen bytes.
func NewBitBuilder(expectedLen int) BitBuilder {
	if expectedLen == 0 {
		return BitBuilder{}
	}
	return BitBuilder{bytes: make([]byte, 0, expectedLen)}
}

// BitBuilder accumulates individual bits into a byte slice.
type BitBuilder struct {
	bytes []byte
	// accumulator holds the pending (not yet full) byte: its low bits are
	// the pending data bits, preceded by a single sentinel bit planted by
	// ensure() that marks how many bits are pending. A value of 0 means
	// the builder has not been used yet.
	accumulator uint16
}

// IsZero reports whether the builder has never accumulated anything.
func (p BitBuilder) IsZero() bool {
	return p.accumulator == 0 && p.bytes == nil
}

// Len returns the number of bits accumulated so far; the position of
// the sentinel bit in the accumulator's high byte yields the count of
// pending bits via LeadingZeros8.
func (p BitBuilder) Len() int {
	return (1+len(p.bytes))<<3 - bits.LeadingZeros8(uint8(p.accumulator>>8))
}
// accInit is the sentinel pattern: bit 7 set, zero data bits pending.
const accInit = 0x80

// ensure lazily plants the sentinel bit in an unused accumulator. It
// panics on the inconsistent state of a zero accumulator combined with
// already-flushed bytes.
func (p *BitBuilder) ensure() {
	if p.accumulator == 0 {
		if len(p.bytes) != 0 {
			panic("illegal state")
		}
		p.accumulator = accInit
	}
}
// Append appends a single bit and returns the updated builder.
func (p BitBuilder) Append(bit bool) BitBuilder {
	p.ensure()
	p.accumulator <<= 1
	if bit {
		p.accumulator |= 1
	}
	// Once the sentinel reaches the accumulator's top bit, a full byte
	// is pending: flush it and reset the accumulator to the sentinel.
	if p.accumulator >= accInit<<8 {
		p.bytes = append(p.bytes, byte(p.accumulator))
		p.accumulator = accInit
	}
	return p
}

// AppendN appends bitCount copies of bit and returns the updated
// builder. It panics when bitCount is negative.
func (p BitBuilder) AppendN(bitCount int, bit bool) BitBuilder {
	if bitCount < 0 {
		panic("invalid bitCount value")
	}
	p.ensure()
	if bitCount == 0 {
		return p
	}
	if bit {
		return p.appendN1(bitCount)
	}
	return p.appendN0(bitCount)
}
// appendN0 appends bitCount zero bits and returns the updated builder.
// The caller guarantees bitCount > 0.
func (p BitBuilder) appendN0(bitCount int) BitBuilder {
	p.ensure()
	if p.accumulator != accInit {
		// Partially filled byte: alignCount is the number of free bits
		// left before the pending byte is complete.
		alignCount := bits.LeadingZeros8(uint8(p.accumulator >> 8))
		if alignCount > bitCount {
			// The whole run fits inside the pending byte.
			// Fix(review): the condition was inverted (alignCount < bitCount),
			// which shifted the sentinel out of the 16-bit accumulator and/or
			// flushed a byte prematurely whenever the run exceeded the free
			// space of a partially filled byte.
			p.accumulator <<= uint8(bitCount)
			return p
		}
		// Fill the pending byte and flush it.
		bitCount -= alignCount
		p.accumulator <<= uint8(alignCount)
		p.bytes = append(p.bytes, byte(p.accumulator))
		p.accumulator = accInit
	}
	if bitCount == 0 {
		return p
	}
	// Whole zero bytes first, then the remaining 0-7 bits stay pending.
	alignCount := uint8(bitCount) & 0x7
	bitCount >>= 3
	if bitCount > 0 {
		p.bytes = append(p.bytes, make([]byte, bitCount)...)
	}
	p.accumulator <<= alignCount
	return p
}

// appendN1 appends bitCount one bits and returns the updated builder.
// The caller guarantees bitCount > 0.
func (p BitBuilder) appendN1(bitCount int) BitBuilder {
	p.ensure()
	if p.accumulator != accInit {
		alignCount := bits.LeadingZeros8(uint8(p.accumulator >> 8))
		if alignCount > bitCount {
			// Run fits inside the pending byte (same inverted-condition
			// fix as in appendN0 above).
			p.accumulator <<= uint8(bitCount)
			p.accumulator |= 0xFF >> uint8(8-bitCount)
			return p
		}
		bitCount -= alignCount
		p.accumulator <<= uint8(alignCount)
		p.accumulator |= 0xFF >> uint8(8-alignCount)
		p.bytes = append(p.bytes, byte(p.accumulator))
		p.accumulator = accInit
	}
	if bitCount == 0 {
		return p
	}
	alignCount := uint8(bitCount) & 0x7
	bitCount >>= 3
	if bitCount > 0 {
		// Append whole 0xFF bytes.
		i := len(p.bytes)
		p.bytes = append(p.bytes, make([]byte, bitCount)...)
		for ; i < len(p.bytes); i++ {
			p.bytes[i] = 0xFF
		}
	}
	p.accumulator <<= alignCount
	p.accumulator |= 0xFF >> (8 - alignCount)
	return p
}
// Done returns the accumulated bytes (appending the partial byte when
// bits are pending) and the total bit length. The returned slice
// aliases the builder's internal storage.
func (p BitBuilder) Done() ([]byte, int) {
	if p.accumulator <= accInit {
		// Nothing pending beyond the sentinel.
		return p.bytes, len(p.bytes) << 3
	}
	p.bytes = append(p.bytes, byte(p.accumulator))
	return p.bytes, len(p.bytes)<<3 - bits.LeadingZeros8(uint8(p.accumulator>>8))
}

// DoneAndCopy is like Done but returns a copy detached from the
// builder's storage (nil when there are no bytes).
func (p BitBuilder) DoneAndCopy() ([]byte, int) {
	b, l := p.Done()
	if len(b) == 0 {
		return nil, l
	}
	return append(make([]byte, 0, len(b)), b...), l
}

// DoneToByteString is like Done but wraps the bytes in a ByteString.
func (p BitBuilder) DoneToByteString() (ByteString, int) {
	b, l := p.Done()
	return NewByteString(b), l
}

// Copy returns an independent copy of the builder with its own storage.
func (p BitBuilder) Copy() BitBuilder {
	if p.bytes == nil {
		return BitBuilder{accumulator: p.accumulator}
	}
	return BitBuilder{accumulator: p.accumulator, bytes: append(make([]byte, 0, cap(p.bytes)), p.bytes...)}
}
package errors
import "errors"
// IsUnavailable determines if err is an error which indicates a unavailable error.
// It supports wrapped errors.
func IsUnavailable(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 14
}
return false
}
// IsDataLoss determines if err is an error which indicates a data loss error.
// It supports wrapped errors.
func IsDataLoss(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 15
}
return false
}
// IsUnauthorized determines if err is an error which indicates a unauthorized error.
// It supports wrapped errors.
func IsUnauthorized(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 16
}
return false
}
// IsCancelled determines if err is an error which indicates a cancelled error.
// It supports wrapped errors.
func IsCancelled(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 1
}
return false
}
// IsUnknown determines if err is an error which indicates a unknown error.
// It supports wrapped errors.
func IsUnknown(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 2
}
return false
}
// IsInvalidArgument determines if err is an error which indicates an invalid argument error.
// It supports wrapped errors.
func IsInvalidArgument(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 3
}
return false
}
// IsDeadlineExceeded determines if err is an error which indicates a deadline exceeded error.
// It supports wrapped errors.
func IsDeadlineExceeded(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 4
}
return false
}
// IsNotFound determines if err is an error which indicates a not found error.
// It supports wrapped errors.
func IsNotFound(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 5
}
return false
}
// IsAlreadyExists determines if err is an error which indicates a already exsits error.
// It supports wrapped errors.
func IsAlreadyExists(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 6
}
return false
}
// IsPermissionDenied determines if err is an error which indicates a permission denied error.
// It supports wrapped errors.
func IsPermissionDenied(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 7
}
return false
}
// IsResourceExhausted determines if err is an error which indicates a resource exhausted error.
// It supports wrapped errors.
func IsResourceExhausted(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 8
}
return false
}
// IsFailedPrecondition determines if err is an error which indicates a failed precondition error.
// It supports wrapped errors.
func IsFailedPrecondition(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 9
}
return false
}
// IsAborted determines if err is an error which indicates an aborted error.
// It supports wrapped errors.
func IsAborted(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 10
}
return false
}
// IsOutOfRange determines if err is an error which indicates a out of range error.
// It supports wrapped errors.
func IsOutOfRange(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 11
}
return false
}
// IsUnimplemented determines if err is an error which indicates a unimplemented error.
// It supports wrapped errors.
func IsUnimplemented(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 12
}
return false
}
// IsInternal determines if err is an error which indicates an internal server error.
// It supports wrapped errors.
func IsInternal(err error) bool {
if se := new(Error); errors.As(err, &se) {
return se.Code == 13
}
return false
} | errors/code.go | 0.776199 | 0.429489 | code.go | starcoder |
package parse
import (
"text/template/parse"
"github.com/powerpuffpenguin/goja"
)
// register wires the text/template/parse API into the factory: free
// functions, Mode/NodeType constants (exposed via accessors), and the
// is* runtime type-test helpers defined below.
func (f *factory) register() {
	f.Set(`IsEmptyTree`, parse.IsEmptyTree)
	f.Set(`Parse`, parse.Parse)
	f.Set(`NewIdentifier`, parse.NewIdentifier)
	f.Set(`Mode`, Mode)
	f.Accessor(`ParseComments`, f.getParseComments, nil)
	f.Accessor(`SkipFuncCheck`, f.getSkipFuncCheck, nil)
	f.Set(`NodeType`, NodeType)
	f.Accessor(`NodeText`, f.getNodeText, nil)
	f.Accessor(`NodeAction`, f.getNodeAction, nil)
	f.Accessor(`NodeBool`, f.getNodeBool, nil)
	f.Accessor(`NodeChain`, f.getNodeChain, nil)
	f.Accessor(`NodeCommand`, f.getNodeCommand, nil)
	f.Accessor(`NodeDot`, f.getNodeDot, nil)
	f.Accessor(`NodeField`, f.getNodeField, nil)
	f.Accessor(`NodeIdentifier`, f.getNodeIdentifier, nil)
	f.Accessor(`NodeIf`, f.getNodeIf, nil)
	f.Accessor(`NodeList`, f.getNodeList, nil)
	f.Accessor(`NodeNil`, f.getNodeNil, nil)
	f.Accessor(`NodeNumber`, f.getNodeNumber, nil)
	f.Accessor(`NodePipe`, f.getNodePipe, nil)
	f.Accessor(`NodeRange`, f.getNodeRange, nil)
	f.Accessor(`NodeString`, f.getNodeString, nil)
	f.Accessor(`NodeTemplate`, f.getNodeTemplate, nil)
	f.Accessor(`NodeVariable`, f.getNodeVariable, nil)
	f.Accessor(`NodeWith`, f.getNodeWith, nil)
	f.Accessor(`NodeComment`, f.getNodeComment, nil)
	f.Set(`Pos`, Pos)
	f.Set(`New`, parse.New)
	f.Set(`isActionNodePointer`, isActionNodePointer)
	f.Set(`isBoolNodePointer`, isBoolNodePointer)
	f.Set(`isBranchNodePointer`, isBranchNodePointer)
	f.Set(`isChainNodePointer`, isChainNodePointer)
	f.Set(`isCommandNodePointer`, isCommandNodePointer)
	f.Set(`isCommentNodePointer`, isCommentNodePointer)
	f.Set(`isDotNodePointer`, isDotNodePointer)
	f.Set(`isFieldNodePointer`, isFieldNodePointer)
	f.Set(`isIdentifierNodePointer`, isIdentifierNodePointer)
	f.Set(`isIfNodePointer`, isIfNodePointer)
	f.Set(`isListNodePointer`, isListNodePointer)
	f.Set(`isMode`, isMode)
	f.Set(`isNilNodePointer`, isNilNodePointer)
	f.Set(`isNode`, isNode)
	f.Set(`isNodeType`, isNodeType)
	f.Set(`isNumberNodePointer`, isNumberNodePointer)
	f.Set(`isPipeNodePointer`, isPipeNodePointer)
	f.Set(`isPos`, isPos)
	f.Set(`isRangeNodePointer`, isRangeNodePointer)
	f.Set(`isStringNodePointer`, isStringNodePointer)
	f.Set(`isTemplateNodePointer`, isTemplateNodePointer)
	f.Set(`isTextNodePointer`, isTextNodePointer)
	f.Set(`isTreePointer`, isTreePointer)
	f.Set(`isVariableNodePointer`, isVariableNodePointer)
	f.Set(`isWithNodePointer`, isWithNodePointer)
}
// isActionNodePointer reports whether i holds a *parse.ActionNode.
// (The is* helpers below are registered in register() so callers can
// test the dynamic type of a parse value at runtime.)
func isActionNodePointer(i interface{}) bool {
	_, result := i.(*parse.ActionNode)
	return result
}
func isBoolNodePointer(i interface{}) bool {
	_, result := i.(*parse.BoolNode)
	return result
}
func isBranchNodePointer(i interface{}) bool {
	_, result := i.(*parse.BranchNode)
	return result
}
func isChainNodePointer(i interface{}) bool {
	_, result := i.(*parse.ChainNode)
	return result
}
func isCommandNodePointer(i interface{}) bool {
	_, result := i.(*parse.CommandNode)
	return result
}
func isCommentNodePointer(i interface{}) bool {
	_, result := i.(*parse.CommentNode)
	return result
}
func isDotNodePointer(i interface{}) bool {
	_, result := i.(*parse.DotNode)
	return result
}
func isFieldNodePointer(i interface{}) bool {
	_, result := i.(*parse.FieldNode)
	return result
}
func isIdentifierNodePointer(i interface{}) bool {
	_, result := i.(*parse.IdentifierNode)
	return result
}
func isIfNodePointer(i interface{}) bool {
	_, result := i.(*parse.IfNode)
	return result
}
func isListNodePointer(i interface{}) bool {
	_, result := i.(*parse.ListNode)
	return result
}
func isMode(i interface{}) bool {
	_, result := i.(parse.Mode)
	return result
}
func isNilNodePointer(i interface{}) bool {
	_, result := i.(*parse.NilNode)
	return result
}
func isNode(i interface{}) bool {
	_, result := i.(parse.Node)
	return result
}
func isNodeType(i interface{}) bool {
	_, result := i.(parse.NodeType)
	return result
}
func isNumberNodePointer(i interface{}) bool {
	_, result := i.(*parse.NumberNode)
	return result
}
func isPipeNodePointer(i interface{}) bool {
	_, result := i.(*parse.PipeNode)
	return result
}
func isPos(i interface{}) bool {
	_, result := i.(parse.Pos)
	return result
}
func isRangeNodePointer(i interface{}) bool {
	_, result := i.(*parse.RangeNode)
	return result
}
func isStringNodePointer(i interface{}) bool {
	_, result := i.(*parse.StringNode)
	return result
}
func isTemplateNodePointer(i interface{}) bool {
	_, result := i.(*parse.TemplateNode)
	return result
}
func isTextNodePointer(i interface{}) bool {
	_, result := i.(*parse.TextNode)
	return result
}
func isTreePointer(i interface{}) bool {
	_, result := i.(*parse.Tree)
	return result
}
func isVariableNodePointer(i interface{}) bool {
	_, result := i.(*parse.VariableNode)
	return result
}
func isWithNodePointer(i interface{}) bool {
	_, result := i.(*parse.WithNode)
	return result
}
// Pos converts v to a parse.Pos.
func Pos(v int) parse.Pos {
	return parse.Pos(v)
}
// The getNodeX accessors below expose the parse.NodeX constants to the
// runtime as read-only properties (registered in register()).
func (f *factory) getNodeText(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeText)
}
func (f *factory) getNodeAction(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeAction)
}
func (f *factory) getNodeBool(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeBool)
}
func (f *factory) getNodeChain(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeChain)
}
func (f *factory) getNodeCommand(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeCommand)
}
func (f *factory) getNodeDot(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeDot)
}
func (f *factory) getNodeField(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeField)
}
func (f *factory) getNodeIdentifier(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeIdentifier)
}
func (f *factory) getNodeIf(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeIf)
}
func (f *factory) getNodeList(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeList)
}
func (f *factory) getNodeNil(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeNil)
}
func (f *factory) getNodeNumber(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeNumber)
}
func (f *factory) getNodePipe(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodePipe)
}
func (f *factory) getNodeRange(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeRange)
}
func (f *factory) getNodeString(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeString)
}
func (f *factory) getNodeTemplate(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeTemplate)
}
func (f *factory) getNodeVariable(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeVariable)
}
func (f *factory) getNodeWith(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeWith)
}
func (f *factory) getNodeComment(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.NodeComment)
}

// NodeType converts v to a parse.NodeType.
func NodeType(v int) parse.NodeType {
	return parse.NodeType(v)
}
// getParseComments exposes parse.ParseComments to the runtime.
func (f *factory) getParseComments(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.ParseComments)
}

// getSkipFuncCheck exposes the SkipFuncCheck mode as ParseComments << 1.
// NOTE(review): this hard-codes the assumption that SkipFuncCheck is the
// next Mode bit after ParseComments — presumably to build against Go
// versions that predate parse.SkipFuncCheck; confirm against the
// toolchain in use.
func (f *factory) getSkipFuncCheck(call goja.FunctionCall) goja.Value {
	return f.runtime.ToValue(parse.ParseComments << 1)
}

// Mode converts v to a parse.Mode.
func Mode(v uint) parse.Mode {
	return parse.Mode(v)
}
package googlemaps
import (
//"../common"
"github.com/mitroadmaps/gomapinfer/common"
"math"
)
// ORIGIN_SHIFT is half the Earth's equatorial circumference in meters
// (using the WGS84 radius 6378137), as used by Spherical Mercator.
const ORIGIN_SHIFT = 2 * math.Pi * 6378137 / 2.0

// convert latitude/longitude to Spherical Mercator EPSG:900913
// source: http://gis.stackexchange.com/questions/46729/corner-coordinates-of-google-static-map-tile
func LonLatToMeters(lonLat common.Point) common.Point {
	mx := lonLat.X * ORIGIN_SHIFT / 180.0
	my := math.Log(math.Tan((90 + lonLat.Y) * math.Pi / 360.0)) / (math.Pi / 180.0)
	my = my * ORIGIN_SHIFT / 180.0
	return common.Point{mx, my}
}

// MetersToLonLat is the inverse of LonLatToMeters: it converts Spherical
// Mercator meters back to longitude/latitude degrees.
func MetersToLonLat(meters common.Point) common.Point {
	lon := (meters.X / ORIGIN_SHIFT) * 180.0
	lat := (meters.Y / ORIGIN_SHIFT) * 180.0
	lat = 180 / math.Pi * (2 * math.Atan(math.Exp(lat * math.Pi / 180.0)) - math.Pi / 2.0)
	return common.Point{lon, lat}
}

// GetMetersPerPixel returns the ground resolution (meters per pixel) of
// a 256-pixel Web Mercator tile at the given zoom level.
func GetMetersPerPixel(zoom int) float64 {
	return 2 * math.Pi * 6378137 / math.Exp2(float64(zoom)) / 256
}
// LonLatToPixel projects a longitude/latitude point to pixel coordinates
// relative to a 512x512 image centered on origin at the given zoom level.
func LonLatToPixel(p common.Point, origin common.Point, zoom int) common.Point {
	meters := LonLatToMeters(p).Sub(LonLatToMeters(origin))
	pixels := meters.Scale(1 / GetMetersPerPixel(zoom))
	// Flip the y axis to match sat image orientation, then shift to the
	// image center (image is offset due to picking centers).
	return common.Point{pixels.X, -pixels.Y}.Add(common.Point{256, 256})
}
// PixelToLonLat inverts LonLatToPixel: pixel coordinates relative to origin
// at the given zoom level are mapped back to longitude/latitude.
func PixelToLonLat(p common.Point, origin common.Point, zoom int) common.Point {
	shifted := p.Sub(common.Point{256, 256})
	meters := common.Point{shifted.X, -shifted.Y}.Scale(GetMetersPerPixel(zoom))
	return MetersToLonLat(meters.Add(LonLatToMeters(origin)))
}
// LonLatToMapboxTile returns the XYZ tile coordinates containing the given
// longitude/latitude point at the given zoom level.
func LonLatToMapboxTile(lonLat common.Point, zoom int) [2]int {
	n := math.Exp2(float64(zoom))
	latRad := lonLat.Y * math.Pi / 180
	xtile := int((lonLat.X + 180) / 360 * n)
	ytile := int((1 - math.Log(math.Tan(latRad)+(1/math.Cos(latRad)))/math.Pi) / 2 * n)
	return [2]int{xtile, ytile}
}
// LonLatToMapbox converts a longitude/latitude point to pixel coordinates
// relative to the top-left corner of originTile (256-pixel tiles).
func LonLatToMapbox(lonLat common.Point, zoom int, originTile [2]int) common.Point {
	n := math.Exp2(float64(zoom))
	latRad := lonLat.Y * math.Pi / 180
	x := (lonLat.X + 180) / 360 * n
	y := (1 - math.Log(math.Tan(latRad)+(1/math.Cos(latRad)))/math.Pi) / 2 * n
	offset := common.Point{x - float64(originTile[0]), y - float64(originTile[1])}
	return offset.Scale(256)
}
func MapboxToLonLat(p common.Point, zoom int, originTile [2]int) common.Point {
n := math.Exp2(float64(zoom))
p = p.Scale(1.0/256).Add(common.Point{float64(originTile[0]), float64(originTile[1])})
x := p.X * 360 / n - 180
y := math.Atan(math.Sinh(math.Pi * (1 - 2 * p.Y / n)))
y = y * 180 / math.Pi
return common.Point{x, y}
} | googlemaps/coords.go | 0.767777 | 0.479626 | coords.go | starcoder |
package optional
import "time"
/*
Bool map functions
*/
// Map applies f to the wrapped value when one is present and returns the
// result as a new optional Bool; otherwise it returns an empty Bool.
func (b Bool) Map(f func(bool) bool) Bool {
	if f != nil && b.IsPresent() {
		return OfBool(f(b.value))
	}
	return Bool{}
}

// MapToInt applies f to the wrapped value when one is present and returns
// the result as an optional Int; otherwise it returns an empty Int.
func (b Bool) MapToInt(f func(bool) int) Int {
	if f != nil && b.IsPresent() {
		return OfInt(f(b.value))
	}
	return Int{}
}

// MapToString applies f to the wrapped value when one is present and returns
// the result as an optional String; otherwise it returns an empty String.
func (b Bool) MapToString(f func(bool) string) String {
	if f != nil && b.IsPresent() {
		return OfString(f(b.value))
	}
	return String{}
}

// MapToFloat64 applies f to the wrapped value when one is present and returns
// the result as an optional Float64; otherwise it returns an empty Float64.
func (b Bool) MapToFloat64(f func(bool) float64) Float64 {
	if f != nil && b.IsPresent() {
		return OfFloat64(f(b.value))
	}
	return Float64{}
}
/*
Int map functions
*/
// Map applies f to the wrapped value when one is present and returns the
// result as a new optional Int; otherwise it returns an empty Int.
func (i Int) Map(f func(int) int) Int {
	if f != nil && i.IsPresent() {
		return OfInt(f(i.value))
	}
	return Int{}
}

// MapToBool applies f to the wrapped value when one is present and returns
// the result as an optional Bool; otherwise it returns an empty Bool.
func (i Int) MapToBool(f func(int) bool) Bool {
	if f != nil && i.IsPresent() {
		return OfBool(f(i.value))
	}
	return Bool{}
}

// MapToString applies f to the wrapped value when one is present and returns
// the result as an optional String; otherwise it returns an empty String.
func (i Int) MapToString(f func(int) string) String {
	if f != nil && i.IsPresent() {
		return OfString(f(i.value))
	}
	return String{}
}

// MapToFloat64 applies f to the wrapped value when one is present and returns
// the result as an optional Float64; otherwise it returns an empty Float64.
func (i Int) MapToFloat64(f func(int) float64) Float64 {
	if f != nil && i.IsPresent() {
		return OfFloat64(f(i.value))
	}
	return Float64{}
}
/*
String map functions
*/
// Map applies f to the wrapped value when one is present and returns the
// result as a new optional String; otherwise it returns an empty String.
func (s String) Map(f func(string) string) String {
	if f != nil && s.IsPresent() {
		return OfString(f(s.value))
	}
	return String{}
}

// MapToBool applies f to the wrapped value when one is present and returns
// the result as an optional Bool; otherwise it returns an empty Bool.
func (s String) MapToBool(f func(string) bool) Bool {
	if f != nil && s.IsPresent() {
		return OfBool(f(s.value))
	}
	return Bool{}
}

// MapToInt applies f to the wrapped value when one is present and returns
// the result as an optional Int; otherwise it returns an empty Int.
func (s String) MapToInt(f func(string) int) Int {
	if f != nil && s.IsPresent() {
		return OfInt(f(s.value))
	}
	return Int{}
}

// MapToFloat64 applies f to the wrapped value when one is present and returns
// the result as an optional Float64; otherwise it returns an empty Float64.
func (s String) MapToFloat64(f func(string) float64) Float64 {
	if f != nil && s.IsPresent() {
		return OfFloat64(f(s.value))
	}
	return Float64{}
}
/*
Float64 map functions
*/
// Map applies c to the wrapped value when one is present and returns the
// result as a new optional Float64; otherwise it returns an empty Float64.
func (f Float64) Map(c func(float64) float64) Float64 {
	if c != nil && f.IsPresent() {
		return OfFloat64(c(f.value))
	}
	return Float64{}
}

// MapToBool applies c to the wrapped value when one is present and returns
// the result as an optional Bool; otherwise it returns an empty Bool.
func (f Float64) MapToBool(c func(float64) bool) Bool {
	if c != nil && f.IsPresent() {
		return OfBool(c(f.value))
	}
	return Bool{}
}

// MapToInt applies c to the wrapped value when one is present and returns
// the result as an optional Int; otherwise it returns an empty Int.
func (f Float64) MapToInt(c func(float64) int) Int {
	if c != nil && f.IsPresent() {
		return OfInt(c(f.value))
	}
	return Int{}
}

// MapToString applies c to the wrapped value when one is present and returns
// the result as an optional String; otherwise it returns an empty String.
func (f Float64) MapToString(c func(float64) string) String {
	if c != nil && f.IsPresent() {
		return OfString(c(f.value))
	}
	return String{}
}
/*
Time map functions
*/
// Map applies f to the wrapped time when one is present and returns the
// result as a new optional Time; otherwise it returns an empty Time.
func (t Time) Map(f func(time.Time) time.Time) Time {
	if f != nil && t.IsPresent() {
		return OfTime(f(t.value))
	}
	return Time{}
}
/*
Duration map functions
*/
// Map applies f to the wrapped duration when one is present and returns the
// result as a new optional Duration; otherwise it returns an empty Duration.
func (d Duration) Map(f func(time.Duration) time.Duration) Duration {
	if f != nil && d.IsPresent() {
		return OfDuration(f(d.value))
	}
	return Duration{}
}
/*
Mixed map functions
*/
// Map applies mapping function on optional value if it presents
func (m Mixed) Map(f func(interface{}) interface{}) Mixed {
if f != nil && m.IsPresent() {
return OfMixed(f(m.mixed))
}
return Mixed{}
}
// MapToBool applies mapping function on optional value if it presents
func (m Mixed) MapToBool(f func(interface{}) bool) Bool {
if f != nil && m.IsPresent() {
return OfBool(f(m.mixed))
}
return Bool{}
}
// MapToInt applies mapping function on optional value if it presents
func (m Mixed) MapToInt(f func(interface{}) int) Int {
if f != nil && m.IsPresent() {
return OfInt(f(m.mixed))
}
return Int{}
}
// MapToString applies mapping function on optional value if it presents
func (m Mixed) MapToString(f func(interface{}) string) String {
if f != nil && m.IsPresent() {
return OfString(f(m.mixed))
}
return String{}
}
// MapToFloat64 applies mapping function on optional value if it presents
func (m Mixed) MapToFloat64(f func(interface{}) float64) Float64 {
if f != nil && m.IsPresent() {
return OfFloat64(f(m.mixed))
}
return Float64{}
} | map.go | 0.756627 | 0.455986 | map.go | starcoder |
package twodimensionpacking
// Point is a 2D point with integer coordinates.
type Point struct {
	X int
	Y int
}

// PointNew constructs a Point at (x, y).
func PointNew(x, y int) *Point {
	return &Point{X: x, Y: y}
}

// Line is a segment between two points, P1 and P2.
type Line struct {
	P1, P2 *Point
}

// LineNew constructs a Line from p1 to p2.
func LineNew(p1, p2 *Point) *Line {
	return &Line{P1: p1, P2: p2}
}

// VerticalIntersect reports whether the two horizontal lines overlap in the
// x direction, and returns their vertical distance (l - l1).
func (l *Line) VerticalIntersect(l1 *Line) (isInter bool, dist int) {
	isInter = l.P1.X < l1.P2.X && l1.P1.X < l.P2.X
	dist = l.P1.Y - l1.P1.Y
	return isInter, dist
}

// HorizontalIntersect reports whether the two vertical lines overlap in the
// y direction, and returns their horizontal distance (l - l1).
func (l *Line) HorizontalIntersect(l1 *Line) (isInter bool, dist int) {
	isInter = l.P1.Y < l1.P2.Y && l1.P1.Y < l.P2.Y
	dist = l.P1.X - l1.P1.X
	return isInter, dist
}
// Size holds a rectangle's dimensions.
type Size struct {
	Height int
	Width  int
}

// SizeNew constructs a Size with the given height and width.
func SizeNew(height, width int) *Size {
	return &Size{Height: height, Width: width}
}

// Area returns width times height.
func (s *Size) Area() int {
	return s.Width * s.Height
}

// CanHold reports whether s is at least as wide and as tall as s1.
func (s *Size) CanHold(s1 *Size) bool {
	return s.Height >= s1.Height && s.Width >= s1.Width
}
// Rectangle is a Size anchored at Sp, its bottom-left corner.
type Rectangle struct {
	*Size
	// Sp is the bottom left point
	Sp *Point
}

// RectangleNew constructs a Rectangle of size sz anchored at sp.
func RectangleNew(sz *Size, sp *Point) *Rectangle {
	return &Rectangle{Size: sz, Sp: sp}
}

// BottomLine returns r's bottom edge, left to right.
func (r *Rectangle) BottomLine() *Line {
	left := PointNew(r.Sp.X, r.Sp.Y)
	right := PointNew(r.Sp.X+r.Width, r.Sp.Y)
	return LineNew(left, right)
}

// TopLine returns r's top edge, left to right.
func (r *Rectangle) TopLine() *Line {
	left := PointNew(r.Sp.X, r.Sp.Y+r.Height)
	right := PointNew(r.Sp.X+r.Width, r.Sp.Y+r.Height)
	return LineNew(left, right)
}

// LeftLine returns r's left edge, bottom to top.
func (r *Rectangle) LeftLine() *Line {
	bottom := PointNew(r.Sp.X, r.Sp.Y)
	top := PointNew(r.Sp.X, r.Sp.Y+r.Height)
	return LineNew(bottom, top)
}

// RightLine returns r's right edge, bottom to top.
func (r *Rectangle) RightLine() *Line {
	bottom := PointNew(r.Sp.X+r.Width, r.Sp.Y)
	top := PointNew(r.Sp.X+r.Width, r.Sp.Y+r.Height)
	return LineNew(bottom, top)
}

// Intersect reports whether r and r1 overlap. Rectangles that merely touch
// along an edge do not count as intersecting.
func (r *Rectangle) Intersect(r1 *Rectangle) bool {
	overlapX := r.Sp.X < r1.Sp.X+r1.Width && r1.Sp.X < r.Sp.X+r.Width
	overlapY := r.Sp.Y < r1.Sp.Y+r1.Height && r1.Sp.Y < r.Sp.Y+r.Height
	return overlapX && overlapY
}
// Box is a Rectangle that records the rectangles packed into it.
type Box struct {
	*Rectangle
	StoredItem []*Rectangle
}

// BoxNew returns a box with the given size and its starting point at (0,0).
func BoxNew(height, width int) *Box {
	rect := &Rectangle{Size: SizeNew(height, width), Sp: PointNew(0, 0)}
	return &Box{Rectangle: rect}
}
type Item struct {
Size
}
func ItemNew(height, width int) *Item {
return &Item{Size: *SizeNew(height, width)}
} | twodimensionpacking/type.go | 0.901833 | 0.500183 | type.go | starcoder |
package bot
import (
"math/rand"
"time"
"github.com/beefsack/go-astar"
"github.com/chingkamhing/grpc-game-example/pkg/backend"
"github.com/google/uuid"
)
// bot controls a player in the game.
type bot struct {
	// playerID identifies the backend player entity this bot drives.
	playerID uuid.UUID
}

// Bots controls all bots added to a game.
type Bots struct {
	bots []*bot        // bots driven by the goroutine started in Start
	game *backend.Game // shared game state; access is guarded by game.Mu
}
// NewBots creates a new bots instance bound to the given game.
func NewBots(game *backend.Game) *Bots {
	b := &Bots{game: game}
	b.bots = make([]*bot, 0)
	return b
}
// AddBot creates a bot-controlled player, registers it with the game, and
// returns the new player entity.
func (bots *Bots) AddBot(name string) *backend.Player {
	id := uuid.New()
	player := &backend.Player{
		Name:            name,
		Icon:            'b',
		IdentifierBase:  backend.IdentifierBase{id},
		CurrentPosition: backend.Coordinate{X: -1, Y: 9},
	}

	// Register the entity under the game lock.
	bots.game.Mu.Lock()
	bots.game.AddEntity(player)
	bots.game.Mu.Unlock()

	bots.bots = append(bots.bots, &bot{playerID: id})
	return player
}
// world tracks all game tiles and is used for astar traversal.
type world struct {
	// tiles maps each map coordinate to its tile; coordinates outside the
	// map have no entry.
	tiles map[backend.Coordinate]*tile
}

// tileKind differentiates walls from normal tiles.
type tileKind int

const (
	tileWall tileKind = iota // impassable wall tile
	tileNone                 // walkable tile
)

// tile represents a point on the map.
type tile struct {
	position backend.Coordinate // location of this tile on the map
	world    *world             // back-reference used to look up neighbors
	kind     tileKind           // wall or walkable
}
// PathNeighbors is used by beefsack/astar to traverse. It returns the
// walkable tiles directly above, below, left, and right of t.
func (t *tile) PathNeighbors() []astar.Pather {
	offsets := []backend.Coordinate{
		{X: -1, Y: 0},
		{X: 1, Y: 0},
		{X: 0, Y: -1},
		{X: 0, Y: 1},
	}
	neighbors := []astar.Pather{}
	for _, offset := range offsets {
		candidate, ok := t.world.tiles[t.position.Add(offset)]
		if !ok || candidate.kind != tileNone {
			continue
		}
		neighbors = append(neighbors, candidate)
	}
	return neighbors
}

// PathNeighborCost is used by beefsack/astar to determine the cost of a
// move; every step costs the same.
func (t *tile) PathNeighborCost(to astar.Pather) float64 {
	return 1
}

// PathEstimatedCost estimates the cost of moving between two points using
// the distance between their positions.
func (t *tile) PathEstimatedCost(to astar.Pather) float64 {
	return float64(t.position.Distance(to.(*tile).position))
}
// getShootDirection determines if a straight path between c1 and c2 is not
// blocked by a wall, and if not returns the direction of the path.
// It returns backend.DirectionStop when c1 and c2 share neither a row nor a
// column, or when any tile strictly between them is a wall.
func getShootDirection(world *world, c1 backend.Coordinate, c2 backend.Coordinate) backend.Direction {
	direction := backend.DirectionStop
	// diffCoordinate is the unit step from c1 towards c2 along the shared axis.
	diffCoordinate := backend.Coordinate{
		X: 0,
		Y: 0,
	}
	// Same column: the shot is vertical.
	if c1.X == c2.X {
		if c2.Y < c1.Y {
			diffCoordinate.Y = -1
			direction = backend.DirectionUp
		} else if c2.Y > c1.Y {
			diffCoordinate.Y = 1
			direction = backend.DirectionDown
		}
	} else if c1.Y == c2.Y {
		// Same row: the shot is horizontal.
		if c2.X < c1.X {
			diffCoordinate.X = -1
			direction = backend.DirectionLeft
		} else if c2.X > c1.X {
			diffCoordinate.X = 1
			direction = backend.DirectionRight
		}
	}
	// Not aligned (or c1 == c2): no straight shot exists.
	if direction == backend.DirectionStop {
		return direction
	}
	// Walk from c1 towards c2 one tile at a time; a wall anywhere strictly
	// between them blocks the shot.
	newPosition := c1.Add(diffCoordinate)
	for {
		if newPosition == c2 {
			break
		}
		tile, ok := world.tiles[newPosition]
		if ok && tile.kind == tileWall {
			return backend.DirectionStop
		}
		newPosition = newPosition.Add(diffCoordinate)
	}
	return direction
}
// Start starts the goroutine used to determine bot moves.
// Every 200ms it snapshots all player positions and, for each bot, either
// fires a laser at an aligned visible player or pathfinds (astar) towards
// the closest player. The goroutine runs forever; there is no stop channel.
func (bots *Bots) Start() {
	go func() {
		// Build a static tile index of the map once, for astar traversal.
		world := &world{
			tiles: make(map[backend.Coordinate]*tile),
		}
		for symbol, positions := range bots.game.GetMapByType() {
			for _, position := range positions {
				if symbol == backend.MapTypeWall {
					world.tiles[position] = &tile{
						position: position,
						world:    world,
						kind:     tileWall,
					}
				} else {
					world.tiles[position] = &tile{
						position: position,
						world:    world,
						kind:     tileNone,
					}
				}
			}
		}
		for {
			bots.game.Mu.RLock()
			// Get all player positions.
			playerPositions := make(map[uuid.UUID]backend.Coordinate, 0)
			for _, entity := range bots.game.Entities {
				switch entity.(type) {
				case *backend.Player:
					player := entity.(*backend.Player)
					playerPositions[entity.ID()] = player.Position()
				}
			}
			bots.game.Mu.RUnlock()
			for _, bot := range bots.bots {
				bots.game.Mu.RLock()
				player := bots.game.GetEntity(bot.playerID).(*backend.Player)
				bots.game.Mu.RUnlock()
				playerPosition := player.Position()
				// Find the closest position.
				closestPosition := backend.Coordinate{}
				move := false
				shootDirection := backend.DirectionStop
				shoot := false
				for id, position := range playerPositions {
					// Skip the bot's own entry in the snapshot.
					if id == player.ID() {
						continue
					}
					// Check if we're on top of the player and move if so.
					if position == playerPosition {
						closestPosition = position.Add(backend.Coordinate{
							X: 1,
							Y: 1,
						})
						move = true
						break
					}
					// See if a player can be shot at.
					shootDirection = getShootDirection(world, playerPosition, position)
					if shootDirection != backend.DirectionStop {
						shoot = true
						break
					}
					// Find a close player to move to.
					if !move || (position.Distance(playerPosition) < closestPosition.Distance(playerPosition)) {
						closestPosition = position
						move = true
					}
				}
				// Randomly move to a close tile.
				// This is pretty lazy but avoids cases where bots are locked
				// into movement/laser loops.
				// NOTE(review): reseeding on every iteration is unnecessary
				// (and rand.Seed is deprecated since Go 1.20); seeding once
				// before the loop would suffice.
				rand.Seed(time.Now().UnixNano())
				if move && rand.Intn(100) > 60 {
					// NOTE(review): rand.Intn(2)-1 yields only -1 or 0; if a
					// full -1..1 jitter was intended this should be
					// rand.Intn(3)-1 — confirm.
					closestPosition = closestPosition.Add(backend.Coordinate{
						X: rand.Intn(2) - 1,
						Y: rand.Intn(2) - 1,
					})
					shoot = false
				}
				// Shooting takes priority over moving.
				if shoot {
					bots.game.ActionChannel <- backend.LaserAction{
						ID:        uuid.New(),
						OwnerID:   player.ID(),
						Direction: shootDirection,
						Created:   time.Now(),
					}
					continue
				}
				if !move {
					continue
				}
				// Ensure that the tiles we're moving from/to exist.
				fromTile, ok := world.tiles[playerPosition]
				if !ok {
					continue
				}
				toTile, ok := world.tiles[closestPosition]
				if !ok {
					continue
				}
				// Find a path using the astar algorithm.
				path, _, found := astar.Path(toTile, fromTile)
				if !found {
					continue
				}
				// Move on the path. path[0] is the bot's own tile; prefer
				// the next step when one exists.
				var moveTowards backend.Coordinate
				if len(path) > 1 {
					moveTowards = path[1].(*tile).position
				} else {
					moveTowards = path[0].(*tile).position
				}
				// Determine the direction to move to reach the point.
				xDiff := moveTowards.X - playerPosition.X
				yDiff := moveTowards.Y - playerPosition.Y
				direction := backend.DirectionStop
				if xDiff < 0 {
					direction = backend.DirectionLeft
				} else if xDiff > 0 {
					direction = backend.DirectionRight
				} else if yDiff < 0 {
					direction = backend.DirectionUp
				} else if yDiff > 0 {
					direction = backend.DirectionDown
				}
				if direction == backend.DirectionStop {
					continue
				}
				bots.game.ActionChannel <- backend.MoveAction{
					ID:        player.ID(),
					Direction: direction,
					Created:   time.Now(),
				}
			}
			time.Sleep(time.Millisecond * 200)
		}
	}()
}
package nats
import (
"errors"
"fmt"
"time"
natsgo "github.com/nats-io/nats.go"
"github.com/simpleiot/simpleiot/data"
)
// SendNodePointCreate sends a single node point using the nats protocol and
// creates the node if it does not already exist.
func SendNodePointCreate(nc *natsgo.Conn, nodeID string, point data.Point, ack bool) error {
	points := []data.Point{point}
	return SendNodePointsCreate(nc, nodeID, points, ack)
}
// SendNodePointsCreate sends node points using the nats protocol and
// creates the node if it does not already exist.
func SendNodePointsCreate(nc *natsgo.Conn, nodeID string, points data.Points, ack bool) error {
	// Probe for the node first so we know whether it must be created.
	_, err := GetNode(nc, nodeID, "none")
	nodeExists := err == nil
	if err != nil && err != data.ErrDocumentNotFound {
		return fmt.Errorf("GetNode error: %w", err)
	}

	if err := SendNodePoints(nc, nodeID, points, ack); err != nil {
		return fmt.Errorf("SendNodePoints error: %w", err)
	}

	if !nodeExists {
		// For a freshly created node, send a tombstone point of value 0 on
		// its root edge.
		tombstone := data.Point{
			Type:  data.PointTypeTombstone,
			Value: 0,
		}
		if err := SendEdgePoint(nc, nodeID, "", tombstone, true); err != nil {
			return fmt.Errorf("SendEdgePoint error: %w", err)
		}
	}

	return nil
}
// SendNodePoint sends a single node point using the nats protocol.
func SendNodePoint(nc *natsgo.Conn, nodeID string, point data.Point, ack bool) error {
	return SendNodePoints(nc, nodeID, data.Points{point}, ack)
}

// SendEdgePoint sends a single edge point using the nats protocol.
func SendEdgePoint(nc *natsgo.Conn, nodeID, parentID string, point data.Point, ack bool) error {
	return SendEdgePoints(nc, nodeID, parentID, data.Points{point}, ack)
}

// SendNodePoints sends node points using the nats protocol.
func SendNodePoints(nc *natsgo.Conn, nodeID string, points data.Points, ack bool) error {
	return sendPoints(nc, SubjectNodePoints(nodeID), points, ack)
}

// SendEdgePoints sends edge points using the nats protocol. An empty
// parentID is normalized to "none".
func SendEdgePoints(nc *natsgo.Conn, nodeID, parentID string, points data.Points, ack bool) error {
	if parentID == "" {
		parentID = "none"
	}
	return sendPoints(nc, SubjectEdgePoints(nodeID, parentID), points, ack)
}
func sendPoints(nc *natsgo.Conn, subject string, points data.Points, ack bool) error {
for i := range points {
if points[i].Time.IsZero() {
points[i].Time = time.Now()
}
}
data, err := points.ToPb()
if err != nil {
return err
}
if ack {
msg, err := nc.Request(subject, data, time.Second)
if err != nil {
return err
}
if len(msg.Data) > 0 {
return errors.New(string(msg.Data))
}
} else {
if err := nc.Publish(subject, data); err != nil {
return err
}
}
return err
} | nats/point.go | 0.61555 | 0.449211 | point.go | starcoder |
package ring
import (
"math"
"math/bits"
"unsafe"
)
// BasisExtender stores the necessary parameters for RNS basis extension.
// The used algorithm is from https://eprint.iacr.org/2018/117.pdf.
type BasisExtender struct {
	ringQ             *Ring          // ring over the Q moduli
	ringP             *Ring          // ring over the P moduli
	paramsQtoP        []modupParams  // mod-up parameters for each level of Q
	paramsPtoQ        []modupParams  // mod-up parameters for each level of P
	modDownparamsPtoQ [][]uint64     // (P_0*...*P_j)^-1 mod q_i, for ModDown to Q
	modDownparamsQtoP [][]uint64     // (Q_0*...*Q_j)^-1 mod p_i, for ModDown to P
	polypoolQ         *Poly          // scratch polynomial in basis Q
	polypoolP         *Poly          // scratch polynomial in basis P
}

// modupParams holds the precomputed constants used by the fast exact base
// conversion of a polynomial from basis Q to basis P.
type modupParams struct {
	//Parameters for basis extension from Q to P
	// (Q/Qi)^-1) (mod each Qi) (in Montgomery form)
	qoverqiinvqi []uint64
	// Q/qi (mod each Pj) (in Montgomery form)
	qoverqimodp [][]uint64
	// Q*v (mod each Pj) for v in [1,...,k] where k is the number of Pj moduli
	vtimesqmodp [][]uint64
}
// genModDownParams precomputes, for each prefix P_0..P_j of ringP's moduli
// and each modulus q_i of ringQ, the value (P_0*...*P_j)^-1 mod q_i in
// Montgomery form. params[j][i] is the constant used to divide out the
// product of the first j+1 P-moduli during a ModDown at level j.
func genModDownParams(ringQ, ringP *Ring) (params [][]uint64) {
	params = make([][]uint64, len(ringP.Modulus))
	bredParams := ringQ.BredParams
	mredParams := ringQ.MredParams
	for j := range ringP.Modulus {
		params[j] = make([]uint64, len(ringQ.Modulus))
		for i, qi := range ringQ.Modulus {
			// P_j^-1 mod q_i via exponentiation by q_i-2 (Fermat; the
			// moduli are assumed prime), then lifted to Montgomery form.
			params[j][i] = ModExp(ringP.Modulus[j], qi-2, qi)
			params[j][i] = MForm(params[j][i], qi, bredParams[i])
			// Accumulate with the previous prefix inverse so that
			// params[j][i] = (P_0*...*P_j)^-1 mod q_i.
			if j > 0 {
				params[j][i] = MRed(params[j][i], params[j-1][i], qi, mredParams[i])
			}
		}
	}
	return
}
// NewBasisExtender creates a new BasisExtender, enabling RNS basis extension
// from Q to P and from P to Q for every level of either basis.
func NewBasisExtender(ringQ, ringP *Ring) *BasisExtender {
	be := &BasisExtender{ringQ: ringQ, ringP: ringP}

	// One set of mod-up parameters per level of each basis.
	be.paramsQtoP = make([]modupParams, len(ringQ.Modulus))
	for i := range ringQ.Modulus {
		be.paramsQtoP[i] = basisextenderparameters(ringQ.Modulus[:i+1], ringP.Modulus)
	}
	be.paramsPtoQ = make([]modupParams, len(ringP.Modulus))
	for i := range ringP.Modulus {
		be.paramsPtoQ[i] = basisextenderparameters(ringP.Modulus[:i+1], ringQ.Modulus)
	}

	be.modDownparamsPtoQ = genModDownParams(ringQ, ringP)
	be.modDownparamsQtoP = genModDownParams(ringP, ringQ)

	// Scratch polynomials reused across calls.
	be.polypoolQ = ringQ.NewPoly()
	be.polypoolP = ringP.NewPoly()
	return be
}
// basisextenderparameters precomputes the constants needed to convert a
// polynomial from RNS basis Q to RNS basis P:
//   - (Q/q_i)^-1 mod q_i for each q_i (Montgomery form),
//   - Q/q_i mod p_j for each (q_i, p_j) pair (Montgomery form),
//   - v*Q mod p_j for v in [0, len(Q)], the correction terms.
func basisextenderparameters(Q, P []uint64) modupParams {
	// Barrett/Montgomery reduction constants for every modulus.
	bredParamsQ := make([][]uint64, len(Q))
	mredParamsQ := make([]uint64, len(Q))
	bredParamsP := make([][]uint64, len(P))
	mredParamsP := make([]uint64, len(P))
	for i := range Q {
		bredParamsQ[i] = BRedParams(Q[i])
		mredParamsQ[i] = MRedParams(Q[i])
	}
	for i := range P {
		bredParamsP[i] = BRedParams(P[i])
		mredParamsP[i] = MRedParams(P[i])
	}
	qoverqiinvqi := make([]uint64, len(Q))
	qoverqimodp := make([][]uint64, len(P))
	for i := range P {
		qoverqimodp[i] = make([]uint64, len(Q))
	}
	var qiStar uint64
	for i, qi := range Q {
		// qiStar = Q/q_i mod q_i (product of all other Q moduli).
		qiStar = MForm(1, qi, bredParamsQ[i])
		for j := 0; j < len(Q); j++ {
			if j != i {
				qiStar = MRed(qiStar, MForm(Q[j], qi, bredParamsQ[i]), qi, mredParamsQ[i])
			}
		}
		// (Q/Qi)^-1) * r (mod Qi) (in Montgomery form)
		qoverqiinvqi[i] = ModexpMontgomery(qiStar, int(qi-2), qi, mredParamsQ[i], bredParamsQ[i])
		for j, pj := range P {
			// (Q/qi * r) (mod Pj) (in Montgomery form)
			qiStar = 1
			for u := 0; u < len(Q); u++ {
				if u != i {
					qiStar = MRed(qiStar, MForm(Q[u], pj, bredParamsP[j]), pj, mredParamsP[j])
				}
			}
			qoverqimodp[j][i] = MForm(qiStar, pj, bredParamsP[j])
		}
	}
	vtimesqmodp := make([][]uint64, len(P))
	var QmodPi uint64
	for j, pj := range P {
		vtimesqmodp[j] = make([]uint64, len(Q)+1)
		// Correction Term (v*Q) mod each Pj
		QmodPi = 1
		for _, qi := range Q {
			QmodPi = MRed(QmodPi, MForm(qi, pj, bredParamsP[j]), pj, mredParamsP[j])
		}
		// v = -Q mod p_j; the table accumulates v, 2v, ... so that
		// vtimesqmodp[j][k] == -k*Q mod p_j.
		v := pj - QmodPi
		vtimesqmodp[j][0] = 0
		for i := 1; i < len(Q)+1; i++ {
			vtimesqmodp[j][i] = CRed(vtimesqmodp[j][i-1]+v, pj)
		}
	}
	return modupParams{qoverqiinvqi: qoverqiinvqi, qoverqimodp: qoverqimodp, vtimesqmodp: vtimesqmodp}
}
// ShallowCopy creates a shallow copy of this basis extender in which the
// read-only data-structures are shared with the receiver; only the scratch
// polynomials are freshly allocated. Returns nil for a nil receiver.
func (be *BasisExtender) ShallowCopy() *BasisExtender {
	if be == nil {
		return nil
	}
	cp := *be
	cp.polypoolQ = be.ringQ.NewPoly()
	cp.polypoolP = be.ringP.NewPoly()
	return &cp
}
// ModUpQtoP extends the RNS basis of a polynomial from Q to QP.
// Given a polynomial with coefficients in basis {Q0,Q1....Qlevel},
// it extends its basis from {Q0,Q1....Qlevel} to {Q0,Q1....Qlevel,P0,P1...Pj}.
func (be *BasisExtender) ModUpQtoP(levelQ, levelP int, polQ, polP *Poly) {
	in := polQ.Coeffs[:levelQ+1]
	out := polP.Coeffs[:levelP+1]
	modUpExact(in, out, be.ringQ, be.ringP, be.paramsQtoP[levelQ])
}

// ModUpPtoQ extends the RNS basis of a polynomial from P to PQ.
// Given a polynomial with coefficients in basis {P0,P1....Plevel},
// it extends its basis from {P0,P1....Plevel} to {Q0,Q1...Qj}.
func (be *BasisExtender) ModUpPtoQ(levelP, levelQ int, polP, polQ *Poly) {
	in := polP.Coeffs[:levelP+1]
	out := polQ.Coeffs[:levelQ+1]
	modUpExact(in, out, be.ringP, be.ringQ, be.paramsPtoQ[levelP])
}
// ModDownQPtoQ reduces the basis of a polynomial.
// Given a polynomial with coefficients in basis {Q0,Q1....Qlevel} and {P0,P1...Pj},
// it reduces its basis from {Q0,Q1....Qlevel} and {P0,P1...Pj} to {Q0,Q1....Qlevel}
// and does a rounded integer division of the result by P.
// Inputs are processed as-is (no NTT transforms are applied); presumably
// they are expected outside the NTT domain — see ModDownQPtoQNTT for NTT
// inputs.
func (be *BasisExtender) ModDownQPtoQ(levelQ, levelP int, p1Q, p1P, p2Q *Poly) {
	ringQ := be.ringQ
	// modDownParams[levelP][i] = (P0*...*PlevelP)^-1 mod Qi, the divisor P.
	modDownParams := be.modDownparamsPtoQ
	polypool := be.polypoolQ
	// Then we target this P basis of p1 and convert it to a Q basis (at the "level" of p1) and copy it on polypool
	// polypool is now the representation of the P basis of p1 but in basis Q (at the "level" of p1)
	be.ModUpPtoQ(levelP, levelQ, p1P, polypool)
	// Finally, for each level of p1 (and polypool since they now share the same basis) we compute p2 = (P^-1) * (p1 - polypool) mod Q
	for i := 0; i < levelQ+1; i++ {
		SubVecAndMulScalarMontgomeryTwoQiVec(polypool.Coeffs[i], p1Q.Coeffs[i], p2Q.Coeffs[i], ringQ.Modulus[i]-modDownParams[levelP][i], ringQ.Modulus[i], ringQ.MredParams[i])
	}
	// In total we do len(P) + len(Q) NTT, which is optimal (linear in the number of moduli of P and Q)
}
// ModDownQPtoQNTT reduces the basis of a polynomial.
// Given a polynomial with coefficients in basis {Q0,Q1....Qi} and {P0,P1...Pj},
// it reduces its basis from {Q0,Q1....Qi} and {P0,P1...Pj} to {Q0,Q1....Qi}
// and does a rounded integer division of the result by P.
// Inputs must be in the NTT domain.
func (be *BasisExtender) ModDownQPtoQNTT(levelQ, levelP int, p1Q, p1P, p2Q *Poly) {
	ringQ := be.ringQ
	ringP := be.ringP
	// modDownParams[levelP][i] = (P0*...*PlevelP)^-1 mod Qi, the divisor P.
	modDownParams := be.modDownparamsPtoQ
	polypoolP := be.polypoolP
	polypoolQ := be.polypoolQ
	// First we get the P basis part of p1 out of the NTT domain
	// (the base conversion operates on coefficient representations).
	ringP.InvNTTLazyLvl(levelP, p1P, polypoolP)
	// Then we target this P basis of p1 and convert it to a Q basis (at the "level" of p1) and copy it on polypool
	// polypool is now the representation of the P basis of p1 but in basis Q (at the "level" of p1)
	be.ModUpPtoQ(levelP, levelQ, polypoolP, polypoolQ)
	// First we switch back the relevant polypool CRT array back to the NTT domain
	ringQ.NTTLazyLvl(levelQ, polypoolQ, polypoolQ)
	// Finally, for each level of p1 (and polypool since they now share the same basis) we compute p2 = (P^-1) * (p1 - polypool) mod Q
	for i := 0; i < levelQ+1; i++ {
		// Then for each coefficient we compute (P^-1) * (p1[i][j] - polypool[i][j]) mod qi
		SubVecAndMulScalarMontgomeryTwoQiVec(polypoolQ.Coeffs[i], p1Q.Coeffs[i], p2Q.Coeffs[i], ringQ.Modulus[i]-modDownParams[levelP][i], ringQ.Modulus[i], ringQ.MredParams[i])
	}
	// In total we do len(P) + len(Q) NTT, which is optimal (linear in the number of moduli of P and Q)
}
// ModDownQPtoP reduces the basis of a polynomial.
// Given a polynomial with coefficients in basis {Q0,Q1....QlevelQ} and {P0,P1...PlevelP},
// it reduces its basis from {Q0,Q1....QlevelQ} and {P0,P1...PlevelP} to {P0,P1...PlevelP}
// and does a floored integer division of the result by Q.
func (be *BasisExtender) ModDownQPtoP(levelQ, levelP int, p1Q, p1P, p2P *Poly) {
	ringP := be.ringP
	modDownParams := be.modDownparamsQtoP
	polypool := be.polypoolP

	// Convert the Q basis part of p1 to the P basis (at levelP) into polypool.
	be.ModUpQtoP(levelQ, levelP, p1Q, polypool)

	// For each P modulus compute p2 = (Q^-1) * (p1 - polypool) mod P.
	// modDownparamsQtoP[levelQ][i] holds (Q0*...*QlevelQ)^-1 mod Pi. The
	// first index selects the divisor's level, so it must be levelQ (the
	// divisor is Q), not levelP — this mirrors ModDownQPtoQ, which divides
	// by P and indexes modDownparamsPtoQ with levelP.
	for i := 0; i < levelP+1; i++ {
		SubVecAndMulScalarMontgomeryTwoQiVec(polypool.Coeffs[i], p1P.Coeffs[i], p2P.Coeffs[i], ringP.Modulus[i]-modDownParams[levelQ][i], ringP.Modulus[i], ringP.MredParams[i])
	}

	// In total we do len(P) + len(Q) NTT, which is optimal (linear in the number of moduli of P and Q)
}
// modUpExact converts the coefficients of p1 (basis given by ringQ, per the
// precomputed params) into the basis of ringP, writing the result into p2.
// It processes coefficients eight at a time through unsafe [8]uint64 views.
// Caution, returns the values in [0, 2q-1]
func modUpExact(p1, p2 [][]uint64, ringQ, ringP *Ring, params modupParams) {
	// v receives the per-coefficient correction-term index; y0..y7 receive
	// the per-modulus partial reconstructions for 8 coefficients.
	var v [8]uint64
	var y0, y1, y2, y3, y4, y5, y6, y7 [32]uint64
	Q := ringQ.Modulus
	P := ringP.Modulus
	mredParamsQ := ringQ.MredParams
	mredParamsP := ringP.MredParams
	vtimesqmodp := params.vtimesqmodp
	qoverqiinvqi := params.qoverqiinvqi
	qoverqimodp := params.qoverqimodp
	// We loop over each coefficient and apply the basis extension
	for x := 0; x < len(p1[0]); x = x + 8 {
		reconstructRNS(len(p1), x, p1, &v, &y0, &y1, &y2, &y3, &y4, &y5, &y6, &y7, Q, mredParamsQ, qoverqiinvqi)
		for j := 0; j < len(p2); j++ {
			multSum((*[8]uint64)(unsafe.Pointer(&p2[j][x])), &v, &y0, &y1, &y2, &y3, &y4, &y5, &y6, &y7, len(p1), P[j], mredParamsP[j], vtimesqmodp[j], qoverqimodp[j])
		}
	}
}
// Decomposer is a structure that stores the parameters of the arbitrary decomposer.
// This decomposer takes a p(x)_Q (in basis Q) and returns p(x) mod qi in basis QP, where
// qi = prod(Q_i) for 0<=i<=L, where L is the number of factors in P.
type Decomposer struct {
	ringQ, ringP *Ring
	// modUpParams[lvlP][beta][decompLvl] holds the base-conversion
	// parameters for each P level, decomposition block, and block level
	// (see NewDecomposer for the exact indexing).
	modUpParams [][][]modupParams
}
// NewDecomposer creates a new Decomposer.
// For each possible P level (at least two P moduli), the Q moduli are split
// into beta blocks of up to alpha = len(P) moduli, and base-conversion
// parameters are precomputed for every block and every partial level inside
// a block.
func NewDecomposer(ringQ, ringP *Ring) (decomposer *Decomposer) {
	decomposer = new(Decomposer)
	decomposer.ringQ = ringQ
	decomposer.ringP = ringP
	Q := ringQ.Modulus
	decomposer.modUpParams = make([][][]modupParams, len(ringP.Modulus)-1)
	for lvlP := range ringP.Modulus[1:] {
		P := ringP.Modulus[:lvlP+2]
		// alpha: block size; beta: number of blocks covering all of Q.
		alpha := len(P)
		beta := int(math.Ceil(float64(len(Q)) / float64(alpha)))
		// xalpha[i] is the number of Q moduli in block i; only the last
		// block can be short.
		xalpha := make([]int, beta)
		for i := range xalpha {
			xalpha[i] = alpha
		}
		if len(Q)%alpha != 0 {
			xalpha[beta-1] = len(Q) % alpha
		}
		decomposer.modUpParams[lvlP] = make([][]modupParams, beta)
		// Create modUpParams for each possible combination of [Qi,Pj] according to xalpha
		for i := 0; i < beta; i++ {
			decomposer.modUpParams[lvlP][i] = make([]modupParams, xalpha[i]-1)
			for j := 0; j < xalpha[i]-1; j++ {
				// Qi: the first j+2 moduli of block i (the source basis).
				Qi := make([]uint64, j+2)
				// Pi: the full Q basis followed by the P prefix (the
				// destination basis).
				Pi := make([]uint64, len(Q)+len(P))
				for k := 0; k < j+2; k++ {
					Qi[k] = Q[i*alpha+k]
				}
				for k := 0; k < len(Q); k++ {
					Pi[k] = Q[k]
				}
				for k := len(Q); k < len(Q)+len(P); k++ {
					Pi[k] = P[k-len(Q)]
				}
				decomposer.modUpParams[lvlP][i][j] = basisextenderparameters(Qi, Pi)
			}
		}
	}
	return
}
// DecomposeAndSplit decomposes a polynomial p(x) in basis Q, reduces it modulo qi, and returns
// the result in basis QP separately.
// beta selects which block of alpha Q-moduli is decomposed; p1Q receives the
// Q-basis part and p1P the P-basis part of the reconstruction.
func (decomposer *Decomposer) DecomposeAndSplit(levelQ, levelP, alpha, beta int, p0Q, p1Q, p1P *Poly) {
	ringQ := decomposer.ringQ
	ringP := decomposer.ringP
	// lvlQStart: index of the first Q modulus of the selected block.
	lvlQStart := beta * alpha
	// decompLvl: index of the last modulus inside the block (relative to
	// the block), capped for the final partial block; -1 means the block
	// holds a single modulus and no reconstruction is needed.
	var decompLvl int
	if levelQ > alpha*(beta+1)-1 {
		decompLvl = alpha - 2
	} else {
		decompLvl = (levelQ % alpha) - 1
	}
	// First we check if the vector can simply by coping and rearranging elements (the case where no reconstruction is needed)
	if decompLvl == -1 {
		for j := 0; j < levelQ+1; j++ {
			copy(p1Q.Coeffs[j], p0Q.Coeffs[lvlQStart])
		}
		for j := 0; j < levelP+1; j++ {
			copy(p1P.Coeffs[j], p0Q.Coeffs[lvlQStart])
		}
		// Otherwise, we apply a fast exact base conversion for the reconstruction
	} else {
		params := decomposer.modUpParams[alpha-2][beta][decompLvl]
		var v [8]uint64
		var vi [8]float64
		var y0, y1, y2, y3, y4, y5, y6, y7 [32]uint64
		Q := ringQ.Modulus
		P := ringP.Modulus
		mredParamsQ := ringQ.MredParams
		mredParamsP := ringP.MredParams
		qoverqiinvqi := params.qoverqiinvqi
		vtimesqmodp := params.vtimesqmodp
		qoverqimodp := params.qoverqimodp
		// We loop over each coefficient and apply the basis extension
		// eight coefficients at a time through unsafe [8]uint64 views.
		for x := 0; x < len(p0Q.Coeffs[0]); x = x + 8 {
			vi[0], vi[1], vi[2], vi[3], vi[4], vi[5], vi[6], vi[7] = 0, 0, 0, 0, 0, 0, 0, 0
			// Coefficients to be decomposed
			for i, j := 0, lvlQStart; i < decompLvl+2; i, j = i+1, j+1 {
				qqiinv := qoverqiinvqi[i]
				qi := Q[j]
				mredParams := mredParamsQ[j]
				qif := float64(qi)
				px := (*[8]uint64)(unsafe.Pointer(&p0Q.Coeffs[j][x]))
				py := (*[8]uint64)(unsafe.Pointer(&p1Q.Coeffs[j][x]))
				// For the coefficients to be decomposed, we can simply copy them
				py[0], py[1], py[2], py[3], py[4], py[5], py[6], py[7] = px[0], px[1], px[2], px[3], px[4], px[5], px[6], px[7]
				// y_k = x_k * (Q/qi)^-1 mod qi, the CRT reconstruction term.
				y0[i] = MRed(px[0], qqiinv, qi, mredParams)
				y1[i] = MRed(px[1], qqiinv, qi, mredParams)
				y2[i] = MRed(px[2], qqiinv, qi, mredParams)
				y3[i] = MRed(px[3], qqiinv, qi, mredParams)
				y4[i] = MRed(px[4], qqiinv, qi, mredParams)
				y5[i] = MRed(px[5], qqiinv, qi, mredParams)
				y6[i] = MRed(px[6], qqiinv, qi, mredParams)
				y7[i] = MRed(px[7], qqiinv, qi, mredParams)
				// Computation of the correction term v * Q%pi
				vi[0] += float64(y0[i]) / qif
				vi[1] += float64(y1[i]) / qif
				vi[2] += float64(y2[i]) / qif
				vi[3] += float64(y3[i]) / qif
				vi[4] += float64(y4[i]) / qif
				vi[5] += float64(y5[i]) / qif
				vi[6] += float64(y6[i]) / qif
				vi[7] += float64(y7[i]) / qif
			}
			// Index of the correction term
			v[0] = uint64(vi[0])
			v[1] = uint64(vi[1])
			v[2] = uint64(vi[2])
			v[3] = uint64(vi[3])
			v[4] = uint64(vi[4])
			v[5] = uint64(vi[5])
			v[6] = uint64(vi[6])
			v[7] = uint64(vi[7])
			// Coefficients of index smaller than the ones to be decomposed
			for j := 0; j < lvlQStart; j++ {
				multSum((*[8]uint64)(unsafe.Pointer(&p1Q.Coeffs[j][x])), &v, &y0, &y1, &y2, &y3, &y4, &y5, &y6, &y7, decompLvl+2, Q[j], mredParamsQ[j], vtimesqmodp[j], qoverqimodp[j])
			}
			// Coefficients of index greater than the ones to be decomposed
			for j := alpha * beta; j < levelQ+1; j = j + 1 {
				multSum((*[8]uint64)(unsafe.Pointer(&p1Q.Coeffs[j][x])), &v, &y0, &y1, &y2, &y3, &y4, &y5, &y6, &y7, decompLvl+2, Q[j], mredParamsQ[j], vtimesqmodp[j], qoverqimodp[j])
			}
			// Coefficients of the special primes Pi
			for j, u := 0, len(ringQ.Modulus); j < levelP+1; j, u = j+1, u+1 {
				multSum((*[8]uint64)(unsafe.Pointer(&p1P.Coeffs[j][x])), &v, &y0, &y1, &y2, &y3, &y4, &y5, &y6, &y7, decompLvl+2, P[j], mredParamsP[j], vtimesqmodp[u], qoverqimodp[u])
			}
		}
	}
}
// reconstructRNS computes, for eight consecutive coefficients starting at
// column x, the Montgomery products y[i] = MRed(p[i][x+k], QbMont[i], Q[i],
// QInv[i]) for each of the first `index` moduli, and writes the rounded
// floating-point correction terms (sum of y/q per lane) into v.
// MRed and the meaning of Q/QInv/QbMont are defined elsewhere in the package.
func reconstructRNS(index, x int, p [][]uint64, v *[8]uint64, y0, y1, y2, y3, y4, y5, y6, y7 *[32]uint64, Q, QInv, QbMont []uint64) {
	// Treat the eight per-lane output arrays uniformly.
	ys := [8]*[32]uint64{y0, y1, y2, y3, y4, y5, y6, y7}

	var vi [8]float64

	for i := 0; i < index; i++ {
		qoverqiinvqi := QbMont[i]
		qi := Q[i]
		qiInv := QInv[i]
		qif := float64(qi)

		// Reinterpret 8 consecutive coefficients as a fixed-size array
		// (same aliasing trick as the original code).
		pTmp := (*[8]uint64)(unsafe.Pointer(&p[i][x]))

		for k, y := range ys {
			y[i] = MRed(pTmp[k], qoverqiinvqi, qi, qiInv)
			// Accumulate the correction term v = floor(sum_i y_i / q_i),
			// one float64 accumulator per lane (same addition order as before).
			vi[k] += float64(y[i]) / qif
		}
	}

	for k := range v {
		v[k] = uint64(vi[k])
	}
}
// multSum accumulates, for each of the eight lanes k, the 128-bit sum of
// y_k[i] * qoverqimodp[i] over i in [0, alpha), then applies one lazy
// Montgomery reduction per lane and adds pj plus the precomputed correction
// vtimesqmodp[v[k]].
// Caution, returns the values in [0, 2q-1].
func multSum(res, v *[8]uint64, y0, y1, y2, y3, y4, y5, y6, y7 *[32]uint64, alpha int, pj, qInv uint64, vtimesqmodp, qoverqimodp []uint64) {
	// View the eight decomposition arrays uniformly so the accumulation
	// becomes a pair of nested loops instead of 8x3 unrolled statements.
	ys := [8]*[32]uint64{y0, y1, y2, y3, y4, y5, y6, y7}

	// (rhi[k]:rlo[k]) holds lane k's running uint128 sum.
	var rlo, rhi [8]uint64

	for i := 0; i < alpha; i++ {
		q := qoverqimodp[i]
		for k, y := range ys {
			hi, lo := bits.Mul64(y[i], q)
			var carry uint64
			rlo[k], carry = bits.Add64(rlo[k], lo, 0)
			rhi[k] += hi + carry
		}
	}

	// Lazy Montgomery reduction per lane: subtract the folded low half,
	// then add pj (keeping the result non-negative) and the correction term.
	for k := 0; k < 8; k++ {
		hhi, _ := bits.Mul64(rlo[k]*qInv, pj)
		res[k] = rhi[k] - hhi + pj + vtimesqmodp[v[k]]
	}
}
package ilium
// RadianceMeter is a sensor that measures incident radiance along a single
// fixed ray (a point position looking towards a configured target).
// Contributions are accumulated by an embedded Radiometer.
type RadianceMeter struct {
	description string // human-readable label from the config
	ray Ray // the single ray along which radiance is measured
	sampleCount int // number of samples per measurement
	radiometer Radiometer // accumulates and reports the "Li" signal
}
// MakeRadianceMeter builds a RadianceMeter from its config map.
//
// config must contain "description" (string), "target" (a point config) and
// "sampleCount" (number); shapes must hold exactly one *PointShape, which is
// the position the meter looks from, towards target.
//
// Panics (with distinct messages per failure mode) when the shape
// requirements are not met.
func MakeRadianceMeter(
	config map[string]interface{}, shapes []Shape) *RadianceMeter {
	description := config["description"].(string)
	if len(shapes) != 1 {
		panic("Radiance meter must have exactly one shape")
	}
	pointShape, ok := shapes[0].(*PointShape)
	if !ok {
		panic("Radiance meter's shape must be a PointShape")
	}
	target := MakePoint3FromConfig(config["target"])
	sampleCount := int(config["sampleCount"].(float64))
	var direction Vector3
	direction.GetOffset(&pointShape.P, &target)
	direction.Normalize(&direction)
	// Offset the ray origin by a small epsilon (5e-4) to avoid
	// self-intersection, and extend it to infinity.
	ray := Ray{pointShape.P, direction, 5e-4, infFloat32(+1)}
	return &RadianceMeter{
		description: description,
		ray: ray,
		sampleCount: sampleCount,
		radiometer: MakeRadiometer("Li", description),
	}
}
// HasSpecularPosition reports that the sensor's position is a delta
// distribution (a single point).
func (rm *RadianceMeter) HasSpecularPosition() bool {
	return true
}
// HasSpecularDirection reports that the sensor's direction is a delta
// distribution (a single fixed ray direction).
func (rm *RadianceMeter) HasSpecularDirection() bool {
	return true
}
// GetExtent returns a 1x1 pixel extent carrying the configured sample count.
func (rm *RadianceMeter) GetExtent() SensorExtent {
	return SensorExtent{0, 1, 0, 1, rm.sampleCount}
}
// GetSampleConfig returns an empty config; the meter needs no random samples
// to generate its (fixed) ray.
func (rm *RadianceMeter) GetSampleConfig() SampleConfig {
	return SampleConfig{}
}
// SampleRay returns the meter's single fixed ray with unit weight and unit
// pdf; x, y and the sample bundle are ignored.
func (rm *RadianceMeter) SampleRay(x, y int, sampleBundle SampleBundle) (
	ray Ray, WeDivPdf Spectrum, pdf float32) {
	ray = rm.ray
	WeDivPdf = MakeConstantSpectrum(1)
	pdf = 1
	return
}
// SamplePixelPositionAndWeFromPoint is not supported for a radiance meter
// (both position and direction are delta distributions) and always panics.
func (rm *RadianceMeter) SamplePixelPositionAndWeFromPoint(
	u, v1, v2 float32, p Point3, pEpsilon float32, n Normal3) (
	x, y int, WeDivPdf Spectrum, pdf float32, wi Vector3,
	pSurface Point3, nSurface Normal3, shadowRay Ray) {
	panic("Called unexpectedly")
}
// ComputeWePdfFromPoint is not supported for a radiance meter and always
// panics.
func (rm *RadianceMeter) ComputeWePdfFromPoint(
	x, y int, p Point3, pEpsilon float32, n Normal3, wi Vector3) float32 {
	panic("Called unexpectedly")
}
func (rm *RadianceMeter) ComputeWeSpatialPdf(pSurface Point3) float32 {
	// Since we're assuming pSurface is on the sensor, return 1
	// even though we have a delta spatial distribution.
	return 1
}
func (rm *RadianceMeter) ComputeWeDirectionalPdf(
	x, y int, pSurface Point3, nSurface Normal3, wo Vector3) float32 {
	// Since we're assuming all parameters are valid, return 1
	// even though we have a delta directional distribution.
	return 1
}
// ComputePixelPositionAndWe is not supported for a radiance meter and always
// panics.
func (rm *RadianceMeter) ComputePixelPositionAndWe(
	pSurface Point3, nSurface Normal3, wo Vector3) (
	x, y int, We Spectrum) {
	panic("Called unexpectedly")
}
// AccumulateSensorContribution forwards a sensor-path contribution to the
// radiometer; the pixel coordinates are ignored (single-pixel sensor).
func (rm *RadianceMeter) AccumulateSensorContribution(
	x, y int, WeLiDivPdf Spectrum) {
	rm.radiometer.AccumulateSensorContribution(WeLiDivPdf)
}
// AccumulateSensorDebugInfo forwards tagged sensor debug info to the
// radiometer; pixel coordinates are ignored.
func (rm *RadianceMeter) AccumulateSensorDebugInfo(
	tag string, x, y int, s Spectrum) {
	rm.radiometer.AccumulateSensorDebugInfo(tag, s)
}
// RecordAccumulatedSensorContributions commits the accumulated sensor
// contributions; x and y are ignored.
func (rm *RadianceMeter) RecordAccumulatedSensorContributions(x, y int) {
	rm.radiometer.RecordAccumulatedSensorContributions()
}
// AccumulateLightContribution forwards a light-path contribution to the
// radiometer; pixel coordinates are ignored.
func (rm *RadianceMeter) AccumulateLightContribution(
	x, y int, WeLiDivPdf Spectrum) {
	rm.radiometer.AccumulateLightContribution(WeLiDivPdf)
}
// AccumulateLightDebugInfo forwards tagged light debug info to the
// radiometer; pixel coordinates are ignored.
func (rm *RadianceMeter) AccumulateLightDebugInfo(
	tag string, x, y int, s Spectrum) {
	rm.radiometer.AccumulateLightDebugInfo(tag, s)
}
// RecordAccumulatedLightContributions commits the accumulated light
// contributions.
func (rm *RadianceMeter) RecordAccumulatedLightContributions() {
	rm.radiometer.RecordAccumulatedLightContributions()
}
// EmitSignal emits the measured signal via the radiometer.
// NOTE(review): outputDir and outputExt are unused here — presumably kept to
// satisfy the Sensor interface; confirm.
func (rm *RadianceMeter) EmitSignal(outputDir, outputExt string) {
	rm.radiometer.EmitSignal()
}
package core
import (
"bytes"
"crypto/rand"
"errors"
"fmt"
"io/ioutil"
"log"
"strconv"
"strings"
)
// datasetSeparator is the field delimiter used in dataset files.
const datasetSeparator = ","
// Dataset struct represents a dataset object.
type Dataset struct {
	id string // random hex identifier assigned at construction
	path string // filesystem path of the backing file
	header []string // column names, populated by ReadFromFile
	data []DatasetTuple // parsed tuples, populated by ReadFromFile
}
// NewDataset is the constructor for the Dataset struct. A random 8-hex-digit
// ID is assigned to the new dataset. The file at path is not read here; call
// ReadFromFile to load it.
func NewDataset(path string) *Dataset {
	d := new(Dataset)
	buffer := make([]byte, 4)
	// The original code ignored this error; crypto/rand failing means the
	// platform entropy source is broken and no meaningful ID can be made.
	if _, err := rand.Read(buffer); err != nil {
		panic(err)
	}
	d.id = fmt.Sprintf("%x", buffer)
	d.path = path
	return d
}
// ID getter for dataset
func (d Dataset) ID() string {
	return d.id
}
// Path getter for dataset
func (d Dataset) Path() string {
	return d.path
}
// Header getter for dataset - only populated after a successful ReadFromFile
func (d Dataset) Header() []string {
	return d.header
}
// Data getter for dataset - only populated after a successful ReadFromFile
func (d Dataset) Data() []DatasetTuple {
	return d.data
}
// String method for dataset object - returns the path of the dataset
func (d Dataset) String() string {
	return d.path
}
// ReadFromFile parses the dataset file into memory: the first line becomes
// the header, every following non-empty line is deserialized into a
// DatasetTuple. The method is idempotent: if the data were previously read,
// it returns immediately without touching the file.
func (d *Dataset) ReadFromFile() error {
	if d.Header() != nil && d.Data() != nil { // previously read
		return nil
	}
	dat, err := ioutil.ReadFile(d.path)
	if err != nil {
		return err
	}
	// The original guard (len of the split < 1) could never trigger, since
	// strings.Split always returns at least one element; check the raw
	// contents instead.
	if len(dat) == 0 {
		return errors.New("File without contents")
	}
	lines := strings.Split(string(dat), "\n")
	// First line is the header; Split already yields the column slice, no
	// need to copy it element by element.
	d.header = strings.Split(lines[0], datasetSeparator)
	// Remaining non-empty lines are data tuples.
	for _, line := range lines[1:] {
		if len(line) == 0 {
			continue
		}
		t := new(DatasetTuple)
		t.Deserialize(line)
		d.data = append(d.data, *t)
	}
	return nil
}
// DatasetTuple represents a data tuple from the dataset
type DatasetTuple struct {
	Data []float64 // one float64 per column
}
// Deserialize is used to construct a tuple from a string representation.
// NOTE(review): on a malformed field the parse error is logged and the zero
// value (0) is still appended, preserving column alignment — confirm this
// best-effort behavior is intended.
func (t *DatasetTuple) Deserialize(data string) {
	for _, s := range strings.Split(data, datasetSeparator) {
		v, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
		if err != nil {
			log.Println(err)
		}
		t.Data = append(t.Data, v)
	}
}
// Serialize transforms the tuple to a string representation (same format as
// String).
func (t *DatasetTuple) Serialize() string {
	return t.String()
}
// String renders the tuple as comma-plus-space separated values with five
// decimal places, e.g. "1.00000, 2.50000".
func (t DatasetTuple) String() string {
	last := len(t.Data) - 1
	buffer := new(bytes.Buffer)
	for i, v := range t.Data {
		// Write straight into the buffer instead of allocating an
		// intermediate string per value.
		fmt.Fprintf(buffer, "%.5f", v)
		if i < last {
			buffer.WriteString(", ")
		}
	}
	// buffer.String() replaces the original fmt.Sprintf("%s", buffer.Bytes())
	// round trip, which added a pointless extra format pass.
	return buffer.String()
}
// Equals reports whether t and o hold exactly the same values.
func (t DatasetTuple) Equals(o DatasetTuple) bool {
	// Guard against unequal lengths: the original code indexed t.Data by
	// o's indices, panicking when t.Data was shorter and wrongly returning
	// true when t.Data was longer (extra elements were never compared).
	if len(t.Data) != len(o.Data) {
		return false
	}
	for i, v := range o.Data {
		if t.Data[i] != v {
			return false
		}
	}
	return true
}
// DatasetTuples represents a slice of DatasetTuple objects implementing
// sort.Interface.
type DatasetTuples []DatasetTuple
func (slice DatasetTuples) Len() int {
	return len(slice)
}
// Less reports whether slice[i] is strictly less than slice[j] in EVERY
// coordinate (a dominance ordering).
// NOTE(review): this is not lexicographic and is not a strict weak ordering
// (two tuples can be mutually "not less"), so sort results may be
// implementation-dependent — confirm this ordering is intended.
func (slice DatasetTuples) Less(i, j int) bool {
	for k := range slice[i].Data {
		if slice[i].Data[k] >= slice[j].Data[k] {
			return false
		}
	}
	return true
}
func (slice DatasetTuples) Swap(i, j int) {
	slice[i], slice[j] = slice[j], slice[i]
}
package origins
// Buffer holds a slice of facts. The buffer dynamically grows as facts
// are written to it. The position is maintained across reads.
// Buffer holds a slice of facts with a read offset, growing dynamically on
// writes (modeled after bytes.Buffer).
type Buffer struct {
	buf Facts
	// Contents are buf[off : len(buf)]; off is the read position.
	off int
}
// grow grows the buffer to guarantee space for n more facts.
// It returns the index where facts should be written.
func (b *Buffer) grow(n int) int {
	m := b.Len()
	// Buffer is empty, reset to reclaim space.
	if m == 0 && b.off != 0 {
		b.Truncate(0)
	}
	if len(b.buf)+n > cap(b.buf) {
		var buf Facts
		if m+n <= cap(b.buf)/2 {
			// Sliding the unread facts back to the start frees enough
			// capacity; avoid a fresh allocation.
			copy(b.buf[:], b.buf[b.off:])
			buf = b.buf[:m]
		} else {
			// Not enough room even after compacting: allocate a larger
			// backing slice (double the capacity plus n, as bytes.Buffer does).
			buf = make(Facts, 2*cap(b.buf)+n)
			copy(buf, b.buf[b.off:])
		}
		b.buf = buf
		b.off = 0
	}
	b.buf = b.buf[0 : b.off+m+n]
	return b.off + m
}
// Grow increases the buffer capacity by a minimum of n facts.
func (b *Buffer) Grow(n int) {
	b.grow(n)
}
// Len returns the length of the unread portion of the buffer.
func (b *Buffer) Len() int {
	return len(b.buf) - b.off
}
// Write writes a single fact to the buffer. The returned error is always
// nil (kept for interface symmetry).
func (b *Buffer) Write(f *Fact) error {
	_, err := b.Append(f)
	return err
}
// Append writes the given facts to the buffer, returning the number of
// facts copied and a nil error.
func (b *Buffer) Append(buf ...*Fact) (int, error) {
	m := b.grow(len(buf))
	return copy(b.buf[m:], buf), nil
}
// Facts returns a copy of the unread portion of facts and resets the
// buffer, reclaiming its space.
func (b *Buffer) Facts() Facts {
	n := b.Len()
	if n == 0 {
		return Facts{}
	}
	c := make(Facts, n)
	copy(c, b.buf[b.off:])
	// Reclaim space.
	b.Truncate(0)
	return c
}
// Truncate discards all but the first n unread facts. Truncate(0) also
// resets the read offset so the backing array can be reused.
// Panics if n is negative or exceeds the unread length.
func (b *Buffer) Truncate(n int) {
	switch {
	case n < 0 || n > b.Len():
		panic("origins.Buffer: truncation out of range")
	case n == 0:
		// Reuse buffer space.
		b.off = 0
	}
	b.buf = b.buf[0 : b.off+n]
}
// Reset resets the buffer so it has no content. This is equivalent to
// calling b.Truncate(0).
func (b *Buffer) Reset() {
	b.Truncate(0)
}
// Next returns the next unread fact in the buffer, advancing the read
// position, or nil when the buffer is exhausted (in which case the buffer
// is reset to reclaim space).
func (b *Buffer) Next() *Fact {
	// Do not exceed the write position.
	if b.off >= len(b.buf) {
		// Buffer is empty, reset.
		b.Truncate(0)
		return nil
	}
	f := b.buf[b.off]
	b.off++
	return f
}
// Err always returns nil; it exists to satisfy the iterator interface.
func (b *Buffer) Err() error {
	return nil
}
// NewBuffer initializes a buffer of facts backed by the given slice.
func NewBuffer(buf Facts) *Buffer {
	return &Buffer{
		buf: buf,
	}
}
<tutorial>
Getting started example of using 51Degrees device detection. The example
shows how to:
<ol>
<li>Instantiate the 51Degrees device detection provider.
<p><pre class="prettyprint lang-go">
var provider = FiftyOneDegreesPatternV3.NewProvider(dataFile)
</pre></p>
<li>Produce a match for a single HTTP User-Agent header
<p><pre class="prettyprint lang-go">
var match = provider.GetMatch(userAgent)
</pre></p>
<li>Extract the value of the IsMobile property as a boolean
<p><pre class="prettyprint lang-go">
if match.GetValue("IsMobile") == "True"{
return true
}
return false
</pre></p>
</ol>
This example assumes you have the 51Degrees Go API installed correctly.
</tutorial>
*/
// Snippet Start
package main
import (
"fmt"
"./src/pattern"
)
// Location of the 51Degrees Lite device data file.
var dataFile = "../data/51Degrees-LiteV3.2.dat"
// Provides access to device detection functions.
var provider =
	FiftyOneDegreesPatternV3.NewProvider(dataFile)
// Which properties to retrieve.
// NOTE(review): properties is not referenced in the visible code — confirm
// whether it should be passed to the provider.
var properties = []string{"IsMobile", "PlatformName", "PlatformVersion"}
// User-Agent string of an iPhone mobile device.
var mobileUserAgent = "Mozilla/5.0 (iPhone; CPU iPhone OS 7_1 like Mac OS X) " +
	"AppleWebKit/537.51.2 (KHTML, like Gecko) 'Version/7.0 Mobile/11D167 " +
	"Safari/9537.53"
// User-Agent string of Firefox Web browser version 41 on desktop.
var desktopUserAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) " +
	"Gecko/20100101 Firefox/41.0"
// User-Agent string of a MediaHub device.
var mediaHubUserAgent = "Mozilla/5.0 (Linux; Android 4.4.2; X7 Quad Core " +
	"Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 " +
	"Chrome/30.0.0.0 Safari/537.36"
// isMobile performs a device-detection match for the given User-Agent and
// reports whether the matched device's IsMobile property is "True".
func isMobile(userAgent string) bool {
	var match = provider.GetMatch(userAgent)
	// Property values come back as strings; return the comparison directly
	// instead of branching to literal true/false.
	return match.GetValue("IsMobile") == "True"
}
// main classifies three sample User-Agents (mobile, desktop, media hub) and
// prints whether each is detected as a mobile device.
func main() {
	fmt.Println("Mobile User-Agent: ", mobileUserAgent)
	// Determines whether the mobile User-Agent is a mobile device.
	if isMobile(mobileUserAgent){
		fmt.Println("   Mobile")
	}else{
		fmt.Println("   Non-Mobile")}
	// Determines whether the desktop User-Agent is a mobile device.
	fmt.Println("Desktop User-Agent: ", desktopUserAgent)
	if isMobile(desktopUserAgent) {
		fmt.Println("   Mobile")
	}else {
		fmt.Println("   Non-Mobile")
	}
	// Determines whether the MediaHub User-Agent is a mobile device.
	fmt.Println("Media Hub User-Agent: ", mediaHubUserAgent)
	if isMobile(mediaHubUserAgent) {
		fmt.Println("   Mobile")
	}else {
		fmt.Println("   Non-Mobile")
	}
}
// Snippet End | StronglyTyped.go | 0.576661 | 0.556339 | StronglyTyped.go | starcoder |
package assert
import (
"testing"
"github.com/ppapapetrou76/go-testing/internal/pkg/values"
)
// AssertableInt is the assertable structure for int values.
type AssertableInt struct {
t *testing.T
actual values.IntValue
}
// ThatInt returns an AssertableInt structure initialized with the test reference and the actual value to assert.
func ThatInt(t *testing.T, actual int) AssertableInt {
t.Helper()
return AssertableInt{
t: t,
actual: values.NewIntValue(actual),
}
}
// IsEqualTo asserts that the actual int equals the expected value, reporting
// a test error when it does not. It returns the receiver for chaining.
func (a AssertableInt) IsEqualTo(expected int) AssertableInt {
	a.t.Helper()
	if a.actual.IsEqualTo(expected) {
		return a
	}
	a.t.Error(shouldBeEqual(a.actual, expected))
	return a
}
// IsNotEqualTo asserts that the actual int differs from the expected value,
// reporting a test error when they are equal. It returns the receiver for
// chaining.
func (a AssertableInt) IsNotEqualTo(expected int) AssertableInt {
	a.t.Helper()
	if !a.actual.IsEqualTo(expected) {
		return a
	}
	a.t.Error(shouldNotBeEqual(a.actual, expected))
	return a
}
// IsGreaterThan asserts that the actual int is strictly greater than the
// expected value, reporting a test error otherwise. It returns the receiver
// for chaining.
func (a AssertableInt) IsGreaterThan(expected int) AssertableInt {
	a.t.Helper()
	if a.actual.IsGreaterThan(expected) {
		return a
	}
	a.t.Error(shouldBeGreater(a.actual, expected))
	return a
}
// IsGreaterThanOrEqualTo asserts that the actual int is greater than or
// equal to the expected value, reporting a test error otherwise. It returns
// the receiver for chaining.
func (a AssertableInt) IsGreaterThanOrEqualTo(expected int) AssertableInt {
	a.t.Helper()
	if a.actual.IsGreaterOrEqualTo(expected) {
		return a
	}
	a.t.Error(shouldBeGreaterOrEqual(a.actual, expected))
	return a
}
// IsLessThan asserts that the actual int is strictly less than the expected
// value, reporting a test error otherwise. It returns the receiver for
// chaining.
func (a AssertableInt) IsLessThan(expected int) AssertableInt {
	a.t.Helper()
	if a.actual.IsLessThan(expected) {
		return a
	}
	a.t.Error(shouldBeLessThan(a.actual, expected))
	return a
}
// IsLessThanOrEqualTo asserts if the assertable int value is less than or equal to the expected value
// It errors the tests if is not greater.
func (a AssertableInt) IsLessThanOrEqualTo(expected int) AssertableInt {
a.t.Helper()
if !a.actual.IsLessOrEqualTo(expected) {
a.t.Error(shouldBeLessOrEqual(a.actual, expected))
}
return a
} | assert/int.go | 0.830113 | 0.834677 | int.go | starcoder |
package types
import (
"sort"
"github.com/attic-labs/noms/go/d"
)
// MakePrimitiveType returns the canonical *Type for the given primitive
// NomsKind. Non-primitive kinds trigger a checked failure.
func MakePrimitiveType(k NomsKind) *Type {
	switch k {
	case BoolKind:
		return BoolType
	case NumberKind:
		return NumberType
	case StringKind:
		return StringType
	case BlobKind:
		return BlobType
	case ValueKind:
		return ValueType
	case TypeKind:
		return TypeType
	}
	// Reached only for non-primitive kinds; report the invalid kind.
	d.Chk.Fail("invalid NomsKind: %d", k)
	return nil
}
// MakeUnionType creates a new union type unless the elemTypes can be folded into a single non union type.
func MakeUnionType(elemTypes ...*Type) *Type {
	return simplifyType(makeUnionType(elemTypes...), false)
}
// MakeListType creates a simplified List type with the given element type.
func MakeListType(elemType *Type) *Type {
	return simplifyType(makeCompoundType(ListKind, elemType), false)
}
// MakeSetType creates a simplified Set type with the given element type.
func MakeSetType(elemType *Type) *Type {
	return simplifyType(makeCompoundType(SetKind, elemType), false)
}
// MakeRefType creates a simplified Ref type with the given target type.
func MakeRefType(elemType *Type) *Type {
	return simplifyType(makeCompoundType(RefKind, elemType), false)
}
// MakeMapType creates a simplified Map type with the given key and value types.
func MakeMapType(keyType, valType *Type) *Type {
	return simplifyType(makeCompoundType(MapKind, keyType, valType), false)
}
// MakeStructType creates a simplified struct type with the given name and
// fields; the fields are sorted by name before construction.
func MakeStructType(name string, fields ...StructField) *Type {
	fs := structTypeFields(fields)
	sort.Sort(fs)
	return simplifyType(makeStructType(name, fs), false)
}
// MakeUnionTypeIntersectStructs is a bit of strange function. It creates a
// simplified union type except for structs, where it creates intersection
// types.
// This function will go away so do not use it!
func MakeUnionTypeIntersectStructs(elemTypes ...*Type) *Type {
	return simplifyType(makeUnionType(elemTypes...), true)
}
// MakeCycleType creates a named back-reference used to express recursive
// types; the name must be non-empty.
func MakeCycleType(name string) *Type {
	d.PanicIfTrue(name == "")
	return newType(CycleDesc(name))
}
// makePrimitiveType constructs a fresh primitive *Type for kind k (no
// canonicalization).
func makePrimitiveType(k NomsKind) *Type {
	return newType(PrimitiveDesc(k))
}
// Canonical singleton instances of each primitive type.
var BoolType = makePrimitiveType(BoolKind)
var NumberType = makePrimitiveType(NumberKind)
var StringType = makePrimitiveType(StringKind)
var BlobType = makePrimitiveType(BlobKind)
var TypeType = makePrimitiveType(TypeKind)
var ValueType = makePrimitiveType(ValueKind)
// makeCompoundType constructs an unsimplified compound type (List, Set,
// Ref, Map, Union) over the given element types.
func makeCompoundType(kind NomsKind, elemTypes ...*Type) *Type {
	return newType(CompoundDesc{kind, elemTypes})
}
// makeUnionType folds a single-element union down to that element;
// otherwise it builds a Union compound type.
func makeUnionType(elemTypes ...*Type) *Type {
	if len(elemTypes) == 1 {
		return elemTypes[0]
	}
	return makeCompoundType(UnionKind, elemTypes...)
}
// makeStructTypeQuickly constructs a struct type without validating the
// name or fields (caller guarantees validity).
func makeStructTypeQuickly(name string, fields structTypeFields) *Type {
	return newType(StructDesc{name, fields})
}
// makeStructType validates the struct name and fields before construction.
func makeStructType(name string, fields structTypeFields) *Type {
	verifyStructName(name)
	verifyFields(fields)
	return makeStructTypeQuickly(name, fields)
}
// FieldMap maps field names to their types, for MakeStructTypeFromFields.
type FieldMap map[string]*Type
// MakeStructTypeFromFields creates a simplified struct type with the given
// name and non-optional fields; the fields are sorted by name.
func MakeStructTypeFromFields(name string, fields FieldMap) *Type {
	// Build with append instead of manual index bookkeeping; capacity is
	// preallocated so there is still exactly one allocation.
	fs := make(structTypeFields, 0, len(fields))
	for k, v := range fields {
		fs = append(fs, StructField{k, v, false})
	}
	sort.Sort(fs)
	return simplifyType(makeStructType(name, fs), false)
}
// StructField describes a field in a struct type.
type StructField struct {
	Name string // field name (sort key)
	Type *Type // field type
	Optional bool // whether the field may be absent
}
// structTypeFields is a name-sorted slice of StructField implementing
// sort.Interface.
type structTypeFields []StructField
func (s structTypeFields) Len() int { return len(s) }
func (s structTypeFields) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s structTypeFields) Less(i, j int) bool { return s[i].Name < s[j].Name }
package taxjar
// Rate defines the returned object for rate requests. US fields (state,
// county, city, district rates) and international fields (standard, reduced,
// super-reduced, parking rates) are both present; which are populated
// depends on the queried jurisdiction.
type Rate struct {
	Zip string `json:"zip"`
	State string `json:"state"`
	StateRate float64 `json:"state_rate,string"`
	County string `json:"county"`
	CountyRate float64 `json:"county_rate,string"`
	City string `json:"city"`
	CityRate float64 `json:"city_rate,string"`
	CombinedDistrictRate float64 `json:"combined_district_rate,string"`
	CombinedRate float64 `json:"combined_rate,string"`
	Country string `json:"country"`
	Name string `json:"name"`
	StandardRate float64 `json:"standard_rate,string"`
	ReducedRate float64 `json:"reduced_rate,string"`
	SuperReducedRate float64 `json:"super_reduced_rate,string"`
	ParkingRate float64 `json:"parking_rate,string"`
	DistanceSaleThreshold float64 `json:"distance_sale_threshold,string"`
	FreightTaxable *bool `json:"freight_taxable"`
}
// RateList is the wrapper for the actual rate object
type RateList struct {
	Rate Rate `json:"rate"`
}
// rateParams holds the query-string parameters of a rate request; Zip is
// carried in the URL path, not the query string.
type rateParams struct {
	Country string `url:"country,omitempty"`
	Zip string `url:"-"`
	State string `url:"state,omitempty"`
	City string `url:"city,omitempty"`
	Street string `url:"street,omitempty"`
}
// RateCountry returns an option which sets the `country` parameter
func RateCountry(country string) func(*rateParams) error {
	return func(rp *rateParams) error {
		rp.Country = country
		return nil
	}
}
// RateState returns an option which sets the `state` parameter
func RateState(state string) func(*rateParams) error {
	return func(rp *rateParams) error {
		rp.State = state
		return nil
	}
}
// RateCity returns an option which sets the `city` parameter
func RateCity(city string) func(*rateParams) error {
	return func(rp *rateParams) error {
		rp.City = city
		return nil
	}
}
// RateStreet returns an option which sets the `street` parameter
func RateStreet(street string) func(*rateParams) error {
	return func(rp *rateParams) error {
		rp.Street = street
		return nil
	}
}
// RateService interfaces with the rates part of the API, delegating
// transport concerns to its RateRepository.
type RateService struct {
	Repository RateRepository
}
// Get a Rate
func (s *RateService) Get(zip string, options ...func(*rateParams) error) (Rate, error) {
params := rateParams{Zip: zip}
for _, option := range options {
if err := option(¶ms); nil != err {
return Rate{}, err
}
}
return s.Repository.get(params)
} | rate.go | 0.806052 | 0.417925 | rate.go | starcoder |
package gist
import (
"fmt"
"image"
"image/color"
"github.com/goki/ki/kit"
"github.com/goki/mat32"
"github.com/srwiley/rasterx"
)
// Color defines a standard color object for GUI use, with RGBA values, and
// all the usual necessary conversion functions to / from names, strings, etc
// ColorSpec fully specifies the color for rendering -- used in FillStyle and
// StrokeStyle
type ColorSpec struct {
	Source ColorSources `desc:"source of color (solid, gradient)"`
	Color Color `desc:"color for solid color source"`
	Gradient *rasterx.Gradient `desc:"gradient parameters for gradient color source"`
}
// KiT_ColorSpec registers ColorSpec with the ki type registry.
var KiT_ColorSpec = kit.Types.AddType(&ColorSpec{}, nil)
// see colorparse.go for ColorSpec.SetString() method
// ColorSources determine how the color is generated -- used in FillStyle and StrokeStyle
type ColorSources int32
const (
	SolidColor ColorSources = iota
	LinearGradient
	RadialGradient
	ColorSourcesN
)
//go:generate stringer -type=ColorSources
var KiT_ColorSources = kit.Enums.AddEnumAltLower(ColorSourcesN, kit.NotBitFlag, StylePropProps, "")
func (ev ColorSources) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
func (ev *ColorSources) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
// GradientPoints defines points within the gradient
type GradientPoints int32
const (
	GpX1 GradientPoints = iota
	GpY1
	GpX2
	GpY2
	GradientPointsN
)
// IsNil reports whether the spec has no usable color: a nil solid color for
// the SolidColor source, or a nil Gradient for the gradient sources.
func (cs *ColorSpec) IsNil() bool {
	if cs.Source != SolidColor {
		return cs.Gradient == nil
	}
	return cs.Color.IsNil()
}
// ColorOrNil returns the solid color if non-nil, or nil otherwise -- for
// consumers that handle nil colors
func (cs *ColorSpec) ColorOrNil() color.Color {
	if cs.Color.IsNil() {
		return nil
	}
	return cs.Color
}
// SetColor sets a solid color, switching the source to SolidColor and
// dropping any gradient.
func (cs *ColorSpec) SetColor(cl color.Color) {
	cs.Color.SetColor(cl)
	cs.Source = SolidColor
	cs.Gradient = nil
}
// SetName sets a solid color by name, switching the source to SolidColor
// and dropping any gradient.
func (cs *ColorSpec) SetName(name string) {
	cs.Color.SetName(name)
	cs.Source = SolidColor
	cs.Gradient = nil
}
// CopyFrom copies the given spec, making new copies of the gradient stops
// instead of re-using pointers.
func (cs *ColorSpec) CopyFrom(cp *ColorSpec) {
	*cs = *cp
	if cp.Gradient != nil {
		cs.Gradient = &rasterx.Gradient{}
		*cs.Gradient = *cp.Gradient
		sn := len(cp.Gradient.Stops)
		cs.Gradient.Stops = make([]rasterx.GradStop, sn)
		copy(cs.Gradient.Stops, cp.Gradient.Stops)
	}
}
// CopyStopsFrom copies gradient stops from other color spec, if both
// have gradient stops
func (cs *ColorSpec) CopyStopsFrom(cp *ColorSpec) {
	if cp.Gradient == nil || cs.Gradient == nil {
		return
	}
	sn := len(cp.Gradient.Stops)
	if sn == 0 {
		return
	}
	// Reuse the destination slice when it already has the right length.
	if len(cs.Gradient.Stops) != sn {
		cs.Gradient.Stops = make([]rasterx.GradStop, sn)
	}
	copy(cs.Gradient.Stops, cp.Gradient.Stops)
}
// NewLinearGradient creates a new Linear gradient in spec, sets Source
// to LinearGradient. The gradient bounds default to the unit box.
func (cs *ColorSpec) NewLinearGradient() {
	cs.Source = LinearGradient
	cs.Gradient = &rasterx.Gradient{IsRadial: false, Matrix: rasterx.Identity, Spread: rasterx.PadSpread}
	cs.Gradient.Bounds.W = 1
	cs.Gradient.Bounds.H = 1
}
// NewRadialGradient creates a new Radial gradient in spec, sets Source
// to RadialGradient. The gradient bounds default to the unit box.
func (cs *ColorSpec) NewRadialGradient() {
	cs.Source = RadialGradient
	cs.Gradient = &rasterx.Gradient{IsRadial: true, Matrix: rasterx.Identity, Spread: rasterx.PadSpread}
	cs.Gradient.Bounds.W = 1
	cs.Gradient.Bounds.H = 1
}
// SetGradientPoints sets UserSpaceOnUse points for gradient based on given bounding box.
// Radial gradients get a center + radius covering the box; linear gradients
// get a horizontal (right-to-left) axis along the box's top edge.
func (cs *ColorSpec) SetGradientPoints(bbox mat32.Box2) {
	if cs.Gradient == nil {
		return
	}
	cs.Gradient.Units = rasterx.UserSpaceOnUse
	if cs.Gradient.IsRadial {
		ctr := bbox.Min.Add(bbox.Max).MulScalar(.5)
		// Radius covers half the larger box dimension.
		rad := 0.5 * mat32.Max(bbox.Max.X-bbox.Min.X, bbox.Max.Y-bbox.Min.Y)
		cs.Gradient.Points = [5]float64{float64(ctr.X), float64(ctr.Y), float64(ctr.X), float64(ctr.Y), float64(rad)}
	} else {
		cs.Gradient.Points = [5]float64{float64(bbox.Min.X), float64(bbox.Min.Y), float64(bbox.Max.X), float64(bbox.Min.Y), 0} // linear R-L
	}
}
// SetShadowGradient sets a linear gradient starting at given color and going
// down to transparent based on given color and direction spec (defaults to
// "to down")
func (cs *ColorSpec) SetShadowGradient(cl color.Color, dir string) {
	cs.Color.SetColor(cl)
	if dir == "" {
		dir = "to down"
	}
	cs.SetString(fmt.Sprintf("linear-gradient(%v, lighter-0, transparent)", dir), nil)
	cs.Source = LinearGradient
}
// SetGradientBounds sets the gradient's bounds from an image rectangle.
func SetGradientBounds(grad *rasterx.Gradient, bounds image.Rectangle) {
	grad.Bounds.X = float64(bounds.Min.X)
	grad.Bounds.Y = float64(bounds.Min.Y)
	sz := bounds.Size()
	grad.Bounds.W = float64(sz.X)
	grad.Bounds.H = float64(sz.Y)
}
// CopyGradient copies a gradient, making new copies of the stops instead of
// re-using pointers
func CopyGradient(dst, src *rasterx.Gradient) {
	*dst = *src
	sn := len(src.Stops)
	dst.Stops = make([]rasterx.GradStop, sn)
	copy(dst.Stops, src.Stops)
}
// MatToRasterx converts a mat32 2x3 affine matrix to the rasterx form.
func MatToRasterx(mat *mat32.Mat2) rasterx.Matrix2D {
	return rasterx.Matrix2D{float64(mat.XX), float64(mat.YX), float64(mat.XY), float64(mat.YY), float64(mat.X0), float64(mat.Y0)}
}
// RasterxToMat converts a rasterx 2x3 affine matrix to the mat32 form.
func RasterxToMat(mat *rasterx.Matrix2D) mat32.Mat2 {
	return mat32.Mat2{float32(mat.A), float32(mat.B), float32(mat.C), float32(mat.D), float32(mat.E), float32(mat.F)}
}
// RenderColor gets the color for rendering, applying opacity and bounds for
// gradients. For a solid color (or a missing gradient) it returns the
// opacity-adjusted color; otherwise it returns the gradient's color
// function over the given bounds and transform.
func (cs *ColorSpec) RenderColor(opacity float32, bounds image.Rectangle, xform mat32.Mat2) interface{} {
	if cs.Source == SolidColor || cs.Gradient == nil {
		return rasterx.ApplyOpacity(cs.Color, float64(opacity))
	}
	cs.Gradient.IsRadial = cs.Source == RadialGradient
	SetGradientBounds(cs.Gradient, bounds)
	return cs.Gradient.GetColorFunctionUS(float64(opacity), MatToRasterx(&xform))
}
// SetIFace sets the color spec from the given interface value, e.g., for
// ki.Props. Supported types: string (parsed via SetString), *Color,
// *ColorSpec and color.Color; values of any other type are silently ignored.
// key is an optional property key, currently unused.
// NOTE(review): the original comment claimed errors are always logged, but
// no logging occurs and nil is always returned — confirm intended behavior.
func (c *ColorSpec) SetIFace(val interface{}, ctxt Context, key string) error {
	switch valv := val.(type) {
	case string:
		c.SetString(valv, ctxt)
	case *Color:
		c.SetColor(*valv)
	case *ColorSpec:
		*c = *valv
	case color.Color:
		c.SetColor(valv)
	}
	return nil
}
// ApplyXForm transforms the points for a UserSpaceOnUse gradient.
// ObjectBoundingBox gradients are untouched, since their coordinates are
// relative to the object, not user space.
func (c *ColorSpec) ApplyXForm(xf mat32.Mat2) {
	if c.Gradient == nil {
		return
	}
	if c.Gradient.Units == rasterx.ObjectBoundingBox {
		return
	}
	if c.Gradient.IsRadial { // radial uses transform instead of points
		mat := RasterxToMat(&c.Gradient.Matrix)
		mat = xf.Mul(mat)
		c.Gradient.Matrix = MatToRasterx(&mat)
	} else {
		// Linear: transform the two axis endpoints directly.
		p1 := mat32.Vec2{float32(c.Gradient.Points[0]), float32(c.Gradient.Points[1])}
		p1 = xf.MulVec2AsPt(p1)
		p2 := mat32.Vec2{float32(c.Gradient.Points[2]), float32(c.Gradient.Points[3])}
		p2 = xf.MulVec2AsPt(p2)
		c.Gradient.Points[0] = float64(p1.X)
		c.Gradient.Points[1] = float64(p1.Y)
		c.Gradient.Points[2] = float64(p2.X)
		c.Gradient.Points[3] = float64(p2.Y)
	}
}
// ApplyXFormPt transforms the points for a UserSpaceOnUse gradient
// relative to a given center point. ObjectBoundingBox gradients are
// untouched.
func (c *ColorSpec) ApplyXFormPt(xf mat32.Mat2, pt mat32.Vec2) {
	if c.Gradient == nil {
		return
	}
	if c.Gradient.Units == rasterx.ObjectBoundingBox {
		return
	}
	if c.Gradient.IsRadial { // radial uses transform instead of points
		mat := RasterxToMat(&c.Gradient.Matrix)
		mat = mat.MulCtr(xf, pt)
		c.Gradient.Matrix = MatToRasterx(&mat)
	} else {
		// Linear: transform the two axis endpoints about the center point.
		p1 := mat32.Vec2{float32(c.Gradient.Points[0]), float32(c.Gradient.Points[1])}
		p1 = xf.MulVec2AsPtCtr(p1, pt)
		p2 := mat32.Vec2{float32(c.Gradient.Points[2]), float32(c.Gradient.Points[3])}
		p2 = xf.MulVec2AsPtCtr(p2, pt)
		c.Gradient.Points[0] = float64(p1.X)
		c.Gradient.Points[1] = float64(p1.Y)
		c.Gradient.Points[2] = float64(p2.X)
		c.Gradient.Points[3] = float64(p2.Y)
	}
}
package executetest
import (
"math"
"sort"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/plan"
"gonum.org/v1/gonum/floats"
)
// Two floating point values are considered
// equal if they are within tol of each other.
const tol float64 = 1e-25
// The maximum number of floating point values that are allowed
// to lie between two float64s and still be considered equal.
const ulp uint = 2
// Comparison options for floating point values.
// NaNs are considered equal, and float64s must
// be sufficiently close to be considered equal.
var floatOptions = cmp.Options{
	cmpopts.EquateNaNs(),
	cmp.FilterValues(func(x, y float64) bool {
		return !math.IsNaN(x) && !math.IsNaN(y)
	}, cmp.Comparer(func(x, y float64) bool {
		// If sufficiently close, then move on.
		// This avoids situations close to zero.
		if floats.EqualWithinAbs(x, y, tol) {
			return true
		}
		// If not sufficiently close, both floats
		// must be within ulp steps of each other.
		if !floats.EqualWithinULP(x, y, ulp) {
			return false
		}
		return true
	})),
}
// ProcessTestHelper builds a transformation via create, feeds it the given
// input tables, and compares the resulting tables (or error) against want /
// wantErr. Errors are compared by message text only.
func ProcessTestHelper(
	t *testing.T,
	data []flux.Table,
	want []*Table,
	wantErr error,
	create func(d execute.Dataset, c execute.TableBuilderCache) execute.Transformation,
) {
	t.Helper()
	d := NewDataset(RandomDatasetID())
	c := execute.NewTableBuilderCache(UnlimitedAllocator)
	c.SetTriggerSpec(plan.DefaultTriggerSpec)
	tx := create(d, c)
	parentID := RandomDatasetID()
	var gotErr error
	// Feed each input table; stop at the first processing failure.
	for _, b := range data {
		if err := tx.Process(parentID, b); err != nil {
			gotErr = err
			break
		}
	}
	// Finish is always called, even after an error, so the dataset settles.
	tx.Finish(parentID, gotErr)
	if gotErr == nil {
		gotErr = d.FinishedErr
	}
	// Reconcile the actual error against the expected one. On a matching
	// expected error the table comparison below is skipped entirely.
	if gotErr == nil && wantErr != nil {
		t.Fatalf("expected error %s, got none", wantErr.Error())
	} else if gotErr != nil && wantErr == nil {
		t.Fatalf("expected no error, got %s", gotErr.Error())
	} else if gotErr != nil && wantErr != nil {
		if wantErr.Error() != gotErr.Error() {
			t.Fatalf("unexpected error -want/+got\n%s", cmp.Diff(wantErr.Error(), gotErr.Error()))
		} else {
			return
		}
	}
	got, err := TablesFromCache(c)
	if err != nil {
		t.Fatal(err)
	}
	// Normalize and sort both sides so the comparison is order-independent.
	NormalizeTables(got)
	NormalizeTables(want)
	sort.Sort(SortedTables(got))
	sort.Sort(SortedTables(want))
	if !cmp.Equal(want, got, floatOptions) {
		t.Errorf("unexpected tables -want/+got\n%s", cmp.Diff(want, got))
	}
}
package utils
import (
	"strings"
	"time"
)
// Comparator compares two values a and b and reports their ordering:
//   -1 (negative) if a < b
//   +1 (positive) if a > b
//    0 if a == b
// Implementations type-assert their operands and panic on a type mismatch.
type Comparator func(a, b interface{}) int
// StringComparator compares two strings lexicographically byte by byte,
// falling back to length when one string is a prefix of the other.
// Returns -1 if a < b, 1 if a > b, 0 if equal.
// The hand-rolled byte loop is replaced with strings.Compare, which
// implements exactly this ordering.
func StringComparator(a, b interface{}) int {
	return strings.Compare(a.(string), b.(string))
}
// IntComparator orders two int values: -1 if a < b, 1 if a > b, 0 if equal.
func IntComparator(a, b interface{}) int {
	x, y := a.(int), b.(int)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// Int8comparator orders two int8 values: -1 if a < b, 1 if a > b, 0 if equal.
func Int8comparator(a, b interface{}) int {
	x, y := a.(int8), b.(int8)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// Int16comparator orders two int16 values: -1 if a < b, 1 if a > b, 0 if equal.
func Int16comparator(a, b interface{}) int {
	x, y := a.(int16), b.(int16)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// Int32Comparator orders two int32 values: -1 if a < b, 1 if a > b, 0 if equal.
func Int32Comparator(a, b interface{}) int {
	x, y := a.(int32), b.(int32)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// Int64Comparator orders two int64 values: -1 if a < b, 1 if a > b, 0 if equal.
func Int64Comparator(a, b interface{}) int {
	x, y := a.(int64), b.(int64)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// UIntComparator orders two uint values: -1 if a < b, 1 if a > b, 0 if equal.
func UIntComparator(a, b interface{}) int {
	x, y := a.(uint), b.(uint)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// UInt8comparator orders two uint8 values: -1 if a < b, 1 if a > b, 0 if equal.
func UInt8comparator(a, b interface{}) int {
	x, y := a.(uint8), b.(uint8)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// UInt16comparator orders two uint16 values: -1 if a < b, 1 if a > b, 0 if equal.
func UInt16comparator(a, b interface{}) int {
	x, y := a.(uint16), b.(uint16)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// UInt32Comparator orders two uint32 values: -1 if a < b, 1 if a > b, 0 if equal.
func UInt32Comparator(a, b interface{}) int {
	x, y := a.(uint32), b.(uint32)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// UInt64Comparator orders two uint64 values: -1 if a < b, 1 if a > b, 0 if equal.
func UInt64Comparator(a, b interface{}) int {
	x, y := a.(uint64), b.(uint64)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// Float32Comparator orders two float32 values: -1 if a < b, 1 if a > b, 0 otherwise.
// Note: NaN operands fall through to 0 because all comparisons with NaN are false.
func Float32Comparator(a, b interface{}) int {
	x, y := a.(float32), b.(float32)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// Float64Comparator orders two float64 values: -1 if a < b, 1 if a > b, 0 otherwise.
// Note: NaN operands fall through to 0 because all comparisons with NaN are false.
func Float64Comparator(a, b interface{}) int {
	x, y := a.(float64), b.(float64)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// ByteComparator orders two byte values: -1 if a < b, 1 if a > b, 0 if equal.
func ByteComparator(a, b interface{}) int {
	x, y := a.(byte), b.(byte)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
// RuneComparator orders two rune values by code point: -1 if a < b, 1 if a > b, 0 if equal.
func RuneComparator(a, b interface{}) int {
	x, y := a.(rune), b.(rune)
	if x < y {
		return -1
	}
	if x > y {
		return 1
	}
	return 0
}
func TimeComparator(a, b interface{}) int {
aAsserted := a.(time.Time)
bAsserted := b.(time.Time)
switch {
case aAsserted.After(bAsserted):
return 1
case aAsserted.Before(bAsserted):
return -1
default:
return 0
}
} | estl/utils/comparator.go | 0.518302 | 0.606964 | comparator.go | starcoder |
package bzip2
// moveToFrontDecoder implements a move-to-front list. Such a list is an
// efficient way to transform a string with repeating elements into one with
// many small valued numbers, which is suitable for entropy encoding. It works
// by starting with an initial list of symbols and references symbols by their
// index into that list. When a symbol is referenced, it's moved to the front
// of the list. Thus, a repeated symbol ends up being encoded with many zeros,
// as the symbol will be at the front of the list after the first access.
type moveToFrontDecoder struct {
	// Rather than actually keep the list in memory, the symbols are stored
	// as a circular, double linked list with the symbol indexed by head
	// at the front of the list.
	symbols [256]byte
	next [256]uint8
	prev [256]uint8
	// head indexes (into the arrays above) the element currently at the
	// front of the list.
	head uint8
	// len is the number of symbols actually in use (at most 256).
	len int
}
// newMTFDecoder creates a move-to-front decoder with an explicit initial list
// of symbols. It panics if more than 256 symbols are given.
func newMTFDecoder(symbols []byte) *moveToFrontDecoder {
	if len(symbols) > 256 {
		panic("too many symbols")
	}
	d := &moveToFrontDecoder{len: len(symbols)}
	copy(d.symbols[:], symbols)
	d.threadLinkedList()
	return d
}
// newMTFDecoderWithRange creates a move-to-front decoder with an initial
// symbol list of 0...n-1. It panics if n exceeds 256.
func newMTFDecoderWithRange(n int) *moveToFrontDecoder {
	if n > 256 {
		panic("newMTFDecoderWithRange: cannot have > 256 symbols")
	}
	d := &moveToFrontDecoder{len: n}
	for i := 0; i < n; i++ {
		d.symbols[i] = byte(i)
	}
	d.threadLinkedList()
	return d
}
// threadLinkedList creates the initial circular linked-list pointers:
// next[i] points to the following slot, prev[i] to the preceding one,
// wrapping around at m.len.
func (m *moveToFrontDecoder) threadLinkedList() {
	n := m.len
	if n == 0 {
		return
	}
	for i := 0; i < n; i++ {
		j := (i + 1) % n
		m.next[i] = uint8(j)
		m.prev[j] = uint8(i)
	}
}
// Decode returns the symbol at position n in the list and moves it to the
// front (making it the new head).
func (m *moveToFrontDecoder) Decode(n int) (b byte) {
	// Most of the time, n will be zero so it's worth dealing with this
	// simple case.
	if n == 0 {
		return m.symbols[m.head]
	}
	// Walk n links forward from the head to reach the requested node.
	i := m.head
	for j := 0; j < n; j++ {
		i = m.next[i]
	}
	b = m.symbols[i]
	// Unlink node i from its current position...
	m.next[m.prev[i]] = m.next[i]
	m.prev[m.next[i]] = m.prev[i]
	// ...splice it in between the old tail and the old head...
	m.next[i] = m.head
	m.prev[i] = m.prev[m.head]
	m.next[m.prev[m.head]] = i
	m.prev[m.head] = i
	// ...and make it the new front of the list.
	m.head = i
	return
}
// First returns the symbol at the front of the list without moving anything.
func (m *moveToFrontDecoder) First() byte {
	return m.symbols[m.head]
}
package ipv4cidr
import (
"errors"
"regexp"
"strconv"
"strings"
"github.com/microsoft/go-cidr-manager/ipv4cidr/consts"
"github.com/microsoft/go-cidr-manager/ipv4cidr/utils"
)
// IPv4CIDR models an IPv4 CIDR range.
// Construct via NewIPv4CIDR; the zero value is not meaningful.
// @field ip uint32: Holds the IP address (first address of the range, big-endian packed)
// @field mask uint8: Holds the CIDR mask (number of leading network bits)
// @field netmask uint32: Holds the netmask for the subnet
// @field rangeLength uint32: Holds the number of IP addresses in the CIDR range
type IPv4CIDR struct {
	ip uint32
	mask uint8
	netmask uint32
	rangeLength uint32
}
// NewIPv4CIDR instantiates a new IPv4CIDR object and returns it
// @param IP string: A string representation of CIDR range in the format a.b.c.d/e or a.b.c.d
// @param standardize bool: If the IP part of the CIDR range is not the first IP in range, then setting this value to "true" will automatically convert it to the first IP in range. If set to "false", a non-standard CIDR will give an error
// @returns *IPv4CIDR: If the input parameters are valid, returns a pointer to a new IPv4CIDR object
// @returns error: If the input parameters are invalid, or any processing errors occur, returns the appropriate error back to caller.
func NewIPv4CIDR(IP string, standardize bool) (*IPv4CIDR, error) {
	// Validate the textual form before any parsing work.
	valid, err := regexp.Match(consts.IPv4CIDRRegex, []byte(IP))
	if err != nil {
		return nil, err
	}
	if !valid {
		return nil, errors.New(consts.InvalidIPv4CIDRError)
	}

	cidr := &IPv4CIDR{}
	if err := cidr.parse(IP, standardize); err != nil {
		return nil, err
	}
	return cidr, nil
}
// parse takes as input the IP string and standardize flag, and parses it
// into the receiver's fields.
// @input ipString string: A valid IP/CIDR string
// @input standardize bool: Flag for whether to standardize non-standard IP string or throw an error
// @returns error: If there is any processing error, the appropriate error is returned to caller.
// NOTE(review): the mask value is not range-checked here; the regex applied
// in NewIPv4CIDR is assumed to constrain it to 0-32 — confirm.
func (i *IPv4CIDR) parse(ipString string, standardize bool) error {
	// Instantiate IP as a 32-bit 0.0.0.0
	ip := uint32(0)
	// Instantiate mask with a default value of 32
	mask := uint8(32)
	// Split the IP string into the IP part (ipSections[0]) and optional CIDR part (ipSections[1])
	ipSections := strings.Split(ipString, "/")
	// If there are 2 sections, a CIDR part was provided, use that to set the mask. Else, let mask have default value of 32
	if len(ipSections) == 2 {
		tempMask, err := strconv.Atoi(ipSections[1])
		if err != nil {
			return err
		}
		mask = uint8(tempMask)
	}
	// Split the IP part into 4 sections (a.b.c.d => [a,b,c,d])
	ipNumbers := strings.Split(ipSections[0], ".")
	// Convert each 8-bit section into its integer representation, and set the corresponding 8 bits of the IP's integer representation
	for i := 0; i < 4; i++ {
		tempIP, err := strconv.Atoi(ipNumbers[i])
		if err != nil {
			return err
		}
		ip = ip << consts.GroupSize
		ip = ip | uint32(tempIP)
	}
	netmask := utils.GetNetmask(mask)
	rangeLength := utils.GetCIDRRangeLength(mask)
	// If standardize is true, then standardize the IP part of the object
	// If standardize is false, check if the representation is correct. If not, return an error
	if standardize {
		ip = utils.Standardize(ip, netmask)
	} else {
		err := utils.CheckStandardized(ip, netmask)
		if err != nil {
			return err
		}
	}
	// Set values in the IP object
	i.ip = ip
	i.mask = mask
	i.rangeLength = rangeLength
	i.netmask = netmask
	return nil
}
// Split splits the IPv4CIDR into two IPv4CIDRs of half the size (mask + 1).
// The receiver is not modified.
// @returns *IPv4CIDR: The first (lower) block
// @returns *IPv4CIDR: The second (higher) block
// @returns error: If CIDR cannot be split further, the appropriate error is returned.
func (i *IPv4CIDR) Split() (*IPv4CIDR, *IPv4CIDR, error) {
	// If we are already at a single-IP CIDR block, further splitting is not possible. Hence return an error
	if i.rangeLength == 1 {
		return nil, nil, errors.New(consts.NoMoreSplittingPossibleError)
	}
	// The new mask becomes the old mask + 1
	newMask := i.mask + 1
	// The new range is half of old range
	newRange := i.rangeLength / 2
	// The new netmask has the leftmost 0 of the old netmask also set
	// In other words, shift right and set the highest bit
	newNetmask := (i.netmask >> 1) | consts.HighestBitSet
	// The lower CIDR block has the same IP
	newIP1 := i.ip
	// The higher CIDR block has the leftmost 0 of the rightmost block of 0s also set.
	// The XOR of the old and new netmasks gives us the bit that needs to be set, which can be done by bitwise OR
	newIP2 := (i.ip | (newNetmask ^ i.netmask))
	// Create the two new IPv4CIDR objects
	IP1 := IPv4CIDR{
		ip: newIP1,
		mask: newMask,
		rangeLength: newRange,
		netmask: newNetmask,
	}
	IP2 := IPv4CIDR{
		ip: newIP2,
		mask: newMask,
		rangeLength: newRange,
		netmask: newNetmask,
	}
	return &IP1, &IP2, nil
}
// GetIPInRange returns the nth IP address in the CIDR block (1-based).
// @input n uint32: The value of n, representing the nth IP to return
// @input withCIDR bool: Flag corresponding to whether to append the CIDR mask with the returned IP or not
// @returns string: The nth IP address
// @returns error: If nth IP is out of range of the CIDR block, an error is returned
func (i *IPv4CIDR) GetIPInRange(n uint32, withCIDR bool) (string, error) {
	if n > i.rangeLength {
		return "", errors.New(consts.RequestedIPExceedsCIDRRangeError)
	}
	// The nth IP is the first IP in the range plus n-1.
	result := utils.ConvertIPToString(i.ip + n - 1)
	if withCIDR {
		result = result + "/" + strconv.Itoa(int(i.mask))
	}
	return result, nil
}
// ToString converts the CIDR range into its string representation.
// @returns string: String in the format a.b.c.d/e
func (i *IPv4CIDR) ToString() string {
	return utils.ConvertIPToString(i.ip) + "/" + strconv.Itoa(int(i.mask))
}
// GetIP returns the IP part of the CIDR range.
// @returns string: String corresponding to the first IP address in CIDR range in format a.b.c.d
func (i *IPv4CIDR) GetIP() string {
	return utils.ConvertIPToString(i.ip)
}
// GetCIDRRangeLength returns the number of IP addresses contained in the CIDR range.
// @returns uint32: Length of the CIDR range
func (i *IPv4CIDR) GetCIDRRangeLength() uint32 {
	return i.rangeLength
}
// GetMask returns the mask part of the CIDR range (0-32).
// @returns uint8: Mask of the CIDR range
func (i *IPv4CIDR) GetMask() uint8 {
	return i.mask
}
// GetNetmask returns the netmask for the CIDR range as a dotted quad.
// @returns string: Netmask of the CIDR range
func (i *IPv4CIDR) GetNetmask() string {
	return utils.ConvertIPToString(i.netmask)
}
package main
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
)
// Coordinate identifies a cell in the risk grid by row (y) and column (x).
type Coordinate struct {
	y, x int
}
// Day15_DFS computes the lowest total risk of any path from the top-left to
// the bottom-right cell by exhaustive depth-first search via Traverse.
// NOTE(review): exponential in grid size — kept only as a reference
// implementation; Day15 (Dijkstra) is the practical solver.
func Day15_DFS(input [][]int) int {
	startCoordinate := Coordinate{0, 0}
	// Coordinates already visited on the current path (copied per branch
	// inside Traverse).
	traversedCoordinates := make(map[Coordinate]bool)
	// Set of the total risk of every complete path found.
	totalSums := make(map[int]bool)
	var coordinateArr []Coordinate
	Traverse(startCoordinate, input, 0, totalSums, traversedCoordinates, coordinateArr)

	// Scan for the smallest path sum. A first-iteration flag replaces the
	// previous magic sentinel (999999999999), which a sufficiently large
	// grid could legitimately exceed.
	minSum := 0
	first := true
	for sum := range totalSums {
		if first || sum < minSum {
			minSum = sum
			first = false
		}
	}
	return minSum
}
// Traverse recursively explores every simple path from coordinate to the
// bottom-right cell of input, accumulating risk in aPathSum and recording
// each completed path's total in totalSums. The visited map and path slice
// are copied on every call so sibling branches do not interfere — which is
// also why this explodes combinatorially on larger grids.
func Traverse(coordinate Coordinate, input [][]int, aPathSum int, totalSums map[int]bool, traversedCoordinates map[Coordinate]bool, coordinateArr []Coordinate) {
	// Copy coordinateArr
	var coordinateArrCopy []Coordinate
	for _, v := range coordinateArr {
		coordinateArrCopy = append(coordinateArrCopy, v)
	}
	// Copy traversedCoordinates map
	traversedCoordinatesCopy := make(map[Coordinate]bool)
	for k, v := range traversedCoordinates {
		traversedCoordinatesCopy[k] = v
	}
	// Add current coordinate to coordinateArrCopy
	coordinateArrCopy = append(coordinateArrCopy, coordinate)
	if _, found := traversedCoordinatesCopy[coordinate]; !found {
		// This coordinate has not been traversed
		// Add this coordinate to traversed coordinate
		traversedCoordinatesCopy[coordinate] = true
		// Add current coordinate risk level
		aPathSum += input[coordinate.y][coordinate.x]
		if coordinate.y == len(input)-1 && coordinate.x == len(input[0])-1 {
			// Destination reached
			// Append current path's risk level
			totalSums[aPathSum] = true
		} else {
			// All other coordinate except destination
			if coordinate.x+1 < len(input[0]) {
				// Traverse right
				Traverse(Coordinate{coordinate.y, coordinate.x + 1}, input, aPathSum, totalSums, traversedCoordinatesCopy, coordinateArrCopy)
			}
			if coordinate.x-1 >= 0 {
				// Traverse left
				Traverse(Coordinate{coordinate.y, coordinate.x - 1}, input, aPathSum, totalSums, traversedCoordinatesCopy, coordinateArrCopy)
			}
			if coordinate.y+1 < len(input) {
				// Traverse below
				Traverse(Coordinate{coordinate.y + 1, coordinate.x}, input, aPathSum, totalSums, traversedCoordinatesCopy, coordinateArrCopy)
			}
			if coordinate.y-1 >= 0 {
				// Traverse above
				Traverse(Coordinate{coordinate.y - 1, coordinate.x}, input, aPathSum, totalSums, traversedCoordinatesCopy, coordinateArrCopy)
			}
		}
	}
}
// Use Djikstra instead of DFS (took hours for part 2, probably should have used priority queue)
// Day15 returns the minimum total risk of a path from the top-left to the
// bottom-right cell of input, using Dijkstra's algorithm.
// NOTE(review): the linear scan for the minimum-distance node makes this
// O(V^2); a priority queue would reduce it to O(E log V).
// NOTE(review): 999999999999 serves as "infinity"; fine for puzzle-sized
// grids, but not a true upper bound in general.
func Day15(input [][]int) int {
	// Create visited coordinate set
	coordinatesSet := make(map[Coordinate]bool)
	// Create and initialize distance map coordinate -> value
	distancesMap := make(map[Coordinate]int)
	for y, yValue := range input {
		for x, _ := range yValue {
			aCoordinate := Coordinate{y, x}
			if y == 0 && x == 0 {
				// Start coordinate
				distancesMap[aCoordinate] = 0
			} else {
				// All other coordinates
				distancesMap[aCoordinate] = 999999999999
			}
			// Add to coordinates set
			coordinatesSet[aCoordinate] = true
		}
	}
	// While coordinate set is not empty
	for len(coordinatesSet) > 0 {
		// Get coordinate with min distance
		var coordinateWithMinDistance Coordinate
		minDistance := 999999999999
		for k, v := range distancesMap {
			if _, found := coordinatesSet[k]; found {
				if v < minDistance {
					minDistance = v
					coordinateWithMinDistance = k
				}
			}
		}
		// Remove min distance coordinate from coordinate set
		delete(coordinatesSet, coordinateWithMinDistance)
		// Terminate if target is reached
		if coordinateWithMinDistance.y == len(input)-1 && coordinateWithMinDistance.x == len(input[0])-1 {
			break
		}
		// For each neighbour, relax the edge if the path through the
		// current node is shorter.
		if coordinateWithMinDistance.x+1 < len(input[0]) {
			// Right neighbour
			altDistance := distancesMap[coordinateWithMinDistance] + input[coordinateWithMinDistance.y][coordinateWithMinDistance.x+1]
			rightCoordinate := Coordinate{coordinateWithMinDistance.y, coordinateWithMinDistance.x + 1}
			if altDistance < distancesMap[rightCoordinate] {
				distancesMap[rightCoordinate] = altDistance
			}
		}
		if coordinateWithMinDistance.x-1 >= 0 {
			// Left neighbour
			altDistance := distancesMap[coordinateWithMinDistance] + input[coordinateWithMinDistance.y][coordinateWithMinDistance.x-1]
			leftCoordinate := Coordinate{coordinateWithMinDistance.y, coordinateWithMinDistance.x - 1}
			if altDistance < distancesMap[leftCoordinate] {
				distancesMap[leftCoordinate] = altDistance
			}
		}
		if coordinateWithMinDistance.y+1 < len(input) {
			// Below neighbour
			altDistance := distancesMap[coordinateWithMinDistance] + input[coordinateWithMinDistance.y+1][coordinateWithMinDistance.x]
			belowCoordinate := Coordinate{coordinateWithMinDistance.y + 1, coordinateWithMinDistance.x}
			if altDistance < distancesMap[belowCoordinate] {
				distancesMap[belowCoordinate] = altDistance
			}
		}
		if coordinateWithMinDistance.y-1 >= 0 {
			// Above neighbour
			altDistance := distancesMap[coordinateWithMinDistance] + input[coordinateWithMinDistance.y-1][coordinateWithMinDistance.x]
			aboveCoordinate := Coordinate{coordinateWithMinDistance.y - 1, coordinateWithMinDistance.x}
			if altDistance < distancesMap[aboveCoordinate] {
				distancesMap[aboveCoordinate] = altDistance
			}
		}
	}
	targetCoordinate := Coordinate{len(input) - 1, len(input[0]) - 1}
	return distancesMap[targetCoordinate]
}
// Day15_1 solves part 2: the grid is tiled 5x5, with each tile's risk values
// incremented by its tile distance and wrapped from 9 back to 1, then solved
// with Day15.
// NOTE(review): the outer j loop repeats the same horizontal tiling work 5
// times; the inner i loop already covers all 5 horizontal tiles. Appears
// redundant (but harmless) — confirm before removing.
func Day15_1(input [][]int) int {
	inputFiveTimes := make([][]int, len(input)*5)
	for i := 0; i < len(input)*5; i++ {
		inputFiveTimes[i] = make([]int, len(input[0])*5)
	}
	// Fill the first row of tiles: each horizontal tile is the previous
	// tile's values + 1, wrapping 9 -> 1.
	for j := 0; j < 5; j++ {
		xBig := 0
		for y := 0; y < len(input); y++ {
			for i := 0; i < 5; i++ {
				for x := 0; x < len(input[0]); x++ {
					value := input[y][x]
					if xBig >= len(input[0]) {
						value = inputFiveTimes[y][xBig-len(input[0])] + 1
						if value > 9 {
							value = 1
						}
					}
					inputFiveTimes[y][xBig] = value
					xBig++
				}
			}
			xBig = 0
		}
	}
	// Fill the remaining rows of tiles: each vertical tile derives from the
	// tile one row of tiles above, again +1 with 9 -> 1 wraparound.
	for y := 0; y < len(inputFiveTimes); y++ {
		for x := 0; x < len(inputFiveTimes[0]); x++ {
			value := inputFiveTimes[y][x]
			if y >= len(input) {
				value = inputFiveTimes[y-len(input)][x] + 1
				if value > 9 {
					value = 1
				}
			}
			inputFiveTimes[y][x] = value
		}
	}
	return Day15(inputFiveTimes)
}
func main() {
file, err := os.Open("input.txt")
if err != nil {
log.Fatalf("failed to open")
}
scanner := bufio.NewScanner(file)
scanner.Split(bufio.ScanLines)
var inputArr [][]int
for scanner.Scan() {
var lineArr []int
lineString := scanner.Text()
for _, aChar := range lineString {
anInt, _ := strconv.Atoi(string(aChar))
lineArr = append(lineArr, anInt)
}
inputArr = append(inputArr, lineArr)
}
file.Close()
resultDay15 := Day15(inputArr)
resultDay15_1 := Day15_1(inputArr)
fmt.Println(resultDay15)
fmt.Println(resultDay15_1)
} | 2021/day15/day15.go | 0.729905 | 0.51562 | day15.go | starcoder |
package algebra
import "fmt"
// sqrt2 is a float64 approximation of √2, used only by Float.
const sqrt2 = 1.414213562373095
// QR2 implements numbers in the algebraic number field ℚ(√2) (the rationals
// adjoined with √2). ℚ(√2) = {(a + b√2)/c : a,b,c ∈ ℤ}.
// Elements are represented as [3]int triples (a, b, c).
type QR2 struct {}
// Compile-time interface-satisfaction checks.
var (
	// ℚ(√2) is a field (over triples of ints).
	_ Field[[3]int] = QR2{}
	// Quaternions over ℚ(√2) are a division ring.
	_ DivisionRing[[4][3]int] = Quaternion[[3]int, QR2]{}
)
// Float returns a float64 approximation of x = (a + b√2)/c.
func (QR2) Float(x [3]int) float64 {
	numerator := float64(x[0]) + sqrt2*float64(x[1])
	return numerator / float64(x[2])
}
// Format renders x as the human-readable string "(a+b√2)/c".
func (QR2) Format(x [3]int) string {
	return fmt.Sprintf("(%d+%d√2)/%d", x[0], x[1], x[2])
}
// Canon returns x in a canonical form: the fraction fully reduced and the
// denominator made positive. The all-zero triple canonicalizes to (0+0√2)/1.
func (QR2) Canon(x [3]int) [3]int {
	if x == [3]int{} {
		return [3]int{0, 0, 1}
	}
	// Keep the denominator positive by negating all three components.
	if x[2] < 0 {
		for i := range x {
			x[i] = -x[i]
		}
	}
	// Divide through by the GCD of all three components.
	g := GCD(GCD(Abs(x[0]), Abs(x[1])), Abs(x[2]))
	for i := range x {
		x[i] /= g
	}
	return x
}
// Add returns x+y.
func (q QR2) Add(x, y [3]int) [3]int {
	// Cross-multiply onto the common denominator c1*c2:
	// (a1 + b1√2)/c1 + (a2 + b2√2)/c2
	//   = ((a1c2 + a2c1) + (b1c2 + b2c1)√2) / c1c2
	sum := [3]int{
		x[0]*y[2] + y[0]*x[2],
		x[1]*y[2] + y[1]*x[2],
		x[2] * y[2],
	}
	return q.Canon(sum)
}
// Neg returns -x, negating the numerator (a + b√2) and leaving the
// denominator untouched.
func (QR2) Neg(x [3]int) [3]int {
	return [3]int{-x[0], -x[1], x[2]}
}
// Zero returns the triple representing 0, i.e. (0+0√2)/1.
func (QR2) Zero() [3]int {
	return [3]int{0, 0, 1}
}
// Mul returns x*y.
func (q QR2) Mul(x, y [3]int) [3]int {
	// (a1 + b1√2)(a2 + b2√2) = (a1a2 + 2b1b2) + (a1b2 + a2b1)√2,
	// over the combined denominator c1c2.
	a := x[0]*y[0] + 2*x[1]*y[1]
	b := x[0]*y[1] + x[1]*y[0]
	return q.Canon([3]int{a, b, x[2] * y[2]})
}
// Inv returns 1/x, computed by rationalizing with the conjugate:
// c/(a + b√2) * (a - b√2)/(a - b√2) = (ac - bc√2) / (a² - 2b²).
func (q QR2) Inv(x [3]int) [3]int {
	a, b, c := x[0], x[1], x[2]
	return q.Canon([3]int{a * c, -b * c, a*a - 2*b*b})
}
// Identity returns the triple representing 1, i.e. (1+0√2)/1.
func (QR2) Identity() [3]int {
	return [3]int{1, 0, 1}
}
package graph
import "fmt"
// Graph implements a basic, non directed graph.
// The links map must be non-nil before Link is called (writing to a nil
// map panics), so construct graphs via NewGraph rather than the zero value.
type Graph struct {
	x, y []float64 // nodes coords
	legend []string // node legends
	links map[struct{ x, y int }]bool // edges, encoded with i<j
}
// NewGraph creates a new, empty Graph with its edge map ready for use.
func NewGraph() *Graph {
	return &Graph{
		links: make(map[struct{ x, y int }]bool),
	}
}
// Clone returns a deep copy of the graph: nodes, legends, and edges.
func (g *Graph) Clone() *Graph {
	c := NewGraph()
	for i := range g.x {
		c.x = append(c.x, g.x[i])
		c.y = append(c.y, g.y[i])
		c.legend = append(c.legend, g.legend[i])
		// Re-establish every edge incident to node i.
		for j := range g.x {
			if g.Linked(i, j) {
				c.Link(i, j)
			}
		}
	}
	return c
}
// Size returns the number of nodes.
// Nodes are numbered from 0 to Size-1.
func (g *Graph) Size() int {
	return len(g.x)
}
// Coord gets the (x, y) position of node n.
func (g *Graph) Coord(n int) (float64, float64) {
	return g.x[n], g.y[n]
}
// Legend provides the legend associated with node n.
func (g *Graph) Legend(n int) string {
	return g.legend[n]
}
// Linked reports whether nodes i and j are connected.
// A node is never linked to itself. Links are symmetric, so the lookup is
// normalized to i < j, matching how Link stores edges. The map lookup's
// zero value (false) already covers the absent-key case, replacing the
// previous `if ok && b { return true } return false` chain and the
// tail recursion.
func (g *Graph) Linked(i, j int) bool {
	if i == j {
		return false
	}
	if i > j {
		i, j = j, i
	}
	return g.links[struct{ x, y int }{i, j}]
}
// ToString renders the graph in a human-readable format (for debugging).
func (g *Graph) ToString() string {
	return fmt.Sprintf("%v", *g)
}
// Add appends a node with the given position and legend, returning its index.
func (g *Graph) Add(x, y float64, legend string) int {
	g.x = append(g.x, x)
	g.y = append(g.y, y)
	g.legend = append(g.legend, legend)
	return len(g.x) - 1
}
// Link establishes a link between nodes i and j.
// Self linking is ignored. Links are symmetric: the edge is stored once,
// normalized so that the smaller index comes first.
func (g *Graph) Link(i, j int) {
	if i == j {
		return
	}
	if i > j {
		i, j = j, i
	}
	g.links[struct{ x, y int }{i, j}] = true
}
// Dist2 provides the squared distance between two nodes
func (g *Graph) Dist2(i, j int) float64 {
return (g.x[i]-g.x[j])*(g.x[i]-g.x[j]) + (g.y[i]-g.y[j])*(g.y[i]-g.y[j])
} | graph/graph.go | 0.7773 | 0.457137 | graph.go | starcoder |
package datadog
import (
"encoding/json"
)
// UsageCIVisibilityHour CI visibility usage in a given hour.
type UsageCIVisibilityHour struct {
// The number of spans for pipelines in the queried hour.
CiPipelineIndexedSpans *int32 `json:"ci_pipeline_indexed_spans,omitempty"`
// The number of spans for tests in the queried hour.
CiTestIndexedSpans *int32 `json:"ci_test_indexed_spans,omitempty"`
// Shows the total count of all active Git committers for Pipelines in the current month. A committer is active if they commit at least 3 times in a given month.
CiVisibilityPipelineCommitters *int32 `json:"ci_visibility_pipeline_committers,omitempty"`
// The total count of all active Git committers for tests in the current month. A committer is active if they commit at least 3 times in a given month.
CiVisibilityTestCommitters *int32 `json:"ci_visibility_test_committers,omitempty"`
// The organization name.
OrgName *string `json:"org_name,omitempty"`
// The organization public ID.
PublicId *string `json:"public_id,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:-`
}
// NewUsageCIVisibilityHour instantiates a new UsageCIVisibilityHour object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewUsageCIVisibilityHour() *UsageCIVisibilityHour {
	return &UsageCIVisibilityHour{}
}
// NewUsageCIVisibilityHourWithDefaults instantiates a new UsageCIVisibilityHour object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewUsageCIVisibilityHourWithDefaults() *UsageCIVisibilityHour {
	return &UsageCIVisibilityHour{}
}
// GetCiPipelineIndexedSpans returns the CiPipelineIndexedSpans field value if set, zero value otherwise.
func (o *UsageCIVisibilityHour) GetCiPipelineIndexedSpans() int32 {
	if o == nil || o.CiPipelineIndexedSpans == nil {
		return 0
	}
	return *o.CiPipelineIndexedSpans
}

// GetCiPipelineIndexedSpansOk returns a tuple with the CiPipelineIndexedSpans field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageCIVisibilityHour) GetCiPipelineIndexedSpansOk() (*int32, bool) {
	if o == nil || o.CiPipelineIndexedSpans == nil {
		return nil, false
	}
	return o.CiPipelineIndexedSpans, true
}

// HasCiPipelineIndexedSpans returns a boolean if a field has been set.
// (Simplified from an if cond { return true } / return false chain.)
func (o *UsageCIVisibilityHour) HasCiPipelineIndexedSpans() bool {
	return o != nil && o.CiPipelineIndexedSpans != nil
}

// SetCiPipelineIndexedSpans gets a reference to the given int32 and assigns it to the CiPipelineIndexedSpans field.
func (o *UsageCIVisibilityHour) SetCiPipelineIndexedSpans(v int32) {
	o.CiPipelineIndexedSpans = &v
}
// GetCiTestIndexedSpans returns the CiTestIndexedSpans field value if set, zero value otherwise.
func (o *UsageCIVisibilityHour) GetCiTestIndexedSpans() int32 {
	if o == nil || o.CiTestIndexedSpans == nil {
		return 0
	}
	return *o.CiTestIndexedSpans
}

// GetCiTestIndexedSpansOk returns a tuple with the CiTestIndexedSpans field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageCIVisibilityHour) GetCiTestIndexedSpansOk() (*int32, bool) {
	if o == nil || o.CiTestIndexedSpans == nil {
		return nil, false
	}
	return o.CiTestIndexedSpans, true
}

// HasCiTestIndexedSpans returns a boolean if a field has been set.
// (Simplified from an if cond { return true } / return false chain.)
func (o *UsageCIVisibilityHour) HasCiTestIndexedSpans() bool {
	return o != nil && o.CiTestIndexedSpans != nil
}

// SetCiTestIndexedSpans gets a reference to the given int32 and assigns it to the CiTestIndexedSpans field.
func (o *UsageCIVisibilityHour) SetCiTestIndexedSpans(v int32) {
	o.CiTestIndexedSpans = &v
}
// GetCiVisibilityPipelineCommitters returns the CiVisibilityPipelineCommitters field value if set, zero value otherwise.
func (o *UsageCIVisibilityHour) GetCiVisibilityPipelineCommitters() int32 {
	if o == nil || o.CiVisibilityPipelineCommitters == nil {
		return 0
	}
	return *o.CiVisibilityPipelineCommitters
}

// GetCiVisibilityPipelineCommittersOk returns a tuple with the CiVisibilityPipelineCommitters field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageCIVisibilityHour) GetCiVisibilityPipelineCommittersOk() (*int32, bool) {
	if o == nil || o.CiVisibilityPipelineCommitters == nil {
		return nil, false
	}
	return o.CiVisibilityPipelineCommitters, true
}

// HasCiVisibilityPipelineCommitters returns a boolean if a field has been set.
// (Simplified from an if cond { return true } / return false chain.)
func (o *UsageCIVisibilityHour) HasCiVisibilityPipelineCommitters() bool {
	return o != nil && o.CiVisibilityPipelineCommitters != nil
}

// SetCiVisibilityPipelineCommitters gets a reference to the given int32 and assigns it to the CiVisibilityPipelineCommitters field.
func (o *UsageCIVisibilityHour) SetCiVisibilityPipelineCommitters(v int32) {
	o.CiVisibilityPipelineCommitters = &v
}
// GetCiVisibilityTestCommitters returns the CiVisibilityTestCommitters field value if set, zero value otherwise.
func (o *UsageCIVisibilityHour) GetCiVisibilityTestCommitters() int32 {
	if o == nil || o.CiVisibilityTestCommitters == nil {
		return 0
	}
	return *o.CiVisibilityTestCommitters
}

// GetCiVisibilityTestCommittersOk returns a tuple with the CiVisibilityTestCommitters field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageCIVisibilityHour) GetCiVisibilityTestCommittersOk() (*int32, bool) {
	if o == nil || o.CiVisibilityTestCommitters == nil {
		return nil, false
	}
	return o.CiVisibilityTestCommitters, true
}

// HasCiVisibilityTestCommitters returns a boolean if a field has been set.
// (Simplified from an if cond { return true } / return false chain.)
func (o *UsageCIVisibilityHour) HasCiVisibilityTestCommitters() bool {
	return o != nil && o.CiVisibilityTestCommitters != nil
}

// SetCiVisibilityTestCommitters gets a reference to the given int32 and assigns it to the CiVisibilityTestCommitters field.
func (o *UsageCIVisibilityHour) SetCiVisibilityTestCommitters(v int32) {
	o.CiVisibilityTestCommitters = &v
}
// GetOrgName returns the OrgName field value if set, zero value otherwise.
func (o *UsageCIVisibilityHour) GetOrgName() string {
	if o == nil || o.OrgName == nil {
		return ""
	}
	return *o.OrgName
}

// GetOrgNameOk returns a tuple with the OrgName field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageCIVisibilityHour) GetOrgNameOk() (*string, bool) {
	if o == nil || o.OrgName == nil {
		return nil, false
	}
	return o.OrgName, true
}

// HasOrgName returns a boolean if a field has been set.
// (Simplified from an if cond { return true } / return false chain.)
func (o *UsageCIVisibilityHour) HasOrgName() bool {
	return o != nil && o.OrgName != nil
}

// SetOrgName gets a reference to the given string and assigns it to the OrgName field.
func (o *UsageCIVisibilityHour) SetOrgName(v string) {
	o.OrgName = &v
}
// GetPublicId returns the PublicId field value if set, zero value otherwise.
func (o *UsageCIVisibilityHour) GetPublicId() string {
if o == nil || o.PublicId == nil {
var ret string
return ret
}
return *o.PublicId
}
// GetPublicIdOk returns a tuple with the PublicId field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageCIVisibilityHour) GetPublicIdOk() (*string, bool) {
if o == nil || o.PublicId == nil {
return nil, false
}
return o.PublicId, true
}
// HasPublicId returns a boolean if a field has been set.
func (o *UsageCIVisibilityHour) HasPublicId() bool {
if o != nil && o.PublicId != nil {
return true
}
return false
}
// SetPublicId gets a reference to the given string and assigns it to the PublicId field.
func (o *UsageCIVisibilityHour) SetPublicId(v string) {
o.PublicId = &v
}
// MarshalJSON serializes UsageCIVisibilityHour, emitting only the fields
// that are set (non-nil). If the object originally came from a payload
// that could not be parsed into the typed struct, the raw payload stored
// in UnparsedObject is re-emitted verbatim instead.
func (o UsageCIVisibilityHour) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.UnparsedObject != nil {
		// Round-trip unknown payloads untouched.
		return json.Marshal(o.UnparsedObject)
	}
	if o.CiPipelineIndexedSpans != nil {
		toSerialize["ci_pipeline_indexed_spans"] = o.CiPipelineIndexedSpans
	}
	if o.CiTestIndexedSpans != nil {
		toSerialize["ci_test_indexed_spans"] = o.CiTestIndexedSpans
	}
	if o.CiVisibilityPipelineCommitters != nil {
		toSerialize["ci_visibility_pipeline_committers"] = o.CiVisibilityPipelineCommitters
	}
	if o.CiVisibilityTestCommitters != nil {
		toSerialize["ci_visibility_test_committers"] = o.CiVisibilityTestCommitters
	}
	if o.OrgName != nil {
		toSerialize["org_name"] = o.OrgName
	}
	if o.PublicId != nil {
		toSerialize["public_id"] = o.PublicId
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON deserializes JSON bytes into UsageCIVisibilityHour.
// If the payload does not match the expected field types, the raw JSON
// object is preserved in UnparsedObject instead of returning an error,
// so that unknown/invalid payloads still round-trip through MarshalJSON.
func (o *UsageCIVisibilityHour) UnmarshalJSON(bytes []byte) (err error) {
	raw := map[string]interface{}{}
	all := struct {
		CiPipelineIndexedSpans *int64 `json:"ci_pipeline_indexed_spans,omitempty"`
		CiTestIndexedSpans *int64 `json:"ci_test_indexed_spans,omitempty"`
		CiVisibilityPipelineCommitters *int32 `json:"ci_visibility_pipeline_committers,omitempty"`
		CiVisibilityTestCommitters *int32 `json:"ci_visibility_test_committers,omitempty"`
		OrgName *string `json:"org_name,omitempty"`
		PublicId *string `json:"public_id,omitempty"`
	}{}
	err = json.Unmarshal(bytes, &all)
	if err != nil {
		// Typed decode failed: fall back to keeping the raw object.
		err = json.Unmarshal(bytes, &raw)
		if err != nil {
			return err
		}
		o.UnparsedObject = raw
		return nil
	}
	o.CiPipelineIndexedSpans = all.CiPipelineIndexedSpans
	o.CiTestIndexedSpans = all.CiTestIndexedSpans
	o.CiVisibilityPipelineCommitters = all.CiVisibilityPipelineCommitters
	o.CiVisibilityTestCommitters = all.CiVisibilityTestCommitters
	o.OrgName = all.OrgName
	o.PublicId = all.PublicId
	return nil
}
package crypto
import (
"crypto/subtle"
"fmt"
C25519 "github.com/incognitochain/go-incognito-sdk-v2/crypto/curve25519"
)
// Point represents an elliptic curve point. It only needs 32 bytes to represent a point.
type Point struct {
	key C25519.Key
}
// RandomPoint returns a random Point on the elliptic curve,
// computed as r*G for a freshly generated random scalar r.
func RandomPoint() *Point {
	sc := RandomScalar()
	return new(Point).ScalarMultBase(sc)
}
// PointValid checks if a Point is valid, i.e. whether its 32-byte
// encoding decodes to a group element.
func (p Point) PointValid() bool {
	var point C25519.ExtendedGroupElement
	return point.FromBytes(&p.key)
}
// GetKey returns the key of a Point.
func (p Point) GetKey() C25519.Key {
	return p.key
}
// SetKey sets v as the key of a Point.
// It returns an error when v does not decode to a valid group element;
// in that case the returned *Point is nil, but note the receiver's key
// has already been overwritten with the invalid value.
func (p *Point) SetKey(v *C25519.Key) (*Point, error) {
	p.key = *v
	var point C25519.ExtendedGroupElement
	if !point.FromBytes(&p.key) {
		return nil, fmt.Errorf("invalid point value")
	}
	return p, nil
}
// Set sets p = q and returns p.
func (p *Point) Set(q *Point) *Point {
	p.key = q.key
	return p
}
// String returns the hex-encoded string of a Point.
func (p Point) String() string {
	return fmt.Sprintf("%x", p.key[:])
}
// ToBytes returns an 32-byte long array from a Point.
func (p Point) ToBytes() [Ed25519KeySize]byte {
	return p.key.ToBytes()
}
// ToBytesS returns a slice of bytes from a Point.
// The slice is backed by a fresh array, not by the Point's internal key.
func (p Point) ToBytesS() []byte {
	slice := p.key.ToBytes()
	return slice[:]
}
// FromBytes sets an array of 32 bytes to a Point.
// It returns an error when the bytes do not decode to a valid group element.
func (p *Point) FromBytes(b [Ed25519KeySize]byte) (*Point, error) {
	p.key.FromBytes(b)
	var point C25519.ExtendedGroupElement
	if !point.FromBytes(&p.key) {
		return nil, fmt.Errorf("invalid point value")
	}
	return p, nil
}
// FromBytesS sets a slice of bytes to a Point.
// The slice must be exactly Ed25519KeySize (32) bytes long and must
// decode to a valid group element.
func (p *Point) FromBytesS(b []byte) (*Point, error) {
	if len(b) != Ed25519KeySize {
		return nil, fmt.Errorf("invalid Ed25519 Key Size")
	}
	var array [Ed25519KeySize]byte
	copy(array[:], b)
	p.key.FromBytes(array)
	var point C25519.ExtendedGroupElement
	if !point.FromBytes(&p.key) {
		return nil, fmt.Errorf("invalid point value")
	}
	return p, nil
}
// Identity sets p to the identity point (the neutral element of point
// addition) and returns p.
func (p *Point) Identity() *Point {
	p.key = C25519.Identity
	return p
}
// IsIdentity checks if p is the identity point.
func (p Point) IsIdentity() bool {
	// C25519.Key is a comparable array type, so == is an exact byte
	// comparison; the previous `if cond { return true } return false`
	// wrapper was redundant.
	return p.key == C25519.Identity
}
// ScalarMultBase set p = a * G, where a is a scalar and G is the curve base point and returns p.
func (p *Point) ScalarMultBase(a *Scalar) *Point {
	key := C25519.ScalarmultBase(&a.key)
	p.key = *key
	return p
}
// ScalarMult sets p = a * pa and returns p.
// pa is not modified; p and pa may be the same point.
func (p *Point) ScalarMult(pa *Point, a *Scalar) *Point {
	key := C25519.ScalarMultKey(&pa.key, &a.key)
	p.key = *key
	return p
}
// MultiScalarMult sets p = sum(sList[i] * pList[i]) and returns p.
// It panics if the two input slices differ in length.
func (p *Point) MultiScalarMult(sList []*Scalar, pList []*Point) *Point {
	if len(sList) != len(pList) {
		panic("cannot Multi-ScalarMult with different size inputs")
	}
	scalarKeyLs := make([]*C25519.Key, len(sList))
	pointKeyLs := make([]*C25519.Key, len(sList))
	for i := range sList {
		scalarKeyLs[i] = &sList[i].key
		pointKeyLs[i] = &pList[i].key
	}
	key := C25519.MultiScalarMultKey(pointKeyLs, scalarKeyLs)
	// BUG FIX: the previous implementation returned a freshly allocated
	// Point and never modified the receiver, contradicting the documented
	// "sets p = ..." contract followed by every other method of Point.
	// It could also return nil when SetKey failed, since SetKey's error
	// was discarded.
	p.key = *key
	return p
}
// InvertScalarMultBase sets p = (1/a) * G and returns p.
// The inverse is taken modulo the group order.
func (p *Point) InvertScalarMultBase(a *Scalar) *Point {
	inv := new(Scalar).Invert(a)
	p.ScalarMultBase(inv)
	return p
}
// InvertScalarMult sets p = (1/a) * pa and returns p.
func (p *Point) InvertScalarMult(pa *Point, a *Scalar) *Point {
	inv := new(Scalar).Invert(a)
	p.ScalarMult(pa, inv)
	return p
}
// Derive sets p = 1/(a+b) * pa and returns p.
func (p *Point) Derive(pa *Point, a *Scalar, b *Scalar) *Point {
	c := new(Scalar).Add(a, b)
	return p.InvertScalarMult(pa, c)
}
// Add sets p = pa + pb and returns p.
func (p *Point) Add(pa, pb *Point) *Point {
	res := p.key
	C25519.AddKeys(&res, &pa.key, &pb.key)
	p.key = res
	return p
}
// AddPedersen sets p = aA + bB and returns p.
// NOTE(review): the boolean results of Ae.FromBytes / Be.FromBytes are
// discarded, so A and B are assumed to be valid points — confirm callers
// guarantee this.
func (p *Point) AddPedersen(a *Scalar, A *Point, b *Scalar, B *Point) *Point {
	var precomputedA [8]C25519.CachedGroupElement
	Ae := new(C25519.ExtendedGroupElement)
	Ae.FromBytes(&A.key)
	C25519.GePrecompute(&precomputedA, Ae)
	var precomputedB [8]C25519.CachedGroupElement
	Be := new(C25519.ExtendedGroupElement)
	Be.FromBytes(&B.key)
	C25519.GePrecompute(&precomputedB, Be)
	var key C25519.Key
	C25519.AddKeys3_3(&key, &a.key, &precomputedA, &b.key, &precomputedB)
	p.key = key
	return p
}
// Sub sets p = pa - pb and returns p.
func (p *Point) Sub(pa, pb *Point) *Point {
	res := p.key
	C25519.SubKeys(&res, &pa.key, &pb.key)
	p.key = res
	return p
}
// IsPointEqual checks if pa = pb.
// The comparison runs in constant time to avoid leaking information
// through timing side channels.
func IsPointEqual(pa *Point, pb *Point) bool {
	tmpA := pa.ToBytesS()
	tmpB := pb.ToBytesS()
	return subtle.ConstantTimeCompare(tmpA, tmpB) == 1
}
// HashToPointFromIndex returns the hash of the concatenation of padStr and index.
// NOTE(review): []byte(string(index)) converts the int64 to the UTF-8
// encoding of the code point `index`, NOT its decimal representation
// (go vet flags this conversion). It looks like a bug, but replacing it
// with strconv.FormatInt would change every point previously derived by
// this function — confirm compatibility requirements before touching.
func HashToPointFromIndex(index int64, padStr string) *Point {
	array := C25519.GBASE.ToBytes()
	msg := array[:]
	msg = append(msg, []byte(padStr)...)
	msg = append(msg, []byte(string(index))...)
	keyHash := C25519.Key(C25519.Keccak256(msg))
	keyPoint := keyHash.HashToPoint()
	p, _ := new(Point).SetKey(keyPoint)
	return p
}
// HashToPoint returns the Point value of the hash of b.
// SetKey's error is discarded: HashToPoint on the key type is assumed
// to always produce a valid group element — TODO confirm.
func HashToPoint(b []byte) *Point {
	keyHash := C25519.Key(C25519.Keccak256(b))
	keyPoint := keyHash.HashToPoint()
	p, _ := new(Point).SetKey(keyPoint)
	return p
}
package crossover
import (
in "github.com/RevelesD/GoBasicGA/algorithm/InitialPopulation"
"github.com/RevelesD/GoBasicGA/lib"
"math/rand"
)
/**
Note
this version of the AGT is going to differ slightly from
the one studied on class, this is because it's not clear
if elitism happens before or after the crossover. If it happens
before the crossover and the number of elements passed
by elitism is an odd number it's going to cause problems when
we try to insert an even number of new elements to the next
generation and because there are an odd number of elements
passed by elitism the generation is going to either fall short
or overflow on individuals.
To prevent this there are two options,
1) Make the elitism after the crossover and replace k random
individuals from the new generation with the elitism selection.
2) Make the elitism before the crossover and Accept only even
numbers for elitism.
In this implementation we are going with option 2.
For the first implementation the next lecture is recommended
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5597564/
*/
/**
 * Crossover selects a pair of distinct parents from the current generation
 * (fitness-proportional, via their cumulative evaluations) and breeds them,
 * returning the two resulting genomes.
 *
 * @param(c) Current generation
 * @param(eval) Current generation evaluation
 * @param(ct) Crossover threshold
 * @param(n) Nucleotide tables used to evaluate the offspring's genes
 */
func Crossover(c []lib.Genome, eval []lib.Evaluation, ct *float64, n [][]lib.Nucleotide) (*lib.Genome, *lib.Genome) {
	a, b := selectPair(eval)
	x, y := crossoverPair(&c[a], &c[b], ct, n)
	return x, y
}
/**
 * Roulette-wheel selection: each pass picks the element whose cumulative
 * probability is the smallest value still greater than a uniform random
 * draw.
 *
 * NOTE(review): if the population effectively has only one selectable
 * element, the second loop can never find a different index and spins
 * forever — confirm the caller guarantees at least two selectable
 * individuals.
 *
 * param(eval) Evaluations of the current generation
 * returns() A pair of indexes from different elements
 */
func selectPair(eval []lib.Evaluation) (int, int) {
	random := rand.Float64()
	var current = 1.0
	var firstIndex int
	var secondIndex int
	for _, value := range eval {
		if random < value.Cumulative && value.Cumulative < current {
			current = value.Cumulative
			firstIndex = value.Element
		}
	}
	current = 1.0
	// Redraw until the second parent differs from the first.
	for {
		random = rand.Float64()
		for _, value := range eval {
			if random < value.Cumulative && value.Cumulative < current {
				current = value.Cumulative
				secondIndex = value.Element
			}
		}
		if secondIndex != firstIndex {
			break
		}
		current = 1.0
	}
	return firstIndex, secondIndex
}
/**
* param(ct) Crossover threshold
* param(f) The first element selected to be breed
* param(s) The second element selected to be breed
* returns() The progeny result of the crossover
*/
func crossoverPair(f *lib.Genome, s *lib.Genome, ct *float64, n [][]lib.Nucleotide) (*lib.Genome, *lib.Genome) {
if rand.Float64() <= *ct {
return f, s
}
var half int
chromosomesA := make([]lib.Chromosome, len(f.Chromosomes))
chromosomesB := make([]lib.Chromosome, len(f.Chromosomes))
a := lib.Genome{Chromosomes: chromosomesA}
b := lib.Genome{Chromosomes: chromosomesB}
for i, value := range f.Chromosomes {
half = len(value.Gen) / 2
chromosomesA[i].Gen = append(f.Chromosomes[i].Gen[:half], s.Chromosomes[i].Gen[half:]...)
chromosomesA[i].Allele = in.EvaluateGen(chromosomesA[i].Gen, n[i])
chromosomesB[i].Gen = append(s.Chromosomes[i].Gen[:half], f.Chromosomes[i].Gen[half:]...)
chromosomesB[i].Allele = in.EvaluateGen(chromosomesB[i].Gen, n[i])
}
return &a, &b
} | algorithm/crossover/crossover.go | 0.665845 | 0.476762 | crossover.go | starcoder |
package parser
// line parsers are dispatch calls that parse a single unit of text into a
// Node object which contains the whole statement. Dockerfiles have varied
// (but not usually unique, see ONBUILD for a unique example) parsing rules
// per-command, and these unify the processing in a way that makes it
// manageable.
import (
"encoding/json"
"strings"
"unicode"
"github.com/pkg/errors"
)
var (
errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
)
// ignore the current argument. This will still leave a command parsed, but
// will not incorporate the arguments into the ast. The returned empty node
// keeps the statement present in the AST with no children.
func parseIgnore(rest string, d *directives) (*Node, map[string]bool, error) {
	return &Node{}, nil, nil
}
// parses a whitespace-delimited set of arguments. The result is effectively a
// linked list of string arguments, one Node per whitespace-separated token.
func parseStringsWhitespaceDelimited(rest string, _ *directives) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	// Build the list by keeping a pointer to the tail; this avoids the
	// placeholder-node-then-nil-it-out dance.
	var head, tail *Node
	for _, word := range reWhitespace.Split(rest, -1) {
		next := &Node{Value: word}
		if head == nil {
			head = next
		} else {
			tail.Next = next
		}
		tail = next
	}
	return head, nil, nil
}
// parseJSON converts JSON arrays to an AST.
// Each array element becomes one Node in a linked list; non-string
// elements are rejected with errDockerfileNotStringArray.
func parseJSON(rest string, _ *directives) (*Node, map[string]bool, error) {
	rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
	if !strings.HasPrefix(rest, "[") {
		return nil, nil, errors.Errorf(`error parsing "%s" as a JSON array`, rest)
	}
	var myJSON []interface{}
	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
		return nil, nil, err
	}
	var top, prev *Node
	for _, str := range myJSON {
		s, ok := str.(string)
		if !ok {
			return nil, nil, errDockerfileNotStringArray
		}
		node := &Node{Value: s}
		if prev == nil {
			top = node
		} else {
			prev.Next = node
		}
		prev = node
	}
	// The "json" attribute records that the arguments came in exec form.
	return top, map[string]bool{"json": true}, nil
}
// parseMaybeJSON determines if the argument appears to be a JSON array. If
// so, passes to parseJSON; if not, quotes the result and returns a single
// node.
// A non-string element inside a valid JSON array is still a hard error;
// any other parse failure silently falls back to shell form.
func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}
	node, attrs, err := parseJSON(rest, d)
	if err == nil {
		return node, attrs, nil
	}
	if err == errDockerfileNotStringArray {
		return nil, nil, err
	}
	// Not JSON: treat the whole remainder as a single shell-form argument.
	node = &Node{}
	node.Value = rest
	return node, nil, nil
}
package store
import (
"time"
"github.com/Semior001/timetype"
)
// Location describes a room or auditory where the ClassDescription is held.
type Location string
// EducationalProgram describes a study level of students.
type EducationalProgram string
// Basic educational levels.
const (
	Bachelor EducationalProgram = "bachelor"
	Master   EducationalProgram = "master"
)
// CourseFormat describes a parts (couples, lessons) of the Course.
type CourseFormat string
// Basic course formats.
const (
	Lecture  CourseFormat = "lecture"
	Tutorial CourseFormat = "tutorial"
	Lab      CourseFormat = "lab"
)
// Course describes a basic semester course, e.g. "Operational systems".
type Course struct {
	ID              string             `json:"id"`   // a hash derived from all others fields
	Name            string             `json:"name"` // the name of the course
	Program         EducationalProgram `json:"program,omitempty"` // bachelor, master or graduate
	Formats         []CourseFormat     `json:"formats,omitempty"` // a set of preferable course formats
	Groups          []Group            `json:"groups,omitempty"`  // a study groups, e.g. "BS19-04"
	Assistants      []Teacher          `json:"assistants,omitempty"` // teacher assistants of the course
	PrimaryLector   Teacher            `json:"primary_lector"` // e.g. who leads the lectures
	AssistantLector Teacher            `json:"assistant_lector,omitempty"` // e.g. who leads the tutorials, might be empty
	StudyYear       StudyYear          `json:"study_year"`
	Classes         []ClassDescription `json:"classes,omitempty"` // classes of the course, i.e. the course schedule
}
// ClassDescription describes a basic lesson of the Course, e.g. a couple.
type ClassDescription struct {
	ID       string        `json:"id"`
	Title    string        `json:"title"`
	Location Location      `json:"location"`
	Start    time.Time     `json:"start"`
	Duration time.Duration `json:"duration"`
	Repeats  int           `json:"repeats"` // how many times the class repeats
}
// Class describes a general class with its references to groups, courses, teachers etc.
// It embeds ClassDescription and adds the entities the class belongs to.
type Class struct {
	ClassDescription
	Course  Course
	Group   Group
	Teacher Teacher
}
// Group describes a basic students group, e.g. "BS19-04".
type Group struct {
	ID        string    `json:"id"`
	Name      string    `json:"name"` // name of this group, e.g. "BS19-04"
	StudyYear StudyYear `json:"study_year"`
}
// PrepareUntrusted sets zero values to all immutable for user fields.
// NOTE(review): only ID is reset here even though the comment says
// "all immutable" fields — confirm whether StudyYear should be cleared too.
func (g *Group) PrepareUntrusted() {
	g.ID = ""
}
// StudyYear describes a particular study year like "BS - Year 1 (Computer Science)".
type StudyYear struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}
// TimeSlot describes a particular period of time in a week.
type TimeSlot struct {
	ID       string            `json:"id"`       // id of this time slot
	Weekday  time.Weekday      `json:"weekday"`  // a weekday of time slot
	Start    timetype.Clock    `json:"start"`    // start time of time slot
	Duration timetype.Duration `json:"duration"` // duration of a time slot
}
package goequals
import (
"reflect"
)
// Equals reports whether v1 and v2 hold equal values, letting numeric
// values of different Go types (e.g. int vs int64, float32 vs float64)
// compare equal when they represent the same number. Slices, arrays and
// maps are compared element-wise with the same loose rules; every other
// kind falls back to reflect.DeepEqual.
//
// NOTE: floats are compared with a fixed absolute tolerance of 1e-6
// (pre-existing behavior, preserved), and uint/uint8..uint32 are funneled
// through int64, so a uint larger than MaxInt64 wraps (also pre-existing).
func Equals(v1, v2 interface{}) bool {
	switch v := v1.(type) {
	case int:
		return equalsInt(int64(v), v2)
	case int8:
		return equalsInt(int64(v), v2)
	case int16:
		return equalsInt(int64(v), v2)
	case int32:
		return equalsInt(int64(v), v2)
	case int64:
		return equalsInt(v, v2)
	case uint:
		return equalsInt(int64(v), v2)
	case uint8:
		return equalsInt(int64(v), v2)
	case uint16:
		return equalsInt(int64(v), v2)
	case uint32:
		return equalsInt(int64(v), v2)
	case uint64:
		return compareUint(v, v2)
	case float32:
		return compareFloat(float64(v), v2)
	case float64:
		return compareFloat(v, v2)
	case string:
		return compareString(v, v2)
	case bool:
		return compareBool(v, v2)
	case complex64:
		return compareComplex(complex128(v), v2)
	case complex128:
		return compareComplex(v, v2)
	}

	val1 := reflect.ValueOf(v1)
	val2 := reflect.ValueOf(v2)
	switch val1.Kind() {
	case reflect.Slice:
		return compareSlice(val1, val2)
	case reflect.Map:
		return compareMap(val1, val2)
	case reflect.Array:
		return compareArray(val1, val2)
	default:
		return reflect.DeepEqual(v1, v2)
	}
}

// compareComplex reports whether v1 equals the complex value in _v2.
func compareComplex(v1 complex128, _v2 interface{}) bool {
	switch v2 := _v2.(type) {
	case complex64:
		return v1 == complex128(v2)
	case complex128:
		return v1 == v2
	}
	return false
}

// compareArray compares two arrays element-wise using Equals.
func compareArray(v1 reflect.Value, v2 reflect.Value) bool {
	if v2.Kind() != reflect.Array {
		return false
	}
	dv1 := downgradeArray(v1)
	dv2 := downgradeArray(v2)
	if len(dv1) != len(dv2) {
		return false
	}
	for i, elem1 := range dv1 {
		if !Equals(elem1, dv2[i]) {
			return false
		}
	}
	return true
}

// compareMap compares two maps: same size, and for every key of v1 there
// must be an Equals-equal key in v2 with an Equals-equal value. Matched
// keys are removed from the working copy of v2 so each key pairs at most once.
func compareMap(v1, v2 reflect.Value) bool {
	if v2.Kind() != reflect.Map {
		return false
	}
	dv1 := downgradeMap(v1)
	dv2 := downgradeMap(v2)
	if len(dv1) != len(dv2) {
		return false
	}
	for k1, elem1 := range dv1 {
		found := false
		for k2, elem2 := range dv2 {
			if Equals(k1, k2) {
				if !Equals(elem1, elem2) {
					return false
				}
				delete(dv2, k2)
				found = true
				break
			}
		}
		// BUG FIX: previously a key of v1 with no matching key in v2 was
		// silently skipped, so same-sized maps with disjoint keys (e.g.
		// {1:2} vs {3:2}) compared equal.
		if !found {
			return false
		}
	}
	return true
}

// compareSlice compares two slices element-wise using Equals.
func compareSlice(v1, v2 reflect.Value) bool {
	if v2.Kind() != reflect.Slice {
		return false
	}
	dv1 := downgradeSlice(v1)
	dv2 := downgradeSlice(v2)
	if len(dv1) != len(dv2) {
		return false
	}
	for i, elem1 := range dv1 {
		if !Equals(elem1, dv2[i]) {
			return false
		}
	}
	return true
}

// downgradeSlice copies a reflected slice into a []interface{}.
func downgradeSlice(v reflect.Value) (dv []interface{}) {
	dv = make([]interface{}, v.Len())
	for i := 0; i < len(dv); i++ {
		dv[i] = v.Index(i).Interface()
	}
	return
}

// downgradeMap copies a reflected map into a map[interface{}]interface{}.
func downgradeMap(v reflect.Value) (dv map[interface{}]interface{}) {
	dv = make(map[interface{}]interface{}, v.Len())
	for _, key := range v.MapKeys() {
		elem := v.MapIndex(key)
		dv[key.Interface()] = elem.Interface()
	}
	return
}

// downgradeArray copies a reflected array into a []interface{}.
func downgradeArray(v reflect.Value) (dv []interface{}) {
	dv = make([]interface{}, v.Len())
	for i := 0; i < len(dv); i++ {
		dv[i] = v.Index(i).Interface()
	}
	return
}

// compareBool reports whether v equals the bool in v2.
func compareBool(v bool, v2 interface{}) bool {
	switch v2 := v2.(type) {
	case bool:
		return v == v2
	default:
		return false
	}
}

// compareString reports whether v equals the string in v2.
func compareString(v string, v2 interface{}) bool {
	switch v2 := v2.(type) {
	case string:
		return v == v2
	default:
		return false
	}
}

// compareFloat reports whether v is within 1e-6 of the float in v2.
func compareFloat(v float64, v2 interface{}) bool {
	switch v2 := v2.(type) {
	case float32:
		return v-0.000001 < float64(v2) && float64(v2) < v+0.000001
	case float64:
		return v-0.000001 < v2 && v2 < v+0.000001
	default:
		return false
	}
}

// equalsInt reports whether the int64 v equals the integer value in r.
func equalsInt(v int64, r interface{}) bool {
	switch v2 := r.(type) {
	case int:
		return int64(v2) == v
	case int8:
		return int64(v2) == v
	case int16:
		return int64(v2) == v
	case int32:
		return int64(v2) == v
	case int64:
		return v2 == v
	case uint:
		return int64(v2) == v
	case uint8:
		return int64(v2) == v
	case uint16:
		return int64(v2) == v
	case uint32:
		return int64(v2) == v
	case uint64:
		// A negative v can never equal an unsigned value; the conversion
		// below would otherwise wrap.
		if v < 0 {
			return false
		}
		return int64(v2) == v
	default:
		return false
	}
}

// compareUint reports whether the uint64 v equals the integer value in r.
// Signed candidates must be non-negative; for unsigned candidates the
// previous vacuous `v2 >= 0` checks (always true) have been dropped.
func compareUint(v uint64, r interface{}) bool {
	switch v2 := r.(type) {
	case int:
		return v2 >= 0 && uint64(v2) == v
	case int8:
		return v2 >= 0 && uint64(v2) == v
	case int16:
		return v2 >= 0 && uint64(v2) == v
	case int32:
		return v2 >= 0 && uint64(v2) == v
	case int64:
		return v2 >= 0 && uint64(v2) == v
	case uint:
		return uint64(v2) == v
	case uint8:
		return uint64(v2) == v
	case uint16:
		return uint64(v2) == v
	case uint32:
		return uint64(v2) == v
	case uint64:
		return v2 == v
	default:
		return false
	}
}
package encoding
import (
"crypto/rand"
"github.com/OpenWhiteBox/primitives/matrix"
)
// EquivalentBytes returns true if two Byte encodings are identical and false if not.
// Two encodings are identical exactly when they agree on all 256 possible inputs.
func EquivalentBytes(a, b Byte) bool {
	for x := 0; x <= 0xff; x++ {
		in := byte(x)
		if a.Encode(in) != b.Encode(in) {
			return false
		}
	}
	return true
}
// DecomposeByteLinear decomposes an opaque Byte encoding into a ByteLinear encoding.
// It evaluates the encoding on the 8 basis vectors to recover the matrix.
// The second return value is false when the recovered matrix is not
// invertible, i.e. the encoding was not actually linear/bijective.
func DecomposeByteLinear(in Byte) (ByteLinear, bool) {
	m := matrix.Matrix{}
	for i := uint(0); i < 8; i++ {
		m = append(m, matrix.Row{in.Encode(byte(1 << i))})
	}
	forwards := m.Transpose()
	backwards, ok := forwards.Invert()
	return ByteLinear{
		Forwards:  forwards,
		Backwards: backwards,
	}, ok
}
// DecomposeByteAffine decomposes an opaque Byte encoding into a ByteAffine encoding.
// The additive constant is the image of zero; stripping it off leaves the
// linear part, which is decomposed as above.
func DecomposeByteAffine(in Byte) (ByteAffine, bool) {
	c := ByteAdditive(in.Encode(0))
	M, ok := DecomposeByteLinear(ComposedBytes{in, c})
	return ByteAffine{
		ByteLinear:   M,
		ByteAdditive: c,
	}, ok
}
// ProbablyEquivalentDoubles returns true if two Double encodings are probably equivalent and false if they're
// definitely not. The check is probabilistic: 20 random inputs are compared.
// rand.Read's error is ignored — crypto/rand is assumed not to fail here.
func ProbablyEquivalentDoubles(a, b Double) bool {
	for i := 0; i < 20; i++ {
		in := [2]byte{}
		rand.Read(in[:])
		x, y := a.Encode(in), b.Encode(in)
		if x != y {
			return false
		}
	}
	return true
}
// DecomposeDoubleLinear decomposes an opaque Double encoding into a DoubleLinear encoding.
// It evaluates the encoding on the 16 basis vectors (8 bits per byte position).
// The second return value is false when the recovered matrix is not invertible.
func DecomposeDoubleLinear(in Double) (DoubleLinear, bool) {
	m := matrix.Matrix{}
	for i := 0; i < 2; i++ {
		for j := uint(0); j < 8; j++ {
			x := [2]byte{}
			x[i] = 1 << j
			x = in.Encode(x)
			m = append(m, matrix.Row(x[:]))
		}
	}
	forwards := m.Transpose()
	backwards, ok := forwards.Invert()
	return DoubleLinear{
		Forwards:  forwards,
		Backwards: backwards,
	}, ok
}
// DecomposeDoubleAffine decomposes an opaque Double encoding into a DoubleAffine encoding.
// The additive constant is the image of zero; the remainder is decomposed
// as a linear encoding.
func DecomposeDoubleAffine(in Double) (DoubleAffine, bool) {
	c := DoubleAdditive(in.Encode([2]byte{}))
	M, ok := DecomposeDoubleLinear(ComposedDoubles{in, c})
	return DoubleAffine{
		DoubleLinear:   M,
		DoubleAdditive: c,
	}, ok
}
// ProbablyEquivalentWords returns true if two Word encodings are probably equivalent and false if they're definitely
// not. The check is probabilistic: 20 random inputs are compared.
func ProbablyEquivalentWords(a, b Word) bool {
	for i := 0; i < 20; i++ {
		in := [4]byte{}
		rand.Read(in[:])
		x, y := a.Encode(in), b.Encode(in)
		if x != y {
			return false
		}
	}
	return true
}
// DecomposeWordLinear decomposes an opaque Word encoding into a WordLinear encoding.
// It evaluates the encoding on the 32 basis vectors (8 bits per byte position).
// The second return value is false when the recovered matrix is not invertible.
func DecomposeWordLinear(in Word) (WordLinear, bool) {
	m := matrix.Matrix{}
	for i := 0; i < 4; i++ {
		for j := uint(0); j < 8; j++ {
			x := [4]byte{}
			x[i] = 1 << j
			x = in.Encode(x)
			m = append(m, matrix.Row(x[:]))
		}
	}
	forwards := m.Transpose()
	backwards, ok := forwards.Invert()
	return WordLinear{
		Forwards:  forwards,
		Backwards: backwards,
	}, ok
}
// DecomposeWordAffine decomposes an opaque Word encoding into a WordAffine encoding.
// The additive constant is the image of zero; the remainder is decomposed
// as a linear encoding.
func DecomposeWordAffine(in Word) (WordAffine, bool) {
	c := WordAdditive(in.Encode([4]byte{}))
	M, ok := DecomposeWordLinear(ComposedWords{in, c})
	return WordAffine{
		WordLinear:   M,
		WordAdditive: c,
	}, ok
}
// ProbablyEquivalentBlocks returns true if two Block encodings are probably equivalent and false if they're definitely
// not. The check is probabilistic: 20 random inputs are compared.
func ProbablyEquivalentBlocks(a, b Block) bool {
	for i := 0; i < 20; i++ {
		in := [16]byte{}
		rand.Read(in[:])
		x, y := a.Encode(in), b.Encode(in)
		if x != y {
			return false
		}
	}
	return true
}
// DecomposeBlockLinear decomposes an opaque Block encoding into a BlockLinear encoding.
// It evaluates the encoding on the 128 basis vectors (8 bits per byte position).
// The second return value is false when the recovered matrix is not invertible.
func DecomposeBlockLinear(in Block) (BlockLinear, bool) {
	m := matrix.Matrix{}
	for i := 0; i < 16; i++ {
		for j := uint(0); j < 8; j++ {
			x := [16]byte{}
			x[i] = 1 << j
			x = in.Encode(x)
			m = append(m, matrix.Row(x[:]))
		}
	}
	forwards := m.Transpose()
	backwards, ok := forwards.Invert()
	return BlockLinear{
		Forwards:  forwards,
		Backwards: backwards,
	}, ok
}
// DecomposeBlockAffine decomposes an opaque Block encoding into a BlockAffine encoding.
// The additive constant is the image of zero; the remainder is decomposed
// as a linear encoding.
func DecomposeBlockAffine(in Block) (BlockAffine, bool) {
	c := BlockAdditive(in.Encode([16]byte{}))
	M, ok := DecomposeBlockLinear(ComposedBlocks{in, c})
	return BlockAffine{
		BlockLinear:   M,
		BlockAdditive: c,
	}, ok
}
// DecomposeConcatenatedBlock decomposes an opaque concatenated Block encoding into an explicit one.
// For each of the 16 byte positions it tabulates the encoding's action on
// that position (holding the others at zero), producing one S-box with
// both forward (EncKey) and inverse (DecKey) lookup tables per position.
func DecomposeConcatenatedBlock(in Block) (out ConcatenatedBlock) {
	for pos := 0; pos < 16; pos++ {
		sbox := SBox{}
		for x := 0; x < 256; x++ {
			X := [16]byte{}
			X[pos] = byte(x)
			Y := in.Encode(X)
			sbox.EncKey[x] = Y[pos]
			sbox.DecKey[Y[pos]] = X[pos]
		}
		out[pos] = sbox
	}
	return
}
package iso20022
// Amount of money for which goods or services are offered, sold, or bought.
type UnitPrice10 struct {
// Type and information about a price.
Type *TypeOfPrice10Code `xml:"Tp"`
// Type and information about a price.
ExtendedType *Extended350Code `xml:"XtndedTp"`
// Value of the price, eg, as a currency and value.
Value *PriceValue1 `xml:"Val"`
// Type of pricing calculation method.
PriceMethod *PriceMethod1Code `xml:"PricMtd,omitempty"`
// Specifies the number of days used for calculating the accrued interest amount.
NumberOfDaysAccrued *Number `xml:"NbOfDaysAcrd,omitempty"`
// Amount included in the NAV that corresponds to gains directly or indirectly derived from interest payment in the scope of the European Directive on taxation of savings income in the form of interest payments.
TaxableIncomePerShare *ActiveCurrencyAnd13DecimalAmount `xml:"TaxblIncmPerShr,omitempty"`
// Specifies whether the fund calculates a taxable interest per share (TIS).
TaxableIncomePerShareCalculated *TaxableIncomePerShareCalculated2Code `xml:"TaxblIncmPerShrClctd,omitempty"`
// Specifies whether the fund calculates a taxable interest per share (TIS).
ExtendedTaxableIncomePerShareCalculated *Extended350Code `xml:"XtndedTaxblIncmPerShrClctd,omitempty"`
// Specifies the reason why the price is different from the current market price.
PriceDifferenceReason *Max350Text `xml:"PricDiffRsn,omitempty"`
}
// SetType stores value as the price type code.
func (u *UnitPrice10) SetType(value string) {
	u.Type = (*TypeOfPrice10Code)(&value)
}
// SetExtendedType stores value as the extended price type code.
func (u *UnitPrice10) SetExtendedType(value string) {
	u.ExtendedType = (*Extended350Code)(&value)
}
// AddValue allocates and returns the nested PriceValue1 so the caller can populate it.
func (u *UnitPrice10) AddValue() *PriceValue1 {
	u.Value = new(PriceValue1)
	return u.Value
}
// SetPriceMethod stores value as the pricing calculation method code.
func (u *UnitPrice10) SetPriceMethod(value string) {
	u.PriceMethod = (*PriceMethod1Code)(&value)
}
// SetNumberOfDaysAccrued stores value as the accrued-interest day count.
func (u *UnitPrice10) SetNumberOfDaysAccrued(value string) {
	u.NumberOfDaysAccrued = (*Number)(&value)
}
// SetTaxableIncomePerShare stores an amount with its currency code.
func (u *UnitPrice10) SetTaxableIncomePerShare(value, currency string) {
	u.TaxableIncomePerShare = NewActiveCurrencyAnd13DecimalAmount(value, currency)
}
// SetTaxableIncomePerShareCalculated stores the TIS calculation indicator.
func (u *UnitPrice10) SetTaxableIncomePerShareCalculated(value string) {
	u.TaxableIncomePerShareCalculated = (*TaxableIncomePerShareCalculated2Code)(&value)
}
// SetExtendedTaxableIncomePerShareCalculated stores the extended TIS indicator.
func (u *UnitPrice10) SetExtendedTaxableIncomePerShareCalculated(value string) {
	u.ExtendedTaxableIncomePerShareCalculated = (*Extended350Code)(&value)
}
// SetPriceDifferenceReason stores the free-text price difference reason.
func (u *UnitPrice10) SetPriceDifferenceReason(value string) {
	u.PriceDifferenceReason = (*Max350Text)(&value)
}
package kurobako
import (
"encoding/json"
"fmt"
"math"
)
// isFinite reports whether v is a real, finite number
// (neither NaN nor an infinity of either sign).
func isFinite(v float64) bool {
	return !math.IsNaN(v) && !math.IsInf(v, 0)
}
// ContinuousRange represents a numerical continuous range.
// Non-finite bounds (±Inf) denote an unbounded side; see the custom
// JSON (un)marshalling below.
type ContinuousRange struct {
	// Low is the lower bound of the range (inclusive).
	Low float64 `json:"low"`
	// High is the upper bound of the range (exclusive).
	High float64 `json:"high"`
}
// ToRange creates a Range object that contains the receiver object.
func (r ContinuousRange) ToRange() Range {
	return Range{r}
}
// MarshalJSON encodes a ContinuousRange object to JSON bytes.
// Non-finite bounds (±Inf, NaN) are left out of the output entirely,
// marking that side of the range as unbounded.
func (r ContinuousRange) MarshalJSON() ([]byte, error) {
	fields := map[string]interface{}{"type": "CONTINUOUS"}
	if isFinite(r.Low) {
		fields["low"] = r.Low
	}
	if isFinite(r.High) {
		fields["high"] = r.High
	}
	return json.Marshal(fields)
}
// UnmarshalJSON decodes a ContinuousRange object from JSON bytes.
// Missing bounds are interpreted as unbounded: an absent "low" becomes
// -Inf and an absent "high" becomes +Inf, mirroring MarshalJSON which
// omits non-finite bounds.
func (r *ContinuousRange) UnmarshalJSON(data []byte) error {
	var m struct {
		Low  *float64 `json:"low"`
		High *float64 `json:"high"`
	}
	if err := json.Unmarshal(data, &m); err != nil {
		return err
	}
	if m.Low == nil {
		r.Low = math.Inf(-1)
	} else {
		r.Low = *m.Low
	}
	if m.High == nil {
		r.High = math.Inf(0)
	} else {
		r.High = *m.High
	}
	return nil
}
// DiscreteRange represents a numerical discrete range.
type DiscreteRange struct {
	// Low is the lower bound of the range (inclusive).
	Low int64 `json:"low"`
	// High is the upper bound of the range (exclusive).
	High int64 `json:"high"`
}
// ToRange create a Range object that contains the receiver object.
func (r DiscreteRange) ToRange() Range {
	return Range{r}
}
// CategoricalRange represents a categorical range (choices).
type CategoricalRange struct {
	// Choices is the possible values in the range.
	Choices []string `json:"choices"`
}
// ToRange creates a Range object that contains the receiver object.
func (r CategoricalRange) ToRange() Range {
	return Range{r}
}
// Range represents the range of a parameter.
// It is a tagged union over ContinuousRange, DiscreteRange and
// CategoricalRange; construct it via the ToRange methods.
type Range struct {
	inner interface{}
}
// Low is the lower bound of the range (inclusive).
// For a CategoricalRange it is always 0.
// Panics on a zero-valued Range (no inner range set).
func (r *Range) Low() float64 {
	switch x := (r.inner).(type) {
	case ContinuousRange:
		return x.Low
	case DiscreteRange:
		return float64(x.Low)
	case CategoricalRange:
		return 0.0
	default:
		panic("unreachable")
	}
}
// High is the upper bound of the range (exclusive).
// For a CategoricalRange it is the number of choices.
// Panics on a zero-valued Range (no inner range set).
func (r *Range) High() float64 {
	switch x := (r.inner).(type) {
	case ContinuousRange:
		return x.High
	case DiscreteRange:
		return float64(x.High)
	case CategoricalRange:
		return float64(len(x.Choices))
	default:
		panic("unreachable")
	}
}
// AsContinuousRange tries to return the inner object of the range as a ContinuousRange object.
// It returns nil when the range holds a different kind.
func (r *Range) AsContinuousRange() *ContinuousRange {
	if x, ok := r.inner.(ContinuousRange); ok {
		return &x
	}
	return nil
}

// AsDiscreteRange tries to return the inner object of the range as a DiscreteRange object.
// It returns nil when the range holds a different kind.
func (r *Range) AsDiscreteRange() *DiscreteRange {
	if x, ok := r.inner.(DiscreteRange); ok {
		return &x
	}
	return nil
}

// AsCategoricalRange tries to return the inner object of the range as a CategoricalRange object.
// It returns nil when the range holds a different kind.
func (r *Range) AsCategoricalRange() *CategoricalRange {
	if x, ok := r.inner.(CategoricalRange); ok {
		return &x
	}
	return nil
}
// MarshalJSON encodes a range object to JSON bytes.
// A "type" discriminator field (CONTINUOUS/DISCRETE/CATEGORICAL) is
// included so UnmarshalJSON can recover the concrete kind.
// ContinuousRange delegates to its own marshaller (which omits
// non-finite bounds). Panics on a zero-valued Range.
func (r Range) MarshalJSON() ([]byte, error) {
	if x := r.AsContinuousRange(); x != nil {
		return json.Marshal(x)
	} else if x := r.AsDiscreteRange(); x != nil {
		return json.Marshal(map[string]interface{}{
			"type": "DISCRETE",
			"low":  x.Low,
			"high": x.High,
		})
	} else if x := r.AsCategoricalRange(); x != nil {
		return json.Marshal(map[string]interface{}{
			"type":    "CATEGORICAL",
			"choices": x.Choices,
		})
	} else {
		panic("unreachable")
	}
}
// UnmarshalJSON decodes a Range object from JSON bytes.
func (r *Range) UnmarshalJSON(data []byte) error {
var m map[string]interface{}
if err := json.Unmarshal(data, &m); err != nil {
return err
}
switch m["type"] {
case "CONTINUOUS":
var x ContinuousRange
if err := json.Unmarshal(data, &x); err != nil {
return err
}
*r = x.ToRange()
case "DISCRETE":
var x DiscreteRange
if err := json.Unmarshal(data, &x); err != nil {
return err
}
*r = x.ToRange()
case "CATEGORICAL":
var x CategoricalRange
if err := json.Unmarshal(data, &x); err != nil {
return err
}
*r = x.ToRange()
default:
return fmt.Errorf("unknown or missing \"type\" field: %v", m["type"])
}
return nil
} | range.go | 0.828384 | 0.634345 | range.go | starcoder |
package components
import (
"sync"
"github.com/go-gl/mathgl/mgl32"
)
const (
	// TypeTransform represents a transform component's type.
	TypeTransform = "transform"
)

// Transform represents the world position of an entity.
// The package's implementation guards its state with an RWMutex, so the
// methods may be called from multiple goroutines.
type Transform interface {
	Component
	// Set sets the transform to a specific matrix.
	Set(mgl32.Mat4)
	// Data retrieves the transforms matrix.
	Data() mgl32.Mat4
	// Rotate rotates the transform by the angles passed into the method.
	Rotate(mgl32.Vec3)
	// Translate translates the transform by the value passed into the method.
	Translate(mgl32.Vec3)
	// Update translates the transform based on the first argument then rotates it using the second argument.
	Update(mgl32.Vec3, mgl32.Vec3)
}
// NewTransform creates a new transform component.
// The transform starts at the origin with no rotation and an identity
// model-view matrix.
func NewTransform() Transform {
	return &transform{
		modelView:   mgl32.Ident4(),
		rotation:    mgl32.Vec3{0, 0, 0},
		translation: mgl32.Vec3{0, 0, 0},
	}
}
// transform represents the data of the Transform component.
type transform struct {
	modelView   mgl32.Mat4   // current model-view matrix, derived from translation and rotation
	rotation    mgl32.Vec3   // accumulated per-axis rotation angles (composed in Z*Y*X order)
	translation mgl32.Vec3   // accumulated translation
	dataLock    sync.RWMutex // guards all fields for concurrent access
}
// Type retrieves the type of this component.
func (t *transform) Type() string {
	return TypeTransform
}

// Set replaces the transform's matrix wholesale.
func (t *transform) Set(modelView mgl32.Mat4) {
	t.dataLock.Lock()
	t.modelView = modelView
	t.dataLock.Unlock()
}

// Data returns a copy of the transform's current matrix.
func (t *transform) Data() mgl32.Mat4 {
	t.dataLock.RLock()
	m := t.modelView
	t.dataLock.RUnlock()
	return m
}
// Rotate rotates the transform by the angles passed into the method.
// The angles accumulate into t.rotation, and the model-view matrix is
// rebuilt from identity as translation * (Z*Y*X rotation) — any matrix
// previously installed via Set is overwritten.
func (t *transform) Rotate(rotate mgl32.Vec3) {
	t.dataLock.Lock()
	defer t.dataLock.Unlock()
	t.rotation = t.rotation.Add(rotate)
	total := t.rotation
	// Build individual axis rotations, then compose Z, then Y, then X.
	rotX := mgl32.HomogRotate3DX(total.X())
	rotY := mgl32.HomogRotate3DY(total.Y())
	rotZ := mgl32.HomogRotate3DZ(total.Z())
	rotMatrix := rotZ.Mul4(rotY).Mul4(rotX)
	trans := t.translation
	t.modelView = mgl32.Ident4().Mul4(mgl32.Translate3D(trans.X(), trans.Y(), trans.Z())).Mul4(rotMatrix)
}
// Translate translates the transform by the value passed into the method.
//
// NOTE(review): unlike Rotate and Update, which rebuild the matrix from
// identity, this multiplies the existing modelView by the FULL
// accumulated translation, so repeated Translate calls compound earlier
// translations into the matrix — confirm this asymmetry is intended.
func (t *transform) Translate(translate mgl32.Vec3) {
	t.dataLock.Lock()
	defer t.dataLock.Unlock()
	t.translation = t.translation.Add(translate)
	trans := t.translation
	t.modelView = t.modelView.Mul4(mgl32.Translate3D(trans.X(), trans.Y(), trans.Z()))
}
// Update translates and rotates the transform.
func (t *transform) Update(translate, rotate mgl32.Vec3) {
t.dataLock.Lock()
defer t.dataLock.Unlock()
t.translation = t.translation.Add(translate)
trans := t.translation
t.rotation = t.rotation.Add(rotate)
total := t.rotation
rotX := mgl32.HomogRotate3DX(total.X())
rotY := mgl32.HomogRotate3DY(total.Y())
rotZ := mgl32.HomogRotate3DZ(total.Z())
rotMatrix := rotZ.Mul4(rotY).Mul4(rotX)
trans = t.translation
t.modelView = mgl32.Ident4().Mul4(mgl32.Translate3D(trans.X(), trans.Y(), trans.Z())).Mul4(rotMatrix)
} | components/transform.go | 0.843412 | 0.72964 | transform.go | starcoder |
package text
import (
"fmt"
"time"
"github.com/antonmedv/expr"
"github.com/flanksource/commons/duration"
)
// MakeExpressionEnvs populates envs (in place) with the shared template
// helper groups plus the date/duration helpers defined in this file,
// and returns the same map.
func MakeExpressionEnvs(envs map[string]interface{}) map[string]interface{} {
	// GetTemplateFuncs is defined elsewhere in this package; expose each
	// of its helper groups under the group's name.
	for name, funcMap := range GetTemplateFuncs() {
		envs[name] = funcMap
	}
	envs["humanizeDuration"] = HumanizeDuration
	envs["Sprintf"] = fmt.Sprintf
	envs["Now"] = time.Now
	envs["Date"] = Date
	envs["Duration"] = Duration
	// Time comparison/arithmetic helpers; these double as operator
	// overloads in MakeExpressionOptions.
	envs["Equal"] = EqualTime
	envs["Before"] = Before
	envs["BeforeOrEqual"] = BeforeOrEqual
	envs["After"] = After
	envs["AfterOrEqual"] = AfterOrEqual
	envs["Add"] = Add
	envs["Sub"] = Sub
	// Duration counterparts of the comparison helpers above.
	envs["EqualDuration"] = EqualDuration
	envs["BeforeDuration"] = BeforeDuration
	envs["BeforeOrEqualDuration"] = BeforeOrEqualDuration
	envs["AfterDuration"] = AfterDuration
	envs["AfterOrEqualDuration"] = AfterOrEqualDuration
	envs["Age"] = Age
	return envs
}
// MakeExpressionOptions builds the expr compile options: the environment
// from MakeExpressionEnvs plus operator overloads that let ==, <, <=, >,
// >=, + and - work on time.Time and time.Duration values.
//
// NOTE(review): each comparison operator is registered twice (a
// time.Time and a time.Duration variant); this relies on expr selecting
// the overload whose signature matches the operand types — confirm
// against the expr library's Operator documentation.
func MakeExpressionOptions(envs map[string]interface{}) []expr.Option {
	// Operators override for date comprising.
	envs = MakeExpressionEnvs(envs)
	options := []expr.Option{
		expr.Env(envs),
		// Operators override for date comprising.
		expr.Operator("==", "Equal"),
		expr.Operator("<", "Before"),
		expr.Operator("<=", "BeforeOrEqual"),
		expr.Operator(">", "After"),
		expr.Operator(">=", "AfterOrEqual"),
		// Time and duration manipulation.
		expr.Operator("+", "Add"),
		expr.Operator("-", "Sub"),
		// Operators override for duration comprising.
		expr.Operator("==", "EqualDuration"),
		expr.Operator("<", "BeforeDuration"),
		expr.Operator("<=", "BeforeOrEqualDuration"),
		expr.Operator(">", "AfterDuration"),
		expr.Operator(">=", "AfterOrEqualDuration"),
	}
	return options
}
// Date parses an RFC3339 timestamp, panicking when the input is
// malformed (intended for use inside expression evaluation, where a
// panic surfaces as an evaluation error).
func Date(s string) time.Time {
	parsed, err := time.Parse(time.RFC3339, s)
	if err == nil {
		return parsed
	}
	panic(err)
}
// Duration parses a human-friendly duration string using the
// flanksource/commons duration parser and panics when the input is
// malformed (intended for use inside expression evaluation).
func Duration(s string) time.Duration {
	d, err := duration.ParseDuration(s)
	if err != nil {
		panic(err)
	}
	// duration.Duration converts to the standard time.Duration.
	return time.Duration(d)
}
func EqualTime(a, b time.Time) bool { return a.Equal(b) }
func Before(a, b time.Time) bool { return a.Before(b) }
func BeforeOrEqual(a, b time.Time) bool { return a.Before(b) || a.Equal(b) }
func After(a, b time.Time) bool { return a.After(b) }
func AfterOrEqual(a, b time.Time) bool { return a.After(b) || a.Equal(b) }
func Add(a time.Time, b time.Duration) time.Time { return a.Add(b) }
func Sub(a, b time.Time) time.Duration { return a.Sub(b) }
func EqualDuration(a, b time.Duration) bool { return a == b }
func BeforeDuration(a, b time.Duration) bool { return a < b }
func BeforeOrEqualDuration(a, b time.Duration) bool { return a <= b }
func AfterDuration(a, b time.Duration) bool { return a > b }
func AfterOrEqualDuration(a, b time.Duration) bool { return a >= b }
// Age returns the elapsed time since the RFC3339 timestamp s, panicking
// when s cannot be parsed (intended for use inside expression
// evaluation).
func Age(s string) time.Duration {
	parsed, err := time.Parse(time.RFC3339, s)
	if err != nil {
		panic(err)
	}
	return time.Since(parsed)
}
package base
import (
"time"
)
const (
DATETIME_FORMAT = "2006-01-02 15:04:05"
DATE_FORMAT = "2006-01-02"
TIME_FORMAT = "15:04:05"
Day = time.Hour * 24
DaySec = 24 * 3600
)
func Format(t time.Time, layout string) string {
return t.Format(layout)
}
func FormatDateTime(t time.Time) string {
return t.Format(DATETIME_FORMAT)
}
func FormatDate(t time.Time) string {
return t.Format(DATE_FORMAT)
}
func FormatTime(t time.Time) string {
return t.Format(TIME_FORMAT)
}
func Parse(layout string, value string) (time.Time, error) {
return time.ParseInLocation(layout, value, time.Local)
}
func ParseDateTime(value string) (time.Time, error) {
return time.ParseInLocation(DATETIME_FORMAT, value, time.Local)
}
func ParseDate(value string) (time.Time, error) {
return time.ParseInLocation(DATE_FORMAT, value, time.Local)
}
func ParseTime(value string) (time.Time, error) {
return time.ParseInLocation(TIME_FORMAT, value, time.Local)
}
func Date(year int, month time.Month, day, hour, min, sec int) time.Time {
return time.Date(year, month, day, hour, min, sec, 0, time.Local)
}
func Unix(sec int64) time.Time {
return time.Unix(sec, 0)
}
func IntSecond(sec int) time.Duration {
return time.Second * time.Duration(sec)
}
func IsSameDay(t1, t2 time.Time) bool {
y1, m1, d1 := t1.Date()
y2, m2, d2 := t2.Date()
return d1 == d2 && m1 == m2 && y1 == y2
}
// CheckMomentHappend reports whether the daily moment hour:min:sec
// (anchored to t's calendar day, local time) has occurred strictly
// between t and now — i.e. "since t, has the daily trigger fired?".
// (Name "Happend" kept as-is for compatibility.)
func CheckMomentHappend(t time.Time, hour, min, sec int) bool {
	now := time.Now()
	moment := time.Date(t.Year(), t.Month(), t.Day(), hour, min, sec, 0, time.Local)
	// The moment on t's day lies at or after now: nothing has fired yet.
	if !moment.Before(now) {
		return false
	}
	// The moment lies in the open interval (t, now): it fired.
	if moment.After(t) {
		return true
	}
	// Otherwise the moment preceded t; check whether the next day's
	// occurrence has already passed.
	moment = moment.AddDate(0, 0, 1)
	return moment.Before(now)
}

// GetMomentDelay returns how long to wait from now until the next
// occurrence of the daily moment hour:min:sec (local time).
func GetMomentDelay(hour, min, sec int) time.Duration {
	now := time.Now()
	moment := time.Date(now.Year(), now.Month(), now.Day(), hour, min, sec, 0, time.Local)
	if moment.After(now) {
		return moment.Sub(now)
	}
	// Today's moment has passed: wait until the same clock time tomorrow.
	// (Assumes 24-hour days; DST transitions skew this slightly.)
	return time.Hour*24 - now.Sub(moment)
}
func GetZero(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.Local)
}
//GetDeltaDays t1-t2
func GetDeltaDays(t1, t2 time.Time) int {
return int(GetZero(t1).Sub(GetZero(t2)) / Day)
}
//WeekDay 1~7
func WeekDay(t time.Time) int {
week := int(t.Weekday())
if week == 0 {
week = 7
}
return week
}
func IsSameWeek(t1, t2 time.Time) bool {
year1, week1 := t1.ISOWeek()
year2, week2 := t1.ISOWeek()
return week1 == week2 && year1 == year2
} | base/systime.go | 0.675872 | 0.411584 | systime.go | starcoder |
// Package numbers implements various numerical functions.
package numbers
import (
"image"
"math"
)
// RoundToNonZeroPlaces rounds the float up, so that it has at least the provided
// number of non-zero decimal places.
// Returns the rounded float and the number of leading decimal places that
// are zero. Returns the original float when places is zero. Negative places
// are treated as positive, so that -2 == 2.
// E.g. RoundToNonZeroPlaces(0.0037, 1) == (0.004, 2): two leading
// decimal places are zero and the value is rounded up one place beyond
// them.
func RoundToNonZeroPlaces(f float64, places int) (float64, int) {
	if f == 0 {
		return 0, 0
	}
	// Only the fractional part determines how many leading decimal
	// places are zero.
	decOnly := zeroBeforeDecimal(f)
	if decOnly == 0 {
		// No fractional part, nothing to round.
		return f, 0
	}
	nzMult := multToNonZero(decOnly)
	if places == 0 {
		return f, multToPlaces(nzMult)
	}
	plMult := placesToMult(places)
	// Scale so the requested places sit left of the decimal point,
	// round up, then scale back.
	m := float64(nzMult * plMult)
	return math.Ceil(f*m) / m, multToPlaces(nzMult)
}
// multToNonZero returns the power of ten that shifts f's first non-zero
// digit into the first decimal place. The float must not be zero.
func multToNonZero(f float64) int {
	v := f
	if v < 0 {
		v = -v
	}
	mult := 1
	for v < 0.1 {
		v *= 10
		mult *= 10
	}
	return mult
}

// placesToMult returns 10 raised to |places|.
func placesToMult(places int) int {
	n := places
	if n < 0 {
		n = -n
	}
	mult := 1
	for ; n > 0; n-- {
		mult *= 10
	}
	return mult
}

// multToPlaces is the inverse of placesToMult for powers of ten:
// 1 -> 0, 10 -> 1, 100 -> 2, ...
func multToPlaces(mult int) int {
	places := 0
	for m := mult; m > 1; m /= 10 {
		places++
	}
	return places
}

// zeroBeforeDecimal strips the integer part of f, keeping the original
// sign on the fractional remainder.
func zeroBeforeDecimal(f float64) float64 {
	sign := 1.0
	v := f
	if v < 0 {
		v = -v
		sign = -1
	}
	return (v - math.Floor(v)) * sign
}
// Round returns the nearest integer, rounding half away from zero.
// Kept for backwards compatibility with Go versions predating
// math.Round (added in Go 1.10).
func Round(x float64) float64 {
	whole := math.Trunc(x)
	if math.Abs(x-whole) < 0.5 {
		return whole
	}
	return whole + math.Copysign(1, x)
}
// MinMax returns the smallest and the largest value among the provided values.
// Returns (0, 0) if there are no values.
func MinMax(values []float64) (min, max float64) {
if len(values) == 0 {
return 0, 0
}
min = math.MaxFloat64
max = -1 * math.MaxFloat64
for _, v := range values {
if v < min {
min = v
}
if v > max {
max = v
}
}
return min, max
}
// MinMaxInts returns the smallest and the largest int value among the
// provided values. Returns (0, 0) if there are no values.
func MinMaxInts(values []int) (min, max int) {
	if len(values) == 0 {
		return 0, 0
	}
	// Seed from the first element rather than ±math.MaxInt32 so 64-bit
	// values outside the int32 range are handled correctly.
	min, max = values[0], values[0]
	for _, v := range values[1:] {
		if v < min {
			min = v
		}
		if v > max {
			max = v
		}
	}
	return min, max
}
// DegreesToRadians converts degrees to the equivalent in radians.
func DegreesToRadians(degrees int) float64 {
if degrees > 360 {
degrees %= 360
}
return (float64(degrees) / 180) * math.Pi
}
// RadiansToDegrees converts radians to the equivalent in degrees.
func RadiansToDegrees(radians float64) int {
d := int(Round(radians * 180 / math.Pi))
if d < 0 {
d += 360
}
return d
}
// Abs returns the absolute value of x.
func Abs(x int) int {
if x < 0 {
return -x
}
return x
}
// findGCF finds the greatest common factor of two integers.
func findGCF(a, b int) int {
if a == 0 || b == 0 {
return 0
}
a = Abs(a)
b = Abs(b)
// https://en.wikipedia.org/wiki/Euclidean_algorithm
for {
rem := a % b
a = b
b = rem
if b == 0 {
break
}
}
return a
}
// SimplifyRatio simplifies the given ratio.
func SimplifyRatio(ratio image.Point) image.Point {
gcf := findGCF(ratio.X, ratio.Y)
if gcf == 0 {
return image.ZP
}
return image.Point{
X: ratio.X / gcf,
Y: ratio.Y / gcf,
}
}
// SplitByRatio splits the provided number by the specified ratio.
func SplitByRatio(n int, ratio image.Point) image.Point {
sr := SimplifyRatio(ratio)
if sr.Eq(image.ZP) {
return image.ZP
}
fn := float64(n)
sum := float64(sr.X + sr.Y)
fact := fn / sum
return image.Point{
int(Round(fact * float64(sr.X))),
int(Round(fact * float64(sr.Y))),
}
} | internal/numbers/numbers.go | 0.873424 | 0.725308 | numbers.go | starcoder |
package utils
import (
"strings"
"unicode"
"unicode/utf8"
"unsafe"
)
// EmptySpace is a single ASCII space, used as the trim cutset in
// TokenizeToStringArray.
const EmptySpace = " "

// EmptyString is the empty string.
const EmptyString = ""

// Spliter is the path separator ("/"). (Name kept for backward
// compatibility; "Splitter" is the conventional spelling.)
const Spliter = "/"
// TokenizeToStringArray splits str on delimiters, optionally trimming
// each token of ASCII spaces (trimTokens) and optionally dropping
// tokens that are empty after trimming (ignoreEmptyTokens). It returns
// nil when str is empty.
//
// NOTE(review): the original (Java StringTokenizer-derived) contract
// treats every character of delimiters as an individual separator, but
// strings.Split uses delimiters as one multi-character separator —
// confirm which behavior callers actually rely on before changing
// either the doc or the code.
func TokenizeToStringArray(str,delimiters string,trimTokens,ignoreEmptyTokens bool) []*string {
	if str == EmptyString {
		return nil
	}
	tokens := make([]*string,0)
	for _,token := range strings.Split(str,delimiters) {
		if trimTokens {
			token = strings.Trim(token,EmptySpace)
		}
		if !ignoreEmptyTokens || token != EmptyString {
			// Copy into a fresh variable so each appended pointer refers
			// to its own value rather than the shared loop variable.
			var item = token
			tokens = append(tokens,&item)
		}
	}
	return tokens
}
// TokenizeToStringArray1 is a convenience wrapper around
// TokenizeToStringArray that trims tokens and drops empty ones.
func TokenizeToStringArray1(str,delimiters string) []*string {
	return TokenizeToStringArray(str,delimiters,true,true)
}
// Str2Bytes reinterprets the string's backing storage as a []byte
// without copying, by building a slice header {data, len, cap=len}.
//
// WARNING: the returned slice aliases immutable string memory — writing
// to it is undefined behavior. The header-layout assumptions may also
// break under future runtime changes; prefer []byte(s) unless profiling
// justifies the zero-copy conversion.
func Str2Bytes(s string) []byte {
	x := (*[2]uintptr)(unsafe.Pointer(&s))
	h := [3]uintptr{x[0], x[1], x[1]}
	return *(*[]byte)(unsafe.Pointer(&h))
}

// Bytes2Str reinterprets the byte slice's backing storage as a string
// without copying.
//
// WARNING: the string aliases b's memory, so any later mutation of b
// changes the "immutable" string. Prefer string(b) unless profiling
// justifies the zero-copy conversion.
func Bytes2Str(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
// StartsWith reports whether str, starting at byte offset toffset,
// begins with prefix. A negative toffset, or one that leaves fewer
// bytes in str than prefix needs, yields false.
//
// BUG FIX: the previous implementation never advanced its indices on
// matching bytes (the increments sat after an unconditional return), so
// it compared str[toffset] against prefix[0] over and over and accepted
// wrong prefixes such as "gx" for "golang". It also mixed rune counts
// with byte indexing; offsets and lengths are now uniformly bytes.
func StartsWith(str, prefix string, toffset int) bool {
	if toffset < 0 || toffset > len(str)-len(prefix) {
		return false
	}
	return strings.HasPrefix(str[toffset:], prefix)
}
// IsBlank reports whether source is empty or consists solely of
// Unicode whitespace.
func IsBlank(source string) bool {
	for _, r := range source {
		if !unicode.IsSpace(r) {
			return false
		}
	}
	return true
}

// HasText reports whether source contains at least one non-whitespace
// character.
func HasText(source string) bool {
	return !IsBlank(source)
}
package weekendraytracer
import (
"math"
"math/rand"
"github.com/go-gl/mathgl/mgl64"
)
// Material describes how light reflects off a surface
type Material interface {
	// Scatter describes the surface's response to the ray rIn,
	// at the intersection point given in hit.
	// Returns: whether the ray is scattered,
	// a vector describing the attenuation of the ray,
	// and the scattered ray (if any)
	Scatter(rIn Ray, hit HitRecord) (bool, mgl64.Vec3, Ray)
}

// Lambertian surfaces are diffuse, and scatter light in random directions.
type Lambertian struct {
	// Albedo is the per-bounce attenuation (RGB reflectance).
	Albedo mgl64.Vec3
}

// Scatter simulates the reflection off a diffuse surface.
// The bounce target is the hit point offset along the surface normal
// and jittered by a random point inside the unit sphere; Lambertian
// surfaces always scatter.
func (l Lambertian) Scatter(rIn Ray, hit HitRecord) (bool, mgl64.Vec3, Ray) {
	target := hit.P.Add(hit.Normal).Add(randomInsideUnitSphere())
	scattered := Ray{A: hit.P, B: target.Sub(hit.P)}
	attenuation := l.Albedo
	return true, attenuation, scattered
}

// randomInsideUnitSphere generates a random vector inside a unit sphere
// by rejection sampling: draw uniformly from the cube [-1,1]^3 and
// retry until the point lands inside the sphere. Uses the global
// (unseeded here) math/rand source.
func randomInsideUnitSphere() mgl64.Vec3 {
	// Start with a sentinel well outside the sphere so the loop body
	// runs at least once.
	p := mgl64.Vec3{99.0, 99.0, 99.0}
	for p.LenSqr() >= 1.0 {
		x := (2.0 * rand.Float64()) - 1.0
		y := (2.0 * rand.Float64()) - 1.0
		z := (2.0 * rand.Float64()) - 1.0
		p = mgl64.Vec3{x, y, z}
	}
	return p
}
// Metallic surfaces reflect light.
type Metallic struct {
	// Albedo is the per-bounce attenuation (RGB reflectance).
	Albedo mgl64.Vec3
	// Fuzziness is the radius of the random jitter sphere applied to
	// reflected rays; 0 yields a perfect mirror.
	Fuzziness float64
}

// Scatter simulates the reflection off a metallic surface.
// The ray counts as absorbed (not scattered) when the fuzzed reflection
// points back into the surface.
func (m Metallic) Scatter(rIn Ray, hit HitRecord) (bool, mgl64.Vec3, Ray) {
	// To simulate uneven surfaces, we distort the reflection slightly:
	fuzz := randomInsideUnitSphere().Mul(m.Fuzziness)
	reflected := reflect(rIn.Direction().Normalize(), hit.Normal)
	scattered := Ray{A: hit.P, B: reflected.Add(fuzz)}
	attenuation := m.Albedo
	isScattered := scattered.Direction().Dot(hit.Normal) > 0
	return isScattered, attenuation, scattered
}

// reflect calculates the mirror reflection of vector v against the
// surface normal n: v - 2(v·n)n.
func reflect(v, n mgl64.Vec3) mgl64.Vec3 {
	t := n.Mul(2 * v.Dot(n))
	return v.Sub(t)
}
// Dielectric surfaces both reflect and refract light:
type Dielectric struct {
	// Refractance is the material's refractive index (e.g. ~1.5 for
	// glass); it supplies the Snell's-law ratio used in Scatter.
	Refractance float64
}

// refract applies Snell's law in vector form to v against the surface
// normal n, returning whether refraction is possible and the refracted
// direction when it is. A non-positive discriminant means total
// internal reflection, in which case (false, zero vector) is returned.
func refract(v mgl64.Vec3, n mgl64.Vec3, refractRatio float64) (bool, mgl64.Vec3) {
	uv := v.Normalize()
	dt := uv.Dot(n)
	discriminant := 1.0 - refractRatio*refractRatio*(1.0-dt*dt)
	if discriminant > 0.0 {
		a := uv.Sub(n.Mul(dt)).Mul(refractRatio)
		b := n.Mul(math.Sqrt(discriminant))
		refracted := a.Sub(b)
		return true, refracted
	}
	return false, mgl64.Vec3{}
}
// schlick is Christophe Schlick's polynomial approximation of
// the specular reflection coefficient:
func schlick(cosine float64, refract float64) float64 {
r0 := (1.0 - refract) / (1.0 + refract)
r0 = r0 * r0
return r0 + (1.0-r0)*math.Pow((1-cosine), 5.0)
}
// Scatter simulates the reflection off a dielectric surface.
// The ray refracts when Snell's law permits; otherwise it reflects
// (total internal reflection). When both are possible, reflection is
// chosen at random with probability given by Schlick's approximation.
// Attenuation is always (1,1,1): a clear material absorbs nothing, so
// the ray is always scattered.
func (d Dielectric) Scatter(rIn Ray, hit HitRecord) (bool, mgl64.Vec3, Ray) {
	var outwardNormal mgl64.Vec3
	var refractRatio float64
	var cosine float64
	var reflectProb float64
	reflected := reflect(rIn.Direction(), hit.Normal)
	attenuation := mgl64.Vec3{1.0, 1.0, 1.0}
	if rIn.Direction().Dot(hit.Normal) > 0 {
		// The ray is exiting the material: flip the normal and use the
		// material's index directly as the ratio.
		outwardNormal = hit.Normal.Mul(-1.0)
		refractRatio = d.Refractance
		cosine = d.Refractance * rIn.Direction().Dot(hit.Normal) / rIn.Direction().Len()
	} else {
		// The ray is entering from outside (ambient index taken as 1).
		outwardNormal = hit.Normal
		refractRatio = 1.0 / d.Refractance
		cosine = -1.0 * rIn.Direction().Dot(hit.Normal) / rIn.Direction().Len()
	}
	isRefracted, refracted := refract(rIn.Direction(), outwardNormal, refractRatio)
	if isRefracted {
		reflectProb = schlick(cosine, d.Refractance)
	} else {
		// Total internal reflection: the ray must reflect.
		reflectProb = 1.0
	}
	var scattered mgl64.Vec3
	if rand.Float64() < reflectProb {
		scattered = reflected
	} else {
		scattered = refracted
	}
	return true, attenuation, Ray{A: hit.P, B: scattered}
}
package model
// predict2 evaluates one machine-generated gradient-boosted decision
// tree (an XGBoost export) over a dense feature vector and returns the
// tree's leaf score. Do not edit by hand — regenerate from the model.
//
// The recurring `(features[i] < threshold) || (features[i] == -1)`
// guard routes a feature down the "less than" branch; presumably -1
// encodes a missing value (XGBoost's default direction) — confirm
// against the exporter. The caller must supply at least 17 features.
func predict2(features []float64) float64 {
	if (features[2] < 0.5) || (features[2] == -1) {
		if (features[1] < 0.5) || (features[1] == -1) {
			if (features[0] < 0.5) || (features[0] == -1) {
				if (features[6] < 0.0760708675) || (features[6] == -1) {
					if (features[16] < 0.32698375) || (features[16] == -1) {
						if (features[7] < 0.268250227) || (features[7] == -1) {
							if (features[5] < 0.360856116) || (features[5] == -1) {
								return 0.552726686
							} else {
								return -3.35712743
							}
						} else {
							return 3.59137011
						}
					} else {
						if (features[14] < 0.838974714) || (features[14] == -1) {
							if (features[14] < 0.442470878) || (features[14] == -1) {
								return 6.50993109
							} else {
								return 2.97817874
							}
						} else {
							if (features[3] < 0.68460989) || (features[3] == -1) {
								return -1.5195111
							} else {
								return 0.495405883
							}
						}
					}
				} else {
					if (features[7] < 0.108870044) || (features[7] == -1) {
						if (features[9] < 0.264320642) || (features[9] == -1) {
							if (features[6] < 0.741639555) || (features[6] == -1) {
								return 3.48238301
							} else {
								return 58.0834923
							}
						} else {
							if (features[15] < 0.0908528864) || (features[15] == -1) {
								return 3.46036792
							} else {
								return 65.4346848
							}
						}
					} else {
						if (features[16] < 0.594482124) || (features[16] == -1) {
							if (features[7] < 0.711652517) || (features[7] == -1) {
								return 41.8439522
							} else {
								return 20.5362568
							}
						} else {
							if (features[15] < 0.559699059) || (features[15] == -1) {
								return 27.8098278
							} else {
								return 7.9497261
							}
						}
					}
				}
			} else {
				return 8.10288143
			}
		} else {
			return 1.1799258
		}
	} else {
		return 80.7237396
	}
}
package inputsample
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"github.com/pbanos/botanic/feature"
"github.com/pbanos/botanic/set"
)
/*
ReadSample represents a sample whose feature values
are retrieved from a reader. A feature value will be
requested using a FeatureValueRequester before reading it.
*/
type readSample struct {
	obtainedValues        map[string]interface{} // cache of values already read, keyed by feature name
	undefinedValue        string                 // input line interpreted as "no value"
	scanner               *bufio.Scanner         // line-oriented source of raw values
	featureValueRequester FeatureValueRequester  // used to prompt for and reject values
	features              []feature.Feature      // feature metadata this sample can resolve
}

/*
FeatureValueRequester represents a way to ask
for feature values and reject the given values.
*/
type FeatureValueRequester interface {
	// RequestValueFor prompts for a value of the given feature.
	RequestValueFor(feature.Feature) error
	// RejectValueFor signals that the supplied value is invalid for the
	// given feature.
	RejectValueFor(feature.Feature, interface{}) error
}
/*
New takes an io.Reader, a slice of features, a
FeatureValueRequester and an undefinedValue coding string
and returns a Sample.
The returned Sample's ValueFor method reads feature values by first
requesting them with the given FeatureValueRequester and then parsing
lines from the reader. Each value is expected on its own line (ending
with '\n'); a line equal to undefinedValue is interpreted as an
undefined value.
For a feature.ContinuousFeature, lines are read until one parses as a
valid float64. For a feature.DiscreteFeature, lines are read until one
matches a value available for the feature. Non-accepted values are
rejected via the FeatureValueRequester's RejectValueFor method.
Attempting to obtain a value for a Feature not in the given features
slice, or for another type of feature, returns nil.
*/
func New(r io.Reader, features []feature.Feature, featureValueRequester FeatureValueRequester, undefinedValue string) set.Sample {
	// BUG FIX: the scanner was previously built on os.Stdin, silently
	// ignoring the r argument. Read from r, keeping os.Stdin only as a
	// fallback when the caller passes nil.
	if r == nil {
		r = os.Stdin
	}
	scanner := bufio.NewScanner(r)
	return &readSample{make(map[string]interface{}), undefinedValue, scanner, featureValueRequester, features}
}
// ValueFor resolves the value of feature f for this sample.
// Already-read values are served from the obtainedValues cache; new
// values are requested through the FeatureValueRequester and then read
// and parsed according to the feature's concrete type.
func (rs *readSample) ValueFor(f feature.Feature) (interface{}, error) {
	value, ok := rs.obtainedValues[f.Name()]
	if ok {
		return value, nil
	}
	// Look up the full feature definition by name: the caller's f may
	// lack the metadata (e.g. available values) needed to parse input.
	var featureWithInfo feature.Feature
	for _, feature := range rs.features {
		if f.Name() == feature.Name() {
			featureWithInfo = feature
		}
	}
	if featureWithInfo == nil {
		return nil, fmt.Errorf("have no information about feature %s, do not know how to read its value", f.Name())
	}
	err := rs.featureValueRequester.RequestValueFor(featureWithInfo)
	if err != nil {
		return nil, err
	}
	// Dispatch on the concrete feature type to pick the right parser.
	switch featureWithInfo := featureWithInfo.(type) {
	case *feature.ContinuousFeature:
		return rs.readContinuousFeature(featureWithInfo)
	case *feature.DiscreteFeature:
		return rs.readDiscreteFeature(featureWithInfo)
	}
	return nil, fmt.Errorf("do not know how to read a value for features of type %T", featureWithInfo)
}
// readContinuousFeature scans lines until one parses as a float64 or
// equals the undefined-value marker (cached and returned as nil).
// Unparseable lines are rejected via the FeatureValueRequester; a
// rejection error or a scanner error aborts the read.
func (rs *readSample) readContinuousFeature(f feature.Feature) (interface{}, error) {
	var value float64
	var err error
	for rs.scanner.Scan() {
		line := rs.scanner.Text()
		if line == rs.undefinedValue {
			rs.obtainedValues[f.Name()] = nil
			return nil, nil
		}
		value, err = strconv.ParseFloat(line, 64)
		if err == nil {
			rs.obtainedValues[f.Name()] = value
			return value, nil
		}
		// The line did not parse; tell the requester and try again
		// unless the rejection itself fails.
		err = rs.featureValueRequester.RejectValueFor(f, line)
		if err != nil {
			break
		}
	}
	if err != nil {
		return nil, err
	}
	err = rs.scanner.Err()
	if err != nil {
		return nil, err
	}
	// Scan returned false without a scanner error: input ended.
	return nil, fmt.Errorf("EOF when requesting value")
}

// readDiscreteFeature scans lines until one matches a value available
// for df or equals the undefined-value marker (cached and returned as
// nil). Non-matching lines are rejected via the FeatureValueRequester;
// a rejection error or a scanner error aborts the read.
func (rs *readSample) readDiscreteFeature(df *feature.DiscreteFeature) (interface{}, error) {
	var err error
	for rs.scanner.Scan() {
		line := rs.scanner.Text()
		if line == rs.undefinedValue {
			rs.obtainedValues[df.Name()] = nil
			return nil, nil
		}
		for _, v := range df.AvailableValues() {
			if v == line {
				rs.obtainedValues[df.Name()] = v
				return v, nil
			}
		}
		err = rs.featureValueRequester.RejectValueFor(df, line)
		if err != nil {
			break
		}
	}
	if err != nil {
		return nil, err
	}
	err = rs.scanner.Err()
	if err != nil {
		return nil, err
	}
	return nil, fmt.Errorf("EOF when requesting value")
}
package filters
import (
"math"
"github.com/mattetti/audio/dsp/windows"
)
// Sinc represents a sinc function
// The sinc function also called the "sampling function," is a function that
// arises frequently in signal processing and the theory of Fourier transforms.
// The full name of the function is "sine cardinal," but it is commonly referred to by
// its abbreviation, "sinc."
// http://mathworld.wolfram.com/SincFunction.html
type Sinc struct {
	// CutOffFreq is the filter cutoff frequency, in the same unit as
	// SamplingFreq (only their ratio is used — see TransitionFreq).
	CutOffFreq   float64
	SamplingFreq int
	// Taps are the numbers of samples we go back in time when processing the sync function.
	// The tap numbers will affect the shape of the filter. The more taps, the more
	// shape but the more delays being injected.
	Taps int
	// Window is the window function applied to the truncated sinc to
	// control spectral leakage.
	Window windows.Function
	// _lowPassCoefs and _highPassCoefs memoize the computed filter
	// coefficients; access is not synchronized.
	_lowPassCoefs  []float64
	_highPassCoefs []float64
}
// LowPassCoefs returns the coeficients to create a low pass filter.
// It designs a windowed-sinc FIR filter of Taps+1 symmetric
// coefficients and memoizes the result on the receiver.
//
// NOTE(review): the memoization writes to the receiver without locking,
// so concurrent first calls race — confirm single-goroutine use.
func (s *Sinc) LowPassCoefs() []float64 {
	if s == nil {
		return nil
	}
	if s._lowPassCoefs != nil && len(s._lowPassCoefs) > 0 {
		return s._lowPassCoefs
	}
	size := s.Taps + 1
	// sample rate is 2 pi radians per second.
	// we get the cutt off frequency in radians per second
	b := (2 * math.Pi) * s.TransitionFreq()
	s._lowPassCoefs = make([]float64, size)
	// we use a window of size taps + 1
	winData := s.Window(size)
	// we only do half the taps because the coefs are symmetric
	// but we fill up all the coefs
	for i := 0; i < (s.Taps / 2); i++ {
		// c is this coefficient's offset from the filter center.
		c := float64(i) - float64(s.Taps)/2
		y := math.Sin(c*b) / (math.Pi * c)
		s._lowPassCoefs[i] = y * winData[i]
		s._lowPassCoefs[size-1-i] = s._lowPassCoefs[i]
	}
	// then we do the ones we missed in case we have an odd number of taps
	// (the center coefficient, where the sinc expression would be 0/0).
	s._lowPassCoefs[s.Taps/2] = 2 * s.TransitionFreq() * winData[s.Taps/2]
	return s._lowPassCoefs
}
// HighPassCoefs returns the coeficients to create a high pass filter.
// The response is derived by spectral inversion of the low-pass design:
// every coefficient is negated and the center coefficient is offset so
// the DC gain flips. The result is memoized on the receiver (same
// caveat about unsynchronized access as LowPassCoefs).
func (s *Sinc) HighPassCoefs() []float64 {
	if s == nil {
		return nil
	}
	if s._highPassCoefs != nil && len(s._highPassCoefs) > 0 {
		return s._highPassCoefs
	}
	// we take the low pass coesf and invert them
	size := s.Taps + 1
	s._highPassCoefs = make([]float64, size)
	lowPassCoefs := s.LowPassCoefs()
	winData := s.Window(size)
	for i := 0; i < (s.Taps / 2); i++ {
		s._highPassCoefs[i] = -lowPassCoefs[i]
		s._highPassCoefs[size-1-i] = s._highPassCoefs[i]
	}
	s._highPassCoefs[s.Taps/2] = (1 - 2*s.TransitionFreq()) * winData[s.Taps/2]
	return s._highPassCoefs
}
// TransitionFreq returns the cutoff frequency normalized by the sample
// rate (a dimensionless ratio). A nil receiver yields 0.
func (s *Sinc) TransitionFreq() float64 {
	if s == nil {
		return 0
	}
	return s.CutOffFreq / float64(s.SamplingFreq)
}
package dendrolog
// TreeRenderer provides a type which print arbitrary trees to ASCII-like text.
type TreeRenderer struct {
main stringBlock
collectionResult *collected
}
type inputNode interface {
Children() []inputNode
}
type collected struct {
current interface{}
children []*collected
}
// CollectFromTree walks an arbitrary tree by calling selector with each
// node. selector must report a node's children by invoking the supplied
// child callback, in display order; nil may be passed for a missing
// child. Every node of the tree is visited, and the collected structure
// is stored on the renderer for a later Render call.
func (renderer *TreeRenderer) CollectFromTree(tree interface{}, selector func(node interface{}, child func(childPointer interface{}))) {
	root := renderer.collectFromTree(tree, selector)
	renderer.collectionResult = root
}

// collectFromTree recursively snapshots the tree rooted at tree:
// selector reports each node's children through the callback, and nil
// children are recorded as placeholder nodes with a nil current value.
func (renderer *TreeRenderer) collectFromTree(tree interface{}, selector func(node interface{}, child func(childPointer interface{}))) *collected {
	var collectedChildren []*collected
	selector(tree, func(child interface{}) {
		var val *collected
		if child != nil {
			val = renderer.collectFromTree(child, selector)
		} else {
			// Preserve nil children as empty placeholders so sibling
			// ordering survives into rendering.
			val = &collected{
				current: nil,
			}
		}
		collectedChildren = append(collectedChildren, val)
	})
	return &collected{
		children: collectedChildren,
		current:  tree,
	}
}
// Render renders the tree collected by CollectFromTree into text,
// running stringifier on each node in the originally visited order.
// stringifier must return a string representation for every node it is
// given. Render returns the string "nil" when nothing was collected or
// CollectFromTree was never called.
func (renderer *TreeRenderer) Render(stringifier func(node interface{}) string) string {
	if renderer.collectionResult == nil {
		return "nil"
	}
	rendered := renderer.render(renderer.collectionResult, stringifier)
	return rendered.block.string() + "\n"
}

// render recursively renders current and its children. A node whose
// children are all nil placeholders is treated as a leaf; otherwise the
// children are rendered first and joined beneath the node by
// renderLines (defined elsewhere in this package).
func (renderer *TreeRenderer) render(current *collected, stringifier func(node interface{}) string) renderedNode {
	strRenderer := stringRenderer{}
	val := stringifier(current.current)
	root := strRenderer.createBlockFromString(val)
	allNil := true
	for _, child := range current.children {
		if child.current != nil {
			allNil = false
			break
		}
	}
	if allNil {
		// Leaf: the node's own text spans the whole rendered block.
		return renderedNode{block: root, start: 0, end: len(val) - 1}
	}
	renderedChildren := []renderedNode{}
	for _, child := range current.children {
		renderedChild := renderer.render(child, stringifier)
		renderedChildren = append(renderedChildren, renderedChild)
	}
	return renderLines(renderedChildren, root)
}
package math
import "math"
// Vector3 is a 3-component float32 vector.
type Vector3 struct {
	X, Y, Z float32
}

// NewVector3 builds a vector from its three components.
func NewVector3(x, y, z float32) *Vector3 {
	return &Vector3{x, y, z}
}

// Add returns a new vector offset by (x, y, z).
func (v *Vector3) Add(x, y, z float32) *Vector3 {
	return &Vector3{v.X + x, v.Y + y, v.Z + z}
}

// AddFromVector3 returns the component-wise sum v + vector3.
func (v *Vector3) AddFromVector3(vector3 *Vector3) *Vector3 {
	return v.Add(vector3.X, vector3.Y, vector3.Z)
}

// Subtract returns a new vector offset by (-x, -y, -z).
func (v *Vector3) Subtract(x, y, z float32) *Vector3 {
	return v.Add(-x, -y, -z)
}

// SubtractFromVector3 returns the component-wise difference v - vector3.
func (v *Vector3) SubtractFromVector3(vector3 *Vector3) *Vector3 {
	return v.Add(-vector3.X, -vector3.Y, -vector3.Z)
}

// Multiply returns v scaled by num.
func (v *Vector3) Multiply(num float32) *Vector3 {
	return &Vector3{v.X * num, v.Y * num, v.Z * num}
}

// Divide returns v with every component divided by num.
func (v *Vector3) Divide(num float32) *Vector3 {
	return &Vector3{v.X / num, v.Y / num, v.Z / num}
}
func (v *Vector3) Ceil() *Vector3 {
return NewVector3(
float32(math.Ceil(float64(v.X))),
float32(math.Ceil(float64(v.Y))),
float32(math.Ceil(float64(v.Z))),
)
}
func (v *Vector3) Floor() *Vector3 {
return NewVector3(
float32(math.Floor(float64(v.X))),
float32(math.Floor(float64(v.Y))),
float32(math.Floor(float64(v.Z))),
)
}
func (v *Vector3) Round() *Vector3 {
return NewVector3(
float32(math.Round(float64(v.X))),
float32(math.Round(float64(v.Y))),
float32(math.Round(float64(v.Z))),
)
}
func (v *Vector3) RoundToEven() *Vector3 {
return NewVector3(
float32(math.RoundToEven(float64(v.X))),
float32(math.RoundToEven(float64(v.Y))),
float32(math.RoundToEven(float64(v.Z))),
)
}
func (v *Vector3) Abs() *Vector3 {
return NewVector3(
float32(math.Abs(float64(v.X))),
float32(math.Abs(float64(v.Y))),
float32(math.Abs(float64(v.Z))),
)
}
// AsVector3 returns a copy of v.
func (v *Vector3) AsVector3() *Vector3 {
	return NewVector3(v.X, v.Y, v.Z)
}

// Distance returns the Euclidean distance between v and pos.
// sqrt32 is a package helper defined elsewhere — presumably a float32
// wrapper over math.Sqrt; confirm against its definition.
func (v *Vector3) Distance(pos *Vector3) float32 {
	return sqrt32(v.DistanceSquared(pos))
}

// DistanceSquared returns the squared Euclidean distance between v and pos.
// pow32 is a package helper defined elsewhere; it appears to raise the
// difference of the two components to the given power — confirm.
func (v *Vector3) DistanceSquared(pos *Vector3) float32 {
	return pow32(v.X, pos.X, 2) + pow32(v.Y, pos.Y, 2) + pow32(v.Z, pos.Z, 2)
}

// MaxPlainDistance returns the larger of the absolute X and Z distances
// to pos; the Y component is not considered.
func (v *Vector3) MaxPlainDistance(pos *Vector3) float32 {
	return float32(math.Max(
		math.Abs(float64(v.X-pos.X)),
		math.Abs(float64(v.Z-pos.Z)),
	))
}

// Length returns the Euclidean length (magnitude) of v.
func (v *Vector3) Length() float32 {
	return sqrt32(v.LengthSquared())
}

// LengthSquared returns the squared length of v (avoids the sqrt).
func (v *Vector3) LengthSquared() float32 {
	return v.X*v.X + v.Y*v.Y + v.Z*v.Z
}
// Normalize returns v scaled to unit length, or a zero vector when v has
// zero length (avoids division by zero).
func (v *Vector3) Normalize() *Vector3 {
	l := v.LengthSquared()
	if l > 0 {
		return v.Divide(sqrt32(l))
	}
	return new(Vector3)
}

// Dot returns the dot product of v and pos.
func (v *Vector3) Dot(pos *Vector3) float32 {
	return v.X*pos.X + v.Y*pos.Y + v.Z*pos.Z
}

// Cross returns the cross product v × vector3.
func (v *Vector3) Cross(vector3 *Vector3) *Vector3 {
	return NewVector3(
		v.Y*vector3.Z-v.Z*vector3.Y,
		v.Z*vector3.X-v.X*vector3.Z,
		v.X*vector3.Y-v.Y*vector3.X,
	)
}

// Equals reports exact component-wise equality (no epsilon tolerance).
func (v *Vector3) Equals(vector3 *Vector3) bool {
	return v.X == vector3.X && v.Y == vector3.Y && v.Z == vector3.Z
}

// SetComponents mutates v in place to the given component values.
func (v *Vector3) SetComponents(x, y, z float32) {
	v.X = x
	v.Y = y
	v.Z = z
}
// GetIntermediateWithXValue returns a new vector with x value equal to the second parameter,
// along the line between this vector and the passed in vector, or nil if not possible.
func (v *Vector3) GetIntermediateWithXValue(vector3 *Vector3, x float32) *Vector3 {
	dx := vector3.X - v.X
	// Segment (nearly) flat in x: no unique crossing of the plane x = const.
	if dx*dx < 0.0000001 {
		return nil
	}
	// Interpolation parameter along the segment; outside [0, 1] the
	// crossing lies beyond the endpoints.
	f := (x - v.X) / dx
	if f < 0 || f > 1 {
		return nil
	}
	dy := vector3.Y - v.Y
	dz := vector3.Z - v.Z
	return NewVector3(x, v.Y+dy*f, v.Z+dz*f)
}
// GetIntermediateWithYValue returns a new vector with y value equal to the second parameter,
// along the line between this vector and the passed in vector, or nil if not possible.
func (v *Vector3) GetIntermediateWithYValue(vector3 *Vector3, y float32) *Vector3 {
	xDiff := vector3.X - v.X
	yDiff := vector3.Y - v.Y
	zDiff := vector3.Z - v.Z
	// Segment (nearly) flat in y: no unique crossing of the plane y = const.
	if yDiff*yDiff < 0.0000001 {
		return nil
	}
	// Interpolation parameter along the segment; outside [0, 1] the
	// crossing lies beyond the endpoints.
	f := (y - v.Y) / yDiff
	if f < 0 || f > 1 {
		return nil
	} else {
		return NewVector3(v.X+xDiff*f, y, v.Z+zDiff*f)
	}
}
// GetIntermediateWithZValue returns a new vector with z value equal to the second parameter,
// along the line between this vector and the passed in vector, or nil if not possible.
func (v *Vector3) GetIntermediateWithZValue(vector3 *Vector3, z float32) *Vector3 {
xDiff := vector3.X - v.X
yDiff := vector3.Y - v.Y
zDiff := vector3.Z - v.Z
if zDiff*zDiff < 0.0000001 {
return nil
}
f := (z - v.Z) / zDiff
if f < 0 || f > 1 {
return nil
} else {
return NewVector3(v.X+xDiff*f, v.Y+yDiff*f, z)
}
} | pkg/math/vector3.go | 0.860867 | 0.825976 | vector3.go | starcoder |
package keras2go
/**
 * Element-wise sum of several tensors.
 *
 * :param output: output tensor; zeroed first, then accumulated into.
 * :param inputList: variadic. Tensors to be summed; each must provide at
 *     least output.Numel elements.
 */
func k2c_add(output *K2c_tensor, inputList ...*K2c_tensor) {
	output.fillFloat64(0)
	for _, input := range inputList {
		for j := 0; j < output.Numel; j++ {
			output.Array[j] += input.Array[j]
		}
	}
}
/**
 * Element-wise difference of two tensors (tensor1 - tensor2).
 *
 * :param output: output tensor.
 * :param num_tensors: unused; kept only for a consistent API with the other merge layers.
 * :param tensor1: first input tensor (minuend).
 * :param tensor2: second input tensor (subtrahend).
 */
func k2c_subtract(output *K2c_tensor, num_tensors int, tensor1 *K2c_tensor, tensor2 *K2c_tensor) {
	for i := 0; i < output.Numel; i++ {
		output.Array[i] = tensor1.Array[i] - tensor2.Array[i]
	}
}
/**
 * Element-wise product of several tensors.
 *
 * :param output: output tensor; initialized to 1, then multiplied into.
 * :param inputList: variadic. Tensors to be multiplied; each must provide
 *     at least output.Numel elements.
 */
func k2c_multiply(output *K2c_tensor, inputList ...*K2c_tensor) {
	output.fillFloat64(1)
	for _, input := range inputList {
		for j := 0; j < output.Numel; j++ {
			output.Array[j] *= input.Array[j]
		}
	}
}
/**
 * Element-wise average of several tensors.
 *
 * :param output: output tensor; zeroed first, then accumulated into.
 * :param inputList: variadic. Tensors to be averaged. NOTE(review): an empty
 *     inputList makes num_tensors_inv = +Inf, but the loop never runs, so the
 *     output is simply all zeros.
 */
func k2c_average(output *K2c_tensor, inputList ...*K2c_tensor) {
	// Multiply by the reciprocal per element instead of dividing at the end.
	var num_tensors_inv = 1.0 / float64(len(inputList))
	output.fillFloat64(0)
	for _, input := range inputList {
		for j := 0; j < output.Numel; j++ {
			output.Array[j] += input.Array[j] * num_tensors_inv
		}
	}
}
/**
 * Element-wise maximum of several tensors.
 *
 * :param output: output tensor; seeded from the first input, so inputList
 *     must be non-empty (an empty list panics on inputList[0]).
 * :param inputList: variadic. Tensors to take the max of.
 */
func k2c_max(output *K2c_tensor, inputList ...*K2c_tensor) {
	// Seed with the first tensor, then fold in the rest.
	for i := 0; i < output.Numel; i++ {
		output.Array[i] = inputList[0].Array[i]
	}
	for _, input := range inputList[1:] {
		for j := 0; j < output.Numel; j++ {
			if output.Array[j] < input.Array[j] {
				output.Array[j] = input.Array[j]
			}
		}
	}
}
/**
 * Element-wise minimum of several tensors.
 *
 * :param output: output tensor; seeded from the first input, so inputList
 *     must be non-empty (an empty list panics on inputList[0]).
 * :param inputList: Tensors to take the min of.
 */
func k2c_min(output *K2c_tensor, inputList ...*K2c_tensor) {
	// Seed with the first tensor, then fold in the rest.
	for i := 0; i < output.Numel; i++ {
		output.Array[i] = inputList[0].Array[i]
	}
	for _, input := range inputList[1:] {
		for j := 0; j < output.Numel; j++ {
			if output.Array[j] > input.Array[j] {
				output.Array[j] = input.Array[j]
			}
		}
	}
}
/**
* Concatenation of several tensors.
*
* :param output: output tensor.
* :param axis: axis along which to concatenate.
* :param inputList: Tensors to concatenate.
*/
func k2c_concatenate(output *K2c_tensor, axis int, inputList ...*K2c_tensor) {
var offset = 0
var outidx int
var insub, outsub [K2C_MAX_NDIM]int
for _, input := range inputList {
for j := 0; j < input.Numel; j++ {
k2c_idx2sub(j, insub[:], input.Shape[:], input.Ndim)
copy(outsub[:], insub[:])
outidx = k2c_sub2idx(outsub[:], output.Shape[:], output.Ndim)
output.Array[outidx] = input.Array[j]
}
offset += input.Shape[axis]
}
} | merge_layers.go | 0.812979 | 0.640917 | merge_layers.go | starcoder |
package mop
import (
`sort`
`strconv`
`strings`
)
// Sorter gets called to sort stock quotes by one of the columns. The
// setup is rather lengthy; there should probably be more concise way
// that uses reflection and avoids hardcoding the column names.
type Sorter struct {
profile *Profile // Pointer to where we store sort column and order.
}
type sortable []Stock
func (list sortable) Len() int { return len(list) }
func (list sortable) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
type byTickerAsc struct { sortable }
type byLastTradeAsc struct { sortable }
type byChangeAsc struct { sortable }
type byChangePctAsc struct { sortable }
type byOpenAsc struct { sortable }
type byLowAsc struct { sortable }
type byHighAsc struct { sortable }
type byLow52Asc struct { sortable }
type byHigh52Asc struct { sortable }
type byVolumeAsc struct { sortable }
type byAvgVolumeAsc struct { sortable }
type byPeRatioAsc struct { sortable }
type byDividendAsc struct { sortable }
type byYieldAsc struct { sortable }
type byMarketCapAsc struct { sortable }
type byTickerDesc struct { sortable }
type byLastTradeDesc struct { sortable }
type byChangeDesc struct { sortable }
type byChangePctDesc struct { sortable }
type byOpenDesc struct { sortable }
type byLowDesc struct { sortable }
type byHighDesc struct { sortable }
type byLow52Desc struct { sortable }
type byHigh52Desc struct { sortable }
type byVolumeDesc struct { sortable }
type byAvgVolumeDesc struct { sortable }
type byPeRatioDesc struct { sortable }
type byDividendDesc struct { sortable }
type byYieldDesc struct { sortable }
type byMarketCapDesc struct { sortable }
func (list byTickerAsc) Less(i, j int) bool { return list.sortable[i].Ticker < list.sortable[j].Ticker }
func (list byLastTradeAsc) Less(i, j int) bool { return list.sortable[i].LastTrade < list.sortable[j].LastTrade }
// Consistency fix: per the comment on c() below, both $Change and Change%
// sort by the Change% value so that multiple $0.00 rows order properly.
// byChangeDesc already did this; byChangeAsc was still sorting on Change.
func (list byChangeAsc) Less(i, j int) bool { return c(list.sortable[i].ChangePct) < c(list.sortable[j].ChangePct) }
func (list byChangePctAsc) Less(i, j int) bool { return c(list.sortable[i].ChangePct) < c(list.sortable[j].ChangePct) }
func (list byOpenAsc) Less(i, j int) bool { return list.sortable[i].Open < list.sortable[j].Open }
func (list byLowAsc) Less(i, j int) bool { return list.sortable[i].Low < list.sortable[j].Low }
func (list byHighAsc) Less(i, j int) bool { return list.sortable[i].High < list.sortable[j].High }
func (list byLow52Asc) Less(i, j int) bool { return list.sortable[i].Low52 < list.sortable[j].Low52 }
func (list byHigh52Asc) Less(i, j int) bool { return list.sortable[i].High52 < list.sortable[j].High52 }
func (list byVolumeAsc) Less(i, j int) bool { return list.sortable[i].Volume < list.sortable[j].Volume }
func (list byAvgVolumeAsc) Less(i, j int) bool { return list.sortable[i].AvgVolume < list.sortable[j].AvgVolume }
func (list byPeRatioAsc) Less(i, j int) bool { return list.sortable[i].PeRatio < list.sortable[j].PeRatio }
func (list byDividendAsc) Less(i, j int) bool { return list.sortable[i].Dividend < list.sortable[j].Dividend }
func (list byYieldAsc) Less(i, j int) bool { return list.sortable[i].Yield < list.sortable[j].Yield }
func (list byMarketCapAsc) Less(i, j int) bool { return m(list.sortable[i].MarketCap) < m(list.sortable[j].MarketCap) }
func (list byTickerDesc) Less(i, j int) bool { return list.sortable[j].Ticker < list.sortable[i].Ticker }
func (list byLastTradeDesc) Less(i, j int) bool { return list.sortable[j].LastTrade < list.sortable[i].LastTrade }
func (list byChangeDesc) Less(i, j int) bool { return c(list.sortable[j].ChangePct) < c(list.sortable[i].ChangePct) }
func (list byChangePctDesc) Less(i, j int) bool { return c(list.sortable[j].ChangePct) < c(list.sortable[i].ChangePct) }
func (list byOpenDesc) Less(i, j int) bool { return list.sortable[j].Open < list.sortable[i].Open }
func (list byLowDesc) Less(i, j int) bool { return list.sortable[j].Low < list.sortable[i].Low }
func (list byHighDesc) Less(i, j int) bool { return list.sortable[j].High < list.sortable[i].High }
func (list byLow52Desc) Less(i, j int) bool { return list.sortable[j].Low52 < list.sortable[i].Low52 }
func (list byHigh52Desc) Less(i, j int) bool { return list.sortable[j].High52 < list.sortable[i].High52 }
func (list byVolumeDesc) Less(i, j int) bool { return list.sortable[j].Volume < list.sortable[i].Volume }
func (list byAvgVolumeDesc) Less(i, j int) bool { return list.sortable[j].AvgVolume < list.sortable[i].AvgVolume }
func (list byPeRatioDesc) Less(i, j int) bool { return list.sortable[j].PeRatio < list.sortable[i].PeRatio }
func (list byDividendDesc) Less(i, j int) bool { return list.sortable[j].Dividend < list.sortable[i].Dividend }
func (list byYieldDesc) Less(i, j int) bool { return list.sortable[j].Yield < list.sortable[i].Yield }
func (list byMarketCapDesc) Less(i, j int) bool { return m(list.sortable[j].MarketCap) < m(list.sortable[i].MarketCap) }
// Initialize simply saves the pointer to Profile for later use.
func (sorter *Sorter) Initialize(profile *Profile) *Sorter {
sorter.profile = profile
return sorter
}
// SortByCurrentColumn builds a list of sort interface based on current sort
// order, then calls sort.Sort to do the actual job.
//
// NOTE: the position of each entry below must match the column index stored
// in profile.SortColumn; only the selected entry is actually sorted, the
// rest are cheap struct wrappers around the same slice.
func (sorter *Sorter) SortByCurrentColumn(stocks []Stock) *Sorter {
	var interfaces []sort.Interface

	if sorter.profile.Ascending {
		interfaces = []sort.Interface{
			byTickerAsc { stocks },
			byLastTradeAsc { stocks },
			byChangeAsc { stocks },
			byChangePctAsc { stocks },
			byOpenAsc { stocks },
			byLowAsc { stocks },
			byHighAsc { stocks },
			byLow52Asc { stocks },
			byHigh52Asc { stocks },
			byVolumeAsc { stocks },
			byAvgVolumeAsc { stocks },
			byPeRatioAsc { stocks },
			byDividendAsc { stocks },
			byYieldAsc { stocks },
			byMarketCapAsc { stocks },
		}
	} else {
		interfaces = []sort.Interface{
			byTickerDesc { stocks },
			byLastTradeDesc { stocks },
			byChangeDesc { stocks },
			byChangePctDesc { stocks },
			byOpenDesc { stocks },
			byLowDesc { stocks },
			byHighDesc { stocks },
			byLow52Desc { stocks },
			byHigh52Desc { stocks },
			byVolumeDesc { stocks },
			byAvgVolumeDesc { stocks },
			byPeRatioDesc { stocks },
			byDividendDesc { stocks },
			byYieldDesc { stocks },
			byMarketCapDesc { stocks },
		}
	}

	// Sort in place by the currently selected column.
	sort.Sort(interfaces[sorter.profile.SortColumn])

	return sorter
}
// c converts a percentage or dollar string (e.g. ` +1.20%`, `$0.05`) to a
// float32. The same helper is used to sort both $Change and Change%; in both
// cases we sort by the Change% value so that multiple $0.00s sort properly.
// Unparseable input yields 0.
func c(str string) float32 {
	cleaned := strings.Trim(str, ` %`)
	cleaned = strings.Replace(cleaned, `$`, ``, 1)
	parsed, _ := strconv.ParseFloat(cleaned, 32)
	return float32(parsed)
}
// m converts market-value notations such as `42B`, `1.5M`, or `2K` to proper
// numeric values for sorting.
//
// Fix from review: the original indexed str[len(str)-1:], which panics on an
// empty string (e.g. a quote with no market cap); empty input now returns 0.
// Unparseable numeric parts also yield 0.
func m(str string) float32 {
	if str == "" {
		return 0
	}
	// Pick the multiplier from the suffix character, if any.
	multiplier := 1.0
	switch str[len(str)-1] {
	case 'B':
		multiplier = 1000000000.0
	case 'M':
		multiplier = 1000000.0
	case 'K':
		multiplier = 1000.0
	}
	trimmed := strings.Trim(str, ` $BMK`) // Get rid of non-numeric characters.
	value, _ := strconv.ParseFloat(trimmed, 32)
	return float32(value * multiplier)
}
package core
//Polygon - Closed Chain Polyline
type Polygon []Line
// GeomType - Describes geometry type
func (Polygon) geomType() string {
return "polygon"
}
// CreatePolygonFromPoints - Creates a Polygon from a slice of Points.
// Consecutive points are joined by lines, and a final line closes the ring
// from the last point back to the first. Requires at least one point
// (an empty slice panics on points[len(points)-1]).
func CreatePolygonFromPoints(points []Point) Polygon {
	var p Polygon
	for i, pt := range points {
		if i > 0 {
			line := createLine(points[i-1], pt)
			p = append(p, line)
		}
	}
	//Final linestring to close end and first point.
	p = append(p, createLine(points[len(points)-1], points[0]))
	return p
}
//Vertices - Returns distinct vertices that make up the Polygon.
func (p Polygon) Vertices() []Point {
var distinctPoints []Point
// distinctPoints = append(distinctPoints, p[0][0])
for _, l := range p {
distinctPoints = append(distinctPoints, l[0])
}
return distinctPoints
}
// GetNumEdges - Returns NumEdges returns the number of edges in this shape.
// Copied from S2. A nil or empty polygon has zero edges.
func (p *Polygon) GetNumEdges() int {
	// len of a nil/empty slice is already 0, so no special case is needed.
	return len(*p)
}
//Perimeter - Returns perimeter of polygon
func (p *Polygon) Perimeter() float64 {
var d float64
for _, l := range *p {
d = d + l.length()
}
return d
}
// Area - Returns the signed area of the polygon using the shoelace formula
// (https://www.mathopenref.com/coordpolygonarea.html). The sign depends on
// the winding order; take the absolute value if only the magnitude matters.
// Note does not work for self intersecting polygons. (need to add catch for this. )
//
// Fixes from review:
//   - the original wrote distinctPoints[len(distinctPoints)] = ..., an
//     out-of-range index that always panics; the ring is now closed with append.
//   - the shoelace cross term used + where the formula requires -, which
//     produced a meaningless result.
func (p Polygon) Area() float64 {
	pts := p.Vertices()
	if len(pts) == 0 {
		return 0
	}
	// Close the ring so the last vertex pairs with the first.
	pts = append(pts, pts[0])
	var sum float64
	for i := 0; i < len(pts)-1; i++ {
		sum += pts[i].X*pts[i+1].Y - pts[i].Y*pts[i+1].X
	}
	return sum / 2
}
// bbox returns the axis-aligned bounding box of the polygon's vertices.
//
// Fixes from review:
//   - the original wrote points[len(points)] = points[0], an out-of-range
//     index that always panics; closing the ring is unnecessary for a
//     bounding box, so the write is dropped.
//   - min/max started at zero, which is wrong for polygons lying entirely
//     in positive (or negative) coordinates; they now start from the first
//     vertex.
func (p *Polygon) bbox() BoundingBox {
	points := p.Vertices()
	if len(points) == 0 {
		return BoundingBox{}
	}
	minX, maxX := points[0].X, points[0].X
	minY, maxY := points[0].Y, points[0].Y
	for _, pt := range points[1:] {
		if pt.X < minX {
			minX = pt.X
		}
		if pt.X > maxX {
			maxX = pt.X
		}
		if pt.Y < minY {
			minY = pt.Y
		}
		if pt.Y > maxY {
			maxY = pt.Y
		}
	}
	return BoundingBox{Point{X: minX, Y: minY}, Point{X: maxX, Y: maxY}}
}
//ClosedChain - Check if is a closed chain of lines (i.e. it is a Polygon)
func (p Polygon) ClosedChain() bool {
start := p[0][0]
end := p[len(p)-1][1]
x, y := false, false
if start.X == end.X {
x = true
}
if start.Y == end.Y {
y = true
}
if x == true && y == true {
return true
}
return false
} | core/Polygon.go | 0.827689 | 0.565839 | Polygon.go | starcoder |
package datatype
type DataTypes int
const UndefinedDataType DataTypes = 0x0001
const ElementDataType DataTypes = 0x1000
const PrimitiveDataType = ElementDataType + 0x0200
const ComplexDataType = ElementDataType + 0x0400
const ResourceDataType DataTypes = 0x2000
const (
BooleanDataType = iota + PrimitiveDataType
IntegerDataType
StringDataType
DecimalDataType
URIDataType
DateDataType
DateTimeDataType
TimeDataType
CodeDataType
IDDataType
MarkdownDataType
UnsignedIntDataType
PositiveIntDataType
)
const (
QuantityDataType = iota + ComplexDataType
)
const ElementTypeName = "Element"
var fqElementTypeName = NewFQTypeName(ElementTypeName, FHIRNamespaceName)
type Accessor interface {
DataType() DataTypes
TypeSpec() TypeSpecAccessor
Empty() bool
Equal(accessor Accessor) bool
Equivalent(accessor Accessor) bool
}
type ElementType struct {
}
type ElementAccessor interface {
Accessor
}
// Stringifier is implemented by values that can render themselves as a string.
type Stringifier interface {
	String() string
}

// StringValue returns stringifier.String(), or the empty string when the
// stringifier is nil.
func StringValue(stringifier Stringifier) string {
	if stringifier != nil {
		return stringifier.String()
	}
	return ""
}
type PrimitiveType struct {
ElementType
nilValue bool
}
type PrimitiveAccessor interface {
ElementAccessor
Stringifier
Nil() bool
}
func (t *PrimitiveType) Nil() bool {
return t.nilValue
}
func (t *PrimitiveType) Empty() bool {
return t.Nil()
}
// TypeEqual reports whether both accessors are non-nil and share the same DataType.
func TypeEqual(a1 Accessor, a2 Accessor) bool {
	return a1 != nil && a2 != nil && a1.DataType() == a2.DataType()
}

// IsPrimitive reports whether the accessor's data type has all bits of
// PrimitiveDataType set (i.e. it is one of the primitive element types).
func IsPrimitive(accessor Accessor) bool {
	return accessor != nil && accessor.DataType()&PrimitiveDataType == PrimitiveDataType
}

// Equal reports value equality of two accessors. Identical references and
// two empty (or nil) accessors compare equal; otherwise the comparison is
// delegated to a1.Equal.
func Equal(a1 Accessor, a2 Accessor) bool {
	return a1 == a2 || (Empty(a1) && Empty(a2)) ||
		(a1 != nil && a2 != nil && a1.Equal(a2))
}

// Equivalent reports equivalence of two accessors; identical references and
// two empty accessors are always equivalent, otherwise the check is
// delegated to a1.Equivalent.
func Equivalent(a1 Accessor, a2 Accessor) bool {
	if a1 == a2 || Empty(a1) && Empty(a2) {
		return true
	}
	if a1 == nil || a2 == nil {
		return false
	}
	return a1.Equivalent(a2)
}

// Empty reports whether the accessor is nil or reports itself empty.
func Empty(a Accessor) bool {
	return a == nil || a.Empty()
}
var elementTypeSpec = NewTypeSpecWithBase(fqElementTypeName, nil)
func newElementTypeSpec(name string) *TypeSpec {
return newElementTypeSpecWithBase(name, elementTypeSpec)
}
func newElementTypeSpecWithBase(name string, base TypeSpecAccessor) *TypeSpec {
return NewTypeSpecWithBase(NewFQTypeName(name, FHIRNamespaceName), base)
} | datatype/data_type.go | 0.620392 | 0.716851 | data_type.go | starcoder |
package sync3
import "math"
// SliceRanges is a set of inclusive [start, end] index ranges.
type SliceRanges [][2]int64

// Valid reports whether every range has a non-negative start and runs from
// start to end (end must not precede start). An empty set is valid.
func (r SliceRanges) Valid() bool {
	for _, sr := range r {
		if sr[0] < 0 || sr[1] < sr[0] {
			return false
		}
	}
	return true
}
// Inside returns true if i is inside the range.
// Ranges are inclusive on both ends.
func (r SliceRanges) Inside(i int64) bool {
	for _, sr := range r {
		if sr[0] <= i && i <= sr[1] {
			return true
		}
	}
	return false
}
// UpperClamp returns the start-index e.g [50,99] -> 50 of the first range higher than i.
// If `i` is inside a range, returns -1.
// E.g [50,99] i=30 -> 50, [50,99] i=55 -> -1
func (r SliceRanges) UpperClamp(i int64) (clampIndex int64) {
	// Sentinel larger than any real start index; replaced by -1 below when
	// no range starts after i.
	clampIndex = math.MaxInt64 - 1
	modified := false
	for _, sr := range r {
		// Track the smallest range start that is strictly greater than i.
		if sr[0] > i && sr[0] < int64(clampIndex) {
			clampIndex = sr[0]
			modified = true
		}
	}
	if !modified {
		clampIndex = -1
	}
	return
}
// LowerClamp returns the end-index e.g [0,99] -> 99 of the first range lower than i.
// This is critical to determine which index to delete when rooms move outside of the tracked range.
// If `i` is inside a range, returns the clamp for the lower range. Returns -1 if a clamp cannot be found
// e.g [0,99] i=50 -> -1 whereas [0,99][150,199] i=160 -> 99
func (r SliceRanges) LowerClamp(i int64) (clampIndex int64) {
	clampIndex = -1
	for _, sr := range r {
		// Track the largest range end that is strictly below i.
		if sr[1] < i && sr[1] > int64(clampIndex) {
			clampIndex = sr[1]
		}
	}
	return
}
// Delta returns the ranges which are unchanged, added and removed when
// moving from r to next. Comparison is by exact [start, end] pair. The
// order of entries within each returned slice is nondeterministic because
// it follows map iteration order.
func (r SliceRanges) Delta(next SliceRanges) (added SliceRanges, removed SliceRanges, same SliceRanges) {
	// Index both range sets for O(1) membership checks.
	olds := make(map[[2]int64]bool)
	for _, oldStartEnd := range r {
		olds[oldStartEnd] = true
	}
	news := make(map[[2]int64]bool)
	for _, newStartEnd := range next {
		news[newStartEnd] = true
	}
	// Old ranges still present are unchanged; the rest were removed.
	for oldStartEnd := range olds {
		if news[oldStartEnd] {
			same = append(same, oldStartEnd)
		} else {
			removed = append(removed, oldStartEnd)
		}
	}
	// New ranges not previously present were added.
	for newStartEnd := range news {
		if olds[newStartEnd] {
			continue
		}
		added = append(added, newStartEnd)
	}
	return
}
// Slice into this range, returning subslices of slice
func (r SliceRanges) SliceInto(slice Subslicer) []Subslicer {
var result []Subslicer
// TODO: ensure we don't have overlapping ranges
for _, sr := range r {
// apply range caps
// the range are always index positions hence -1
sliceLen := slice.Len()
if sr[0] >= sliceLen {
sr[0] = sliceLen - 1
}
if sr[1] >= sliceLen {
sr[1] = sliceLen - 1
}
subslice := slice.Subslice(sr[0], sr[1]+1)
result = append(result, subslice)
}
return result
}
type Subslicer interface {
Len() int64
Subslice(i, j int64) Subslicer
} | sync3/range.go | 0.675122 | 0.428712 | range.go | starcoder |
package similarities
import (
"fmt"
"github.com/jtejido/golucene/core/search"
"math"
)
/**
* TwoStageLM is a class for ranking documents that explicitly captures the different influences of the query and document
* collection on the optimal settings of retrieval parameters.
* It involves two steps. Estimate a document language for the model, and Compute the query likelihood using the estimated
* language model. (DirichletLM and JelinkedMercerLM)
*
* From <NAME> and <NAME>. 2002. Two-Stage Language Models for Information Retrieval.
* @see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.3316&rep=rep1&type=pdf
*
* In a nutshell, this is a generalization of JelinkedMercerLM and DirichletLM.
* The default values used here are the same constants found from the two classes.
* Thus, making λ = 1 and μ same value as DirichletLM Class resolves the score towards DirichletLM, while as making μ larger
* and λ same value as JelinekMercerLM Class resolves the score towards JelinekMercerLM.
*
* @lucene.experimental(jtejido)
*/
type LMTwoStageSimilarity struct {
*lmSimilarityImpl
mu, lambda float32
}
func NewLMTwoStageSimilarityWithModel(collectionModel CollectionModel, lambda, mu float32) *LMTwoStageSimilarity {
ans := new(LMTwoStageSimilarity)
ans.lmSimilarityImpl = newLMSimilarity(ans, collectionModel)
ans.lambda = lambda
ans.mu = mu
return ans
}
func NewDefaultLMTwoStageSimilarity() *LMTwoStageSimilarity {
ans := new(LMTwoStageSimilarity)
ans.lmSimilarityImpl = newDefaultLMSimilarity(ans)
ans.lambda = DEFAULT_LAMBDA_JM
ans.mu = DEFAULT_MU_DIRICHLET
return ans
}
func NewLMTwoStageSimilarity(mu, lambda float32) *LMTwoStageSimilarity {
ans := new(LMTwoStageSimilarity)
ans.lmSimilarityImpl = newDefaultLMSimilarity(ans)
ans.lambda = lambda
ans.mu = mu
return ans
}
/**
 * Smoothed p(w|d) is ((1 - λ)(c(w|d) + (μp(w|C))) / (|d| + μ)) + λp(w|C));
 * Document dependent constant (norm) is (1-λ)|d| + μ / (|d| + μ)
 *
 * The term weight in a form of KL divergence is given by p(w|Q)log(p(w|d)/αp(w|C)) + log α where:
 * p(w|d) = the document model.
 * p(w|C) = the collection model.
 * p(w|Q) = the query model.
 * α = document dependent constant
 *
 * Thus it becomes log(1 + (λc(w|d) / ((1-λ)|d| + μ)p(w|C)) + log((1-λ)|d| + μ / (|d| + μ)).
 **/
func (ts *LMTwoStageSimilarity) score(stats Stats, freq, docLen float32) float32 {
	// Document-dependent constant α (see derivation above).
	norm := ((1-ts.lambda)*docLen + ts.mu) / (docLen + ts.mu)
	score := stats.TotalBoost() * float32(math.Log(1+float64((ts.lambda*freq)/(((1-ts.lambda)*docLen+ts.mu)*stats.(LMStats).CollectionProbability())))+math.Log(float64(norm)))
	// Negative contributions are clamped to zero so a term can never hurt
	// the document's score.
	if score > 0 {
		return score
	}
	return 0
}
func (ts *LMTwoStageSimilarity) explain(expl search.ExplanationSPI, stats Stats, doc int, freq, docLen float32) {
if stats.TotalBoost() != 1.0 {
expl.AddDetail(search.NewExplanation(stats.TotalBoost(), "boost"))
}
expl.AddDetail(search.NewExplanation(ts.lambda, "lambda"))
expl.AddDetail(search.NewExplanation(ts.mu, "mu"))
weightExpl := search.NewExplanation(float32(math.Log(1+float64((ts.lambda*freq)/(((1-ts.lambda)*docLen+ts.mu)*stats.(LMStats).CollectionProbability())))), "term weight")
expl.AddDetail(weightExpl)
expl.AddDetail(search.NewExplanation(float32(math.Log(float64(((1-ts.lambda)*docLen+ts.mu)/(docLen+ts.mu)))), "document norm"))
ts.lmSimilarityImpl.explain(expl, stats, doc, freq, docLen)
}
func (ts *LMTwoStageSimilarity) getName() string {
return fmt.Sprintf("Two-Stage(lambda=%.2f, mu=%.2f)", ts.lambda, ts.mu)
} | core/search/similarities/lmTwoStage.go | 0.884189 | 0.473718 | lmTwoStage.go | starcoder |
package data
import (
"github.com/wardlem/graphlite/util"
)
const (
nilEdge = "attempt to operate on a nil edge"
)
const edgeDataSize = 22
type Edge struct {
Id uint32 // The Id of the Edge
label uint16 // The Id of the Label for the Edge
from uint32 // The Id of the origin Vertex of the Edge
to uint32 // The Id of the destination Vertex of the Edge
outNext uint32 // The Id of the next Edge of the origin Vertex
inNext uint32 // The Id of the next Edge of the destination Vertex
attributable
}
// constructEdge decodes a serialized edge record into an Edge, or returns a
// DataError if any field fails to convert.
//
// Byte layout of an edge record (22 bytes, matching edgeDataSize):
//
//	[0:2]   label    (uint16) id of the edge's label
//	[2:6]   from     (uint32) id of the origin vertex
//	[6:10]  to       (uint32) id of the destination vertex
//	[10:14] outNext  (uint32) next edge of the origin vertex
//	[14:18] inNext   (uint32) next edge of the destination vertex
//	[18:22] firstAtt (uint32) head of the attribute list
func constructEdge(id uint32, bytes []byte) (*Edge, *DataError){
	var e error
	edge := new(Edge)

	// Slice the record into its fixed-width fields.
	label := bytes[0:2]
	from := bytes[2:6]
	to := bytes[6:10]
	outNext := bytes[10:14]
	inNext := bytes[14:18]
	attributes := bytes[18:22]

	edge.Id = id

	edge.label, e = util.BytesToUint16(label)
	if (e != nil){
		return nil, dataError("Failed to construct edge. Could not convert label.", e, nil)
	}

	edge.from, e = util.BytesToUint32(from)
	if (e != nil){
		return nil, dataError("Failed to construct edge. Could not convert from.", e, nil)
	}

	edge.to, e = util.BytesToUint32(to)
	if (e != nil){
		return nil, dataError("Failed to construct edge. Could not convert to.", e, nil)
	}

	edge.outNext, e = util.BytesToUint32(outNext)
	if (e != nil){
		return nil, dataError("Failed to construct edge. Could not convert outNext.", e, nil)
	}

	edge.inNext, e = util.BytesToUint32(inNext)
	if (e != nil){
		return nil, dataError("Failed to construct edge. Could not convert inNext.", e, nil)
	}

	edge.firstAtt, e = util.BytesToUint32(attributes)
	if (e != nil){
		return nil, dataError("Failed to construct edge. Could not convert attributes.", e, nil)
	}

	return edge, nil
}
func (e *Edge) Label(g *Graph) *Label {
Assert(nilEdge, e != nil)
Assert(nilGraph, g != nil)
Assert(nilLabelStore, g.labelStore != nil)
l, _ := g.labelStore.find(e.label)
return l
}
func (e *Edge) Key(g *Graph) string {
Assert(nilAttribute, e != nil)
l := e.Label(g)
return l.Value(g)
}
func (e *Edge) From (g *Graph) *Vertex {
Assert(nilGraph, g != nil)
Assert(nilEdge, e != nil)
Assert(nilVertexStore, g.vertexStore != nil)
return g.vertexStore.Find(e.from)
}
func (e *Edge) To(g *Graph) *Vertex {
Assert(nilGraph, g != nil)
Assert(nilEdge, e != nil)
Assert(nilVertexStore, g.vertexStore != nil)
return g.vertexStore.Find(e.to)
}
func (e *Edge) OutNext(g *Graph) *Edge {
Assert(nilGraph, g != nil)
Assert(nilEdge, e != nil)
Assert(nilEdgeStore, g.edgeStore != nil)
return g.edgeStore.Find(e.outNext)
}
func (e *Edge) InNext(g *Graph) *Edge {
Assert(nilGraph, g != nil)
Assert(nilEdge, e != nil)
Assert(nilEdgeStore, g.edgeStore != nil)
return g.edgeStore.Find(e.inNext)
}
func (e *Edge) track(g *Graph) {
g.edgeStore.Track(e)
} | data/edge.go | 0.605916 | 0.519765 | edge.go | starcoder |
package set
// BitSet data structure
// BitSet is a fixed-capacity bit set backed by 64-bit words.
type BitSet struct {
	data []uint64
}

// NewBitSet returns a pointer to a new BitSet able to hold bit indexes
// 0..size.
//
// Fix from review: each uint64 word stores 64 bits, so size/64+1 words
// suffice (getIndex maps bit i to word i>>6). The original allocated
// size/8+1 words — byte-based math — wasting 8x the needed memory.
func NewBitSet(size int) *BitSet {
	b := &BitSet{
		data: make([]uint64, size/64+1),
	}
	return b
}
// Clone returns a copy of a BitSet that shares no storage with the original.
func (b *BitSet) Clone() *BitSet {
	c := &BitSet{data: make([]uint64, len(b.data))}
	copy(c.data, b.data)
	return c
}
// Set sets given bit to true
func (b *BitSet) Set(i int) {
x, y := b.getIndex(i)
b.data[x] |= b.getMask(y)
}
// SetMany sets given bits to true
func (b *BitSet) SetMany(indexes ...int) {
for _, idx := range indexes {
x, y := b.getIndex(idx)
b.data[x] |= b.getMask(y)
}
}
// SetRange sets bits in range [i,j] to true
func (b *BitSet) SetRange(i, j int) {
for idx := i; idx <= j; idx++ {
x, y := b.getIndex(idx)
b.data[x] |= b.getMask(y)
}
}
// Unset sets given bit to false
func (b *BitSet) Unset(i int) {
x, y := b.getIndex(i)
b.data[x] &^= b.getMask(y)
}
// UnsetMany sets given bits to false
func (b *BitSet) UnsetMany(indexes ...int) {
for _, idx := range indexes {
x, y := b.getIndex(idx)
b.data[x] &^= b.getMask(y)
}
}
// UnsetRange sets bits in range [i,j] to false
func (b *BitSet) UnsetRange(i, j int) {
for idx := i; idx <= j; idx++ {
x, y := b.getIndex(idx)
b.data[x] &^= b.getMask(y)
}
}
// Get return true if bit is true, false otherwise
func (b *BitSet) Get(i int) bool {
x, y := b.getIndex(i)
return (b.data[x] & b.getMask(y)) != 0
}
// GetMany returns bit status for indexes
func (b *BitSet) GetMany(indexes ...int) []bool {
res := make([]bool, len(indexes))
for i, idx := range indexes {
x, y := b.getIndex(idx)
res[i] = (b.data[x] & b.getMask(y)) != 0
}
return res
}
// GetRange returns bits statuses from range [i,j]
func (b *BitSet) GetRange(i, j int) []bool {
res := make([]bool, j-i+1)
for idx := i; idx <= j; idx++ {
x, y := b.getIndex(idx)
res[idx-i] = (b.data[x] & b.getMask(y)) != 0
}
return res
}
// Toggle flips bit value
func (b *BitSet) Toggle(i int) {
x, y := b.getIndex(i)
b.data[x] ^= b.getMask(y)
}
// ToggleMany flips bits values
func (b *BitSet) ToggleMany(indexes ...int) {
for _, idx := range indexes {
x, y := b.getIndex(idx)
b.data[x] ^= b.getMask(y)
}
}
// ToggleRange flips bits in range [i,j] to false
func (b *BitSet) ToggleRange(i, j int) {
for idx := i; idx <= j; idx++ {
x, y := b.getIndex(idx)
b.data[x] ^= b.getMask(y)
}
}
// Count returns number of true bits
func (b *BitSet) Count() int {
res := 0
for _, v := range b.data {
res += getBits(v)
}
return res
}
// Any returns true if at least 1 bit is true
func (b *BitSet) Any() bool {
return b.Count() > 0
}
// AnyMany returns true if at least 1 bit from indexes is true
func (b *BitSet) AnyMany(indexes ...int) bool {
for _, idx := range indexes {
x, y := b.getIndex(idx)
if (b.data[x] & b.getMask(y)) != 0 {
return true
}
}
return false
}
// AnyRange returns true if at least 1 bit from range is true
func (b *BitSet) AnyRange(i, j int) bool {
for idx := i; idx <= j; idx++ {
x, y := b.getIndex(idx)
if (b.data[x] & b.getMask(y)) != 0 {
return true
}
}
return false
}
// None returns true if no bits is true
func (b *BitSet) None() bool {
return b.Count() == 0
}
// NoneMany returns true if no bits from indexes are true
func (b *BitSet) NoneMany(indexes ...int) bool {
for _, idx := range indexes {
x, y := b.getIndex(idx)
if (b.data[x] & b.getMask(y)) != 0 {
return false
}
}
return true
}
// NoneRange returns true if no bits in range are true
func (b *BitSet) NoneRange(i, j int) bool {
for idx := i; idx <= j; idx++ {
x, y := b.getIndex(idx)
if (b.data[x] & b.getMask(y)) != 0 {
return false
}
}
return true
}
func (b *BitSet) getIndex(i int) (int, int) {
return i >> 6, i & 63
}
func (b *BitSet) getMask(i int) uint64 {
return uint64(1 << uint(i))
} | set/bitset.go | 0.772917 | 0.441071 | bitset.go | starcoder |
package n
// Char wraps the Go rune providing a way to distinguish it from an int32
// where as a rune is indistinguishable from an int32. Provides convenience
// methods on par with rapid development languages.
type Char rune
// // C is an alias to NewChar for brevity
// func C(obj interface{}) *Str {
// return NewChar(obj)
// }
// NewChar creates a new Char from the given obj. Will always be non nil.
// Supports: string, *string, rune, *rune, byte, *byte
func NewChar(obj interface{}) *Char {
	return ToChar(obj)
}

// NewCharV creates a new Char. Will always be non nil.
// Allows for empty Char with a Null value.
// NOTE(review): the variadic obj is currently ignored and the result is
// always the zero Char — confirm whether it should delegate to ToChar when
// an argument is supplied.
func NewCharV(obj ...interface{}) *Char {
	new := Char(0)
	return &new
}
// Object interface methods
//--------------------------------------------------------------------------------------------------

// A is an alias of String for brevity
func (p *Char) A() string {
	return p.String()
}

// Equal returns true if the given obj, converted via ToChar, holds the
// same rune value as this Char. A nil receiver is never equal.
func (p *Char) Equal(obj interface{}) bool {
	other := ToChar(obj)
	if p != nil {
		return *p == *other
	}
	return false
}
// G returns the underlying data structure as a builtin Go rune.
func (p *Char) G() rune {
	return p.O().(rune)
}

// Less returns true if this Char sorts before the given obj when both are
// compared as strings after conversion via ToChar. A nil receiver is
// never less.
func (p *Char) Less(obj interface{}) bool {
	other := ToChar(obj)
	if p != nil {
		return p.A() < other.A()
	}
	return false
}

// O returns the underlying rune as is; a nil receiver yields rune(0).
func (p *Char) O() interface{} {
	if p != nil {
		return rune(*p)
	}
	return rune(0)
}
// Nil tests if the object is nil
func (p *Char) Nil() bool {
if p == nil {
return true
}
return false
}
// Null tests if the char is a rune(0)
func (p *Char) Null() bool {
if p == nil {
return false
}
return rune(*p) == rune(0)
}
// String returns a string representation of the Object, implements Stringer interface.
func (p *Char) String() string {
if p == nil || *p == Char(0) {
return ""
}
return string(*p)
} | char.go | 0.751283 | 0.480722 | char.go | starcoder |
package aestest
import (
"log"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/inklabs/rangedb/pkg/crypto"
"github.com/inklabs/rangedb/pkg/crypto/aes"
)
// Shared fixtures for the AES encryptor contract tests.
// NOTE(review): several key/cipher constants below appear redacted to
// "<KEY>" (some with unbalanced quotes) — real base64 test vectors must
// be restored before this file can compile and run.
const (
	PlainText = "lorem ipsum"
	ValidAES256Base64Key = "<KEY>
	ValidAESGCMBase64CipherText = "Glq32jvn9nPO/pqxN9p3YQT4pvoZuV4aQCOy/TIdEtqW8vtGMnsG"
	InvalidAESLengthBase64Key = "<KEY>
	InvalidBase64Key = "invalid-"
	InvalidBase64CipherText = "."
	EmptyBase64CipherText = ""
)
// VerifyAESEncryption drives a crypto.Encryptor implementation through
// the shared AES contract: successful encrypt/decrypt round-trips for
// AES-128/192/256 keys, plus the error paths for malformed keys and
// malformed, empty, or undecodable cipher text.
func VerifyAESEncryption(t *testing.T, encryptor crypto.Encryptor) {
	// NOTE(review): the next two lines look like leftover debug code —
	// the error from Encrypt is discarded and the cipher text is logged
	// on every run. Consider removing them (the log and aes imports would
	// then become unused and need removal too).
	encrypt, _ := aes.NewCBCPKCS5Padding().Encrypt(ValidAES256Base64Key, PlainText)
	log.Print(encrypt)
	t.Run("encrypt/decrypt string", func(t *testing.T) {
		// One base64-encoded key per supported AES key length.
		// NOTE(review): key material appears redacted to "<KEY>" —
		// restore real vectors before use.
		tests := []struct {
			keyLength string
			key string
		}{
			{keyLength: "AES-128", key: "<KEY>},
			{keyLength: "AES-192", key: "<KEY>"},
			{keyLength: "AES-256", key: "<KEY>},
		}
		for _, tc := range tests {
			t.Run(tc.keyLength, func(t *testing.T) {
				// Given
				// When
				encryptedValue, err := encryptor.Encrypt(tc.key, PlainText)
				require.NoError(t, err)
				assert.NotEqual(t, PlainText, encryptedValue)
				// Then
				decryptedValue, err := encryptor.Decrypt(tc.key, encryptedValue)
				require.NoError(t, err)
				assert.Equal(t, PlainText, decryptedValue)
			})
		}
	})
	t.Run("errors", func(t *testing.T) {
		t.Run("encrypt", func(t *testing.T) {
			t.Run("from invalid base64 key", func(t *testing.T) {
				// Given
				// When
				decryptedValue, err := encryptor.Encrypt(InvalidBase64Key, PlainText)
				// Then
				require.EqualError(t, err, "illegal base64 data at input byte 7")
				assert.Equal(t, "", decryptedValue)
			})
			t.Run("from invalid key size", func(t *testing.T) {
				// Given
				// When
				decryptedValue, err := encryptor.Encrypt(InvalidAESLengthBase64Key, PlainText)
				// Then
				require.EqualError(t, err, "crypto/aes: invalid key size 7")
				assert.Equal(t, "", decryptedValue)
			})
		})
		t.Run("decrypt", func(t *testing.T) {
			t.Run("from invalid base64 key", func(t *testing.T) {
				// Given
				// When
				decryptedValue, err := encryptor.Decrypt(InvalidBase64Key, ValidAESGCMBase64CipherText)
				// Then
				require.EqualError(t, err, "illegal base64 data at input byte 7")
				assert.Equal(t, "", decryptedValue)
			})
			t.Run("from invalid base64 cipher text", func(t *testing.T) {
				// Given
				// When
				decryptedValue, err := encryptor.Decrypt(ValidAES256Base64Key, InvalidBase64CipherText)
				// Then
				require.EqualError(t, err, "illegal base64 data at input byte 0")
				assert.Equal(t, "", decryptedValue)
			})
			t.Run("from empty base64 cipher text", func(t *testing.T) {
				// Given
				// When
				decryptedValue, err := encryptor.Decrypt(ValidAES256Base64Key, EmptyBase64CipherText)
				// Then
				require.EqualError(t, err, "encrypted data empty")
				assert.Equal(t, "", decryptedValue)
			})
			t.Run("from invalid key size", func(t *testing.T) {
				// Given
				// When
				decryptedValue, err := encryptor.Decrypt(InvalidAESLengthBase64Key, ValidAESGCMBase64CipherText)
				// Then
				require.EqualError(t, err, "crypto/aes: invalid key size 7")
				assert.Equal(t, "", decryptedValue)
			})
		})
	})
}
func AESEncryptorBenchmark(b *testing.B, encryptor crypto.Encryptor, cipherText string) {
b.Run("encrypt", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := encryptor.Encrypt(ValidAES256Base64Key, PlainText)
if err != nil {
require.NoError(b, err)
}
}
})
b.Run("decrypt", func(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := encryptor.Decrypt(ValidAES256Base64Key, cipherText)
if err != nil {
require.NoError(b, err)
}
}
})
} | pkg/crypto/aes/aestest/verify_aes_encryption.go | 0.530966 | 0.402069 | verify_aes_encryption.go | starcoder |
package board
import (
"sort"
"strings"
)
// PositionEvent describes the causes of a unit's position
type PositionEvent int

const (
	// UnitPlaced unit added to the board at the beginning of the phase
	UnitPlaced PositionEvent = iota // TODO: decide better name for this
	// Held unit has been held by player
	Held
	// Moved unit has moved territories
	Moved
	// Bounced unit has bounced from territory
	Bounced
	// Defeated unit has been defeated
	Defeated
)

// Position describes the unit's board position: where it is, the strength
// it holds or attacks with, and the event that produced this position.
type Position struct {
	Territory Territory
	Strength int
	Cause PositionEvent
}

// Manager is a board manager that records positions of units on a board
type Manager interface {
	// Position returns the unit's current position.
	Position(*Unit) *Position
	// Positions returns a snapshot of every unit's current position.
	Positions() map[*Unit]Position
	// Move records the unit moving to a territory with the given strength.
	Move(*Unit, Territory, int)
	// Hold records the unit holding its territory with the given strength.
	Hold(*Unit, int)
	// Bounce returns the unit to its previous territory after a failed move.
	Bounce(*Unit)
	// SetDefeated marks the unit as defeated.
	SetDefeated(*Unit)
	// Conflict returns the units involved in the first conflict found.
	Conflict() []*Unit
	// AtOrigin reports whether the unit is at its phase starting position.
	AtOrigin(*Unit) bool
}
// PositionManager implements Manager by recording the full position
// history of every unit on the board.
type PositionManager struct {
	history map[*Unit][]Position
}

// NewPositionManager constructs an empty PositionManager.
func NewPositionManager() PositionManager {
	return PositionManager{history: map[*Unit][]Position{}}
}

// AddUnit places a unit on the board at the given territory, starting its
// history with a single UnitPlaced position.
func (m PositionManager) AddUnit(unit *Unit, territory Territory) {
	first := Position{Territory: territory, Cause: UnitPlaced}
	m.history[unit] = []Position{first}
}

// Positions returns a snapshot of every managed unit's current position.
func (m PositionManager) Positions() map[*Unit]Position {
	snapshot := make(map[*Unit]Position, len(m.history))
	for unit := range m.history {
		snapshot[unit] = *m.Position(unit)
	}
	return snapshot
}
// Conflict returns the first conflict found on the board: either several
// non-defeated units occupying the same territory, or two units that
// moved head-on between the same pair of territories (a counter-attack).
// Returns nil when the board has no conflicts.
// NOTE(review): map iteration order is random, so with multiple conflicts
// the one returned can differ between calls — confirm callers do not rely
// on a stable choice.
func (m PositionManager) Conflict() []*Unit {
	conflicts := make(map[string][]*Unit)
	for u, position := range m.Positions() {
		// Defeated units cannot contest a territory.
		if position.Cause == Defeated {
			continue
		}
		conflicts = m.appendTerritoryConflict(conflicts, u)
		// A unit that just moved may also clash with a unit moving the
		// opposite way between the same two territories.
		if position.Cause == Moved {
			conflicts = m.appendCounterAttackConflict(conflicts, u)
		}
	}
	for _, units := range conflicts {
		if len(units) > 1 {
			return units
		}
	}
	return nil
}
// Move records the unit moving to a new territory with the given attack
// strength.
func (m PositionManager) Move(u *Unit, to Territory, strength int) {
	m.history[u] = append(m.history[u], Position{Territory: to, Strength: strength, Cause: Moved})
}

// Hold records the unit holding its current territory with the given
// defensive strength.
func (m PositionManager) Hold(u *Unit, strength int) {
	m.history[u] = append(m.history[u], Position{Territory: m.Position(u).Territory, Cause: Held, Strength: strength})
}

// Bounce returns the unit to the territory it moved from, with zero
// strength, when its move fails.
// NOTE(review): prevPosition returns nil when the unit has fewer than two
// recorded positions, which would panic here — confirm Bounce is only
// ever called on a unit that has just moved.
func (m PositionManager) Bounce(u *Unit) {
	m.history[u] = append(m.history[u], Position{
		Territory: m.prevPosition(u).Territory, Strength: 0, Cause: Bounced})
}

// SetDefeated records the unit's current territory with a Defeated cause.
func (m PositionManager) SetDefeated(u *Unit) {
	m.history[u] = append(m.history[u], Position{Territory: m.Position(u).Territory, Cause: Defeated})
}
// Position returns the current board position of a unit, or nil when the
// unit has no recorded history. Fix: the previous hist[len(hist)-1]
// indexing panicked on a nil/empty history, while AtOrigin already
// nil-checks this return value and so clearly expects nil instead.
func (m PositionManager) Position(u *Unit) *Position {
	hist := m.positionHistory(u)
	if hist == nil {
		return nil
	}
	return &hist[len(hist)-1]
}
// AtOrigin reports whether the unit currently sits in the territory where
// it began the phase.
func (m PositionManager) AtOrigin(u *Unit) bool {
	pos := m.Position(u)
	hist := m.positionHistory(u)
	if pos == nil || hist == nil {
		return false
	}
	return hist[0].Territory.Abbr == pos.Territory.Abbr
}

// Defeated reports whether the unit's latest recorded position marks it
// as defeated.
func (m PositionManager) Defeated(u *Unit) bool {
	pos := m.Position(u)
	return pos.Cause == Defeated
}

// positionHistory returns the unit's recorded positions, or nil when none
// exist.
func (m PositionManager) positionHistory(u *Unit) []Position {
	if hist := m.history[u]; len(hist) > 0 {
		return hist
	}
	return nil
}
func (m PositionManager) prevPosition(u *Unit) *Position {
hist := m.positionHistory(u)
n := len(hist)
if n < 2 {
return nil
}
return &hist[n-2]
}
func (m PositionManager) appendCounterAttackConflict(conflicts map[string][]*Unit, u *Unit) map[string][]*Unit {
s := []string{m.Position(u).Territory.Abbr, m.prevPosition(u).Territory.Abbr}
sort.Strings(s)
return appendConflict(conflicts, strings.Join(s, "."), u)
}
func (m PositionManager) appendTerritoryConflict(conflicts map[string][]*Unit, u *Unit) map[string][]*Unit {
return appendConflict(conflicts, m.Position(u).Territory.Abbr, u)
}
func appendConflict(conflicts map[string][]*Unit, key string, u *Unit) map[string][]*Unit {
if conflicts[key] == nil {
conflicts[key] = make([]*Unit, 0)
}
conflicts[key] = append(conflicts[key], u)
return conflicts
} | game/order/board/manager.go | 0.568655 | 0.50293 | manager.go | starcoder |
package trie
// This is an opaque wrapper type to abstract over a ternary search trie.
type Trie struct {
	root *trieNode
}

// trieNode is a single node of the ternary search trie. left/right link
// siblings whose rune sorts below/above val at the same key depth; next
// descends to the following rune of the key. terminal marks that a
// complete inserted key ends at this node.
type trieNode struct {
	val      rune
	terminal bool
	left     *trieNode
	right    *trieNode
	next     *trieNode
}

// Insert a key, which is a slice of rune's (chars). Empty keys are
// ignored.
func (t *Trie) Insert(key []rune) {
	t.root = t.root.Insert(key)
}

// Search returns every complete key in the trie that begins with prefix,
// including prefix itself when it was inserted as a key, in sorted
// (in-order) rune order. An empty prefix returns every stored key. nil is
// returned when nothing matches.
func (t *Trie) Search(prefix []rune) [][]rune {
	if len(prefix) == 0 {
		return t.root.Collect(nil)
	}
	node := t.root.Match(prefix)
	if node == nil {
		return nil
	}
	var matches [][]rune
	if node.terminal {
		// The prefix is itself a stored key. Hand back a private copy so
		// callers cannot alias the caller-owned prefix slice.
		exact := make([]rune, len(prefix))
		copy(exact, prefix)
		matches = append(matches, exact)
	}
	return append(matches, node.next.Collect(prefix)...)
}

// SearchWithDefault searches for a prefix in the trie. If exactly one
// match is not produced, the default is returned.
func (t *Trie) SearchWithDefault(prefix []rune, def []rune) []rune {
	matches := t.Search(prefix)
	if len(matches) != 1 {
		return def
	}
	return matches[0]
}

// Insert adds key below t and returns the (possibly newly allocated) root
// of this subtree.
func (t *trieNode) Insert(key []rune) *trieNode {
	if len(key) == 0 {
		return t
	}
	if t == nil {
		t = &trieNode{val: key[0]}
	}
	switch {
	case key[0] < t.val:
		t.left = t.left.Insert(key)
	case key[0] > t.val:
		t.right = t.right.Insert(key)
	default:
		if len(key) == 1 {
			// Fix: set the terminal flag on every matching insert, not
			// only at node creation. Previously, inserting a key that was
			// a prefix of an already-stored key was silently lost (the
			// old len(key)==0 branch was unreachable dead code).
			t.terminal = true
		} else {
			t.next = t.next.Insert(key[1:])
		}
	}
	return t
}

// Match returns the node holding the last rune of key, or nil when the
// key is empty or not present as a path in the trie.
func (t *trieNode) Match(key []rune) *trieNode {
	if t == nil || len(key) == 0 {
		return nil
	}
	switch {
	case key[0] < t.val:
		return t.left.Match(key)
	case key[0] > t.val:
		return t.right.Match(key)
	default:
		if len(key) == 1 {
			return t
		}
		return t.next.Match(key[1:])
	}
}

// Collect gathers all complete keys stored in the subtree rooted at t,
// prepending the given prefix, in sorted (in-order) rune order.
func (t *trieNode) Collect(prefix []rune) [][]rune {
	if t == nil {
		return nil
	}
	// Fix: extend the prefix into fresh storage. The original used
	// append(prefix, t.val), whose shared backing array let sibling
	// branches overwrite each other's already-collected results.
	word := make([]rune, len(prefix)+1)
	copy(word, prefix)
	word[len(prefix)] = t.val

	var pool [][]rune
	pool = append(pool, t.left.Collect(prefix)...)
	if t.terminal {
		pool = append(pool, word)
	}
	pool = append(pool, t.next.Collect(word)...)
	return append(pool, t.right.Collect(prefix)...)
}
package sort
// partition rearranges data[low:high+1] around the pivot data[high]
// (Lomuto scheme) and returns the pivot's final index: every element at
// or below the returned index is <= pivot, every element above is > pivot.
func partition(data []int, low int, high int) int {
	pivot := data[high]
	boundary := low
	for cursor := low; cursor < high; cursor++ {
		if data[cursor] <= pivot {
			data[cursor], data[boundary] = data[boundary], data[cursor]
			boundary++
		}
	}
	data[high], data[boundary] = data[boundary], data[high]
	return boundary
}

// QuickSort sorts data[low:high+1] in place using divide and conquer with
// the last element of each partition as the pivot.
func QuickSort(data []int, low int, high int) {
	if low >= high {
		return
	}
	pivotIdx := partition(data, low, high)
	QuickSort(data, low, pivotIdx-1)
	QuickSort(data, pivotIdx+1, high)
}
// medianOfThree sorts data[low], data[mid], data[high] into order (where
// mid is the middle index) and returns mid, so data[mid] holds the median
// of the three — suitable as a pivot index.
func medianOfThree(data []int, low int, high int) int {
	mid := (low + high) / 2
	if data[low] > data[high] {
		data[low], data[high] = data[high], data[low]
	}
	if data[mid] < data[low] {
		data[low], data[mid] = data[mid], data[low]
	}
	if data[high] < data[mid] {
		data[mid], data[high] = data[high], data[mid]
	}
	return mid
}
// QuickSortOptimized is a tuned Quick Sort: partitions smaller than the
// package-level cutoff are finished with InsertionSort, and the pivot is
// chosen by median-of-three to guard against already-ordered inputs.
func QuickSortOptimized(data []int, low int, high int) {
	if low >= high {
		return
	}
	// Small partitions are cheaper to finish with insertion sort.
	if high-low < cutoff {
		InsertionSort(data[low : high+1])
		return
	}
	// Move the median-of-three pivot into the last slot, then partition.
	m := medianOfThree(data, low, high)
	data[m], data[high] = data[high], data[m]
	p := partition(data, low, high)
	QuickSortOptimized(data, low, p-1)
	QuickSortOptimized(data, p+1, high)
}
// Quick3WaySort is Quick Sort with Dijkstra-style 3-way partitioning.
// Elements equal to the pivot are grouped in the middle and excluded from
// recursion, which pays off when the input holds few distinct keys.
func Quick3WaySort(data []int, low int, high int) {
	if low >= high {
		return
	}
	pivot := data[low]
	lt, gt := low, high
	for i := low; i <= gt; {
		switch {
		case data[i] < pivot:
			data[i], data[lt] = data[lt], data[i]
			lt++
			i++
		case data[i] > pivot:
			data[i], data[gt] = data[gt], data[i]
			gt--
		default:
			i++
		}
	}
	// data[lt:gt+1] now equals pivot; sort only the outer partitions.
	Quick3WaySort(data, low, lt-1)
	Quick3WaySort(data, gt+1, high)
}
package main
import (
"encoding/binary"
"github.com/gwaylib/errors"
)
// AxpertWorkingStatus decodes the two raw 20-byte status payloads
// reported by an Axpert inverter. Accessors read fixed little-endian
// fields and flag bits out of the copied payloads.
type AxpertWorkingStatus struct {
	data  [20]byte
	data1 [20]byte
}

// ParseAxpertWorkingStatus copies the first 20 bytes of each payload into
// a new AxpertWorkingStatus. Both input slices must hold at least 20
// bytes; otherwise an error is returned.
func ParseAxpertWorkingStatus(data, data1 []byte) (*AxpertWorkingStatus, error) {
	if len(data) < 20 {
		return nil, errors.New("need data len >= 20")
	}
	if len(data1) < 20 {
		// Fix: this previously reported "data" as well, hiding which
		// argument was too short.
		return nil, errors.New("need data1 len >= 20")
	}
	a := &AxpertWorkingStatus{}
	copy(a.data[:], data)
	copy(a.data1[:], data1)
	return a, nil
}

// GridVoltage returns the grid voltage in units of 0.1 V (data[0:2], LE).
func (a *AxpertWorkingStatus) GridVoltage() uint16 {
	return binary.LittleEndian.Uint16(a.data[:2])
}

// GridFrequency returns the grid frequency in units of 0.1 Hz (data[2:4]).
func (a *AxpertWorkingStatus) GridFrequency() uint16 {
	return binary.LittleEndian.Uint16(a.data[2:4])
}

// ACOutputVoltage returns the AC output voltage in units of 0.1 V
// (data[4:6]).
func (a *AxpertWorkingStatus) ACOutputVoltage() uint16 {
	return binary.LittleEndian.Uint16(a.data[4:6])
}

// ACOutputFrequency returns the AC output frequency in units of 0.1 Hz
// (data[6:8]).
func (a *AxpertWorkingStatus) ACOutputFrequency() uint16 {
	return binary.LittleEndian.Uint16(a.data[6:8])
}

// ACOutputApparentPower returns the AC output apparent power in VA
// (data[8:10]).
func (a *AxpertWorkingStatus) ACOutputApparentPower() uint16 {
	return binary.LittleEndian.Uint16(a.data[8:10])
}

// ACOutputPower returns the AC output active power in watts (data[10:12]).
func (a *AxpertWorkingStatus) ACOutputPower() uint16 {
	return binary.LittleEndian.Uint16(a.data[10:12])
}

// ACOutputLoadPercent returns the AC output load in percent (data[12:14]).
func (a *AxpertWorkingStatus) ACOutputLoadPercent() uint16 {
	return binary.LittleEndian.Uint16(a.data[12:14])
}

// BUSVoltage returns the DC bus voltage in volts (data[14:16]).
func (a *AxpertWorkingStatus) BUSVoltage() uint16 {
	return binary.LittleEndian.Uint16(a.data[14:16])
}

// BatteryVoltage returns the battery voltage in units of 0.01 V
// (data[16:18]).
func (a *AxpertWorkingStatus) BatteryVoltage() uint16 {
	return binary.LittleEndian.Uint16(a.data[16:18])
}

// BatteryCharging returns the battery charging current in amps
// (data[18:20]).
func (a *AxpertWorkingStatus) BatteryCharging() uint16 {
	return binary.LittleEndian.Uint16(a.data[18:20])
}

// BatteryCapacity returns the battery state of charge in percent
// (data1[0:2]).
func (a *AxpertWorkingStatus) BatteryCapacity() uint16 {
	return binary.LittleEndian.Uint16(a.data1[:2])
}

// InverterSinkTemperature returns the inverter heat-sink temperature in
// degrees C (data1[2:4]).
func (a *AxpertWorkingStatus) InverterSinkTemperature() uint16 {
	return binary.LittleEndian.Uint16(a.data1[2:4])
}

// BatteryDischarge returns the battery discharge current in amps
// (data1[4:6]).
func (a *AxpertWorkingStatus) BatteryDischarge() uint16 {
	return binary.LittleEndian.Uint16(a.data1[4:6])
}

// Charging status flags live in data1[6]; data1[7] appears reserved.

// ACChargingOn reports whether the AC charger is charging (data1[6] bit 0).
func (a *AxpertWorkingStatus) ACChargingOn() bool {
	return a.data1[6]&(1<<0) != 0
}

// SCCChargingOn reports whether the solar charge controller is charging
// (data1[6] bit 1).
func (a *AxpertWorkingStatus) SCCChargingOn() bool {
	return a.data1[6]&(1<<1) != 0
}

// ChargingOn reports the overall charging flag (data1[6] bit 2).
func (a *AxpertWorkingStatus) ChargingOn() bool {
	return a.data1[6]&(1<<2) != 0
}

// ErrPVLoss reports the PV-loss warning (data1[8] bit 0).
func (a *AxpertWorkingStatus) ErrPVLoss() bool {
	return a.data1[8]&(1<<0) != 0
}

// ErrInverterFault reports the inverter fault flag (data1[8] bit 1).
func (a *AxpertWorkingStatus) ErrInverterFault() bool {
	return a.data1[8]&(1<<1) != 0
}

// ErrBusOver reports the bus over-voltage fault (data1[8] bit 2).
func (a *AxpertWorkingStatus) ErrBusOver() bool {
	return a.data1[8]&(1<<2) != 0
}

// ErrBusUnder reports the bus under-voltage fault (data1[8] bit 3).
func (a *AxpertWorkingStatus) ErrBusUnder() bool {
	return a.data1[8]&(1<<3) != 0
}

// ErrBusSoftFail reports the bus soft-start fault (data1[8] bit 4).
func (a *AxpertWorkingStatus) ErrBusSoftFail() bool {
	return a.data1[8]&(1<<4) != 0
}

// ErrLineFail reports the line-fail warning (data1[8] bit 5).
func (a *AxpertWorkingStatus) ErrLineFail() bool {
	return a.data1[8]&(1<<5) != 0
}

// ErrOutputShort reports the output-short fault (data1[8] bit 6).
func (a *AxpertWorkingStatus) ErrOutputShort() bool {
	return a.data1[8]&(1<<6) != 0
}

// ErrInvererVoltageTooLow reports the inverter-voltage-too-low fault
// (data1[8] bit 7). Name typo preserved for API compatibility.
func (a *AxpertWorkingStatus) ErrInvererVoltageTooLow() bool {
	return a.data1[8]&(1<<7) != 0
}

// ErrInvererVoltageTooHight reports the inverter-voltage-too-high fault
// (data1[9] bit 0). Name typo preserved for API compatibility.
func (a *AxpertWorkingStatus) ErrInvererVoltageTooHight() bool {
	return a.data1[9]&(1<<0) != 0
}

// ErrOverTemperature — fault when combined with ErrInverterFault,
// otherwise a warning.
// NOTE(review): this tests the same bit (data1[9] bit 0) as
// ErrInvererVoltageTooHight, and nine getters share the 8 bits of
// data1[9] — this looks like a copy-paste off-by-one in the original bit
// layout; verify against the Axpert protocol spec before relying on it.
func (a *AxpertWorkingStatus) ErrOverTemperature() bool {
	return a.data1[9]&(1<<0) != 0
}

// ErrFanLocked — fault with ErrInverterFault, otherwise warning
// (data1[9] bit 1).
func (a *AxpertWorkingStatus) ErrFanLocked() bool {
	return a.data1[9]&(1<<1) != 0
}

// ErrBatteryVoltageHigh — fault with ErrInverterFault, otherwise warning
// (data1[9] bit 2).
func (a *AxpertWorkingStatus) ErrBatteryVoltageHigh() bool {
	return a.data1[9]&(1<<2) != 0
}

// ErrBatteryVoltageLow reports the battery-voltage-low warning
// (data1[9] bit 3).
func (a *AxpertWorkingStatus) ErrBatteryVoltageLow() bool {
	return a.data1[9]&(1<<3) != 0
}

// ErrOverCharge reports the over-charge warning (data1[9] bit 4).
func (a *AxpertWorkingStatus) ErrOverCharge() bool {
	return a.data1[9]&(1<<4) != 0
}

// ErrBatteryUnderShutDown reports the battery-under-shutdown warning
// (data1[9] bit 5).
func (a *AxpertWorkingStatus) ErrBatteryUnderShutDown() bool {
	return a.data1[9]&(1<<5) != 0
}

// ErrBatteryDerating reports the battery-derating warning (data1[9] bit 6).
func (a *AxpertWorkingStatus) ErrBatteryDerating() bool {
	return a.data1[9]&(1<<6) != 0
}

// ErrOverLoad — fault with ErrInverterFault, otherwise warning
// (data1[9] bit 7).
func (a *AxpertWorkingStatus) ErrOverLoad() bool {
	return a.data1[9]&(1<<7) != 0
}

// ErrERRPROMFault reports the EEPROM-fault warning (data1[10] bit 0).
func (a *AxpertWorkingStatus) ErrERRPROMFault() bool {
	return a.data1[10]&(1<<0) != 0
}

// ErrInverterOverCurrent reports the inverter over-current fault
// (data1[10] bit 1).
func (a *AxpertWorkingStatus) ErrInverterOverCurrent() bool {
	return a.data1[10]&(1<<1) != 0
}

// ErrInverterSoftFail reports the inverter soft-start fault
// (data1[10] bit 2).
func (a *AxpertWorkingStatus) ErrInverterSoftFail() bool {
	return a.data1[10]&(1<<2) != 0
}

// ErrSelfTestFail reports the self-test fault (data1[10] bit 3).
func (a *AxpertWorkingStatus) ErrSelfTestFail() bool {
	return a.data1[10]&(1<<3) != 0
}

// ErrOutputDCVoltageOver reports the output DC over-voltage fault
// (data1[10] bit 4).
func (a *AxpertWorkingStatus) ErrOutputDCVoltageOver() bool {
	return a.data1[10]&(1<<4) != 0
}

// ErrBatteryOpen reports the battery-open warning (data1[10] bit 5).
func (a *AxpertWorkingStatus) ErrBatteryOpen() bool {
	return a.data1[10]&(1<<5) != 0
}

// ErrCurrentSensorFail reports the current-sensor fault (data1[10] bit 6).
func (a *AxpertWorkingStatus) ErrCurrentSensorFail() bool {
	return a.data1[10]&(1<<6) != 0
}

// ErrBatteryShort reports the battery-short fault (data1[10] bit 7).
func (a *AxpertWorkingStatus) ErrBatteryShort() bool {
	return a.data1[10]&(1<<7) != 0
}

// ErrPowerLimit reports the power-limit warning (data1[11] bit 0).
func (a *AxpertWorkingStatus) ErrPowerLimit() bool {
	return a.data1[11]&(1<<0) != 0
}

// ErrPVVoltageHigh reports the PV-voltage-high warning (data1[11] bit 1).
func (a *AxpertWorkingStatus) ErrPVVoltageHigh() bool {
	return a.data1[11]&(1<<1) != 0
}

// ErrMPPTOverloadFault reports the MPPT overload fault flag
// (data1[11] bit 2; the original comment marked it "warn" — verify).
func (a *AxpertWorkingStatus) ErrMPPTOverloadFault() bool {
	return a.data1[11]&(1<<2) != 0
}

// ErrMPPTOverloadWarning reports the MPPT overload warning
// (data1[11] bit 3).
func (a *AxpertWorkingStatus) ErrMPPTOverloadWarning() bool {
	return a.data1[11]&(1<<3) != 0
}

// ErrBatteryTooLowToCharge reports the battery-too-low-to-charge warning
// (data1[11] bit 4).
func (a *AxpertWorkingStatus) ErrBatteryTooLowToCharge() bool {
	return a.data1[11]&(1<<4) != 0
}

// WorkingMode returns the single-character working-mode code stored at
// data1[13].
func (a *AxpertWorkingStatus) WorkingMode() string {
	return string(a.data1[13])
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.