code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package argparse
import (
"errors"
"regexp"
"strconv"
)
// ParameterMatcher selects one of the built-in matching algorithms
// understood by NewDefaultParameter.
type ParameterMatcher int

const (
	// StringMatcher returns the default implementation
	StringMatcher ParameterMatcher = iota
	// IntegerMatcher converts the input to an integer
	IntegerMatcher
	// AllMatcher matches everything leftover
	AllMatcher
)

var (
	// defaultMatcherRegExp matches a (possibly empty) leading run of
	// word characters, digits, hyphens and underscores.
	defaultMatcherRegExp = regexp.MustCompile(`^([-_\w\d]*)`)
)
// Parameter object: pairs a matcher, which splits the next token off
// the input, with a converter, which turns the matched text into the
// final typed value.
type Parameter struct {
	// matcher returns (matched text, remaining input, success).
	matcher func(string) (string, string, bool)
	// converter turns the matched text into the returned value.
	converter func(string) (interface{}, error)
}
// defaultMatcher consumes the leading word-like token from input and
// returns the token, the unconsumed remainder, and true. The pattern
// can match the empty string, so the match always succeeds.
func defaultMatcher(input string) (string, string, bool) {
	m := defaultMatcherRegExp.FindStringSubmatch(input)
	token, rest := m[1], input[len(m[0]):]
	return token, rest, true
}
// defaultConverter is the identity conversion: it returns the matched
// string unchanged and never fails.
func defaultConverter(input string) (interface{}, error) {
	var err error
	return input, err
}
// NewParameter creates a new parameter definition using the default
// word matcher and the identity converter.
func NewParameter() *Parameter {
	return &Parameter{
		matcher:   defaultMatcher,
		converter: defaultConverter,
	}
}
// NewDefaultParameter creates a new parameter definition using one of
// the built-in matcher presets selected by the ParameterMatcher value.
func NewDefaultParameter(matcher ParameterMatcher) *Parameter {
	param := NewParameter()
	switch matcher {
	case IntegerMatcher:
		// Consume a run of leading digits and convert it to an int.
		param.SetMatcherRegexp(regexp.MustCompile(`^(\d+)`))
		param.SetConverter(func(raw string) (interface{}, error) {
			return strconv.Atoi(raw)
		})
	case AllMatcher:
		// Greedily consume everything that is left over.
		param.SetMatcherRegexp(regexp.MustCompile(`(.*)`))
	}
	// StringMatcher (and any unknown value) keeps the defaults.
	return param
}
// Matches reports whether the input conforms to the expected format.
func (p *Parameter) Matches(input string) bool {
	_, _, matched := p.matcher(input)
	return matched
}
// Match splits the input into the matched value and the remaining
// string. The matched text is run through the converter before being
// returned; a converter failure is reported through the error result.
func (p *Parameter) Match(input string) (interface{}, string, error) {
	match, remaining, ok := p.matcher(input)
	if !ok {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return nil, remaining, errors.New("could not match the parameter")
	}
	obj, err := p.converter(match)
	return obj, remaining, err
}
// SetMatcher sets the matching algorithm for this parameter. The
// function receives the raw input and returns the matched text, the
// remaining input, and whether the match succeeded.
func (p *Parameter) SetMatcher(matcher func(string) (string, string, bool)) {
	p.matcher = matcher
}
// SetMatcherRegexp sets the matching algorithm using a regexp. The
// expression must contain at least one capture group: group 1 becomes
// the matched value and everything after the whole match is returned
// as the remainder.
func (p *Parameter) SetMatcherRegexp(re *regexp.Regexp) {
	p.SetMatcher(func(input string) (string, string, bool) {
		// One FindStringSubmatch call replaces the original
		// MatchString + FindStringSubmatch pair, which scanned the
		// input twice; a nil result means no match.
		matches := re.FindStringSubmatch(input)
		if matches == nil {
			return "", input, false
		}
		return matches[1], input[len(matches[0]):], true
	})
}
// SetConverter changes the convert function applied to matched text
// before it is returned from Match.
func (p *Parameter) SetConverter(converter func(string) (interface{}, error)) {
	p.converter = converter
}
package neuralnetwork
import (
"fmt"
"time"
)
//Model implements the model architecture: an ordered list of layers
//plus the training configuration (optimizer, loss, metrics) and
//bookkeeping collected while training.
type Model struct {
	layers                 []Layer   // ordered layers of the network
	name                   string    // human-readable model name
	optimizer              Optimizer // applies gradients after each training sample
	loss                   func([]float64, []float64) float64 // loss(predictions, targets)
	lossValues             []float64 // loss history across all training steps
	trainingDuration       time.Duration // wall-clock time of the last Train call
	modelMetrics           []Metrics // metrics reported during training
	trainDataX, trainDataY []float64
	callbacks              []Callback
	training               bool
	learningRate           float64
	trainingLog            TrainingLog
}
//Metrics is an interface that requires two functions, Measure and Name,
//and is passed to the model.Compile method. Measure receives
//(predictions, targets) and returns the metric value.
type Metrics interface {
	Measure([]float64, []float64) float64
	Name() string
}
//Optimizer interface requires an ApplyGradients function, invoked once
//per training sample. Pass an implementation to model.Compile.
type Optimizer interface {
	ApplyGradients()
}
//TrainingLog holds the model's accumulated training log messages.
type TrainingLog struct {
	logs []string
}
//Sequential returns a model built from the given layers and name.
func Sequential(layers []Layer, name string) *Model {
	m := &Model{
		layers: layers,
		name:   name,
	}
	return m
}
//Add appends a layer to the end of the model architecture and returns
//the model so calls can be chained.
func (m *Model) Add(layer Layer) *Model {
	// append grows the slice; the original indexed m.layers[len(m.layers)],
	// which always panics with "index out of range".
	m.layers = append(m.layers, layer)
	return m
}
//GetLayerByIndex returns the ith layer. The index is not bounds
//checked; an out-of-range index panics.
func (m *Model) GetLayerByIndex(index int) Layer {
	return m.layers[index]
}

//GetMetricsByIndex returns the index's model metric (panics if the
//index is out of range).
func (m *Model) GetMetricsByIndex(index int) Metrics {
	return m.modelMetrics[index]
}
//GetLayerByName returns the first layer whose Name matches the given
//name. When no layer matches, the first layer is returned as a
//fallback — NOTE(review): callers cannot distinguish a miss from a hit
//on layer 0; confirm this is intended.
func (m *Model) GetLayerByName(name string) Layer {
	for _, layer := range m.layers {
		if layer.Name() == name {
			return layer
		}
	}
	return m.layers[0]
}
//Compile compiles the model given the optimizer, loss function and the
//metrics to report during training. It must be called before Train.
func (m *Model) Compile(optimizer Optimizer, loss func([]float64, []float64) float64, ms []Metrics) {
	m.optimizer = optimizer
	m.loss = loss
	m.modelMetrics = ms
}
//Predict runs a forward pass through all layers in order and returns
//the final layer's outputs.
//
//NOTE(review): the values argument is currently unused — Layer.Call
//takes no input here, so feeding the inputs into the first layer must
//happen elsewhere; confirm against the Layer implementation.
func (m *Model) Predict(values []float64) []float64 {
	var outputs []float64
	for i := range m.layers {
		// The original also called m.layers[i+1].Call() before its
		// bounds check, which panicked on the last layer and did
		// redundant work on the rest; calling each layer once suffices.
		outputs = m.layers[i].Call()
	}
	return outputs
}
//Train trains the model given trainX and trainY data and the number of
//epochs. It keeps track of the defined metrics and prints the running
//loss every epoch, then prints the total training duration.
//It returns a map from metric name to the metric's value after the
//final epoch.
func (m *Model) Train(trainX, trainY []float64, epochs int) map[string]float64 {
	startTime := time.Now()
	metricsValues := make(map[string]float64, len(m.modelMetrics))
	// `i <= epochs` runs exactly `epochs` iterations; the original
	// `i < epochs` started at 1 and silently dropped one epoch.
	for i := 1; i <= epochs; i++ {
		for j := 0; j < len(trainX); j++ {
			lossValue := m.loss(m.Predict(trainX), trainY)
			m.lossValues = append(m.lossValues, lossValue)
			m.optimizer.ApplyGradients()
		}
		// NOTE(review): lossValues accumulates across epochs, so this
		// is the mean over the whole history, not just the current
		// epoch — confirm that is intended.
		avg := meanValue(m.lossValues)
		for _, met := range m.modelMetrics {
			metricsValues[met.Name()] = met.Measure(m.Predict(trainX), trainY)
		}
		fmt.Printf("Epoch: %d Loss:%.4f\n", i, avg)
	}
	endTime := time.Now()
	m.trainingDuration = endTime.Sub(startTime)
	fmt.Printf("Training duration: %s\n", m.trainingDuration.String())
	return metricsValues
}
//Summary prints the layer by layer summaary along with trainable parameters.
func (m *Model) Summary() {
var sum int
for i := range m.layers {
tp := m.layers[i].TrainableParameters()
sum += tp
fmt.Printf("name: %s trainable parameters: %d\n", m.layers[i].Name(), tp)
}
fmt.Println("Trainable parameters: ", sum)
} | nn/model.go | 0.818809 | 0.501709 | model.go | starcoder |
package randomizer
import (
"fmt"
"gopkg.in/yaml.v2"
)
// a prenode is the precursor to a graph node; its parents can be either
// strings (the names of other prenodes) or other prenodes. the main difference
// between a prenode and a graph node is that prenodes are trees, not graphs.
// string references to other prenodes become pointers when converting from
// prenodes to nodes, thus forming the graph.
type prenode struct {
	// parents holds either string names or nested *prenode values.
	parents []interface{}
	// nType selects the combination logic (and/or/count/rupees).
	nType nodeType
	// minCount is only meaningful for countNode entries.
	minCount int
}
// returns a new prenode which does not have parents, and which will
// remain false until it does.
func rootPrenode(parents ...interface{}) *prenode {
	pn := new(prenode)
	pn.nType = orNode
	pn.parents = parents
	return pn
}
// seasonsPrenodes and agesPrenodes hold the full logic graphs for the
// two games, built once at startup.
var seasonsPrenodes, agesPrenodes map[string]*prenode

func init() {
	// Load and merge the per-area logic files for Seasons.
	seasonsPrenodes = make(map[string]*prenode)
	appendPrenodes(seasonsPrenodes, loadLogic("rings.yaml"),
		loadLogic("seasons_items.yaml"), loadLogic("seasons_kill.yaml"),
		loadLogic("holodrum.yaml"), loadLogic("subrosia.yaml"),
		loadLogic("portals.yaml"), loadLogic("seasons_dungeons.yaml"))
	flattenNestedPrenodes(seasonsPrenodes)
	// Same for Ages.
	agesPrenodes = make(map[string]*prenode)
	appendPrenodes(agesPrenodes, loadLogic("rings.yaml"),
		loadLogic("ages_items.yaml"), loadLogic("ages_kill.yaml"),
		loadLogic("labrynna.yaml"), loadLogic("ages_dungeons.yaml"))
	flattenNestedPrenodes(agesPrenodes)
	// Ring metadata lives in romdata rather than the logic directory.
	err := yaml.Unmarshal(FSMustByte(false, "/romdata/rings.yaml"), &rings)
	if err != nil {
		panic(err)
	}
}
// add nested nodes to the map and turn their references into strings, adding
// an integer suffix to the successive parents of a node.
func flattenNestedPrenodes(nodes map[string]*prenode) {
	done := true
	for name, pn := range nodes {
		suffix := 0
		for i, parent := range pn.parents {
			switch parent := parent.(type) {
			case *prenode:
				// Hoist the nested node to the top level under a
				// generated name and refer to it by that name.
				suffix++
				subName := fmt.Sprintf("%s %d", name, suffix)
				pn.parents[i] = subName
				nodes[subName] = parent
				done = false
			}
		}
	}
	// recurse if nodes were added (hoisted nodes may themselves have
	// nested parents)
	if !done {
		flattenNestedPrenodes(nodes)
	}
}
// returns a shallow copy of all prenodes for the given game.
func getPrenodes(game int) map[string]*prenode {
	src := sora(game, seasonsPrenodes, agesPrenodes).(map[string]*prenode)
	dst := make(map[string]*prenode, len(src))
	for name, pn := range src {
		dst[name] = pn
	}
	return dst
}
// merges the given prenode maps into the first argument, panicking if
// the same key appears more than once.
func appendPrenodes(total map[string]*prenode, maps ...map[string]*prenode) {
	for _, src := range maps {
		for name, pn := range src {
			if _, exists := total[name]; exists {
				panic("duplicate logic key: " + name)
			}
			total[name] = pn
		}
	}
}
// loads a logic map from the embedded yaml file with the given name.
func loadLogic(filename string) map[string]*prenode {
	raw := make(map[string]interface{})
	if err := yaml.Unmarshal(
		FSMustByte(false, "/logic/"+filename), raw); err != nil {
		panic(err)
	}
	nodes := make(map[string]*prenode, len(raw))
	for name, spec := range raw {
		nodes[name] = loadNode(spec)
	}
	return nodes
}
// loads a node (and any of its explicit parents, recursively) from yaml.
func loadNode(v interface{}) *prenode {
	n := new(prenode)
	switch v := v.(type) {
	case []interface{}: // a plain list is an "and" node
		n.nType = andNode
		n.parents = make([]interface{}, len(v))
		for i, parent := range v {
			switch parent.(type) {
			case string:
				// string: a reference to another node by name
				n.parents[i] = parent
			default:
				// anything else: an inline nested node definition
				n.parents[i] = loadNode(parent)
			}
		}
	case map[interface{}]interface{}: // a map selects the node type by key
		switch {
		case v["or"] != nil:
			n.nType = orNode
			n.parents = loadParents(v["or"])
		case v["count"] != nil:
			// count: [minCount, parentName]
			n.nType = countNode
			n.minCount = v["count"].([]interface{})[0].(int)
			n.parents = make([]interface{}, 1)
			n.parents[0] = v["count"].([]interface{})[1].(string)
		case v["rupees"] != nil:
			n.nType = rupeesNode
			n.parents = loadParents(v["rupees"])
		default:
			panic(fmt.Sprintf("unknown logic type: %v", v))
		}
	}
	return n
}
// loads a node's parent list from yaml.
func loadParents(v interface{}) []interface{} {
	list, isList := v.([]interface{})
	if !isList {
		// single non-list value: treat it as one nested node.
		return []interface{}{loadNode(v)}
	}
	// explicit list: strings stay as name references, anything else is
	// a nested node definition.
	parents := make([]interface{}, len(list))
	for i, entry := range list {
		if _, isString := entry.(string); isString {
			parents[i] = entry
		} else {
			parents[i] = loadNode(entry)
		}
	}
	return parents
}
// rupeeValues maps item/check names to the number of rupees they are
// worth for rupeesNode logic.
var rupeeValues = map[string]int{
	"rupees, 1":   1,
	"rupees, 5":   5,
	"rupees, 10":  10,
	"rupees, 20":  20,
	"rupees, 30":  30,
	"rupees, 50":  50,
	"rupees, 100": 100,
	"rupees, 200": 200,
	"goron mountain old man":      300,
	"western coast old man":       300,
	"holodrum plain east old man": 200,
	"horon village old man":       100,
	"north horon old man":         100,
	// rng is involved: each rupee is worth 1, 5, 10, or 20.
	// these totals are about 2 standard deviations below mean.
	"d2 rupee room": 150,
	"d6 rupee room": 90,
}
package phomath
import "math"
// numMatrix3Values is the number of elements in a 3x3 matrix.
const numMatrix3Values = 3 * 3

// NewMatrix3 creates a new three-dimensional matrix. Argument is an
// optional Matrix3 to copy from; pass nil for an identity matrix.
func NewMatrix3(from *Matrix3) *Matrix3 {
	m := &Matrix3{}
	if from == nil {
		return m.Identity()
	}
	return m.Copy(from)
}
// Matrix3 is a three-dimensional matrix stored as a flat array of 9
// float64 values. Indices 6-8 carry the translation in the 2D-affine
// operations (see Translate) — presumably column-major layout; confirm
// against the consuming renderer.
type Matrix3 struct {
	Values [numMatrix3Values]float64
}
// Clone makes a clone of this Matrix3.
func (m *Matrix3) Clone() *Matrix3 {
	return NewMatrix3(m)
}
// Copy the values of a given Matrix into this Matrix.
func (m *Matrix3) Copy(other *Matrix3) *Matrix3 {
	// Go arrays are values: a single assignment copies all nine
	// elements, replacing the element-by-element unrolled copy.
	m.Values = other.Values
	return m
}
// Set is an alias for Matrix3.Copy
func (m *Matrix3) Set(other *Matrix3) *Matrix3 {
	return m.Copy(other)
}
// Identity resets this Matrix to an identity (default) matrix.
func (m *Matrix3) Identity() *Matrix3 {
	// A composite-literal assignment replaces nine individual stores.
	m.Values = [numMatrix3Values]float64{
		1, 0, 0,
		0, 1, 0,
		0, 0, 1,
	}
	return m
}
// FromMatrix4 copies the upper-left 3x3 block of the given Matrix4
// into this Matrix3 (the fourth row and column are dropped).
func (m *Matrix3) FromMatrix4(m4 *Matrix4) *Matrix3 {
	m.Values[0] = m4.Values[0]
	m.Values[1] = m4.Values[1]
	m.Values[2] = m4.Values[2]
	m.Values[3] = m4.Values[4]
	m.Values[4] = m4.Values[5]
	m.Values[5] = m4.Values[6]
	m.Values[6] = m4.Values[8]
	m.Values[7] = m4.Values[9]
	m.Values[8] = m4.Values[10]
	return m
}
// FromSlice sets the values of this Matrix from the given slice. Extra
// elements are ignored; missing elements leave the existing values.
func (m *Matrix3) FromSlice(s []float64) *Matrix3 {
	// copy stops at the shorter of the two lengths and tolerates a nil
	// slice, matching the original bounds-checked loop.
	copy(m.Values[:], s)
	return m
}
// Transpose this Matrix in place.
func (m *Matrix3) Transpose() *Matrix3 {
	// Take a pointer to the backing array: the original `v := m.Values`
	// copied the [9]float64 by value, so the swaps were applied to a
	// temporary and the matrix was never modified.
	v := &m.Values
	v[1], v[2], v[3], v[5], v[6], v[7] = v[3], v[6], v[1], v[7], v[2], v[5]
	return m
}
// Invert this Matrix in place. Returns nil when the matrix is singular
// (determinant magnitude below Epsilon); the values are left untouched
// in that case.
func (m *Matrix3) Invert() *Matrix3 {
	// Work through a pointer: `a := m.Values` would copy the array and
	// every write below would be lost.
	a := &m.Values
	a00, a01, a02 := a[0], a[1], a[2]
	a10, a11, a12 := a[3], a[4], a[5]
	a20, a21, a22 := a[6], a[7], a[8]
	b01 := a22*a11 - a12*a21
	b11 := -a22*a10 + a12*a20
	b21 := a21*a10 - a11*a20
	// calculate the determinant; compare its magnitude so matrices with
	// a negative determinant (which are invertible) are not rejected
	det := a00*b01 + a01*b11 + a02*b21
	if math.Abs(det) < Epsilon {
		return nil
	}
	det = 1 / det
	a[0] = b01 * det
	a[1] = (-a22*a01 + a02*a21) * det
	a[2] = (a12*a01 - a02*a11) * det
	a[3] = b11 * det
	a[4] = (a22*a00 - a02*a20) * det
	a[5] = (-a12*a00 + a02*a10) * det
	a[6] = b21 * det
	a[7] = (-a21*a00 + a01*a20) * det
	a[8] = (a11*a00 - a01*a10) * det
	return m
}
// Adjoint calculates the adjoint, or adjugate, of this Matrix in place.
func (m *Matrix3) Adjoint() *Matrix3 {
	// Pointer into the backing array; copying it by value (as the
	// original did) discarded the result.
	a := &m.Values
	a00, a01, a02 := a[0], a[1], a[2]
	a10, a11, a12 := a[3], a[4], a[5]
	a20, a21, a22 := a[6], a[7], a[8]
	// All inputs are captured above, so the in-place writes are safe.
	a[0] = a11*a22 - a12*a21
	a[1] = a02*a21 - a01*a22
	a[2] = a01*a12 - a02*a11
	a[3] = a12*a20 - a10*a22
	a[4] = a00*a22 - a02*a20
	a[5] = a02*a10 - a00*a12
	a[6] = a10*a21 - a11*a20
	a[7] = a01*a20 - a00*a21
	a[8] = a00*a11 - a01*a10
	return m
}
// Determinant calculates the determinant of this Matrix.
func (m *Matrix3) Determinant() float64 {
	v := &m.Values
	m00, m01, m02 := v[0], v[1], v[2]
	m10, m11, m12 := v[3], v[4], v[5]
	m20, m21, m22 := v[6], v[7], v[8]
	// Cofactor expansion along the first row.
	return m00*(m22*m11-m12*m21) +
		m01*(m12*m20-m22*m10) +
		m02*(m21*m10-m11*m20)
}
// Multiply this Matrix by the given Matrix in place.
func (m *Matrix3) Multiply(other *Matrix3) *Matrix3 {
	// Pointers into both backing arrays: the original copied the arrays
	// by value, so the product was written to a temporary and lost.
	a, b := &m.Values, &other.Values
	// All inputs are read into locals before any write, so
	// m.Multiply(m) remains safe.
	a00, b00 := a[0], b[0]
	a01, b01 := a[1], b[1]
	a02, b02 := a[2], b[2]
	a10, b10 := a[3], b[3]
	a11, b11 := a[4], b[4]
	a12, b12 := a[5], b[5]
	a20, b20 := a[6], b[6]
	a21, b21 := a[7], b[7]
	a22, b22 := a[8], b[8]
	a[0] = b00*a00 + b01*a10 + b02*a20
	a[1] = b00*a01 + b01*a11 + b02*a21
	a[2] = b00*a02 + b01*a12 + b02*a22
	a[3] = b10*a00 + b11*a10 + b12*a20
	a[4] = b10*a01 + b11*a11 + b12*a21
	a[5] = b10*a02 + b11*a12 + b12*a22
	a[6] = b20*a00 + b21*a10 + b22*a20
	a[7] = b20*a01 + b21*a11 + b22*a21
	a[8] = b20*a02 + b21*a12 + b22*a22
	return m
}
// Translate this Matrix in place using the given Vector.
func (m *Matrix3) Translate(v Vector2Like) *Matrix3 {
	// Pointer, not a value copy of the array, so the writes stick.
	a := &m.Values
	x, y := v.XY()
	a[6] = x*a[0] + y*a[3] + a[6]
	a[7] = x*a[1] + y*a[4] + a[7]
	a[8] = x*a[2] + y*a[5] + a[8]
	return m
}
// Rotate applies a rotation transformation (in radians) to this Matrix
// in place.
func (m *Matrix3) Rotate(radians float64) *Matrix3 {
	// Pointer, not a value copy of the array, so the writes stick.
	a := &m.Values
	a00, a01, a02 := a[0], a[1], a[2]
	a10, a11, a12 := a[3], a[4], a[5]
	s, c := math.Sin(radians), math.Cos(radians)
	a[0] = c*a00 + s*a10
	a[1] = c*a01 + s*a11
	a[2] = c*a02 + s*a12
	a[3] = c*a10 - s*a00
	a[4] = c*a11 - s*a01
	a[5] = c*a12 - s*a02
	return m
}
// Scale applies a scalar transformation to this Matrix in place.
func (m *Matrix3) Scale(v Vector2Like) *Matrix3 {
	// Pointer, not a value copy of the array, so the writes stick.
	a := &m.Values
	x, y := v.XY()
	a[0] = x * a[0]
	a[1] = x * a[1]
	a[2] = x * a[2]
	a[3] = y * a[3]
	a[4] = y * a[4]
	a[5] = y * a[5]
	return m
}
// FromQuaternion sets the values of this Matrix from the given
// Quaternion (assumed to represent a rotation).
func (m *Matrix3) FromQuaternion(q *Quaternion) *Matrix3 {
	x, y, z, w := q.X, q.Y, q.Z, q.W
	x2, y2, z2 := x+x, y+y, z+z
	xx, xy, xz := x*x2, x*y2, x*z2
	yy, yz, zz := y*y2, y*z2, z*z2
	wx, wy, wz := w*x2, w*y2, w*z2
	// Pointer to the backing array: the original `out := m.Values`
	// copied the array and all writes were discarded.
	out := &m.Values
	out[0] = 1 - (yy + zz)
	out[3] = xy + wz
	out[6] = xz - wy
	out[1] = xy - wz
	out[4] = 1 - (xx + zz)
	out[7] = yz + wx
	out[2] = xz + wy
	out[5] = yz - wx
	out[8] = 1 - (xx + yy)
	return m
}
// NormalFromMatrix4 sets the values of this Matrix3 to be normalized from the given Matrix4.
func (m *Matrix3) NormalFromMatrix4(m4 *Matrix4) *Matrix3 {
a, out := m4.Values, m.Values
a00 := a[0]
a01 := a[1]
a02 := a[2]
a03 := a[3]
a10 := a[4]
a11 := a[5]
a12 := a[6]
a13 := a[7]
a20 := a[8]
a21 := a[9]
a22 := a[10]
a23 := a[11]
a30 := a[12]
a31 := a[13]
a32 := a[14]
a33 := a[15]
b00 := a00*a11 - a01*a10
b01 := a00*a12 - a02*a10
b02 := a00*a13 - a03*a10
b03 := a01*a12 - a02*a11
b04 := a01*a13 - a03*a11
b05 := a02*a13 - a03*a12
b06 := a20*a31 - a21*a30
b07 := a20*a32 - a22*a30
b08 := a20*a33 - a23*a30
b09 := a21*a32 - a22*a31
b10 := a21*a33 - a23*a31
b11 := a22*a33 - a23*a32
// calculate the determinant
det := b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06
if det < Epsilon {
return nil
}
det = 1 / det
out[0] = (a11*b11 - a12*b10 + a13*b09) * det
out[1] = (a12*b08 - a10*b11 - a13*b07) * det
out[2] = (a10*b10 - a11*b08 + a13*b06) * det
out[3] = (a02*b10 - a01*b11 - a03*b09) * det
out[4] = (a00*b11 - a02*b08 + a03*b07) * det
out[5] = (a01*b08 - a00*b10 - a03*b06) * det
out[6] = (a31*b05 - a32*b04 + a33*b03) * det
out[7] = (a32*b02 - a30*b05 - a33*b01) * det
out[8] = (a30*b04 - a31*b02 + a33*b00) * det
return m
} | phomath/matrix3.go | 0.887241 | 0.693489 | matrix3.go | starcoder |
package pkg
import (
"math"
)
// EarthRadiusInMeters is the mean Earth radius used by the
// great-circle calculations in this package.
const EarthRadiusInMeters = 6372797.560856

// degreesToRadians converts an angle from degrees to radians.
func degreesToRadians(degrees float64) float64 {
	// The expression is already float64; the original wrapped it in a
	// redundant float64(...) conversion.
	return degrees * math.Pi / 180.0
}
// radiansToDegrees converts an angle from radians to degrees.
func radiansToDegrees(radians float64) float64 {
	// The expression is already float64; the original wrapped it in a
	// redundant float64(...) conversion.
	return radians * 180.0 / math.Pi
}
// getPointAhead returns the coordinate reached by travelling
// distanceMeters from latLng along the given azimuth (degrees,
// clockwise from north). Input and output are {lat, lng} pairs in
// degrees.
func getPointAhead(latLng []float64, distanceMeters float64, azimuth float64) []float64 {
	angularDist := distanceMeters / EarthRadiusInMeters
	bearing := degreesToRadians(azimuth)
	lat1 := degreesToRadians(latLng[0])
	lng1 := degreesToRadians(latLng[1])
	sinLat2 := math.Sin(lat1)*math.Cos(angularDist) +
		math.Cos(lat1)*math.Sin(angularDist)*math.Cos(bearing)
	lat2 := math.Asin(sinLat2)
	y := math.Sin(bearing) * math.Sin(angularDist) * math.Cos(lat1)
	x := math.Cos(angularDist) - math.Sin(lat1)*math.Sin(lat2)
	lng2 := lng1 + math.Atan2(y, x)
	// normalize the longitude into [-pi, pi)
	lng2 = math.Mod(lng2+3*math.Pi, 2*math.Pi) - math.Pi
	return []float64{radiansToDegrees(lat2), radiansToDegrees(lng2)}
}
// pointPlusDistanceEast returns the point the given distance (meters)
// due east (azimuth 90 degrees) of fromCoordinate.
func pointPlusDistanceEast(fromCoordinate []float64, distance float64) []float64 {
	return getPointAhead(fromCoordinate, distance, 90.0)
}

// pointPlusDistanceNorth returns the point the given distance (meters)
// due north (azimuth 0 degrees) of fromCoordinate.
func pointPlusDistanceNorth(fromCoordinate []float64, distance float64) []float64 {
	return getPointAhead(fromCoordinate, distance, 0.0)
}
// metersToGeoPoint converts planar offsets in meters, measured from the
// origin at lat/lng (0, 0), into a geographic {lat, lng} coordinate.
func metersToGeoPoint(latAsMeters float64, lonAsMeters float64) []float64 {
	origin := []float64{0.0, 0.0}
	east := pointPlusDistanceEast(origin, lonAsMeters)
	return pointPlusDistanceNorth(east, latAsMeters)
}
// latToMeter converts a latitude (degrees) into the signed distance in
// meters from the equator along a meridian.
func latToMeter(latitude float64) float64 {
	distance := haversineDistance([]float64{latitude, 0.0}, []float64{0.0, 0.0})
	// haversineDistance is non-negative; restore the sign.
	if latitude < 0 {
		distance *= -1
	}
	return distance
}

// lngToMeter converts a longitude (degrees) into the signed distance in
// meters along the equator. The longitude is passed in the latitude
// slot of the point: on a sphere a degree of longitude at the equator
// spans the same distance as a degree of latitude, so the result is
// numerically identical.
func lngToMeter(longitude float64) float64 {
	distance := haversineDistance([]float64{longitude, 0.0}, []float64{0.0, 0.0})
	if longitude < 0 {
		distance *= -1
	}
	return distance
}
func haversineDistance(aInit []float64, bInit []float64) float64 {
a := make([]float64, len(aInit))
copy(a, aInit)
b := make([]float64, len(bInit))
copy(b, bInit)
a[0] = a[0] / 180.0 * math.Pi
a[1] = a[1] / 180.0 * math.Pi
b[0] = b[0] / 180.0 * math.Pi
b[1] = b[1] / 180.0 * math.Pi
return math.Acos(math.Sin(a[0])*math.Sin(b[0])+math.Cos(a[0])*math.Cos(b[0])*math.Cos(a[1]-b[1])) * EarthRadiusInMeters
} | pkg/geo.go | 0.857545 | 0.753058 | geo.go | starcoder |
package dataframe
import (
"fmt"
"strconv"
"time"
)
// String defines the string data type; the empty string is treated as
// the nil value (see IsNil).
type String string
// NewStringValue converts a supported value into a String Value.
// Supported inputs: string, []byte, bool, all signed and unsigned
// integer widths, float32/float64, time.Time and time.Duration. Any
// other type panics.
func NewStringValue(v interface{}) Value {
	switch t := v.(type) {
	case string:
		return String(t)
	case []byte:
		return String(t)
	case bool:
		return String(fmt.Sprintf("%v", t))
	case int:
		return String(strconv.FormatInt(int64(t), 10))
	case int8:
		return String(strconv.FormatInt(int64(t), 10))
	case int16:
		return String(strconv.FormatInt(int64(t), 10))
	case int32:
		return String(strconv.FormatInt(int64(t), 10))
	case int64:
		return String(strconv.FormatInt(t, 10))
	case uint:
		return String(strconv.FormatUint(uint64(t), 10))
	case uint8: // byte is an alias for uint8
		return String(strconv.FormatUint(uint64(t), 10))
	case uint16:
		return String(strconv.FormatUint(uint64(t), 10))
	case uint32:
		return String(strconv.FormatUint(uint64(t), 10))
	case uint64:
		// uint64 was missing from the original switch, so such values
		// panicked even though every other integer width was handled.
		return String(strconv.FormatUint(t, 10))
	case float32:
		// bitSize 32 yields the shortest string that round-trips as a
		// float32; formatting with bitSize 64 printed float64 noise
		// (e.g. float32(0.1) became "0.10000000149011612").
		return String(strconv.FormatFloat(float64(t), 'f', -1, 32))
	case float64:
		return String(strconv.FormatFloat(t, 'f', -1, 64))
	case time.Time:
		return String(t.String())
	case time.Duration:
		return String(t.String())
	default:
		panic(fmt.Errorf("%v(%T) is not supported yet", v, v))
	}
}
// NewStringValueNil returns the empty-string value, which IsNil reports
// as nil.
func NewStringValueNil() Value {
	var empty String
	return empty
}
// String returns the underlying string; the ok flag is always true.
func (s String) String() (string, bool) {
	return string(s), true
}

// Int64 parses the value as a base-10 signed 64-bit integer; the bool
// reports whether parsing succeeded.
func (s String) Int64() (int64, bool) {
	iv, err := strconv.ParseInt(string(s), 10, 64)
	return iv, err == nil
}

// Uint64 parses the value as a base-10 unsigned 64-bit integer.
func (s String) Uint64() (uint64, bool) {
	iv, err := strconv.ParseUint(string(s), 10, 64)
	return iv, err == nil
}

// Float64 parses the value as a 64-bit float.
func (s String) Float64() (float64, bool) {
	f, err := strconv.ParseFloat(string(s), 64)
	return f, err == nil
}

// Time parses the value as a time in the given layout.
func (s String) Time(layout string) (time.Time, bool) {
	t, err := time.Parse(layout, string(s))
	return t, err == nil
}

// Duration parses the value with time.ParseDuration (e.g. "1h30m").
func (s String) Duration() (time.Duration, bool) {
	d, err := time.ParseDuration(string(s))
	return d, err == nil
}

// IsNil reports whether the value is empty (the nil representation).
func (s String) IsNil() bool {
	return len(s) == 0
}

// EqualTo reports whether v is also a String with the same contents.
func (s String) EqualTo(v Value) bool {
	tv, ok := v.(String)
	return ok && s == tv
}

// Copy returns the value itself; String is immutable, so no deep copy
// is needed.
func (s String) Copy() Value {
	return s
}
package firewall
import (
"encoding/json"
"fmt"
"testing"
"github.com/ingrammicro/cio/api/types"
"github.com/ingrammicro/cio/utils"
"github.com/stretchr/testify/assert"
)
// GetPolicyMocked test mocked function: verifies GetPolicy returns the
// policy served by the mocked API.
func GetPolicyMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// to json
	dIn, err := json.Marshal(policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Get", APIPathCloudFirewallProfile).Return(dIn, 200, nil)
	policyOut, err := ds.GetPolicy()
	// NOTE(review): copying the output Md5 into the input before the
	// comparison excludes Md5 from the equality check — confirm that
	// masking Md5 differences is intended.
	policyIn.Md5 = policyOut.Md5
	assert.Nil(err, "Error getting firewall policy")
	assert.Equal(*policyIn, *policyOut, "GetPolicy returned different policies")
	return policyOut
}

// GetPolicyFailErrMocked test mocked function: the mocked transport
// returns an error, which GetPolicy must propagate.
func GetPolicyFailErrMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// to json
	dIn, err := json.Marshal(policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Get", APIPathCloudFirewallProfile).Return(dIn, 200, fmt.Errorf("mocked error"))
	policyOut, err := ds.GetPolicy()
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(policyOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return policyOut
}

// GetPolicyFailStatusMocked test mocked function: a non-2xx HTTP status
// (499) must surface as an error mentioning the status code.
func GetPolicyFailStatusMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// to json
	dIn, err := json.Marshal(policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Get", APIPathCloudFirewallProfile).Return(dIn, 499, nil)
	policyOut, err := ds.GetPolicy()
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(policyOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return policyOut
}

// GetPolicyFailJSONMocked test mocked function: a malformed JSON body
// must surface as an unmarshalling error.
func GetPolicyFailJSONMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Get", APIPathCloudFirewallProfile).Return(dIn, 200, nil)
	policyOut, err := ds.GetPolicy()
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(policyOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return policyOut
}

// AddPolicyRuleMocked test mocked function: verifies AddPolicyRule
// POSTs the rule and returns the rule echoed by the mocked API.
func AddPolicyRuleMocked(t *testing.T, policyRuleIn *types.PolicyRule) *types.PolicyRule {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyRuleIn)
	assert.Nil(err, "Firewall test data corrupted")
	// to json
	dOut, err := json.Marshal(policyRuleIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Post", APIPathCloudFirewallProfileRules, mapIn).Return(dOut, 200, nil)
	policyRuleOut, err := ds.AddPolicyRule(mapIn)
	assert.Nil(err, "Error adding policy rule")
	assert.Equal(policyRuleIn, policyRuleOut, "AddPolicyRule returned different rules")
	return policyRuleOut
}

// AddPolicyRuleFailErrMocked test mocked function: transport error on
// POST must be propagated.
func AddPolicyRuleFailErrMocked(t *testing.T, policyRuleIn *types.PolicyRule) *types.PolicyRule {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyRuleIn)
	assert.Nil(err, "Firewall test data corrupted")
	// to json
	dOut, err := json.Marshal(policyRuleIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Post", APIPathCloudFirewallProfileRules, mapIn).Return(dOut, 200, fmt.Errorf("mocked error"))
	policyRuleOut, err := ds.AddPolicyRule(mapIn)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(policyRuleOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return policyRuleOut
}

// AddPolicyRuleFailStatusMocked test mocked function: HTTP 499 on POST
// must surface as a status-code error.
func AddPolicyRuleFailStatusMocked(t *testing.T, policyRuleIn *types.PolicyRule) *types.PolicyRule {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyRuleIn)
	assert.Nil(err, "Firewall test data corrupted")
	// to json
	dOut, err := json.Marshal(policyRuleIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Post", APIPathCloudFirewallProfileRules, mapIn).Return(dOut, 499, nil)
	policyRuleOut, err := ds.AddPolicyRule(mapIn)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(policyRuleOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return policyRuleOut
}

// AddPolicyRuleFailJSONMocked test mocked function: malformed JSON in
// the POST response must surface as an unmarshalling error.
func AddPolicyRuleFailJSONMocked(t *testing.T, policyRuleIn *types.PolicyRule) *types.PolicyRule {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyRuleIn)
	assert.Nil(err, "Firewall test data corrupted")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Post", APIPathCloudFirewallProfileRules, mapIn).Return(dIn, 200, nil)
	policyRuleOut, err := ds.AddPolicyRule(mapIn)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(policyRuleOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return policyRuleOut
}

// UpdatePolicyMocked test mocked function: verifies UpdatePolicy PUTs
// the policy and returns the policy echoed by the mocked API.
func UpdatePolicyMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// to json
	dOut, err := json.Marshal(policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Put", APIPathCloudFirewallProfile, mapIn).Return(dOut, 200, nil)
	policyOut, err := ds.UpdatePolicy(mapIn)
	assert.Nil(err, "Error updating policy")
	assert.Equal(policyIn, policyOut, "UpdatePolicy returned different policies")
	return policyOut
}

// UpdatePolicyFailErrMocked test mocked function: transport error on
// PUT must be propagated.
func UpdatePolicyFailErrMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// to json
	dOut, err := json.Marshal(policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Put", APIPathCloudFirewallProfile, mapIn).Return(dOut, 200, fmt.Errorf("mocked error"))
	policyOut, err := ds.UpdatePolicy(mapIn)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(policyOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return policyOut
}

// UpdatePolicyFailStatusMocked test mocked function: HTTP 499 on PUT
// must surface as a status-code error.
func UpdatePolicyFailStatusMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// to json
	dOut, err := json.Marshal(policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// call service
	cs.On("Put", APIPathCloudFirewallProfile, mapIn).Return(dOut, 499, nil)
	policyOut, err := ds.UpdatePolicy(mapIn)
	assert.NotNil(err, "We are expecting an status code error")
	assert.Nil(policyOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return policyOut
}

// UpdatePolicyFailJSONMocked test mocked function: malformed JSON in
// the PUT response must surface as an unmarshalling error.
func UpdatePolicyFailJSONMocked(t *testing.T, policyIn *types.Policy) *types.Policy {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewFirewallService(cs)
	assert.Nil(err, "Couldn't load firewall service")
	assert.NotNil(ds, "Firewall service not instanced")
	// convertMap
	mapIn, err := utils.ItemConvertParams(*policyIn)
	assert.Nil(err, "Firewall test data corrupted")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Put", APIPathCloudFirewallProfile, mapIn).Return(dIn, 200, nil)
	policyOut, err := ds.UpdatePolicy(mapIn)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(policyOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return policyOut
}
// Package gl defines tge-gl API
package gl
import (
binary "encoding/binary"
unsafe "unsafe"
tge "github.com/thommil/tge"
)
// Name name of the plugin
const Name = "gl"
var _pluginInstance = &plugin{}
var nativeEndian binary.ByteOrder
func init() {
tge.Register(_pluginInstance)
buf := [2]byte{}
*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)
switch buf {
case [2]byte{0xAB, 0xCD}:
nativeEndian = binary.BigEndian
default:
nativeEndian = binary.LittleEndian
}
}
func (p *plugin) GetName() string {
return Name
}
// Byte buffer array singleton, allocate 1mB at startup
var byteArrayBuffer = make([]byte, 0)
var byteArrayBufferExtendFactor = 1
func getByteArrayBuffer(size int) []byte {
if size > len(byteArrayBuffer) {
for (1024 * 1024 * byteArrayBufferExtendFactor) < size {
byteArrayBufferExtendFactor++
}
byteArrayBuffer = make([]byte, (1024 * 1024 * byteArrayBufferExtendFactor))
}
return byteArrayBuffer[:size]
}
// Uint16ToBytes convert uint16 array to byte array
// depending on host endianness
func Uint16ToBytes(values []uint16) []byte {
b := getByteArrayBuffer(2 * len(values))
if nativeEndian == binary.LittleEndian {
for i, v := range values {
u := *(*uint16)(unsafe.Pointer(&v))
b[2*i+0] = byte(u)
b[2*i+1] = byte(u >> 8)
}
} else {
for i, v := range values {
u := *(*uint16)(unsafe.Pointer(&v))
b[2*i+0] = byte(u >> 8)
b[2*i+1] = byte(u)
}
}
return b
}
// Uint32ToBytes convert uint32 array to byte array
// depending on host endianness
func Uint32ToBytes(values []uint32) []byte {
b := getByteArrayBuffer(4 * len(values))
if nativeEndian == binary.LittleEndian {
for i, v := range values {
u := *(*uint32)(unsafe.Pointer(&v))
b[2*i+0] = byte(u)
b[2*i+1] = byte(u >> 8)
b[4*i+2] = byte(u >> 16)
b[4*i+3] = byte(u >> 24)
}
} else {
for i, v := range values {
u := *(*uint32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u >> 24)
b[4*i+1] = byte(u >> 16)
b[4*i+2] = byte(u >> 8)
b[4*i+3] = byte(u)
}
}
return b
}
// Int16ToBytes convert int16 array to byte array
// depending on host endianness
func Int16ToBytes(values []uint16) []byte {
b := getByteArrayBuffer(2 * len(values))
if nativeEndian == binary.LittleEndian {
for i, v := range values {
u := *(*int16)(unsafe.Pointer(&v))
b[2*i+0] = byte(u)
b[2*i+1] = byte(u >> 8)
}
} else {
for i, v := range values {
u := *(*int16)(unsafe.Pointer(&v))
b[2*i+0] = byte(u >> 8)
b[2*i+1] = byte(u)
}
}
return b
}
// Int32ToBytes convert int32 array to byte array
// depending on host endianness
func Int32ToBytes(values []uint32) []byte {
b := getByteArrayBuffer(4 * len(values))
if nativeEndian == binary.LittleEndian {
for i, v := range values {
u := *(*int32)(unsafe.Pointer(&v))
b[2*i+0] = byte(u)
b[2*i+1] = byte(u >> 8)
b[4*i+2] = byte(u >> 16)
b[4*i+3] = byte(u >> 24)
}
} else {
for i, v := range values {
u := *(*int32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u >> 24)
b[4*i+1] = byte(u >> 16)
b[4*i+2] = byte(u >> 8)
b[4*i+3] = byte(u)
}
}
return b
}
// Float32ToBytes convert float32 array to byte array
// depending on host endianness
func Float32ToBytes(values []float32) []byte {
b := getByteArrayBuffer(4 * len(values))
if nativeEndian == binary.LittleEndian {
for i, v := range values {
u := *(*uint32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u)
b[4*i+1] = byte(u >> 8)
b[4*i+2] = byte(u >> 16)
b[4*i+3] = byte(u >> 24)
}
} else {
for i, v := range values {
u := *(*uint32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u >> 24)
b[4*i+1] = byte(u >> 16)
b[4*i+2] = byte(u >> 8)
b[4*i+3] = byte(u)
}
}
return b
}
// Float64ToBytes convert float64 array to byte array
// depending on host endianness
func Float64ToBytes(values []float64) []byte {
b := getByteArrayBuffer(8 * len(values))
if nativeEndian == binary.LittleEndian {
for i, v := range values {
u := *(*uint64)(unsafe.Pointer(&v))
b[8*i+0] = byte(u)
b[8*i+1] = byte(u >> 8)
b[8*i+2] = byte(u >> 16)
b[8*i+3] = byte(u >> 24)
b[8*i+4] = byte(u >> 32)
b[8*i+5] = byte(u >> 40)
b[8*i+6] = byte(u >> 48)
b[8*i+7] = byte(u >> 56)
}
} else {
for i, v := range values {
u := *(*uint64)(unsafe.Pointer(&v))
b[8*i+0] = byte(u >> 56)
b[8*i+1] = byte(u >> 48)
b[8*i+2] = byte(u >> 40)
b[8*i+3] = byte(u >> 32)
b[8*i+4] = byte(u >> 24)
b[8*i+5] = byte(u >> 16)
b[8*i+6] = byte(u >> 8)
b[8*i+7] = byte(u)
}
}
return b
}
// PointerToBytes allows to revover Byte[] from a pointer, useful for ports (ex: G3N)
func PointerToBytes(data interface{}, size int) []byte {
switch data.(type) {
case *uint8:
b := getByteArrayBuffer(size)
for i := 0; i < size; i++ {
b[i] = *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*uint8))) + uintptr(i)))
}
return b
case *uint16:
b := getByteArrayBuffer(2 * size)
if nativeEndian == binary.LittleEndian {
for i := 0; i < size; i++ {
v := *(*uint16)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*uint16))) + uintptr(i*4)))
u := *(*uint16)(unsafe.Pointer(&v))
b[2*i+0] = byte(u)
b[2*i+1] = byte(u >> 8)
}
} else {
for i := 0; i < size; i++ {
v := *(*uint16)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*uint16))) + uintptr(i*4)))
u := *(*uint16)(unsafe.Pointer(&v))
b[2*i+0] = byte(u >> 8)
b[2*i+1] = byte(u)
}
}
return b
case *uint32:
b := getByteArrayBuffer(4 * size)
if nativeEndian == binary.LittleEndian {
for i := 0; i < size; i++ {
v := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*uint32))) + uintptr(i*4)))
u := *(*uint32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u)
b[4*i+1] = byte(u >> 8)
b[4*i+2] = byte(u >> 16)
b[4*i+3] = byte(u >> 24)
}
} else {
for i := 0; i < size; i++ {
v := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*uint32))) + uintptr(i*4)))
u := *(*uint32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u >> 24)
b[4*i+1] = byte(u >> 16)
b[4*i+2] = byte(u >> 8)
b[4*i+3] = byte(u)
}
}
return b
case *float32:
b := getByteArrayBuffer(4 * size)
if nativeEndian == binary.LittleEndian {
for i := 0; i < size; i++ {
v := *(*float32)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*float32))) + uintptr(i*4)))
u := *(*uint32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u)
b[4*i+1] = byte(u >> 8)
b[4*i+2] = byte(u >> 16)
b[4*i+3] = byte(u >> 24)
}
} else {
for i := 0; i < size; i++ {
v := *(*float32)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*float32))) + uintptr(i*4)))
u := *(*uint32)(unsafe.Pointer(&v))
b[4*i+0] = byte(u >> 24)
b[4*i+1] = byte(u >> 16)
b[4*i+2] = byte(u >> 8)
b[4*i+3] = byte(u)
}
}
return b
case *float64:
b := getByteArrayBuffer(8 * size)
if nativeEndian == binary.LittleEndian {
for i := 0; i < size; i++ {
v := *(*float64)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*float64))) + uintptr(i*8)))
u := *(*uint64)(unsafe.Pointer(&v))
b[8*i+0] = byte(u)
b[8*i+1] = byte(u >> 8)
b[8*i+2] = byte(u >> 16)
b[8*i+3] = byte(u >> 24)
b[8*i+4] = byte(u >> 32)
b[8*i+5] = byte(u >> 40)
b[8*i+6] = byte(u >> 48)
b[8*i+7] = byte(u >> 56)
}
} else {
for i := 0; i < size; i++ {
v := *(*float64)(unsafe.Pointer(uintptr(unsafe.Pointer(data.(*float64))) + uintptr(i*8)))
u := *(*uint64)(unsafe.Pointer(&v))
b[8*i+0] = byte(u >> 56)
b[8*i+1] = byte(u >> 48)
b[8*i+2] = byte(u >> 40)
b[8*i+3] = byte(u >> 32)
b[8*i+4] = byte(u >> 24)
b[8*i+5] = byte(u >> 16)
b[8*i+6] = byte(u >> 8)
b[8*i+7] = byte(u)
}
}
return b
}
return nil
} | gl.go | 0.646795 | 0.416619 | gl.go | starcoder |
package geom
import "math"
// ZV = Vec{0,0}
var ZV = Vec{}
// Vec is a 2d Vector
type Vec struct {
X float64
Y float64
}
// Equals returns v == other
func (v Vec) Equals(other Vec) bool {
return v.X == other.X && v.Y == other.Y
}
func (v Vec) EqualsEpsilon(other Vec) bool {
return v.EqualsEpsilon2(other, Epsilon)
}
func (v Vec) EqualsEpsilon2(other Vec, epsilon float64) bool {
if v.Equals(other) {
return true
}
return ScalarEqualsEpsilon(v.X, other.X, epsilon) &&
ScalarEqualsEpsilon(v.Y, other.Y, epsilon)
}
// IsZero returns true if both axes are 0
func (v Vec) IsZero() bool {
return v.Equals(ZV)
}
func (v Vec) Add(other Vec) Vec {
return Vec{v.X + other.X, v.Y + other.Y}
}
func (v Vec) Sub(other Vec) Vec {
return Vec{v.X - other.X, v.Y - other.Y}
}
func (v Vec) Mul(other Vec) Vec {
return Vec{v.X * other.X, v.Y * other.Y}
}
// Dot product
func (v Vec) Dot(other Vec) float64 {
return v.X*other.X + v.Y*other.Y
}
// Cross product
func (v Vec) Cross(other Vec) float64 {
return v.X*other.Y - other.X*v.Y
}
// Magnitude = length
func (v Vec) Magnitude() float64 {
return math.Hypot(v.X, v.Y)
}
// Normalized normalizes a vector.
// Also known as direction, unit vector.
func (v Vec) Normalized() Vec {
if v.X == 0 && v.Y == 0 {
}
m := v.Magnitude()
return Vec{v.X / m, v.Y / m}
}
// Scaled returns {v.X * s, v.Y * s}
func (v Vec) Scaled(s float64) Vec {
return Vec{v.X * s, v.Y * s}
}
// ScaledXY returns {v.X * sx, v.Y * sy}
func (v Vec) ScaledXY(sx, sy float64) Vec {
return Vec{v.X * sx, v.Y * sy}
}
// Clamp returns a new Vec between that is "min >= v <= max"
func (v Vec) Clamp(min, max Vec) Vec {
nv := Vec{math.Max(min.X, v.X), math.Max(min.Y, v.Y)}
nv.X = math.Min(max.X, nv.X)
nv.Y = math.Min(max.Y, nv.Y)
return nv
}
// RectClamp returns a new Vec between that is "min >= v <= max"
func (v Vec) RectClamp(r Rect) Vec {
return v.Clamp(r.Min, r.Max)
}
func (v Vec) Angle() float64 {
return math.Atan2(v.Y, v.X)
}
func (v Vec) Applyed() {
} | geom/vec.go | 0.92122 | 0.518668 | vec.go | starcoder |
package zpay32
import (
"fmt"
"strconv"
"github.com/decred/dcrlnd/lnwire"
)
var (
// toMAtoms is a map from a unit to a function that converts an amount
// of that unit to MilliAtoms.
toMAtoms = map[byte]func(uint64) (lnwire.MilliAtom, error){
'm': mDcrToMAtoms,
'u': uDcrToMAtoms,
'n': nDcrToMAtoms,
'p': pDcrToMAtoms,
}
)
// mDcrToMAtoms converts the given amount in milliDCR to MilliAtoms.
func mDcrToMAtoms(m uint64) (lnwire.MilliAtom, error) {
return lnwire.MilliAtom(m) * 100000000, nil
}
// uDcrToMAtoms converts the given amount in microDCR to MilliAtoms.
func uDcrToMAtoms(u uint64) (lnwire.MilliAtom, error) {
return lnwire.MilliAtom(u * 100000), nil
}
// nDcrToMAtoms converts the given amount in nanoDCR to MilliAtoms.
func nDcrToMAtoms(n uint64) (lnwire.MilliAtom, error) {
return lnwire.MilliAtom(n * 100), nil
}
// pDcrToMAtoms converts the given amount in picoDCR to MilliAtoms.
func pDcrToMAtoms(p uint64) (lnwire.MilliAtom, error) {
if p < 10 {
return 0, fmt.Errorf("minimum amount is 10p")
}
if p%10 != 0 {
return 0, fmt.Errorf("amount %d pDCR not expressible in mAt",
p)
}
return lnwire.MilliAtom(p / 10), nil
}
// decodeAmount returns the amount encoded by the provided string in MilliAtom.
func decodeAmount(amount string) (lnwire.MilliAtom, error) {
if len(amount) < 1 {
return 0, fmt.Errorf("amount must be non-empty")
}
// If last character is a digit, then the amount can just be
// interpreted as DCR.
char := amount[len(amount)-1]
digit := char - '0'
if digit <= 9 {
dcr, err := strconv.ParseUint(amount, 10, 64)
if err != nil {
return 0, err
}
return lnwire.MilliAtom(dcr) * mAtPerDcr, nil
}
// If not a digit, it must be part of the known units.
conv, ok := toMAtoms[char]
if !ok {
return 0, fmt.Errorf("unknown multiplier %c", char)
}
// Known unit.
num := amount[:len(amount)-1]
if len(num) < 1 {
return 0, fmt.Errorf("number must be non-empty")
}
am, err := strconv.ParseUint(num, 10, 64)
if err != nil {
return 0, err
}
return conv(am)
} | channeldb/migration_01_to_11/zpay32/amountunits.go | 0.628065 | 0.616936 | amountunits.go | starcoder |
package main
import (
"fmt"
"math"
)
// "uncertain number type"
// a little optimization is to represent the error term with its square.
// this saves some taking of square roots in various places.
type unc struct {
n float64 // the number
s float64 // *square* of one sigma error term
}
// constructor, nice to have so it can handle squaring of error term
func newUnc(n, s float64) *unc {
return &unc{n, s * s}
}
// error term accessor method, nice to have so it can handle recovering
// (non-squared) error term from internal (squared) representation
func (z *unc) errorTerm() float64 {
return math.Sqrt(z.s)
}
// Arithmetic methods are modeled on the Go big number package.
// The basic scheme is to pass all operands as method arguments, compute
// the result into the method receiver, and then return the receiver as
// the result of the method. This has an advantage of letting the programer
// determine allocation and use of temporary objects, reducing garbage;
// and has the convenience and efficiency of allowing operations to be chained.
// addition/subtraction
func (z *unc) addC(a *unc, c float64) *unc {
*z = *a
z.n += c
return z
}
func (z *unc) subC(a *unc, c float64) *unc {
*z = *a
z.n -= c
return z
}
func (z *unc) addU(a, b *unc) *unc {
z.n = a.n + b.n
z.s = a.s + b.s
return z
}
func (z *unc) subU(a, b *unc) *unc {
z.n = a.n - b.n
z.s = a.s + b.s
return z
}
// multiplication/division
func (z *unc) mulC(a *unc, c float64) *unc {
z.n = a.n * c
z.s = a.s * c * c
return z
}
func (z *unc) divC(a *unc, c float64) *unc {
z.n = a.n / c
z.s = a.s / (c * c)
return z
}
func (z *unc) mulU(a, b *unc) *unc {
prod := a.n * b.n
z.n, z.s = prod, prod*prod*(a.s/(a.n*a.n)+b.s/(b.n*b.n))
return z
}
func (z *unc) divU(a, b *unc) *unc {
quot := a.n / b.n
z.n, z.s = quot, quot*quot*(a.s/(a.n*a.n)+b.s/(b.n*b.n))
return z
}
// exponentiation
func (z *unc) expC(a *unc, c float64) *unc {
f := math.Pow(a.n, c)
g := f * c / a.n
z.n = f
z.s = a.s * g * g
return z
}
func main() {
x1 := newUnc(100, 1.1)
x2 := newUnc(200, 2.2)
y1 := newUnc(50, 1.2)
y2 := newUnc(100, 2.3)
var d, d2 unc
d.expC(d.addU(d.expC(d.subU(x1, x2), 2), d2.expC(d2.subU(y1, y2), 2)), .5)
fmt.Println("d: ", d.n)
fmt.Println("error:", d.errorTerm())
}
//\Numeric-error-propagation\numeric-error-propagation.go | tasks/Numeric-error-propagation/numeric-error-propagation.go | 0.795142 | 0.560974 | numeric-error-propagation.go | starcoder |
package x
import (
"context"
"github.com/confio/weave"
"github.com/confio/weave/crypto"
"github.com/tendermint/tmlibs/common"
)
//--------------- expose helpers -----
// TestHelpers returns helper objects for tests,
// encapsulated in one object to be easily imported in other packages
type TestHelpers struct{}
// CountingDecorator passes tx along, and counts how many times it was called.
// Adds one on input down, one on output up,
// to differentiate panic from error
func (TestHelpers) CountingDecorator() CountingDecorator {
return &countingDecorator{}
}
// CountingHandler returns success and counts times called
func (TestHelpers) CountingHandler() CountingHandler {
return &countingHandler{}
}
// ErrorDecorator always returns the given error when called
func (TestHelpers) ErrorDecorator(err error) weave.Decorator {
return errorDecorator{err}
}
// ErrorHandler always returns the given error when called
func (TestHelpers) ErrorHandler(err error) weave.Handler {
return errorHandler{err}
}
// PanicAtHeightDecorator will panic if ctx.height >= h
func (TestHelpers) PanicAtHeightDecorator(h int64) weave.Decorator {
return panicAtHeightDecorator{h}
}
// PanicHandler always pancis with the given error when called
func (TestHelpers) PanicHandler(err error) weave.Handler {
return panicHandler{err}
}
// WriteHandler will write the given key/value pair to the KVStore,
// and return the error (use nil for success)
func (TestHelpers) WriteHandler(key, value []byte, err error) weave.Handler {
return writeHandler{
key: key,
value: value,
err: err,
}
}
// WriteDecorator will write the given key/value pair to the KVStore,
// either before or after calling down the stack.
// Returns (res, err) from child handler untouched
func (TestHelpers) WriteDecorator(key, value []byte, after bool) weave.Decorator {
return writeDecorator{
key: key,
value: value,
after: after,
}
}
// TagHandler writes a tag to DeliverResult and returns error of nil
// returns error, but doens't write any tags on CheckTx
func (TestHelpers) TagHandler(key, value []byte, err error) weave.Handler {
return tagHandler{
key: key,
value: value,
err: err,
}
}
// Wrap wraps the handler with one decorator and returns it
// as a single handler.
// Minimal version of ChainDecorators for test cases
func (TestHelpers) Wrap(d weave.Decorator, h weave.Handler) weave.Handler {
return wrappedHandler{
d: d,
h: h,
}
}
// MakeKey returns a random PrivateKey and the associated address
func (TestHelpers) MakeKey() (crypto.Signer, weave.Condition) {
priv := crypto.GenPrivKeyEd25519()
addr := priv.PublicKey().Condition()
return priv, addr
}
// MockMsg returns a weave.Msg object holding these bytes
func (TestHelpers) MockMsg(bz []byte) weave.Msg {
return &mockMsg{bz}
}
// MockTx returns a minimal weave.Tx object holding this Msg
func (TestHelpers) MockTx(msg weave.Msg) weave.Tx {
return &mockTx{msg}
}
// Authenticate returns an Authenticator that gives permissions
// to the given addresses
func (TestHelpers) Authenticate(perms ...weave.Condition) Authenticator {
return mockAuth{perms}
}
// CtxAuth returns an authenticator that uses the context
// getting and setting with the given key
func (TestHelpers) CtxAuth(key interface{}) CtxAuther {
return CtxAuther{key}
}
// CountingDecorator keeps track of number of times called.
// 2x per call, 1x per call with panic inside
type CountingDecorator interface {
GetCount() int
weave.Decorator
}
// CountingHandler keeps track of number of times called.
// 1x per call
type CountingHandler interface {
GetCount() int
weave.Handler
}
//--------------- tx and msg -----------------------
//------ msg
type mockMsg struct {
data []byte
}
var _ weave.Msg = (*mockMsg)(nil)
func (m mockMsg) Marshal() ([]byte, error) {
return m.data, nil
}
func (m *mockMsg) Unmarshal(bz []byte) error {
m.data = bz
return nil
}
func (m mockMsg) Path() string {
return "mock"
}
//------ tx
type mockTx struct {
msg weave.Msg
}
var _ weave.Tx = (*mockTx)(nil)
func (m mockTx) GetMsg() (weave.Msg, error) {
return m.msg, nil
}
func (m mockTx) Marshal() ([]byte, error) {
return m.msg.Marshal()
}
func (m *mockTx) Unmarshal(bz []byte) error {
return m.msg.Unmarshal(bz)
}
//------ static auth (added in constructor)
type mockAuth struct {
signers []weave.Condition
}
var _ Authenticator = mockAuth{}
func (a mockAuth) GetConditions(weave.Context) []weave.Condition {
return a.signers
}
func (a mockAuth) HasAddress(ctx weave.Context, addr weave.Address) bool {
for _, s := range a.signers {
if addr.Equals(s.Address()) {
return true
}
}
return false
}
//----- dynamic auth (based on ctx)
// CtxAuther gets/sets permissions on the given context key
type CtxAuther struct {
key interface{}
}
var _ Authenticator = CtxAuther{}
// SetConditions returns a context with the given permissions set
func (a CtxAuther) SetConditions(ctx weave.Context, perms ...weave.Condition) weave.Context {
return context.WithValue(ctx, a.key, perms)
}
// GetConditions returns permissions previously set on this context
func (a CtxAuther) GetConditions(ctx weave.Context) []weave.Condition {
val, _ := ctx.Value(a.key).([]weave.Condition)
return val
}
// HasAddress returns true iff this address is in GetConditions
func (a CtxAuther) HasAddress(ctx weave.Context, addr weave.Address) bool {
for _, s := range a.GetConditions(ctx) {
if addr.Equals(s.Address()) {
return true
}
}
return false
}
//-------------- counting -------------------------
type countingDecorator struct {
called int
}
var _ weave.Decorator = (*countingDecorator)(nil)
func (c *countingDecorator) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Checker) (weave.CheckResult, error) {
c.called++
res, err := next.Check(ctx, store, tx)
c.called++
return res, err
}
func (c *countingDecorator) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Deliverer) (weave.DeliverResult, error) {
c.called++
res, err := next.Deliver(ctx, store, tx)
c.called++
return res, err
}
func (c *countingDecorator) GetCount() int {
return c.called
}
// countingHandler counts how many times it was called
type countingHandler struct {
called int
}
var _ weave.Handler = (*countingHandler)(nil)
func (c *countingHandler) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.CheckResult, error) {
c.called++
return weave.CheckResult{}, nil
}
func (c *countingHandler) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.DeliverResult, error) {
c.called++
return weave.DeliverResult{}, nil
}
func (c *countingHandler) GetCount() int {
return c.called
}
//----------- errors ------------
// errorDecorator returns the given error
type errorDecorator struct {
err error
}
var _ weave.Decorator = errorDecorator{}
func (e errorDecorator) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Checker) (weave.CheckResult, error) {
return weave.CheckResult{}, e.err
}
func (e errorDecorator) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Deliverer) (weave.DeliverResult, error) {
return weave.DeliverResult{}, e.err
}
// errorHandler returns the given error
type errorHandler struct {
err error
}
var _ weave.Handler = errorHandler{}
func (e errorHandler) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.CheckResult, error) {
return weave.CheckResult{}, e.err
}
func (e errorHandler) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.DeliverResult, error) {
return weave.DeliverResult{}, e.err
}
// panicAtHeightDecorator panics if ctx.height >= p.height
type panicAtHeightDecorator struct {
height int64
}
var _ weave.Decorator = panicAtHeightDecorator{}
func (p panicAtHeightDecorator) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Checker) (weave.CheckResult, error) {
if val, _ := weave.GetHeight(ctx); val > p.height {
panic("too high")
}
return next.Check(ctx, store, tx)
}
func (p panicAtHeightDecorator) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Deliverer) (weave.DeliverResult, error) {
if val, _ := weave.GetHeight(ctx); val > p.height {
panic("too high")
}
return next.Deliver(ctx, store, tx)
}
// panicHandler always panics
type panicHandler struct {
err error
}
var _ weave.Handler = panicHandler{}
func (p panicHandler) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.CheckResult, error) {
panic(p.err)
}
func (p panicHandler) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.DeliverResult, error) {
panic(p.err)
}
//----------------- writers --------
// writeHandler writes the key, value pair and returns the error (may be nil)
type writeHandler struct {
key []byte
value []byte
err error
}
var _ weave.Handler = writeHandler{}
func (h writeHandler) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.CheckResult, error) {
store.Set(h.key, h.value)
return weave.CheckResult{}, h.err
}
func (h writeHandler) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.DeliverResult, error) {
store.Set(h.key, h.value)
return weave.DeliverResult{}, h.err
}
// writeDecorator writes the key, value pair.
// either before or after calling the handlers
type writeDecorator struct {
key []byte
value []byte
after bool
}
var _ weave.Decorator = writeDecorator{}
func (d writeDecorator) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Checker) (weave.CheckResult, error) {
if !d.after {
store.Set(d.key, d.value)
}
res, err := next.Check(ctx, store, tx)
if d.after && err == nil {
store.Set(d.key, d.value)
}
return res, err
}
func (d writeDecorator) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx, next weave.Deliverer) (weave.DeliverResult, error) {
if !d.after {
store.Set(d.key, d.value)
}
res, err := next.Deliver(ctx, store, tx)
if d.after && err == nil {
store.Set(d.key, d.value)
}
return res, err
}
//----------------- misc --------
// tagHandler writes the key, value pair and returns the error (may be nil)
type tagHandler struct {
key []byte
value []byte
err error
}
var _ weave.Handler = tagHandler{}
func (h tagHandler) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.CheckResult, error) {
return weave.CheckResult{}, h.err
}
func (h tagHandler) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.DeliverResult, error) {
tags := common.KVPairs{{Key: h.key, Value: h.value}}
return weave.DeliverResult{Tags: tags}, h.err
}
type wrappedHandler struct {
d weave.Decorator
h weave.Handler
}
var _ weave.Handler = wrappedHandler{}
func (w wrappedHandler) Check(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.CheckResult, error) {
return w.d.Check(ctx, store, tx, w.h)
}
func (w wrappedHandler) Deliver(ctx weave.Context, store weave.KVStore,
tx weave.Tx) (weave.DeliverResult, error) {
return w.d.Deliver(ctx, store, tx, w.h)
} | x/helpers.go | 0.772187 | 0.41401 | helpers.go | starcoder |
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
"github.com/DanielTitkov/anomaly-detection-service/internal/repository/entgo/ent/detectionjob"
"github.com/DanielTitkov/anomaly-detection-service/internal/repository/entgo/ent/detectionjobinstance"
)
// DetectionJobInstance is the model entity for the DetectionJobInstance schema.
type DetectionJobInstance struct {
config `json:"-"`
// ID of the ent.
ID int `json:"id,omitempty"`
// CreateTime holds the value of the "create_time" field.
CreateTime time.Time `json:"create_time,omitempty"`
// UpdateTime holds the value of the "update_time" field.
UpdateTime time.Time `json:"update_time,omitempty"`
// StartedAt holds the value of the "started_at" field.
StartedAt *time.Time `json:"started_at,omitempty"`
// FinishedAt holds the value of the "finished_at" field.
FinishedAt *time.Time `json:"finished_at,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the DetectionJobInstanceQuery when eager-loading is set.
Edges DetectionJobInstanceEdges `json:"edges"`
detection_job_instance *int
}
// DetectionJobInstanceEdges holds the relations/edges for other nodes in the graph.
type DetectionJobInstanceEdges struct {
// Anomalies holds the value of the anomalies edge.
Anomalies []*Anomaly `json:"anomalies,omitempty"`
// DetectionJob holds the value of the detection_job edge.
DetectionJob *DetectionJob `json:"detection_job,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [2]bool
}
// AnomaliesOrErr returns the Anomalies value or an error if the edge
// was not loaded in eager-loading.
func (e DetectionJobInstanceEdges) AnomaliesOrErr() ([]*Anomaly, error) {
if e.loadedTypes[0] {
return e.Anomalies, nil
}
return nil, &NotLoadedError{edge: "anomalies"}
}
// DetectionJobOrErr returns the DetectionJob value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e DetectionJobInstanceEdges) DetectionJobOrErr() (*DetectionJob, error) {
if e.loadedTypes[1] {
if e.DetectionJob == nil {
// The edge detection_job was loaded in eager-loading,
// but was not found.
return nil, &NotFoundError{label: detectionjob.Label}
}
return e.DetectionJob, nil
}
return nil, &NotLoadedError{edge: "detection_job"}
}
// scanValues returns the types for scanning values from sql.Rows.
func (*DetectionJobInstance) scanValues(columns []string) ([]interface{}, error) {
values := make([]interface{}, len(columns))
for i := range columns {
switch columns[i] {
case detectionjobinstance.FieldID:
values[i] = &sql.NullInt64{}
case detectionjobinstance.FieldCreateTime, detectionjobinstance.FieldUpdateTime, detectionjobinstance.FieldStartedAt, detectionjobinstance.FieldFinishedAt:
values[i] = &sql.NullTime{}
case detectionjobinstance.ForeignKeys[0]: // detection_job_instance
values[i] = &sql.NullInt64{}
default:
return nil, fmt.Errorf("unexpected column %q for type DetectionJobInstance", columns[i])
}
}
return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the DetectionJobInstance fields.
func (dji *DetectionJobInstance) assignValues(columns []string, values []interface{}) error {
if m, n := len(values), len(columns); m < n {
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
}
for i := range columns {
switch columns[i] {
case detectionjobinstance.FieldID:
value, ok := values[i].(*sql.NullInt64)
if !ok {
return fmt.Errorf("unexpected type %T for field id", value)
}
dji.ID = int(value.Int64)
case detectionjobinstance.FieldCreateTime:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field create_time", values[i])
} else if value.Valid {
dji.CreateTime = value.Time
}
case detectionjobinstance.FieldUpdateTime:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field update_time", values[i])
} else if value.Valid {
dji.UpdateTime = value.Time
}
case detectionjobinstance.FieldStartedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field started_at", values[i])
} else if value.Valid {
dji.StartedAt = new(time.Time)
*dji.StartedAt = value.Time
}
case detectionjobinstance.FieldFinishedAt:
if value, ok := values[i].(*sql.NullTime); !ok {
return fmt.Errorf("unexpected type %T for field finished_at", values[i])
} else if value.Valid {
dji.FinishedAt = new(time.Time)
*dji.FinishedAt = value.Time
}
case detectionjobinstance.ForeignKeys[0]:
if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for edge-field detection_job_instance", value)
} else if value.Valid {
dji.detection_job_instance = new(int)
*dji.detection_job_instance = int(value.Int64)
}
}
}
return nil
}
// QueryAnomalies queries the "anomalies" edge of the DetectionJobInstance entity.
func (dji *DetectionJobInstance) QueryAnomalies() *AnomalyQuery {
return (&DetectionJobInstanceClient{config: dji.config}).QueryAnomalies(dji)
}
// QueryDetectionJob queries the "detection_job" edge of the DetectionJobInstance entity.
func (dji *DetectionJobInstance) QueryDetectionJob() *DetectionJobQuery {
return (&DetectionJobInstanceClient{config: dji.config}).QueryDetectionJob(dji)
}
// Update returns a builder for updating this DetectionJobInstance.
// Note that you need to call DetectionJobInstance.Unwrap() before calling this method if this DetectionJobInstance
// was returned from a transaction, and the transaction was committed or rolled back.
func (dji *DetectionJobInstance) Update() *DetectionJobInstanceUpdateOne {
return (&DetectionJobInstanceClient{config: dji.config}).UpdateOne(dji)
}
// Unwrap unwraps the DetectionJobInstance entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (dji *DetectionJobInstance) Unwrap() *DetectionJobInstance {
tx, ok := dji.config.driver.(*txDriver)
if !ok {
panic("ent: DetectionJobInstance is not a transactional entity")
}
dji.config.driver = tx.drv
return dji
}
// String implements the fmt.Stringer.
func (dji *DetectionJobInstance) String() string {
var builder strings.Builder
builder.WriteString("DetectionJobInstance(")
builder.WriteString(fmt.Sprintf("id=%v", dji.ID))
builder.WriteString(", create_time=")
builder.WriteString(dji.CreateTime.Format(time.ANSIC))
builder.WriteString(", update_time=")
builder.WriteString(dji.UpdateTime.Format(time.ANSIC))
if v := dji.StartedAt; v != nil {
builder.WriteString(", started_at=")
builder.WriteString(v.Format(time.ANSIC))
}
if v := dji.FinishedAt; v != nil {
builder.WriteString(", finished_at=")
builder.WriteString(v.Format(time.ANSIC))
}
builder.WriteByte(')')
return builder.String()
}
// DetectionJobInstances is a parsable slice of DetectionJobInstance.
type DetectionJobInstances []*DetectionJobInstance
func (dji DetectionJobInstances) config(cfg config) {
for _i := range dji {
dji[_i].config = cfg
}
} | internal/repository/entgo/ent/detectionjobinstance.go | 0.68763 | 0.411879 | detectionjobinstance.go | starcoder |
package gostr
import (
"crypto/rand"
"encoding/base64"
"fmt"
"regexp"
"strings"
"github.com/google/uuid"
)
// After returns the remainder of s after the first occurrence of search.
// When search is empty or not present, s is returned unchanged.
func After(s string, search string) string {
	if search == "" {
		return s
	}
	if i := strings.Index(s, search); i >= 0 {
		return s[i+len(search):]
	}
	return s
}
// AfterLast returns the remainder of s after the last occurrence of search.
// When search is empty or not present, s is returned unchanged.
func AfterLast(s string, search string) string {
	if search == "" {
		return s
	}
	if i := strings.LastIndex(s, search); i >= 0 {
		return s[i+len(search):]
	}
	return s
}
// Before returns the portion of s before the first occurrence of search.
// When search is empty or not present, s is returned unchanged.
func Before(s string, search string) string {
	if search == "" {
		return s
	}
	if i := strings.Index(s, search); i >= 0 {
		return s[:i]
	}
	return s
}
// BeforeLast returns the portion of s before the last occurrence of search.
// When search is empty or not present, s is returned unchanged.
func BeforeLast(s string, search string) string {
	if search == "" {
		return s
	}
	if i := strings.LastIndex(s, search); i >= 0 {
		return s[:i]
	}
	return s
}
// Between returns the portion of s between the first occurrence of from and
// the last subsequent occurrence of to (implemented as BeforeLast(After(s,
// from), to)). When either delimiter is empty, s is returned unchanged.
func Between(s string, from string, to string) string {
	if from == "" || to == "" {
		return s
	}
	return BeforeLast(After(s, from), to)
}
// Contains reports whether haystack contains at least one of the given
// non-empty substrings. Empty needles never match.
// (Idiom: uses strings.Contains instead of strings.Index(...) != -1.)
func Contains(haystack string, needles []string) bool {
	for _, needle := range needles {
		if needle != "" && strings.Contains(haystack, needle) {
			return true
		}
	}
	return false
}
// ContainsAll reports whether haystack contains every one of the given
// substrings. An empty needle always fails (mirroring Contains, which never
// matches an empty needle); an empty needle list is vacuously true.
func ContainsAll(haystack string, needles []string) bool {
	for _, needle := range needles {
		if needle == "" || !strings.Contains(haystack, needle) {
			return false
		}
	}
	return true
}
// EndsWith reports whether haystack ends with any of the given non-empty
// substrings. Empty needles are skipped and never match.
func EndsWith(haystack string, needles []string) bool {
	for _, needle := range needles {
		if needle == "" {
			continue
		}
		if strings.HasSuffix(haystack, needle) {
			return true
		}
	}
	return false
}
// IsUUID reports whether s parses as a valid UUID (any format accepted by
// github.com/google/uuid.Parse, e.g. canonical, urn:, braced, or raw hex).
func IsUUID(s string) bool {
	_, err := uuid.Parse(s)
	return err == nil
}
// Length returns the length of the given string in BYTES (len), not runes.
// NOTE(review): for multi-byte UTF-8 input this differs from the character
// count — confirm whether callers expect utf8.RuneCountInString instead.
func Length(s string) int {
	return len(s)
}
// Lower converts the given string to lower-case (Unicode-aware via
// strings.ToLower).
func Lower(s string) string {
	return strings.ToLower(s)
}
// Words limits s to the given number of words, appending end only when
// something was actually cut off. A limit of 0 (or an empty string) returns
// s unchanged.
//
// Fixes two defects of the previous version: the end suffix was appended even
// when the string already had no more than `words` words, and a limit of 1
// was silently ignored.
func Words(s string, words uint, end string) string {
	if words == 0 || s == "" {
		return s
	}
	exp := fmt.Sprintf(`^\s*(?:\S+\s*){1,%d}`, words)
	re := regexp.MustCompile(exp)
	t := re.FindString(s)
	// The match is an anchored prefix of s; equal lengths mean the whole
	// input fit within the word limit, so nothing was truncated.
	if len(t) == len(s) {
		return s
	}
	return strings.TrimSpace(t) + end
}
// PadRight pads the right side of s with pad until the result is length
// bytes long; a final partial repetition of pad is used when the gap is not
// an exact multiple. Returns s unchanged when it is already long enough or
// when pad is empty.
func PadRight(s string, length uint, pad string) string {
	if len(s) >= int(length) || len(pad) == 0 {
		return s
	}
	missing := int(length) - len(s)
	full := missing / len(pad)
	partial := missing % len(pad)
	return s + strings.Repeat(pad, full) + pad[:partial]
}
// PadLeft pads the left side of s with pad until the result is length bytes
// long; a final partial repetition of pad is used when the gap is not an
// exact multiple. Returns s unchanged when it is already long enough or when
// pad is empty.
func PadLeft(s string, length uint, pad string) string {
	if len(s) >= int(length) || len(pad) == 0 {
		return s
	}
	missing := int(length) - len(s)
	full := missing / len(pad)
	partial := missing % len(pad)
	return strings.Repeat(pad, full) + pad[:partial] + s
}
// Random generates a cryptographically random alpha-numeric string of l
// characters (base64 alphabet with '/', '+' and '=' stripped). A length of 0
// defaults to 16 characters.
//
// Fix: the error from crypto/rand.Read was silently ignored; an entropy
// failure must not silently yield a predictable (zeroed) string, so it now
// panics.
func Random(l uint) string {
	if l == 0 {
		l = 16
	}
	var b strings.Builder
	b.Grow(int(l))
	// Stripping '/', '+' and '=' may shorten the chunk, so loop until the
	// requested length is reached.
	for b.Len() < int(l) {
		size := int(l) - b.Len()
		raw := make([]byte, size)
		if _, err := rand.Read(raw); err != nil {
			// The platform cannot supply secure entropy; continuing
			// would be unsafe.
			panic(err)
		}
		encoded := base64.RawStdEncoding.EncodeToString(raw)
		cleaned := strings.NewReplacer("/", "", "+", "", "=", "").Replace(encoded[:size])
		b.WriteString(cleaned)
	}
	return b.String()
}
// Repeat returns s repeated times times (delegates to strings.Repeat).
func Repeat(s string, times uint) string {
	return strings.Repeat(s, int(times))
}
// Upper converts the given string to upper-case (Unicode-aware via
// strings.ToUpper).
// (Fix: removed stray dataset metadata that was fused onto the closing brace.)
func Upper(s string) string {
	return strings.ToUpper(s)
}
package bgls
import (
"math/big"
)
// *complexNum is a complex number whose elements are members of field of size p
// This is essentially an element of Fp[i]/(i^2 + 1)
type complexNum struct {
	im, re *big.Int // value is im*i + re, with im, re in Fp
}
// getComplexZero returns a freshly allocated zero value, 0 + 0i.
func getComplexZero() *complexNum {
	return &complexNum{big.NewInt(0), big.NewInt(0)}
}
// Add stores num + other, reduced mod p, into result and returns result.
func (result *complexNum) Add(num *complexNum, other *complexNum, p *big.Int) *complexNum {
	result.re.Add(num.re, other.re).Mod(result.re, p)
	result.im.Add(num.im, other.im).Mod(result.im, p)
	return result
}
// Conjugate stores the complex conjugate of num (imaginary part negated)
// into result and returns result.
func (result *complexNum) Conjugate(num *complexNum) *complexNum {
	result.re.Set(num.re)
	result.im.Neg(num.im)
	return result
}
// Mul stores num * other, reduced mod p, into result and returns result:
// (a*i + b)(c*i + d) = (b*d - a*c) + (a*d + b*c)*i.
// Local names avoid shadowing the builtin real/imag functions.
func (result *complexNum) Mul(num *complexNum, other *complexNum, p *big.Int) *complexNum {
	rePart := new(big.Int).Mul(num.re, other.re)
	rePart.Sub(rePart, new(big.Int).Mul(num.im, other.im))
	rePart.Mod(rePart, p)
	imPart := new(big.Int).Mul(num.im, other.re)
	imPart.Add(imPart, new(big.Int).Mul(num.re, other.im))
	imPart.Mod(imPart, p)
	result.re = rePart
	result.im = imPart
	return result
}
// MulScalar stores num with its real part multiplied by other (mod p) into
// result and returns result.
//
// NOTE(review): only the real part is scaled; the imaginary part is carried
// over unchanged. True scalar multiplication of a complex number scales both
// components — confirm whether this asymmetry is intentional for the callers
// in this package.
func (result *complexNum) MulScalar(num *complexNum, other *big.Int, p *big.Int) *complexNum {
	real := new(big.Int).Mul(num.re, other)
	real.Mod(real, p)
	result.re = real
	result.im = num.im
	return result
}
// Square stores num^2 mod p into result and returns result:
// (a*i + b)^2 = (b^2 - a^2) + 2ab*i.
func (result *complexNum) Square(num *complexNum, p *big.Int) *complexNum {
	rePart := new(big.Int).Exp(num.re, two, p)
	rePart.Sub(rePart, new(big.Int).Exp(num.im, two, p))
	rePart.Mod(rePart, p)
	imPart := new(big.Int).Mul(num.re, num.im)
	imPart.Mul(imPart, two)
	imPart.Mod(imPart, p)
	result.re = rePart
	result.im = imPart
	return result
}
// Set copies num's components into result and returns result.
func (result *complexNum) Set(num *complexNum) *complexNum {
	result.im.Set(num.im)
	result.re.Set(num.re)
	return result
}
// Exp stores base^power mod p into result and returns result, using
// square-and-multiply over the exponent's bits from most to least
// significant. power = 0 yields the multiplicative identity 1 + 0i.
func (result *complexNum) Exp(base *complexNum, power *big.Int, p *big.Int) *complexNum {
	// sum starts at the multiplicative identity 1 + 0i.
	sum := &complexNum{new(big.Int).SetInt64(0), new(big.Int).SetInt64(1)}
	t := getComplexZero()
	for i := power.BitLen() - 1; i >= 0; i-- {
		// Square every step; multiply in base only when the bit is set.
		t.Square(sum, p)
		if power.Bit(i) != 0 {
			sum.Mul(t, base, p)
		} else {
			sum.Set(t)
		}
	}
	result.im = sum.im
	result.re = sum.re
	return result
}
func (result *complexNum) Equals(other *complexNum) bool {
if result.im.Cmp(other.im) != 0 || result.re.Cmp(other.re) != 0 {
return false
}
return true
} | complexNum.go | 0.645455 | 0.551453 | complexNum.go | starcoder |
package mesh
import (
"math"
"github.com/DexterLB/traytor/maths"
"github.com/DexterLB/traytor/ray"
)
// If the depth of the KDtree reaches MaxTreeDepth, the node becomes leaf
// whith node.Triangles the remaining triangles. A node also becomes a leaf
// when it holds fewer than TrianglesPerLeaf triangles.
const (
	MaxTreeDepth     = 10
	TrianglesPerLeaf = 20
)

// Vertex is a single vertex in a mesh.
type Vertex struct {
	Normal      maths.Vec3 `json:"normal"`
	Coordinates maths.Vec3 `json:"coordinates"`
	UV          maths.Vec3 `json:"uv"`
}

// Triangle is a face with 3 vertices (indices in the vertex array).
// AB, AC, ABxAC, surfaceOx and surfaceOy are derived values cached by
// Mesh.Init for fast intersection tests; they are not part of the
// serialized form.
type Triangle struct {
	Vertices      [3]int `json:"vertices"`
	Material      int    `json:"material"`
	AB, AC, ABxAC *maths.Vec3
	Normal        *maths.Vec3 `json:"normal"`
	surfaceOx     *maths.Vec3
	surfaceOy     *maths.Vec3
}

// Mesh is a triangle mesh. tree and BoundingBox are built by Init and
// accelerate Intersect.
type Mesh struct {
	Vertices    []Vertex   `json:"vertices"`
	Faces       []Triangle `json:"faces"`
	tree        *KDtree
	BoundingBox *BoundingBox
}
// Init of Mesh sets the Triangle indices in the Faces array, calculates the bounding box
// and KD tree, sets the surfaceOx and Oy and Cross Products of the sides of each triangle
func (m *Mesh) Init() {
	allIndices := make([]int, len(m.Faces))
	for i := range allIndices {
		allIndices[i] = i
	}
	m.BoundingBox = m.GetBoundingBox()
	m.tree = m.newKDtree(m.BoundingBox, allIndices, 0)
	for i := range m.Faces {
		triangle := &m.Faces[i]
		A := &m.Vertices[triangle.Vertices[0]].Coordinates
		B := &m.Vertices[triangle.Vertices[1]].Coordinates
		C := &m.Vertices[triangle.Vertices[2]].Coordinates
		// Cache the edge vectors and their cross product (a vector
		// perpendicular to the face) for the per-ray intersection tests.
		AB := maths.MinusVectors(B, A)
		AC := maths.MinusVectors(C, A)
		triangle.AB = AB
		triangle.AC = AC
		triangle.ABxAC = maths.CrossProduct(AB, AC)
		surfaceA := &m.Vertices[triangle.Vertices[0]].UV
		surfaceB := &m.Vertices[triangle.Vertices[1]].UV
		surfaceC := &m.Vertices[triangle.Vertices[2]].UV
		surfaceAB := maths.MinusVectors(surfaceB, surfaceA)
		surfaceAC := maths.MinusVectors(surfaceC, surfaceA)
		// Solve using Cramer:
		// |surfaceAB.X * px + surfaceAX.X *qx + 1 = 0
		// |surfaceAB.Y * px + surfaceAX.Y *qx = 0
		// and
		// surfaceAB.X * py + surfaceAX.X *qy = 0
		// surfaceAB.X * py + surfaceAX.X *qy + 1 = 0
		px, qx := maths.SolveEquation(surfaceAB, surfaceAC, maths.NewVec3(1, 0, 0))
		py, qy := maths.SolveEquation(surfaceAB, surfaceAC, maths.NewVec3(0, 1, 0))
		// surfaceOx/surfaceOy map the UV-space axes into world space —
		// presumably used for texture/bump mapping; confirm with callers.
		triangle.surfaceOx = maths.AddVectors(AB.Scaled(px), AC.Scaled(qx))
		triangle.surfaceOy = maths.AddVectors(AB.Scaled(py), AC.Scaled(qy))
	}
}
// SlowIntersect finds the closest intersection between a ray and the mesh
// by brute force over every face, and returns the intersection info
// (including the surface material index). Returns nil when there is no
// intersection. Has O(n) complexity; prefer Intersect for large meshes.
//
// Perf fix: index the slice directly instead of ranging by value — the old
// loop copied each Triangle per iteration and passed a pointer to the copy.
func (m *Mesh) SlowIntersect(incoming *ray.Ray) *ray.Intersection {
	intersection := &ray.Intersection{}
	intersection.Distance = maths.Inf
	found := false
	for i := range m.Faces {
		if m.intersectTriangle(incoming, &m.Faces[i], intersection, nil) {
			found = true
		}
	}
	if !found {
		return nil
	}
	return intersection
}
// Intersect finds the intersection between a ray and the mesh using the KD
// tree, and returns the intersection info and surface material.
// Returns nil if they don't intersect.
// Has O(log(n)) amortised complexity.
func (m *Mesh) Intersect(incoming *ray.Ray) *ray.Intersection {
	incoming.Init()
	// There can be no intersection if the ray misses the mesh's bounding box.
	if !m.BoundingBox.Intersect(incoming) {
		return nil
	}
	intersectionInfo := &ray.Intersection{Distance: maths.Inf}
	if m.IntersectKD(incoming, m.BoundingBox, m.tree, intersectionInfo) {
		return intersectionInfo
	}
	return nil
}
// IntersectTriangle reports whether the ray hits triangle ABC and, when it
// does, returns the distance from the ray origin to the hit point. It solves
// for barycentric coordinates via Cramer's rule.
//
// Fix: the determinant is now checked against Epsilon BEFORE computing
// 1/det; previously the division happened first (harmless only because IEEE
// division by zero yields Inf rather than trapping).
func IntersectTriangle(ray *ray.Ray, A, B, C *maths.Vec3) (bool, float64) {
	AB := maths.MinusVectors(B, A)
	AC := maths.MinusVectors(C, A)
	reverseDirection := ray.Direction.Negative()
	distToA := maths.MinusVectors(&ray.Start, A)
	ABxAC := maths.CrossProduct(AB, AC)
	det := maths.DotProduct(ABxAC, reverseDirection)
	// A (near-)zero determinant means the ray is parallel to the
	// triangle's plane: no usable intersection.
	if math.Abs(det) < maths.Epsilon {
		return false, maths.Inf
	}
	reverseDet := 1 / det
	lambda2 := maths.MixedProduct(distToA, AC, reverseDirection) * reverseDet
	lambda3 := maths.MixedProduct(AB, distToA, reverseDirection) * reverseDet
	gamma := maths.DotProduct(ABxAC, distToA) * reverseDet
	// Negative gamma places the hit behind the ray origin.
	if gamma < 0 {
		return false, maths.Inf
	}
	// The hit must fall inside the triangle: both barycentric weights in
	// [0, 1] and their sum at most 1.
	if lambda2 < 0 || lambda2 > 1 || lambda3 < 0 || lambda3 > 1 || lambda2+lambda3 > 1 {
		return false, maths.Inf
	}
	return true, gamma
}
// intersectTriangle reports whether the ray intersects the triangle using
// barycentric coordinates. The hit is recorded into intersection only when
// it is closer than the previously found one and (when boundingBox is
// non-nil) lies inside the bounding box. On success it fills in the hit
// point, distance, interpolated normal, UV coordinates, surface basis,
// incoming ray and material index.
func (m *Mesh) intersectTriangle(ray *ray.Ray, triangle *Triangle, intersection *ray.Intersection, boundingBox *BoundingBox) bool {
	// lambda2 * AB + lambda3 * AC - intersectDist*rayDir = distToA
	// If the triangle is ABC, this gives you A
	A := &m.Vertices[triangle.Vertices[0]].Coordinates
	distToA := maths.MinusVectors(&ray.Start, A)
	rayDir := ray.Direction
	ABxAC := triangle.ABxAC
	// We will find the barycentric coordinates using Cramer's formula, so we'll need the determinant
	// det is (AB^AC)*dir of the ray, but we're gonna use 1/det, so we find the recerse:
	det := -maths.DotProduct(ABxAC, &rayDir)
	if math.Abs(det) < maths.Epsilon {
		// Ray is (nearly) parallel to the triangle's plane.
		return false
	}
	reverseDet := 1 / det
	intersectDist := maths.DotProduct(ABxAC, distToA) * reverseDet
	// Reject hits behind the origin or farther than the best hit so far.
	if intersectDist < 0 || intersectDist > intersection.Distance {
		return false
	}
	// lambda2 = (dist^dir)*AC / det
	// lambda3 = -(dist^dir)*AB / det
	lambda2 := maths.MixedProduct(distToA, &rayDir, triangle.AC) * reverseDet
	lambda3 := -maths.MixedProduct(distToA, &rayDir, triangle.AB) * reverseDet
	if lambda2 < 0 || lambda2 > 1 || lambda3 < 0 || lambda3 > 1 || lambda2+lambda3 > 1 {
		return false
	}
	ip := maths.AddVectors(&ray.Start, (&rayDir).Scaled(intersectDist))
	// If we aren't inside the bounding box, there could be a closer intersection
	// within the bounding box
	if boundingBox != nil && !boundingBox.Inside(ip) {
		return false
	}
	intersection.Point = ip
	intersection.Distance = intersectDist
	if triangle.Normal != nil {
		// Flat shading: the triangle carries an explicit normal.
		intersection.Normal = triangle.Normal
	} else {
		// We solve intersection.normal = Anormal + AB normal * lambda2 + ACnormal * lambda 3
		// (smooth shading: interpolate the vertex normals barycentrically).
		Anormal := &m.Vertices[triangle.Vertices[0]].Normal
		Bnormal := &m.Vertices[triangle.Vertices[1]].Normal
		Cnormal := &m.Vertices[triangle.Vertices[2]].Normal
		ABxlambda2 := maths.MinusVectors(Bnormal, Anormal).Scaled(lambda2)
		ACxlambda3 := maths.MinusVectors(Cnormal, Anormal).Scaled(lambda3)
		intersection.Normal = maths.AddVectors(Anormal, maths.AddVectors(ABxlambda2, ACxlambda3))
	}
	uvA := &m.Vertices[triangle.Vertices[0]].UV
	uvB := &m.Vertices[triangle.Vertices[1]].UV
	uvC := &m.Vertices[triangle.Vertices[2]].UV
	// We solve intersection.uv = uvA + uvAB * lambda2 + uvAC * lambda 3
	uvABxlambda2 := maths.MinusVectors(uvB, uvA).Scaled(lambda2)
	uvACxlambda3 := maths.MinusVectors(uvC, uvA).Scaled(lambda3)
	uv := maths.AddVectors(uvA, maths.AddVectors(uvABxlambda2, uvACxlambda3))
	intersection.U = uv.X
	intersection.V = uv.Y
	intersection.SurfaceOx = triangle.surfaceOx
	intersection.SurfaceOy = triangle.surfaceOy
	intersection.Incoming = ray
	intersection.Material = triangle.Material
	return true
}
// GetBoundingBox returns the bounding box of the mesh, grown to include
// every vertex.
//
// Perf fix: index the slice directly instead of ranging by value — the old
// loop copied each Vertex per iteration and took the address of the copy.
func (m *Mesh) GetBoundingBox() *BoundingBox {
	boundingBox := NewBoundingBox()
	for i := range m.Vertices {
		boundingBox.AddPoint(&m.Vertices[i].Coordinates)
	}
	return boundingBox
}
// newKDtree builds the KD tree for the mesh by recursively splitting the
// bounding box along alternating axes at its midpoint and assigning each
// triangle to every half it touches (a triangle straddling the split plane
// goes into both children). Recursion stops at MaxTreeDepth or when fewer
// than TrianglesPerLeaf triangles remain, producing a leaf.
func (m *Mesh) newKDtree(boundingBox *BoundingBox, trianglesIndices []int, depth int) *KDtree {
	if depth > MaxTreeDepth || len(trianglesIndices) < TrianglesPerLeaf {
		node := NewLeaf(trianglesIndices)
		return node
	}
	// We take the (depth + 2) % 3 to alternate between Ox, Oy and Oz on each
	// turn; the split plane is always the box midpoint, which is not the best
	// decision in every case (no surface-area heuristic).
	axis := (depth + 2) % 3
	leftLimit := boundingBox.MaxVolume[axis]
	righLimit := boundingBox.MinVolume[axis]
	median := (leftLimit + righLimit) / 2
	var leftTriangles, rightTriangles []int
	var A, B, C *maths.Vec3
	leftBoundingBox, rightBoundingBox := boundingBox.Split(axis, median)
	for _, index := range trianglesIndices {
		A = &m.Vertices[m.Faces[index].Vertices[0]].Coordinates
		B = &m.Vertices[m.Faces[index].Vertices[1]].Coordinates
		C = &m.Vertices[m.Faces[index].Vertices[2]].Coordinates
		if leftBoundingBox.IntersectTriangle(A, B, C) {
			leftTriangles = append(leftTriangles, index)
		}
		if rightBoundingBox.IntersectTriangle(A, B, C) {
			rightTriangles = append(rightTriangles, index)
		}
	}
	node := NewNode(median, axis)
	leftChild := m.newKDtree(leftBoundingBox, leftTriangles, depth+1)
	rightChild := m.newKDtree(rightBoundingBox, rightTriangles, depth+1)
	node.Children[0] = leftChild
	node.Children[1] = rightChild
	return node
}
// IntersectKD returns whether there's an intersection with the ray. The the current node is leaf
// we check each of its triangles and divide the bounding box and check for each child
func (m *Mesh) IntersectKD(ray *ray.Ray, boundingBox *BoundingBox, node *KDtree, intersectionInfo *ray.Intersection) bool {
foundIntersection := false
if node.Axis == maths.Leaf {
for _, triangle := range node.Triangles {
if m.intersectTriangle(ray, &m.Faces[triangle], intersectionInfo, boundingBox) {
foundIntersection = true
}
}
return foundIntersection
}
leftBoundingBoxChild, rightBoundingBoxChild := boundingBox.Split(node.Axis, node.Median)
var firstBoundingBox, secondBoundingBox *BoundingBox
var firstNodeChild, secondNodeChild *KDtree
if ray.Start.GetDimension(node.Axis) <= node.Median {
firstBoundingBox = leftBoundingBoxChild
secondBoundingBox = rightBoundingBoxChild
firstNodeChild = node.Children[0]
secondNodeChild = node.Children[1]
} else {
firstBoundingBox = rightBoundingBoxChild
secondBoundingBox = leftBoundingBoxChild
firstNodeChild = node.Children[1]
secondNodeChild = node.Children[0]
}
if boundingBox.IntersectWall(node.Axis, node.Median, ray) {
if m.IntersectKD(ray, firstBoundingBox, firstNodeChild, intersectionInfo) {
return true
}
return m.IntersectKD(ray, secondBoundingBox, secondNodeChild, intersectionInfo)
}
if firstBoundingBox.Intersect(ray) {
return m.IntersectKD(ray, firstBoundingBox, firstNodeChild, intersectionInfo)
}
return m.IntersectKD(ray, secondBoundingBox, secondNodeChild, intersectionInfo)
} | mesh/mesh.go | 0.89093 | 0.610715 | mesh.go | starcoder |
package unityai
import "math"
// Quaternionf is a single-precision quaternion x*i + y*j + z*k + w.
type Quaternionf struct {
	x, y, z, w float32
}

// NewQuaternionf builds a quaternion from its four components.
func NewQuaternionf(x, y, z, w float32) Quaternionf {
	return Quaternionf{x, y, z, w}
}
// X returns the x (i) component.
func (this *Quaternionf) X() float32 {
	return this.x
}
// Y returns the y (j) component.
func (this *Quaternionf) Y() float32 {
	return this.y
}
// Z returns the z (k) component.
func (this *Quaternionf) Z() float32 {
	return this.z
}
// W returns the scalar (w) component.
func (this *Quaternionf) W() float32 {
	return this.w
}
// Add returns the componentwise sum of q and that.
func (q *Quaternionf) Add(that Quaternionf) Quaternionf {
	return Quaternionf{
		x: q.x + that.x,
		y: q.y + that.y,
		z: q.z + that.z,
		w: q.w + that.w,
	}
}
// Sub returns the componentwise difference q - that.
func (q *Quaternionf) Sub(that Quaternionf) Quaternionf {
	return Quaternionf{
		x: q.x - that.x,
		y: q.y - that.y,
		z: q.z - that.z,
		w: q.w - that.w,
	}
}
// Mul returns the Hamilton product lhs*rhs (quaternion multiplication,
// non-commutative). Note this method uses a value receiver while Add/Sub
// use pointer receivers — an inconsistency worth unifying eventually.
func (lhs Quaternionf) Mul(rhs Quaternionf) Quaternionf {
	return NewQuaternionf(
		lhs.w*rhs.x+lhs.x*rhs.w+lhs.y*rhs.z-lhs.z*rhs.y,
		lhs.w*rhs.y+lhs.y*rhs.w+lhs.z*rhs.x-lhs.x*rhs.z,
		lhs.w*rhs.z+lhs.z*rhs.w+lhs.x*rhs.y-lhs.y*rhs.x,
		lhs.w*rhs.w-lhs.x*rhs.x-lhs.y*rhs.y-lhs.z*rhs.z)
}
// QuaternionToMatrix3 writes the 3x3 rotation matrix corresponding to q into m.
func QuaternionToMatrix3(q Quaternionf, m *Matrix3x3f) {
	// Precalculate coordinate products
	x := q.x * 2.0
	y := q.y * 2.0
	z := q.z * 2.0
	xx := q.x * x
	yy := q.y * y
	zz := q.z * z
	xy := q.x * y
	xz := q.x * z
	yz := q.y * z
	wx := q.w * x
	wy := q.w * y
	wz := q.w * z
	// Calculate 3x3 matrix from orthonormal basis
	m.m_Data[0] = 1.0 - (yy + zz)
	m.m_Data[1] = xy + wz
	m.m_Data[2] = xz - wy
	m.m_Data[3] = xy - wz
	m.m_Data[4] = 1.0 - (xx + zz)
	m.m_Data[5] = yz + wx
	m.m_Data[6] = xz + wy
	m.m_Data[7] = yz - wx
	m.m_Data[8] = 1.0 - (xx + yy)
}
// QuaternionToMatrix4 writes the same rotation as a 4x4 homogeneous matrix:
// the 3x3 rotation block plus a zero translation column/row and a 1 in the
// bottom-right corner.
func QuaternionToMatrix4(q Quaternionf, m *Matrix4x4f) {
	// Precalculate coordinate products
	x := q.x * 2.0
	y := q.y * 2.0
	z := q.z * 2.0
	xx := q.x * x
	yy := q.y * y
	zz := q.z * z
	xy := q.x * y
	xz := q.x * z
	yz := q.y * z
	wx := q.w * x
	wy := q.w * y
	wz := q.w * z
	// Calculate 3x3 matrix from orthonormal basis
	m.m_Data[0] = 1.0 - (yy + zz)
	m.m_Data[1] = xy + wz
	m.m_Data[2] = xz - wy
	m.m_Data[3] = 0.0
	m.m_Data[4] = xy - wz
	m.m_Data[5] = 1.0 - (xx + zz)
	m.m_Data[6] = yz + wx
	m.m_Data[7] = 0.0
	m.m_Data[8] = xz + wy
	m.m_Data[9] = yz - wx
	m.m_Data[10] = 1.0 - (xx + yy)
	m.m_Data[11] = 0.0
	m.m_Data[12] = 0.0
	m.m_Data[13] = 0.0
	m.m_Data[14] = 0.0
	m.m_Data[15] = 1.0
}
// InverseQuaternion returns the conjugate of q (vector part negated).
// NOTE(review): the conjugate equals the true inverse only for unit
// quaternions — the result is not divided by the squared norm, so callers
// appear to rely on unit-length inputs; confirm.
func InverseQuaternion(q Quaternionf) Quaternionf {
	var ret Quaternionf
	ret.x = -q.x
	ret.y = -q.y
	ret.z = -q.z
	ret.w = q.w
	return ret
}
// RotationOrder selects the axis order in which Euler angles are composed.
type RotationOrder int

const (
	kOrderXYZ RotationOrder = iota
	kOrderXZY
	kOrderYZX
	kOrderYXZ
	kOrderZXY
	kOrderZYX
	// OrderUnity is Unity's rotation order (Z, then X, then Y).
	OrderUnity RotationOrder = kOrderZXY
)
// EulerToQuaternionUnity converts Euler angles given in DEGREES to a
// quaternion using Unity's ZXY rotation order (Mulf(Pi/180) converts
// degrees to radians).
func EulerToQuaternionUnity(eulerAngle Vector3f) Quaternionf {
	return EulerToQuaternion(eulerAngle.Mulf(math.Pi/180.0), kOrderZXY)
}
// EulerToQuaternion converts Euler angles (in RADIANS) to a quaternion by
// building one half-angle quaternion per axis and multiplying them in the
// sequence dictated by the rotation order.
func EulerToQuaternion(someEulerAngles Vector3f, order RotationOrder) Quaternionf {
	// Half-angle sines/cosines for each axis.
	cX := math.Cos(float64(someEulerAngles.x / 2.0))
	sX := math.Sin(float64(someEulerAngles.x / 2.0))
	cY := math.Cos(float64(someEulerAngles.y / 2.0))
	sY := math.Sin(float64(someEulerAngles.y / 2.0))
	cZ := math.Cos(float64(someEulerAngles.z / 2.0))
	sZ := math.Sin(float64(someEulerAngles.z / 2.0))
	qX := NewQuaternionf(float32(sX), 0.0, 0.0, float32(cX))
	qY := NewQuaternionf(0.0, float32(sY), 0.0, float32(cY))
	qZ := NewQuaternionf(0.0, 0.0, float32(sZ), float32(cZ))
	var ret Quaternionf
	// Note the multiplication sequence is the reverse of the order's name
	// (e.g. kOrderXYZ multiplies qZ * qY * qX).
	switch order {
	case kOrderZYX:
		CreateQuaternionFromAxisQuaternions(qX, qY, qZ, &ret)
	case kOrderYZX:
		CreateQuaternionFromAxisQuaternions(qX, qZ, qY, &ret)
	case kOrderXZY:
		CreateQuaternionFromAxisQuaternions(qY, qZ, qX, &ret)
	case kOrderZXY:
		CreateQuaternionFromAxisQuaternions(qY, qX, qZ, &ret)
	case kOrderYXZ:
		CreateQuaternionFromAxisQuaternions(qZ, qX, qY, &ret)
	case kOrderXYZ:
		CreateQuaternionFromAxisQuaternions(qZ, qY, qX, &ret)
	}
	return ret
}
func CreateQuaternionFromAxisQuaternions(q1 Quaternionf, q2 Quaternionf, q3 Quaternionf, result *Quaternionf) {
*result = q1.Mul(q2).Mul(q3)
} | quaternion.go | 0.825132 | 0.556219 | quaternion.go | starcoder |
package trie
// A Trie is a set that is optimized for working with strings.
type Trie interface {
	// Add inserts new values into the trie.
	Add(values ...string)
	// Complete returns all strings that complete the supplied prefix string.
	// If no relevant strings exist, the resulting array will be empty.
	Complete(prefix string) []string
	// Contains checks if the trie contains all specified values.
	Contains(values ...string) bool
	// Remove deletes the specified values from the trie.
	Remove(values ...string)
}

// trie is one node of the tree: the rune it represents plus its children,
// keyed by rune. A child stored under the zero rune marks the end of a
// complete string.
type trie struct {
	value    rune
	children map[rune]*trie
}

// add walks value from index onward, creating nodes as needed, then drops a
// zero-rune terminator under the final node.
func (t *trie) add(value []rune, index int) {
	node := t
	for _, r := range value[index:] {
		child, ok := node.children[r]
		if !ok {
			child = &trie{value: r, children: map[rune]*trie{}}
			node.children[r] = child
		}
		node = child
	}
	node.children[0] = &trie{}
}

func (t *trie) Add(values ...string) {
	if t.children == nil {
		t.children = map[rune]*trie{}
	}
	for _, v := range values {
		t.add([]rune(v), 0)
	}
}

// get follows value (from index onward) down the tree and returns the node
// it ends at, or nil when the path does not exist.
func (t *trie) get(value []rune, index int) *trie {
	node := t
	for _, r := range value[index:] {
		next, ok := node.children[r]
		if !ok {
			return nil
		}
		node = next
	}
	return node
}

// traverse collects every complete string reachable from this node, each
// starting with prefix. first marks the entry node, whose own value must
// not be appended and whose zero value must not be read as a terminator.
func (t *trie) traverse(prefix string, first bool) []string {
	if !first {
		if t.value == 0 {
			// Zero-rune terminator: prefix is a complete stored string.
			return []string{prefix}
		}
		prefix += string(t.value)
	}
	found := []string{}
	for _, child := range t.children {
		found = append(found, child.traverse(prefix, false)...)
	}
	return found
}

func (t *trie) Complete(prefix string) []string {
	node := t.get([]rune(prefix), 0)
	if node == nil {
		return []string{}
	}
	return node.traverse(prefix, true)
}

// contains reports whether value (from index onward) is stored as a
// complete string: its path must exist and end in a terminator.
func (t *trie) contains(value []rune, index int) bool {
	node := t.get(value, index)
	if node == nil {
		return false
	}
	_, ok := node.children[0]
	return ok
}

func (t *trie) Contains(values ...string) bool {
	for _, v := range values {
		if !t.contains([]rune(v), 0) {
			return false
		}
	}
	return true
}

// remove deletes value (from index onward) and prunes nodes that become
// childless; the return value tells the caller to drop this node.
func (t *trie) remove(value []rune, index int) bool {
	if index >= len(value) {
		delete(t.children, 0)
		return len(t.children) == 0
	}
	r := value[index]
	child, ok := t.children[r]
	if !ok {
		return false
	}
	if child.remove(value, index+1) {
		delete(t.children, r)
	}
	return len(t.children) == 0
}

func (t *trie) Remove(values ...string) {
	for _, v := range values {
		t.remove([]rune(v), 0)
	}
}

// NewTrie initializes a new trie.
func NewTrie() Trie {
	return &trie{}
}
package p5
import (
"math"
"gioui.org/f32"
"gioui.org/op"
"gioui.org/op/clip"
"gioui.org/op/paint"
)
// Ellipse draws an ellipse at (x,y) with the provided width and height,
// filled and/or stroked according to the current drawing state. The arc is
// described by its two foci (Gio's clip.Path.Arc form).
func (p *Proc) Ellipse(x, y, w, h float64) {
	if !p.doFill() && !p.doStroke() {
		return
	}
	// Work with semi-axes from here on.
	w *= 0.5
	h *= 0.5
	var (
		ec float64   // center-to-focus distance
		f1 f32.Point // first focus, relative to the start point p1
		f2 f32.Point // second focus, relative to the start point p1
		p1 = p.pt(x-w, y)
	)
	// The foci lie on the major axis: horizontal when |w| > |h|,
	// vertical otherwise.
	switch {
	case math.Abs(w) > math.Abs(h):
		ec = math.Sqrt(w*w - h*h)
		f1 = p.pt(x+ec, y).Sub(p1)
		f2 = p.pt(x-ec, y).Sub(p1)
	default:
		ec = math.Sqrt(h*h - w*w)
		f1 = p.pt(x, y+ec).Sub(p1)
		f2 = p.pt(x, y-ec).Sub(p1)
	}
	// path rebuilds the full 2*Pi arc on the given op list; it is invoked
	// separately for the fill and the stroke passes.
	path := func(o *op.Ops) clip.PathSpec {
		var path clip.Path
		path.Begin(o)
		path.Move(p1)
		path.Arc(f1, f2, 2*math.Pi)
		return path.End()
	}
	if fill := p.stk.cur().fill; fill != nil {
		stk := op.Push(p.ctx.Ops)
		paint.FillShape(
			p.ctx.Ops,
			rgba(fill),
			clip.Outline{
				Path: path(p.ctx.Ops),
			}.Op(),
		)
		stk.Pop()
	}
	if stroke := p.stk.cur().stroke.color; stroke != nil {
		stk := op.Push(p.ctx.Ops)
		paint.FillShape(
			p.ctx.Ops,
			rgba(stroke),
			clip.Stroke{
				Path: path(p.ctx.Ops),
				Style: p.stk.cur().stroke.style,
			}.Op(),
		)
		stk.Pop()
	}
}
// Circle draws a circle at (x,y) with a diameter d (an ellipse with equal
// width and height).
func (p *Proc) Circle(x, y, d float64) {
	p.Ellipse(x, y, d, d)
}
// Arc draws an ellipsoidal arc centered at (x,y), with the provided
// width and height, and a path from the beg to end radians.
// Positive angles denote a counter-clockwise path. Only the stroke is
// drawn; without a stroke color this is a no-op.
func (p *Proc) Arc(x, y, w, h float64, beg, end float64) {
	if !p.doStroke() {
		return
	}
	var (
		c = p.pt(x, y)
		a = p.cfg.trX(w)
		b = p.cfg.trY(h)
		f1 f32.Point
		f2 f32.Point
	)
	// Place the foci on the major axis (horizontal when a >= b).
	switch {
	case a >= b:
		f := math.Sqrt(a*a - b*b)
		f1 = c.Add(p.pt(+f, 0))
		f2 = c.Add(p.pt(-f, 0))
	default:
		f := math.Sqrt(b*b - a*a)
		f1 = c.Add(p.pt(0, +f))
		f2 = c.Add(p.pt(0, -f))
	}
	var (
		// p0 is the arc's start point at angle beg on the ellipse.
		sin, cos = math.Sincos(beg)
		p0 = p.pt(a*cos, b*sin).Add(c)
		path clip.Path
	)
	stk := op.Push(p.ctx.Ops)
	path.Begin(p.ctx.Ops)
	path.Move(p0)
	path.Arc(f1.Sub(p0), f2.Sub(p0), float32(end-beg))
	paint.FillShape(
		p.ctx.Ops,
		rgba(p.stk.cur().stroke.color),
		clip.Stroke{
			Path: path.End(),
			Style: p.stk.cur().stroke.style,
		}.Op(),
	)
	stk.Pop()
}
// Line draws a line between (x1,y1) and (x2,y2) using the current stroke
// color and style; without a stroke color this is a no-op.
func (p *Proc) Line(x1, y1, x2, y2 float64) {
	if !p.doStroke() {
		return
	}
	var (
		p1 = p.pt(x1, y1)
		p2 = p.pt(x2, y2)
		path clip.Path
	)
	stk := op.Push(p.ctx.Ops)
	path.Begin(p.ctx.Ops)
	path.Move(p1)
	// clip.Path.Line takes a relative offset from the current position.
	path.Line(p2.Sub(path.Pos()))
	paint.FillShape(
		p.ctx.Ops,
		rgba(p.stk.cur().stroke.color),
		clip.Stroke{
			Path: path.End(),
			Style: p.stk.cur().stroke.style,
		}.Op(),
	)
	stk.Pop()
}
// Quad draws a quadrilateral, connecting the 4 points (x1,y1),
// (x2,y2), (x3,y3) and (x4,y4) together. The first point is repeated to
// close the polygon.
func (p *Proc) Quad(x1, y1, x2, y2, x3, y3, x4, y4 float64) {
	p.poly(
		p.pt(x1, y1),
		p.pt(x2, y2),
		p.pt(x3, y3),
		p.pt(x4, y4),
		p.pt(x1, y1),
	)
}
// Rect draws a rectangle at (x,y) with width w and height h.
func (p *Proc) Rect(x, y, w, h float64) {
	p.Quad(x, y, x+w, y, x+w, y+h, x, y+h)
}
// Square draws a square at (x,y) with size s.
func (p *Proc) Square(x, y, s float64) {
	p.Rect(x, y, s, s)
}
// Triangle draws a triangle, connecting the 3 points (x1,y1), (x2,y2)
// and (x3,y3) together. The first point is repeated to close the polygon.
func (p *Proc) Triangle(x1, y1, x2, y2, x3, y3 float64) {
	p.poly(
		p.pt(x1, y1),
		p.pt(x2, y2),
		p.pt(x3, y3),
		p.pt(x1, y1),
	)
}
// Bezier draws a cubic Bézier curve from (x1,y1) to (x4,y4) with the two
// control points (x2,y2) and (x3,y3). Only the stroke is drawn; without a
// stroke color this is a no-op.
func (p *Proc) Bezier(x1, y1, x2, y2, x3, y3, x4, y4 float64) {
	if !p.doStroke() {
		return
	}
	var (
		sp = p.pt(x1, y1)
		// Control and end points are relative to the start point, as
		// required by clip.Path.Cube.
		cp0 = p.pt(x2, y2).Sub(sp)
		cp1 = p.pt(x3, y3).Sub(sp)
		ep = p.pt(x4, y4).Sub(sp)
		path clip.Path
	)
	defer op.Push(p.ctx.Ops).Pop()
	path.Begin(p.ctx.Ops)
	path.Move(sp)
	path.Cube(cp0, cp1, ep)
	paint.FillShape(
		p.ctx.Ops,
		rgba(p.stk.cur().stroke.color),
		clip.Stroke{
			Path: path.End(),
			Style: p.stk.cur().stroke.style,
		}.Op(),
	)
}
func (p *Proc) poly(ps ...f32.Point) {
if !p.doFill() && !p.doStroke() {
return
}
path := func(o *op.Ops) clip.PathSpec {
var path clip.Path
path.Begin(o)
path.Move(ps[0])
for _, p := range ps[1:] {
path.Line(p.Sub(path.Pos()))
}
return path.End()
}
if p.doFill() {
stk := op.Push(p.ctx.Ops)
paint.FillShape(
p.ctx.Ops,
rgba(p.stk.cur().fill),
clip.Outline{
Path: path(p.ctx.Ops),
}.Op(),
)
stk.Pop()
}
if p.doStroke() {
stk := op.Push(p.ctx.Ops)
paint.FillShape(
p.ctx.Ops,
rgba(p.stk.cur().stroke.color),
clip.Stroke{
Path: path(p.ctx.Ops),
Style: p.stk.cur().stroke.style,
}.Op(),
)
stk.Pop()
}
} | shapes.go | 0.747432 | 0.565659 | shapes.go | starcoder |
package date
import "time"
// Period is a time interval.
type Period int

const (
	// Once represents the beginning of the interval.
	Once Period = iota
	// Daily is a daily interval.
	Daily
	// Weekly is a weekly interval (weeks run Monday through Sunday).
	Weekly
	// Monthly is a monthly interval.
	Monthly
	// Quarterly is a quarterly interval.
	Quarterly
	// Yearly is a yearly interval.
	Yearly
)
// String returns the lower-case name of the period, or "" for an unknown
// value.
func (p Period) String() string {
	names := [...]string{
		Once:      "once",
		Daily:     "daily",
		Weekly:    "weekly",
		Monthly:   "monthly",
		Quarterly: "quarterly",
		Yearly:    "yearly",
	}
	if p < Once || int(p) >= len(names) {
		return ""
	}
	return names[p]
}
// Date creates a new date at midnight UTC on the given day.
func Date(year int, month time.Month, day int) time.Time {
	return time.Date(year, month, day, 0, 0, 0, 0, time.UTC)
}
// StartOf returns the first date in the given period which
// contains the receiver. Once and Daily return d itself.
func StartOf(d time.Time, p Period) time.Time {
	switch p {
	case Once:
		return d
	case Daily:
		return d
	case Weekly:
		// Weekday() counts from Sunday==0, so (wd+6)%7 is the number of
		// days since the most recent Monday (weeks start on Monday).
		var x = (int(d.Weekday()) + 6) % 7
		return d.AddDate(0, 0, -x)
	case Monthly:
		return Date(d.Year(), d.Month(), 1)
	case Quarterly:
		// Round the month down to the first month of its quarter.
		return Date(d.Year(), ((d.Month()-1)/3*3)+1, 1)
	case Yearly:
		return Date(d.Year(), 1, 1)
	}
	return d
}
// EndOf returns the last date in the given period that contains
// the receiver. Once and Daily return d itself.
func EndOf(d time.Time, p Period) time.Time {
	switch p {
	case Once:
		return d
	case Daily:
		return d
	case Weekly:
		// Weekday() counts from Sunday==0; (7-wd)%7 is the number of days
		// until the next Sunday (weeks end on Sunday).
		var x = (7 - int(d.Weekday())) % 7
		return d.AddDate(0, 0, x)
	case Monthly:
		// One month after the month start, minus a day.
		return StartOf(d, Monthly).AddDate(0, 1, -1)
	case Quarterly:
		// Three months after the quarter start, minus a day.
		return StartOf(d, Quarterly).AddDate(0, 3, 0).AddDate(0, 0, -1)
	case Yearly:
		return Date(d.Year(), 12, 31)
	}
	return d
}
// Series returns a series of dates in the given interval,
// which contains both t0 and t1.
func Series(t0, t1 time.Time, p Period) []time.Time {
var (
res = []time.Time{StartOf(t0, p).AddDate(0, 0, -1)}
t = t0
)
for t == t1 || t.Before(t1) {
res = append(res, EndOf(t, p))
t = EndOf(t, p).AddDate(0, 0, 1)
}
return res
} | lib/date/date.go | 0.745676 | 0.578329 | date.go | starcoder |
package either
import (
"github.com/calebcase/base/data"
"github.com/calebcase/base/data/list"
)
// Class abstracts construction of Either values: a factory for the Left
// and Right variants.
type Class[A, B any] interface {
	NewLeft(A) Left[A, B]
	NewRight(B) Right[A, B]
}
// Type is the canonical, stateless Class implementation.
type Type[A, B any] struct{}

// Ensure Type implements Class.
var _ Class[int, string] = Type[int, string]{}

// NewType returns a Type factory for Either[A, B] values.
func NewType[A, B any]() Type[A, B] {
	return Type[A, B]{}
}

// NewLeft wraps v in a Left.
func (t Type[A, B]) NewLeft(v A) Left[A, B] {
	return Left[A, B]{v}
}

// NewRight wraps v in a Right.
func (t Type[A, B]) NewRight(v B) Right[A, B] {
	return Right[A, B]{v}
}
// Either is the sum type: a value is either a Left[A, B] or a Right[A, B].
// The unexported marker method seals the set of implementations to this
// package.
type Either[A, B any] interface {
	isEither(A, B)
}
// Left contains left value A.
type Left[A, B any] struct {
	Value A
}

// Ensure Left implements data.Data.
var _ data.Data[int] = Left[int, string]{}

func (l Left[A, B]) isEither(_ A, _ B) {}

// DEmpty reports true: Left is treated as the empty side of the sum in the
// data.Data view.
func (l Left[A, B]) DEmpty() bool {
	return true
}

// DValue panics with data.ErrNoValue — a Left exposes no data.Data value
// even though it carries Value.
// NOTE(review): confirm this matches the data.Data contract for empty data.
func (l Left[A, B]) DValue() A {
	panic(data.ErrNoValue)
}

// DRest returns nil: a Left has no tail.
func (l Left[A, B]) DRest() data.Data[A] {
	return nil
}
// Right contains the right value.
type Right[A, B any] struct {
	Value B
}

// Ensure Right implements data.Data
var _ data.Data[string] = Right[int, string]{}

func (r Right[A, B]) isEither(_ A, _ B) {}

// DEmpty reports false: a Right always carries a value.
func (r Right[A, B]) DEmpty() bool {
	return false
}

// DValue returns the carried payload.
func (r Right[A, B]) DValue() B {
	return r.Value
}

// DRest returns nil: a Right is viewed as a single-element datum.
func (r Right[A, B]) DRest() data.Data[B] {
	return nil
}
// Apply eliminates an Either: it invokes fL on the payload of a Left and fR
// on the payload of a Right, returning whichever result is produced. It
// panics when v is neither variant (e.g. a nil Either).
func Apply[A, B, C any](fL func(a A) C, fR func(b B) C, v Either[A, B]) C {
	switch e := v.(type) {
	case Left[A, B]:
		return fL(e.Value)
	case Right[A, B]:
		return fR(e.Value)
	default:
		panic("impossible")
	}
}
func Lefts[A, B any](es list.List[Either[A, B]]) (vs list.List[A]) {
vs = list.List[A]{}
if es == nil || len(es) == 0 {
return vs
}
if l, ok := es.DValue().(Left[A, B]); ok {
vs = append(vs, l.Value)
}
for rest := es.DRest(); rest != nil; rest = rest.DRest() {
if l, ok := rest.DValue().(Left[A, B]); ok {
vs = append(vs, l.Value)
}
}
return vs
} | data/either/either.go | 0.71889 | 0.50177 | either.go | starcoder |
package ent
import (
"fmt"
"sign-in/app/record/service/internal/data/ent/record"
"strings"
"time"
"entgo.io/ent/dialect/sql"
)
// Record is the model entity for the Record schema.
// NOTE(review): this file matches the shape of entc-generated code; if so,
// prefer regenerating over hand-editing.
type Record struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// UserID holds the value of the "user_id" field.
	UserID int64 `json:"user_id,omitempty"`
	// SignInIndex holds the value of the "sign_in_index" field.
	SignInIndex int `json:"sign_in_index,omitempty"`
	// Reward holds the value of the "reward" field.
	Reward float64 `json:"reward,omitempty"`
	// SignInDay holds the value of the "sign_in_day" field.
	SignInDay string `json:"sign_in_day,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
}
// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to a pointer of the matching sql.Null* type so the
// driver can scan NULLs safely; unknown columns are an error.
func (*Record) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i := range columns {
		switch columns[i] {
		case record.FieldReward:
			values[i] = new(sql.NullFloat64)
		case record.FieldID, record.FieldUserID, record.FieldSignInIndex:
			values[i] = new(sql.NullInt64)
		case record.FieldSignInDay:
			values[i] = new(sql.NullString)
		case record.FieldCreatedAt, record.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			return nil, fmt.Errorf("unexpected column %q for type Record", columns[i])
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Record fields.
func (r *Record) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case record.FieldID:
			// Restyled to match every other case below. The previous
			// version formatted %T with the typed-nil assertion target,
			// which always printed the *expected* type; values[i] reports
			// the actual scanned type.
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field id", values[i])
			} else if value.Valid {
				r.ID = int(value.Int64)
			}
		case record.FieldUserID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user_id", values[i])
			} else if value.Valid {
				r.UserID = value.Int64
			}
		case record.FieldSignInIndex:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sign_in_index", values[i])
			} else if value.Valid {
				r.SignInIndex = int(value.Int64)
			}
		case record.FieldReward:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field reward", values[i])
			} else if value.Valid {
				r.Reward = value.Float64
			}
		case record.FieldSignInDay:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field sign_in_day", values[i])
			} else if value.Valid {
				r.SignInDay = value.String
			}
		case record.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				r.CreatedAt = value.Time
			}
		case record.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				r.UpdatedAt = value.Time
			}
		}
	}
	return nil
}
// Update returns a builder for updating this Record.
// Note that you need to call Record.Unwrap() before calling this method if this Record
// was returned from a transaction, and the transaction was committed or rolled back.
func (r *Record) Update() *RecordUpdateOne {
	return (&RecordClient{config: r.config}).UpdateOne(r)
}
// Unwrap unwraps the Record entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics when called on a Record that did not come from a transaction.
func (r *Record) Unwrap() *Record {
	tx, ok := r.config.driver.(*txDriver)
	if !ok {
		panic("ent: Record is not a transactional entity")
	}
	r.config.driver = tx.drv
	return r
}
// String implements fmt.Stringer, rendering all fields of the Record in a
// single "Record(...)" line. Timestamps use the time.ANSIC layout.
func (r *Record) String() string {
	var b strings.Builder
	b.WriteString("Record(")
	fmt.Fprintf(&b, "id=%v", r.ID)
	fmt.Fprintf(&b, ", user_id=%v", r.UserID)
	fmt.Fprintf(&b, ", sign_in_index=%v", r.SignInIndex)
	fmt.Fprintf(&b, ", reward=%v", r.Reward)
	b.WriteString(", sign_in_day=")
	b.WriteString(r.SignInDay)
	b.WriteString(", created_at=")
	b.WriteString(r.CreatedAt.Format(time.ANSIC))
	b.WriteString(", updated_at=")
	b.WriteString(r.UpdatedAt.Format(time.ANSIC))
	b.WriteString(")")
	return b.String()
}
// Records is a parsable slice of Record.
type Records []*Record
func (r Records) config(cfg config) {
for _i := range r {
r[_i].config = cfg
}
} | app/record/service/internal/data/ent/record.go | 0.663015 | 0.437042 | record.go | starcoder |
package texture
import (
"fmt"
"github.com/adrianderstroff/pbr/pkg/view/image/image2d"
gl "github.com/adrianderstroff/pbr/pkg/core/gl"
)
// Texture holds no to several images.
type Texture struct {
	handle uint32 // OpenGL texture object name
	target uint32 // texture target this object was created for
	texPos uint32 // e.g. gl.TEXTURE0
}
// GetHandle returns the OpenGL handle of this texture.
func (tex *Texture) GetHandle() uint32 {
	return tex.handle
}
// Delete destroys the Texture, releasing the OpenGL texture object.
func (tex *Texture) Delete() {
	gl.DeleteTextures(1, &tex.handle)
}
// GenMipmap generates mipmap levels.
// Chooses the two mipmaps that most closely match the size of the pixel being
// textured and uses the GL_LINEAR criterion to produce a texture value.
func (tex *Texture) GenMipmap() {
	tex.Bind(0)
	gl.GenerateMipmap(tex.target)
	tex.Unbind()
}
// SetMinMagFilter sets the filter to determine which behaviour is used for
// level of detail functions.
// NOTE(review): this hard-codes gl.TEXTURE_2D rather than using tex.target,
// so it has no effect on 1D/3D/cube-map textures — confirm intent.
func (tex *Texture) SetMinMagFilter(min, mag int32) {
	tex.Bind(0)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, min)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, mag)
	tex.Unbind()
}
// SetWrap1D sets the behavior at the 1D texure borders.
func (tex *Texture) SetWrap1D(s int32) {
	tex.Bind(0)
	gl.TexParameteri(gl.TEXTURE_1D, gl.TEXTURE_WRAP_S, s)
	tex.Unbind()
}
// SetWrap2D sets the behavior at the 2D texure borders for the s and t axes.
func (tex *Texture) SetWrap2D(s, t int32) {
	tex.Bind(0)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, s)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, t)
	tex.Unbind()
}
// SetWrap3D sets the behavior at the 3D texure borders for the s, t and r axes.
func (tex *Texture) SetWrap3D(s, t, r int32) {
	tex.Bind(0)
	gl.TexParameteri(gl.TEXTURE_3D, gl.TEXTURE_WRAP_S, s)
	gl.TexParameteri(gl.TEXTURE_3D, gl.TEXTURE_WRAP_T, t)
	gl.TexParameteri(gl.TEXTURE_3D, gl.TEXTURE_WRAP_R, r)
	tex.Unbind()
}
// Bind makes the texure available at the specified position.
// index is the texture-unit offset from gl.TEXTURE0.
func (tex *Texture) Bind(index uint32) {
	tex.texPos = gl.TEXTURE0 + index
	gl.ActiveTexture(tex.texPos)
	gl.BindTexture(tex.target, tex.handle)
}
// Unbind makes the texture unavailable for reading and resets texPos.
func (tex *Texture) Unbind() {
	tex.texPos = 0
	gl.BindTexture(tex.target, 0)
}
// DownloadImage2D downloads texture data from the GPU into an Image2D,
// using the given pixel format and pixel type for the transfer.
func (tex *Texture) DownloadImage2D(format, pixeltype uint32) (image2d.Image2D, error) {
	// bind texture for using the following functions
	tex.Bind(0)
	defer tex.Unbind()

	// grab texture dimensions
	var (
		width  int32
		height int32
	)
	gl.GetTexLevelParameteriv(tex.target, 0, gl.TEXTURE_WIDTH, &width)
	gl.GetTexLevelParameteriv(tex.target, 0, gl.TEXTURE_HEIGHT, &height)

	// grab sizes from format and pixel type
	bytesize := byteSizeFromPixelType(pixeltype)
	channels := channelsFromFormat(format)
	// NOTE(review): unconditional debug print on every download — consider
	// removing or routing through a logger.
	fmt.Printf("Texture Format (%v,%v) %v channels %vbit\n", width, height,
		channels, bytesize*8)

	// initialize data
	data := make([]uint8, width*height*int32(channels*bytesize))

	// download data into buffer
	gl.GetTexImage(tex.target, 0, format, pixeltype, gl.Ptr(data))

	img, err := image2d.MakeFromData(int(width), int(height), channels, data)
	if err != nil {
		return image2d.Image2D{}, err
	}

	return img, nil
}
// DownloadCubeMapImages extracts texture data from the GPU into 6 Image2D for
// each side of the cube map. The sides are returned in the OpenGL face order
// starting at gl.TEXTURE_CUBE_MAP_POSITIVE_X.
func (tex *Texture) DownloadCubeMapImages(format, pixeltype uint32) ([]image2d.Image2D, error) {
	// bind texture for using the following functions
	tex.Bind(0)
	defer tex.Unbind()

	// grab texture dimensions (all faces share the +X face's size)
	var (
		width  int32
		height int32
	)
	gl.GetTexLevelParameteriv(gl.TEXTURE_CUBE_MAP_POSITIVE_X, 0, gl.TEXTURE_WIDTH, &width)
	gl.GetTexLevelParameteriv(gl.TEXTURE_CUBE_MAP_POSITIVE_X, 0, gl.TEXTURE_HEIGHT, &height)

	// grab sizes from format and pixel type
	bytesize := byteSizeFromPixelType(pixeltype)
	channels := channelsFromFormat(format)

	// download all sides of the cubemap
	cubeMapImages := make([]image2d.Image2D, 6)
	for i := 0; i < 6; i++ {
		// download data of a cubemap side into buffer
		var target uint32 = gl.TEXTURE_CUBE_MAP_POSITIVE_X + uint32(i)
		data := make([]uint8, width*height*int32(channels*bytesize))
		gl.GetTexImage(target, 0, format, pixeltype, gl.Ptr(data))

		// create image2d from data
		img, err := image2d.MakeFromData(int(width), int(height), channels, data)
		if err != nil {
			return []image2d.Image2D{}, err
		}

		// add to slice
		cubeMapImages[i] = img
	}

	return cubeMapImages, nil
}
// channelsFromFormat maps an OpenGL pixel format to its channel count,
// returning -1 for formats it does not know about.
func channelsFromFormat(format uint32) int {
	// Go switch cases do not fall through, so the original break
	// statements were redundant; return directly from each case.
	switch format {
	case gl.RED:
		return 1
	case gl.RG:
		return 2
	case gl.RGB:
		return 3
	case gl.RGBA:
		return 4
	default:
		return -1
	}
}
func byteSizeFromPixelType(pixeltype uint32) int {
var bytesize int = 3
switch pixeltype {
case gl.BYTE, gl.UNSIGNED_BYTE:
bytesize = 1
break
case gl.SHORT, gl.UNSIGNED_SHORT:
bytesize = 2
break
case gl.INT, gl.UNSIGNED_INT, gl.FLOAT:
bytesize = 4
break
}
return bytesize
} | pkg/view/texture/texture.go | 0.764628 | 0.455138 | texture.go | starcoder |
package assert
import (
"reflect"
"time"
)
// Tester is the subset of *testing.T used by this package.
// The idea here is to accept `*testing.T`, or another impl to be able to test this package itself.
type Tester interface {
	Errorf(format string, args ...any)
	Helper()
}
// ExpectedActual logs a testing error and returns false if the expected and actual values are not equal.
// Typically you will not need the return value unless you want to stop testing on failure.
// If you call this within a test utility func, make sure you use `t.Helper()` so you get accurate failure locations.
func ExpectedActual[T comparable](t Tester, expected, actual T, name string) bool {
	if expected != actual {
		t.Helper() // report the failure at the caller's location
		t.Errorf(`[%s] Expected: "%+v".(%s) Actual: "%+v".(%s)`, name,
			expected, reflect.TypeOf(expected),
			actual, reflect.TypeOf(actual))
		return false
	}
	return true
}
// ExpectedApproxTime logs a testing error and returns false if the expected and actual time values are not close.
// Specifically, if the absolute value of the difference between the two is LARGER than the given "epsilon".
// Typically you will not need the return value unless you want to stop testing on failure.
func ExpectedApproxTime(t Tester, expected, actual time.Time, epsilon time.Duration, name string) bool {
	delta := abs(actual.Sub(expected))
	if delta <= epsilon {
		return true
	}
	t.Helper() // report the failure at the caller's location
	t.Errorf(`[%s] Expected: "%s" Actual: "%s" Delta: %s Tolerance: %s`, name, expected, actual, delta, epsilon)
	return false
}
// ExpectedApproxDuration logs a testing error and returns false if the expected and actual duration values are not close.
// Specifically, if the absolute value of the difference between the two is LARGER than the given "epsilon".
// Typically you will not need the return value unless you want to stop testing on failure.
func ExpectedApproxDuration(t Tester, expected, actual time.Duration, epsilon time.Duration, name string) bool {
	delta := abs(actual - expected)
	if delta <= epsilon {
		return true
	}
	t.Helper() // report the failure at the caller's location
	t.Errorf(`[%s] Expected: "%s" Actual: "%s" Delta: %s Tolerance: %s`, name, expected, actual, delta, epsilon)
	return false
}
func abs(d time.Duration) time.Duration {
if d < 0 {
return -d
}
return d
} | assert/assert.go | 0.791781 | 0.605216 | assert.go | starcoder |
package square
// A discount applicable to items.
type CatalogDiscount struct {
// The discount name. This is a searchable attribute for use in applicable query filters, and its value length is of Unicode code points.
Name string `json:"name,omitempty"`
// Indicates whether the discount is a fixed amount or percentage, or entered at the time of sale. See [CatalogDiscountType](#type-catalogdiscounttype) for possible values
DiscountType string `json:"discount_type,omitempty"`
// The percentage of the discount as a string representation of a decimal number, using a `.` as the decimal separator and without a `%` sign. A value of `7.5` corresponds to `7.5%`. Specify a percentage of `0` if `discount_type` is `VARIABLE_PERCENTAGE`. Do not use this field for amount-based or variable discounts.
Percentage string `json:"percentage,omitempty"`
AmountMoney *Money `json:"amount_money,omitempty"`
// Indicates whether a mobile staff member needs to enter their PIN to apply the discount to a payment in the Square Point of Sale app.
PinRequired bool `json:"pin_required,omitempty"`
// The color of the discount display label in the Square Point of Sale app. This must be a valid hex color code.
LabelColor string `json:"label_color,omitempty"`
// Indicates whether this discount should reduce the price used to calculate tax. Most discounts should use `MODIFY_TAX_BASIS`. However, in some circumstances taxes must be calculated based on an item's price, ignoring a particular discount. For example, in many US jurisdictions, a manufacturer coupon or instant rebate reduces the price a customer pays but does not reduce the sale price used to calculate how much sales tax is due. In this case, the discount representing that manufacturer coupon should have `DO_NOT_MODIFY_TAX_BASIS` for this field. If you are unsure whether you need to use this field, consult your tax professional. See [CatalogDiscountModifyTaxBasis](#type-catalogdiscountmodifytaxbasis) for possible values
ModifyTaxBasis string `json:"modify_tax_basis,omitempty"`
} | square/model_catalog_discount.go | 0.861086 | 0.541894 | model_catalog_discount.go | starcoder |
package testdata
/*
+extract
openapi: 3.0.0
servers:
- url: //petstore.swagger.io/v2
description: Default server
- url: //petstore.swagger.io/sandbox
description: Sandbox server
info:
description: |
This is a sample server Petstore server.
You can find out more about Swagger at
[http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/).
For this sample, you can use the api key `special-key` to test the authorization filters.
# Introduction
This API is documented in **OpenAPI format** and is based on
[Petstore sample](http://petstore.swagger.io/) provided by [swagger.io](http://swagger.io) team.
It was **extended** to illustrate features of [generator-openapi-repo](https://github.com/Rebilly/generator-openapi-repo)
tool and [ReDoc](https://github.com/Redocly/redoc) documentation. In addition to standard
OpenAPI syntax we use a few [vendor extensions](https://github.com/Redocly/redoc/blob/master/docs/redoc-vendor-extensions.md).
This file was generated using https://github.com/jacobkring/go-oas-extract. You can see this example [here](https://github.com/jacobkring/go-oas-extract/example)
# OpenAPI Specification
This API is documented in **OpenAPI format** and is based on
[Petstore sample](http://petstore.swagger.io/) provided by [swagger.io](http://swagger.io) team.
It was **extended** to illustrate features of [generator-openapi-repo](https://github.com/Rebilly/generator-openapi-repo)
tool and [ReDoc](https://github.com/Redocly/redoc) documentation. In addition to standard
OpenAPI syntax we use a few [vendor extensions](https://github.com/Redocly/redoc/blob/master/docs/redoc-vendor-extensions.md).
# Cross-Origin Resource Sharing
This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/).
And that allows cross-domain communication from the browser.
All responses have a wildcard same-origin which makes them completely public and accessible to everyone, including any code on any site.
# Authentication
Petstore offers two forms of authentication:
- API Key
- OAuth2
OAuth2 - an open protocol to allow secure authorization in a simple
and standard method from web, mobile and desktop applications.
<SecurityDefinitions />
version: 1.0.0
title: Swagger Petstore
termsOfService: 'http://swagger.io/terms/'
contact:
name: API Support
email: <EMAIL>
url: https://github.com/Redocly/redoc
x-logo:
url: 'https://redocly.github.io/redoc/petstore-logo.png'
altText: Petstore logo
license:
name: Apache 2.0
url: 'http://www.apache.org/licenses/LICENSE-2.0.html'
*/
/*
+extract:component:securitySchemes
petstore_auth:
description: |
Get access to data while protecting your account credentials.
OAuth2 is also a safer and more secure way to give you access.
type: oauth2
flows:
implicit:
authorizationUrl: 'http://petstore.swagger.io/api/oauth/dialog'
scopes:
'write:pets': modify pets in your account
'read:pets': read your pets
api_key:
description: >
For this sample, you can use the api key `special-key` to test the
authorization filters.
type: apiKey
name: api_key
in: header
*/
package config
/**
* Lsnpool describes the configuration of an LSN (large-scale NAT) pool resource.
 */
type Lsnpool struct {
	/**
	* Name for the LSN pool. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the LSN pool is created. The following requirement applies only to the Citrix ADC CLI: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "lsn pool1" or 'lsn pool1').
	 */
	Poolname string `json:"poolname,omitempty"`
	/**
	* Type of NAT IP address and port allocation (from the LSN pools bound to an LSN group) for subscribers (of the LSN client entity bound to the LSN group):
	Available options function as follows:
	* Deterministic - Allocate a NAT IP address and a block of ports to each subscriber (of the LSN client bound to the LSN group). The Citrix ADC sequentially allocates NAT resources to these subscribers. The Citrix ADC ADC assigns the first block of ports (block size determined by the port block size parameter of the LSN group) on the beginning NAT IP address to the beginning subscriber IP address. The next range of ports is assigned to the next subscriber, and so on, until the NAT address does not have enough ports for the next subscriber. In this case, the first port block on the next NAT address is used for the subscriber, and so on. Because each subscriber now receives a deterministic NAT IP address and a block of ports, a subscriber can be identified without any need for logging. For a connection, a subscriber can be identified based only on the NAT IP address and port, and the destination IP address and port.
	* Dynamic - Allocate a random NAT IP address and a port from the LSN NAT pool for a subscriber's connection. If port block allocation is enabled (in LSN pool) and a port block size is specified (in the LSN group), the Citrix ADC allocates a random NAT IP address and a block of ports for a subscriber when it initiates a connection for the first time. The ADC allocates this NAT IP address and a port (from the allocated block of ports) for different connections from this subscriber. If all the ports are allocated (for different subscriber's connections) from the subscriber's allocated port block, the ADC allocates a new random port block for the subscriber.
	Only LSN Pools and LSN groups with the same NAT type settings can be bound together. Multiples LSN pools can be bound to an LSN group. A maximum of 16 LSN pools can be bound to an LSN group.
	 */
	Nattype string `json:"nattype,omitempty"`
	/**
	* Allocate a random NAT port block, from the available NAT port pool of an NAT IP address, for each subscriber when the NAT allocation is set as Dynamic NAT. For any connection initiated from a subscriber, the Citrix ADC allocates a NAT port from the subscriber's allocated NAT port block to create the LSN session.
	You must set the port block size in the bound LSN group. For a subscriber, if all the ports are allocated from the subscriber's allocated port block, the Citrix ADC allocates a new random port block for the subscriber.
	For Deterministic NAT, this parameter is enabled by default, and you cannot disable it.
	 */
	Portblockallocation string `json:"portblockallocation,omitempty"`
	/**
	* The waiting time, in seconds, between deallocating LSN NAT ports (when an LSN mapping is removed) and reallocating them for a new LSN session. This parameter is necessary in order to prevent collisions between old and new mappings and sessions. It ensures that all established sessions are broken instead of redirected to a different subscriber. This is not applicable for ports used in:
	* Deterministic NAT
	* Address-Dependent filtering and Address-Port-Dependent filtering
	* Dynamic NAT with port block allocation
	In these cases, ports are immediately reallocated.
	 */
	Portrealloctimeout int `json:"portrealloctimeout,omitempty"`
	/**
	* Maximum number of ports for which the port reallocation timeout applies for each NAT IP address. In other words, the maximum deallocated-port queue size for which the reallocation timeout applies for each NAT IP address.
	When the queue size is full, the next port deallocated is reallocated immediately for a new LSN session.
	 */
	Maxportrealloctmq int `json:"maxportrealloctmq,omitempty"`
}
package pigosat
import (
"fmt"
)
// Minimizer allows you to find the lowest integer K such that
//   LowerBound() <= K <= UpperBound()
// and IsFeasible(K) returns status Satisfiable.
type Minimizer interface {
	// LowerBound returns a lower bound for the optimal value of k.
	LowerBound() int

	// UpperBound returns an upper bound for the optimal value of k.
	UpperBound() int

	// IsFeasible takes a value k and returns whether the Minimizer instance's
	// underlying model is feasible for that input value. IsFeasible can model
	// any set of constraints it likes as long as there is a unique integer K
	// such that k < K implies IsFeasible(k) returns status Unsatisfiable and
	// k >= K implies IsFeasible(k) returns status Satisfiable.
	IsFeasible(k int) (solution Solution, status Status)

	// RecordSolution allows types implementing this interface to store
	// solutions for after minimization has finished.
	RecordSolution(k int, solution Solution, status Status)
}
// Minimize finds the value min that minimizes Minimizer m. If the value can be
// proved to be optimal, that is, k < min causes m.IsFeasible(k) to return
// status Unsatisfiable, optimal will be set to true. If
// m.IsFeasible(m.UpperBound()) returns status Unsatisfiable, feasible will be
// set to false. Every return value from IsFeasible will be passed to
// m.RecordSolution. Panic if m.UpperBound() < m.LowerBound(). If m.IsFeasible
// returns a status other than Satisfiable, it will be treated as Unsatisfiable.
func Minimize(m Minimizer) (min int, optimal, feasible bool) {
	hi, lo := m.UpperBound(), m.LowerBound()
	if hi < lo {
		panic(fmt.Errorf("UpperBound()=%d < LowerBound()=%d", hi, lo))
	}
	// If even the upper bound is infeasible, no k in range is feasible.
	solution, status := m.IsFeasible(hi)
	m.RecordSolution(hi, solution, status)
	if status != Satisfiable {
		return hi, false, false
	}
	// Binary search; invariant: IsFeasible(hi) is Satisfiable.
	for hi > lo {
		k := lo + (hi-lo)/2 // avoid overfow. See sort/search.go in stdlib
		solution, status = m.IsFeasible(k)
		m.RecordSolution(k, solution, status)
		if status == Satisfiable {
			hi = k
		} else {
			// Seeing an infeasible k proves optimality of the final hi.
			lo = k + 1
			optimal = true
		}
	}
	return hi, optimal, true
}
package utils
import (
"fmt"
"strconv"
"gopkg.in/yaml.v2"
)
// InsertionOrderedStringMap is a string-keyed map that remembers the order in
// which keys were first inserted; ForEach iterates in that order.
type InsertionOrderedStringMap struct {
	keys   []string `yaml:"-"`
	values map[string]interface{}
}
// NewEmptyInsertionOrderedStringMap creates an empty map whose internal
// storage is pre-sized for the given number of entries.
func NewEmptyInsertionOrderedStringMap(size int) *InsertionOrderedStringMap {
	m := &InsertionOrderedStringMap{}
	m.keys = make([]string, 0, size)
	m.values = make(map[string]interface{}, size)
	return m
}
// NewInsertionOrderedStringMap builds an ordered map from an ordinary map.
// NOTE(review): Go map iteration order is random, so the resulting
// "insertion order" is nondeterministic — confirm whether callers rely on
// ordering here.
func NewInsertionOrderedStringMap(stringMap map[string]interface{}) *InsertionOrderedStringMap {
	m := NewEmptyInsertionOrderedStringMap(len(stringMap))
	for key, value := range stringMap {
		m.Set(key, value)
	}
	return m
}
// Len returns the number of stored key/value pairs.
func (insertionOrderedStringMap *InsertionOrderedStringMap) Len() int {
	return len(insertionOrderedStringMap.values)
}
// UnmarshalYAML implements yaml.Unmarshaler, decoding a YAML mapping while
// preserving its key order (via yaml.MapSlice) and coercing scalar values to
// strings with toString.
func (insertionOrderedStringMap *InsertionOrderedStringMap) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var data yaml.MapSlice
	if err := unmarshal(&data); err != nil {
		return err
	}
	insertionOrderedStringMap.values = make(map[string]interface{})
	for _, v := range data {
		insertionOrderedStringMap.Set(v.Key.(string), toString(v.Value))
	}
	return nil
}
// toString renders a value as its canonical string form, handling the common
// scalar types directly (cheaper than fmt) and falling back to fmt.Sprintf
// for anything else. nil becomes the empty string.
func toString(data interface{}) string {
	switch v := data.(type) {
	case nil:
		return ""
	case string:
		return v
	case bool:
		return strconv.FormatBool(v)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case int:
		return strconv.FormatInt(int64(v), 10)
	case int64:
		return strconv.FormatInt(v, 10)
	case int32:
		return strconv.FormatInt(int64(v), 10)
	case int16:
		return strconv.FormatInt(int64(v), 10)
	case int8:
		return strconv.FormatInt(int64(v), 10)
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(v, 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint16:
		return strconv.FormatUint(uint64(v), 10)
	case uint8:
		return strconv.FormatUint(uint64(v), 10)
	case []byte:
		return string(v)
	default:
		return fmt.Sprintf("%v", data)
	}
}
// ForEach invokes fn once for every key/value pair, in first-insertion order.
func (insertionOrderedStringMap *InsertionOrderedStringMap) ForEach(fn func(key string, data interface{})) {
	for _, key := range insertionOrderedStringMap.keys {
		fn(key, insertionOrderedStringMap.values[key])
	}
}
func (insertionOrderedStringMap *InsertionOrderedStringMap) Set(key string, value interface{}) {
_, present := insertionOrderedStringMap.values[key]
insertionOrderedStringMap.values[key] = value
if !present {
insertionOrderedStringMap.keys = append(insertionOrderedStringMap.keys, key)
}
} | v2/pkg/utils/insertion_ordered_map.go | 0.664976 | 0.468487 | insertion_ordered_map.go | starcoder |
package symphony
import (
"fmt"
)
// DAG (directed acyclic graph) is the representation of mathematical graph model of a flow.
type DAG struct {
	Active bool    // whether this flow is active
	Name   string  // flow name
	Nodes  []*Node // all nodes in the graph
	Root   []*Node // nodes with no dependencies (entry points)
}
// Node is a part of a DAG, have his own identity and a link for the next Node to be executed.
type Node struct {
	Name    string
	Type    string
	Next    []*Node  // successors (populated by GenerateExecutionTree)
	Prev    []*Node  // predecessors (populated by GenerateExecutionTree)
	Depends []string // names of nodes this node depends on
	Payload map[interface{}]interface{}
}
// GenerateExecutionTree updates the Next and Prev node references on each
// node from the declared Depends names, and collects dependency-free nodes
// into dag.Root. Returns an error when a dependency names a missing node.
func (dag *DAG) GenerateExecutionTree() error {
	// Index nodes by name for O(1) dependency resolution.
	var nodeIndexer = make(map[string]*Node)
	for _, node := range dag.Nodes {
		nodeIndexer[node.Name] = node
	}
	for _, node := range dag.Nodes {
		if dependencies := node.Depends; len(dependencies) > 0 {
			for _, dependency := range dependencies {
				if _, exist := nodeIndexer[dependency]; !exist {
					return fmt.Errorf("node %s depends on a non-existent node %s", node.Name, dependency)
				}
				nodeIndexer[dependency].Next = append(nodeIndexer[dependency].Next, node)
				node.Prev = append(node.Prev, nodeIndexer[dependency])
			}
		} else {
			// No dependencies: this node is an entry point of the flow.
			dag.Root = append(dag.Root, node)
		}
	}
	return nil
}
// IsValid executes a series of validations over the dag to prevent execution
// problems or deadlocks: it requires at least one root node and no
// dependency cycles.
func (dag *DAG) IsValid() (bool, error) {
	// Pre-seed the visited set so every node name is present (false).
	var visited = make(map[string]bool)
	for _, node := range dag.Nodes {
		visited[node.Name] = false
	}
	if len(dag.Root) == 0 || executionLoopFound(dag.Root, visited) {
		return false, fmt.Errorf("loop found on the dag %s by a cross dependency cycle", dag.Name)
	}
	return true, nil
}
// depth first search algorithm for cycle identification
func executionLoopFound(next []*Node, visited map[string]bool) bool {
var nextNodes = make(map[string]*Node)
for _, node := range next {
if visited[node.Name] {
return true
}
visited[node.Name] = true
for _, queuedNode := range node.Next {
nextNodes[node.Name] = queuedNode
}
}
if len(nextNodes) == 0 {
return false
}
executionQueue := make([]*Node, len(nextNodes))
idx := 0
for _, node := range nextNodes {
executionQueue[idx] = node
idx++
}
return executionLoopFound(executionQueue, visited)
} | src/app/orchestration/symphony/dag.go | 0.662687 | 0.459682 | dag.go | starcoder |
package pole
import (
"fmt"
"github.com/yaricom/goNEAT/v2/experiment"
"github.com/yaricom/goNEAT/v2/experiment/utils"
"github.com/yaricom/goNEAT/v2/neat"
"github.com/yaricom/goNEAT/v2/neat/genetics"
"github.com/yaricom/goNEAT/v2/neat/network"
"math"
"math/rand"
)
const twelveDegrees = 12.0 * math.Pi / 180.0
// cartPoleGenerationEvaluator evaluates one generation of organisms on the
// single-pole balancing task.
type cartPoleGenerationEvaluator struct {
	// The output path to store execution results
	OutputPath string
	// The flag to indicate if cart emulator should be started from random position
	RandomStart bool
	// The number of emulation steps to be done balancing pole to win
	WinBalancingSteps int
}
// NewCartPoleGenerationEvaluator is to create generations evaluator for single-pole balancing experiment.
// This experiment performs evolution on single pole balancing task in order to produce appropriate genome.
// outDir is where results are written; randomStart selects a random initial
// cart state; winBalanceSteps is the number of balanced steps needed to win.
func NewCartPoleGenerationEvaluator(outDir string, randomStart bool, winBalanceSteps int) experiment.GenerationEvaluator {
	return &cartPoleGenerationEvaluator{
		OutputPath:        outDir,
		RandomStart:       randomStart,
		WinBalancingSteps: winBalanceSteps,
	}
}
// GenerationEvaluate evaluates one epoch for given population and prints results into output directory if any.
func (e *cartPoleGenerationEvaluator) GenerationEvaluate(pop *genetics.Population, epoch *experiment.Generation, context *neat.Options) (err error) {
	// Evaluate each organism on a test
	for _, org := range pop.Organisms {
		res, err := e.orgEvaluate(org)
		if err != nil {
			return err
		}
		// Track the best winner seen so far in this epoch.
		if res && (epoch.Best == nil || org.Fitness > epoch.Best.Fitness) {
			epoch.Solved = true
			epoch.WinnerNodes = len(org.Genotype.Nodes)
			epoch.WinnerGenes = org.Genotype.Extrons()
			epoch.WinnerEvals = context.PopSize*epoch.Id + org.Genotype.Id
			epoch.Best = org
			// NOTE(review): 7 appears to be a known minimal topology size
			// for this task — confirm; magic number.
			if epoch.WinnerNodes == 7 {
				// You could dump out optimal genomes here if desired
				if optPath, err := utils.WriteGenomePlain("pole1_optimal", e.OutputPath, org, epoch); err != nil {
					neat.ErrorLog(fmt.Sprintf("Failed to dump optimal genome, reason: %s\n", err))
				} else {
					neat.InfoLog(fmt.Sprintf("Dumped optimal genome to: %s\n", optPath))
				}
			}
		}
	}
	// Fill statistics about current epoch
	epoch.FillPopulationStatistics(pop)
	// Only print to file every print_every generation
	if epoch.Solved || epoch.Id%context.PrintEvery == 0 {
		if _, err = utils.WritePopulationPlain(e.OutputPath, pop, epoch); err != nil {
			neat.ErrorLog(fmt.Sprintf("Failed to dump population, reason: %s\n", err))
			return err
		}
	}
	if epoch.Solved {
		// print winner organism
		org := epoch.Best
		if depth, err := org.Phenotype.MaxActivationDepthFast(0); err == nil {
			neat.InfoLog(fmt.Sprintf("Activation depth of the winner: %d\n", depth))
		}
		genomeFile := "pole1_winner_genome"
		// Prints the winner organism to file!
		if orgPath, err := utils.WriteGenomePlain(genomeFile, e.OutputPath, org, epoch); err != nil {
			neat.ErrorLog(fmt.Sprintf("Failed to dump winner organism's genome, reason: %s\n", err))
		} else {
			neat.InfoLog(fmt.Sprintf("Generation #%d winner's genome dumped to: %s\n", epoch.Id, orgPath))
		}
		// Prints the winner organism's Phenotype to the Cytoscape JSON file!
		if orgPath, err := utils.WriteGenomeCytoscapeJSON(genomeFile, e.OutputPath, org, epoch); err != nil {
			neat.ErrorLog(fmt.Sprintf("Failed to dump winner organism's phenome Cytoscape JSON graph, reason: %s\n", err))
		} else {
			neat.InfoLog(fmt.Sprintf("Generation #%d winner's phenome Cytoscape JSON graph dumped to: %s\n",
				epoch.Id, orgPath))
		}
	}
	return err
}
// orgEvaluate evaluates provided organism for cart pole balancing task
func (e *cartPoleGenerationEvaluator) orgEvaluate(organism *genetics.Organism) (bool, error) {
// Try to balance a pole now
if fitness, err := e.runCart(organism.Phenotype); err != nil {
return false, nil
} else {
organism.Fitness = float64(fitness)
}
if neat.LogLevel == neat.LogLevelDebug {
neat.DebugLog(fmt.Sprintf("Organism #%3d\tfitness: %f", organism.Genotype.Id, organism.Fitness))
}
// Decide if it's a winner
if organism.Fitness >= float64(e.WinBalancingSteps) {
organism.IsWinner = true
}
// adjust fitness to be in range [0;1]
if organism.IsWinner {
organism.Fitness = 1.0
organism.Error = 0.0
} else if organism.Fitness == 0 {
organism.Error = 1.0
} else {
// we use logarithmic scale because most cart runs fail to early within ~100 steps, but
// we test against 500'000 balancing steps
logSteps := math.Log(float64(e.WinBalancingSteps))
organism.Error = (logSteps - math.Log(organism.Fitness)) / logSteps
organism.Fitness = 1.0 - organism.Error
}
return organism.IsWinner, nil
}
// runCart runs the cart emulation and return number of emulation steps pole was balanced
func (e *cartPoleGenerationEvaluator) runCart(net *network.Network) (steps int, err error) {
var x float64 /* cart position, meters */
var xDot float64 /* cart velocity */
var theta float64 /* pole angle, radians */
var thetaDot float64 /* pole angular velocity */
if e.RandomStart {
/*set up random start state*/
x = float64(rand.Int31()%4800)/1000.0 - 2.4
xDot = float64(rand.Int31()%2000)/1000.0 - 1
theta = float64(rand.Int31()%400)/1000.0 - .2
thetaDot = float64(rand.Int31()%3000)/1000.0 - 1.5
}
netDepth, err := net.MaxActivationDepthFast(0) // The max depth of the network to be activated
if err != nil {
neat.WarnLog(fmt.Sprintf(
"Failed to estimate maximal depth of the network with loop.\nUsing default depth: %d", netDepth))
} else if netDepth == 0 {
// possibly disconnected - return minimal fitness score
return 1, nil
}
in := make([]float64, 5)
for steps = 0; steps < e.WinBalancingSteps; steps++ {
/*-- setup the input layer based on the four inputs --*/
in[0] = 1.0 // Bias
in[1] = (x + 2.4) / 4.8
in[2] = (xDot + .75) / 1.5
in[3] = (theta + twelveDegrees) / .41
in[4] = (thetaDot + 1.0) / 2.0
if err = net.LoadSensors(in); err != nil {
return 0, err
}
/*-- activate the network based on the input --*/
if res, err := net.ForwardSteps(netDepth); !res {
//If it loops, exit returning only fitness of 1 step
neat.DebugLog(fmt.Sprintf("Failed to activate Network, reason: %s", err))
return 1, nil
}
/*-- decide which way to push via which output unit is greater --*/
action := 1
if net.Outputs[0].Activation > net.Outputs[1].Activation {
action = 0
}
/*--- Apply action to the simulated cart-pole ---*/
x, xDot, theta, thetaDot = e.doAction(action, x, xDot, theta, thetaDot)
/*--- Check for failure. If so, return steps ---*/
if x < -2.4 || x > 2.4 || theta < -twelveDegrees || theta > twelveDegrees {
return steps, nil
}
}
return steps, nil
}
// doAction was taken directly from the pole simulator written by <NAME> and <NAME>.
// This simulator uses normalized, continuous inputs instead of discretizing the input space.
/*----------------------------------------------------------------------
Takes an action (0 or 1) and the current values of the
four state variables and updates their values by estimating the state
TAU seconds later.
----------------------------------------------------------------------*/
func (e *cartPoleGenerationEvaluator) doAction(action int, x, xDot, theta, thetaDot float64) (xRet, xDotRet, thetaRet, thetaDotRet float64) {
// The cart pole configuration values
const Gravity = 9.8
const MassCart = 1.0
const MassPole = 0.5
const TotalMass = MassPole + MassCart
const Length = 0.5 /* actually half the pole's length */
const PoleMassLength = MassPole * Length
const ForceMag = 10.0
const Tau = 0.02 /* seconds between state updates */
const FourThirds = 1.3333333333333
force := -ForceMag
if action > 0 {
force = ForceMag
}
cosTheta := math.Cos(theta)
sinTheta := math.Sin(theta)
temp := (force + PoleMassLength*thetaDot*thetaDot*sinTheta) / TotalMass
thetaAcc := (Gravity*sinTheta - cosTheta*temp) / (Length * (FourThirds - MassPole*cosTheta*cosTheta/TotalMass))
xAcc := temp - PoleMassLength*thetaAcc*cosTheta/TotalMass
/*** Update the four state variables, using Euler's method. ***/
xRet = x + Tau*xDot
xDotRet = xDot + Tau*xAcc
thetaRet = theta + Tau*thetaDot
thetaDotRet = thetaDot + Tau*thetaAcc
return xRet, xDotRet, thetaRet, thetaDotRet
} | examples/pole/cartpole.go | 0.664105 | 0.411347 | cartpole.go | starcoder |
package lander
import (
"math"
)
// Specific Impulse of fuel, in miles/second
// These units mean fuel is measured by mass, not weight
const fuelIsp = 1.8
// Gravity is the lunar gravity in miles/(second^2)
const Gravity = 0.001
// Kinematics models the motion of the lunar lander
// It also tracks elapsed time since that is directly tied to position and
// velocity changes
type Kinematics struct {
Velocity float64
Altitude float64
ElapsedTime float64
}
// velocityMassFactor implements part of a series solution for lander
// velocity change based on thrust and mass change due to fuel usage
func velocityMassFactor(massChange float64) float64 {
return ((-1.0 * massChange) +
(-1.0 * (math.Pow(massChange, 2.0) / 2.0)) +
(-1.0 * (math.Pow(massChange, 3.0) / 3.0)) +
(-1.0 * (math.Pow(massChange, 4.0) / 4.0)) +
(-1.0 * (math.Pow(massChange, 5.0) / 5.0)))
}
// altitudeMassFactor implements part of a series solution for lander
// altitude change based on thrust and mass change due to fuel usage
func altitudeMassFactor(massChange float64) float64 {
return ((massChange / 2.0) +
(math.Pow(massChange, 2.0) / 6.0) +
(math.Pow(massChange, 3.0) / 12.0) +
(math.Pow(massChange, 4.0) / 20.0) +
(math.Pow(massChange, 5.0) / 30.0))
}
// Lander models the lunar lander via parameters and associated functions
type Lander struct {
CapsuleMass float64
Fuel float64
TotalMass float64
Kinematics
}
// OutOfFuel tests whether the lander fuel is 'close' to zero
func (l *Lander) OutOfFuel() bool {
return l.Fuel < 0.001
}
// calcMassChange is an internal function that returns the percentage
// mass change due to a period of fuel usage
func (l *Lander) calcMassChange(burnRate float64, burnTime float64) float64 {
return (burnRate * burnTime) / l.TotalMass
}
// calcVelocity is an internal function that returns a new velocity due to
// a period of fuel usage along with the effect of gravity
func (l *Lander) calcVelocity(burnTime float64, massChange float64) float64 {
return l.Velocity + (Gravity * burnTime) +
(fuelIsp * velocityMassFactor(massChange))
}
// calcAltitude is an internal function that returns a new altitude due to
// a period of fuel usage along with the effect of gravity
func (l *Lander) calcAltitude(burnTime float64, massChange float64) float64 {
return l.Altitude +
(-1.0 * (Gravity * (math.Pow(burnTime, 2.0) / 2.0))) +
(-1.0 * l.Velocity * burnTime) +
(fuelIsp * burnTime * altitudeMassFactor(massChange))
}
// CalcDynamics returns a new Kinematics based on // the current lander
// Kinematics with altitude and velocity adjusted for the current burn
func (l *Lander) CalcDynamics(burnRate float64, burnTime float64) Kinematics {
var newK Kinematics
massChange := l.calcMassChange(burnRate, burnTime)
newK.Velocity = l.calcVelocity(burnTime, massChange)
newK.Altitude = l.calcAltitude(burnTime, massChange)
newK.ElapsedTime = l.ElapsedTime
return newK
}
// ActualBurnTime returns a value equal to or less than the requested burn
// time based on the burn rate and the available fuel
func (l *Lander) ActualBurnTime(burnRate float64, burnTime float64) float64 {
if l.Fuel < (burnRate * burnTime) {
return l.Fuel / burnRate
} else {
return burnTime
}
}
// UpwardBurnTime handles the dynamics case where the burn has caused the
// velocity of the lander to become negative for some part of the burn
// This is probably the most obscure part of the program.
func (l *Lander) UpwardBurnTime(burnRate float64) float64 {
factor := (1.0 - ((l.TotalMass * Gravity) /
(fuelIsp * burnRate))) / 2.0
return (((l.TotalMass * l.Velocity) / (fuelIsp * burnRate * (factor + math.Sqrt(
(factor*factor)+(l.Velocity/fuelIsp))))) + 0.05)
}
// UpdateLander updates the Lander structure based on a burn and the
// (externally calculated) changes to altitude and velocity
func (l *Lander) UpdateLander(burnRate float64, burnTime float64, newPhys Kinematics) {
l.Velocity = newPhys.Velocity
l.Altitude = newPhys.Altitude
l.ElapsedTime = l.ElapsedTime + burnTime
l.Fuel -= (burnRate * burnTime)
l.TotalMass = l.CapsuleMass + l.Fuel
}
// CalcImpact iteratively determines the moment of impact along with the
// velocity at that moment. It may fail (loop endlessly) if called for a burn
// that does not end on the surface
func (l *Lander) CalcImpact(burnRate float64, timeToImpact float64) {
var calcVelocity float64
var nextPhys Kinematics
for timeToImpact >= 0.005 {
calcVelocity = l.Velocity + math.Sqrt((math.Pow(l.Velocity, 2))+
(2.0*l.Altitude*(Gravity-
(fuelIsp*(burnRate/l.TotalMass)))))
timeToImpact = 2.0 * (l.Altitude / calcVelocity)
nextPhys = l.CalcDynamics(burnRate, timeToImpact)
l.UpdateLander(burnRate, timeToImpact, nextPhys)
}
} | go/src/lander/lander.go | 0.841435 | 0.748168 | lander.go | starcoder |
package item
import "golang.org/x/exp/constraints"
// Number constraint: ints, uints, complexes, floats and all their subtypes
type Number interface {
constraints.Integer | constraints.Float | constraints.Complex
}
// Number constraint any type that define the addition + operation, and their subtypes
type Addable interface {
constraints.Ordered | constraints.Complex
}
// Pair of Key-Value made to manage maps and other key-value structures
// TODO: use "constraints.Map" when it is defined in the constraints package
type Pair[K comparable, V any] struct {
Key K
Val V
}
// Add the two arguments using the plus + operator
func Add[T Addable](a, b T) T {
return a + b
}
// Multiply the two arguments using the multiplication * operator
func Multiply[T Number](a, b T) T {
return a * b
}
// Increment the argument
func Increment[T Number](a T) T {
return a + 1
}
// Neg inverts the sign of the given numeric argument
func Neg[T Number](a T) T {
return -a
}
// Not negates the boolean result of the input condition function
func Not[T any](condition func(i T) bool) func(i T) bool {
return func(i T) bool {
return !condition(i)
}
}
// IsZero returns true if the input value corresponds to the zero value of its type:
// 0 for numeric values, empty string, false, nil pointer, etc...
func IsZero[T comparable](input T) bool {
var zero T
return input == zero
}
// Equals returns a predicate that is true when the checked value is
// equal to the provided reference.
func Equals[T comparable](reference T) func(i T) bool {
return func(i T) bool {
return i == reference
}
}
// GreaterThan returns a predicate that is true when the checked value is larger than
// the provided reference.
func GreaterThan[T constraints.Ordered](reference T) func(i T) bool {
return func(i T) bool {
return i > reference
}
}
// GreaterThanOrEq returns a predicate that is true when the checked value is equal or larger than
// the provided reference.
func GreaterThanOrEq[T constraints.Ordered](reference T) func(i T) bool {
return func(i T) bool {
return i >= reference
}
}
// LessThan returns a predicate that is true when the checked value is less than
// the provided reference.
func LessThan[T constraints.Ordered](reference T) func(i T) bool {
return func(i T) bool {
return i < reference
}
}
// LessThanOrEq returns a predicate that is true when the checked value is equal or less than
// the provided reference.
func LessThanOrEq[T constraints.Ordered](reference T) func(i T) bool {
return func(i T) bool {
return i >= reference
}
} | item/item.go | 0.564579 | 0.572872 | item.go | starcoder |
package types
import (
"io"
"github.com/lyraproj/puppet-evaluator/errors"
"github.com/lyraproj/puppet-evaluator/eval"
)
type TypeType struct {
typ eval.Type
}
var typeType_DEFAULT = &TypeType{typ: anyType_DEFAULT}
var Type_Type eval.ObjectType
func init() {
Type_Type = newObjectType(`Pcore::TypeType`,
`Pcore::AnyType {
attributes => {
type => {
type => Optional[Type],
value => Any
},
}
}`, func(ctx eval.Context, args []eval.Value) eval.Value {
return NewTypeType2(args...)
})
newGoConstructor(`Type`,
func(d eval.Dispatch) {
d.Param(`String`)
d.Function(func(c eval.Context, args []eval.Value) eval.Value {
return c.ParseType(args[0])
})
},
func(d eval.Dispatch) {
d.Param2(TYPE_OBJECT_INIT_HASH)
d.Function(func(c eval.Context, args []eval.Value) eval.Value {
return NewObjectType(``, nil, args[0]).Resolve(c)
})
})
}
func DefaultTypeType() *TypeType {
return typeType_DEFAULT
}
func NewTypeType(containedType eval.Type) *TypeType {
if containedType == nil || containedType == anyType_DEFAULT {
return DefaultTypeType()
}
return &TypeType{containedType}
}
func NewTypeType2(args ...eval.Value) *TypeType {
switch len(args) {
case 0:
return DefaultTypeType()
case 1:
if containedType, ok := args[0].(eval.Type); ok {
return NewTypeType(containedType)
}
panic(NewIllegalArgumentType2(`Type[]`, 0, `Type`, args[0]))
default:
panic(errors.NewIllegalArgumentCount(`Type[]`, `0 or 1`, len(args)))
}
}
func (t *TypeType) ContainedType() eval.Type {
return t.typ
}
func (t *TypeType) Accept(v eval.Visitor, g eval.Guard) {
v(t)
t.typ.Accept(v, g)
}
func (t *TypeType) Default() eval.Type {
return typeType_DEFAULT
}
func (t *TypeType) Equals(o interface{}, g eval.Guard) bool {
if ot, ok := o.(*TypeType); ok {
return t.typ.Equals(ot.typ, g)
}
return false
}
func (t *TypeType) Generic() eval.Type {
return NewTypeType(eval.GenericType(t.typ))
}
func (t *TypeType) Get(key string) (value eval.Value, ok bool) {
switch key {
case `type`:
return t.typ, true
}
return nil, false
}
func (t *TypeType) IsAssignable(o eval.Type, g eval.Guard) bool {
if ot, ok := o.(*TypeType); ok {
return GuardedIsAssignable(t.typ, ot.typ, g)
}
return false
}
func (t *TypeType) IsInstance(o eval.Value, g eval.Guard) bool {
if ot, ok := o.(eval.Type); ok {
return GuardedIsAssignable(t.typ, ot, g)
}
return false
}
func (t *TypeType) MetaType() eval.ObjectType {
return Type_Type
}
func (t *TypeType) Name() string {
return `Type`
}
func (t *TypeType) Parameters() []eval.Value {
if t.typ == DefaultAnyType() {
return eval.EMPTY_VALUES
}
return []eval.Value{t.typ}
}
func (t *TypeType) Resolve(c eval.Context) eval.Type {
t.typ = resolve(c, t.typ)
return t
}
func (t *TypeType) CanSerializeAsString() bool {
return canSerializeAsString(t.typ)
}
func (t *TypeType) SerializationString() string {
return t.String()
}
func (t *TypeType) String() string {
return eval.ToString2(t, NONE)
}
func (t *TypeType) PType() eval.Type {
return &TypeType{t}
}
func (t *TypeType) ToString(b io.Writer, s eval.FormatContext, g eval.RDetect) {
TypeToString(t, b, s, g)
} | types/typetype.go | 0.610453 | 0.44903 | typetype.go | starcoder |
package petstore
import (
"bytes"
"encoding/json"
"errors"
"time"
)
var ErrInvalidNullable = errors.New("nullable cannot have non-zero Value and ExplicitNull simultaneously")
// PtrBool is a helper routine that returns a pointer to given integer value.
func PtrBool(v bool) *bool { return &v }
// PtrInt is a helper routine that returns a pointer to given integer value.
func PtrInt(v int) *int { return &v }
// PtrInt32 is a helper routine that returns a pointer to given integer value.
func PtrInt32(v int32) *int32 { return &v }
// PtrInt64 is a helper routine that returns a pointer to given integer value.
func PtrInt64(v int64) *int64 { return &v }
// PtrFloat32 is a helper routine that returns a pointer to given float value.
func PtrFloat32(v float32) *float32 { return &v }
// PtrFloat64 is a helper routine that returns a pointer to given float value.
func PtrFloat64(v float64) *float64 { return &v }
// PtrString is a helper routine that returns a pointer to given string value.
func PtrString(v string) *string { return &v }
// PtrTime is helper routine that returns a pointer to given Time value.
func PtrTime(v time.Time) *time.Time { return &v }
type NullableBool struct {
Value bool
ExplicitNull bool
}
func (v NullableBool) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && v.Value:
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return json.Marshal(v.Value)
}
}
func (v *NullableBool) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
}
type NullableInt struct {
Value int
ExplicitNull bool
}
func (v NullableInt) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && v.Value != 0:
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return json.Marshal(v.Value)
}
}
func (v *NullableInt) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
}
type NullableInt32 struct {
Value int32
ExplicitNull bool
}
func (v NullableInt32) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && v.Value != 0:
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return json.Marshal(v.Value)
}
}
func (v *NullableInt32) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
}
type NullableInt64 struct {
Value int64
ExplicitNull bool
}
func (v NullableInt64) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && v.Value != 0:
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return json.Marshal(v.Value)
}
}
func (v *NullableInt64) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
}
type NullableFloat32 struct {
Value float32
ExplicitNull bool
}
func (v NullableFloat32) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && v.Value != 0.0:
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return json.Marshal(v.Value)
}
}
func (v *NullableFloat32) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
}
type NullableFloat64 struct {
Value float64
ExplicitNull bool
}
func (v NullableFloat64) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && v.Value != 0.0:
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return json.Marshal(v.Value)
}
}
func (v *NullableFloat64) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
}
type NullableString struct {
Value string
ExplicitNull bool
}
func (v NullableString) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && v.Value != "":
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return json.Marshal(v.Value)
}
}
func (v *NullableString) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
}
type NullableTime struct {
Value time.Time
ExplicitNull bool
}
func (v NullableTime) MarshalJSON() ([]byte, error) {
switch {
case v.ExplicitNull && !v.Value.IsZero():
return nil, ErrInvalidNullable
case v.ExplicitNull:
return []byte("null"), nil
default:
return v.Value.MarshalJSON()
}
}
func (v *NullableTime) UnmarshalJSON(src []byte) error {
if bytes.Equal(src, []byte("null")) {
v.ExplicitNull = true
return nil
}
return json.Unmarshal(src, &v.Value)
} | samples/client/petstore/go-experimental/go-petstore/utils.go | 0.754915 | 0.461259 | utils.go | starcoder |
package hdrhistogram
import "fmt"
const truncatedErrStr = "Truncated compressed histogram decode. Expected minimum length of %d bytes and got %d."
// Read an LEB128 ZigZag encoded long value from the given buffer
func zig_zag_decode_i64(buf []byte) (signedValue int64, n int, err error) {
buflen := len(buf)
if buflen < 1 {
return 0, 0, nil
}
var value = uint64(buf[0]) & 0x7f
n = 1
if (buf[0] & 0x80) != 0 {
if buflen < 2 {
err = fmt.Errorf(truncatedErrStr, 2, buflen)
return
}
value |= uint64(buf[1]) & 0x7f << 7
n = 2
if (buf[1] & 0x80) != 0 {
if buflen < 3 {
err = fmt.Errorf(truncatedErrStr, 3, buflen)
return
}
value |= uint64(buf[2]) & 0x7f << 14
n = 3
if (buf[2] & 0x80) != 0 {
if buflen < 4 {
err = fmt.Errorf(truncatedErrStr, 4, buflen)
return
}
value |= uint64(buf[3]) & 0x7f << 21
n = 4
if (buf[3] & 0x80) != 0 {
if buflen < 5 {
err = fmt.Errorf(truncatedErrStr, 5, buflen)
return
}
value |= uint64(buf[4]) & 0x7f << 28
n = 5
if (buf[4] & 0x80) != 0 {
if buflen < 6 {
err = fmt.Errorf(truncatedErrStr, 6, buflen)
return
}
value |= uint64(buf[5]) & 0x7f << 35
n = 6
if (buf[5] & 0x80) != 0 {
if buflen < 7 {
err = fmt.Errorf(truncatedErrStr, 7, buflen)
return
}
value |= uint64(buf[6]) & 0x7f << 42
n = 7
if (buf[6] & 0x80) != 0 {
if buflen < 8 {
err = fmt.Errorf(truncatedErrStr, 8, buflen)
return
}
value |= uint64(buf[7]) & 0x7f << 49
n = 8
if (buf[7] & 0x80) != 0 {
if buflen < 9 {
err = fmt.Errorf(truncatedErrStr, 9, buflen)
return
}
value |= uint64(buf[8]) << 56
n = 9
}
}
}
}
}
}
}
}
signedValue = int64((value >> 1) ^ -(value & 1))
return
}
// Writes a int64_t value to the given buffer in LEB128 ZigZag encoded format
// ZigZag encoding maps signed integers to unsigned integers so that numbers with a small
// absolute value (for instance, -1) have a small varint encoded value too.
// It does this in a way that "zig-zags" back and forth through the positive and negative integers,
// so that -1 is encoded as 1, 1 is encoded as 2, -2 is encoded as 3, and so on.
func zig_zag_encode_i64(signedValue int64) (buffer []byte) {
buffer = make([]byte, 0)
var value = uint64((signedValue << 1) ^ (signedValue >> 63))
if value>>7 == 0 {
buffer = append(buffer, byte(value))
} else {
buffer = append(buffer, byte((value&0x7F)|0x80))
if value>>14 == 0 {
buffer = append(buffer, byte(value>>7))
} else {
buffer = append(buffer, byte((value>>7)|0x80))
if value>>21 == 0 {
buffer = append(buffer, byte(value>>14))
} else {
buffer = append(buffer, byte((value>>14)|0x80))
if value>>28 == 0 {
buffer = append(buffer, byte(value>>21))
} else {
buffer = append(buffer, byte((value>>21)|0x80))
if value>>35 == 0 {
buffer = append(buffer, byte(value>>28))
} else {
buffer = append(buffer, byte((value>>28)|0x80))
if value>>42 == 0 {
buffer = append(buffer, byte(value>>35))
} else {
buffer = append(buffer, byte((value>>35)|0x80))
if value>>49 == 0 {
buffer = append(buffer, byte(value>>42))
} else {
buffer = append(buffer, byte((value>>42)|0x80))
if value>>56 == 0 {
buffer = append(buffer, byte(value>>49))
} else {
buffer = append(buffer, byte((value>>49)|0x80))
buffer = append(buffer, byte(value>>56))
}
}
}
}
}
}
}
}
return
} | vendor/github.com/HdrHistogram/hdrhistogram-go/zigzag.go | 0.522446 | 0.42322 | zigzag.go | starcoder |
package geography
import (
"math"
"time"
"github.com/PlaceDescriber/PlaceDescriber/types"
)
const (
D_R = math.Pi / 180.0
R_D = 180.0 / math.Pi
R_MAJOR = 6378137.0
R_MINOR = 6356752.3142
RATIO = R_MINOR / R_MAJOR
)
var (
ECCENT = math.Sqrt(1.0 - (RATIO * RATIO))
COM = 0.5 * ECCENT
)
type MapTile struct {
Coordinates types.Point `json:"coordinates"`
Z int `json:"z"`
Y int `json:"y"`
X int `json:"x"`
Time time.Time `json:"time"`
Provider string `json:"provider"`
Type types.MapType `json:"type"`
Language string `json:"language"`
Content []byte `json:"content"`
}
type Conversion interface {
DegToTileNum(coordinates types.Point, z int) (x, y int)
TileNumToDeg(x, y, z int) types.Point
}
// Spherical Mercator.
// A popular spherical Mercator tiling format.
// This tile format is used by Google, OpenSteetMap and many others.
// It's based on a spherical Mercator projection ("Web Mercator", EPSG: 3857).
// http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
type SphericalConversion struct {
}
// DegToTileNum returns tile numbers by latitude and longitude in degrees.
func (s SphericalConversion) DegToTileNum(coordinates types.Point, z int) (x, y int) {
x = int(math.Floor((coordinates.Longitude + 180.0) / 360.0 * (math.Exp2(float64(z)))))
y = int(math.Floor((1.0 - math.Log(math.Tan(coordinates.Latitude*math.Pi/180.0)+1.0/math.Cos(coordinates.Latitude*math.Pi/180.0))/math.Pi) / 2.0 * (math.Exp2(float64(z)))))
return
}
// TileNumToDeg returns latitude and longitude in degrees by tile numbers.
func (s SphericalConversion) TileNumToDeg(x, y, z int) types.Point {
n := math.Pi - 2.0*math.Pi*float64(y)/math.Exp2(float64(z))
lat := 180.0 / math.Pi * math.Atan(0.5*(math.Exp(n)-math.Exp(-n)))
long := float64(x)/math.Exp2(float64(z))*360.0 - 180.0
return types.Point{lat, long}
}
// Elliptical Mercator.
// This tile format is used at least by Yandex. Compared to the spherical
// format, the conversion between latitude/longitude coordinates and Mercator
// coordinates differs, but the tiling logic is otherwise the same.
// http://wiki.openstreetmap.org/wiki/Mercator#Elliptical_Mercator
type EllipticalConversion struct {
}
// DegToTileNum returns tile numbers by latitude and longitude in degrees.
func (s EllipticalConversion) DegToTileNum(coordinates types.Point, z int) (x, y int) {
xmerc := D_R * coordinates.Longitude
lat := math.Min(89.5, math.Max(coordinates.Latitude, -89.5))
phi := D_R * lat
sinphi := math.Sin(phi)
con := ECCENT * sinphi
con = math.Pow((1.0-con)/(1.0+con), COM)
ts := math.Tan(0.5*(math.Pi*0.5-phi)) / con
ymerc := -math.Log(ts)
x = int((1 + xmerc/math.Pi) / 2 * math.Exp2(float64(z)))
y = int((1 - ymerc/math.Pi) / 2 * math.Exp2(float64(z)))
return
}
// TileNumToDeg returns latitude and longitude in degrees by tile numbers.
func (s EllipticalConversion) TileNumToDeg(x, y, z int) types.Point {
xmerc := (float64(x)/math.Exp2(float64(z))*2 - 1) * math.Pi
ymerc := (1 - float64(y)/math.Exp2(float64(z))*2) * math.Pi
long := R_D * xmerc
ts := math.Exp(-ymerc)
phi := math.Pi/2 - 2*math.Atan(ts)
dphi := 1.0
for i := 0; math.Abs(dphi) > 0.000000001 && i < 15; i++ {
con := ECCENT * math.Sin(phi)
dphi = math.Pi/2 - 2*math.Atan(ts*math.Pow((1.0-con)/(1.0+con), COM)) - phi
phi += dphi
}
lat := R_D * phi
return types.Point{lat, long}
} | geography/tile.go | 0.715424 | 0.437463 | tile.go | starcoder |
// Various functions to convert between numeric types.
package conversions
import (
"fmt"
"math"
"math/big"
"strconv"
"github.com/cockroachdb/apd/v2"
compact_float "github.com/kstenerud/go-compact-float"
"github.com/kstenerud/go-concise-encoding/internal/common"
)
// apd.Decimal to other
func BigDecimalFloatToBigFloat(value *apd.Decimal) (*big.Float, error) {
return StringToBigFloat(value.Text('g'), int(value.NumDigits()))
}
func BigDecimalFloatToBigInt(value *apd.Decimal, maxBase10Exponent int) (*big.Int, error) {
switch value.Form {
case apd.NaN, apd.NaNSignaling, apd.Infinite:
return nil, fmt.Errorf("%v cannot fit into a big.Int", value)
}
if value.Exponent < 0 {
return nil, fmt.Errorf("%v cannot fit into a big.Int", value)
}
if value.Exponent > int32(maxBase10Exponent) {
return nil, fmt.Errorf("%v has a decimal exponential component (%v) that is too large (max %v)", value, value.Exponent, maxBase10Exponent)
}
exp := big.NewInt(int64(value.Exponent))
exp.Exp(common.BigInt10, exp, nil)
return exp.Mul(exp, &value.Coeff), nil
}
func BigDecimalFloatToUint(value *apd.Decimal) (uint64, error) {
if i, err := value.Int64(); err == nil {
return uint64(i), nil
}
bf, err := BigDecimalFloatToBigFloat(value)
if err != nil {
return 0, err
}
return BigFloatToUint(bf)
}
// big.Float to other
func BigFloatToPBigDecimalFloat(value *big.Float) (*apd.Decimal, error) {
d, _, err := apd.NewFromString(BigFloatToString(value))
return d, err
}
func BigFloatToBigInt(value *big.Float, maxBase2Exponent int) (*big.Int, error) {
if value.MantExp(nil) > maxBase2Exponent {
return nil, fmt.Errorf("%v has a binary exponential component (%v) that is too large for a big int (max %v)", value, value.MantExp(nil), maxBase2Exponent)
}
bi, accuracy := value.Int(new(big.Int))
if accuracy != big.Exact {
return nil, fmt.Errorf("%v cannot fit into a big.Int", value)
}
return bi, nil
}
func BigFloatToFloat(value *big.Float) (float64, error) {
exp := value.MantExp(nil)
if exp < -1029 {
return 0, fmt.Errorf("%v is too small to fit into a float64", value)
}
if exp > 1024 {
return 0, fmt.Errorf("%v is too big to fit into a float64", value)
}
f, accuracy := value.Float64()
if accuracy != big.Exact {
if f == 0 {
return 0, fmt.Errorf("%v is too small to fit into a float64", value)
} else if math.IsInf(f, 0) {
return 0, fmt.Errorf("%v is too big to fit into a float64", value)
}
}
return f, nil
}
func BigFloatToInt(value *big.Float) (int64, error) {
i, accuracy := value.Int64()
if accuracy != big.Exact {
return 0, fmt.Errorf("cannot convert %v to int", value)
}
if big.NewFloat(float64(i)).Cmp(value) != 0 {
return 0, fmt.Errorf("cannot convert %v to int", value)
}
return i, nil
}
func BigFloatToUint(value *big.Float) (uint64, error) {
u, accuracy := value.Uint64()
if accuracy != big.Exact {
return 0, fmt.Errorf("cannot convert %v to uint", value)
}
if big.NewFloat(float64(u)).Cmp(value) != 0 {
return 0, fmt.Errorf("cannot convert %v to uint", value)
}
return u, nil
}
func BigFloatToString(value *big.Float) string {
return value.Text('g', common.BitsToDecimalDigits(int(value.Prec())))
}
// Decimal float to other
func DecimalFloatToBigInt(value compact_float.DFloat, maxBase10Exponent int) (*big.Int, error) {
if value.Exponent > int32(maxBase10Exponent) {
return nil, fmt.Errorf("%v has a decimal exponential component (%v) that is too large for a big int (max %v)", value, value.Exponent, maxBase10Exponent)
}
return value.BigInt()
}
// big.Int to other
func BigIntToBigDecimalFloat(value *big.Int) apd.Decimal {
return apd.Decimal{
Coeff: *value,
}
}
func BigIntToInt(value *big.Int) (int64, error) {
if !value.IsInt64() {
return 0, fmt.Errorf("%v is too big to fit into type int64", value)
}
return value.Int64(), nil
}
func BigIntToFloat(value *big.Int) (float64, error) {
asText := value.Text(10)
f, err := strconv.ParseFloat(asText, 64)
if err != nil {
return 0, err
}
asBigInt, accuracy := big.NewFloat(f).Int(nil)
if accuracy != big.Exact {
return 0, fmt.Errorf("cannot convert %v to float", value)
}
if asBigInt.Cmp(value) != 0 {
return 0, fmt.Errorf("cannot convert %v to float", value)
}
return f, nil
}
func BigIntToUint(value *big.Int) (uint64, error) {
if !value.IsUint64() {
return 0, fmt.Errorf("%v cannot fit into type uint64", value)
}
return value.Uint64(), nil
}
// float to other
func FloatToBigDecimalFloat(value float64) (apd.Decimal, error) {
var d apd.Decimal
_, _, err := apd.BaseContext.SetString(&d, FloatToString(value))
return d, err
}
func FloatToPBigDecimalFloat(value float64) (*apd.Decimal, error) {
d, _, err := apd.NewFromString(FloatToString(value))
return d, err
}
func FloatToBigInt(value float64, maxBase2Exponent int) (*big.Int, error) {
return BigFloatToBigInt(big.NewFloat(value), maxBase2Exponent)
}
// FloatToString formats a float64 using the shortest 'g' representation that
// parses back to the same value.
func FloatToString(value float64) string {
	return string(strconv.AppendFloat(nil, value, 'g', -1, 64))
}
// int to other

// IntToBigDecimalFloat converts an int64 to an apd.Decimal.
// The coefficient holds the absolute value with the sign carried in the
// Negative flag. Negation is done in big.Int arithmetic so that
// math.MinInt64 converts correctly — the previous -value negation
// overflows for that input and produced a negative coefficient.
func IntToBigDecimalFloat(value int64) apd.Decimal {
	var d apd.Decimal
	d.Coeff.SetInt64(value)
	if value < 0 {
		d.Negative = true
		d.Coeff.Neg(&d.Coeff)
	}
	return d
}
// IntToUint converts an int64 to a uint64, failing for negative values.
func IntToUint(value int64) (uint64, error) {
	if value >= 0 {
		return uint64(value), nil
	}
	return 0, fmt.Errorf("%v is negative, and cannot be represented by an unsigned int", value)
}
// uint to other

// UintToBigDecimalFloat converts a uint64 to an apd.Decimal (always non-negative).
func UintToBigDecimalFloat(value uint64) apd.Decimal {
	var d apd.Decimal
	d.Coeff = *UintToBigInt(value)
	return d
}
// UintToBigInt converts a uint64 to a *big.Int.
// Uses big.Int.SetUint64 directly; the previous shift-then-double fallback
// for values above math.MaxInt64 discarded the lowest bit of any odd value
// (e.g. math.MaxUint64 came back as math.MaxUint64-1).
func UintToBigInt(value uint64) *big.Int {
	return new(big.Int).SetUint64(value)
}
// UintToInt converts a uint64 to an int64, failing when it exceeds math.MaxInt64.
func UintToInt(value uint64) (int64, error) {
	const maxInt64 = 0x7fffffffffffffff
	if value <= maxInt64 {
		return int64(value), nil
	}
	return 0, fmt.Errorf("%v is too big to fit into type int64", value)
}
// string to other
func StringToBigFloat(value string, significantDigits int) (*big.Float, error) {
f, _, err := big.ParseFloat(value, 10, uint(common.DecimalDigitsToBits(significantDigits)), big.ToNearestEven)
return f, err
} | conversions/conversions.go | 0.791821 | 0.423875 | conversions.go | starcoder |
package ringbuffer
// ByteSliceBuffer implements a ring buffer whose elements are byte slices.
// Position bookkeeping is delegated to the embedded *Buffer; data holds the
// slice stored at each ring position.
type ByteSliceBuffer struct {
	Buffer *Buffer  // tracks read/write positions and fill level
	data [][]byte // element storage, indexed by ring position
}
// NewByteSliceBuffer returns a ring buffer able to hold up to size byte slices.
func NewByteSliceBuffer(size uint64) *ByteSliceBuffer {
	bb := &ByteSliceBuffer{
		Buffer: New(size, 0),
		data:   make([][]byte, size),
	}
	return bb
}
// Push adds the given byte slices to the buffer, returning the number of
// slices stored; ok is false if the buffer filled up first.
func (bb *ByteSliceBuffer) Push(b ...[]byte) (n int, ok bool) {
	return bb.PushSlice(b)
}

// PushSlice stores the byte slices from b into the buffer, returning the
// number of slices stored; ok is false if the buffer became full before all fit.
func (bb *ByteSliceBuffer) PushSlice(b [][]byte) (n int, ok bool) {
	for _, item := range b {
		pos, free := bb.Buffer.GetWritePos()
		if !free {
			return n, false
		}
		bb.data[pos] = item
		n++
	}
	return n, true
}
// PopSlice reads up to len(b) byte slices from the buffer into b, returning
// the number read; ok is false if the buffer emptied before b was filled.
func (bb *ByteSliceBuffer) PopSlice(b [][]byte) (n int, ok bool) {
	for i := range b {
		pos, avail := bb.Buffer.GetReadPos()
		if !avail {
			return n, false
		}
		b[i] = bb.data[pos]
		n++
	}
	return n, true
}

// Pop removes and returns the next byte slice from the buffer;
// ok is false when the buffer is empty.
func (bb *ByteSliceBuffer) Pop() (d []byte, ok bool) {
	pos, avail := bb.Buffer.GetReadPos()
	if !avail {
		return nil, false
	}
	return bb.data[pos], true
}
// GetPos returns the element stored at logical position pos;
// ok is false when pos is not currently contained in the buffer.
func (bb *ByteSliceBuffer) GetPos(pos uint64) ([]byte, bool) {
	n, contained := bb.Buffer.TransPos(pos)
	if !contained {
		return nil, false
	}
	return bb.data[n], true
}
// Resize returns a buffer of the new size containing the current contents.
// It fails (returning the receiver unchanged and false) when the current
// fill does not fit into the requested size.
// Copies of ByteSliceBuffer data become invalid with this operation.
func (bb *ByteSliceBuffer) Resize(size uint64) (newbuffer *ByteSliceBuffer, ok bool) {
	fill := bb.Buffer.Fill()
	if fill > size {
		return bb, false
	}
	bd := &ByteSliceBuffer{
		Buffer: New(size, fill),
		data: make([][]byte, size),
	}
	// CutPoints appears to describe the (up to) two contiguous runs of live
	// data in the ring: a first run [fb:fe) copied to the front of the new
	// buffer, then a second run [sb:se) written starting at offset ss —
	// TODO confirm against Buffer.CutPoints.
	fb, fe, ss, sb, se := bb.Buffer.CutPoints()
	copy(bd.data, bb.data[fb:fe])
	copy(bd.data[ss:], bb.data[sb:se])
	return bd, true
}
package console
import (
"fmt"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
"github.com/torlenor/asciiventure/renderers"
)
// Char holds the position, width and height of a char texture segment.
type Char struct {
	X int32 `json:"x"`
	Y int32 `json:"y"`
	Width int32 `json:"width"`
	Height int32 `json:"height"`
}

// String renders the segment's position and dimensions for debugging output.
func (c Char) String() string {
	const layout = "X: %d Y: %d W: %d H: %d"
	return fmt.Sprintf(layout, c.X, c.Y, c.Width, c.Height)
}
// FontTileSet provides the actual image texture and a way to retrieve the
// correct texture coordinates to use it.
type FontTileSet struct {
	t *sdl.Texture
	charWidth int32
	charHeight int32
	characters map[string]Char
}

// GetCharWidth returns the detected char width in pixel of the font texture.
func (f FontTileSet) GetCharWidth() (width int32) {
	width = f.charWidth
	return
}

// GetCharHeight returns the detected char height in pixel of the font texture.
func (f FontTileSet) GetCharHeight() (height int32) {
	height = f.charHeight
	return
}
// createTextureFromFile loads the image at imagePath and uploads it as an
// SDL texture on the given renderer. The intermediate surface is freed.
func createTextureFromFile(renderer *renderers.Renderer, imagePath string) (*sdl.Texture, error) {
	surface, err := img.Load(imagePath)
	if err != nil {
		return nil, fmt.Errorf("Failed to load image file: %s", err)
	}
	defer surface.Free()
	texture, err := renderer.CreateTextureFromSurface(surface)
	if err != nil {
		return nil, fmt.Errorf("Failed to create texture: %s", err)
	}
	return texture, nil
}
// NewFontTileset returns a new FontTileSet built from the image at imagePath.
// Currently only libtcod format is supported (with extensions to the provided
// char set): the image must be a 32-column by 8-row grid of equal glyph cells.
func NewFontTileset(renderer *renderers.Renderer, imagePath string) (*FontTileSet, error) {
	texture, err := createTextureFromFile(renderer, imagePath)
	if err != nil {
		return nil, fmt.Errorf("Error creating font: %s", err)
	}
	font := FontTileSet{
		t: texture,
	}
	_, _, width, height, err := texture.Query()
	if err != nil {
		return nil, fmt.Errorf("Error determining texture size: %s", err)
	}
	// The sheet must divide evenly into the 32x8 libtcod grid.
	if width%32 != 0 {
		return nil, fmt.Errorf("Not a valid default font image, width not dividable by 32")
	}
	if height%8 != 0 {
		return nil, fmt.Errorf("Not a valid default font image, height not dividable by 8")
	}
	// Code points in libtcod cell order, row by row; 0x00 marks unused cells.
	runes := []rune{
		0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
		0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
		0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
		0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
		0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x7B,
		0x7C, 0x7D, 0x7E, 0x2591, 0x2592, 0x2593, 0x2502, 0x2500,
		0x253C, 0x2524, 0x2534, 0x251C, 0x252C, 0x2514, 0x250C, 0x2510,
		0x2518, 0x2598, 0x259D, 0x2580, 0x2596, 0x259A, 0x2590, 0x2597,
		0x2191, 0x2193, 0x2190, 0x2192, 0x25B2, 0x25BC, 0x25C4, 0x25BA,
		0x2195, 0x2194, 0x2610, 0x2611, 0x25CB, 0x25C9, 0x2551, 0x2550,
		0x256C, 0x2563, 0x2569, 0x2560, 0x2566, 0x255A, 0x2554, 0x2557,
		0x255D, '·', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
		0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
		0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
		0x59, 0x5A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
		0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
		0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
		0x79, 0x7A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	}
	font.characters = make(map[string]Char)
	dx := int32(width / 32)  // glyph cell width in pixels
	dy := int32(height / 8)  // glyph cell height in pixels
	for y := int32(0); y < 8; y++ {
		for x := int32(0); x < 32; x++ {
			r := runes[int(y*32+x)]
			// NOTE(review): all unused 0x00 cells share the key string(rune(0)),
			// so later cells overwrite earlier ones in the map — presumably
			// harmless since 0x00 is never looked up; confirm.
			font.characters[string(r)] = Char{X: x * dx, Y: y * dy, Width: dx, Height: dy}
		}
	}
	font.charWidth = dx
	font.charHeight = dy
	return &font, nil
}
// Get returns a glyph with Dst set to render at origin (0,0).
// Returns true as second value if the operation was successfull.
func (f *FontTileSet) Get(c string) (renderers.RenderGlyph, error) {
if a, ok := f.characters[c]; ok {
return renderers.RenderGlyph{T: f.t, Src: &sdl.Rect{X: int32(a.X), Y: int32(a.Y), W: int32(a.Width), H: int32(a.Height)}}, nil
}
return renderers.RenderGlyph{}, fmt.Errorf("Glyph for char '%s' not found", c)
} | console/tileset.go | 0.626696 | 0.582847 | tileset.go | starcoder |
package util
import (
"encoding/binary"
"github.com/pingcap/errors"
)
// EncodeRow encodes row data and column ids into a slice of byte.
// Row layout: colID1, value1, colID2, value2, ...
// valBuf is an optional scratch buffer supplied by the caller so EncodeRow
// can avoid allocating; pass nil to let it allocate.
// It is a simplified and specialized version of `github.com/pingcap/tidb/tablecodec.EncodeRow`.
func EncodeRow(cols [][]byte, colIDs []int64, valBuf []byte) ([]byte, error) {
	if len(cols) != len(colIDs) {
		return nil, errors.Errorf("EncodeRow error: cols and colIDs count not match %d vs %d", len(cols), len(colIDs))
	}
	out := valBuf[:0]
	// A row with no columns is encoded as the single byte 0.
	if len(cols) == 0 {
		return append(out, 0), nil
	}
	for i, col := range cols {
		out = encodeInt64(out, colIDs[i])
		out = encodeBytes(out, col)
	}
	return out, nil
}
const (
	// compactBytesFlag marks a compact (varint-length-prefixed) byte-string value.
	compactBytesFlag byte = 2
	// varintFlag marks a varint-encoded integer value.
	// NOTE(review): values appear to mirror the TiDB codec flag bytes — confirm
	// against pingcap/tidb/util/codec before reusing elsewhere.
	varintFlag byte = 8
)
func encodeInt64(b []byte, v int64) []byte {
b = append(b, varintFlag)
return appendVarint(b, v)
}
func encodeBytes(b []byte, v []byte) []byte {
b = append(b, compactBytesFlag)
b = appendVarint(b, int64(len(v)))
return append(b, v...)
}
func appendVarint(b []byte, v int64) []byte {
var data [binary.MaxVarintLen64]byte
n := binary.PutVarint(data[:], v)
return append(b, data[:n]...)
}
// DecodeRow decodes a byte slice into columns.
// Row layout: colID1, value1, colID2, value2, .....
// It is a simplified and specialized version of `github.com/pingcap/tidb/tablecodec.DecodeRow`.
func DecodeRow(b []byte) (map[int64][]byte, error) {
	row := make(map[int64][]byte)
	// Empty input, or the single 0 byte EncodeRow emits for zero columns,
	// both decode to an empty row.
	if len(b) == 0 || (len(b) == 1 && b[0] == 0) {
		return row, nil
	}
	for len(b) > 0 {
		rest, colID, err := decodeInt64(b)
		if err != nil {
			return row, err
		}
		rest, val, err := decodeBytes(rest)
		if err != nil {
			return row, err
		}
		row[colID] = val
		b = rest
	}
	return row, nil
}
func decodeInt64(b []byte) ([]byte, int64, error) {
return decodeVarint(b[1:])
}
func decodeVarint(b []byte) ([]byte, int64, error) {
v, n := binary.Varint(b)
if n > 0 {
return b[n:], v, nil
}
if n < 0 {
return nil, 0, errors.New("value larger than 64 bits")
}
return nil, 0, errors.New("insufficient bytes to decode value")
}
func decodeBytes(b []byte) ([]byte, []byte, error) {
remain, n, err := decodeVarint(b[1:])
if err != nil {
return nil, nil, err
}
if int64(len(remain)) < n {
return nil, nil, errors.Errorf("insufficient bytes to decode value, expected length: %v", n)
}
return remain[n:], remain[:n], nil
} | pkg/util/row.go | 0.704465 | 0.414188 | row.go | starcoder |
package datatype
import (
"bytes"
"fmt"
"github.com/datastax/go-cassandra-native-protocol/primitive"
"io"
"reflect"
)
// MapType describes the CQL map data type: a DataType with distinct key and
// value element types.
type MapType interface {
	DataType
	GetKeyType() DataType
	GetValueType() DataType
}

// mapType is the concrete MapType implementation.
type mapType struct {
	keyType DataType
	valueType DataType
}
// GetKeyType returns the element type of the map's keys.
func (t *mapType) GetKeyType() (keyType DataType) {
	keyType = t.keyType
	return
}

// GetValueType returns the element type of the map's values.
func (t *mapType) GetValueType() (valueType DataType) {
	valueType = t.valueType
	return
}

// GetDataTypeCode identifies this type as a protocol-level map.
func (t *mapType) GetDataTypeCode() primitive.DataTypeCode {
	return primitive.DataTypeCodeMap
}

// Clone returns a deep copy of this map type.
func (t *mapType) Clone() DataType {
	copied := &mapType{}
	copied.keyType = t.keyType.Clone()
	copied.valueType = t.valueType.Clone()
	return copied
}

// String renders the type in CQL syntax, e.g. "map<int,varchar>".
func (t *mapType) String() string {
	return fmt.Sprintf("map<%v,%v>", t.keyType, t.valueType)
}

// MarshalJSON encodes the type as a JSON string of its CQL representation.
func (t *mapType) MarshalJSON() ([]byte, error) {
	return []byte(`"` + t.String() + `"`), nil
}

// NewMapType builds a MapType from the given key and value element types.
func NewMapType(keyType DataType, valueType DataType) MapType {
	t := &mapType{}
	t.keyType = keyType
	t.valueType = valueType
	return t
}
// writeMapType writes t's key and value element types to dest.
// It fails when t is not a MapType or when either element type cannot be written.
func writeMapType(t DataType, dest io.Writer, version primitive.ProtocolVersion) (err error) {
	mapType, ok := t.(MapType)
	if !ok {
		return fmt.Errorf("expected MapType, got %T", t)
	}
	if err = WriteDataType(mapType.GetKeyType(), dest, version); err != nil {
		return fmt.Errorf("cannot write map key type: %w", err)
	}
	if err = WriteDataType(mapType.GetValueType(), dest, version); err != nil {
		return fmt.Errorf("cannot write map value type: %w", err)
	}
	return nil
}
// lengthOfMapType computes the combined encoded length of t's key and value
// element types, or -1 and an error when t is not a MapType or a length
// cannot be computed.
func lengthOfMapType(t DataType, version primitive.ProtocolVersion) (length int, err error) {
	mapType, ok := t.(MapType)
	if !ok {
		return -1, fmt.Errorf("expected MapType, got %T", t)
	}
	keyLength, err := LengthOfDataType(mapType.GetKeyType(), version)
	if err != nil {
		return -1, fmt.Errorf("cannot compute length of map key type: %w", err)
	}
	valueLength, err := LengthOfDataType(mapType.GetValueType(), version)
	if err != nil {
		return -1, fmt.Errorf("cannot compute length of map value type: %w", err)
	}
	return keyLength + valueLength, nil
}
// readMapType reads a map's key and value element types from source and
// returns the resulting MapType.
func readMapType(source io.Reader, version primitive.ProtocolVersion) (decoded DataType, err error) {
	result := &mapType{}
	if result.keyType, err = ReadDataType(source, version); err != nil {
		return nil, fmt.Errorf("cannot read map key type: %w", err)
	}
	if result.valueType, err = ReadDataType(source, version); err != nil {
		return nil, fmt.Errorf("cannot read map value type: %w", err)
	}
	return result, nil
}
// MapCodec encodes and decodes CQL map values, delegating per-element work to
// a key codec and a value codec.
type MapCodec struct {
	KeyCodec Codec
	ValueCodec Codec
}

// NewMapCodec builds a MapCodec from the given element codecs.
func NewMapCodec(keyCodec Codec, valueCodec Codec) *MapCodec {
	return &MapCodec{KeyCodec: keyCodec, ValueCodec: valueCodec}
}
// Encode serializes a Go map into the CQL map wire format: an entry count
// followed by alternating length-prefixed key and value blobs.
// nil input (or a nil map) encodes as nil; non-map input is an error.
func (c *MapCodec) Encode(data interface{}, version primitive.ProtocolVersion) (encoded []byte, err error) {
	if data == nil {
		return nil, nil
	}
	// Reflection is used so any map key/value types can be accepted.
	value := reflect.ValueOf(data)
	valueType := value.Type()
	valueKind := valueType.Kind()
	if valueKind == reflect.Map && value.IsNil() {
		return nil, nil
	}
	if valueKind != reflect.Map {
		return nil, fmt.Errorf("can not encode %T into map", data)
	}
	buf := &bytes.Buffer{}
	n := value.Len()
	if err := writeCollectionSize(version, n, buf); err != nil {
		return nil, err
	}
	// Each entry is written as: size(key bytes), key bytes, size(value bytes), value bytes.
	iter := value.MapRange()
	for iter.Next() {
		mapKey := iter.Key()
		item, err := c.KeyCodec.Encode(mapKey.Interface(), version)
		if err != nil {
			return nil, err
		}
		if err := writeCollectionSize(version, len(item), buf); err != nil {
			return nil, err
		}
		buf.Write(item)
		mapValue := iter.Value()
		item, err = c.ValueCodec.Encode(mapValue.Interface(), version)
		if err != nil {
			return nil, err
		}
		if err := writeCollectionSize(version, len(item), buf); err != nil {
			return nil, err
		}
		buf.Write(item)
	}
	return buf.Bytes(), nil
}
// Decode deserializes a CQL map wire value into a map[interface{}]interface{},
// delegating element decoding to the key and value codecs.
// nil input decodes to nil.
func (c *MapCodec) Decode(encoded []byte, version primitive.ProtocolVersion) (value interface{}, err error) {
	if encoded == nil {
		return nil, nil
	}
	n, read, err := readCollectionSize(version, encoded)
	if err != nil {
		return nil, err
	}
	// NOTE(review): this compares the remaining byte count against the entry
	// count n — a loose sanity check rather than an exact bound (each entry
	// needs more than one byte); confirm whether a stricter check was intended.
	if len(encoded) < n {
		return nil, fmt.Errorf("decode map: unexpected eof")
	}
	encoded = encoded[read:]
	newMap := make(map[interface{}]interface{})
	for i := 0; i < n; i++ {
		decodedKeyValue, m, err := decodeChildElement(c.KeyCodec, encoded, version)
		if err != nil {
			return nil, err
		}
		encoded = encoded[m:]
		decodedValue, m, err := decodeChildElement(c.ValueCodec, encoded, version)
		if err != nil {
			return nil, err
		}
		encoded = encoded[m:]
		newMap[decodedKeyValue] = decodedValue
	}
	return newMap, nil
}
package schema
import (
"fmt"
"github.com/k14s/ytt/pkg/filepos"
"github.com/k14s/ytt/pkg/yamlmeta"
)
// Type encapsulates a schema describing a yamlmeta.Node.
type Type interface {
	AssignTypeTo(node yamlmeta.Node) TypeCheck // attach this type as metadata on node (and its children)
	GetValueType() Type // type of the node's value
	GetDefaultValue() interface{} // value used when the node is absent from a document
	SetDefaultValue(interface{}) // override the default value
	CheckType(node yamlmeta.Node) TypeCheck // validate node against this type
	GetDefinitionPosition() *filepos.Position // where in the schema this type was declared
	String() string // human-readable type name
	SetDescription(string) // attach documentation text
	GetDescription() string // retrieve documentation text
}

// Compile-time assertions that every concrete schema type implements Type.
var _ Type = (*DocumentType)(nil)
var _ Type = (*MapType)(nil)
var _ Type = (*MapItemType)(nil)
var _ Type = (*ArrayType)(nil)
var _ Type = (*ArrayItemType)(nil)
var _ Type = (*AnyType)(nil)
var _ Type = (*NullType)(nil)
var _ Type = (*ScalarType)(nil)
// DocumentType is the schema for a whole YAML document.
type DocumentType struct {
	Source *yamlmeta.Document
	ValueType Type // typically one of: MapType, ArrayType, ScalarType
	Position *filepos.Position
	defaultValue interface{}
}

// MapType is the schema for a YAML map; each permitted key has a MapItemType.
type MapType struct {
	Items []*MapItemType
	Position *filepos.Position
	description string
}

// MapItemType is the schema for a single key/value pair within a map.
type MapItemType struct {
	Key interface{} // usually a string
	ValueType Type
	Position *filepos.Position
	defaultValue interface{}
}

// ArrayType is the schema for a YAML array; all items share ItemsType.
type ArrayType struct {
	ItemsType Type
	Position *filepos.Position
	defaultValue interface{}
	description string
}

// ArrayItemType is the schema for one element of an array.
type ArrayItemType struct {
	ValueType Type
	Position *filepos.Position
	defaultValue interface{}
}

// ScalarType is the schema for a scalar value (string, int, float, bool);
// ValueType holds an example value of the expected Go type.
type ScalarType struct {
	ValueType interface{}
	Position *filepos.Position
	defaultValue interface{}
	description string
}

// AnyType permits any value (and any structure) at this position.
type AnyType struct {
	defaultValue interface{}
	Position *filepos.Position
	description string
}

// NullType permits null in addition to the wrapped ValueType.
type NullType struct {
	ValueType Type
	Position *filepos.Position
	description string
}
// SetDefaultValue sets the default value of the wrapped type to `val`
func (n *NullType) SetDefaultValue(val interface{}) {
	n.GetValueType().SetDefaultValue(val)
}

// SetDefaultValue records val as this AnyType's default.
func (a *AnyType) SetDefaultValue(val interface{}) {
	a.defaultValue = val
}

// SetDefaultValue sets the default value of the entire document to `val`
func (t *DocumentType) SetDefaultValue(val interface{}) {
	t.defaultValue = val
}

// SetDefaultValue is ignored as default values should be set on each MapItemType, individually.
func (m *MapType) SetDefaultValue(val interface{}) {
	// TODO: determine if we should set the contents of a MapType by setting the given Map...?
	return
}

// SetDefaultValue sets the default value to `val`
func (t *MapItemType) SetDefaultValue(val interface{}) {
	t.defaultValue = val
}

// SetDefaultValue sets the default value to `val`
func (a *ArrayType) SetDefaultValue(val interface{}) {
	a.defaultValue = val
}

// SetDefaultValue sets the default value to `val`
func (a *ArrayItemType) SetDefaultValue(val interface{}) {
	a.defaultValue = val
}

// SetDefaultValue sets the default value to `val`
func (s *ScalarType) SetDefaultValue(val interface{}) {
	s.defaultValue = val
}

// GetDefaultValue provides the default value (always nil for NullType).
func (n NullType) GetDefaultValue() interface{} {
	return nil
}

// GetDefaultValue provides the default value; node defaults are deep-copied
// so callers cannot mutate the schema's stored default.
func (a AnyType) GetDefaultValue() interface{} {
	if node, ok := a.defaultValue.(yamlmeta.Node); ok {
		return node.DeepCopyAsInterface()
	}
	return a.defaultValue
}

// GetDefaultValue provides the default value
func (s ScalarType) GetDefaultValue() interface{} {
	return s.defaultValue // scalar values are copied (even through an interface{} reference)
}

// GetDefaultValue is not meaningful on an array item; defaults live on the ArrayType.
func (a ArrayItemType) GetDefaultValue() interface{} {
	panic(fmt.Sprintf("Unexpected call to GetDefaultValue() on %+v", a))
}

// GetDefaultValue provides the default value
func (a ArrayType) GetDefaultValue() interface{} {
	return a.defaultValue
}

// GetDefaultValue provides the default value, wrapped as a yamlmeta.MapItem
// keyed by this item's key.
func (t MapItemType) GetDefaultValue() interface{} {
	return &yamlmeta.MapItem{Key: t.Key, Value: t.defaultValue, Position: t.Position}
}

// GetDefaultValue provides the default value: a map built from every item's default.
func (m MapType) GetDefaultValue() interface{} {
	defaultValues := &yamlmeta.Map{Position: m.Position}
	for _, item := range m.Items {
		newItem := item.GetDefaultValue()
		defaultValues.Items = append(defaultValues.Items, newItem.(*yamlmeta.MapItem))
	}
	return defaultValues
}

// GetDefaultValue provides the default value wrapped in a yamlmeta.Document.
func (t DocumentType) GetDefaultValue() interface{} {
	return &yamlmeta.Document{Value: t.defaultValue, Position: t.Position}
}
// AssignTypeTo assigns this NullType's wrapped Type to `node`.
func (n NullType) AssignTypeTo(node yamlmeta.Node) (chk TypeCheck) {
	childCheck := n.ValueType.AssignTypeTo(node)
	chk.Violations = append(chk.Violations, childCheck.Violations...)
	return
}

// GetValueType provides the type of the value
func (n NullType) GetValueType() Type {
	return n.ValueType
}

// CheckType checks the type of `node` against this NullType
// If `node`'s value is null, this check passes
// If `node`'s value is not null, then it is checked against this NullType's wrapped Type.
func (n NullType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	if len(node.GetValues()) == 1 && node.GetValues()[0] == nil {
		return
	}
	check := n.GetValueType().CheckType(node)
	chk.Violations = check.Violations
	return
}

// GetDefinitionPosition provides the position of this type's schema declaration.
func (n NullType) GetDefinitionPosition() *filepos.Position {
	return n.Position
}

// String renders the type name.
func (n NullType) String() string {
	return "null"
}

// GetValueType provides the type of the value
func (t *DocumentType) GetValueType() Type {
	return t.ValueType
}

// GetValueType provides the type of the value
func (m MapType) GetValueType() Type {
	panic("Not implemented because it is unreachable")
}

// GetValueType provides the type of the value
func (t MapItemType) GetValueType() Type {
	return t.ValueType
}

// GetValueType provides the type of the value
func (a ArrayType) GetValueType() Type {
	return a.ItemsType
}

// GetValueType provides the type of the value
func (a ArrayItemType) GetValueType() Type {
	return a.ValueType
}

// GetValueType provides the type of the value
func (s ScalarType) GetValueType() Type {
	panic("Not implemented because it is unreachable")
}

// GetValueType provides the type of the value (AnyType is its own value type).
func (a AnyType) GetValueType() Type {
	return &a
}

// GetDefinitionPosition provides the position of this type's schema declaration.
func (t *DocumentType) GetDefinitionPosition() *filepos.Position {
	return t.Position
}

// GetDefinitionPosition provides the position of this type's schema declaration.
func (m MapType) GetDefinitionPosition() *filepos.Position {
	return m.Position
}

// GetDefinitionPosition provides the position of this type's schema declaration.
func (t MapItemType) GetDefinitionPosition() *filepos.Position {
	return t.Position
}

// GetDefinitionPosition provides the position of this type's schema declaration.
func (a ArrayType) GetDefinitionPosition() *filepos.Position {
	return a.Position
}

// GetDefinitionPosition provides the position of this type's schema declaration.
func (a ArrayItemType) GetDefinitionPosition() *filepos.Position {
	return a.Position
}

// GetDefinitionPosition provides the file position
func (s ScalarType) GetDefinitionPosition() *filepos.Position {
	return s.Position
}

// GetDefinitionPosition provides the position of this type's schema declaration.
func (a AnyType) GetDefinitionPosition() *filepos.Position {
	return a.Position
}

// String renders the type name.
func (t *DocumentType) String() string {
	return "document"
}

// String renders the type name.
func (m MapType) String() string {
	return "map"
}

// String renders the item as "key: valuetype".
func (t MapItemType) String() string {
	return fmt.Sprintf("%s: %s", t.Key, t.ValueType.String())
}

// String renders the type name.
func (a ArrayType) String() string {
	return "array"
}

// String renders the item as "- valuetype".
func (a ArrayItemType) String() string {
	return fmt.Sprintf("- %s", a.ValueType.String())
}

// String names the expected scalar kind based on the Go type of ValueType.
func (s ScalarType) String() string {
	switch s.ValueType.(type) {
	case float64:
		return "float"
	case int:
		return "integer"
	case bool:
		return "boolean"
	default:
		return fmt.Sprintf("%T", s.ValueType)
	}
}

// String renders the type name.
func (a AnyType) String() string {
	return "any"
}

// CheckType checks the type of `node` against this Type.
func (t *DocumentType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	return
}
// CheckType checks the type of `node` against this MapType.
// If `node` is not a yamlmeta.Map, `chk` contains a violation describing this mismatch
// If a contained yamlmeta.MapItem is not allowed by this MapType, `chk` contains a corresponding violation
func (m *MapType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	nodeMap, ok := node.(*yamlmeta.Map)
	if !ok {
		chk.Violations = append(chk.Violations,
			NewMismatchedTypeAssertionError(node, m))
		return
	}
	for _, item := range nodeMap.Items {
		if !m.AllowsKey(item.Key) {
			chk.Violations = append(chk.Violations,
				NewUnexpectedKeyAssertionError(item, m.Position, m.AllowedKeys()))
		}
	}
	return
}

// CheckType checks the type of `node` against this MapItemType
// If `node` is not a yamlmeta.MapItem, `chk` contains a violation describing the mismatch
func (t *MapItemType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	_, ok := node.(*yamlmeta.MapItem)
	if !ok {
		// A Map must've yielded a non-MapItem which is not valid YAML
		panic(fmt.Sprintf("MapItem type check was called on a non-MapItem: %#v", node))
	}
	return
}

// CheckType checks the type of `node` against this ArrayType
// If `node` is not a yamlmeta.Array, `chk` contains a violation describing the mismatch
func (a *ArrayType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	_, ok := node.(*yamlmeta.Array)
	if !ok {
		chk.Violations = append(chk.Violations,
			NewMismatchedTypeAssertionError(node, a))
	}
	return
}

// CheckType checks the type of `node` against this ArrayItemType
// If `node` is not a yamlmeta.ArrayItem, `chk` contains a violation describing the mismatch
func (a *ArrayItemType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	_, ok := node.(*yamlmeta.ArrayItem)
	if !ok {
		// An Array must've yielded a non-ArrayItem which is not valid YAML
		panic(fmt.Sprintf("ArrayItem type check was called on a non-ArrayItem: %#v", node))
	}
	return
}

// CheckType checks the type of `node`'s `value`, which is expected to be a scalar type.
// If the value is not a recognized scalar type, `chk` contains a corresponding violation
// If the value is not of the type specified in this ScalarType, `chk` contains a violation describing the mismatch
func (s *ScalarType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	value := node.GetValues()[0]
	switch value.(type) {
	case string:
		if _, ok := s.ValueType.(string); !ok {
			chk.Violations = append(chk.Violations,
				NewMismatchedTypeAssertionError(node, s))
		}
	case float64:
		if _, ok := s.ValueType.(float64); !ok {
			chk.Violations = append(chk.Violations,
				NewMismatchedTypeAssertionError(node, s))
		}
	case int, int64, uint64:
		// Integer values are also acceptable where the schema expects a float.
		if _, ok := s.ValueType.(int); !ok {
			if _, ok = s.ValueType.(float64); !ok {
				chk.Violations = append(chk.Violations,
					NewMismatchedTypeAssertionError(node, s))
			}
		}
	case bool:
		if _, ok := s.ValueType.(bool); !ok {
			chk.Violations = append(chk.Violations,
				NewMismatchedTypeAssertionError(node, s))
		}
	default:
		chk.Violations = append(chk.Violations,
			NewMismatchedTypeAssertionError(node, s))
	}
	return
}

// CheckType is a no-op because AnyType allows any value.
// `chk` will always be an empty TypeCheck.
func (a AnyType) CheckType(node yamlmeta.Node) (chk TypeCheck) {
	return
}
// AssignTypeTo assigns this schema metadata to `node`.
// If `node` is not a yamlmeta.Document, `chk` contains a violation describing the mismatch
// If `node`'s value is not of the same structure (i.e. yamlmeta.Node type), `chk` contains a violation describing this mismatch
func (t *DocumentType) AssignTypeTo(node yamlmeta.Node) (chk TypeCheck) {
	doc, ok := node.(*yamlmeta.Document)
	if !ok {
		chk.Violations = append(chk.Violations,
			NewMismatchedTypeAssertionError(node, t))
		return
	}
	SetType(doc, t)
	// Only node-valued documents recurse; scalar values carry no child metadata.
	valueNode, isNode := doc.Value.(yamlmeta.Node)
	if isNode {
		childCheck := t.ValueType.AssignTypeTo(valueNode)
		chk.Violations = append(chk.Violations, childCheck.Violations...)
	} // else, is a scalar
	return chk
}
// AssignTypeTo assigns this schema metadata to `node`.
// If `node` is not a yamlmeta.Map, `chk` contains a violation describing the mismatch
// If `node`'s yamlmeta.MapItem's cannot be assigned their corresponding MapItemType, `chk` contains a violation describing the mismatch
// If `node` is missing any yamlmeta.MapItem's specified in this MapType, they are added to `node`.
func (m *MapType) AssignTypeTo(node yamlmeta.Node) (chk TypeCheck) {
	mapNode, ok := node.(*yamlmeta.Map)
	if !ok {
		chk.Violations = append(chk.Violations, NewMismatchedTypeAssertionError(node, m))
		return
	}
	var foundKeys []interface{}
	SetType(mapNode, m)
	for _, mapItem := range mapNode.Items {
		for _, itemType := range m.Items {
			if mapItem.Key == itemType.Key {
				foundKeys = append(foundKeys, itemType.Key)
				childCheck := itemType.AssignTypeTo(mapItem)
				chk.Violations = append(chk.Violations, childCheck.Violations...)
				break
			}
		}
	}
	// chk is passed by pointer: the previous version passed the TypeCheck by
	// value, so any violations produced while defaulting missing keys were
	// appended to a copy and silently discarded.
	m.applySchemaDefaults(foundKeys, &chk, mapNode)
	return
}

// applySchemaDefaults fills in map items declared in the schema but absent
// from mapNode, assigning types to the generated defaults and recording any
// resulting violations into chk.
func (m *MapType) applySchemaDefaults(foundKeys []interface{}, chk *TypeCheck, mapNode *yamlmeta.Map) {
	for _, item := range m.Items {
		if contains(foundKeys, item.Key) {
			continue
		}
		val := item.GetDefaultValue()
		childCheck := item.AssignTypeTo(val.(*yamlmeta.MapItem))
		chk.Violations = append(chk.Violations, childCheck.Violations...)
		if err := mapNode.AddValue(val); err != nil {
			panic(fmt.Sprintf("Internal inconsistency: adding map item: %s", err))
		}
	}
}
// contains reports whether needle occurs in haystack (compared with ==).
func contains(haystack []interface{}, needle interface{}) bool {
	for _, candidate := range haystack {
		if candidate == needle {
			return true
		}
	}
	return false
}
// AssignTypeTo assigns this schema metadata to `node`.
// If `node` is not a yamlmeta.MapItem, `chk` contains a violation describing the mismatch
// If `node`'s value is not of the same structure (i.e. yamlmeta.Node type), `chk` contains a violation describing this mismatch
func (t *MapItemType) AssignTypeTo(node yamlmeta.Node) (chk TypeCheck) {
	mapItem, ok := node.(*yamlmeta.MapItem)
	if !ok {
		panic(fmt.Sprintf("Attempt to assign type to a non-map-item (children of Maps can only be MapItems). type=%#v; typeWithValues=%#v", t, node))
	}
	SetType(node.(yamlmeta.Node), t)
	// Only node values recurse; scalar values carry no child metadata.
	valueNode, isNode := mapItem.Value.(yamlmeta.Node)
	if isNode {
		childCheck := t.ValueType.AssignTypeTo(valueNode)
		chk.Violations = append(chk.Violations, childCheck.Violations...)
	} // else, is scalar
	return
}

// AssignTypeTo assigns this schema metadata to `node`.
// If `node` is not a yamlmeta.Array, `chk` contains a violation describing the mismatch
// For each `node`'s yamlmeta.ArrayItem's that cannot be assigned this ArrayType's ArrayItemType, `chk` contains a violation describing the mismatch
func (a *ArrayType) AssignTypeTo(node yamlmeta.Node) (chk TypeCheck) {
	arrayNode, ok := node.(*yamlmeta.Array)
	if !ok {
		chk.Violations = append(chk.Violations, NewMismatchedTypeAssertionError(node, a))
		return
	}
	SetType(node.(yamlmeta.Node), a)
	for _, arrayItem := range arrayNode.Items {
		childCheck := a.ItemsType.AssignTypeTo(arrayItem)
		chk.Violations = append(chk.Violations, childCheck.Violations...)
	}
	return
}

// AssignTypeTo assigns this schema metadata to `node`.
// If `node` is not a yamlmeta.ArrayItem, `chk` contains a violation describing the mismatch
// If `node`'s value is not of the same structure (i.e. yamlmeta.Node type), `chk` contains a violation describing this mismatch
func (a *ArrayItemType) AssignTypeTo(node yamlmeta.Node) (chk TypeCheck) {
	arrayItem, ok := node.(*yamlmeta.ArrayItem)
	if !ok {
		panic(fmt.Sprintf("Attempt to assign type to a non-array-item (children of Arrays can only be ArrayItems). type=%#v; typeWithValues=%#v", a, node))
	}
	SetType(node.(yamlmeta.Node), a)
	// Only node values recurse; scalar values carry no child metadata.
	valueNode, isNode := arrayItem.Value.(yamlmeta.Node)
	if isNode {
		childCheck := a.ValueType.AssignTypeTo(valueNode)
		chk.Violations = append(chk.Violations, childCheck.Violations...)
	} // else, is scalar
	return
}

// AssignTypeTo returns a violation describing the type mismatch, given that ScalarType will never accept a yamlmeta.Node
func (s *ScalarType) AssignTypeTo(node yamlmeta.Node) TypeCheck {
	return TypeCheck{[]error{NewMismatchedTypeAssertionError(node, s)}}
}

// AssignTypeTo is a no-op given that AnyType allows all types.
func (a AnyType) AssignTypeTo(yamlmeta.Node) (chk TypeCheck) {
	return
}
// GetDescription provides descriptive information (always empty for DocumentType).
func (t *DocumentType) GetDescription() string {
	return ""
}

// GetDescription provides descriptive information
func (m *MapType) GetDescription() string {
	return m.description
}

// GetDescription provides descriptive information (always empty for MapItemType).
func (t *MapItemType) GetDescription() string {
	return ""
}

// GetDescription provides descriptive information
func (a *ArrayType) GetDescription() string {
	return a.description
}

// GetDescription provides descriptive information (always empty for ArrayItemType).
func (a *ArrayItemType) GetDescription() string {
	return ""
}

// GetDescription provides descriptive information
func (s *ScalarType) GetDescription() string {
	return s.description
}

// GetDescription provides descriptive information
func (a *AnyType) GetDescription() string {
	return a.description
}

// GetDescription provides descriptive information
func (n *NullType) GetDescription() string {
	return n.description
}

// SetDescription is a no-op: DocumentType carries no description.
func (t *DocumentType) SetDescription(desc string) {}

// SetDescription sets the description of the type
func (m *MapType) SetDescription(desc string) {
	m.description = desc
}

// SetDescription is a no-op: MapItemType carries no description.
func (t *MapItemType) SetDescription(desc string) {}

// SetDescription sets the description of the type
func (a *ArrayType) SetDescription(desc string) {
	a.description = desc
}

// SetDescription is a no-op: ArrayItemType carries no description.
func (a *ArrayItemType) SetDescription(desc string) {}

// SetDescription sets the description of the type
func (s *ScalarType) SetDescription(desc string) {
	s.description = desc
}

// SetDescription sets the description of the type
func (a *AnyType) SetDescription(desc string) {
	a.description = desc
}

// SetDescription sets the description of the type
func (n *NullType) SetDescription(desc string) {
	n.description = desc
}
// AllowsKey reports whether `key` names one of the MapItemTypes permitted in
// this map. Comparison uses ==, so the key's dynamic type must match as well.
func (m *MapType) AllowsKey(key interface{}) bool {
	for _, item := range m.Items {
		if item.Key == key {
			return true
		}
	}
	return false
}
// AllowedKeys returns the set of keys (in string format) permitted in this map.
func (m *MapType) AllowedKeys() []string {
	var keysAsString []string
	for _, item := range m.Items {
		// %v renders any key type cleanly; %s would produce
		// "%!s(int=1)"-style artifacts for non-string keys.
		keysAsString = append(keysAsString, fmt.Sprintf("%v", item.Key))
	}
	return keysAsString
}
// GetType retrieves schema metadata from `n`, typically set previously via SetType().
// Returns nil when no type has been attached to the node.
func GetType(n yamlmeta.Node) Type {
	t := n.GetMeta("schema/type")
	if t == nil {
		return nil
	}
	return t.(Type)
}

// SetType attaches schema metadata to `n`, typically later retrieved via GetType().
func SetType(n yamlmeta.Node, t Type) {
	n.SetMeta("schema/type", t)
}

// nodeValueTypeAsString renders a human-readable name for the value type of `n`.
// Panics for node types outside the known yamlmeta set (programmer error).
func nodeValueTypeAsString(n yamlmeta.Node) string {
	switch typed := n.(type) {
	case *yamlmeta.DocumentSet:
		return documentSetValueTypeAsString(typed)
	case *yamlmeta.Document:
		return documentValueTypeAsString(typed)
	case *yamlmeta.Map:
		return mapValueTypeAsString(typed)
	case *yamlmeta.MapItem:
		return mapItemValueTypeAsString(typed)
	case *yamlmeta.Array:
		return arrayValueTypeAsString(typed)
	case *yamlmeta.ArrayItem:
		return arrayItemValueTypeAsString(typed)
	case *yamlmeta.Scalar:
		return scalarValueTypeAsString(typed)
	default:
		panic(fmt.Sprintf("unexpected node type: %T", n))
	}
}

// Per-node helpers: container nodes name themselves; item-like nodes
// describe the type of the value they hold.
func documentSetValueTypeAsString(_ *yamlmeta.DocumentSet) string { return "document set" }
func documentValueTypeAsString(d *yamlmeta.Document) string       { return typeToString(d.Value) }
func mapValueTypeAsString(_ *yamlmeta.Map) string                 { return "map" }
func mapItemValueTypeAsString(mi *yamlmeta.MapItem) string        { return typeToString(mi.Value) }
func arrayValueTypeAsString(_ *yamlmeta.Array) string             { return "array" }
func arrayItemValueTypeAsString(ai *yamlmeta.ArrayItem) string    { return typeToString(ai.Value) }
func scalarValueTypeAsString(s *yamlmeta.Scalar) string           { return typeToString(s.Value) }
func typeToString(value interface{}) string {
switch value.(type) {
case float64:
return "float"
case int, int64, uint64:
return "integer"
case bool:
return "boolean"
case nil:
return "null"
default:
if t, ok := value.(yamlmeta.Node); ok {
return nodeValueTypeAsString(t)
}
return fmt.Sprintf("%T", value)
}
} | pkg/schema/type.go | 0.619011 | 0.433382 | type.go | starcoder |
package logic2
import "math"
// make_bricks reports whether a row exactly `goal` inches long can be built
// from `small` 1-inch bricks and `big` 5-inch bricks.
func make_bricks(small, big, goal int) bool {
	// Feasible iff the total inventory covers the goal AND enough 1-inch
	// bricks exist for the remainder that 5-inch bricks cannot fill.
	enoughTotal := goal <= 5*big+small
	enoughSmall := goal%5 <= small
	return enoughTotal && enoughSmall
}
// lone_sum returns a+b+c, except that any value equal to another of the
// values contributes nothing to the sum.
func lone_sum(a, b, c int) int {
	switch {
	case a == b && b == c:
		return 0 // all three collide
	case a == b:
		return c
	case b == c:
		return a
	case c == a:
		return b
	default:
		return a + b + c
	}
}
// lucky_sum returns a+b+c, except that the first 13 and every value to its
// right are excluded from the sum.
func lucky_sum(a, b, c int) int {
	// Checked left to right, matching the "values to its right" rule.
	switch 13 {
	case a:
		return 0
	case b:
		return a
	case c:
		return a + b
	}
	return a + b + c
}
// no_teen_sum returns a+b+c where "teen" values (13..19, excluding the
// exempt 15 and 16) count as zero.
func no_teen_sum(a, b, c int) int {
	fix_teen := func(n int) int {
		// 13..19 minus the exemptions {15, 16}.
		switch n {
		case 13, 14, 17, 18, 19:
			return 0
		}
		return n
	}
	return fix_teen(a) + fix_teen(b) + fix_teen(c)
}
// round_sum rounds each of a, b, c to the nearest multiple of 10 (a last
// digit of 5 or more rounds up) and returns the sum of the rounded values.
func round_sum(a, b, c int) int {
	round10 := func(n int) int {
		r := n % 10
		if r >= 5 {
			return n + 10 - r
		}
		return n - r
	}
	return round10(a) + round10(b) + round10(c)
}
// close_far reports whether exactly one of b or c is "close" to a
// (differing by at most 1) while b and c differ from each other by 2 or more.
func close_far(a, b, c int) bool {
	iabs := func(n int) int {
		return int(math.Abs(float64(n)))
	}
	closeAB := iabs(a-b) <= 1
	closeAC := iabs(a-c) <= 1
	closeBC := iabs(b-c) <= 1
	// Exactly one of b/c is close to a, and b and c are far apart.
	return (closeAB != closeAC) && !closeBC
}
// make_chocolate returns the number of small (1 kilo) bars needed to reach
// exactly goal kilos, always spending big (5 kilo) bars first, or -1 if the
// goal cannot be met.
func make_chocolate(small, big, goal int) int {
	// Spend as many big bars as fit under the goal.
	bigsUsed := goal / 5
	if bigsUsed > big {
		bigsUsed = big
	}
	// Whatever the big bars do not cover must come from small bars.
	remainder := goal - bigsUsed*5
	if remainder <= small {
		return remainder
	}
	return -1
}
package engine
import (
"fmt"
"image"
"math"
"sync"
)
// Model is the model of the game.
type Model struct {
	// Mutex to protect the model from concurrent access.
	sync.RWMutex
	// Game counter. Must be increased by one when a new game is initialized.
	// Can be used to invalidate caches when its value changes.
	Counter int
	// Size of the labyrinth in blocks.
	Rows, Cols int
	// Blocks of the lab. First indexed by row, then by column.
	Lab [][]Block
	// ExitPos: the position Gopher has to reach to win the game.
	ExitPos image.Point
	// Our well-beloved hero Gopher.
	Gopher *MovingObj
	// The ancient enemies of Gopher: the bloodthirsty Bulldogs.
	Bulldogs []*MovingObj
	// Dead tells if Gopher is dead.
	Dead bool
	// Won tells if we won.
	Won bool
	// For Gopher we maintain multiple target positions which specify a
	// path along which Gopher will move.
	TargetPoss []image.Point
}
// Block is a square unit of the Labyrinth.
type Block int

const (
	// BlockEmpty is the empty, free-to-walk block.
	BlockEmpty = iota
	// BlockWall designates an unpassable wall.
	BlockWall
	// BlockCount is not a valid block: just to tell how many blocks there are.
	BlockCount
)
// MovingObj describes moving objects in the labyrinth.
type MovingObj struct {
	// The position in the labyrinth in pixel coordinates.
	Pos struct {
		X, Y float64
	}
	// Direction this object is facing to.
	Dir Dir
	// Target position this object is moving to.
	TargetPos image.Point
}
// step advances the object toward TargetPos by at most dt*v pixels, moving
// along one axis at a time (horizontal first), and updates Dir to reflect
// the direction of travel.
func (m *MovingObj) step() {
	px, py := int(m.Pos.X), int(m.Pos.Y)
	switch {
	case px != m.TargetPos.X:
		// Horizontal move: clamp so we never overshoot the target.
		delta := math.Min(dt*v, math.Abs(float64(m.TargetPos.X)-m.Pos.X))
		if px > m.TargetPos.X {
			m.Dir = DirLeft
			m.Pos.X -= delta
		} else {
			m.Dir = DirRight
			m.Pos.X += delta
		}
	case py != m.TargetPos.Y:
		// Vertical move, only once horizontally aligned.
		delta := math.Min(dt*v, math.Abs(float64(m.TargetPos.Y)-m.Pos.Y))
		if py > m.TargetPos.Y {
			m.Dir = DirUp
			m.Pos.Y -= delta
		} else {
			m.Dir = DirDown
			m.Pos.Y += delta
		}
	}
}
// Dir represents directions.
type Dir int

const (
	// DirRight means facing/moving right.
	DirRight = iota
	// DirLeft means facing/moving left.
	DirLeft
	// DirUp means facing/moving up.
	DirUp
	// DirDown means facing/moving down.
	DirDown
	// DirCount is not a valid direction: just to tell how many directions there are.
	DirCount
)
func (d Dir) String() string {
switch d {
case DirRight:
return "right"
case DirLeft:
return "left"
case DirUp:
return "up"
case DirDown:
return "down"
}
return fmt.Sprintf("Dir(%d)", d)
} | engine/model.go | 0.549157 | 0.456894 | model.go | starcoder |
package api
const (
docsRaml = `#%RAML 0.8
title: Sentinel API
baseUri: http://sentinel.sh/api/{version}
version: v1
documentation:
- title: Signup
content: |
Signup an user using a valid email address and password to create a
Sentinel account. After signup an email message is sent to the given
email address to verify if the user controls the given email address.
- title: Authenticate
content: |
Some methods of the API require the user to be authenticated.
This is done by requesting an authentication token using the user's
credentials, email and password. This authentication token is a JWT
and has an expire date, usually an hour or so in which a new
authentication token needs to be requested.
- title: Authentication token
content: |
An authentication token is best described as a stateless session id with
an expiry date.
- title: One time login
content: |
A one time login allows a user to authenticate without a password. This
can be used to change a forgotten password, to securely login on a
public WiFi.
- title: JWT
content: |
JSON Web tokens (JWT for short) are JSON objects signed by the Sentinel API.
The tokens can contain information about resources managed by the Sentinel
API and can be used to authenticate users, verify email addresses,
password-less logins and more in the future. JWTs generated by the
Sentinel API can be verified by clients using the public key available
in the API.
mediaType: application/json; charset=utf-8
traits:
- secured:
usage: Apply this to any method that needs to be secured
description: Some requests require authentication.
headers:
Authorization:
type: string
example: Bearer eyJ...
responses:
401:
headers:
WWW-Authenticate:
type: string
example: |
Bearer realm="https://Sentinel", error="invalid_credentials",
error_description="missing or invalid authentication credentials"
body:
application/json; charset=utf-8:
schema: error
example: |
{
"error": "invalid_credentials",
"error_description": "missing or invalid authentication credentials"
}
403:
description: Unauthorized access.
- limited:
usage:
description: |
Limit the range of returend results using HTTP Range headers as defined
in [RFC6902](http://tools.ietf.org/html/rfc6902).
headers:
Range:
description: |
Set the limit and offset in a request by setting the range in the
format 'first-last' or 'first-'. Index starts at 0.
type: string
example: 0-10
responses:
200:
headers:
Content-Range:
description: |
The response range as set by the server in the format
'first-last/length' where length is the total number of
results. The length can be set as a wildcard to indicate
that the length is unknown or very large.
type: string
example: 0-10/20
schemas:
- user: |
{ "$schema": "http://json-schema.org/schema",
"type": "object",
"properties": {
"id": {
"description": "UUID version 4 identifier as defined in RFC4122.",
"type": "string",
"format": "uuid"
},
"name": {
"type": "string"
},
"lastLogin": {
"type": "date"
},
"defaultAuthLevel": {
"description": "Default authentication level, options are 0:unknown 1:notify 2:fast 3:secure",
"type": "integer",
"default": 0,
"enum": [ 0, 1, 2, 3 ]
},
"deviceToken": {
"description": "An APN device token or GCM registration token.",
"type": "string",
"maxLength": "256",
"minLength": "64"
}
}
}
- authemail: |
{ "$schema": "http://json-schema.org/schema",
"type": "object",
"properties": {
"id": {
"description": "UUID version 4 identifier as defined in RFC4122.",
"type": "string",
"format": "uuid"
},
"email": {
"type": "string",
"pattern": "^[^@\s]+@[^@\s]+$"
},
"isVerified": {
"description": "True when the email address is verified by the owner.",
"type": "boolean",
"default": false
}
}
}
- service: |
{ "$schema": "http://json-schema.org/schema",
"type": "object",
"properties": {
"id": {
"description": "UUID version 4 identifier as defined in RFC4122.",
"type": "string",
"format": "uuid"
},
"serviceUrl": {
"type": "string",
"format": "uri"
},
"serviceLogoUrl": {
"type": "string",
"format": "uri"
},
"authLevel": {
"description": "Authentication level, options are 0:unknown 1:notify 2:fast 3:secure",
"type": "integer",
"default": 0,
"enum": [ 0, 1, 2, 3 ]
},
"lastEntryDate": {
"type": "date"
}
}
}
- error: |
{ "$schema": "http://json-schema.org/schema",
"type": "object",
"properties": {
"error": {
"type": "string"
},
"error_description": {
"type": "string"
}
}
}
/signup:
post:
description: |
Signup for a Sentinel account with email and password. To verifiy the email
address, an email message will be sent with a verification link.
body:
application/x-www-form-urlencoded; charset=utf-8:
formParameters:
email:
description: A valid email address controlled by the user.
type: string
pattern: ^[^@\s]+@[^@\s]+$
password:
description: The password with which the user registered the account.
type: string
minLength: 8
headers:
Prefer:
description: Request the API to return the created resource
type: string
example: return=representation
responses:
201:
description: |
The response body is empty; to have the resource returned, include
the the "Prefer: return=representation" header in the request.
body:
application/json; charset=utf-8:
schema: user
422:
description: |
Request had validation errors, email didn't match
pattern, email already registered, etc.
body:
application/json; charset=utf-8:
schema: error
/token:
post:
description: Authenticate with email address and password to request an authentication token.
headers:
Authorization:
type: string
example: Basic dXNlcjpwYXNz
responses:
200:
body:
application/json; charset=utf-8:
schema: |
{ "$schema": "http://json-schema.org/schema",
"type": "object",
"properties": {
"token_type": { "type": "string" },
"expires_in": { "type": "int" },
"id_token": { "type": "string" }
}
}
example: |
{
"token_type": "Bearer",
"expires_in": 3600,
"id_token": "eyJ..."
}
/onetimelogin:
post:
description: |
Request a one time login. An email message with a one time loging link
will be sent to the registered email address. This will allow the user
to be authenticated with any credentials for a single, limited time.
body:
application/x-www-form-urlencoded; charset=utf-8:
formParameters:
email:
description: An email address with which the user registered an account.
type: string
pattern: ^[^@\s]+@[^@\s]+$
/user/self:
is: [ secured ]
get:
description: Get user details for authenticated user.
body:
application/json; charset=utf-8:
schema: user
put:
description: Update user details for authenticated user.
body:
application/x-www-form-urlencoded; charset=utf-8:
formParameters:
name:
description: Name of the user.
type: string
password:
description: The new password for the account.
type: string
minLength: 8
defaultAuthLevel:
description: |
Default authentication level, options are 1:Notify, 2:Fast or 3:Secure.
type: int
enum: [ 1, 2, 3 ]
responses:
200:
body:
application/json; charset=utf-8:
schema: user
422:
description: |
Request had validation errors.
body:
application/json; chartset=utf-8:
schema: error
/email:
is: [ secured ]
get:
is: [ limited ]
description: List all email addresses associated by the authenticated user.
responses:
200:
body:
application/json; charset=utf-8:
post:
description: |
Register a new email address; a verification email will be sent to given
email address.
body:
application/x-www-form-urlencoded; charset=utf-8:
formParameters:
email:
description: An email address with which the user registered an account.
type: string
pattern: ^[^@\s]+@[^@\s]+$
headers:
Prefer:
description: Request the API to return the created resource
type: string
example: return=representation
responses:
201:
description: |
The response body is empty; to have the resource returned, include
the the "Prefer: return=representation" header in the request.
body:
application/json; charset=utf-8:
schema: authemail
422:
description: |
Request had validation errors, email didn't match
pattern, email already registered, etc.
body:
application/json; charset=utf-8:
schema: error
/{id}:
is: [ secured ]
get:
description: Get the email address.
responses:
200:
body:
application/json; charset=utf-8:
schema: authemail
delete:
description: Delete the email address.
responses:
204:
/verify:
post:
body:
application/x-www-form-urlencoded; charset=utf-8:
formParameters:
token:
description: The base64 encoded JWT token which was sent in the verification email.
type: string
example: eyJ...
responses:
204:
422:
description: invalid token
body:
application/json; charset=utf-8:
schema: error
/service/{id}:
get:
description: Get the service associated with the id.
responses:
200:
body:
application/json; charset=utf-8:
schema: service
/pubkey:
get:
description: Use this public key to validate the signature of JWT tokens created by the API.
responses:
200:
body:
text/plain; charset=utf-8:
`
)
package ql
import (
"reflect"
"time"
)
// Unvetted thoughts:
// Given a query and given a structure (field list), there's 2 sets of fields.
// Take the intersection. We can fill those in. great.
// For fields in the structure that aren't in the query, we'll let that slide if db:"-".
// For fields in the structure that aren't in the query but without db:"-", return error.
// For fields in the query that aren't in the structure, we'll ignore them.

// loader executes a built query and scans the result rows into Go values,
// reporting timing and error events through the embedded EventReceiver.
type loader struct {
	EventReceiver
	runner
	builder queryBuilder
}
// All executes the query and loads the resulting data into the dest, which can be a slice of
// either structs, or primitive values. It returns n found items (which is not necessarily the
// number of items set).
// Panics if dest is not a pointer to a slice, or if the slice elements are
// structs not held by pointer (programmer error, not a runtime condition).
func (l loader) All(dest interface{}) (n int, err error) {
	valOfDest := reflect.ValueOf(dest)
	if valOfDest.Kind() != reflect.Ptr {
		panic("dest must be a pointer to a slice")
	}
	valOfIndirect := reflect.Indirect(valOfDest)
	if valOfIndirect.Kind() != reflect.Slice {
		panic("dest must be a pointer to a slice")
	}
	originType := valOfIndirect.Type().Elem()
	elemType := originType
	// Struct elements must be held by pointer; unwrap one level to find out.
	canBeStruct := true
	if originType.Kind() != reflect.Ptr {
		canBeStruct = false
	} else {
		elemType = originType.Elem()
	}
	switch elemType.Kind() {
	case reflect.Struct:
		if !canBeStruct {
			panic("elements of the dest slice must be pointers to structs")
		}
		return l.loadStructs(dest, valOfIndirect, elemType)
	default:
		return l.loadValues(dest, valOfIndirect, originType)
	}
}
// One executes the query and loads the resulting data into the dest, which can be either
// a struct, or a primitive value. Returns ErrNotFound if no item was found, and it was
// therefore not set.
// Panics if dest is not a pointer (programmer error).
func (l loader) One(dest interface{}) error {
	valOfDest := reflect.ValueOf(dest)
	if valOfDest.Kind() != reflect.Ptr {
		panic("dest must be a pointer")
	}
	valOfIndirect := reflect.Indirect(valOfDest)
	switch valOfIndirect.Kind() {
	case reflect.Struct:
		return l.loadStruct(dest, valOfIndirect)
	default:
		return l.loadValue(dest)
	}
}
// loadStructs executes the query and loads the resulting data into a slice of structs,
// dest must be a pointer to a slice of pointers to structs. It returns the number of items
// found (which is not necessarily the number of items set).
func (l loader) loadStructs(dest interface{}, valueOfDest reflect.Value, elemType reflect.Type) (int, error) {
	fullSql, err := Preprocess(l.builder.ToSql())
	if err != nil {
		return 0, l.EventErr("dbr.select.load_all.interpolate", err)
	}
	numberOfRowsReturned := 0
	// Record total load time (query + scan) via the event receiver.
	startTime := time.Now()
	defer func() { l.TimingKv("dbr.select", time.Since(startTime).Nanoseconds(), kvs{"sql": fullSql}) }()
	rows, err := l.runner.Query(fullSql)
	if err != nil {
		return 0, l.EventErrKv("dbr.select.load_all.query", err, kvs{"sql": fullSql})
	}
	defer rows.Close()
	columns, err := rows.Columns()
	if err != nil {
		return numberOfRowsReturned, l.EventErrKv("dbr.select.load_one.rows.Columns", err, kvs{"sql": fullSql})
	}
	// Map result columns onto struct fields once, up front, for all rows.
	fieldMap, err := calculateFieldMap(elemType, columns, false)
	if err != nil {
		return numberOfRowsReturned, l.EventErrKv("dbr.select.load_all.calculateFieldMap", err, kvs{"sql": fullSql})
	}
	// Build a 'holder', which is an []interface{}. Each value will be set to the address
	// of the field corresponding to our newly made records:
	holder := make([]interface{}, len(fieldMap))
	// Iterate over rows and scan their data into the structs.
	sliceValue := valueOfDest
	for rows.Next() {
		// Create a new record to store our row:
		pointerToNewRecord := reflect.New(elemType)
		newRecord := reflect.Indirect(pointerToNewRecord)
		// Prepare the holder for this record.
		scannable, err := prepareHolderFor(newRecord, fieldMap, holder)
		if err != nil {
			return numberOfRowsReturned, l.EventErrKv("dbr.select.load_all.holderFor", err, kvs{"sql": fullSql})
		}
		// Load up our new structure with the row's values.
		err = rows.Scan(scannable...)
		if err != nil {
			return numberOfRowsReturned, l.EventErrKv("dbr.select.load_all.scan", err, kvs{"sql": fullSql})
		}
		// Append our new record to the slice:
		sliceValue = reflect.Append(sliceValue, pointerToNewRecord)
		numberOfRowsReturned++
	}
	valueOfDest.Set(sliceValue)
	// Check for errors at the end. These are errors that can happen during iteration.
	if err = rows.Err(); err != nil {
		return numberOfRowsReturned, l.EventErrKv("dbr.select.load_all.rows_err", err, kvs{"sql": fullSql})
	}
	return numberOfRowsReturned, nil
}
// loadStruct executes the query and loads the resulting data into a struct,
// dest must be a pointer to a struct. Returns ErrNotFound if nothing was found.
// Only the first result row is consumed; any further rows are ignored.
func (l loader) loadStruct(dest interface{}, valueOfDest reflect.Value) error {
	fullSql, err := Preprocess(l.builder.ToSql())
	if err != nil {
		return err
	}
	startTime := time.Now()
	defer func() {
		l.TimingKv("dbr.select", time.Since(startTime).Nanoseconds(), kvs{"sql": fullSql})
	}()
	rows, err := l.runner.Query(fullSql)
	if err != nil {
		return l.EventErrKv("dbr.select.load_one.query", err, kvs{"sql": fullSql})
	}
	defer rows.Close()
	columns, err := rows.Columns()
	if err != nil {
		return l.EventErrKv("dbr.select.load_one.rows.Columns", err, kvs{"sql": fullSql})
	}
	fieldMap, err := calculateFieldMap(valueOfDest.Type(), columns, false)
	if err != nil {
		return l.EventErrKv("dbr.select.load_one.calculateFieldMap", err, kvs{"sql": fullSql})
	}
	// Build a 'holder', which is an []interface{}. Each value will be set to the address
	// of the field corresponding to our newly made records:
	holder := make([]interface{}, len(fieldMap))
	if rows.Next() {
		// Point each holder slot at the matching field of the caller's struct.
		scannable, err := prepareHolderFor(valueOfDest, fieldMap, holder)
		if err != nil {
			return l.EventErrKv("dbr.select.load_one.holderFor", err, kvs{"sql": fullSql})
		}
		// Load up our new structure with the row's values.
		err = rows.Scan(scannable...)
		if err != nil {
			return l.EventErrKv("dbr.select.load_one.scan", err, kvs{"sql": fullSql})
		}
		return nil
	}
	// No rows: distinguish iteration errors from a genuinely empty result.
	if err := rows.Err(); err != nil {
		return l.EventErrKv("dbr.select.load_one.rows_err", err, kvs{"sql": fullSql})
	}
	return ErrNotFound
}
// loadValues executes the query and loads the resulting data into a slice of
// primitive values. It returns the number of values found; an empty result
// is not an error (unlike loadValue, no ErrNotFound is returned here).
func (l loader) loadValues(dest interface{}, valueOfDest reflect.Value, elemType reflect.Type) (int, error) {
	fullSql, err := Preprocess(l.builder.ToSql())
	if err != nil {
		return 0, err
	}
	numberOfRowsReturned := 0
	startTime := time.Now()
	defer func() { l.TimingKv("dbr.select", time.Since(startTime).Nanoseconds(), kvs{"sql": fullSql}) }()
	rows, err := l.runner.Query(fullSql)
	if err != nil {
		return numberOfRowsReturned, l.EventErrKv("dbr.select.load_all_values.query", err, kvs{"sql": fullSql})
	}
	defer rows.Close()
	sliceValue := valueOfDest
	for rows.Next() {
		// Create a new value to store our row:
		pointerToNewValue := reflect.New(elemType)
		newValue := reflect.Indirect(pointerToNewValue)
		err = rows.Scan(pointerToNewValue.Interface())
		if err != nil {
			return numberOfRowsReturned, l.EventErrKv("dbr.select.load_all_values.scan", err, kvs{"sql": fullSql})
		}
		// Append our new value to the slice:
		sliceValue = reflect.Append(sliceValue, newValue)
		numberOfRowsReturned++
	}
	valueOfDest.Set(sliceValue)
	// Surface any error that occurred during iteration.
	if err := rows.Err(); err != nil {
		return numberOfRowsReturned, l.EventErrKv("dbr.select.load_all_values.rows_err", err, kvs{"sql": fullSql})
	}
	return numberOfRowsReturned, nil
}
// loadValue executes the query and loads the resulting data into a primitive value.
// Returns ErrNotFound if no value was found, and it was therefore not set.
// Only the first result row is consumed; any further rows are ignored.
func (l loader) loadValue(dest interface{}) error {
	fullSql, err := Preprocess(l.builder.ToSql())
	if err != nil {
		return err
	}
	startTime := time.Now()
	defer func() {
		l.TimingKv("dbr.select", time.Since(startTime).Nanoseconds(), kvs{"sql": fullSql})
	}()
	// Run the query:
	rows, err := l.runner.Query(fullSql)
	if err != nil {
		return l.EventErrKv("dbr.select.load_value.query", err, kvs{"sql": fullSql})
	}
	defer rows.Close()
	if rows.Next() {
		err = rows.Scan(dest)
		if err != nil {
			return l.EventErrKv("dbr.select.load_value.scan", err, kvs{"sql": fullSql})
		}
		return nil
	}
	// No rows: distinguish iteration errors from a genuinely empty result.
	if err := rows.Err(); err != nil {
		return l.EventErrKv("dbr.select.load_value.rows_err", err, kvs{"sql": fullSql})
	}
	return ErrNotFound
}
package codegen
import (
"bytes"
"fmt"
"go/token"
"reflect"
)
// SnippetType is a Snippet that renders as a Go type expression.
type SnippetType interface {
	Snippet
	snippetType()
}

// ImportPathAliaser maps an import path to the package alias used in generated code.
type ImportPathAliaser func(importPath string) string

// TypeOf converts a reflect.Type into a SnippetType, aliasing imported
// package paths with LowerSnakeCase.
var TypeOf = createTypeOf(LowerSnakeCase)
// createTypeOf builds the reflect.Type -> SnippetType converter, using
// `aliaser` to turn import paths into package qualifiers.
func createTypeOf(aliaser ImportPathAliaser) func(tpe reflect.Type) SnippetType {
	return func(tpe reflect.Type) SnippetType {
		// Named types from a package render as "alias.Name".
		if tpe.PkgPath() != "" {
			return Type(aliaser(tpe.PkgPath()) + "." + tpe.Name())
		}
		// Recurse structurally for unnamed composite types.
		typeof := createTypeOf(aliaser)
		switch tpe.Kind() {
		case reflect.Ptr:
			return Star(typeof(tpe.Elem()))
		case reflect.Chan:
			return Chan(typeof(tpe.Elem()))
		case reflect.Struct:
			fields := make([]*SnippetField, 0)
			for i := 0; i < tpe.NumField(); i++ {
				f := tpe.Field(i)
				// Anonymous (embedded) fields have no field name.
				if f.Anonymous {
					fields = append(fields, Var(typeof(f.Type)).WithTag(string(f.Tag)))
				} else {
					fields = append(fields, Var(typeof(f.Type), f.Name).WithTag(string(f.Tag)))
				}
			}
			return Struct(fields...)
		case reflect.Array:
			return Array(typeof(tpe.Elem()), tpe.Len())
		case reflect.Slice:
			return Slice(typeof(tpe.Elem()))
		case reflect.Map:
			return Map(typeof(tpe.Key()), typeof(tpe.Elem()))
		default:
			// Builtins (int, string, bool, ...) render by their literal name.
			return BuiltInType(tpe.String())
		}
	}
}
// Ellipsis wraps `tpe` as a variadic ("...T") parameter type.
func Ellipsis(tpe SnippetType) *EllipsisType {
	return &EllipsisType{
		Elem: tpe,
	}
}

// EllipsisType renders as "...Elem".
type EllipsisType struct {
	SnippetType
	Elem SnippetType
}

// Bytes renders the "..." token followed by the element type.
func (tpe *EllipsisType) Bytes() []byte {
	buf := &bytes.Buffer{}
	buf.WriteString(token.ELLIPSIS.String())
	buf.Write(tpe.Elem.Bytes())
	return buf.Bytes()
}

// Chan wraps `tpe` as a channel element type.
func Chan(tpe SnippetType) *ChanType {
	return &ChanType{
		Elem: tpe,
	}
}

// ChanType renders as "chan Elem".
type ChanType struct {
	SnippetType
	Elem SnippetType
}

// Bytes renders "chan " followed by the element type.
func (tpe *ChanType) Bytes() []byte {
	buf := &bytes.Buffer{}
	buf.WriteString(token.CHAN.String() + " ")
	buf.Write(tpe.Elem.Bytes())
	return buf.Bytes()
}

// Type creates a named type reference from an identifier string.
func Type(name string) *NamedType {
	return &NamedType{
		Name: Id(name),
	}
}

// NamedType renders as its identifier; it may also appear embedded in an
// interface declaration and be addressed.
type NamedType struct {
	SnippetType
	SnippetCanBeInterfaceMethod
	SnippetCanAddr
	Name *SnippetIdent
}

// Bytes renders the identifier.
func (tpe *NamedType) Bytes() []byte {
	return tpe.Name.Bytes()
}
// Func starts a function type/declaration with the given parameters.
func Func(params ...*SnippetField) *FuncType {
	return &FuncType{
		Params: params,
	}
}

// FuncType describes a function: optional name, optional receiver,
// parameters, results, and an optional body.
type FuncType struct {
	SnippetType
	SnippetCanBeInterfaceMethod
	Name    *SnippetIdent
	Recv    *SnippetField
	Params  []*SnippetField
	Results []*SnippetField
	Body    []Snippet
	// noFuncToken suppresses the leading "func" keyword, used when
	// rendering the signature inside an interface body.
	noFuncToken bool
}

// The builder methods below use value receivers deliberately: each call
// mutates a copy and returns a pointer to it, leaving the original intact.

func (f FuncType) withoutFuncToken() *FuncType {
	f.noFuncToken = true
	return &f
}

// Do sets the function body statements.
func (f FuncType) Do(bodies ...Snippet) *FuncType {
	f.Body = append([]Snippet{}, bodies...)
	return &f
}

// Named sets the function name.
func (f FuncType) Named(name string) *FuncType {
	f.Name = Id(name)
	return &f
}

// MethodOf sets the receiver, turning the function into a method.
func (f FuncType) MethodOf(recv *SnippetField) *FuncType {
	f.Recv = recv
	return &f
}

// Return sets the result parameters.
func (f FuncType) Return(results ...*SnippetField) *FuncType {
	f.Results = results
	return &f
}

// Bytes renders the full declaration: "func (recv) Name(params) (results) { body }",
// omitting whichever parts are unset.
func (f *FuncType) Bytes() []byte {
	buf := &bytes.Buffer{}
	if !f.noFuncToken {
		buf.WriteString(token.FUNC.String())
		buf.WriteRune(' ')
	}
	if f.Recv != nil {
		buf.WriteByte('(')
		buf.Write(f.Recv.Bytes())
		buf.WriteString(") ")
	}
	if f.Name != nil {
		buf.Write(f.Name.Bytes())
	}
	buf.WriteByte('(')
	for i := range f.Params {
		if i > 0 {
			buf.WriteString(", ")
		}
		buf.Write(f.Params[i].WithoutTag().Bytes())
	}
	buf.WriteByte(')')
	hasResults := len(f.Results) > 0
	if hasResults {
		buf.WriteString(" (")
	}
	for i := range f.Results {
		if i > 0 {
			buf.WriteString(", ")
		}
		buf.Write(f.Results[i].WithoutTag().Bytes())
	}
	if hasResults {
		buf.WriteByte(')')
	}
	if f.Body != nil {
		buf.WriteRune(' ')
		buf.Write(Body(f.Body).Bytes())
	}
	return buf.Bytes()
}
// Struct builds a struct type from its field definitions.
func Struct(fields ...*SnippetField) *StructType {
	return &StructType{
		Fields: fields,
	}
}

// StructType renders as "struct { ... }" with one field per line.
type StructType struct {
	SnippetType
	Fields []*SnippetField
}

// Bytes renders the struct declaration.
func (tpe *StructType) Bytes() []byte {
	buf := &bytes.Buffer{}
	buf.WriteString(token.STRUCT.String() + " {")
	for i := range tpe.Fields {
		buf.WriteRune('\n')
		buf.Write(tpe.Fields[i].Bytes())
	}
	buf.WriteRune('\n')
	buf.WriteRune('}')
	return buf.Bytes()
}
// Interface builds an interface type from its method (or embedded type) members.
func Interface(methods ...SnippetCanBeInterfaceMethod) *InterfaceType {
	return &InterfaceType{
		Methods: methods,
	}
}

// SnippetCanBeInterfaceMethod marks snippets that may appear inside an
// interface body: function signatures and named (embedded) types.
type SnippetCanBeInterfaceMethod interface {
	canBeInterfaceMethod()
}

// InterfaceType renders as "interface { ... }".
type InterfaceType struct {
	SnippetType
	Methods []SnippetCanBeInterfaceMethod
}
// Bytes renders the interface declaration: "interface {" followed by one
// method signature (without the "func" keyword) or embedded named type per
// line, then the closing brace.
func (tpe *InterfaceType) Bytes() []byte {
	buf := &bytes.Buffer{}
	buf.WriteString(token.INTERFACE.String() + " {")
	for i, member := range tpe.Methods {
		if i == 0 {
			buf.WriteRune('\n')
		}
		// Bind the concrete type once instead of asserting twice.
		switch method := member.(type) {
		case *FuncType:
			buf.Write(method.withoutFuncToken().Bytes())
		case *NamedType:
			buf.Write(method.Bytes())
		}
		buf.WriteRune('\n')
	}
	buf.WriteRune('}')
	return buf.Bytes()
}
// Map builds a map type from its key and value types.
func Map(key SnippetType, value SnippetType) *MapType {
	return &MapType{
		Key:   key,
		Value: value,
	}
}

// MapType renders as "map[Key]Value".
type MapType struct {
	SnippetType
	Key   SnippetType
	Value SnippetType
}

// Bytes renders the map type expression.
func (tpe *MapType) Bytes() []byte {
	buf := &bytes.Buffer{}
	buf.WriteString(token.MAP.String() + "[")
	buf.Write(tpe.Key.Bytes())
	buf.WriteRune(']')
	buf.Write(tpe.Value.Bytes())
	return buf.Bytes()
}

// Slice builds a slice type from its element type.
func Slice(tpe SnippetType) *SliceType {
	return &SliceType{
		Elem: tpe,
	}
}

// SliceType renders as "[]Elem".
type SliceType struct {
	SnippetType
	Elem SnippetType
}

// Bytes renders the slice type expression.
func (tpe *SliceType) Bytes() []byte {
	buf := &bytes.Buffer{}
	buf.WriteString("[]")
	buf.Write(tpe.Elem.Bytes())
	return buf.Bytes()
}

// Array builds a fixed-length array type from its element type and length.
func Array(tpe SnippetType, len int) *ArrayType {
	return &ArrayType{
		Elem: tpe,
		Len:  len,
	}
}

// ArrayType renders as "[Len]Elem".
type ArrayType struct {
	SnippetType
	Elem SnippetType
	Len  int
}

// Bytes renders the array type expression.
func (tpe *ArrayType) Bytes() []byte {
	buf := &bytes.Buffer{}
	buf.WriteString(fmt.Sprintf("[%d]", tpe.Len))
	buf.Write(tpe.Elem.Bytes())
	return buf.Bytes()
}
// BuiltInType is a predeclared Go type, referenced by its literal name.
type BuiltInType string

func (BuiltInType) snippetType() {}

// Bytes renders the type name verbatim.
func (tpe BuiltInType) Bytes() []byte {
	return []byte(string(tpe))
}

// The set of predeclared Go type names.
const (
	Bool       BuiltInType = "bool"
	Int        BuiltInType = "int"
	Int8       BuiltInType = "int8"
	Int16      BuiltInType = "int16"
	Int32      BuiltInType = "int32"
	Int64      BuiltInType = "int64"
	Uint       BuiltInType = "uint"
	Uint8      BuiltInType = "uint8"
	Uint16     BuiltInType = "uint16"
	Uint32     BuiltInType = "uint32"
	Uint64     BuiltInType = "uint64"
	Uintptr    BuiltInType = "uintptr"
	Float32    BuiltInType = "float32"
	Float64    BuiltInType = "float64"
	Complex64  BuiltInType = "complex64"
	Complex128 BuiltInType = "complex128"
	String     BuiltInType = "string"
	Byte       BuiltInType = "byte"
	Rune       BuiltInType = "rune"
	Error      BuiltInType = "error"
)
package main
import (
"fmt"
"reflect"
)
// A Tour is an activity that someone can partake in.
type Tour struct {
	Id    string  // short identifier, e.g. "OH"
	Name  string  // human-readable name
	Price float64 // price in dollars
}

// String implements fmt.Stringer; a Tour prints as its Id.
func (t Tour) String() string {
	return t.Id
}
// A SalesPromotion is a rule which determines the eligibility for price adjustments and/or free
// Tours. It receives the tours in the cart and returns any free Tours plus a
// price adjustment (negative values reduce the total).
type SalesPromotion func(t ...Tour) ([]Tour, float64)

// A ShoppingCart contains the Tours intended for purchase and any SalesPromotions that are to be
// applied to an order.
type ShoppingCart struct {
	Tours           []Tour
	SalesPromotions []SalesPromotion
}
// AddSalesPromotion adds a SalesPromotion to a ShoppingCart.
// Promotions already present (compared by function pointer via reflect)
// are skipped, so the same rule is never registered twice.
func (s *ShoppingCart) AddSalesPromotion(sp ...SalesPromotion) {
	for _, candidate := range sp {
		ptr := reflect.ValueOf(candidate).Pointer()
		duplicate := false
		for _, existing := range s.SalesPromotions {
			if reflect.ValueOf(existing).Pointer() == ptr {
				duplicate = true
				break
			}
		}
		if !duplicate {
			s.SalesPromotions = append(s.SalesPromotions, candidate)
		}
	}
}
// AddTour appends the given tours to the ShoppingCart.
// Improvement: a single variadic append replaces the element-by-element loop.
func (s *ShoppingCart) AddTour(t ...Tour) {
	s.Tours = append(s.Tours, t...)
}
// Empty removes any/all Tours and SalesPromotions from a ShoppingCart.
// The slices are truncated to length zero rather than set to nil, so their
// backing arrays are retained for reuse.
func (s *ShoppingCart) Empty() {
	s.Tours = s.Tours[:0]
	s.SalesPromotions = s.SalesPromotions[:0]
}
// Review displays the Tours that were added for purchase and any/all
// adjustments that result from the SalesPromotions that are in effect.
// Each promotion sees the full tour list; its free tours and price
// adjustment are accumulated into the printed summary.
func (s *ShoppingCart) Review() {
	var subtotal float64
	for _, tour := range s.Tours {
		subtotal += tour.Price
	}
	var added []Tour
	var adjustment float64
	for _, promo := range s.SalesPromotions {
		extra, delta := promo(s.Tours...)
		added = append(added, extra...)
		adjustment += delta
	}
	const summary = `
Items: %s
Subtotal: $%.2f
Sales promotion adjustments
Tours added: %s Price adjustments: $%.2f
Grand total: $%.2f
`
	fmt.Printf(summary, s.Tours, subtotal, added, adjustment, subtotal+adjustment)
}
func main() {
OH := Tour{"OH", "Opera House Tour", 300.00}
BC := Tour{"BC", "Sydney Bridge Climb", 110.00}
SK := Tour{"SK", "Sydney Sky Tower", 30.00}
// Purchasing three (3) Opera House tour yields a free Opera House tour.
ThreeForTwoOperaHouse := func(t ...Tour) ([]Tour, float64) {
var cnt int
for i := range t {
if t[i].Id == "OH" {
cnt++
}
}
return nil, float64(cnt / 3) * -OH.Price
}
// Purchasing one (1) Opera House tour yields a free Sky Tower tour.
FreeSkyTowerWithOperaHouse := func(t ...Tour) ([]Tour, float64) {
var cntOH, cntSK int
for i := range t {
switch t[i].Id {
case "OH":
cntOH++
case "SK":
cntSK++
}
}
switch {
// There are no Opera House tours being purchased so this promotion does not apply.
case cntOH == 0:
return nil, 0.00
// Yield a credit for the same number of Sky Tower tours being purchased as the number of
// Opera House tours being purchased.
case cntOH <= cntSK:
return nil, float64(cntOH) * -SK.Price
// Yield a credit for the same number of Sky Tower tours being purchased as the number of
// Opera House tours being purchased along with extra Sky Tower tours for every Opera House
// tour being purchased in excess of the number of Sky Tower tours being purchased.
default:
var tmp []Tour
for i := 0; i < (cntOH - cntSK); i++ {
tmp = append(tmp, SK)
}
return tmp, float64(cntSK) * -SK.Price
}
}
// Purchasing more than four (4) Bridge Climb tours yields a discount of $20.00 on all
// Bridge Climb tours to be purchased.
SydneyBridgeClimbBulkDiscount := func(t ...Tour) ([]Tour, float64) {
var cnt int
for i := range t {
if t[i].Id == "BC" {
cnt++
}
}
if cnt > 4 {
return nil, float64(-20.00 * cnt)
}
return nil, 0.00
}
tests := [][]Tour{
[]Tour{OH, OH, OH, BC},
[]Tour{OH, SK},
[]Tour{BC, BC, BC, BC, BC, OH},
[]Tour{OH, OH, OH, BC, SK},
[]Tour{OH, BC, BC, SK, SK},
[]Tour{BC, BC, BC, BC, BC, BC, OH, OH},
[]Tour{SK, SK, BC},
}
currentPromotions := []SalesPromotion{
ThreeForTwoOperaHouse,
FreeSkyTowerWithOperaHouse,
SydneyBridgeClimbBulkDiscount,
}
for t := range tests {
var sc ShoppingCart
sc.AddTour(tests[t]...)
sc.AddSalesPromotion(currentPromotions...)
sc.Review()
}
} | 316-sydney_tourist_shopping_cart/main.go | 0.743727 | 0.433862 | main.go | starcoder |
package graph
import (
"errors"
"fmt"
"io"
"strconv"
)
type (
	// GremlinTraversalSequence is a parsed Gremlin query: the traversal
	// source plus the ordered steps to execute against it.
	GremlinTraversalSequence struct {
		GraphTraversal *GraphTraversal
		steps          []GremlinTraversalStep
		extensions     []GremlinTraversalExtension
	}

	// GremlinTraversalStep is one executable step; Exec receives the result
	// of the previous step and returns the next one.
	GremlinTraversalStep interface {
		Exec(last GraphTraversalStep) (GraphTraversalStep, error)
	}

	// GremlinTraversalStepParams holds a step's already-decoded parameters.
	GremlinTraversalStepParams []interface{}

	// built in steps
	gremlinTraversalStepG              struct{}
	gremlinTraversalStepV              struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepE              struct{}
	gremlinTraversalStepOut            struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepIn             struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepOutV           struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepInV            struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepOutE           struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepInE            struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepDedup          struct{}
	gremlinTraversalStepHas            struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepShortestPathTo struct{ params GremlinTraversalStepParams }
	gremlinTraversalStepBoth           struct{ params GremlinTraversalStepParams }
)

var (
	// ExecutionError is returned when a step is applied to an incompatible
	// previous result. NOTE(review): idiomatic Go would name this ErrExecution
	// with a lowercase message; renaming would break callers, so it is kept.
	ExecutionError error = errors.New("Error while executing the query")
)
// GremlinTraversalParser parses a textual Gremlin query read from Reader
// into a GremlinTraversalSequence resolved against Graph.
type GremlinTraversalParser struct {
	Graph   *Graph
	Reader  io.Reader
	scanner *GremlinTraversalScanner
	// buf is a one-token pushback buffer used by scan/unscan.
	buf struct {
		tok Token  // last scanned token
		lit string // last scanned literal
		n   int    // number of buffered tokens: 0 or 1
	}
	extensions []GremlinTraversalExtension
}
// Exec is a no-op for G: it is the traversal source and yields no result.
func (s *gremlinTraversalStepG) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	return nil, nil
}

// Exec runs the V step. With no parameter it selects all vertices; with a
// single string parameter it selects the vertex with that identifier.
func (s *gremlinTraversalStepV) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	g, ok := last.(*GraphTraversal)
	if !ok {
		return nil, ExecutionError
	}
	if len(s.params) == 0 {
		return g.V(), nil
	}
	if len(s.params) == 1 {
		if id, ok := s.params[0].(string); ok {
			return g.V(Identifier(id)), nil
		}
	}
	return nil, ExecutionError
}
// Exec applies the Has filter to the previous vertex or edge result.
func (s *gremlinTraversalStepHas) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	switch prev := last.(type) {
	case *GraphTraversalV:
		return prev.Has(s.params...), nil
	case *GraphTraversalE:
		return prev.Has(s.params...), nil
	}
	return nil, ExecutionError
}

// Exec applies the Dedup step to the previous vertex or edge result.
func (s *gremlinTraversalStepDedup) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	switch prev := last.(type) {
	case *GraphTraversalV:
		return prev.Dedup(), nil
	case *GraphTraversalE:
		return prev.Dedup(), nil
	}
	return nil, ExecutionError
}

// Exec applies the Out step; only valid after a vertex result.
func (s *gremlinTraversalStepOut) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalV); ok {
		return prev.Out(s.params...), nil
	}
	return nil, ExecutionError
}

// Exec applies the In step; only valid after a vertex result.
func (s *gremlinTraversalStepIn) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalV); ok {
		return prev.In(s.params...), nil
	}
	return nil, ExecutionError
}

// Exec applies the OutV step; only valid after an edge result.
func (s *gremlinTraversalStepOutV) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalE); ok {
		return prev.OutV(s.params...), nil
	}
	return nil, ExecutionError
}

// Exec applies the InV step; only valid after an edge result.
func (s *gremlinTraversalStepInV) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalE); ok {
		return prev.InV(s.params...), nil
	}
	return nil, ExecutionError
}

// Exec applies the OutE step; only valid after a vertex result.
func (s *gremlinTraversalStepOutE) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalV); ok {
		return prev.OutE(s.params...), nil
	}
	return nil, ExecutionError
}

// Exec applies the InE step; only valid after a vertex result.
func (s *gremlinTraversalStepInE) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalV); ok {
		return prev.InE(s.params...), nil
	}
	return nil, ExecutionError
}

// Exec applies the ShortestPathTo step with one or two Metadata parameters;
// only valid after a vertex result.
func (s *gremlinTraversalStepShortestPathTo) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalV); ok {
		if len(s.params) > 1 {
			return prev.ShortestPathTo(s.params[0].(Metadata), s.params[1].(Metadata)), nil
		}
		return prev.ShortestPathTo(s.params[0].(Metadata)), nil
	}
	return nil, ExecutionError
}

// Exec applies the Both step; only valid after a vertex result.
func (s *gremlinTraversalStepBoth) Exec(last GraphTraversalStep) (GraphTraversalStep, error) {
	if prev, ok := last.(*GraphTraversalV); ok {
		return prev.Both(s.params...), nil
	}
	return nil, ExecutionError
}
// nextStepToExec returns the step at index i (possibly fused with the
// following Has step as an optimisation) and the index of the next step to
// run, or -1 when the returned step is the last one.
//
// Optimisation: when the next step is a parameterless-compatible Has, its
// params are folded into the current traversal step and the Has is skipped.
func (s *GremlinTraversalSequence) nextStepToExec(i int) (GremlinTraversalStep, int) {
	step := s.steps[i]

	if i+1 < len(s.steps) {
		if has, ok := s.steps[i+1].(*gremlinTraversalStepHas); ok {
			switch cur := step.(type) {
			case *gremlinTraversalStepIn:
				if len(cur.params) == 0 {
					cur.params = has.params
					i++
				}
			case *gremlinTraversalStepOut:
				if len(cur.params) == 0 {
					cur.params = has.params
					i++
				}
			case *gremlinTraversalStepOutV:
				if len(cur.params) == 0 {
					cur.params = has.params
					i++
				}
			case *gremlinTraversalStepInV:
				if len(cur.params) == 0 {
					cur.params = has.params
					i++
				}
			case *gremlinTraversalStepOutE:
				if len(cur.params) == 0 {
					cur.params = has.params
					i++
				}
			case *gremlinTraversalStepInE:
				if len(cur.params) == 0 {
					cur.params = has.params
					i++
				}
			case *gremlinTraversalStepBoth:
				if len(cur.params) == 0 {
					cur.params = has.params
					i++
				}
			}
		}
	}

	i++
	// last element: return -1 to signal the end of the sequence
	if i == len(s.steps) {
		return step, -1
	}
	return step, i
}
// Exec runs every step of the sequence in order, feeding each step's result
// into the next one. The traversal starts from the GraphTraversal itself.
func (s *GremlinTraversalSequence) Exec() (GraphTraversalStep, error) {
	var step GremlinTraversalStep
	var last GraphTraversalStep
	var err error

	last = s.GraphTraversal

	// nextStepToExec returns -1 as the next index once the last step ran.
	for i := 0; i != -1; {
		step, i = s.nextStepToExec(i)
		if last, err = step.Exec(last); err != nil {
			return nil, err
		}
	}

	// The assertion fails (ok == false) when the final step produced a nil
	// interface value, turning that into an explicit execution error.
	res, ok := last.(GraphTraversalStep)
	if !ok {
		return nil, ExecutionError
	}

	return res, nil
}
// AddTraversalExtension registers a step extension that parserStep consults
// when a token matches no built-in step.
func (p *GremlinTraversalParser) AddTraversalExtension(e GremlinTraversalExtension) {
	p.extensions = append(p.extensions, e)
}

// NewGremlinTraversalParser returns a parser that reads a query from r and
// resolves it against g.
func NewGremlinTraversalParser(r io.Reader, g *Graph) *GremlinTraversalParser {
	return &GremlinTraversalParser{
		Graph:  g,
		Reader: r,
	}
}
// parserStepParams parses a parenthesized, comma-separated parameter list and
// returns the decoded values. Numbers decode as int64 when possible, falling
// back to float64; Metadata/Within/Without/Ne predicates parse recursively.
// Fix: the NE branch's error message previously said "EQ".
func (p *GremlinTraversalParser) parserStepParams() (GremlinTraversalStepParams, error) {
	tok, lit := p.scanIgnoreWhitespace()
	if tok != LEFT_PARENTHESIS {
		return nil, fmt.Errorf("Expected left parenthesis, got: %s", lit)
	}

	params := GremlinTraversalStepParams{}
	for tok, lit := p.scanIgnoreWhitespace(); tok != RIGHT_PARENTHESIS; {
		switch tok {
		case EOF:
			// NOTE(review): an unterminated parameter list is accepted
			// silently; confirm whether this should be a parse error.
			return params, nil
		case COMMA:
		case NUMBER:
			if i, err := strconv.ParseInt(lit, 10, 64); err == nil {
				params = append(params, i)
			} else {
				if f, err := strconv.ParseFloat(lit, 64); err == nil {
					params = append(params, f)
				} else {
					return nil, fmt.Errorf("Expected number token, got: %s", lit)
				}
			}
		case STRING:
			params = append(params, lit)
		case METADATA:
			metadataParams, err := p.parserStepParams()
			if err != nil {
				return nil, err
			}
			metadata, err := sliceToMetadata(metadataParams...)
			if err != nil {
				return nil, err
			}
			params = append(params, metadata)
		case WITHIN:
			withParams, err := p.parserStepParams()
			if err != nil {
				return nil, err
			}
			params = append(params, Within(withParams...))
		case WITHOUT:
			withParams, err := p.parserStepParams()
			if err != nil {
				return nil, err
			}
			params = append(params, Without(withParams...))
		case NE:
			withParams, err := p.parserStepParams()
			if err != nil {
				return nil, err
			}
			if len(withParams) != 1 {
				return nil, fmt.Errorf("One parameter expected to NE: %v", withParams)
			}
			params = append(params, Ne(withParams[0]))
		default:
			return nil, fmt.Errorf("Unexpected token while parsing parameters, got: %s", lit)
		}
		tok, lit = p.scanIgnoreWhitespace()
	}

	return params, nil
}
// parserStep scans one step keyword plus its parameter list and returns the
// matching GremlinTraversalStep. Built-in steps are tried first, then the
// registered extensions; an extension error aborts parsing immediately.
func (p *GremlinTraversalParser) parserStep() (GremlinTraversalStep, error) {
	tok, lit := p.scanIgnoreWhitespace()
	if tok == IDENT {
		// A bare identifier is not a recognised step keyword.
		return nil, fmt.Errorf("Expected step function, got: %s", lit)
	}

	if tok == G {
		// G is the traversal source and takes no parameters.
		return &gremlinTraversalStepG{}, nil
	}

	params, err := p.parserStepParams()
	if err != nil {
		return nil, err
	}

	// built in
	switch tok {
	case V:
		return &gremlinTraversalStepV{params: params}, nil
	case OUT:
		return &gremlinTraversalStepOut{params: params}, nil
	case IN:
		return &gremlinTraversalStepIn{params: params}, nil
	case OUTV:
		return &gremlinTraversalStepOutV{params: params}, nil
	case INV:
		return &gremlinTraversalStepInV{params: params}, nil
	case OUTE:
		return &gremlinTraversalStepOutE{params: params}, nil
	case INE:
		return &gremlinTraversalStepInE{params: params}, nil
	case DEDUP:
		return &gremlinTraversalStepDedup{}, nil
	case HAS:
		return &gremlinTraversalStepHas{params: params}, nil
	case SHORTESTPATHTO:
		if len(params) == 0 || len(params) > 2 {
			return nil, fmt.Errorf("ShortestPathTo predicate accept only 1 or 2 parameters")
		}
		return &gremlinTraversalStepShortestPathTo{params: params}, nil
	case BOTH:
		return &gremlinTraversalStepBoth{params: params}, nil
	}

	// extensions
	for _, e := range p.extensions {
		step, err := e.ParseStep(tok, params)
		if err != nil {
			return nil, err
		}
		if step != nil {
			return step, nil
		}
	}

	return nil, fmt.Errorf("Expected step function, got: %s", lit)
}
// Parse consumes the whole reader and returns the parsed traversal sequence.
// A query has the form G.step().step()... and must start with G.
func (p *GremlinTraversalParser) Parse() (*GremlinTraversalSequence, error) {
	p.scanner = NewGremlinTraversalScanner(p.Reader, p.extensions)

	// NOTE(review): NewGrahTraversal looks misspelled (NewGraphTraversal?);
	// it is defined elsewhere, so the name is kept as-is here.
	seq := &GremlinTraversalSequence{
		GraphTraversal: NewGrahTraversal(p.Graph),
		extensions:     p.extensions,
	}

	if tok, lit := p.scanIgnoreWhitespace(); tok != G {
		return nil, fmt.Errorf("found %q, expected G", lit)
	}

	// loop over all dot-delimited steps
	for {
		tok, lit := p.scanIgnoreWhitespace()
		if tok == EOF {
			break
		}

		if tok != DOT {
			return nil, fmt.Errorf("found %q, expected .", lit)
		}

		step, err := p.parserStep()
		if err != nil {
			return nil, err
		}
		seq.steps = append(seq.steps, step)
	}

	return seq, nil
}
// scan returns the next token, serving a buffered token first when unscan
// pushed one back.
func (p *GremlinTraversalParser) scan() (Token, string) {
	if p.buf.n != 0 {
		p.buf.n = 0
	} else {
		p.buf.tok, p.buf.lit = p.scanner.Scan()
	}
	return p.buf.tok, p.buf.lit
}

// scanIgnoreWhitespace returns the next non-whitespace token.
func (p *GremlinTraversalParser) scanIgnoreWhitespace() (Token, string) {
	for {
		tok, lit := p.scan()
		if tok != WS {
			return tok, lit
		}
	}
}
func (p *GremlinTraversalParser) unscan() {
p.buf.n = 1
} | topology/graph/traversal_parser.go | 0.672547 | 0.566558 | traversal_parser.go | starcoder |
package translatedassert
// OpADD evaluates x + y for any pair of identically-typed operands that
// support "+" (integers, floats, complex values, strings); it panics for
// any other left-operand type.
func OpADD(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a + y.(uint8)
	case uint16:
		return a + y.(uint16)
	case uint32:
		return a + y.(uint32)
	case uint64:
		return a + y.(uint64)
	case uint:
		return a + y.(uint)
	case int8:
		return a + y.(int8)
	case int16:
		return a + y.(int16)
	case int32:
		return a + y.(int32)
	case int64:
		return a + y.(int64)
	case int:
		return a + y.(int)
	case float32:
		return a + y.(float32)
	case float64:
		return a + y.(float64)
	case complex64:
		return a + y.(complex64)
	case complex128:
		return a + y.(complex128)
	case string:
		return a + y.(string)
	}
	panic("+ can take integers, floats, complex values, strings")
}
// OpSUB evaluates x - y for identically-typed integer, float, or complex
// operands; it panics for any other left-operand type.
// Fix: the panic message previously said "+" (copy-paste from OpADD).
func OpSUB(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a - y.(uint8)
	case uint16:
		return a - y.(uint16)
	case uint32:
		return a - y.(uint32)
	case uint64:
		return a - y.(uint64)
	case uint:
		return a - y.(uint)
	case int8:
		return a - y.(int8)
	case int16:
		return a - y.(int16)
	case int32:
		return a - y.(int32)
	case int64:
		return a - y.(int64)
	case int:
		return a - y.(int)
	case float32:
		return a - y.(float32)
	case float64:
		return a - y.(float64)
	case complex64:
		return a - y.(complex64)
	case complex128:
		return a - y.(complex128)
	}
	panic("- can take integers, floats, complex values")
}
// OpMUL evaluates x * y for identically-typed integer, float, or complex
// operands; it panics for any other left-operand type.
func OpMUL(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a * y.(uint8)
	case uint16:
		return a * y.(uint16)
	case uint32:
		return a * y.(uint32)
	case uint64:
		return a * y.(uint64)
	case uint:
		return a * y.(uint)
	case int8:
		return a * y.(int8)
	case int16:
		return a * y.(int16)
	case int32:
		return a * y.(int32)
	case int64:
		return a * y.(int64)
	case int:
		return a * y.(int)
	case float32:
		return a * y.(float32)
	case float64:
		return a * y.(float64)
	case complex64:
		return a * y.(complex64)
	case complex128:
		return a * y.(complex128)
	}
	panic("* can take integers, floats, complex values")
}

// OpQUO evaluates x / y for identically-typed integer, float, or complex
// operands; it panics for any other left-operand type.
func OpQUO(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a / y.(uint8)
	case uint16:
		return a / y.(uint16)
	case uint32:
		return a / y.(uint32)
	case uint64:
		return a / y.(uint64)
	case uint:
		return a / y.(uint)
	case int8:
		return a / y.(int8)
	case int16:
		return a / y.(int16)
	case int32:
		return a / y.(int32)
	case int64:
		return a / y.(int64)
	case int:
		return a / y.(int)
	case float32:
		return a / y.(float32)
	case float64:
		return a / y.(float64)
	case complex64:
		return a / y.(complex64)
	case complex128:
		return a / y.(complex128)
	}
	panic("/ can take integers, floats, complex values")
}

// OpREM evaluates x % y for identically-typed integer operands; it panics
// for any other left-operand type.
func OpREM(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a % y.(uint8)
	case uint16:
		return a % y.(uint16)
	case uint32:
		return a % y.(uint32)
	case uint64:
		return a % y.(uint64)
	case uint:
		return a % y.(uint)
	case int8:
		return a % y.(int8)
	case int16:
		return a % y.(int16)
	case int32:
		return a % y.(int32)
	case int64:
		return a % y.(int64)
	case int:
		return a % y.(int)
	}
	panic("% can take integers")
}
// OpAND evaluates x & y for identically-typed integer operands; it panics
// for any other left-operand type.
func OpAND(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a & y.(uint8)
	case uint16:
		return a & y.(uint16)
	case uint32:
		return a & y.(uint32)
	case uint64:
		return a & y.(uint64)
	case uint:
		return a & y.(uint)
	case int8:
		return a & y.(int8)
	case int16:
		return a & y.(int16)
	case int32:
		return a & y.(int32)
	case int64:
		return a & y.(int64)
	case int:
		return a & y.(int)
	}
	panic("& can take integers")
}

// OpOR evaluates x | y for identically-typed integer operands; it panics
// for any other left-operand type.
func OpOR(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a | y.(uint8)
	case uint16:
		return a | y.(uint16)
	case uint32:
		return a | y.(uint32)
	case uint64:
		return a | y.(uint64)
	case uint:
		return a | y.(uint)
	case int8:
		return a | y.(int8)
	case int16:
		return a | y.(int16)
	case int32:
		return a | y.(int32)
	case int64:
		return a | y.(int64)
	case int:
		return a | y.(int)
	}
	panic("| can take integers")
}

// OpXOR evaluates x ^ y for identically-typed integer operands; it panics
// for any other left-operand type.
func OpXOR(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a ^ y.(uint8)
	case uint16:
		return a ^ y.(uint16)
	case uint32:
		return a ^ y.(uint32)
	case uint64:
		return a ^ y.(uint64)
	case uint:
		return a ^ y.(uint)
	case int8:
		return a ^ y.(int8)
	case int16:
		return a ^ y.(int16)
	case int32:
		return a ^ y.(int32)
	case int64:
		return a ^ y.(int64)
	case int:
		return a ^ y.(int)
	}
	panic("^ can take integers")
}

// OpANDNOT evaluates x &^ y (bit clear) for identically-typed integer
// operands; it panics for any other left-operand type.
func OpANDNOT(x interface{}, y interface{}) interface{} {
	switch a := x.(type) {
	case uint8:
		return a &^ y.(uint8)
	case uint16:
		return a &^ y.(uint16)
	case uint32:
		return a &^ y.(uint32)
	case uint64:
		return a &^ y.(uint64)
	case uint:
		return a &^ y.(uint)
	case int8:
		return a &^ y.(int8)
	case int16:
		return a &^ y.(int16)
	case int32:
		return a &^ y.(int32)
	case int64:
		return a &^ y.(int64)
	case int:
		return a &^ y.(int)
	}
	panic("&^ can take integers")
}
// OpSHL evaluates x << y where x is any integer type and y is any unsigned
// integer type; the result has the type of x. It panics for any other
// operand types. The shift count is widened to uint64 first, which leaves
// the shift result unchanged (Go defines shifts by count, not count type).
func OpSHL(x interface{}, y interface{}) interface{} {
	var n uint64
	switch c := y.(type) {
	case uint8:
		n = uint64(c)
	case uint16:
		n = uint64(c)
	case uint32:
		n = uint64(c)
	case uint64:
		n = c
	case uint:
		n = uint64(c)
	default:
		panic("<< can take (left)integer, (right)unsigned integer")
	}
	switch v := x.(type) {
	case uint8:
		return v << n
	case uint16:
		return v << n
	case uint32:
		return v << n
	case uint64:
		return v << n
	case uint:
		return v << n
	case int8:
		return v << n
	case int16:
		return v << n
	case int32:
		return v << n
	case int64:
		return v << n
	case int:
		return v << n
	}
	panic("<< can take (left)integer, (right)unsigned integer")
}
// OpSHR evaluates x >> y where x is any integer type and y is any unsigned
// integer type; the result has the type of x (arithmetic shift for signed
// x). It panics for any other operand types. The shift count is widened to
// uint64 first, which leaves the shift result unchanged.
func OpSHR(x interface{}, y interface{}) interface{} {
	var n uint64
	switch c := y.(type) {
	case uint8:
		n = uint64(c)
	case uint16:
		n = uint64(c)
	case uint32:
		n = uint64(c)
	case uint64:
		n = c
	case uint:
		n = uint64(c)
	default:
		panic(">> can take (left)integer, (right)unsigned integer")
	}
	switch v := x.(type) {
	case uint8:
		return v >> n
	case uint16:
		return v >> n
	case uint32:
		return v >> n
	case uint64:
		return v >> n
	case uint:
		return v >> n
	case int8:
		return v >> n
	case int16:
		return v >> n
	case int32:
		return v >> n
	case int64:
		return v >> n
	case int:
		return v >> n
	}
	panic(">> can take (left)integer, (right)unsigned integer")
}
// OpLAND evaluates x && y for bool operands; it panics for any other
// left-operand type.
func OpLAND(x interface{}, y interface{}) bool {
	if a, ok := x.(bool); ok {
		return a && y.(bool)
	}
	panic("&& can take bool")
}
// OpLOR evaluates x || y for bool operands; it panics for any other
// left-operand type.
// Fixes: garbled panic message ("|| can bool") corrected; dataset separator
// residue fused onto the closing brace removed.
func OpLOR(x interface{}, y interface{}) bool {
	if a, ok := x.(bool); ok {
		return a || y.(bool)
	}
	panic("|| can take bool")
}
package fb
import (
"errors"
"image"
"image/color"
"image/draw"
)
// NewMonochrome returns a new Monochrome image with the given bounds and
// stride. If stride is zero, a working stride of ceil(width/8) bytes per
// row is computed.
func NewMonochrome(r image.Rectangle, stride int) *Monochrome {
	if stride == 0 {
		stride = (r.Dx() + 7) / 8
	}
	return &Monochrome{
		Pix:    make([]uint8, stride*r.Dy()),
		Stride: stride,
		Rect:   r,
	}
}
// NewMonochromeWith returns a new Monochrome image with the given bounds
// and stride, backed by the []byte, pix. If stride is zero, a working
// stride is computed. If the length of pix is less than stride*h, an
// error is returned. The buffer is used as-is (not copied), so the caller
// shares ownership of the pixel data.
func NewMonochromeWith(pix []byte, r image.Rectangle, stride int) (draw.Image, error) {
	w, h := r.Dx(), r.Dy()
	if stride == 0 {
		stride = (w + 7) / 8
	}
	if len(pix) < stride*h {
		// NOTE(review): the "ev3dev:" message prefix looks inherited from
		// another package; confirm whether it should read "fb:".
		return nil, errors.New("ev3dev: bad pixel buffer length")
	}
	return &Monochrome{Pix: pix, Stride: stride, Rect: r}, nil
}
// Monochrome is an in-memory image whose At method returns Pixel values.
type Monochrome struct {
// Pix holds the image's pixels, as bit values.
// The pixel at (x, y) is the x%8^th bit in
// Pix[(x-Rect.Min.X)/8 + (y-Rect.Min.Y)*Stride].
Pix []uint8
// Stride is the Pix stride (in bytes) between
// vertically adjacent pixels.
Stride int
// Rect is the image's bounds.
Rect image.Rectangle
}
// ColorModel returns the monochrome color model.
func (p *Monochrome) ColorModel() color.Model { return MonochromeModel }
// Bounds returns the bounding rectangle for the image.
func (p *Monochrome) Bounds() image.Rectangle { return p.Rect }
// At returns the color of the pixel at (x, y).
func (p *Monochrome) At(x, y int) color.Color {
if !(image.Point{x, y}.In(p.Rect)) {
return Pixel(White)
}
i := p.pixOffset(x, y)
return Pixel(p.Pix[i]&(1<<uint(x%8)) != 0)
}
// Set sets the color of the pixel at (x, y) to c.
func (p *Monochrome) Set(x, y int, c color.Color) {
if !(image.Point{x, y}.In(p.Rect)) {
return
}
i := p.pixOffset(x, y)
if MonochromeModel.Convert(c).(Pixel) == Black {
p.Pix[i] |= 1 << uint(x%8)
} else {
p.Pix[i] &^= 1 << uint(x%8)
}
}
// pixOffset returns the index into p.Pix for the byte
// containing bit describing the pixel at (x, y).
func (p *Monochrome) pixOffset(x, y int) int {
return (x-p.Rect.Min.X)/8 + (y-p.Rect.Min.Y)*p.Stride
}
// Pixel is a black and white monochrome pixel.
type Pixel bool
const (
Black Pixel = true
White Pixel = false
)
// RGBA returns the RGBA values for the receiver.
func (c Pixel) RGBA() (r, g, b, a uint32) {
if c == Black {
return 0, 0, 0, 0xffff
}
return 0xffff, 0xffff, 0xffff, 0xffff
}
// MonochromeModel is the color model for black and white images.
var MonochromeModel color.Model = color.ModelFunc(monoModel)
func monoModel(c color.Color) color.Color {
if _, ok := c.(Pixel); ok {
return c
}
r, g, b, _ := c.RGBA()
y := (299*r + 587*g + 114*b + 500) / 1000
return Pixel(uint16(y) < 0x8000)
} | fb/mono.go | 0.869811 | 0.500427 | mono.go | starcoder |
package pinapi
import (
"encoding/json"
)
// SpecialsFixturesLeague groups the special fixtures belonging to one league.
// Both fields are optional pointers so that unset values are omitted from JSON.
type SpecialsFixturesLeague struct {
	// Id is the FixturesLeague Id.
	Id *int `json:"id,omitempty"`
	// Specials is a collection of Specials.
	Specials *[]SpecialFixture `json:"specials,omitempty"`
}
// NewSpecialsFixturesLeague instantiates a new SpecialsFixturesLeague object.
// Properties with defined defaults are assigned; required properties must be
// set by the caller.
func NewSpecialsFixturesLeague() *SpecialsFixturesLeague {
	return &SpecialsFixturesLeague{}
}

// NewSpecialsFixturesLeagueWithDefaults instantiates a new
// SpecialsFixturesLeague object with only defaulted properties assigned.
func NewSpecialsFixturesLeagueWithDefaults() *SpecialsFixturesLeague {
	return &SpecialsFixturesLeague{}
}
// GetId returns the Id field value if set, and the zero value otherwise.
func (o *SpecialsFixturesLeague) GetId() int {
	if o != nil && o.Id != nil {
		return *o.Id
	}
	return 0
}

// GetIdOk returns the Id field pointer if set, plus a flag reporting
// whether the value has been set.
func (o *SpecialsFixturesLeague) GetIdOk() (*int, bool) {
	if o != nil && o.Id != nil {
		return o.Id, true
	}
	return nil, false
}

// HasId reports whether the Id field has been set.
func (o *SpecialsFixturesLeague) HasId() bool {
	return o != nil && o.Id != nil
}

// SetId stores a reference to the given int in the Id field.
func (o *SpecialsFixturesLeague) SetId(v int) {
	o.Id = &v
}

// GetSpecials returns the Specials field value if set, and the zero value
// (nil slice) otherwise.
func (o *SpecialsFixturesLeague) GetSpecials() []SpecialFixture {
	if o != nil && o.Specials != nil {
		return *o.Specials
	}
	return nil
}

// GetSpecialsOk returns the Specials field pointer if set, plus a flag
// reporting whether the value has been set.
func (o *SpecialsFixturesLeague) GetSpecialsOk() (*[]SpecialFixture, bool) {
	if o != nil && o.Specials != nil {
		return o.Specials, true
	}
	return nil, false
}

// HasSpecials reports whether the Specials field has been set.
func (o *SpecialsFixturesLeague) HasSpecials() bool {
	return o != nil && o.Specials != nil
}

// SetSpecials stores a reference to the given slice in the Specials field.
func (o *SpecialsFixturesLeague) SetSpecials(v []SpecialFixture) {
	o.Specials = &v
}
// MarshalJSON encodes only the fields that have been set.
func (o SpecialsFixturesLeague) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{})
	if o.Id != nil {
		out["id"] = o.Id
	}
	if o.Specials != nil {
		out["specials"] = o.Specials
	}
	return json.Marshal(out)
}
type NullableSpecialsFixturesLeague struct {
value *SpecialsFixturesLeague
isSet bool
}
func (v NullableSpecialsFixturesLeague) Get() *SpecialsFixturesLeague {
return v.value
}
func (v *NullableSpecialsFixturesLeague) Set(val *SpecialsFixturesLeague) {
v.value = val
v.isSet = true
}
func (v NullableSpecialsFixturesLeague) IsSet() bool {
return v.isSet
}
func (v *NullableSpecialsFixturesLeague) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableSpecialsFixturesLeague(val *SpecialsFixturesLeague) *NullableSpecialsFixturesLeague {
return &NullableSpecialsFixturesLeague{value: val, isSet: true}
}
func (v NullableSpecialsFixturesLeague) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableSpecialsFixturesLeague) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | pinapi/model_specials_fixtures_league.go | 0.732113 | 0.596815 | model_specials_fixtures_league.go | starcoder |
package bfv
import (
"github.com/ldsec/lattigo/ring"
)
// Operand is a common interface for Ciphertext and Plaintext.
type Operand interface {
	// Element returns the underlying bfvElement.
	Element() *bfvElement
	// Degree returns the element's degree (number of polynomials minus one).
	Degree() uint64
}

// bfvElement is a common struct for Plaintexts and Ciphertexts. It stores a value
// as a slice of polynomials, and an isNTT flag that indicates if the element is in the NTT domain.
type bfvElement struct {
	value []*ring.Poly
	isNTT bool
}
// newBfvElement creates a new bfvElement of the target degree with zero
// values. It panics when params has not been properly generated.
func newBfvElement(params *Parameters, degree uint64) *bfvElement {
	if !params.isValid {
		panic("cannot newBfvElement: params not valid (check if they were generated properly)")
	}
	values := make([]*ring.Poly, degree+1)
	for i := range values {
		values[i] = ring.NewPoly(1<<params.LogN, uint64(len(params.LogQi)))
	}
	return &bfvElement{value: values, isNTT: true}
}

// newBfvElementRandom creates a new bfvElement of the target degree whose
// polynomial coefficients are sampled uniformly at random. It panics when
// params has not been properly generated.
func newBfvElementRandom(params *Parameters, degree uint64) *bfvElement {
	if !params.isValid {
		panic("cannot newBfvElementRandom: params not valid (check if they were generated properly)")
	}
	values := make([]*ring.Poly, degree+1)
	for i := range values {
		values[i] = ring.NewPolyUniform(1<<params.LogN, uint64(len(params.LogQi)))
	}
	return &bfvElement{value: values, isNTT: true}
}
// Value returns the value of the target bfvElement (as a slice of polynomials
// in CRT form).
func (el *bfvElement) Value() []*ring.Poly {
	return el.value
}

// SetValue assigns the input slice of polynomials to the target bfvElement.
func (el *bfvElement) SetValue(value []*ring.Poly) {
	el.value = value
}

// Degree returns the degree of the target bfvElement (its number of
// polynomials minus one).
func (el *bfvElement) Degree() uint64 {
	return uint64(len(el.value) - 1)
}

// Resize adjusts the element to the given degree: surplus polynomials are
// truncated when shrinking, and freshly allocated zero polynomials (sized
// according to params) are appended when growing.
func (el *bfvElement) Resize(params *Parameters, degree uint64) {
	switch {
	case el.Degree() > degree:
		el.value = el.value[:degree+1]
	case el.Degree() < degree:
		for el.Degree() < degree {
			poly := new(ring.Poly)
			poly.Coeffs = make([][]uint64, len(params.LogQi))
			for i := range poly.Coeffs {
				poly.Coeffs[i] = make([]uint64, uint64(1<<params.LogN))
			}
			el.value = append(el.value, poly)
		}
	}
}

// IsNTT returns true if the target bfvElement is in the NTT domain.
func (el *bfvElement) IsNTT() bool {
	return el.isNTT
}

// SetIsNTT assigns the input Boolean value to the isNTT flag.
func (el *bfvElement) SetIsNTT(value bool) {
	el.isNTT = value
}
// CopyNew returns a deep copy of the target bfvElement: each polynomial is
// copied and the isNTT flag is preserved.
func (el *bfvElement) CopyNew() *bfvElement {
	cp := &bfvElement{
		value: make([]*ring.Poly, len(el.value)),
		isNTT: el.isNTT,
	}
	for i, p := range el.value {
		cp.value[i] = p.CopyNew()
	}
	return cp
}
// Copy copies the value and the isNTT flag of ctxCopy onto the receiver.
// Copying onto itself is a no-op.
//
// NOTE(review): the loop iterates over ctxCopy's polynomials and indexes the
// receiver with the same i, so a receiver of smaller degree would panic —
// confirm callers always pass elements of matching degree.
func (el *bfvElement) Copy(ctxCopy *bfvElement) {
	if el != ctxCopy {
		for i := range ctxCopy.Value() {
			el.Value()[i].Copy(ctxCopy.Value()[i])
		}
		el.isNTT = ctxCopy.isNTT
	}
}
// NTT writes the NTT transform of el's polynomials into c and marks c as
// being in the NTT domain. It is a no-op when el is already in the NTT
// domain. Both elements must have the same degree.
func (el *bfvElement) NTT(context *ring.Context, c *bfvElement) {
	if el.Degree() != c.Degree() {
		panic("cannot NTT: receiver element invalid degree (degrees do not match)")
	}
	if el.IsNTT() {
		return
	}
	for i := range el.value {
		context.NTT(el.Value()[i], c.Value()[i])
	}
	c.SetIsNTT(true)
}

// InvNTT writes the inverse NTT transform of el's polynomials into c and
// marks c as being outside the NTT domain. It is a no-op when el is already
// outside the NTT domain. Both elements must have the same degree.
func (el *bfvElement) InvNTT(context *ring.Context, c *bfvElement) {
	if el.Degree() != c.Degree() {
		panic("cannot InvNTT: receiver element invalid degree (degrees do not match)")
	}
	if !el.IsNTT() {
		return
	}
	for i := range el.value {
		context.InvNTT(el.Value()[i], c.Value()[i])
	}
	c.SetIsNTT(false)
}
func (el *bfvElement) Element() *bfvElement {
return el
}
func (el *bfvElement) Ciphertext() *Ciphertext {
return &Ciphertext{el}
}
func (el *bfvElement) Plaintext() *Plaintext {
return &Plaintext{el, el.value[0]}
} | bfv/operand.go | 0.736116 | 0.428353 | operand.go | starcoder |
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "strings"
// The functions below are compiler regression tests for the SSA "fuse" pass:
// each one checks that a pair of comparison-guarded branches is (or is not)
// merged as annotated. The trailing "// ERROR ..." comments are matched by
// the errorcheck harness against compiler diagnostics on those exact lines,
// so neither the code lines nor the ERROR comments may be edited or moved.
const Cf2 = 2.0

func fEqEq(a int, f float64) bool {
	return a == 0 && f > Cf2 || a == 0 && f < -Cf2 // ERROR "Redirect Eq64 based on Eq64$"
}

func fEqNeq(a int32, f float64) bool {
	return a == 0 && f > Cf2 || a != 0 && f < -Cf2 // ERROR "Redirect Neq32 based on Eq32$"
}

func fEqLess(a int8, f float64) bool {
	return a == 0 && f > Cf2 || a < 0 && f < -Cf2
}

func fEqLeq(a float64, f float64) bool {
	return a == 0 && f > Cf2 || a <= 0 && f < -Cf2
}

func fEqLessU(a uint, f float64) bool {
	return a == 0 && f > Cf2 || a < 0 && f < -Cf2
}

func fEqLeqU(a uint64, f float64) bool {
	return a == 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq64U based on Eq64$"
}

func fNeqEq(a int, f float64) bool {
	return a != 0 && f > Cf2 || a == 0 && f < -Cf2 // ERROR "Redirect Eq64 based on Neq64$"
}

func fNeqNeq(a int32, f float64) bool {
	return a != 0 && f > Cf2 || a != 0 && f < -Cf2 // ERROR "Redirect Neq32 based on Neq32$"
}

func fNeqLess(a float32, f float64) bool {
	// TODO: Add support for floating point numbers in prove
	return a != 0 && f > Cf2 || a < 0 && f < -Cf2
}

func fNeqLeq(a int16, f float64) bool {
	return a != 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq16 based on Neq16$"
}

func fNeqLessU(a uint, f float64) bool {
	return a != 0 && f > Cf2 || a < 0 && f < -Cf2
}

func fNeqLeqU(a uint32, f float64) bool {
	return a != 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq32U based on Neq32$"
}

func fLessEq(a int, f float64) bool {
	return a < 0 && f > Cf2 || a == 0 && f < -Cf2
}

func fLessNeq(a int32, f float64) bool {
	return a < 0 && f > Cf2 || a != 0 && f < -Cf2
}

func fLessLess(a float32, f float64) bool {
	return a < 0 && f > Cf2 || a < 0 && f < -Cf2 // ERROR "Redirect Less32F based on Less32F$"
}

func fLessLeq(a float64, f float64) bool {
	return a < 0 && f > Cf2 || a <= 0 && f < -Cf2
}

func fLeqEq(a float64, f float64) bool {
	return a <= 0 && f > Cf2 || a == 0 && f < -Cf2
}

func fLeqNeq(a int16, f float64) bool {
	return a <= 0 && f > Cf2 || a != 0 && f < -Cf2 // ERROR "Redirect Neq16 based on Leq16$"
}

func fLeqLess(a float32, f float64) bool {
	return a <= 0 && f > Cf2 || a < 0 && f < -Cf2
}

func fLeqLeq(a int8, f float64) bool {
	return a <= 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq8 based on Leq8$"
}

func fLessUEq(a uint8, f float64) bool {
	return a < 0 && f > Cf2 || a == 0 && f < -Cf2
}

func fLessUNeq(a uint16, f float64) bool {
	return a < 0 && f > Cf2 || a != 0 && f < -Cf2
}

func fLessULessU(a uint32, f float64) bool {
	return a < 0 && f > Cf2 || a < 0 && f < -Cf2
}

func fLessULeqU(a uint64, f float64) bool {
	return a < 0 && f > Cf2 || a <= 0 && f < -Cf2
}

func fLeqUEq(a uint8, f float64) bool {
	return a <= 0 && f > Cf2 || a == 0 && f < -Cf2 // ERROR "Redirect Eq8 based on Leq8U$"
}

func fLeqUNeq(a uint16, f float64) bool {
	return a <= 0 && f > Cf2 || a != 0 && f < -Cf2 // ERROR "Redirect Neq16 based on Leq16U$"
}

func fLeqLessU(a uint32, f float64) bool {
	return a <= 0 && f > Cf2 || a < 0 && f < -Cf2
}

func fLeqLeqU(a uint64, f float64) bool {
	return a <= 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq64U based on Leq64U$"
}

// Arg tests are disabled because the op name is different on amd64 and arm64.

func fEqPtrEqPtr(a, b *int, f float64) bool {
	return a == b && f > Cf2 || a == b && f < -Cf2 // ERROR "Redirect EqPtr based on EqPtr$"
}

func fEqPtrNeqPtr(a, b *int, f float64) bool {
	return a == b && f > Cf2 || a != b && f < -Cf2 // ERROR "Redirect NeqPtr based on EqPtr$"
}

func fNeqPtrEqPtr(a, b *int, f float64) bool {
	return a != b && f > Cf2 || a == b && f < -Cf2 // ERROR "Redirect EqPtr based on NeqPtr$"
}

func fNeqPtrNeqPtr(a, b *int, f float64) bool {
	return a != b && f > Cf2 || a != b && f < -Cf2 // ERROR "Redirect NeqPtr based on NeqPtr$"
}

func fEqInterEqInter(a interface{}, f float64) bool {
	return a == nil && f > Cf2 || a == nil && f < -Cf2 // ERROR "Redirect IsNonNil based on IsNonNil$"
}

func fEqInterNeqInter(a interface{}, f float64) bool {
	return a == nil && f > Cf2 || a != nil && f < -Cf2
}

func fNeqInterEqInter(a interface{}, f float64) bool {
	return a != nil && f > Cf2 || a == nil && f < -Cf2
}

func fNeqInterNeqInter(a interface{}, f float64) bool {
	return a != nil && f > Cf2 || a != nil && f < -Cf2 // ERROR "Redirect IsNonNil based on IsNonNil$"
}

func fEqSliceEqSlice(a []int, f float64) bool {
	return a == nil && f > Cf2 || a == nil && f < -Cf2 // ERROR "Redirect IsNonNil based on IsNonNil$"
}

func fEqSliceNeqSlice(a []int, f float64) bool {
	return a == nil && f > Cf2 || a != nil && f < -Cf2
}

func fNeqSliceEqSlice(a []int, f float64) bool {
	return a != nil && f > Cf2 || a == nil && f < -Cf2
}

func fNeqSliceNeqSlice(a []int, f float64) bool {
	return a != nil && f > Cf2 || a != nil && f < -Cf2 // ERROR "Redirect IsNonNil based on IsNonNil$"
}

func fPhi(a, b string) string {
	aslash := strings.HasSuffix(a, "/") // ERROR "Redirect Phi based on Phi$"
	bslash := strings.HasPrefix(b, "/")
	switch {
	case aslash && bslash:
		return a + b[1:]
	case !aslash && !bslash:
		return a + "/" + b
	}
	return a + b
}
// main is intentionally empty: the errorcheck harness only compiles this file,
// it never runs it, but a main function is required for package main.
func main() {
}
package sticking
import (
"github.com/FelixDux/imposcg/dynamics/impact"
"github.com/FelixDux/imposcg/dynamics/forcingphase"
"github.com/FelixDux/imposcg/dynamics/parameters"
"math"
)
// ReleaseImpact is the result of Sticking.CheckImpact: Impact is either the
// original impact passed through unchanged, or (when NewImpact is true) a
// fresh zero-velocity impact generated at the release time.
type ReleaseImpact struct {
	NewImpact bool
	Impact    impact.Impact
}

// Sticking describes the wrap-around window of forcing phases
// [PhaseIn, 1) U [0, PhaseOut) during which a zero-velocity impact sticks
// to the obstacle (see phaseSticks).
type Sticking struct {
	PhaseIn   float64
	PhaseOut  float64
	Converter forcingphase.PhaseConverter
	Generator impact.Generator
}
// NewSticking derives the sticking phase window from the obstacle offset and
// forcing frequency. Three regimes:
//   - offset >= 1: sticking never occurs (PhaseIn == PhaseOut == 0)
//   - offset <= -1 or zero forcing frequency: sticking occurs at all phases
//   - otherwise the window boundaries are the two phases at which the forcing
//     reaches the obstacle offset (via Acos of the offset)
//
// NOTE(review): in the first two regimes the returned Sticking carries a zero
// Converter and a nil Generator, so methods that use them (TimeSticks,
// CheckImpact on a sticking impact) would misbehave — confirm callers handle
// those regimes separately.
func NewSticking(parameters parameters.Parameters) (*Sticking, error) {
	var phaseIn float64
	var phaseOut float64
	if (1.0 <= parameters.ObstacleOffset) {
		// No sticking
		phaseIn = 0.0
		phaseOut = 0.0
	} else if -1.0 >= parameters.ObstacleOffset || parameters.ForcingFrequency == 0.0 {
		// Sticking for all phases
		phaseIn = 1.0
		phaseOut = 0.0
	} else {
		converter, err := forcingphase.NewPhaseConverter(parameters.ForcingFrequency)
		if err == nil {
			// (safe to divide by ForcingFrequency: the zero case was trapped above)
			angle := math.Acos(parameters.ObstacleOffset)
			phase1 := converter.TimeToPhase(angle / parameters.ForcingFrequency)
			phase2 := 1.0 - phase1
			// NOTE(review): Acos returns angles in [0, pi], where Sin is
			// non-negative, so the first branch below appears unreachable —
			// confirm the intended orientation of the window.
			if (math.Sin(angle) < 0.0) {
				phaseIn = phase1
				phaseOut = phase2
			} else {
				phaseIn = phase2
				phaseOut = phase1
			}
			return &Sticking{PhaseIn: phaseIn, PhaseOut: phaseOut, Converter: *converter, Generator: impact.ImpactGenerator(*converter)}, nil
		} else {
			return nil, err
		}
	}
	return &Sticking{PhaseIn: phaseIn, PhaseOut: phaseOut}, nil
}
// never reports that sticking cannot occur at any phase (empty window).
func (sticking Sticking) never() bool {
	return sticking.PhaseIn == sticking.PhaseOut
}

// always reports that sticking occurs at every phase (full window).
func (sticking Sticking) always() bool {
	return sticking.PhaseIn == 1.0 && sticking.PhaseOut == 0.0
}

// phaseSticks reports whether the given forcing phase lies inside the
// wrap-around sticking window [PhaseIn, 1) U [0, PhaseOut).
func (sticking Sticking) phaseSticks(phase float64) bool {
	switch {
	case sticking.never():
		return false
	case sticking.always():
		return true
	default:
		return phase < sticking.PhaseOut || phase >= sticking.PhaseIn
	}
}
// TimeSticks reports whether an impact at the given absolute time falls
// inside the sticking window (the time is first reduced to a forcing phase).
func (sticking Sticking) TimeSticks(time float64) bool {
	return sticking.phaseSticks(sticking.Converter.TimeToPhase(time))
}

// releaseTime maps a sticking time to the release time — presumably the next
// time at which the forcing phase equals PhaseOut (confirm against
// PhaseConverter.ForwardToPhase semantics).
func (sticking Sticking) releaseTime(time float64) float64 {
	return sticking.Converter.ForwardToPhase(time, sticking.PhaseOut)
}
func (sticking Sticking) CheckImpact(impact impact.Impact) *ReleaseImpact {
if (impact.Velocity == 0.0 && sticking.phaseSticks(impact.Phase) && !sticking.always()) {
return &ReleaseImpact{NewImpact: true, Impact: *sticking.Generator(sticking.releaseTime(impact.Time), 0.0)}
} else {
return &ReleaseImpact{NewImpact: false, Impact: impact}
}
} | dynamics/sticking/sticking.go | 0.687 | 0.492737 | sticking.go | starcoder |
package govector
import (
"math"
)
// Equal determines if two vectors have the same values.
func Equal(a, b Vector) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
// CosineSimilarity calculates the cosine similarity of two vectors.
//
// NOTE(review): the loop computes only the dot product of a and b — there is
// no division by the vectors' magnitudes — so the result equals the true
// cosine similarity only when both inputs are unit-length. Confirm callers
// normalize their vectors before calling.
func CosineSimilarity(a, b Vector) (float64, error) {
	var sum float64

	if len(a) != len(b) {
		return sum, ErrorVectorLengths
	}

	var x, y int
	for n := len(a); n > 0; n-- {
		sum += a[x] * b[y]
		x++
		y++
	}
	return sum, nil
}
// EuclideanDistance returns the L2 (euclidean) distance between two
// equal-length vectors, or ErrorVectorLengths when the lengths differ.
func EuclideanDistance(a, b Vector) (float64, error) {
	if len(a) != len(b) {
		return 0, ErrorVectorLengths
	}
	var sum float64
	for i := range a {
		sum += math.Pow(a[i]-b[i], 2.0)
	}
	return math.Sqrt(sum), nil
}

// ManhattenDistance returns the L1 (taxicab) distance between two
// equal-length vectors, or ErrorVectorLengths when the lengths differ.
// (The conventional spelling is "Manhattan"; the exported name is kept for
// backward compatibility.)
func ManhattenDistance(a, b Vector) (float64, error) {
	if len(a) != len(b) {
		return 0, ErrorVectorLengths
	}
	var sum float64
	for i := range a {
		sum += math.Abs(a[i] - b[i])
	}
	return sum, nil
}

// MinowskiDistance returns the Minkowski distance of order pow between two
// equal-length vectors, or ErrorVectorLengths when the lengths differ.
func MinowskiDistance(a, b Vector, pow int) (float64, error) {
	if len(a) != len(b) {
		return 0, ErrorVectorLengths
	}
	var sum float64
	for i := range a {
		sum += math.Pow(math.Abs(a[i]-b[i]), float64(pow))
	}
	return NthRoot(sum, pow), nil
}
// NthRoot approximates the nth root of a using Newton's method, starting from
// the guess a/n. It returns immediately if an iterate is an exact root;
// otherwise it performs a fixed 20 refinement steps and returns the result.
func NthRoot(a float64, n int) float64 {
	fn := float64(n)
	z := a / fn
	for iter := 0; iter < 20; iter++ {
		p := math.Pow(z, fn)
		if p == a {
			return z
		}
		z -= (p - a) / (fn * math.Pow(z, fn-1))
	}
	return z
}
// Probability calculates the the probability two vectors are the same.
// Use for face vectors, mathematically if the probability is 0.85 or
// greater it most likely the same person.
func Probability(a, b Vector) (float64, error) {
if len(a) != len(b) {
return 0, ErrorVectorLengths
}
dist, _ := EuclideanDistance(a, b)
return (1 - (dist / 4)), nil
} | similarity.go | 0.798423 | 0.600745 | similarity.go | starcoder |
package physics
// Shape is the interface implemented by every physics shape (Rectangle, Circle).
type Shape interface {
	Pos() Vec2
	SetPos(Vec2)
	UpdatePos()
	Width() float64
	Height() float64
	Center() Vec2
	SetCenter(Vec2)
	Velocity() Vec2
	SetVelocity(Vec2)
	MaxVel() Vec2
	SetMaxVel(Vec2)
	Accel() Vec2
	SetAccel(Vec2)
	MaxAccel() Vec2
	SetMaxAccel(Vec2)
	Gravity() Vec2
	SetGravity(Vec2)
	IsGrounded() bool
	SetGrounded(bool)
	IsStatic() bool
	SetStatic(bool)
	IsSolid() bool
	SetSolid(bool)
	Friction() float64
	SetFriction(float64)
	InvMass() float64
	Elasticity() float64
	SetElasticity(float64)
	ShapeName() string
	Name() string
	Tags() []string
	SetTags([]string)
}

// BasicShape is a generic shape holding the state shared by all concrete
// shapes. Kind points back to the embedding concrete shape so that dispatch
// (e.g. SetPos inside UpdatePos) reaches the concrete implementation.
type BasicShape struct {
	Kind       Shape
	pos        Vec2
	velocity   Vec2
	accel      Vec2
	gravity    Vec2
	maxVel     Vec2 // maximum velocity (per axis)
	maxAccel   Vec2 // maximum acceleration (per axis)
	grounded   bool
	static     bool
	solid      bool
	mass       float64
	invMass    float64
	elasticity float64
	friction   float64
	name       string
	tags       []string
}
// Pos returns the shape's position.
func (s *BasicShape) Pos() Vec2 {
	return s.pos
}

// UpdatePos advances the position by one simulation step: acceleration and
// gravity are folded into the velocity (both clamped to their configured
// maxima), the position is moved by the resulting velocity, and the grounded
// flag is reset. SetPos is dispatched through Kind because Circle must also
// update its cached center.
func (s *BasicShape) UpdatePos() {
	s.clampAccel()
	s.velocity = s.velocity.Add(s.accel).Add(s.gravity)
	s.clampVelocity()
	s.Kind.SetPos(s.Pos().Add(s.Velocity()))
	s.SetGrounded(false)
}
// clampVelocity restricts the velocity to [-maxVel, maxVel] per axis.
// If either component of maxVel is non-positive, no clamping is applied.
func (s *BasicShape) clampVelocity() {
	limit := s.maxVel
	if limit.X <= 0 || limit.Y <= 0 {
		return
	}
	s.velocity.X = Max(-limit.X, Min(s.velocity.X, limit.X))
	s.velocity.Y = Max(-limit.Y, Min(s.velocity.Y, limit.Y))
}

// Velocity returns the shape's velocity.
func (s *BasicShape) Velocity() Vec2 {
	return s.velocity
}

// SetVelocity sets the velocity to v.
func (s *BasicShape) SetVelocity(v Vec2) {
	s.velocity = v
}

// MaxVel returns the shape's maximum velocity.
func (s *BasicShape) MaxVel() Vec2 {
	return s.maxVel
}

// SetMaxVel sets the maximum velocity to v.
func (s *BasicShape) SetMaxVel(v Vec2) {
	s.maxVel = v
}
// //AddForce adds a vector (Vec2.Add()) to the existing force vector (initialised to 0.0)
// // the vector is reset to 0 on each tick
// func (s *BasicShape) AddForce(v Vec2) {
// 	s.force.Add(v)
// }
// clampAccel restricts the acceleration to [-maxAccel, maxAccel] per axis.
// If either component of maxAccel is non-positive, no clamping is applied.
func (s *BasicShape) clampAccel() {
	limit := s.maxAccel
	if limit.X <= 0 || limit.Y <= 0 {
		return
	}
	s.accel.X = Max(-limit.X, Min(s.accel.X, limit.X))
	s.accel.Y = Max(-limit.Y, Min(s.accel.Y, limit.Y))
}

// Accel returns the current acceleration.
func (s *BasicShape) Accel() Vec2 {
	return s.accel
}

// SetAccel sets the acceleration to a.
func (s *BasicShape) SetAccel(a Vec2) {
	s.accel = a
}

// MaxAccel returns the maximum acceleration.
func (s *BasicShape) MaxAccel() Vec2 {
	return s.maxAccel
}

// SetMaxAccel sets the maximum acceleration to a.
func (s *BasicShape) SetMaxAccel(a Vec2) {
	s.maxAccel = a
}

// Gravity returns the gravity applied to this shape.
func (s *BasicShape) Gravity() Vec2 {
	return s.gravity
}

// IsGrounded reports whether the shape is currently on the ground.
func (s *BasicShape) IsGrounded() bool {
	return s.grounded
}

// SetGrounded sets the grounded flag to b.
func (s *BasicShape) SetGrounded(b bool) {
	s.grounded = b
}

// SetGravity sets the gravity to g.
func (s *BasicShape) SetGravity(g Vec2) {
	s.gravity = g
}
// SetMass sets the mass and caches its inverse. A non-positive mass yields an
// inverse of zero (conventionally treated as an immovable body).
func (s *BasicShape) SetMass(mass float64) {
	s.mass = mass
	s.invMass = 0
	if mass > 0 {
		s.invMass = 1 / mass
	}
}

// InvMass returns the cached inverse mass (0 when the mass is non-positive).
func (s *BasicShape) InvMass() float64 {
	return s.invMass
}

// Elasticity returns the elasticity (the coefficient of restitution).
func (s *BasicShape) Elasticity() float64 {
	return s.elasticity
}

// SetElasticity sets the elasticity to e.
func (s *BasicShape) SetElasticity(e float64) {
	s.elasticity = e
}

// Friction returns the friction coefficient.
func (s *BasicShape) Friction() float64 {
	return s.friction
}

// SetFriction sets the friction coefficient to f.
func (s *BasicShape) SetFriction(f float64) {
	s.friction = f
}

// IsStatic reports whether the shape is static.
func (s *BasicShape) IsStatic() bool {
	return s.static
}

// SetStatic marks the shape as static (b true) or dynamic.
func (s *BasicShape) SetStatic(b bool) {
	s.static = b
}

// IsSolid reports whether the shape is solid.
func (s *BasicShape) IsSolid() bool {
	return s.solid
}

// SetSolid marks the shape as solid (b true) or not.
func (s *BasicShape) SetSolid(b bool) {
	s.solid = b
}

// SetTags replaces the shape's tag list.
func (s *BasicShape) SetTags(tags []string) {
	s.tags = tags
}

// Tags returns the shape's tag list.
func (s *BasicShape) Tags() []string {
	return s.tags
}

// HasTag reports whether tag is present in the shape's tag list.
func (s *BasicShape) HasTag(tag string) bool {
	for _, candidate := range s.tags {
		if candidate == tag {
			return true
		}
	}
	return false
}
// Rectangle is an axis-aligned rectangle positioned by its top-left corner.
type Rectangle struct {
	*BasicShape
	width  float64
	height float64
}
// Width returns the rectangle's width.
func (r *Rectangle) Width() float64 {
	return r.width
}

// Height returns the rectangle's height.
func (r *Rectangle) Height() float64 {
	return r.height
}

// getMax returns the bottom-right corner (position plus dimensions).
func (r *Rectangle) getMax() Vec2 {
	return Vec2{r.Pos().X + r.Width(), r.Pos().Y + r.Height()}
}

// Center returns the coordinates of the rectangle's center.
func (r *Rectangle) Center() Vec2 {
	return Vec2{r.Pos().X + r.Width()/2, r.Pos().Y + r.Height()/2}
}

// SetCenter positions the rectangle so that its center lies at c.
func (r *Rectangle) SetCenter(c Vec2) {
	r.SetPos(Vec2{c.X - r.Width()/2, c.Y - r.Height()/2})
}

// SetPos moves the rectangle's top-left corner to p.
func (r *Rectangle) SetPos(p Vec2) {
	r.BasicShape.pos = p
}

// ShapeName returns the name of this kind of shape.
func (r *Rectangle) ShapeName() string {
	return "Rectangle"
}

// SetName sets the shape's instance name to n.
func (r *Rectangle) SetName(n string) {
	r.name = n
}

// Name returns the shape's instance name.
func (r *Rectangle) Name() string {
	return r.name
}

// NewRectangle creates a solid rectangle at pos with the given dimensions and
// a generated unique name.
func NewRectangle(pos Vec2, width float64, height float64) *Rectangle {
	rect := &Rectangle{width: width, height: height}
	rect.BasicShape = &BasicShape{Kind: rect, pos: pos}
	rect.SetName(UUID())
	rect.SetSolid(true)
	return rect
}
// Circle is a circle positioned by the top-left corner of its bounding box;
// the center is cached and kept in sync with pos.
type Circle struct {
	*BasicShape
	center Vec2
	radius float64
}
//ShapeName retourne le nom de la forme
func (s *Circle) ShapeName() string {
return "Circle"
}
//SetName met name à n
func (s *Circle) SetName(n string) {
s.name = n
}
//Name retourne le nom de la forme
func (s *Circle) Name() string {
return s.name
}
//Center retourne les coordonées du centre du Rectangle
func (s *Circle) Center() Vec2 {
return s.center
}
//SetCenter positionne un cercle par son centre
func (s *Circle) SetCenter(c Vec2) {
s.center = c
s.SetPos(s.center.SubScalar(s.radius))
}
//SetPos mets la position à p
func (s *Circle) SetPos(p Vec2) {
s.BasicShape.pos = p
s.center = p.AddScalar(s.radius)
}
//getMax retourne le max
func (s *Circle) getMax() Vec2 {
return s.center.AddScalar(s.radius)
}
//Radius retourne le rayon du cercle
func (s *Circle) Radius() float64 {
return s.radius
}
//Width retourne la largeur
func (s *Circle) Width() float64 {
return s.radius * 2
}
//Height retourne la hauteur
func (s *Circle) Height() float64 {
return s.Width()
}
//NewCircle créé un nouveau cercle
func NewCircle(center Vec2, radius float64) *Circle {
circ := &Circle{radius: radius}
circ.BasicShape = &BasicShape{Kind: circ, pos: center.SubScalar(radius)}
circ.SetName(UUID())
circ.SetSolid(true)
return circ
} | shapes.go | 0.594787 | 0.69903 | shapes.go | starcoder |
package parser
import (
"luago/compiler/ast"
"luago/compiler/lexer"
"luago/number"
"math"
)
// optimizeLogicalOr simplifies `a or b` when the truthiness of a is known at
// compile time: `true or x` folds to the left operand, and `false or x` folds
// to x — unless x is a vararg or function call, which may expand to multiple
// values and so cannot be substituted.
func optimizeLogicalOr(exp *ast.BinOpExp) ast.Exp {
	switch {
	case isTrue(exp.Exp1):
		return exp.Exp1 // true or x => true
	case isFalse(exp.Exp1) && !isVarargOrFuncCall(exp.Exp2):
		return exp.Exp2 // false or x => x
	default:
		return exp
	}
}

// optimizeLogicalAnd simplifies `a and b` when the truthiness of a is known
// at compile time: `false and x` folds to the left operand, and `true and x`
// folds to x — with the same multi-value restriction as optimizeLogicalOr.
func optimizeLogicalAnd(exp *ast.BinOpExp) ast.Exp {
	switch {
	case isFalse(exp.Exp1):
		return exp.Exp1 // false and x => false
	case isTrue(exp.Exp1) && !isVarargOrFuncCall(exp.Exp2):
		return exp.Exp2 // true and x => x
	default:
		return exp
	}
}
// optimizeBitwiseBinOp constant-folds a bitwise binary expression when both
// operands can be represented as Lua integers (see castToInt); otherwise the
// expression is returned unchanged.
func optimizeBitwiseBinOp(exp *ast.BinOpExp) ast.Exp {
	if x, ok := castToInt(exp.Exp1); ok {
		if y, ok := castToInt(exp.Exp2); ok {
			switch exp.Op {
			case lexer.TokenOpBand:
				return &ast.IntegerExp{Line: exp.Line, Val: x & y}
			case lexer.TokenOpBor:
				return &ast.IntegerExp{Line: exp.Line, Val: x | y}
			case lexer.TokenOpBxor:
				return &ast.IntegerExp{Line: exp.Line, Val: x ^ y}
			case lexer.TokenOpShl:
				return &ast.IntegerExp{Line: exp.Line, Val: number.ShiftLeft(x, y)}
			case lexer.TokenOpShr:
				return &ast.IntegerExp{Line: exp.Line, Val: number.ShiftRight(x, y)}
			}
		}
	}
	return exp
}
// optimizeArithBinOp constant-folds an arithmetic binary expression.
// Integer folding is tried first (both operands integer literals); integer
// division and modulo are folded only for a non-zero divisor, so the runtime
// error is preserved otherwise. Failing that, float folding is tried on any
// operand pair convertible to float (see castToFloat).
func optimizeArithBinOp(exp *ast.BinOpExp) ast.Exp {
	if x, ok := exp.Exp1.(*ast.IntegerExp); ok {
		if y, ok := exp.Exp2.(*ast.IntegerExp); ok {
			switch exp.Op {
			case lexer.TokenOpAdd:
				return &ast.IntegerExp{Line: exp.Line, Val: x.Val + y.Val}
			case lexer.TokenOpSub:
				return &ast.IntegerExp{Line: exp.Line, Val: x.Val - y.Val}
			case lexer.TokenOpMul:
				return &ast.IntegerExp{Line: exp.Line, Val: x.Val * y.Val}
			case lexer.TokenOpIDiv:
				if y.Val != 0 { // leave 0-divisor to fail at runtime
					return &ast.IntegerExp{
						Line: exp.Line,
						Val:  number.IFloorDiv(x.Val, y.Val),
					}
				}
			case lexer.TokenOpMod:
				if y.Val != 0 { // leave 0-divisor to fail at runtime
					return &ast.IntegerExp{
						Line: exp.Line,
						Val:  number.IMod(x.Val, y.Val),
					}
				}
			}
		}
	}
	if x, ok := castToFloat(exp.Exp1); ok {
		if y, ok := castToFloat(exp.Exp2); ok {
			switch exp.Op {
			case lexer.TokenOpAdd:
				return &ast.FloatExp{Line: exp.Line, Val: x + y}
			case lexer.TokenOpSub:
				return &ast.FloatExp{Line: exp.Line, Val: x - y}
			case lexer.TokenOpMul:
				return &ast.FloatExp{Line: exp.Line, Val: x * y}
			case lexer.TokenOpDiv:
				if y != 0 {
					return &ast.FloatExp{Line: exp.Line, Val: x / y}
				}
			case lexer.TokenOpIDiv:
				if y != 0 {
					return &ast.FloatExp{Line: exp.Line, Val: number.FFloorDiv(x, y)}
				}
			case lexer.TokenOpMod:
				return &ast.FloatExp{Line: exp.Line, Val: number.FMod(x, y)}
			case lexer.TokenOpPow:
				return &ast.FloatExp{Line: exp.Line, Val: math.Pow(x, y)}
			}
		}
	}
	return exp
}
// optimizeUnaryOp dispatches constant folding for unary operators.
// NOTE: len is not folded.
func optimizeUnaryOp(exp *ast.UnOpExp) ast.Exp {
	switch exp.Op {
	case lexer.TokenOpNot:
		return optimizeNot(exp)
	case lexer.TokenOpUnm:
		return optimizeUnm(exp)
	case lexer.TokenOpBnot:
		return optimizeBnot(exp)
	}
	return exp
}

// optimizeNot folds `not x` when x is a literal: nil/false become true, and
// any other literal (true, number, string) becomes false.
func optimizeNot(exp *ast.UnOpExp) ast.Exp {
	switch exp.MExp.(type) {
	case *ast.NilExp, *ast.FalseExp:
		return &ast.TrueExp{Line: exp.Line}
	case *ast.TrueExp, *ast.IntegerExp, *ast.FloatExp, *ast.StringExp:
		return &ast.FalseExp{Line: exp.Line}
	}
	return exp
}
// optimizeUnm folds unary minus over numeric literals. Note that the literal
// node is negated in place and returned rather than a fresh node allocated.
func optimizeUnm(exp *ast.UnOpExp) ast.Exp {
	switch x := exp.MExp.(type) {
	case *ast.IntegerExp:
		x.Val = -x.Val
		return x
	case *ast.FloatExp:
		x.Val = -x.Val
		return x
	default:
		return exp
	}
}

// optimizeBnot folds bitwise-not over numeric literals: an integer literal is
// complemented in place; a float literal is folded only when it is exactly
// representable as a Lua integer.
func optimizeBnot(exp *ast.UnOpExp) ast.Exp {
	switch x := exp.MExp.(type) {
	case *ast.IntegerExp:
		x.Val = ^x.Val // ^ is bitwise not in golang
		return x
	case *ast.FloatExp:
		if i, ok := number.FloatToInteger(x.Val); ok {
			return &ast.IntegerExp{Line: x.Line, Val: ^i}
		}
	}
	return exp
}
// optimizePow folds exponentiation right-associatively: for exp0 ^ exp2 the
// right operand is folded first, then the whole binary expression is folded
// through optimizeArithBinOp. Non-binary expressions pass through unchanged.
func optimizePow(exp ast.Exp) ast.Exp {
	binOp, ok := exp.(*ast.BinOpExp)
	if !ok {
		return exp
	}
	if binOp.Op == lexer.TokenOpPow {
		binOp.Exp2 = optimizePow(binOp.Exp2)
	}
	return optimizeArithBinOp(binOp)
}
// isTrue reports whether exp is a compile-time truthy literal. In Lua only
// nil and false are falsy; every other literal is truthy.
func isTrue(exp ast.Exp) bool {
	switch exp.(type) {
	case *ast.TrueExp, *ast.IntegerExp, *ast.FloatExp, *ast.StringExp:
		return true
	}
	return false
}

// isFalse reports whether exp is a compile-time falsy literal (nil or false).
func isFalse(exp ast.Exp) bool {
	switch exp.(type) {
	case *ast.NilExp, *ast.FalseExp:
		return true
	}
	return false
}

// isVarargOrFuncCall reports whether exp may expand to multiple values
// (a vararg expression or a function call).
func isVarargOrFuncCall(exp ast.Exp) bool {
	switch exp.(type) {
	case *ast.VarargExp, *ast.FuncCallExp:
		return true
	}
	return false
}
func castToInt(exp ast.Exp) (int64, bool) {
switch x := exp.(type) {
case *ast.IntegerExp:
return x.Val, true
case *ast.FloatExp:
return number.FloatToInteger(x.Val)
default:
return 0, false
}
}
func castToFloat(exp ast.Exp) (float64, bool) {
switch x := exp.(type) {
case *ast.IntegerExp:
return float64(x.Val), true
case *ast.FloatExp:
return x.Val, true
default:
return 0.0, false
}
} | compiler/parser/optimizer.go | 0.533154 | 0.514522 | optimizer.go | starcoder |
package m3d
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"github.com/jonathaningram/dark-omen/internal/cstringutil"
)
const (
	// format is the format ID used in all .M3D files.
	// "PD3M" is probably "M3DP" backwards, which is probably "Model 3D
	// <something>".
	format = "PD3M"

	// Fixed on-disk record sizes in bytes.
	headerSize  = 24
	textureSize = 96
	vectorSize  = 12

	// Derived record sizes: object records embed 12-byte vectors.
	objectHeaderSize = 52 + vectorSize
	objectFaceSize   = 16 + vectorSize
	objectVertexSize = (2 * vectorSize) + 20
)
// A Model is made up of a list of textures and a list of objects.
type Model struct {
	format   string
	Textures []*Texture
	Objects  []*Object
}

// A Texture contains information about texturing a 3D surface.
type Texture struct {
	// Path appears to be a directory on the original Dark Omen developer's
	// machine. It does not seem to be used for anything useful and might best
	// be treated as an Easter egg.
	Path string
	// FileName is the name of the texture image file.
	FileName string
}

// A Vector in 3-dimensional space.
type Vector struct {
	X, Y, Z float32
}

// An Object is a named node of the model: it references its parent object by
// index and owns its face and vertex lists.
type Object struct {
	Name        string
	ParentIndex int16
	padding     int16 // on-disk filler, preserved as read
	Pivot       Vector
	Flags       uint32
	unknown1    uint32 // meaning not established by this decoder
	unknown2    uint32 // meaning not established by this decoder
	Faces       []*Face
	Vertexes    []*Vertex
}

// A Face is a triangle: three vertex indexes, a texture index and a normal.
type Face struct {
	Indexes      [3]uint16
	TextureIndex uint16
	Normal       Vector
	unknown1     uint32 // meaning not established by this decoder
	unknown2     uint32 // meaning not established by this decoder
}

// A Color is an 8-bit-per-channel RGBA color.
type Color struct {
	R, G, B, A uint8
}

// A Vertex carries position, normal, color, texture coordinates (U, V) and
// its index within the object.
type Vertex struct {
	Position Vector
	Normal   Vector
	Color    Color
	U, V     float32
	Index    uint32
	unknown1 uint32 // meaning not established by this decoder
}
// Decoder reads and decodes a 3D model from an input stream.
type Decoder struct {
	r io.ReaderAt // random-access source; byte offsets are tracked by the read helpers
}

// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.ReaderAt) *Decoder {
	return &Decoder{r: r}
}
// Decode reads the encoded 3D model information from its input and returns a
// new model containing decoded textures and objects. The file layout is:
// header, then all textures, then all objects.
func (d *Decoder) Decode() (*Model, error) {
	h, pos, err := d.readHeader()
	if err != nil {
		return nil, fmt.Errorf("could not read header: %w", err)
	}
	if h.format != format {
		return nil, fmt.Errorf("unknown format %q, expected %q", h.format, format)
	}

	textures, pos, err := d.readTextures(h.textureCount, pos)
	if err != nil {
		return nil, err
	}
	objects, err := d.readObjects(h.objectCount, pos)
	if err != nil {
		return nil, err
	}

	return &Model{
		format:   format,
		Textures: textures,
		Objects:  objects,
	}, nil
}
// header mirrors the fixed 24-byte .M3D file header.
type header struct {
	format       string // should equal the package-level format constant
	magic        uint32
	version      uint32
	crc          uint32
	notCRC       uint32 // presumably the complement of crc — not verified by this decoder
	textureCount uint16
	objectCount  uint16
}
// readHeader decodes the fixed-size file header at offset 0 and returns it
// together with the position of the first byte after it. A short read is
// reported as an error; io.EOF on a complete read is tolerated.
func (d *Decoder) readHeader() (h *header, pos int64, err error) {
	buf := make([]byte, headerSize)
	n, err := d.r.ReadAt(buf, 0)
	pos = int64(n)
	if n != headerSize {
		return nil, pos, fmt.Errorf("read %d byte(s), expected %d", n, headerSize)
	}
	if err != nil && err != io.EOF {
		return nil, pos, err
	}

	le := binary.LittleEndian
	h = &header{
		format:       string(buf[0:4]),
		magic:        le.Uint32(buf[4:8]),
		version:      le.Uint32(buf[8:12]),
		crc:          le.Uint32(buf[12:16]),
		notCRC:       le.Uint32(buf[16:20]),
		textureCount: le.Uint16(buf[20:22]),
		objectCount:  le.Uint16(buf[22:24]),
	}
	return h, pos, nil
}
// readTextures decodes count consecutive texture records starting at startPos
// and returns them with the position after the last record read.
func (d *Decoder) readTextures(count uint16, startPos int64) (textures []*Texture, pos int64, err error) {
	textures = make([]*Texture, count)
	pos = startPos
	for i := range textures {
		textures[i], pos, err = d.readTexture(pos)
		if err != nil {
			return nil, pos, fmt.Errorf("could not read texture %d: %w", i, err)
		}
	}
	return textures, pos, nil
}

// readTexture decodes one 96-byte texture record: a 64-byte NUL-padded path
// followed by a 32-byte NUL-padded file name.
func (d *Decoder) readTexture(startPos int64) (texture *Texture, pos int64, err error) {
	buf := make([]byte, textureSize)
	n, err := d.r.ReadAt(buf, startPos)
	pos = startPos + int64(n)
	if n != textureSize {
		return nil, pos, fmt.Errorf("read %d byte(s), expected %d", n, textureSize)
	}
	if err != nil && err != io.EOF {
		return nil, pos, err
	}
	return &Texture{
		Path:     cstringutil.ToGo(buf[:64]),
		FileName: cstringutil.ToGo(buf[64:]),
	}, pos, nil
}
// readObjects decodes count consecutive object records starting at startPos.
func (d *Decoder) readObjects(count uint16, startPos int64) (objects []*Object, err error) {
	objects = make([]*Object, count)
	pos := startPos
	for i := range objects {
		objects[i], pos, err = d.readObject(pos)
		if err != nil {
			return nil, fmt.Errorf("could not read object %d: %w", i, err)
		}
	}
	return objects, nil
}
// readObject decodes a single object record at startPos: a fixed-size object
// header followed by its face records and then its vertex records.
func (d *Decoder) readObject(startPos int64) (object *Object, pos int64, err error) {
	pos = startPos
	buf := make([]byte, objectHeaderSize)
	n, err := d.r.ReadAt(buf, pos)
	pos += int64(n)
	if n != objectHeaderSize {
		return nil, pos, fmt.Errorf("read %d byte(s), expected %d", n, objectHeaderSize)
	}
	if err != nil && err != io.EOF {
		return nil, pos, err
	}

	pivot, err := d.readVector(buf[36:48])
	if err != nil {
		return nil, pos, fmt.Errorf("could not read pivot vector: %w", err)
	}

	vertexCount := binary.LittleEndian.Uint16(buf[48:50])
	faceCount := binary.LittleEndian.Uint16(buf[50:52])

	// Face records precede vertex records in the file.
	faces := make([]*Face, faceCount)
	for i := range faces {
		faces[i], pos, err = d.readFace(pos)
		if err != nil {
			return nil, pos, fmt.Errorf("could not read face %d: %w", i, err)
		}
	}
	vertexes := make([]*Vertex, vertexCount)
	for i := range vertexes {
		vertexes[i], pos, err = d.readVertex(pos)
		if err != nil {
			return nil, pos, fmt.Errorf("could not read vertex %d: %w", i, err)
		}
	}

	return &Object{
		Name:        cstringutil.ToGo(buf[:32]),
		ParentIndex: int16(binary.LittleEndian.Uint16(buf[32:34])),
		padding:     int16(binary.LittleEndian.Uint16(buf[34:36])),
		Pivot:       pivot,
		Flags:       binary.LittleEndian.Uint32(buf[52:56]),
		unknown1:    binary.LittleEndian.Uint32(buf[56:60]),
		unknown2:    binary.LittleEndian.Uint32(buf[60:64]),
		Faces:       faces,
		Vertexes:    vertexes,
	}, pos, nil
}
// readFace decodes a single face record (objectFaceSize bytes) at
// startPos: three vertex indexes, a texture index, a normal vector and
// two unknown fields, all little endian.
func (d *Decoder) readFace(startPos int64) (face *Face, pos int64, err error) {
	pos = startPos
	data := make([]byte, objectFaceSize)
	count, err := d.r.ReadAt(data, pos)
	pos += int64(count)
	if count != objectFaceSize {
		return nil, pos, fmt.Errorf("read %d byte(s), expected %d", count, objectFaceSize)
	}
	// A complete read may still report io.EOF; only other errors abort.
	if err != nil && err != io.EOF {
		return nil, pos, err
	}
	normal, vErr := d.readVector(data[8:20])
	if vErr != nil {
		return nil, pos, fmt.Errorf("could not read normal vector: %w", vErr)
	}
	f := &Face{
		TextureIndex: binary.LittleEndian.Uint16(data[6:8]),
		Normal:       normal,
		unknown1:     binary.LittleEndian.Uint32(data[20:24]),
		unknown2:     binary.LittleEndian.Uint32(data[24:28]),
	}
	// The three vertex indexes occupy bytes [0:6].
	for i := 0; i < 3; i++ {
		f.Indexes[i] = binary.LittleEndian.Uint16(data[i*2 : i*2+2])
	}
	return f, pos, nil
}
// readVertex decodes one vertex record of objectVertexSize bytes at
// startPos and returns the vertex together with the advanced offset.
//
// Record layout (little endian):
//	[0:12]  position vector (3 x float32)
//	[12:24] normal vector (3 x float32)
//	[24:28] vertex color R, G, B, A (1 byte each)
//	[28:32] texture coordinate u (float32)
//	[32:36] texture coordinate v (float32)
//	[36:40] index (uint32)
//	[40:44] unknown uint32
func (d *Decoder) readVertex(startPos int64) (vertex *Vertex, pos int64, err error) {
	pos = startPos
	buf := make([]byte, objectVertexSize)
	n, err := d.r.ReadAt(buf, pos)
	pos += int64(n)
	if n != objectVertexSize {
		return nil, pos, fmt.Errorf("read %d byte(s), expected %d", n, objectVertexSize)
	}
	// A full read may still come with io.EOF; only other errors are fatal.
	if err != nil && err != io.EOF {
		return nil, pos, err
	}
	position, err := d.readVector(buf[0:12])
	if err != nil {
		return nil, pos, fmt.Errorf("could not read position vector: %w", err)
	}
	normal, err := d.readVector(buf[12:24])
	if err != nil {
		return nil, pos, fmt.Errorf("could not read normal vector: %w", err)
	}
	var u float32
	if err := binary.Read(bytes.NewReader(buf[28:32]), binary.LittleEndian, &u); err != nil {
		return nil, pos, fmt.Errorf("could not read u: %w", err)
	}
	var v float32
	if err := binary.Read(bytes.NewReader(buf[32:36]), binary.LittleEndian, &v); err != nil {
		return nil, pos, fmt.Errorf("could not read v: %w", err)
	}
	return &Vertex{
		Position: position,
		Normal:   normal,
		Color: Color{
			R: buf[24],
			G: buf[25],
			B: buf[26],
			A: buf[27],
		},
		U:        u,
		V:        v,
		Index:    binary.LittleEndian.Uint32(buf[36:40]),
		unknown1: binary.LittleEndian.Uint32(buf[40:44]),
	}, pos, nil
}
func (d *Decoder) readVector(buf []byte) (Vector, error) {
var v Vector
if err := binary.Read(bytes.NewReader(buf[0:4]), binary.LittleEndian, &v.X); err != nil {
return v, fmt.Errorf("could not read x: %w", err)
}
if err := binary.Read(bytes.NewReader(buf[4:8]), binary.LittleEndian, &v.Y); err != nil {
return v, fmt.Errorf("could not read y: %w", err)
}
if err := binary.Read(bytes.NewReader(buf[8:12]), binary.LittleEndian, &v.Z); err != nil {
return v, fmt.Errorf("could not read z: %w", err)
}
return v, nil
} | encoding/m3d/m3d.go | 0.644001 | 0.42316 | m3d.go | starcoder |
package render
import (
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/mathgl/mgl32"
)
// SceneMD1Entity bundles the GPU handles and world placement of one
// static MD1 model instance in the scene.
type SceneMD1Entity struct {
	TextureId          uint32     // texture id in OpenGL
	VertexBuffer       []float32  // interleaved: 3 floats x,y,z position, 2 floats texture u,v, 3 floats normal x,y,z
	ModelPosition      mgl32.Vec3 // Position in world space
	RotationAngle      float32    // rotation around the Y axis, in degrees
	VertexArrayObject  uint32     // VAO handle
	VertexBufferObject uint32     // VBO handle
}
func (r *RenderDef) RenderStaticEntity(entity SceneMD1Entity, renderType int32) {
vertexBuffer := entity.VertexBuffer
textureId := entity.TextureId
modelPosition := entity.ModelPosition
modelMatrix := mgl32.Ident4()
modelMatrix = modelMatrix.Mul4(mgl32.Translate3D(modelPosition.X(), modelPosition.Y(), modelPosition.Z()))
modelMatrix = modelMatrix.Mul4(mgl32.HomogRotate3DY(mgl32.DegToRad(float32(entity.RotationAngle))))
if len(vertexBuffer) == 0 {
return
}
programShader := r.ProgramShader
renderTypeUniform := gl.GetUniformLocation(programShader, gl.Str("renderType\x00"))
gl.Uniform1i(renderTypeUniform, renderType)
modelLoc := gl.GetUniformLocation(programShader, gl.Str("model\x00"))
gl.UniformMatrix4fv(modelLoc, 1, false, &modelMatrix[0])
floatSize := 4
// 3 floats for vertex, 2 floats for texture UV, 3 float for normals
stride := int32(8 * floatSize)
vao := entity.VertexArrayObject
gl.BindVertexArray(vao)
vbo := entity.VertexBufferObject
gl.BindBuffer(gl.ARRAY_BUFFER, vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertexBuffer)*floatSize, gl.Ptr(vertexBuffer), gl.STATIC_DRAW)
// Position attribute
gl.VertexAttribPointer(0, 3, gl.FLOAT, false, stride, gl.PtrOffset(0))
gl.EnableVertexAttribArray(0)
// Texture
gl.VertexAttribPointer(1, 2, gl.FLOAT, false, stride, gl.PtrOffset(3*floatSize))
gl.EnableVertexAttribArray(1)
// Normal
gl.VertexAttribPointer(2, 3, gl.FLOAT, false, stride, gl.PtrOffset(5*floatSize))
gl.EnableVertexAttribArray(2)
diffuseUniform := gl.GetUniformLocation(programShader, gl.Str("diffuse\x00"))
gl.Uniform1i(diffuseUniform, 0)
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, textureId)
gl.DrawArrays(gl.TRIANGLES, 0, int32(len(vertexBuffer)/8))
// Cleanup
gl.DisableVertexAttribArray(0)
gl.DisableVertexAttribArray(1)
gl.DisableVertexAttribArray(2)
} | render/StaticEntity.go | 0.621771 | 0.40751 | StaticEntity.go | starcoder |
package async
// Series is shorthand for List.RunSeries: it creates a fresh list, loads
// the given routines into it and runs them as a series.
func Series(routines []Routine, callbacks ...Done) {
	list := New()
	list.Multiple(routines...)
	list.RunSeries(callbacks...)
}
// SeriesParallel is shorthand for List.RunSeriesParallel: it creates a
// fresh list, loads the given routines into it and runs them in
// series-parallel mode.
func SeriesParallel(routines []Routine, callbacks ...Done) {
	list := New()
	list.Multiple(routines...)
	list.RunSeriesParallel(callbacks...)
}
/*
RunSeries will run all of the Routine functions in a series effect.
If there is an error, series will immediately exit and trigger the
callbacks with the error.
There are no arguments passed between the routines that are used in series.
It is just for commands that need to run asynchronously without seeing the
results of its previous routine.
For example, take a look at one of the tests for this function:
func TestSeries(t *testing.T) {
counter := 0
Status("Calling Series")
async.Series([]async.Routine{
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
}, func(err error, results ...interface{}) {
if err != nil {
t.Errorf("Unexpected error: %s", err)
return
}
if counter != 4 {
t.Errorf("Not all routines were completed.")
return
}
Status("Counter: %d", counter)
})
}
*/
func (l *List) RunSeries(callbacks ...Done) {
	fall := fallSeries(l, callbacks...)
	next := nextSeries(l, callbacks...)
	// One WaitGroup slot per queued routine; nextSeries releases one per
	// completion (and drains the rest on an early error) so that the
	// Wait inside fallSeries can return.
	l.Wait.Add(l.Len())
	fall(next)
}
/*
RunSeriesParallel all of the Routine functions in a series effect, and in
parallel mode.
If there is an error, any further results will be discarded but it will not
immediately exit. It will continue to run all of the other Routine functions
that were passed into it. This is because by the time the error is sent, the
goroutines have already been started. At this current time, there is no way
to cancel a sleep timer in Go.
There are no arguments passed between the routines that are used in series.
It is just for commands that need to run asynchronously without seeing the
results of its previous routine.
For example, take a look at one of the tests for this function:
func TestSeriesParallel(t *testing.T) {
counter := 0
Status("Calling Series")
async.SeriesParallel([]async.Routine{
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
func(done async.Done, args ...interface{}) {
Status("Increasing counter...")
counter++
done(nil)
},
}, func(err error, results ...interface{}) {
if err != nil {
t.Errorf("Unexpected error: %s", err)
return
}
if counter != 4 {
t.Errorf("Not all routines were completed.")
return
}
Status("Counter: %d", counter)
})
}
*/
func (l *List) RunSeriesParallel(callbacks ...Done) {
	var routines []Routine
	for l.Len() > 0 {
		// r is declared fresh each iteration, so the closure below
		// captures the right routine.
		_, r := l.Remove(l.Front())
		routines = append(routines, func(done Done, args ...interface{}) {
			r(func(err error, args ...interface{}) {
				// As with RunSeries, result args are discarded; only a
				// possible error is propagated.
				done(err)
			})
		})
	}
	// NOTE: the previous version called l.Wait.Add(l.Len()) here, but the
	// list is already drained at this point, so it was always Add(0);
	// Parallel performs its own synchronization.
	Parallel(routines, callbacks...)
}
// fallSeries returns the "run one step" function of the series chain: it
// pops the front routine off the list, starts it on its own goroutine
// with next as its completion callback, and then blocks on the list's
// WaitGroup until every outstanding routine slot has been released.
// The callbacks and args parameters are unused here; results are
// delivered through nextSeries.
func fallSeries(l *List, callbacks ...Done) func(Done, ...interface{}) {
	return func(next Done, args ...interface{}) {
		e := l.Front()
		_, r := l.Remove(e)
		// Run the first series routine and give it the next function, and
		// any arguments that were provided
		go r(next)
		l.Wait.Wait()
	}
}
// nextSeries builds the completion callback handed to each routine. Every
// invocation releases one WaitGroup slot. On error or when the list is
// exhausted it drains the remaining slots (unblocking fallSeries' Wait)
// and fires the user callbacks; otherwise it chains into the next routine
// via a fresh fallSeries/nextSeries pair.
func nextSeries(l *List, callbacks ...Done) Done {
	fall := fallSeries(l, callbacks...)
	return func(err error, args ...interface{}) {
		next := nextSeries(l, callbacks...)
		l.Wait.Done()
		if err != nil || l.Len() == 0 {
			// Just in case it's an error, let's make sure we've cleared
			// all of the sync.WaitGroup waits that we initiated.
			for i := 0; i < l.Len(); i++ {
				l.Wait.Done()
			}
			// Send the results to the callbacks
			for i := 0; i < len(callbacks); i++ {
				callbacks[i](err)
			}
			return
		}
		// Run the next series routine with any arguments that were provided
		fall(next)
		return
	}
}
package automata
import (
"fmt"
"strconv"
"github.com/cheggaaa/pb"
"github.com/fogleman/gg"
)
// NewGrid builds an xSize x ySize grid whose cells start at value 0 and
// use the package default simulation and draw functions.
func NewGrid(xSize, ySize int) *Grid {
	grid := &Grid{ColSize: xSize, RowSize: ySize}
	for row := 1; row <= ySize; row++ {
		for col := 1; col <= xSize; col++ {
			cell := Cell{
				XPos:     col,
				YPos:     row,
				Value:    0,
				Grid:     grid,
				SimFunc:  defaultSimFunc,
				DrawFunc: defaultDrawFunc,
			}
			grid.CellList = append(grid.CellList, &cell)
		}
	}
	return grid
}
// Grid is a 2D field of cells stored row-major in CellList. Each Cell
// carries its own overridable simulation and draw functions, so several
// independent grids can run simulations with different rules in parallel.
type Grid struct {
	ColSize  int
	RowSize  int
	CellList []*Cell
}
// SetupGrid invokes SetupFunc once for every interior cell — the first
// and last rows and columns are skipped — passing the grid and the
// cell's column/row coordinates.
func (g *Grid) SetupGrid(SetupFunc func(*Grid, int, int)) {
	for col := 2; col < g.ColSize; col++ {
		for row := 2; row < g.RowSize; row++ {
			SetupFunc(g, col, row)
		}
	}
}
// CopyGrid returns a copy of the grid with freshly allocated cells.
// NOTE(review): the copied cells' Grid pointer is set to the receiver g,
// not to the new grid. Simulate appears to rely on this so that new
// values are written into the copy while cell functions can still see
// the previous generation — confirm before "fixing" the aliasing.
func (g *Grid) CopyGrid() *Grid {
	tempGrid := *g
	var tempCellList []*Cell
	for _, v := range g.CellList {
		newCell := *v
		newCell.Grid = g
		tempCellList = append(tempCellList, &newCell)
	}
	tempGrid.CellList = tempCellList
	return (&tempGrid)
}
// GetCell returns the cell at 1-based column x, row y. Cells are stored
// row-major in CellList.
func (g *Grid) GetCell(x, y int) *Cell {
	idx := (y-1)*g.ColSize + (x - 1)
	return g.CellList[idx]
}
// PrettyPrint writes the grid to the terminal, top row being the highest
// row index (bottom-up orientation). If it isn't pretty, your grid is
// wider than your terminal.
func (g *Grid) PrettyPrint() {
	for row := g.RowSize; row >= 1; row-- {
		line := ""
		for col := 1; col <= g.ColSize; col++ {
			line += " " + strconv.FormatFloat(g.GetCell(col, row).Value, 'f', 3, 64)
		}
		fmt.Println(line)
	}
}
// PrintPNG renders the grid to ./pictures/<fileType>.png, one pixel per
// cell, using each cell's own draw function for its color.
// NOTE(review): both coordinates are flipped (ColSize-XPos, RowSize-YPos),
// placing cell (1,1) at the image's far corner — presumably to match the
// bottom-up orientation of PrettyPrint; confirm if orientation matters.
func (g *Grid) PrintPNG(fileType string) {
	dc := gg.NewContext(g.ColSize, g.RowSize)
	for _, v := range g.CellList {
		dc.DrawRectangle(float64(g.ColSize-v.XPos), float64(g.RowSize-v.YPos), 1, 1)
		v.draw(dc)
		dc.Fill()
	}
	dc.SavePNG("./pictures/" + fileType + ".png")
}
// Simulate advances the whole grid by one step. It works on a copy whose
// cells still point back to the receiver via their Grid field (see the
// note on CopyGrid) — presumably so each cell's SimFunc reads
// previous-generation values while results accumulate in the copy; the
// copy's cell list then replaces the original. Edge cells are skipped.
func (g *Grid) Simulate() {
	tempGrid := g.CopyGrid()
	for row := 1; row <= tempGrid.RowSize; row++ {
		for col := 1; col <= tempGrid.ColSize; col++ {
			//ignore the edges
			if row != 1 && row != tempGrid.RowSize {
				if col != 1 && col != tempGrid.ColSize {
					tempGrid.GetCell(col, row).simulate()
				}
			}
		}
	}
	g.CellList = tempGrid.CellList
}
// RunSimulation advances the grid `steps` times, invoking interupt after
// every step and showing a terminal progress bar while running.
func (g *Grid) RunSimulation(steps int, interupt func(*Grid, int)) {
	bar := pb.StartNew(steps) // progress bar (pb)
	for step := 0; step < steps; step++ {
		g.Simulate()
		interupt(g, step)
		bar.Increment()
	}
	bar.Finish()
}
//defaultSimulation is a catch all simulation function so that the user can't accidentally crash the program.
func defaultSimulation(g *Grid) {
tempGrid := g.CopyGrid()
g.CellList = tempGrid.CellList
} | grid.go | 0.566019 | 0.40248 | grid.go | starcoder |
package gohome
import (
// "fmt"
"github.com/PucklaMotzer09/mathgl/mgl32"
)
// TransformableObject2D stores everything needed to build a 2D
// transformation matrix (translation, scale, rotation and their
// reference points), plus a cache of the last computed matrices.
type TransformableObject2D struct {
	// The position in the world
	Position mgl32.Vec2
	// The size of the object in pixels
	Size mgl32.Vec2
	// The scale that will be multiplied with the size
	Scale mgl32.Vec2
	// The rotation around the Z axis, in degrees
	Rotation float32
	// Defines where the [0,0] position is.
	// Takes [0.0-1.0] normalised for size*scale
	Origin mgl32.Vec2
	// The anchor that will be used for the rotation
	// Takes [0.0-1.0] normalised for size*scale
	RotationPoint mgl32.Vec2

	// Previous input values; valuesChanged compares against these to
	// skip recomputation of the cached matrix.
	oldPosition mgl32.Vec2
	oldSize     mgl32.Vec2
	oldScale    mgl32.Vec2
	oldRotation float32

	// Cached object matrix and its camera-adjusted variant.
	transformMatrix      mgl32.Mat3
	camNotRelativeMatrix mgl32.Mat3
}
// getOrigin converts the normalised Origin setting on the given axis
// into a pixel offset of size*scale.
func (tobj *TransformableObject2D) getOrigin(axis int) float32 {
	offset := (tobj.Origin[axis]*2.0 - 1.0) / -2.0
	return tobj.Size[axis] * tobj.Scale[axis] * offset
}
// getRotationPoint converts the normalised RotationPoint setting on the
// given axis into a pixel offset of size*scale.
func (tobj *TransformableObject2D) getRotationPoint(axis int) float32 {
	offset := (tobj.RotationPoint[axis]*2.0 - 1.0) / -2.0
	return tobj.Size[axis] * tobj.Scale[axis] * offset
}
// valuesChanged reports whether any transform input differs from the
// values used for the last matrix calculation.
func (tobj *TransformableObject2D) valuesChanged() bool {
	switch {
	case tobj.Position != tobj.oldPosition:
		return true
	case tobj.Size != tobj.oldSize:
		return true
	case tobj.Scale != tobj.oldScale:
		return true
	case tobj.Rotation != tobj.oldRotation:
		return true
	}
	return false
}
// CalculateTransformMatrix recomputes the cached object matrix if any
// input changed, then derives the camera-adjusted matrix. When
// notRelativeToCamera indexes a valid 2D camera of rmgr, that camera's
// inverse view matrix is pre-multiplied so the object is not affected by
// camera movement.
func (tobj *TransformableObject2D) CalculateTransformMatrix(rmgr *RenderManager, notRelativeToCamera int) {
	var cam2d *Camera2D = nil
	if rmgr != nil {
		if notRelativeToCamera != -1 && len(rmgr.camera2Ds) > notRelativeToCamera {
			cam2d = rmgr.camera2Ds[notRelativeToCamera]
		}
		if cam2d != nil {
			cam2d.CalculateViewMatrix()
		}
	}
	// Matrix composition, left to right:
	// origin-offset, translation, -rotation-point, rotation, rotation-point, scale.
	// OT T -RPT R RPT S
	if tobj.valuesChanged() {
		tobj.transformMatrix = mgl32.Translate2D(tobj.getOrigin(0), tobj.getOrigin(1)).Mul3(mgl32.Translate2D(tobj.Position[0], tobj.Position[1])).Mul3(mgl32.Translate2D(-tobj.getRotationPoint(0), -tobj.getRotationPoint(1))).Mul3(mgl32.Rotate2D(-mgl32.DegToRad(tobj.Rotation)).Mat3()).Mul3(mgl32.Translate2D(tobj.getRotationPoint(0), tobj.getRotationPoint(1))).Mul3(mgl32.Scale2D(tobj.Scale[0]*tobj.Size[0], tobj.Scale[1]*tobj.Size[1]))
		// Remember the inputs so the next call can skip recomputation.
		tobj.oldPosition = tobj.Position
		tobj.oldSize = tobj.Size
		tobj.oldScale = tobj.Scale
		tobj.oldRotation = tobj.Rotation
	}
	if cam2d != nil {
		tobj.camNotRelativeMatrix = cam2d.GetInverseViewMatrix().Mul3(tobj.transformMatrix)
	} else {
		tobj.camNotRelativeMatrix = tobj.transformMatrix
	}
}
// GetTransformMatrix returns the most recently calculated transformation
// matrix, including any camera adjustment from CalculateTransformMatrix.
func (tobj *TransformableObject2D) GetTransformMatrix() mgl32.Mat3 {
	return tobj.camNotRelativeMatrix
}
// SetTransformMatrix pushes the current transformation matrix into the
// render manager for the next draw call.
func (tobj *TransformableObject2D) SetTransformMatrix(rmgr *RenderManager) {
	rmgr.setTransformMatrix2D(tobj.GetTransformMatrix())
}
package tee
import "github.com/ethereum/go-ethereum/common"
type Parameters struct {
PowDepth uint64 // required confirmed block depth
PhaseDuration uint64 // number of blocks of one phase (not epoch)
ResponseDuration uint64 // challenge response grace period for operator at end of exit phase
InitBlock uint64 // block at which Erdstall contract was deployed
TEE common.Address // Enclave's public key address
Contract common.Address // Erdstall contract address
}
// DepositEpoch returns the deposit epoch at the given block number.
func (p Parameters) DepositEpoch(blockNum uint64) Epoch {
return p.epoch(blockNum)
}
// TxEpoch returns the transaction epoch at the given block number.
func (p Parameters) TxEpoch(blockNum uint64) Epoch {
return p.epoch(blockNum) - 1
}
// ExitEpoch returns the exit epoch at the given block number.
func (p Parameters) ExitEpoch(blockNum uint64) Epoch {
return p.epoch(blockNum) - 2
}
// FreezingEpoch returns the freezing epoch at the given block number.
func (p Parameters) FreezingEpoch(blockNum uint64) Epoch {
return p.epoch(blockNum) - 3
}
// SealedEpoch returns the sealed epoch at the given block number.
// It has the same value as `FreezingEpoch`.
func (p Parameters) SealedEpoch(blockNum uint64) Epoch {
return p.epoch(blockNum) - 3
}
// Don't use this, use the specific FooEpoch methods.
func (p Parameters) epoch(blockNum uint64) Epoch {
return (blockNum - p.InitBlock) / p.PhaseDuration
}
func (p Parameters) IsChallengeResponsePhase(blockNum uint64) bool {
return p.PhaseDuration-((blockNum-p.InitBlock)%p.PhaseDuration) <= p.ResponseDuration
}
// IsLastPhaseBlock tells whether this block is the last block of a phase.
func (p Parameters) IsLastPhaseBlock(blockNum uint64) bool {
return ((blockNum - p.InitBlock) % p.PhaseDuration) == p.PhaseDuration-1
}
func (p Parameters) DepositStartBlock(epoch uint64) uint64 {
return p.InitBlock + epoch*p.PhaseDuration
}
func (p Parameters) DepositDoneBlock(epoch uint64) uint64 {
return p.DepositStartBlock(epoch) + p.PhaseDuration
}
func (p Parameters) TxDoneBlock(epoch uint64) uint64 {
return p.DepositStartBlock(epoch) + 2*p.PhaseDuration
}
func (p Parameters) ExitDoneBlock(epoch uint64) uint64 {
return p.DepositStartBlock(epoch) + 3*p.PhaseDuration
} | tee/params.go | 0.861858 | 0.446676 | params.go | starcoder |
package imaging
import (
"image"
"math"
)
// Edge returns a copy of img in which detected edge pixels are set to
// opaque white and all other pixels to opaque black.
// t is the magnitude threshold an edge must exceed; b is the blur
// parameter passed to the Gaussian pre-smoothing pass.
// Detection follows the Canny pipeline described at
// https://en.wikipedia.org/wiki/Canny_edge_detector : blur, intensity
// gradient, non-maximum suppression, then thresholding.
func Edge(img image.Image, t, b int) image.Image {
	out := Gaussian(img, b)
	hyp, deg := intencityGradient(out)
	max := nonMaximumSuppression(hyp, deg, img.Bounds().Max.X)
	bounds := img.Bounds()
	i := 0
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			off := (y*bounds.Max.X + x) * 4
			v := uint8(0x00)
			if max[i] > t {
				v = 0xFF
			}
			out.Pix[off+0] = v
			out.Pix[off+1] = v
			out.Pix[off+2] = v
			out.Pix[off+3] = 0xFF
			i++
		}
	}
	return out
}
// intencityGradient returns the per-pixel gradient magnitudes and their
// quantized directions (0, 45, 90 or 135 degrees), both in row-major
// order. Gradients come from the Sobel operator:
// https://en.wikipedia.org/wiki/Sobel_operator
func intencityGradient(img image.Image) ([]int, []int) {
	bounds := img.Bounds()
	n := bounds.Max.X * bounds.Max.Y
	hyp := make([]int, 0, n)
	deg := make([]int, 0, n)
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			gx := float64(xG(img, x, y))
			gy := float64(yG(img, x, y))
			magnitude := math.Hypot(gx, gy)
			// Fold the angle into [0,180] and snap it to the nearest of
			// the four principal directions.
			angle := math.Abs(math.Atan2(gy, gx) * 180 / math.Pi)
			switch {
			case angle > 22.5 && angle <= 67.5:
				angle = 45
			case angle > 67.5 && angle <= 112.5:
				angle = 90
			case angle > 112.5 && angle <= 157.5:
				angle = 135
			case angle > 0 && angle <= 22.5 || angle > 157.5 && angle <= 180:
				angle = 0
			}
			hyp = append(hyp, int(magnitude))
			deg = append(deg, int(angle))
		}
	}
	return hyp, deg
}
// nonMaximumSuppression thins detected edges: a gradient magnitude is
// kept only if it is a strict local maximum along its gradient
// direction; otherwise it is suppressed to 0. hyp holds the gradient
// magnitudes, deg the quantized directions (0, 45, 90 or 135 degrees)
// and width is the row length of the image grid.
// See "Non-maximum suppression" at
// https://en.wikipedia.org/wiki/Canny_edge_detector
//
// Fixes over the previous version: expressions like `i+1%width != 0`
// parse as `i + (1 % width) != 0` and so never detected row wrap, and
// the diagonal cases lacked column checks, letting neighbors wrap to the
// opposite image edge.
func nonMaximumSuppression(hyp, deg []int, width int) (out []int) {
	out = make([]int, 0, len(hyp))
	for i := range hyp {
		col := i % width
		// a and b are the two neighbor magnitudes along the gradient
		// direction; neighbors outside the image count as 0.
		a, b := 0, 0
		switch deg[i] {
		case 0: // east and west
			if col > 0 {
				a = hyp[i-1]
			}
			if col < width-1 && i+1 < len(hyp) {
				b = hyp[i+1]
			}
		case 90: // north and south
			if i-width >= 0 {
				a = hyp[i-width]
			}
			if i+width < len(hyp) {
				b = hyp[i+width]
			}
		case 135: // north west and south east
			if col > 0 && i-width-1 >= 0 {
				a = hyp[i-width-1]
			}
			if col < width-1 && i+width+1 < len(hyp) {
				b = hyp[i+width+1]
			}
		case 45: // north east and south west
			if col < width-1 && i-width+1 >= 0 {
				a = hyp[i-width+1]
			}
			if col > 0 && i+width-1 < len(hyp) {
				b = hyp[i+width-1]
			}
		default:
			// Unknown direction: suppress.
			out = append(out, 0)
			continue
		}
		if hyp[i] > a && hyp[i] > b {
			out = append(out, hyp[i])
		} else {
			out = append(out, 0)
		}
	}
	return
}
// xG approximates the horizontal derivative at (x, y) using the
// horizontal Sobel kernel.
func xG(a image.Image, x, y int) int {
	horizontal := []int{
		-1, 0, 1,
		-2, 0, 2,
		-1, 0, 1,
	}
	return processKern(horizontal, a, x, y)
}
// yG approximates the vertical derivative at (x, y) using the vertical
// Sobel kernel.
func yG(a image.Image, x, y int) int {
	vertical := []int{
		-1, -2, -1,
		0, 0, 0,
		1, 2, 1,
	}
	return processKern(vertical, a, x, y)
}
// processKern applies the 3x3 kernel k around (x, y), summing kernel
// weight times pixel luminance over the window CalcBounds yields.
// NOTE(review): CalcBounds clips the window at image borders, but the
// kernel index c always starts at 0, so near borders the weights shift
// relative to the window center — presumably tolerated by this detector,
// but worth confirming.
func processKern(k []int, img image.Image, x, y int) int {
	c, xg := 0, 0
	sx, sy, ex, ey := CalcBounds(img, x, y, 1)
	for y := sy; y <= ey; y++ {
		for x := sx; x <= ex; x++ {
			avg := CalcLum(img, x, y) // pixel luminance
			xg += k[c] * avg
			c++
		}
	}
	return xg
}
package meta
import (
"encoding/json"
"fmt"
"math"
)
// entryType discriminates which of Entry's typed fields is valid.
type entryType int

const (
	// The zero value is deliberately skipped so an unset Entry is
	// distinguishable from every valid type.
	_ entryType = iota
	metaStringType
	metaInt64Type
	metaUInt64Type
	metaFloat64Type
	metaBoolType
)
// Data is a map of meta data values. No setter and getter methods are
// implemented for this, callers are expected to add and remove entries as they
// would from a normal map.
type Data map[string]Entry

// Clone returns a copy of d. A nil map is returned as nil.
func (d Data) Clone() Data {
	if d == nil {
		return nil
	}
	// Pre-size the copy to avoid rehashing while inserting.
	cpy := make(Data, len(d))
	for k, v := range d {
		cpy[k] = v
	}
	return cpy
}
// Entry is an entry in the metadata set. The typed value may be bool, float64,
// int64, uint64, or string; typ records which field currently holds it.
type Entry struct {
	s   string  // valid when typ == metaStringType
	i   int64   // valid when typ == metaInt64Type
	u   uint64  // valid when typ == metaUInt64Type
	f   float64 // valid when typ == metaFloat64Type
	b   bool    // valid when typ == metaBoolType
	typ entryType
}
// Bool returns a new bool Entry; all other fields stay at their zero value.
func Bool(b bool) Entry { return Entry{b: b, typ: metaBoolType} }

// Float64 returns a new float64 Entry.
func Float64(f float64) Entry { return Entry{f: f, typ: metaFloat64Type} }

// Int64 returns a new int64 Entry.
func Int64(i int64) Entry { return Entry{i: i, typ: metaInt64Type} }

// UInt64 returns a new uint64 Entry.
func UInt64(u uint64) Entry { return Entry{u: u, typ: metaUInt64Type} }

// String returns a new string Entry.
func String(s string) Entry { return Entry{s: s, typ: metaStringType} }
// Bool returns the bool value of e; ok reports whether e holds a bool.
func (e Entry) Bool() (value, ok bool) { return e.b, e.typ == metaBoolType }

// Float64 returns the float64 value of e and whether e holds a float64.
func (e Entry) Float64() (float64, bool) { return e.f, e.typ == metaFloat64Type }

// Int64 returns the int64 value of e and whether e holds an int64.
func (e Entry) Int64() (int64, bool) { return e.i, e.typ == metaInt64Type }

// UInt64 returns the uint64 value of e and whether e holds a uint64.
func (e Entry) UInt64() (uint64, bool) { return e.u, e.typ == metaUInt64Type }
// String renders e's value as text. Floats are printed with up to 15
// significant digits; an unset Entry renders like a nil value.
func (e Entry) String() string {
	switch e.typ {
	case metaStringType:
		return e.s
	case metaInt64Type:
		return fmt.Sprintf("%v", e.i)
	case metaUInt64Type:
		return fmt.Sprintf("%v", e.u)
	case metaFloat64Type:
		return fmt.Sprintf("%.15g", e.f)
	case metaBoolType:
		return fmt.Sprintf("%v", e.b)
	}
	return fmt.Sprintf("%v", nil)
}
// IsString returns true if e is a string value.
func (e Entry) IsString() bool {
	return e.typ == metaStringType
}
// Interface returns e's underlying value as an interface{}. It is meant
// for type switches and for printing the dynamic type via "%T"; an
// unset Entry yields nil.
func (e Entry) Interface() interface{} {
	switch e.typ {
	case metaStringType:
		return e.s
	case metaInt64Type:
		return e.i
	case metaUInt64Type:
		return e.u
	case metaFloat64Type:
		return e.f
	case metaBoolType:
		return e.b
	}
	return nil
}
// MarshalJSON implements the "encoding/json".Marshaller interface.
// NaN floats are encoded as JSON null, since NaN is not valid JSON;
// an unset Entry also encodes as null.
// NOTE(review): +/-Inf is not special-cased and will make json.Marshal
// return an error — confirm whether Inf should also map to null.
func (e Entry) MarshalJSON() ([]byte, error) {
	switch e.typ {
	case metaBoolType:
		return json.Marshal(e.b)
	case metaFloat64Type:
		if math.IsNaN(e.f) {
			return json.Marshal(nil)
		}
		return json.Marshal(e.f)
	case metaInt64Type:
		return json.Marshal(e.i)
	case metaUInt64Type:
		return json.Marshal(e.u)
	case metaStringType:
		return json.Marshal(e.s)
	default:
		return json.Marshal(nil)
	}
}
// UnmarshalJSON implements the "encoding/json".Unmarshaller interface.
// The probe order matters: bool, string, int64, uint64, then float64.
// Integers are tried before floats so that "5" becomes Int64(5), and
// uint64 only triggers for values that overflow int64. A JSON null
// decodes as Float64(NaN).
func (e *Entry) UnmarshalJSON(raw []byte) error {
	var b *bool
	if json.Unmarshal(raw, &b) == nil && b != nil {
		*e = Bool(*b)
		return nil
	}
	var s *string
	if json.Unmarshal(raw, &s) == nil && s != nil {
		*e = String(*s)
		return nil
	}
	var i *int64
	if json.Unmarshal(raw, &i) == nil && i != nil {
		*e = Int64(*i)
		return nil
	}
	var u *uint64
	if json.Unmarshal(raw, &u) == nil && u != nil {
		*e = UInt64(*u)
		return nil
	}
	var f *float64
	if json.Unmarshal(raw, &f) == nil {
		if f != nil {
			*e = Float64(*f)
		} else {
			// null parses as a nil *float64; represent it as NaN.
			*e = Float64(math.NaN())
		}
		return nil
	}
	return fmt.Errorf("unable to parse %q as meta entry", raw)
}
package fractal
import (
"math"
)
// Plane is a rasterized view of a complex region: a width x height grid
// of per-pixel values produced by the set's algorithm with the given
// iteration budget.
type Plane struct {
	complexSet ComplexSet
	width      int
	height     int
	iterations int
	values     [][]int // values[x][y] holds the computed value per pixel
}
// Width returns the plane's pixel width.
func (p Plane) Width() int {
	return p.width
}

// Height returns the plane's pixel height.
func (p Plane) Height() int {
	return p.height
}

// ComplexSet returns the complex region and algorithm backing this plane.
func (p Plane) ComplexSet() ComplexSet {
	return p.complexSet
}
// XStep returns the real-axis distance covered by one pixel column.
func (p Plane) XStep() float64 {
	return p.complexSet.Real.Length() / float64(p.width)
}

// YStep returns the imaginary-axis distance covered by one pixel row.
func (p Plane) YStep() float64 {
	return p.complexSet.Imaginary.Length() / float64(p.height)
}
// Deviation returns the standard deviation of all stored values.
func (p Plane) Deviation() float64 {
	return deviation(p.values)
}

// Box returns a box covering the whole plane.
func (p Plane) Box() Box {
	return Box{0, 0, p.width, p.height}
}

// Value returns the value stored for pixel (x, y).
func (p Plane) Value(x, y int) int {
	return p.values[x][y]
}
func deviation(plane [][]int) float64 {
m := mean(plane)
sum := 0.0
count := 0.0
for _, col := range plane {
for _, val := range col {
count++
sum += math.Pow(float64(val)-m, 2.0)
}
}
return math.Sqrt(sum / count)
}
func mean(plane [][]int) float64 {
count := 0
sum := 0
for _, col := range plane {
for _, val := range col {
count++
sum += val
}
}
return float64(sum) / float64(count)
}
// crop extracts the sub-plane described by box from plane. Columns and
// rows outside the box are skipped; planes smaller than the box simply
// yield fewer populated columns, as before.
func crop(plane [][]int, box Box) [][]int {
	part := make([][]int, box.Width)
	px := 0
	for x, col := range plane {
		if x < box.X || x >= box.X+box.Width {
			continue
		}
		partCol := make([]int, box.Height)
		py := 0
		for y, val := range col {
			if y < box.Y || y >= box.Y+box.Height {
				continue
			}
			partCol[py] = val
			py++
		}
		part[px] = partCol
		px++
	}
	return part
}
// Crop returns a new plane restricted to the pixel region b. The complex
// region is re-derived from the pixel box via the per-pixel step sizes,
// and the stored values are cropped to match.
func (p Plane) Crop(b Box) Plane {
	// Map the pixel box back into complex coordinates.
	xstart := float64(b.X)*p.XStep() + p.complexSet.Real.Start
	xend := float64(b.Width)*p.XStep() + xstart
	ystart := float64(b.Y)*p.YStep() + p.complexSet.Imaginary.Start
	yend := float64(b.Height)*p.YStep() + ystart
	zoomSet := ComplexSet{
		Real:      Range{xstart, xend},
		Imaginary: Range{ystart, yend},
		Algorithm: p.complexSet.Algorithm,
	}
	return Plane{
		complexSet: zoomSet,
		width:      b.Width,
		height:     b.Height,
		iterations: p.iterations,
		values:     crop(p.values, b),
	}
}
// Scale recomputes the plane at the given pixel resolution using the
// same complex region and iteration budget.
func (p Plane) Scale(width int, height int) Plane {
	return p.complexSet.Plane(width, height, p.iterations)
}
package hangul
// IsLead reports whether r is a lead consonant.
func IsLead(r rune) bool {
	return LeadG <= r && r <= LeadH
}
// IsMedial reports whether r is a medial vowel.
func IsMedial(r rune) bool {
	return MedialA <= r && r <= MedialI
}
// IsTail reports whether r is a tail consonant.
func IsTail(r rune) bool {
	return TailG <= r && r <= TailH
}
// IsJaeum reports whether r is a Hangul jaeum: a compatibility jaeum,
// a lead consonant or a tail consonant.
func IsJaeum(r rune) bool {
	return (G <= r && r <= H) || IsLead(r) || IsTail(r)
}
// IsMoeum reports whether r is a Hangul moeum: a compatibility moeum or
// a medial vowel.
func IsMoeum(r rune) bool {
	return (A <= r && r <= I) || IsMedial(r)
}
// multiElements maps each compound compatibility jamo to the sequence of
// single-element jamo it is composed of.
var multiElements = map[rune][]rune{
	GG:  []rune{G, G},
	GS:  []rune{G, S},
	NJ:  []rune{N, J},
	NH:  []rune{N, H},
	DD:  []rune{D, D},
	LG:  []rune{L, G},
	LM:  []rune{L, M},
	LB:  []rune{L, B},
	LS:  []rune{L, S},
	LT:  []rune{L, T},
	LP:  []rune{L, P},
	LH:  []rune{L, H},
	BB:  []rune{B, B},
	BS:  []rune{B, S},
	SS:  []rune{S, S},
	JJ:  []rune{J, J},
	AE:  []rune{A, I},
	E:   []rune{EO, I},
	YAE: []rune{YA, I},
	YE:  []rune{YEO, I},
	WA:  []rune{O, A},
	WAE: []rune{O, A, I},
	OE:  []rune{O, I},
	WEO: []rune{U, EO},
	WE:  []rune{U, E},
	WI:  []rune{U, I},
	YI:  []rune{EU, I},
}
// SplitMultiElement splits multi-element compatibility jamo
func SplitMultiElement(r rune) ([]rune, bool) {
r = CompatJamo(r)
es, ok := multiElements[r]
return es, ok
}
// toCompatJamo maps lead and tail consonants onto their compatibility
// jamo equivalents; medial vowels are handled arithmetically in
// CompatJamo instead.
var toCompatJamo = map[rune]rune{
	LeadG:  G,
	TailG:  G,
	LeadGG: GG,
	TailGG: GG,
	TailGS: GS,
	LeadN:  N,
	TailN:  N,
	TailNJ: NJ,
	TailNH: NH,
	LeadD:  D,
	TailD:  D,
	LeadDD: DD,
	LeadR:  L,
	TailL:  L,
	TailLG: LG,
	TailLM: LM,
	TailLB: LB,
	TailLS: LS,
	TailLT: LT,
	TailLP: LP,
	TailLH: LH,
	LeadM:  M,
	TailM:  M,
	LeadB:  B,
	TailB:  B,
	LeadBB: BB,
	TailBS: BS,
	LeadS:  S,
	TailS:  S,
	LeadSS: SS,
	TailSS: SS,
	LeadZS: ZS,
	TailNG: ZS,
	LeadJ:  J,
	TailJ:  J,
	LeadJJ: JJ,
	LeadC:  C,
	TailC:  C,
	LeadK:  K,
	TailK:  K,
	LeadT:  T,
	TailT:  T,
	LeadP:  P,
	TailP:  P,
	LeadH:  H,
	TailH:  H,
}
// CompatJamo converts a lead, medial or tail jamo into its compatibility
// jamo form. Runes that are already compatibility jamo pass through
// unchanged; anything unrecognized yields 0.
func CompatJamo(r rune) rune {
	switch {
	case G <= r && r <= H, A <= r && r <= I:
		return r // already compatibility jamo
	case MedialA <= r && r <= MedialI:
		return r - medialBase + A
	}
	if c, ok := toCompatJamo[r]; ok {
		return c
	}
	return 0
}
// toLead maps compatibility jaeum onto their lead-consonant forms.
var toLead = map[rune]rune{
	G:  LeadG,
	GG: LeadGG,
	N:  LeadN,
	D:  LeadD,
	DD: LeadDD,
	L:  LeadR,
	M:  LeadM,
	B:  LeadB,
	BB: LeadBB,
	S:  LeadS,
	SS: LeadSS,
	ZS: LeadZS,
	J:  LeadJ,
	JJ: LeadJJ,
	C:  LeadC,
	K:  LeadK,
	T:  LeadT,
	P:  LeadP,
	H:  LeadH,
}
// Lead converts a compatibility jaeum to the corresponding lead
// consonant, or returns 0 when c has no lead form.
func Lead(c rune) rune {
	if LeadG <= c && c <= LeadH {
		return c // already a lead consonant
	}
	lead, ok := toLead[c]
	if !ok {
		return 0
	}
	return lead
}
// Medial converts a compatibility moeum to the corresponding medial
// vowel, or returns 0 for anything else.
func Medial(c rune) rune {
	if MedialA <= c && c <= MedialI {
		return c // already a medial vowel
	}
	if A <= c && c <= I {
		return c - A + medialBase
	}
	return 0
}
// toTail maps compatibility jaeum onto their tail-consonant forms.
var toTail = map[rune]rune{
	G:  TailG,
	GG: TailGG,
	GS: TailGS,
	N:  TailN,
	NJ: TailNJ,
	NH: TailNH,
	D:  TailD,
	L:  TailL,
	LG: TailLG,
	LM: TailLM,
	LB: TailLB,
	LS: TailLS,
	LT: TailLT,
	LP: TailLP,
	LH: TailLH,
	M:  TailM,
	B:  TailB,
	BS: TailBS,
	S:  TailS,
	SS: TailSS,
	ZS: TailNG,
	J:  TailJ,
	C:  TailC,
	K:  TailK,
	T:  TailT,
	P:  TailP,
	H:  TailH,
}
// Tail converts a compatibility jaeum to the corresponding tail
// consonant, or returns 0 when c has no tail form.
func Tail(c rune) rune {
	if TailG <= c && c <= TailH {
		return c // already a tail consonant
	}
	tail, ok := toTail[c]
	if !ok {
		return 0
	}
	return tail
}
// leadIdx maps a lead consonant to its syllable-composition index;
// ok is false for runes outside the lead range.
func leadIdx(l rune) (int, bool) {
	idx := int(l) - leadBase
	if idx < 0 || idx > maxLeadIdx {
		return 0, false
	}
	return idx, true
}
// medialIdx maps a medial vowel to its syllable-composition index;
// ok is false for runes outside the medial range.
func medialIdx(v rune) (int, bool) {
	idx := int(v) - medialBase
	if idx < 0 || idx > maxMedialIdx {
		return 0, false
	}
	return idx, true
}
func tailIdx(t rune) (int, bool) {
if t == 0 {
// A hangul syllable can have no tail consonent.
return 0, true
}
i := int(t) - tailBase
if 0 > i || i > maxTailIdx {
return 0, false
}
return i + 1, true
} | jamo.go | 0.637031 | 0.496216 | jamo.go | starcoder |
package ext4
import (
"encoding/binary"
"fmt"
"io"
)
// ExtentHeader prefixes every node of an ext4 extent tree.
type ExtentHeader struct {
	Magic      uint16 // Magic number, 0xF30A.
	Entries    uint16 // Number of valid entries following the header.
	Max        uint16 // Maximum number of entries that could follow the header.
	Depth      uint16 // Depth of this extent node in the extent tree. 0 = this extent node points to data blocks; otherwise, this extent node points to other extent nodes. The extent tree can be at most 5 levels deep: a logical block number can be at most 2^32, and the smallest n that satisfies 4*(((blocksize - 12)/12)^n) >= 2^32 is 5.
	Generation uint32 // Generation of the tree. (Used by Lustre, but not standard ext4).
}
// ExtentIdx is an interior extent-tree entry (struct ext4_extent_idx):
// it points at the next-lower node covering file blocks from Block onward.
type ExtentIdx struct {
	Block uint32 // This index node covers file blocks from 'block' onward.
	LeafLo uint32 // Lower 32-bits of the block number of the extent node that is the next level lower in the tree. The tree node pointed to can be either another internal node or a leaf node, described below.
	LeafHi uint16 // Upper 16-bits of the previous field.
	_ [2]byte // Padding to match the on-disk 12-byte layout.
}
// Leaf returns the full 48-bit block number of the child extent node,
// assembled from the LeafHi/LeafLo split fields.
func (idx ExtentIdx) Leaf() int64 {
	return int64(idx.LeafHi)<<32 + int64(idx.LeafLo)
}
// Extent is a leaf extent-tree entry (struct ext4_extent): a contiguous
// run of physical blocks backing file blocks starting at Block.
type Extent struct {
	Block uint32 // First file block number that this extent covers.
	Len uint16 // Number of blocks covered by extent. If the value of this field is <= 32768, the extent is initialized. If the value of the field is > 32768, the extent is uninitialized and the actual extent length is ee_len - 32768. Therefore, the maximum length of a initialized extent is 32768 blocks, and the maximum length of an uninitialized extent is 32767.
	StartHi uint16 // Upper 16-bits of the block number to which this extent points.
	StartLo uint32 // Lower 32-bits of the block number to which this extent points.
}
// Start returns the full 48-bit physical block number this extent points
// at, assembled from the StartHi/StartLo split fields.
func (e Extent) Start() int64 {
	return int64(e.StartHi)<<32 + int64(e.StartLo)
}
// GetExtents returns all leaf extents of the inode's extent tree, in
// traversal order. Inodes that do not use extents (InodeFlagExtents
// unset) are not supported and return errNotImplemented.
func (er Reader) GetExtents(inode Inode) ([]Extent, error) {
	if inode.Flags&InodeFlagExtents == 0 {
		return nil, errNotImplemented
	}
	inodeData := inode.GetDataReader()
	return er.readExtents(inodeData)
}
// readExtents decodes one extent-tree node from r. Leaf nodes (Depth 0)
// yield extents directly; interior nodes are followed recursively by
// seeking the underlying reader er.s to each child node's block.
// Extents are returned in tree-traversal order.
func (er Reader) readExtents(r io.Reader) ([]Extent, error) {
	var eh ExtentHeader
	if err := binary.Read(r, binary.LittleEndian, &eh); err != nil {
		return nil, err
	}
	if eh.Magic != 0xF30A {
		// Lowercased per Go error-string convention (ST1005).
		return nil, fmt.Errorf("extent header magic did not match 0x%X!=0xF30A", eh.Magic)
	}
	if eh.Depth == 0 {
		// Leaf node: the entries are the extents themselves.
		extents := make([]Extent, eh.Entries)
		for i := range extents {
			if err := binary.Read(r, binary.LittleEndian, &extents[i]); err != nil {
				return nil, err
			}
		}
		return extents, nil
	}
	// Interior node: the entries are indexes pointing at lower-level nodes.
	extentIndexes := make([]ExtentIdx, eh.Entries)
	for i := range extentIndexes {
		if err := binary.Read(r, binary.LittleEndian, &extentIndexes[i]); err != nil {
			return nil, err
		}
	}
	extents := []Extent{}
	for _, idx := range extentIndexes {
		// The original ignored the Seek error, which could silently
		// decode extents from the wrong offset.
		if _, err := er.s.Seek(er.blockOffset(idx.Leaf()), io.SeekStart); err != nil {
			return nil, err
		}
		subextents, err := er.readExtents(er.s)
		if err != nil {
			return nil, err
		}
		extents = append(extents, subextents...)
	}
	return extents, nil
} | ext4/extent.go | 0.589007 | 0.472197 | extent.go | starcoder |
package mediamachine
// WatermarkPosition are references to named, pre-defined watermark locations
type WatermarkPosition = string
const (
	// PositionTopLeft places a watermark in the top left corner of the output
	PositionTopLeft WatermarkPosition = "topLeft"
	// PositionTopRight places a watermark in the top right corner of the output
	PositionTopRight WatermarkPosition = "topRight"
	// PositionBottomLeft places a watermark in the bottom left corner of the output
	PositionBottomLeft WatermarkPosition = "bottomLeft"
	// PositionBottomRight places a watermark in the bottom right corner of the output
	PositionBottomRight WatermarkPosition = "bottomRight"
)
// WatermarkText can be used for a simple text Watermark overlaid on the output
type WatermarkText struct {
	Text string // The text to display as the Watermark
	FontSize uint // Optional - defaults to 10
	FontColor string // Optional - defaults to black
	Opacity float32 // Opacity of Watermark between 0 and 1 inclusive
	Position WatermarkPosition // Where the Watermark should be placed. See WatermarkPosition
}
// WatermarkImageURL can be used to supply an image url which will be used as a Watermark
type WatermarkImageURL struct {
	URL string // URL where the Watermark image should be fetched from (currently, bucket urls are not supported)
	Height uint8 // Height of the Watermark. NOTE(review): uint8 caps this at 255 - confirm intended.
	Width uint8 // Width of the Watermark. NOTE(review): uint8 caps this at 255 - confirm intended.
	Opacity float32 // Opacity of Watermark between 0 and 1 inclusive
	Position WatermarkPosition // Where the Watermark should be placed. See WatermarkPosition
}
// WatermarkImageNamed can be used to provide a reference to a Watermark image uploaded to your mediamachine account
// You can easily upload your Watermark images via account settings. The uploaded image gets a unique name that can be used here.
type WatermarkImageNamed struct {
	ImageName string // Name of a Watermark image uploaded on the mediamachine account
	Height uint8 // Height of the Watermark. NOTE(review): uint8 caps this at 255 - confirm intended.
	Width uint8 // Width of the Watermark. NOTE(review): uint8 caps this at 255 - confirm intended.
	Opacity float32 // Opacity of Watermark between 0 and 1 inclusive
	Position WatermarkPosition // Where the Watermark should be placed. See WatermarkPosition
}
// Watermark can be of multiple types - text watermark, image or a saved image reference watermark.
// The unexported marker method restricts implementations to this package.
type Watermark interface {
	isWatermark()
}
func (WatermarkText) isWatermark() {}
func (WatermarkImageNamed) isWatermark() {}
func (WatermarkImageURL) isWatermark() {}
package day17
// values that Conway's cube can get
const (
	ACTIVE = '#' // cell is active
	INACTIVE = '.' // cell is inactive
)
// Program represents a Conway's cube program
type Program struct {
	Map3D [][][]rune // 3-dimensional grid, used by the Run3D/Count3DActives family
	Map4D [][][][]rune // 4-dimensional grid, used by the Run4D/Count4DActives family
	Rounds int // number of simulation rounds to execute
}
// New3DProgram creates a new Program for a 3d map
func New3DProgram(m [][][]rune, rounds int) *Program {
	return &Program{Map3D: m, Rounds: rounds}
}
// New4DProgram creates a new program for a 4d map
func New4DProgram(m [][][][]rune, rounds int) *Program {
	return &Program{Map4D: m, Rounds: rounds}
}
// Adjacent3DActives counts the active cells among the (up to 26)
// neighbours of (x, y, z), clipping at the grid boundaries.
func (p *Program) Adjacent3DActives(x, y, z int) int {
	count := 0
	for a := x - 1; a <= x+1; a++ {
		for b := y - 1; b <= y+1; b++ {
			for c := z - 1; c <= z+1; c++ {
				if a == x && b == y && c == z {
					continue // skip the cell itself
				}
				if a < 0 || a >= len(p.Map3D) || b < 0 || b >= len(p.Map3D[a]) || c < 0 || c >= len(p.Map3D[a][b]) {
					continue // outside the grid
				}
				if p.Map3D[a][b][c] == ACTIVE {
					count++
				}
			}
		}
	}
	return count
}
// Adjacent4DActives counts the active cells among the (up to 80)
// neighbours of (x, y, z, w), clipping at the grid boundaries.
func (p *Program) Adjacent4DActives(x, y, z, w int) int {
	actives := 0
	for i := x - 1; i <= x+1; i++ {
		for j := y - 1; j <= y+1; j++ {
			for k := z - 1; k <= z+1; k++ {
				for l := w - 1; l <= w+1; l++ {
					// Bounds check first, then exclude the centre cell itself.
					if (i >= 0 && i < len(p.Map4D)) && (j >= 0 && j < len(p.Map4D[i])) && (k >= 0 && k < len(p.Map4D[i][j])) && (l >= 0 && l < len(p.Map4D[i][j][k])) {
						if (i != x || j != y || k != z || l != w) && p.Map4D[i][j][k][l] == ACTIVE {
							actives++
						}
					}
				}
			}
		}
	}
	return actives
}
// Run3D advances the 3D grid by p.Rounds generations. Each generation is
// computed against a snapshot copy so all cells update simultaneously:
// an active cell survives with 2 or 3 active neighbours; an inactive
// cell activates with exactly 3.
func (p *Program) Run3D() {
	for i := 0; i < p.Rounds; i++ {
		// Work on a copy so neighbour counts use the previous generation.
		newMap := copy3DMap(p.Map3D)
		for i := range p.Map3D { // NOTE: shadows the round counter above
			for j := range p.Map3D[i] {
				for k := range p.Map3D[i][j] {
					v := p.Adjacent3DActives(i, j, k)
					if p.Map3D[i][j][k] == ACTIVE {
						if v == 2 || v == 3 {
							newMap[i][j][k] = ACTIVE
						} else {
							newMap[i][j][k] = INACTIVE
						}
					} else if p.Map3D[i][j][k] == INACTIVE {
						if v == 3 {
							newMap[i][j][k] = ACTIVE
						}
					}
				}
			}
		}
		p.Map3D = newMap
	}
}
// Run4D advances the 4D grid by p.Rounds generations, using the same
// rules as Run3D (survive on 2-3 active neighbours, activate on exactly
// 3) computed against a snapshot copy of the previous generation.
func (p *Program) Run4D() {
	for i := 0; i < p.Rounds; i++ {
		// Work on a copy so neighbour counts use the previous generation.
		newMap := copy4DMap(p.Map4D)
		for i := range p.Map4D { // NOTE: shadows the round counter above
			for j := range p.Map4D[i] {
				for k := range p.Map4D[i][j] {
					for l := range p.Map4D[i][j][k] {
						v := p.Adjacent4DActives(i, j, k, l)
						if p.Map4D[i][j][k][l] == ACTIVE {
							if v == 2 || v == 3 {
								newMap[i][j][k][l] = ACTIVE
							} else {
								newMap[i][j][k][l] = INACTIVE
							}
						} else if p.Map4D[i][j][k][l] == INACTIVE {
							if v == 3 {
								newMap[i][j][k][l] = ACTIVE
							}
						}
					}
				}
			}
		}
		p.Map4D = newMap
	}
}
// Count3DActives counts the number of active positions in
// the whole conway's map
func (p *Program) Count3DActives() int {
	total := 0
	for _, plane := range p.Map3D {
		for _, row := range plane {
			for _, cell := range row {
				if cell == ACTIVE {
					total++
				}
			}
		}
	}
	return total
}
// Count4DActives counts the number of active positions in
// the whole conway's map
func (p *Program) Count4DActives() int {
	total := 0
	for _, cube := range p.Map4D {
		for _, plane := range cube {
			for _, row := range plane {
				for _, cell := range row {
					if cell == ACTIVE {
						total++
					}
				}
			}
		}
	}
	return total
}
// copy3DMap returns a deep copy of a 3-dimensional rune grid; mutating
// the copy never affects the original.
func copy3DMap(m [][][]rune) [][][]rune {
	out := make([][][]rune, len(m))
	for i, plane := range m {
		out[i] = make([][]rune, len(plane))
		for j, row := range plane {
			dst := make([]rune, len(row))
			copy(dst, row)
			out[i][j] = dst
		}
	}
	return out
}
// copy4DMap returns a deep copy of a 4-dimensional rune grid; mutating
// the copy never affects the original.
func copy4DMap(m [][][][]rune) [][][][]rune {
	out := make([][][][]rune, len(m))
	for i, cube := range m {
		out[i] = make([][][]rune, len(cube))
		for j, plane := range cube {
			out[i][j] = make([][]rune, len(plane))
			for k, row := range plane {
				dst := make([]rune, len(row))
				copy(dst, row)
				out[i][j][k] = dst
			}
		}
	}
	return out
}
func createMultiplied3DMap(size int, origMap [][]rune) [][][]rune {
newSize := size * 5
m := make([][][]rune, newSize)
for i := range m {
m[i] = make([][]rune, newSize)
for j := range m[i] {
m[i][j] = make([]rune, newSize)
}
}
middle := newSize / 2
for i := range origMap {
for j := range origMap[i] {
m[middle][middle+i][middle+j] = origMap[i][j]
}
}
// paint empties to inactive
for i := range m {
for j := range m[i] {
for k := range m[i][j] {
if m[i][j][k] != ACTIVE {
m[i][j][k] = INACTIVE
}
}
}
}
return m
}
func createMultiplied4DMap(size int, origMap [][]rune) [][][][]rune {
newSize := size * 5
m := make([][][][]rune, newSize)
for i := range m {
m[i] = make([][][]rune, newSize)
for j := range m[i] {
m[i][j] = make([][]rune, newSize)
for k := range m[i][j] {
m[i][j][k] = make([]rune, newSize)
}
}
}
middle := newSize / 2
for i := range origMap {
for j := range origMap[i] {
m[middle][middle][middle+i][middle+j] = origMap[i][j]
}
}
// paint empties to inactive
for i := range m {
for j := range m[i] {
for k := range m[i][j] {
for l := range m[i][j][k] {
if m[i][j][k][l] != ACTIVE {
m[i][j][k][l] = INACTIVE
}
}
}
}
}
return m
}
// FirstPart creates a conway cube, runs a number of
// rounds and returns the number of active cells
func FirstPart(lines []string, rounds int) int {
firstMap := make([][]rune, len(lines))
for i := range firstMap {
firstMap[i] = []rune(lines[i])
}
mMap := createMultiplied3DMap(len(firstMap), firstMap)
p := New3DProgram(mMap, rounds)
p.Run3D()
return p.Count3DActives()
}
// SecondPart creates a conway 4d cube, runs a number of
// rounds and returns the number of active cells
func SecondPart(lines []string, rounds int) int {
	grid := make([][]rune, len(lines))
	for i, line := range lines {
		grid[i] = []rune(line)
	}
	p := New4DProgram(createMultiplied4DMap(len(grid), grid), rounds)
	p.Run4D()
	return p.Count4DActives()
} | day17/day17.go | 0.598195 | 0.52208 | day17.go | starcoder |
package api
const (
	// ClientAuth authentication types
	// ServiceToken as a specific key for the service
	ServiceToken AuthType = "service_token"
	// ProviderKey for all services under an account
	ProviderKey AuthType = "provider_key"
)
const (
	// Rate limiting extension keys - see https://github.com/3scale/apisonator/blob/v2.96.2/docs/rfcs/api-extensions.md
	// and https://github.com/3scale/apisonator/blob/v2.96.2/docs/extensions.md#limit_headers-boolean
	// LimitExtension is the key to enable this extension when calling 3scale backend - set to 1 to enable
	LimitExtension = "limit_headers"
	// HierarchyExtension is the key to enabling hierarchy feature. Set its bool value to 1 to enable.
	// https://github.com/3scale/apisonator/issues/75
	HierarchyExtension = "hierarchy"
	// FlatUsageExtension is the key to enabling the "flat usage" feature for reporting purposes - set to 1 to enable
	// Enabling this feature implies that the backend will not calculate the relationships between hierarchies and
	// pushes this compute responsibility back to the client.
	// Therefore when enabled, it is the clients responsibility to ensure that parent --> child metrics
	// are calculated correctly. This feature is supported in versions >= 2.8
	// Use the GetVersion() function to ensure suitability or risk incurring unreported state.
	FlatUsageExtension = "flat_usage"
)
// Period wraps the known rate limiting periods as defined in 3scale
type Period int
// Predefined, known LimitPeriods which can be used in 3scale rate limiting functionality
// These values represent time durations, ordered from shortest (Minute = 0)
// to Eternity.
const (
	Minute Period = iota
	Hour
	Day
	Week
	Month
	Year
	Eternity
)
// AuthType maps to a known client authentication pattern.
// Known values are ServiceToken ("service_token") and ProviderKey ("provider_key").
type AuthType string
// ClientAuth holds the key type (ProviderKey, ServiceToken) and their respective value for
// authenticating the client against a given service.
type ClientAuth struct {
	Type AuthType
	Value string
}
// Extensions are features or behaviours that are not part of the standard API for a variety of reasons
// See https://github.com/3scale/apisonator/blob/v2.96.2/docs/extensions.md for context
type Extensions map[string]string
// Hierarchy maps a parent metric to its child metrics
type Hierarchy map[string][]string
// Metrics let you track the usage of your API in 3scale;
// it maps a metric name to its usage count.
type Metrics map[string]int
// Params that are embedded in each Transaction to 3scale API
// This structure simplifies the formatting of the transaction from the callers perspective
// It is used to authenticate the application
type Params struct {
	// AppID is used in the Application Identifier and Key pairs authentication method.
	// It is mutually exclusive with the API Key authentication method outlined below
	// therefore if both are provided, the value defined in 'UserKey' will be prioritised.
	AppID string `json:"app_id"`
	// AppKey is an optional, secret key which can be used in conjunction with 'AppID'
	AppKey string `json:"app_key"`
	// Referrer is an optional value which is required only if referrer filtering is enabled.
	// If special value '*' (wildcard) is passed, the referrer check is bypassed.
	Referrer string `json:"referrer"`
	// UserID is an optional value for identifying an end user.
	// Required only when the application is rate limiting end users.
	UserID string `json:"user_id"`
	// UserKey is the identifier and shared secret of the application if the authentication pattern is API Key.
	// Mutually exclusive with, and prioritised over 'AppID'.
	UserKey string `json:"user_key"`
}
// PeriodWindow holds information about the start and end time of the specified period
// Start and End are unix timestamp
type PeriodWindow struct {
	Period Period
	Start int64
	End int64
}
// RateLimits holds the values returned when using rate limiting extension
type RateLimits struct {
	LimitRemaining int
	LimitReset int
}
// Service represents a 3scale service marked by its identifier (service_id)
type Service string
// Transaction holds the params and optional additions that will be sent
// to 3scale as query parameters or headers.
type Transaction struct {
	Metrics Metrics
	Params Params
	// Timestamp is a unix timestamp.
	// Timestamp will only be taken into account when calling the Report API
	Timestamp int64
}
// UsageReport for rate limiting information gathered from using extensions:
// the window it applies to plus the limit's maximum and current values.
type UsageReport struct {
	PeriodWindow PeriodWindow
	MaxValue int
	CurrentValue int
}
type UsageReports map[string][]UsageReport | threescale/api/types.go | 0.840455 | 0.4206 | types.go | starcoder |
package geom
import (
"image"
"math"
)
// Vec2 is a 2D vector with components of any int or float type T.
type Vec2[T intfloat] struct {
	X, Y T
}
// Vec2f is a float64-component 2D vector.
type Vec2f = Vec2[float64]
// Vec2i is an int-component 2D vector.
type Vec2i = Vec2[int]
// PolarToVec2 converts polar coordinates (radius r, angle theta in
// radians) to a Cartesian Vec2f.
func PolarToVec2(r, theta float64) Vec2f {
	s, c := math.Sincos(theta)
	return Vec2f{c * r, s * r}
}
// Add returns the component-wise sum v + other.
func (v Vec2[T]) Add(other Vec2[T]) Vec2[T] {
	return Vec2[T]{v.X + other.X, v.Y + other.Y}
}
// Eq reports whether v and other have identical components.
func (v Vec2[T]) Eq(other Vec2[T]) bool {
	return v == other
}
// LengthSq returns the squared length of v (avoids the sqrt of Length).
func (v Vec2[T]) LengthSq() T {
	return v.X*v.X + v.Y*v.Y
}
// Length returns the Euclidean length of v.
func (v Vec2[T]) Length() float64 {
	return math.Sqrt(float64(v.LengthSq()))
}
// Angle returns the angle of v in radians, computed with math.Atan2(y, x).
func (v Vec2[T]) Angle() float64 {
	return math.Atan2(float64(v.Y), float64(v.X))
}
// Polar returns v in polar form: radius and angle in radians.
func (v Vec2[T]) Polar() (r, theta float64) {
	return v.Length(), v.Angle()
}
// XY returns the two components of v.
func (v Vec2[T]) XY() (x, y T) {
	return v.X, v.Y
}
// Complex64 returns v as a complex64 with X as the real part.
func (v Vec2[T]) Complex64() complex64 {
	return complex(float32(v.X), float32(v.Y))
}
// Complex128 returns v as a complex128 with X as the real part.
func (v Vec2[T]) Complex128() complex128 {
	return complex(float64(v.X), float64(v.Y))
}
// Rect is an axis-aligned 2D rectangle spanning (X0,Y0) to (X1,Y1),
// with components of any int or float type T.
type Rect[T intfloat] struct {
	X0, Y0, X1, Y1 T
}
// Recti is an int-component rectangle.
type Recti = Rect[int]
// Rectf is a float64-component rectangle.
type Rectf = Rect[float64]
// XYWHToRect builds a Rect from an origin point and a width/height.
func XYWHToRect[T intfloat](x, y, w, h T) Rect[T] {
	return Rect[T]{x, y, x + w, y + h}
}
// MinMaxToRect builds a Rect from its minimum and maximum corners.
func MinMaxToRect[T intfloat](min, max Vec2[T]) Rect[T] {
	return Rect[T]{min.X, min.Y, max.X, max.Y}
}
// PosSizeToRect builds a Rect from an origin point and a size vector.
func PosSizeToRect[T intfloat](pos, size Vec2[T]) Rect[T] {
	return Rect[T]{pos.X, pos.Y, pos.X + size.X, pos.Y + size.Y}
}
// Min returns the rectangle's minimum corner (X0, Y0).
func (r Rect[T]) Min() Vec2[T] {
	return Vec2[T]{r.X0, r.Y0}
}
// Max returns the rectangle's maximum corner (X1, Y1).
func (r Rect[T]) Max() Vec2[T] {
	return Vec2[T]{r.X1, r.Y1}
}
// Dx returns the rectangle's width.
func (r Rect[T]) Dx() T {
	return r.X1 - r.X0
}
// Dy returns the rectangle's height.
func (r Rect[T]) Dy() T {
	return r.Y1 - r.Y0
}
// Size returns the rectangle's width and height as a vector.
func (r Rect[T]) Size() Vec2[T] {
	return Vec2[T]{r.X1 - r.X0, r.Y1 - r.Y0}
}
// Empty reports whether the rectangle contains no area.
func (r Rect[T]) Empty() bool {
	return r.X0 >= r.X1 || r.Y0 >= r.Y1
}
// Eq reports whether r and other are equal; all empty rectangles
// compare equal to each other.
func (r Rect[T]) Eq(other Rect[T]) bool {
	return r == other || r.Empty() && other.Empty()
}
// Image converts r to an image.Rectangle, rounding each coordinate
// to the nearest integer.
func (r Rect[T]) Image() image.Rectangle {
	return image.Rect(
		int(math.Round(float64(r.X0))),
		int(math.Round(float64(r.Y0))),
		int(math.Round(float64(r.X1))),
		int(math.Round(float64(r.Y1))),
	)
}
// Line2 is a 2D line segment from (X0,Y0) to (X1,Y1).
type Line2[T intfloat] struct {
	X0, Y0, X1, Y1 T
} | geom2d.go | 0.849379 | 0.682443 | geom2d.go | starcoder |
package rules
import (
"github.com/tomkrush/money/finance"
"strconv"
)
// abs returns the absolute value of an int.
func abs(value int) int {
	if value >= 0 {
		return value
	}
	return -value
}
// Bills contains a list of bill rules and the transactions to match
// against them. Results are computed lazily by Calculate and cached.
type Bills struct {
	Rules []TransactionRule
	Transactions finance.Transactions
	calculated bool // guards the cached fields below; set once by Calculate
	goalAmount finance.Currency // ideal total spend (sum of expected bill amounts, negated)
	actualAmount finance.Currency // total already paid toward bills
	projectedAmount finance.Currency // actual paid plus estimates for unpaid bills
	remainingAmount finance.Currency // projected minus actual (still to be paid)
	bills []Bill
}
// Bill holds the summarized info for a user to understand if the bill has
// been paid, and if so what amount.
type Bill struct {
	Day string // day of month the bill is due, as a decimal string
	Amount string // dollar-formatted amount (actual if paid, expected otherwise)
	Description string
	Paid string // "expense" when a matching transaction was found, "estimate" otherwise
}
// Calculate iterates over bill rules and transactions and internally holds
// the results for future use. It is idempotent: after the first call the
// cached results are reused and subsequent calls return immediately.
func (b *Bills) Calculate() {
	if b.calculated {
		return
	}
	var (
		goalAmount int
		actualAmount int
		projectedAmount int
		bills []Bill
	)
	for _, rule := range b.Rules {
		transaction, ok := b.Transactions.GetByDescription(rule.Bill.Description)
		billedAmount := rule.Bill.Amount
		paid := "estimate"
		if ok {
			// The bill was paid: use the actual transaction amount for
			// both the actual and projected totals.
			actualAmount -= abs(transaction.Amount.Amount)
			projectedAmount -= abs(transaction.Amount.Amount)
			billedAmount = transaction.Amount
			paid = "expense"
		} else {
			// Not paid yet: project using the expected amount.
			projectedAmount -= rule.Bill.Amount.Amount
		}
		bills = append(bills, Bill{
			Description: rule.Bill.Description,
			Day: strconv.Itoa(int(rule.Bill.Day)),
			Amount: billedAmount.FormatToDollars(),
			Paid: paid,
		})
		goalAmount -= rule.Bill.Amount.Amount
	}
	b.goalAmount = finance.NewCurrency(goalAmount)
	b.actualAmount = finance.NewCurrency(actualAmount)
	b.projectedAmount = finance.NewCurrency(projectedAmount)
	b.remainingAmount = finance.NewCurrency(projectedAmount - actualAmount)
	b.bills = bills
	b.calculated = true
}
// List returns a list of bills that either have been or haven't been paid.
// Depending on paid status, the bill will have estimated or actual values.
func (b *Bills) List() []Bill {
	b.Calculate()
	return b.bills
}
// ProjectedAmount returns the amount of money that is going to be spent
// this month based on the actual amount of money already spent on bills plus
// the the remaining amount of unpaid bills.
func (b *Bills) ProjectedAmount() finance.Currency {
	b.Calculate()
	return b.projectedAmount
}
// RemainingAmount returns the amount of money that has yet to be paid to bills.
func (b *Bills) RemainingAmount() finance.Currency {
	b.Calculate()
	return b.remainingAmount
}
// GoalAmount returns the amount of money that will ideally be spent on bills
// this month. This is calculated by adding up the expected bill amounts.
func (b *Bills) GoalAmount() finance.Currency {
	b.Calculate()
	return b.goalAmount
}
// ActualAmount returns the amount of money that has already been spent on bills.
func (b *Bills) ActualAmount() finance.Currency {
	b.Calculate()
	return b.actualAmount
} | rules/bills.go | 0.746416 | 0.433921 | bills.go | starcoder |
package anomalies
import (
"time"
"github.com/olivere/elastic"
)
const (
	// queryMaxSize is the maximum size of an Elastic Search Query
	// (presumably aligned with ES's max result window - confirm before raising).
	queryMaxSize = 10000
)
// createQueryAccountFilter builds an *elastic.TermsQuery matching any of
// the given AWS account identifiers on the "account" field.
func createQueryAccountFilter(accountList []string) *elastic.TermsQuery {
	values := make([]interface{}, 0, len(accountList))
	for _, account := range accountList {
		values = append(values, account)
	}
	return elastic.NewTermsQuery("account", values...)
}
// createQueryTimeRange creates and return a new *elastic.RangeQuery on the
// "date" field covering the duration defined by durationBegin and durationEnd.
func createQueryTimeRange(durationBegin time.Time, durationEnd time.Time) *elastic.RangeQuery {
	return elastic.NewRangeQuery("date").
		From(durationBegin).To(durationEnd)
}
// getElasticSearchParams is used to construct an ElasticSearch *elastic.SearchService
// used to retrieve the anomalies.
// It takes as parameters :
// - accountList []string : A slice of string representing aws account number
// - durationBegin time.Time : A time.Time struct representing the beginning of the time range in the query
// - durationEnd time.Time : A time.Time struct representing the end of the time range in the query
// - client *elastic.Client : an instance of *elastic.Client that represent an Elastic Search client.
// - index string : The Elastic Search index on which to execute the query.
// This function expects arguments passed to it to be sanitized. If they are not, the following cases will make
// it crash :
// - If the client is nil or misconfigured, it will crash
// - If the index is not an index present in the ES, it will crash
func getElasticSearchParams(accountList []string, durationBegin time.Time,
	durationEnd time.Time, client *elastic.Client, index string, anomalyType string) *elastic.SearchService {
	query := elastic.NewBoolQuery()
	// Only filter on accounts when a non-empty list was supplied.
	if len(accountList) > 0 {
		query = query.Filter(createQueryAccountFilter(accountList))
	}
	query = query.Filter(createQueryTimeRange(durationBegin, durationEnd))
	// Results are capped at queryMaxSize and sorted by date descending.
	search := client.Search().Index(index).Type(anomalyType).Size(queryMaxSize).Sort("date", false).Query(query)
	return search
} | costs/anomalies/es_request_constructor.go | 0.677047 | 0.430506 | es_request_constructor.go | starcoder |
package pilosa
import (
"errors"
"fmt"
"strings"
"time"
)
// ErrInvalidTimeQuantum is returned when parsing a time quantum.
var ErrInvalidTimeQuantum = errors.New("invalid time quantum")
// TimeQuantum represents a time granularity for time-based bitmaps.
// It is a string of units, e.g. "YMD" for year+month+day views.
type TimeQuantum string
// HasYear returns true if the quantum contains a 'Y' unit.
func (q TimeQuantum) HasYear() bool { return strings.ContainsRune(string(q), 'Y') }
// HasMonth returns true if the quantum contains a 'M' unit.
func (q TimeQuantum) HasMonth() bool { return strings.ContainsRune(string(q), 'M') }
// HasDay returns true if the quantum contains a 'D' unit.
func (q TimeQuantum) HasDay() bool { return strings.ContainsRune(string(q), 'D') }
// HasHour returns true if the quantum contains a 'H' unit.
func (q TimeQuantum) HasHour() bool { return strings.ContainsRune(string(q), 'H') }
// Valid returns true if q is a valid time quantum value.
// Only contiguous unit runs (and the empty quantum) are valid.
func (q TimeQuantum) Valid() bool {
	switch q {
	case "Y", "YM", "YMD", "YMDH",
		"M", "MD", "MDH",
		"D", "DH",
		"H",
		"":
		return true
	default:
		return false
	}
}
// The following methods are required to implement pflag Value interface.
// Set sets the time quantum value. Note: no validation is performed here.
func (q *TimeQuantum) Set(value string) error {
	*q = TimeQuantum(value)
	return nil
}
// String returns the quantum's unit string.
func (q TimeQuantum) String() string {
	return string(q)
}
// Type returns the type of a time quantum value.
func (q TimeQuantum) Type() string {
	return "TimeQuantum"
}
// ParseTimeQuantum parses v into a time quantum.
// Input is uppercased; invalid values yield ErrInvalidTimeQuantum.
func ParseTimeQuantum(v string) (TimeQuantum, error) {
	q := TimeQuantum(strings.ToUpper(v))
	if !q.Valid() {
		return "", ErrInvalidTimeQuantum
	}
	return q, nil
}
// ViewByTimeUnit returns the view name for time with a given quantum unit.
func ViewByTimeUnit(name string, t time.Time, unit rune) string {
switch unit {
case 'Y':
return fmt.Sprintf("%s_%s", name, t.Format("2006"))
case 'M':
return fmt.Sprintf("%s_%s", name, t.Format("200601"))
case 'D':
return fmt.Sprintf("%s_%s", name, t.Format("20060102"))
case 'H':
return fmt.Sprintf("%s_%s", name, t.Format("2006010215"))
default:
return ""
}
}
// ViewsByTime returns a list of views for a given timestamp, one per
// unit in the quantum, skipping units that produce no view name.
func ViewsByTime(name string, t time.Time, q TimeQuantum) []string {
	views := make([]string, 0, len(q))
	for _, unit := range q {
		if view := ViewByTimeUnit(name, t, unit); view != "" {
			views = append(views, view)
		}
	}
	return views
}
// ViewsByTimeRange returns a list of views to traverse to query a time range.
// It greedily covers [start, end) with the coarsest views available: first
// walking up from small units (hours before a day boundary, days before a
// month boundary, ...) until aligned, then walking back down from large
// units to small ones to cover the remainder.
func ViewsByTimeRange(name string, start, end time.Time, q TimeQuantum) []string {
	t := start
	// Save flags for performance.
	hasYear := q.HasYear()
	hasMonth := q.HasMonth()
	hasDay := q.HasDay()
	hasHour := q.HasHour()
	var results []string
	// Walk up from smallest units to largest units.
	if hasHour || hasDay || hasMonth {
		for t.Before(end) {
			if hasHour {
				if !nextDayGTE(t, end) {
					break
				} else if t.Hour() != 0 {
					// Emit hour views until aligned to a day boundary.
					results = append(results, ViewByTimeUnit(name, t, 'H'))
					t = t.Add(time.Hour)
					continue
				}
			}
			if hasDay {
				if !nextMonthGTE(t, end) {
					break
				} else if t.Day() != 1 {
					// Emit day views until aligned to a month boundary.
					results = append(results, ViewByTimeUnit(name, t, 'D'))
					t = t.AddDate(0, 0, 1)
					continue
				}
			}
			if hasMonth {
				if !nextYearGTE(t, end) {
					break
				} else if t.Month() != 1 {
					// Emit month views until aligned to a year boundary.
					results = append(results, ViewByTimeUnit(name, t, 'M'))
					t = t.AddDate(0, 1, 0)
					continue
				}
			}
			// If a unit exists but isn't set and there are no larger units
			// available then we need to exit the loop because we are no longer
			// making progress.
			break
		}
	}
	// Walk back down from largest units to smallest units.
	for t.Before(end) {
		if hasYear && nextYearGTE(t, end) {
			results = append(results, ViewByTimeUnit(name, t, 'Y'))
			t = t.AddDate(1, 0, 0)
		} else if hasMonth && nextMonthGTE(t, end) {
			results = append(results, ViewByTimeUnit(name, t, 'M'))
			t = t.AddDate(0, 1, 0)
		} else if hasDay && nextDayGTE(t, end) {
			results = append(results, ViewByTimeUnit(name, t, 'D'))
			t = t.AddDate(0, 0, 1)
		} else if hasHour {
			results = append(results, ViewByTimeUnit(name, t, 'H'))
			t = t.Add(time.Hour)
		} else {
			break
		}
	}
	return results
}
func nextYearGTE(t time.Time, end time.Time) bool {
next := t.AddDate(1, 0, 0)
if next.Year() == end.Year() {
return true
}
return end.After(next)
}
func nextMonthGTE(t time.Time, end time.Time) bool {
next := t.AddDate(0, 1, 0)
y1, m1, _ := next.Date()
y2, m2, _ := end.Date()
if (y1 == y2) && (m1 == m2) {
return true
}
return end.After(next)
}
func nextDayGTE(t time.Time, end time.Time) bool {
next := t.AddDate(0, 0, 1)
y1, m1, d1 := next.Date()
y2, m2, d2 := end.Date()
if (y1 == y2) && (m1 == m2) && (d1 == d2) {
return true
}
return end.After(next)
} | time.go | 0.758153 | 0.529385 | time.go | starcoder |
This is an example of a daemon that listens for a stream of copy-and-paste
audit messages from OpenText Exceed TurboX. This example daemon writes each
image to its own file and logs all text copy messages to a single file. This
program is not supported in any way, shape, or form, but is provided as an
example for the end-user to write their own daemon.
Set proxy.CopyAudit=2 in the ETX configuration to enable optional copy
auditing. Set proxy.CopyAudit=1 to cause ETX to exit if it cannot connect to
the copy audit daemon. Additionally set proxy.CopyAuditImage=1 to enable "Copy
Rectangle" image auditing. This is an undocumented feature of ETX. As such, the
flag and the protocol are subject to change at any time without notice.
Protocol
The copy audit daemon listens on the abstract socket (Linux) or named pipe
(Windows) "Exceed TurboX Copy Audit". On other systems, it listens on the Unix
socket "/tmp/.X11-unix/ETXaudit"
When ETX opens a socket, it will write the 8-byte string "ETXaudit" followed by
the byte 0x9F (aka "CBOR start indefinite length array"). The remainder of the
protocol is based on CBOR. For more details on CBOR encoding, see
http://cbor.io and/or RFC 7049.
Each audit/copy event is a single CBOR Map. The Map may contain any combination
of the following entries (with CBOR type in brackets), except that it shall not
contain more than one of Text, Image, FileStart, FileComplete, or Print.
Additional entries not specified here may be present.
- Display (Unsigned) is the display number of the ETX proxy.
- File (String) is the name of the file copied by the user.
- FileComplete (bool) is true if the file transferred from the proxy to the
desktop, and false if the file transferred from the desktop to the proxy.
- FileStart (bool) is true if the file will transfer from the proxy to the
desktop, and false if the file will transfer from the desktop to the proxy.
- FileSize (Unsigned) is the length (in bytes) of the file copied by the user.
- Image (Binary) is the image copied by the user.
- ImageType (String) is the name of the X11 protocol atom describing the image
(nominally the image's MIME type).
- IPAddress (String, or Array of String) is the IP address of the remote
user's computer (not the ETX proxy) or computers (when sharing).
- Print (Binary) is the first part of the document printed by the user
(limited to proxy.CopyAuditPrintLimit bytes).
- TransferIPAddress (String) is the IP address of the file transfer computer
(not the user's desktop or the ETX proxy). This field is not present when the
file transfer is exchanged with the ETX proxy itself.
- Text (Binary) is the text copied. It is not a CBOR String because it may be
any text type supported by X.
- User (String) is the username of the user.
- XApp (String) is the name of the ETX profile the user is running.
The ETX proxy will send Text, Image, FileStart, FileComplete, or Print last in
the Map, so the other fields can inform the disposition of the Text, Image,
FileStart, FileComplete, or Print.
There is no data sent from the audit daemon to ETX. ETX does not want to wait
for confirmation that the audit message has been logged. Attempting to write
data back to ETX may block (because ETX will never read it).
*/
package main | doc.go | 0.520984 | 0.660651 | doc.go | starcoder |
package geometry
import (
"fmt"
"github.com/gopherd/doge/math/mathutil"
"github.com/gopherd/three/core"
)
// Box3 is an axis-aligned 3D bounding box defined by its Min and Max corners.
type Box3 struct {
	Min, Max core.Vector3
}
// Center returns the midpoint of the box.
func (box Box3) Center() core.Vector3 { return box.Min.Add(box.Max).Div(2) }
// Size returns Max - Min (components may be negative when the box is empty).
func (box Box3) Size() core.Vector3 { return box.Max.Sub(box.Min) }
// IsEmpty reports whether the box encloses no volume (any max < min).
func (box Box3) IsEmpty() bool {
	return box.Max.X() < box.Min.X() || box.Max.Y() < box.Min.Y() || box.Max.Z() < box.Min.Z()
}
// String returns a "{(minX,minY,minZ),(maxX,maxY,maxZ)}" representation.
func (box Box3) String() string {
	return fmt.Sprintf(
		"{(%f,%f,%f),(%f,%f,%f)}",
		box.Min.X(), box.Min.Y(), box.Min.Z(),
		box.Max.X(), box.Max.Y(), box.Max.Z(),
	)
}
// ContainsPoint reports whether point lies inside the box (inclusive).
func (box Box3) ContainsPoint(point core.Vector3) bool {
	return !(point.X() < box.Min.X() || point.X() > box.Max.X() ||
		point.Y() < box.Min.Y() || point.Y() > box.Max.Y() ||
		point.Z() < box.Min.Z() || point.Z() > box.Max.Z())
}
// ContainsBox reports whether other lies entirely inside the box (inclusive).
func (box Box3) ContainsBox(other Box3) bool {
	return box.Min.X() <= other.Min.X() && other.Max.X() <= box.Max.X() &&
		box.Min.Y() <= other.Min.Y() && other.Max.Y() <= box.Max.Y() &&
		box.Min.Z() <= other.Min.Z() && other.Max.Z() <= box.Max.Z()
}
func (box Box3) Intersect(other Box3) Box3 {
var min = core.Vec3(
mathutil.Max(box.Min.X(), other.Min.X()),
mathutil.Max(box.Min.Y(), other.Min.Y()),
mathutil.Max(box.Min.Z(), other.Min.Z()),
)
var max = core.Vec3(
mathutil.Min(box.Max.X(), other.Max.X()),
mathutil.Min(box.Max.Y(), other.Max.Y()),
mathutil.Min(box.Max.Z(), other.Max.Z()),
)
return Box3{Min: min, Max: max}
}
// Union returns the smallest box that encloses both boxes.
func (box Box3) Union(other Box3) Box3 {
	return Box3{
		Min: core.Vec3(
			mathutil.Min(box.Min.X(), other.Min.X()),
			mathutil.Min(box.Min.Y(), other.Min.Y()),
			mathutil.Min(box.Min.Z(), other.Min.Z()),
		),
		Max: core.Vec3(
			mathutil.Max(box.Max.X(), other.Max.X()),
			mathutil.Max(box.Max.Y(), other.Max.Y()),
			mathutil.Max(box.Max.Z(), other.Max.Z()),
		),
	}
}
// IntersectsBox reports whether the two boxes overlap (touching counts).
func (box Box3) IntersectsBox(other Box3) bool {
	// The boxes are disjoint iff they are separated along some axis.
	separated := other.Max.X() < box.Min.X() || other.Min.X() > box.Max.X() ||
		other.Max.Y() < box.Min.Y() || other.Min.Y() > box.Max.Y() ||
		other.Max.Z() < box.Min.Z() || other.Min.Z() > box.Max.Z()
	return !separated
}
// IntersectsSphere reports whether the box and the sphere overlap.
func (box Box3) IntersectsSphere(sphere Sphere3) bool {
	// The point on the box closest to the sphere's center decides it:
	// overlap iff its squared distance is within the squared radius.
	closest := box.ClampPoint(sphere.Center)
	return closest.Sub(sphere.Center).Square() <= sphere.Radius*sphere.Radius
}
// IntersectsPlane reports whether the box crosses (or touches) the plane.
//
// min and max accumulate the smallest and largest projections of the box's
// corners onto the plane normal: for each axis, the sign of the normal
// component selects which of Min/Max contributes to the minimum vs the
// maximum. The plane intersects the box when -plane.Constant lies within
// [min, max].
func (box Box3) IntersectsPlane(plane Plane) bool {
	var min, max core.Float
	if plane.Normal.X() > 0 {
		min = plane.Normal.X() * box.Min.X()
		max = plane.Normal.X() * box.Max.X()
	} else {
		min = plane.Normal.X() * box.Max.X()
		max = plane.Normal.X() * box.Min.X()
	}
	if plane.Normal.Y() > 0 {
		min += plane.Normal.Y() * box.Min.Y()
		max += plane.Normal.Y() * box.Max.Y()
	} else {
		min += plane.Normal.Y() * box.Max.Y()
		max += plane.Normal.Y() * box.Min.Y()
	}
	if plane.Normal.Z() > 0 {
		min += plane.Normal.Z() * box.Min.Z()
		max += plane.Normal.Z() * box.Max.Z()
	} else {
		min += plane.Normal.Z() * box.Max.Z()
		max += plane.Normal.Z() * box.Min.Z()
	}
	return min <= -plane.Constant && max >= -plane.Constant
}
// ClampPoint returns point clamped component-wise into the box.
func (box Box3) ClampPoint(point core.Vector3) core.Vector3 {
	x := mathutil.Clamp(point.X(), box.Min.X(), box.Max.X())
	y := mathutil.Clamp(point.Y(), box.Min.Y(), box.Max.Y())
	z := mathutil.Clamp(point.Z(), box.Min.Z(), box.Max.Z())
	return core.Vec3(x, y, z)
}
func (box Box3) DistanceToPoint(point core.Vector3) core.Float {
return box.ClampPoint(point).Sub(point).Length()
} | geometry/box3.go | 0.814201 | 0.634883 | box3.go | starcoder |
package expect
import(`fmt`; `runtime`; `path/filepath`; `strings`; `testing`)
// Eq asserts that actual equals expected (see equal for the comparison rules).
func Eq(t *testing.T, actual, expected interface{}) {
	log(t, actual, expected, equal(actual, expected))
}

// Ne asserts that actual does not equal expected.
func Ne(t *testing.T, actual, expected interface{}) {
	log(t, actual, expected, !equal(actual, expected))
}

// True asserts that actual is the boolean true.
func True(t *testing.T, actual interface{}) {
	log(t, actual, true, equal(actual, true))
}

// False asserts that actual is the boolean false.
func False(t *testing.T, actual interface{}) {
	log(t, actual, false, equal(actual, false))
}

// Contain asserts that the "%+v" rendering of actual contains expected.
func Contain(t *testing.T, actual interface{}, expected string) {
	match(t, fmt.Sprintf(`%+v`, actual), expected, true)
}

// NotContain asserts that the "%+v" rendering of actual does not contain expected.
func NotContain(t *testing.T, actual interface{}, expected string) {
	match(t, fmt.Sprintf(`%+v`, actual), expected, false)
}
// equal reports whether actual matches expected. For bool, int, and uint64
// expectations a strict same-type comparison is used (a mismatched actual
// type never matches); any other expected type falls back to comparing the
// fmt "%v" renderings of both values.
func equal(actual, expected interface{}) bool {
	switch expected.(type) {
	case bool:
		v, ok := actual.(bool)
		return ok && v == expected
	case int:
		v, ok := actual.(int)
		return ok && v == expected
	case uint64:
		v, ok := actual.(uint64)
		return ok && v == expected
	default:
		return fmt.Sprintf(`%v`, actual) == fmt.Sprintf(`%v`, expected)
	}
}
// log reports the outcome of one assertion. On failure it emits a t.Errorf
// with expected vs actual (ANSI red); on success it emits a t.Logf (green)
// only in verbose mode. It assumes the user's test file is exactly two
// stack frames up (runtime.Caller(2)), so it must only be called directly
// from the exported assertion helpers above.
func log(t *testing.T, actual, expected interface{}, passed bool) {
	_, file, line, _ := runtime.Caller(2) // Get the calling file path and line number.
	file = filepath.Base(file) // Keep file name only.
	if !passed {
		t.Errorf("\r\t\x1B[31m%s line %d\nExpected: %v\n Actual: %v\x1B[0m", file, line, expected, actual)
	} else if (testing.Verbose()) {
		t.Logf("\r\t\x1B[32m%s line %d: %v\x1B[0m", file, line, actual)
	}
}
// match checks a substring expectation: when contains is true, actual must
// contain expected; when false, it must not. Logging mirrors log above and
// likewise assumes the caller's caller (runtime.Caller(2)) is the user's
// test file.
func match(t *testing.T, actual, expected string, contains bool) {
	passed := (contains == strings.Contains(actual, expected))
	_, file, line, _ := runtime.Caller(2)
	file = filepath.Base(file)
	if !passed {
		t.Errorf("\r\t\x1B[31m%s line %d\nContains: %s\n Actual: %s\x1B[0m", file, line, expected, actual)
	} else if (testing.Verbose()) {
		t.Logf("\r\t\x1B[32m%s line %d: %v\x1B[0m", file, line, actual)
	}
}
package cigar
import (
"fmt"
"github.com/vertgenlab/gonomics/common"
"github.com/vertgenlab/gonomics/dna"
"log"
"strings"
"unicode"
)
//The Cigar struct contains information on the runLength, operation, and DNA sequence associated with a particular cigar character.
type Cigar struct {
	RunLength int        // number of consecutive positions the operation applies to
	Op        rune       // CIGAR operation character (M, I, D, N, S, H, P, =, X; '*' marks unaligned)
	Sequence  []dna.Base // optional DNA sequence associated with this element
}
//NumInsertions calculates the number of inserted bases relative to a reference genome for an input Cigar slice.
func NumInsertions(input []*Cigar) int {
	if input[0].Op == '*' {
		log.Fatalf("Cannot calculate NumInsertions from unaligned reads.")
	}
	total := 0
	for _, c := range input {
		// Insertions consume the query but not the reference.
		if ConsumesQuery(c.Op) && !ConsumesReference(c.Op) {
			total += c.RunLength
		}
	}
	return total
}
//NumDeletions calculates the number of deletions relative to a reference genome for an input Cigar slice.
func NumDeletions(input []*Cigar) int {
	if input[0].Op == '*' {
		log.Fatalf("Cannot calculate NumDeletions from unaligned reads.")
	}
	total := 0
	for _, c := range input {
		// Deletions consume the reference but not the query.
		if ConsumesReference(c.Op) && !ConsumesQuery(c.Op) {
			total += c.RunLength
		}
	}
	return total
}
//ToString converts a slice of Cigar structs to a string for producing readable outputs for files or standard out.
func ToString(c []*Cigar) string {
	if len(c) == 0 {
		return "*"
	}
	var sb strings.Builder
	for _, v := range c {
		// A '*' element makes the whole CIGAR string "*" (unaligned).
		if v.Op == '*' {
			return "*"
		}
		seq := dna.BasesToString(v.Sequence)
		sb.WriteString(fmt.Sprintf("%v%c%s", v.RunLength, v.Op, strings.ToLower(seq)))
	}
	return sb.String()
}
//FromString parses an input string into a slice of Cigar structs.
func FromString(input string) []*Cigar {
var output []*Cigar
var currentNumber string
if input == "*" || input == "**" {
currentCigar := Cigar{RunLength: 0, Op: '*'}
return append(output, ¤tCigar)
}
for _, v := range input {
if unicode.IsDigit(v) {
currentNumber = currentNumber + fmt.Sprintf("%c", v)
} else if RuneIsValidCharacter(v) {
currentCigar := Cigar{RunLength: common.StringToInt(currentNumber), Op: v}
output = append(output, ¤tCigar)
currentNumber = ""
} else {
log.Fatalf("Invalid character: %c", v)
}
}
return output
}
//MatchLength returns the number of bases in a Cigar slice that align to the reference.
func MatchLength(c []*Cigar) int {
	if c[0].Op == '*' {
		log.Fatalf("Cannot calculate MatchLength from unaligned reads.")
	}
	length := 0
	for _, v := range c {
		// Match/mismatch operations consume both reference and query.
		if ConsumesReference(v.Op) && ConsumesQuery(v.Op) {
			length += v.RunLength
		}
	}
	return length
}
//ReferenceLength calculates the number of reference positions that a Cigar slice spans.
//Fix: the fatal message previously said "NumInsertions" (copy-paste error).
func ReferenceLength(c []*Cigar) int {
	var ans int
	if c[0].Op == '*' {
		log.Fatalf("Cannot calculate ReferenceLength from unaligned reads.")
	}
	for _, v := range c {
		if ConsumesReference(v.Op) {
			ans = ans + v.RunLength
		}
	}
	return ans
}
//QueryLength calculates the length of the query read from a slice of Cigar structs.
//Fix: the fatal message previously said "NumInsertions" (copy-paste error).
func QueryLength(c []*Cigar) int {
	var ans int
	if c[0].Op == '*' {
		log.Fatalf("Cannot calculate QueryLength from unaligned reads.")
	}
	for _, v := range c {
		if ConsumesQuery(v.Op) {
			ans = ans + v.RunLength
		}
	}
	return ans
}
//RuneIsValidCharacter returns true if a particular input rune matches any of the acceptable Cigar operation characters.
//Idiom: the nine single-case arms are collapsed into one case list.
func RuneIsValidCharacter(r rune) bool {
	switch r {
	case 'M', 'I', 'D', 'N', 'S', 'H', 'P', '=', 'X':
		return true
	default:
		return false
	}
}
//CigarConsumesReference returns true if the Cigar operation is reference consuming, false otherwise.
//Convenience wrapper around ConsumesReference for a whole Cigar element.
func CigarConsumesReference(c Cigar) bool {
	return ConsumesReference(c.Op)
}
//ConsumesReference returns true if the rune matches an operation character that is reference consuming for Cigars.
//Exits via log.Fatalf on an unrecognized rune.
//Idiom: the per-rune arms are collapsed into two case lists.
func ConsumesReference(r rune) bool {
	switch r {
	case 'M', 'D', 'N', '=', 'X':
		return true
	case 'I', 'S', 'H', 'P':
		return false
	}
	log.Fatalf("Invalid rune: %c", r)
	return false
}
//ConsumesQuery returns true for input runes that match query consuming characters for Cigars.
//Exits via log.Fatalf on an unrecognized rune.
//Idiom: the per-rune arms are collapsed into two case lists.
func ConsumesQuery(r rune) bool {
	switch r {
	case 'M', 'I', 'S', '=', 'X':
		return true
	case 'D', 'N', 'H', 'P':
		return false
	}
	log.Fatalf("Invalid rune: %c", r)
	return false
}
package geometry
import (
"errors"
"math"
"github.com/bradfitz/slice"
)
// Polygon is a 2D polygon stored as a slice of vertices kept in clockwise
// order (see NewPolygon / clockwiseSort).
type Polygon struct {
	vertices []Vector
}

// InvalidPolygon is the panic value raised when fewer than 3 vertices are supplied.
var InvalidPolygon = errors.New("A polygon must have at least 3 vertices!")
// NewPolygon builds a Polygon from the given vertices, sorting them into
// clockwise order first. It panics with InvalidPolygon when fewer than 3
// vertices are supplied.
func NewPolygon(vertices []Vector) Polygon {
	if len(vertices) < 3 {
		panic(InvalidPolygon)
	}
	return Polygon{clockwiseSort(vertices)}
}
// Area returns the polygon's area, computed with the shoelace formula.
func (p Polygon) Area() float64 {
	var twice float64
	j := len(p.vertices) - 1
	for i := range p.vertices {
		twice += (p.vertices[j].X - p.vertices[i].X) * (p.vertices[j].Y + p.vertices[i].Y)
		j = i
	}
	return math.Abs(twice) / 2
}
// Centroid returns the polygon's centre of mass, using the standard
// cross-product-weighted vertex sum divided by six times the area.
func (p Polygon) Centroid() Vector {
	n := len(p.vertices)
	centre := Vector{0, 0}
	for i := 0; i < n; i++ {
		next := p.vertices[(i+1)%n]
		cross := p.vertices[i].CrossProduct(next)
		centre = centre.Add(p.vertices[i].Add(next).Multiply(cross))
	}
	return centre.Divide(6 * p.Area())
}
// Translate moves every vertex of the polygon by v and returns the polygon.
// NOTE(review): the vertex slice is mutated in place, so the caller's
// polygon shares the change even though the receiver is a value — confirm
// callers expect in-place semantics.
func (p Polygon) Translate(v Vector) Shape {
	for i := 0; i < len(p.vertices); i++ {
		p.vertices[i] = p.vertices[i].Add(v)
	}
	return p
}
// Rotate rotates the polygon by angle radians about the origin and returns
// it. The vertex slice is mutated in place.
//
// Fix: the y component was computed as x*sin - y*cos, which is not a
// rotation (it reflects the y axis). The standard 2D rotation is
// x' = x*cos - y*sin, y' = x*sin + y*cos — consistent with
// RotateAboutPoint below.
func (p Polygon) Rotate(angle float64) Shape {
	if angle == 0 {
		return p
	}
	cos := math.Cos(angle)
	sin := math.Sin(angle)
	for i := 0; i < len(p.vertices); i++ {
		vertex := &p.vertices[i]
		vertex.X, vertex.Y = vertex.X*cos-vertex.Y*sin, vertex.X*sin+vertex.Y*cos
	}
	return p
}
// RotateAboutPoint rotates the polygon by angle radians about the given
// point and returns it. Rotation about the origin delegates to Rotate.
// The vertex slice is mutated in place.
func (p Polygon) RotateAboutPoint(angle float64, point Vector) Shape {
	if angle == 0 {
		return p
	}
	if point.X == 0 && point.Y == 0 {
		return p.Rotate(angle)
	}
	cos, sin := math.Cos(angle), math.Sin(angle)
	for i := range p.vertices {
		v := &p.vertices[i]
		dx, dy := v.X-point.X, v.Y-point.Y
		// Rotate the offset from the pivot, then translate back.
		v.X = point.X + (dx*cos - dy*sin)
		v.Y = point.Y + (dx*sin + dy*cos)
	}
	return p
}
// Contains reports whether point lies inside the polygon (boundary
// included). For each edge it checks the sign of the cross product between
// the edge and the vector from the edge start to the point; any positive
// value means the point is outside. NOTE(review): this sign test relies on
// the vertices being ordered clockwise, as NewPolygon arranges — confirm
// for polygons constructed by other means.
func (p Polygon) Contains(point Vector) bool {
	for i := 0; i < len(p.vertices); i++ {
		vertex := p.vertices[i]
		nextVertex := p.vertices[(i + 1) % len(p.vertices)]
		if (point.X-vertex.X)*(nextVertex.Y-vertex.Y)+(point.Y-vertex.Y)*(vertex.X-nextVertex.X) > 0 {
			return false
		}
	}
	return true
}
// Scale scales the polygon by scaleFactor about its centroid and returns it.
func (p Polygon) Scale(scaleFactor float64) Shape {
	return p.ScaleAboutPoint(scaleFactor, p.Centroid())
}
// ScaleAboutPoint scales the polygon by scaleFactor about the given point
// and returns it. The vertex slice is mutated in place; a factor of 1 is a
// no-op.
func (p Polygon) ScaleAboutPoint(scaleFactor float64, point Vector) Shape {
	if scaleFactor == 1 {
		return p
	}
	for i := range p.vertices {
		v := &p.vertices[i]
		delta := v.Subtract(point)
		v.X = point.X + delta.X*scaleFactor
		v.Y = point.Y + delta.Y*scaleFactor
	}
	return p
}
// mean returns the arithmetic mean of the given vertices.
func mean(vertices []Vector) Vector {
	sum := Vector{0, 0}
	for _, v := range vertices {
		sum.X += v.X
		sum.Y += v.Y
	}
	return sum.Divide(float64(len(vertices)))
}
// clockwiseSort orders the vertices around their mean point by the angle
// from the centre to each vertex, and returns the (in-place sorted) slice.
//
// Fix: the comparator computed centre.Angle(vertices[i]) twice, so it
// always returned false and the sort was a no-op; the second operand must
// be vertices[j].
func clockwiseSort(vertices []Vector) []Vector {
	centre := mean(vertices)
	slice.Sort(vertices, func(i, j int) bool {
		return centre.Angle(vertices[i])-centre.Angle(vertices[j]) > 0
	})
	return vertices
}
// IsConvex reports whether the polygon is convex. It computes the
// z-component of the cross product of each pair of consecutive edges and
// records the sign in a 2-bit flag (bit 1 for negative turns, bit 2 for
// positive). Seeing both signs (flag == 3) means the boundary turns both
// ways, i.e. the polygon is concave. Collinear triples (z == 0) set no bit.
func (p Polygon) IsConvex() (bool) {
	flag := byte(0)
	n := len(p.vertices)
	for i := 0; i < n; i++ {
		j := (i + 1) % n
		k := (i + 2) % n
		z := (p.vertices[j].X - p.vertices[i].X) * (p.vertices[k].Y - p.vertices[j].Y)
		z -= (p.vertices[j].Y - p.vertices[i].Y) * (p.vertices[k].X - p.vertices[j].X)
		if z < 0 {
			flag |= 1
		} else if z > 0 {
			flag |= 2
		}
		if flag == 3 {
			return false
		}
	}
	return true
}
package dsp
import "math"
// computeNTaps estimates the FIR tap count needed for roughly 53 dB of
// stop-band attenuation at the given sample rate and transition width.
// The result is forced odd so the filter stays symmetric.
func computeNTaps(sampleRate, transitionWidth float64) int {
	const maxAttenuation = 53.0
	taps := int(maxAttenuation * sampleRate / (22.0 * transitionWidth))
	return taps | 1
}
// computeNTapsAtt estimates the FIR tap count needed for the requested
// attenuation (dB) at the given sample rate and transition width, forced
// odd so the filter stays symmetric.
func computeNTapsAtt(sampleRate, transitionWidth, maxAttenuation float64) int {
	taps := int(maxAttenuation * sampleRate / (22.0 * transitionWidth))
	return taps | 1
}
// MakeRRC computes the taps of a root-raised-cosine pulse-shaping filter.
//
//	gain       overall gain; taps are scaled so they sum to gain
//	sampleRate sample rate in Hz
//	symbolRate symbol rate in Hz (sampleRate/symbolRate = samples per symbol)
//	alpha      roll-off (excess bandwidth) factor
//	nTaps      requested tap count, forced odd
//
// After scaling, any tap greater than 1 is clamped to 1.
func MakeRRC(gain, sampleRate, symbolRate, alpha float64, nTaps int) []float32 {
	nTaps |= 1 // odd tap count keeps the impulse response symmetric
	var taps = make([]float32, nTaps)
	var spb = sampleRate / symbolRate // samples per symbol
	var scale = float64(0)
	var x1, x2, x3, num, den, xindx float64
	for i := 0; i < nTaps; i++ {
		xindx = float64(i) - float64(nTaps)/2.0 // tap index centred on 0
		x1 = math.Pi * xindx / spb
		x2 = 4 * alpha * xindx / spb
		x3 = x2*x2 - 1
		// Away from the removable singularity at |4*alpha*x/spb| == 1,
		// use the closed-form RRC impulse response (with its own special
		// case for the centre tap)...
		if math.Abs(x3) > 0.000001 {
			if i != nTaps/2 {
				num = math.Cos((1+alpha)*x1) + math.Sin((1-alpha)*x1)/(4*alpha*xindx/spb)
			} else {
				num = math.Cos((1+alpha)*x1) + (1-alpha)*math.Pi/(4*alpha)
			}
			den = x3 * math.Pi
		} else {
			// ...otherwise evaluate the limit form near the singularity.
			if alpha == 1 {
				taps[i] = -1
				continue
			}
			x3 = (1 - alpha) * x1
			x2 = (1 + alpha) * x1
			num = math.Sin(x2)*(1+alpha)*math.Pi - math.Cos(x3)*((1-alpha)*math.Pi*spb)/(4*alpha*xindx) + math.Sin(x3)*spb*spb/(4*alpha*xindx*xindx)
			den = -32 * math.Pi * alpha * alpha * xindx / spb
		}
		taps[i] = float32(4 * alpha * num / den)
		scale += float64(taps[i])
	}
	// Normalize so the taps sum to gain, clamping any tap above 1.
	for i := 0; i < nTaps; i++ {
		taps[i] = float32(float64(taps[i]) * gain / scale)
		if taps[i] > 1 {
			taps[i] = 1
		}
	}
	return taps
}
// MakeLowPass designs a low-pass FIR filter by the windowed-sinc method
// with a Hamming window, sized by computeNTaps (~53 dB attenuation).
//
//	gain            desired gain at DC
//	sampleRate      sample rate in Hz
//	cutFrequency    cutoff frequency in Hz
//	transitionWidth transition-band width in Hz
func MakeLowPass(gain, sampleRate, cutFrequency, transitionWidth float64) []float32 {
	var nTaps = computeNTaps(sampleRate, transitionWidth)
	var taps = make([]float32, nTaps)
	var w = HammingWindow(nTaps)
	var M = (nTaps - 1) / 2 // index of the centre tap
	// Cutoff frequency expressed in radians per sample.
	var fwT0 = 2 * math.Pi * cutFrequency / sampleRate
	for i := -M; i <= M; i++ {
		if i == 0 {
			// sinc limit at the centre tap
			taps[i+M] = float32(fwT0 / math.Pi * w[i+M])
		} else {
			taps[i+M] = float32(math.Sin(float64(i)*fwT0) / (float64(i) * math.Pi) * w[i+M])
		}
	}
	// fmax is the filter's response at DC; divide it out so the DC gain
	// equals the requested gain.
	var fmax = float64(taps[M])
	for i := 1; i <= M; i++ {
		fmax += 2 * float64(taps[i+M])
	}
	gain /= fmax
	for i := 0; i < nTaps; i++ {
		taps[i] = float32(float64(taps[i]) * gain)
	}
	return taps
}
// MakeLowPass2 is MakeLowPass with an explicit stop-band attenuation (dB)
// instead of the fixed ~53 dB: the tap count comes from computeNTapsAtt.
func MakeLowPass2(gain, sampleRate, cutFrequency, transitionWidth, attenuation float64) []float32 {
	var nTaps = computeNTapsAtt(sampleRate, transitionWidth, attenuation)
	var taps = make([]float32, nTaps)
	var w = HammingWindow(nTaps)
	var M = (nTaps - 1) / 2 // index of the centre tap
	// Cutoff frequency expressed in radians per sample.
	var fwT0 = 2 * math.Pi * cutFrequency / sampleRate
	for i := -M; i <= M; i++ {
		if i == 0 {
			// sinc limit at the centre tap
			taps[i+M] = float32(fwT0 / math.Pi * w[i+M])
		} else {
			taps[i+M] = float32(math.Sin(float64(i)*fwT0) / (float64(i) * math.Pi) * w[i+M])
		}
	}
	// Normalize by the DC response so the DC gain equals the requested gain.
	var fmax = float64(taps[M])
	for i := 1; i <= M; i++ {
		fmax += 2 * float64(taps[i+M])
	}
	gain /= fmax
	for i := 0; i < nTaps; i++ {
		taps[i] = float32(float64(taps[i]) * gain)
	}
	return taps
}
// generateDiffTaps returns the first difference of the given taps:
// out[i] = taps[i+1] - taps[i], with the final element zero.
//
// Fix: the original unconditionally wrote diffTaps[len(taps)-1] = 0, which
// panicked on an empty input; make's zero-fill already provides the final
// zero, so no special-casing is needed.
func generateDiffTaps(taps []float32) []float32 {
	diff := make([]float32, len(taps))
	for i := 0; i+1 < len(taps); i++ {
		diff[i] = taps[i+1] - taps[i]
	}
	return diff
}
// MakeLowPassFixed designs a low-pass FIR filter with an explicitly chosen
// tap count (forced odd) instead of one derived from a transition width.
// The window applied uses Blackman coefficients (0.42, 0.5, 0.08) and the
// taps are normalized so their sum (the DC response) equals gain.
func MakeLowPassFixed(gain, sampleRate, cutFrequency float64, length int) []float32 {
	length |= 1 // odd tap count keeps the filter symmetric
	var taps = make([]float32, length)
	var frequency = cutFrequency / sampleRate // normalized cutoff, cycles/sample
	var center = int(math.Floor(float64(length) / 2))
	var sum = 0.0
	for i := 0; i < length; i++ {
		var val float64
		if i == center {
			// sinc limit at the centre tap
			val = 2 * math.Pi * float64(frequency)
		} else {
			var angle = 2 * math.Pi * (float64(i) + 1) / (float64(length) + 1)
			val = math.Sin(2*math.Pi*frequency*float64(i-center)) / float64(i-center)
			val *= 0.42 - 0.5*math.Cos(angle) + 0.08*math.Cos(2*angle)
		}
		sum += val
		taps[i] = float32(val)
	}
	// Normalize so the taps sum to gain.
	for i := 0; i < length; i++ {
		taps[i] /= float32(sum)
		taps[i] *= float32(gain)
	}
	return taps
}
package idemix
import (
"github.com/milagro-crypto/amcl/version3/go/amcl"
"github.com/milagro-crypto/amcl/version3/go/amcl/FP256BN"
"github.com/pkg/errors"
)
// Identity Mixer Credential is a list of attributes certified (signed) by the issuer
// A credential also contains a user secret key blindly signed by the issuer
// Without the secret key the credential cannot be used
// Credential issuance is an interactive protocol between a user and an issuer
// The issuer takes its secret and public keys and user attribute values as input
// The user takes the issuer public key and user secret as input
// The issuance protocol consists of the following steps:
// 1) The issuer sends a random nonce to the user
// 2) The user creates a Credential Request using the public key of the issuer, user secret, and the nonce as input
// The request consists of a commitment to the user secret (can be seen as a public key) and a zero-knowledge proof
// of knowledge of the user secret key
// The user sends the credential request to the issuer
// 3) The issuer verifies the credential request by verifying the zero-knowledge proof
// If the request is valid, the issuer issues a credential to the user by signing the commitment to the secret key
// together with the attribute values and sends the credential back to the user
// 4) The user verifies the issuer's signature and stores the credential that consists of
// the signature value, a randomness used to create the signature, the user secret, and the attribute values
// NewCredential issues a new credential, which is the last step of the interactive issuance protocol
// All attribute values are added by the issuer at this step and then signed together with a commitment to
// the user's secret key from a credential request
//
// The result is a BBS+ signature (A, B, E, S) over the user's pseudonym Nym
// and the attribute values; on any failure the returned credential is nil.
func NewCredential(key *IssuerKey, m *CredRequest, attrs []*FP256BN.BIG, rng *amcl.RAND) (*Credential, error) {
	// check the credential request (verifies its zero-knowledge proof
	// against the issuer public key)
	err := m.Check(key.IPk)
	if err != nil {
		return nil, err
	}
	// every attribute declared in the issuer public key must get a value
	if len(attrs) != len(key.IPk.AttributeNames) {
		return nil, errors.Errorf("incorrect number of attribute values passed")
	}
	// Place a BBS+ signature on the user key and the attribute values
	// (For BBS+, see e.g. "Constant-Size Dynamic k-TAA" by <NAME>, <NAME>, <NAME>)
	E := RandModOrder(rng)
	S := RandModOrder(rng)
	// B = g1 * Nym * HRand^S * prod(HAttrs[i]^attrs[i])
	B := FP256BN.NewECP()
	B.Copy(GenG1)
	Nym := EcpFromProto(m.Nym)
	B.Add(Nym)
	B.Add(EcpFromProto(key.IPk.HRand).Mul(S))
	// Use Mul2 instead of Mul as much as possible
	for i := 0; i < len(attrs)/2; i++ {
		B.Add(EcpFromProto(key.IPk.HAttrs[2*i]).Mul2(attrs[2*i], EcpFromProto(key.IPk.HAttrs[2*i+1]), attrs[2*i+1]))
	}
	if len(attrs)%2 != 0 {
		// odd attribute count: the last one could not be paired above
		B.Add(EcpFromProto(key.IPk.HAttrs[len(attrs)-1]).Mul(attrs[len(attrs)-1]))
	}
	// A = B^(1/(isk+E)) — the core BBS+ signature value
	Exp := Modadd(FP256BN.FromBytes(key.GetISk()), E, GroupOrder)
	Exp.Invmodp(GroupOrder)
	A := B.Mul(Exp)
	// serialize the attribute values into the credential
	CredAttrs := make([][]byte, len(attrs))
	for index, attribute := range attrs {
		CredAttrs[index] = BigToBytes(attribute)
	}
	return &Credential{
		EcpToProto(A),
		EcpToProto(B),
		BigToBytes(E),
		BigToBytes(S),
		CredAttrs}, nil
}
// Complete completes the credential by updating it with the randomness used to generate CredRequest:
// the user's blinding value credS1 is folded into S modulo the group order.
func (cred *Credential) Complete(credS1 *FP256BN.BIG) {
	s := FP256BN.FromBytes(cred.S)
	cred.S = BigToBytes(Modadd(s, credS1, GroupOrder))
}
// Ver cryptographically verifies the credential by verifying the signature
// on the attribute values and user's secret key
//
// It recomputes B from sk, S, and the attributes, checks it matches the
// stored B, and then checks the BBS+ pairing equation
// e(g2^E * W, A) == e(g2, B).
func (cred *Credential) Ver(sk *FP256BN.BIG, ipk *IssuerPublicKey) error {
	// parse the credential
	A := EcpFromProto(cred.GetA())
	B := EcpFromProto(cred.GetB())
	E := FP256BN.FromBytes(cred.GetE())
	S := FP256BN.FromBytes(cred.GetS())
	// verify that all attribute values are present
	for i := 0; i < len(cred.GetAttrs()); i++ {
		if cred.Attrs[i] == nil {
			return errors.Errorf("credential has no value for attribute %s", ipk.AttributeNames[i])
		}
	}
	// verify cryptographic signature on the attributes and the user secret key:
	// BPrime = g1 * HSk^sk * HRand^S * prod(HAttrs[i]^attrs[i])
	BPrime := FP256BN.NewECP()
	BPrime.Copy(GenG1)
	BPrime.Add(EcpFromProto(ipk.HSk).Mul2(sk, EcpFromProto(ipk.HRand), S))
	for i := 0; i < len(cred.Attrs)/2; i++ {
		BPrime.Add(EcpFromProto(ipk.HAttrs[2*i]).Mul2(FP256BN.FromBytes(cred.Attrs[2*i]), EcpFromProto(ipk.HAttrs[2*i+1]), FP256BN.FromBytes(cred.Attrs[2*i+1])))
	}
	if len(cred.Attrs)%2 != 0 {
		// odd attribute count: fold in the last, unpaired attribute
		BPrime.Add(EcpFromProto(ipk.HAttrs[len(cred.Attrs)-1]).Mul(FP256BN.FromBytes(cred.Attrs[len(cred.Attrs)-1])))
	}
	if !B.Equals(BPrime) {
		return errors.Errorf("b-value from credential does not match the attribute values")
	}
	// pairing check: a = g2^E * W, then e(a, A) must equal e(g2, B)
	a := GenG2.Mul(E)
	a.Add(Ecp2FromProto(ipk.W))
	a.Affine()
	if !FP256BN.Fexp(FP256BN.Ate(a, A)).Equals(FP256BN.Fexp(FP256BN.Ate(GenG2, B))) {
		return errors.Errorf("credential is not cryptographically valid")
	}
	return nil
}
// Package iseq contains the public interfaces for the Clojure sequence library (ported to #golang)
package iseq
import ()
// Equivable is the interface for types that support testing for equivalence.
type Equivable interface {
	// Equiv returns true if this is equivalent to the given object.
	Equiv(o interface{}) bool
}

// Hashable is the interface for types that support computing a hash code.
// Two items that are Equiv must have the same hash code.
type Hashable interface {
	// Hashable extends Equivable.
	Equivable
	// Hash returns a hash code for the thing.
	Hash() uint32
}

// Seqable is the interface for types that can produce an iseq.Seq.
type Seqable interface {
	// Seq returns a sequence that can be iterated across.
	Seq() Seq
}

// PCollection is the most basic interface for types that implement
// an immutable, persistent collection.
// A PCollection is Seqable.
// To access the elements of the PCollection c, you must call c.Seq().
type PCollection interface {
	Seqable
	Equivable
	// Count returns the number of items in the collection.
	// Not guaranteed to be O(1); see Counted for the constant-time variant.
	Count() int
	// Cons returns a new PCollection with an element added to this collection.
	// Which end the element is added to is collection-specific.
	Cons(o interface{}) PCollection
	// Empty returns an empty PCollection of the same type (if possible).
	Empty() PCollection
}
// Seq is the interface for sequential access to a collection.
// A Seq is itself a PCollection.
// A non-nil Seq has at least one element.
type Seq interface {
	PCollection
	// First returns the first item in the sequence.
	First() interface{}
	// Next returns a seq of the items after the first.
	// If there are no more items, returns nil.
	Next() Seq
	// More returns a possibly empty seq of the items after the first.
	More() Seq
	// ConsS returns a new Seq with o added.
	// Type-specific version of PCollection.Cons.
	ConsS(o interface{}) Seq
}
// The Lookup interface supports looking up a value by key.
type Lookup interface {
	// ValAt returns the value associated with the given key, or nil if the key is not present.
	ValAt(key interface{}) interface{}
	// ValAtD returns the value associated with the given key, or the provided default value if the key is not present.
	ValAtD(key interface{}, notFound interface{}) interface{}
}

// A MapEntry is an immutable key/value pair.
type MapEntry interface {
	// Key returns the key of the pair.
	Key() interface{}
	// Val returns the value of the pair.
	Val() interface{}
}

// Counted is the interface implemented by a collection to indicate it provides a constant-time count method.
type Counted interface {
	// Count1 returns the number of elements in the collection, in constant time.
	Count1() int
}
// An Associative is a persistent, immutable collection supporting key/value lookup.
type Associative interface {
	PCollection
	Lookup
	// ContainsKey returns true if there is an entry for the given key, false otherwise.
	ContainsKey(key interface{}) bool
	// EntryAt returns a MapEntry with the key/val for the given key, nil if key is not present.
	EntryAt(key interface{}) MapEntry
	// Assoc returns a new Associative with key associated with value.
	Assoc(key interface{}, val interface{}) Associative
}

// A PMap is a persistent, immutable map (key/value) collection.
type PMap interface {
	Associative
	Counted
	// AssocM returns a (new) PMap with key/val added.
	// Type-specific version of Associative.Assoc.
	AssocM(key interface{}, val interface{}) PMap
	// Without returns a (possibly new) PMap with no entry for key.
	Without(key interface{}) PMap
	// ConsM returns a (new) PMap with the key/val of the entry added.
	// Type-specific version of PCollection.Cons.
	ConsM(e MapEntry) PMap
}
// A Meta represents an object that can have metadata attached.
type Meta interface {
	// Meta returns the attached metadata.
	Meta() PMap
}

// A MetaW is a Meta that also supports creating a copy with new metadata.
// This was originally clojure.lang.Obj.
type MetaW interface {
	Meta
	// WithMeta returns a copy of this object carrying the given metadata.
	WithMeta(meta PMap) MetaW
}

// An Indexed collection supports direct access to the n-th item in the collection.
type Indexed interface {
	Counted
	// Nth returns the i-th entry, or nil if i is out of bounds.
	Nth(i int) interface{}
	// NthD returns the i-th entry, or a default value if i is out of bounds.
	NthD(i int, notFound interface{}) interface{}
	// NthE returns (i-th entry, nil), or (nil, error) if i is out of bounds.
	NthE(i int) (interface{}, error)
}

// An IndexedSeq is a Counted Seq that has a notion of the index of its first element in the context of a parent collection.
type IndexedSeq interface {
	Seq
	Counted
	// Index returns the index of the first element of this seq, relative to its parent.
	Index() int
}

// A Reversible is a collection that supports iterating through its items in reverse order.
type Reversible interface {
	// Rseq returns a Seq that iterates this collection in reverse.
	Rseq() Seq
}
// A PStackOps has stack operations of peek and pop.
type PStackOps interface {
	// Peek returns the element on top of the stack, or nil if empty.
	Peek() interface{}
	// Pop returns a new stack with the top element removed.
	Pop() PStack
}

// A PStack is a persistent, immutable collection supporting stack operations.
type PStack interface {
	PCollection
	PStackOps
}

// A PList is a persistent, immutable list (a PCollection with stack operations).
type PList interface {
	PCollection
	PStackOps
}

// A PVector is a persistent, immutable vector.
// A PVector is a PCollection that supports lookup by an int index and supports stack operations.
type PVector interface {
	Associative
	PStackOps
	Reversible
	Indexed
	// ConsV is the type-specific version of PCollection.Cons.
	ConsV(interface{}) PVector
	// AssocN returns a new PVector with the i-th element replaced by val.
	AssocN(i int, val interface{}) PVector
}
// A PSet is a persistent, immutable collection of unique elements.
type PSet interface {
	PCollection
	Counted
	// Disjoin returns a (possibly new) PSet with the given key removed.
	Disjoin(key interface{}) PSet
	// Contains reports whether the given key is an element of the set.
	Contains(key interface{}) bool
	// TODO: decide whether a Get method is needed.
}

// A Chunk is used internally to efficiently sequence through collections.
type Chunk interface {
	Indexed
	// DropFirst returns the chunk without its first element.
	DropFirst() Chunk
}

// A Comparer supports comparing itself to other objects.
type Comparer interface {
	// Compare returns a negative, zero, or positive int for <, ==, > respectively.
	Compare(y interface{}) int
}

// A CompareFn compares two objects for <, =, >.
type CompareFn func(interface{}, interface{}) int

// A Sorted collection maintains its entries in the order given by a comparator function.
type Sorted interface {
	// Comparator returns the comparison function defining the sort order.
	Comparator() CompareFn
	// EntryKey returns the sort key for the given entry.
	EntryKey(entry interface{}) interface{}
	// SeqA returns a seq over the collection in ascending or descending order.
	SeqA(ascending bool) Seq
	// SeqFrom returns a seq starting at the given key, ascending or descending.
	SeqFrom(key interface{}, ascending bool) Seq
}
package aes
import (
"crypto/cipher"
"unsafe"
)
// Assert that aesCipherAsm implements the ctrAble interface.
var _ ctrAble = (*aesCipherAsm)(nil)
// xorBytes xors the contents of a and b and places the resulting values into
// dst. If a and b are not the same length then the number of bytes processed
// will be equal to the length of shorter of the two. Returns the number
// of bytes processed.
//go:noescape
func xorBytes(dst, a, b []byte) int
// streamBufferSize is the number of bytes of encrypted counter values to cache.
const streamBufferSize = 32 * BlockSize
// aesctr implements cipher.Stream for AES in counter (CTR) mode, batching
// counter-block encryption into a reusable buffer (see refill).
type aesctr struct {
	block   *aesCipherAsm          // block cipher
	ctr     [2]uint64              // next value of the counter (big endian)
	buffer  []byte                 // buffer for the encrypted counter values
	storage [streamBufferSize]byte // array backing buffer slice
}
// NewCTR returns a Stream which encrypts/decrypts using the AES block
// cipher in counter mode. The length of iv must be the same as BlockSize.
//
// The IV is loaded as two native 64-bit words through unsafe pointer casts;
// the high/low labeling relies on the target (s390x) being big-endian.
// buffer starts empty so the first XORKeyStream call triggers a refill.
func (c *aesCipherAsm) NewCTR(iv []byte) cipher.Stream {
	if len(iv) != BlockSize {
		panic("cipher.NewCTR: IV length must equal block size")
	}
	var ac aesctr
	ac.block = c
	ac.ctr[0] = *(*uint64)(unsafe.Pointer((&iv[0]))) // high bits
	ac.ctr[1] = *(*uint64)(unsafe.Pointer((&iv[8]))) // low bits
	ac.buffer = ac.storage[:0]
	return &ac
}
// refill writes streamBufferSize/BlockSize consecutive counter values into
// the buffer, encrypts them all with a single cryptBlocks call, and leaves
// c.ctr at the next unused counter value.
func (c *aesctr) refill() {
	// Fill up the buffer with an incrementing count.
	c.buffer = c.storage[:streamBufferSize]
	c0, c1 := c.ctr[0], c.ctr[1]
	for i := 0; i < streamBufferSize; i += BlockSize {
		b0 := (*uint64)(unsafe.Pointer(&c.buffer[i]))
		b1 := (*uint64)(unsafe.Pointer(&c.buffer[i+BlockSize/2]))
		*b0, *b1 = c0, c1
		// Increment in big endian: c0 is high, c1 is low.
		c1++
		if c1 == 0 {
			// add carry
			c0++
		}
	}
	c.ctr[0], c.ctr[1] = c0, c1
	// Encrypt the buffer using AES in ECB mode.
	cryptBlocks(c.block.function, &c.block.key[0], &c.buffer[0], &c.buffer[0], streamBufferSize)
}
// XORKeyStream XORs src with the AES-CTR key stream and writes the result
// to dst, refilling the encrypted-counter buffer whenever it runs dry.
// Per the cipher.Stream contract, dst is assumed to be at least as long as
// src (xorBytes itself only bounds by src and the buffer).
func (c *aesctr) XORKeyStream(dst, src []byte) {
	for len(src) > 0 {
		if len(c.buffer) == 0 {
			c.refill()
		}
		// xorBytes consumes min(len(src), len(c.buffer)) bytes per pass.
		n := xorBytes(dst, src, c.buffer)
		c.buffer = c.buffer[n:]
		src = src[n:]
		dst = dst[n:]
	}
}
package md
import (
"bytes"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/parser"
"github.com/yuin/goldmark/text"
"github.com/yuin/goldmark/util"
)
// fencedCodeBlockInfoKey stores the active fence's state in the parser context.
var fencedCodeBlockInfoKey = parser.NewContextKey()

// fenced parses backtick-fenced code blocks (``` ... ```).
type fenced struct{}

// fenceData tracks the fence currently being parsed.
type fenceData struct {
	length int      // number of backticks that opened the fence
	node   ast.Node // the FencedCodeBlock node under construction
}
// Trigger returns the bytes that can start a fenced code block: a backtick.
func (b fenced) Trigger() []byte {
	return []byte{'`'}
}
// Parse consumes a complete fenced code block in one pass: it opens the
// fence, feeds lines through _continue until the closing fence, then
// finalizes the node. Returns nil when the current position is not the
// start of a fence.
func (b fenced) Parse(p ast.Node, r text.Reader, pc parser.Context) ast.Node {
	n, _ := b._open(p, r, pc)
	if n == nil {
		return nil
	}
	// Crawl until b._continue reports Close:
	for state := parser.Continue; state != parser.Close; state = b._continue(n, r, pc) {
	}
	// Close:
	b._close(n, r, pc)
	return n
}
// _open recognizes an opening fence of 3+ backticks, creates the
// FencedCodeBlock node, and records the fence length in the parser context
// so _continue can match the closing fence. An info string (language tag)
// following the backticks is captured when it contains no space.
func (b fenced) _open(p ast.Node, r text.Reader, pc parser.Context) (ast.Node, parser.State) {
	line, segment := r.PeekLine()
	i := 0
	for ; i < len(line) && line[i] == '`'; i++ {
	}
	oFenceLength := i
	// If there are less than 3 backticks:
	if oFenceLength < 3 {
		return nil, parser.NoChildren
	}
	// Advance through the backticks
	r.Advance(oFenceLength)
	var node = ast.NewFencedCodeBlock(nil)
	// If this isn't the last thing in the line: (```<language>)
	if i < len(line)-1 {
		rest := line[i:]
		infoStart, infoStop := segment.Start-segment.Padding+i, segment.Stop
		if len(rest) > 0 && infoStart < infoStop && bytes.IndexByte(rest, '\n') > -1 {
			// Trim trailing whitespaces:
			left := util.TrimLeftSpaceLength(rest)
			right := util.TrimRightSpaceLength(rest)
			// If there is no space:
			if left < right && bytes.IndexByte(rest, ' ') == -1 {
				seg := text.NewSegment(infoStart+left, infoStop-right)
				node.Info = ast.NewTextSegment(seg)
				r.Advance(infoStop - infoStart)
			}
		}
	}
	pc.Set(fencedCodeBlockInfoKey, &fenceData{oFenceLength, node})
	return node, parser.NoChildren
}
// _continue processes one line inside the fence. Each line is appended to
// the node as a text segment; if the line contains a run of backticks at
// least as long as the opening fence, the block is closed. A shorter run is
// treated as literal text.
func (b fenced) _continue(node ast.Node, r text.Reader, pc parser.Context) parser.State {
	line, segment := r.PeekLine()
	if len(line) == 0 {
		return parser.Close
	}
	fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
	_, pos := util.IndentWidth(line, r.LineOffset())
	// Crawl i to ```
	i := pos
	for ; i < len(line) && line[i] != '`'; i++ {
	}
	// Is there a string literal? Write it.
	pos, padding := util.DedentPositionPadding(line, r.LineOffset(), segment.Padding, 0)
	// start+i accounts for everything before end (```)
	var start, stop = segment.Start + pos, segment.Start + i
	// Since we're assigning this segment a Start, IsEmpty() would fail if
	// seg.End is not touched.
	var seg = text.Segment{
		Start:   start,
		Stop:    stop,
		Padding: padding,
	}
	r.AdvanceAndSetPadding(stop-start, padding)
	defer func() {
		// Append this at the end of the function, as the block below might
		// reuse our text segment.
		node.Lines().Append(seg)
	}()
	// If found:
	if i != len(line) {
		// Update the starting position:
		pos = i
		// Iterate until we're out of backticks:
		for ; i < len(line) && line[i] == '`'; i++ {
		}
		// Do we have enough (3 or more) backticks?
		// If yes, end the codeblock properly.
		if length := i - pos; length >= fdata.length {
			r.Advance(length)
			return parser.Close
		} else {
			// No, treat the rest as text:
			seg.Stop = segment.Stop
			r.Advance(segment.Stop - stop)
		}
	}
	return parser.Continue | parser.NoChildren
}
// _close finalizes the fenced code block: it clears this block's entry from
// the parser context (only if it still refers to this node) and trims
// leading/trailing whitespace from the collected content lines.
func (b fenced) _close(node ast.Node, r text.Reader, pc parser.Context) {
	fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
	// Only reset the context entry if it still points at this block, so data
	// belonging to a newer fenced block is left untouched.
	if fdata.node == node {
		pc.Set(fencedCodeBlockInfoKey, nil)
	}
	lines := node.Lines()
	if length := lines.Len(); length > 0 {
		// Trim leading whitespace from the first content line.
		first := lines.At(0)
		lines.Set(0, first.TrimLeftSpace(r.Source()))
		// Drop the last line entirely if it is empty (trailing newline).
		last := lines.At(length - 1)
		if last.Len() == 0 {
			lines.SetSliced(0, length-1)
			length--
		}
		// If we've sliced everything away there is nothing left to trim.
		if length == 0 {
			return
		}
		// Trim the new last line's trailing whitespace.
		last = lines.At(length - 1)
		lines.Set(length-1, last.TrimRightSpace(r.Source()))
	}
}
func (b fenced) CanInterruptParagraph() bool {
return true
}
func (b fenced) CanAcceptIndentedLine() bool {
return false
} | md/codeblock.go | 0.649467 | 0.440168 | codeblock.go | starcoder |
package cephalobjects
import (
"errors"
"fmt"
"math/rand"
"time"
)
// Core binary-search-tree types backing the time series.

// CephaloTimeNode is a single observation: a timestamp, its value, and links
// to the left (earlier) and right (later) subtrees.
type CephaloTimeNode struct {
	Datetime time.Time
	Data     float64
	left     *CephaloTimeNode
	right    *CephaloTimeNode
}

// CephaloTimeSeries is a time series stored as a binary search tree ordered
// by Datetime.
type CephaloTimeSeries struct {
	// Size is the number of datapoints inserted so far.
	Size int
	// ID is a random 8-digit identifier assigned by NewCTS.
	ID int
	// Root is the top node of the tree; nil for an empty series.
	Root *CephaloTimeNode
}
//NewCTS creates an empty time series (nil Root) with a random ID. // convenience
func NewCTS() CephaloTimeSeries {
	return CephaloTimeSeries{ID: randomCTSID()}
}

//Insert provides a way to insert new tree node in the appropriate place.
//NOTE(review): Size is incremented even when dattime already exists in the
//tree, in which case the node-level insert is a no-op — Size can therefore
//overcount duplicate timestamps; confirm whether duplicates are expected.
func (cts *CephaloTimeSeries) Insert(dattime time.Time, Data float64) error {
	if cts.Root == nil {
		cts.Root = &CephaloTimeNode{Datetime: dattime, Data: Data}
		cts.Size++
		return nil
	}
	cts.Size++
	return cts.Root.insert(dattime, Data)
}
//Find offers fast retrieval of the desired Data point based on the supplied time.
//The boolean result reports whether a node with that exact timestamp exists.
func (cts *CephaloTimeSeries) Find(dattime time.Time) (*CephaloTimeNode, bool) {
	root := cts.Root
	if root == nil {
		return &CephaloTimeNode{}, false
	}
	return root.find(dattime)
}

//FindRange returns all of the tree nodes, i.e. tree Datapoints for the requested range.
//An error is returned for an empty tree or an inverted range.
func (cts *CephaloTimeSeries) FindRange(start time.Time, end time.Time) ([]*CephaloTimeNode, error) {
	var matches []*CephaloTimeNode
	if cts.Root == nil || end.Before(start) {
		return matches, errors.New("No tree Root element or the end is before the start")
	}
	return cts.Root.findRange(start, end)
}
//Delete removes the designated Datapoint from the time series tree.
//It returns an error when the tree is empty or the timestamp is absent.
func (cts *CephaloTimeSeries) Delete(dattime time.Time) error {
	if cts.Root == nil {
		return errors.New("Deletion can not be performed in an empty tree")
	}
	// fakeParent lets the root be removed/replaced through the same
	// replaceNode path as any other node; its right child mirrors the root.
	fakeParent := &CephaloTimeNode{right: cts.Root}
	err := cts.Root.delete(dattime, fakeParent)
	if err != nil {
		return err
	}
	// Bug fix: the deletion may have replaced the root itself (leaf or
	// half-leaf root), so always re-read the root from the sentinel parent
	// instead of only handling the became-nil case. Previously cts.Root kept
	// pointing at the removed node when the root was replaced by a child.
	cts.Root = fakeParent.right
	cts.Size--
	return nil
}
//TraversalMap offers inorder traversal of the timeseries with a callback/map
//function receiving a pointer to each Datapoint in ascending Datetime order.
func (cts *CephaloTimeSeries) TraversalMap(ctn *CephaloTimeNode, callback func(*CephaloTimeNode)) {
	if ctn != nil {
		cts.TraversalMap(ctn.left, callback)
		callback(ctn)
		cts.TraversalMap(ctn.right, callback)
	}
}
//EndpointsMap offers inorder traversal along the specified duration units
//(corresponding to last observation within duration unit) along with a
//callback/map function. The callback receives the first datapoint found at or
//past each period boundary.
func (cts *CephaloTimeSeries) EndpointsMap(period time.Duration, ctn *CephaloTimeNode, callback func(*CephaloTimeNode)) {
	// Period boundaries are measured from the earliest datapoint in the subtree.
	runningnode, _ := ctn.findMin(nil)
	cts.TraversalMap(ctn, func(current *CephaloTimeNode) {
		controltime := runningnode.Datetime.Add(period)
		if current.Datetime.Equal(controltime) || current.Datetime.After(controltime) {
			// Bug fix: report the endpoint datapoint itself; previously the
			// fixed subtree root (ctn) was passed on every period boundary.
			callback(current)
			runningnode = current
		}
	})
}
//PeriodApply performs a period-wise partial application of the supplied
//function, returning an entirely new series with one datapoint per completed
//period (keyed by the applied function's result) and Data transformed
//accordingly. It is expected that the applied function returns a new
//CephaloTimeNode instead of a pointer to an already used one.
func (cts *CephaloTimeSeries) PeriodApply(period time.Duration, ctn *CephaloTimeNode, applied func([]*CephaloTimeNode) CephaloTimeNode) CephaloTimeSeries {
	nts := NewCTS()
	// Periods are measured from the earliest datapoint in the subtree.
	runningnode, _ := ctn.findMin(nil)
	var runnernodes []*CephaloTimeNode
	cts.TraversalMap(ctn, func(current *CephaloTimeNode) {
		controltime := runningnode.Datetime.Add(period)
		if current.Datetime.Equal(controltime) || current.Datetime.After(controltime) {
			// Period boundary crossed: collapse the accumulated datapoints
			// into a single node and start a fresh accumulation window.
			calcnode := applied(runnernodes)
			nts.Insert(calcnode.Datetime, calcnode.Data)
			runnernodes = nil
			runningnode = current
		}
		runnernodes = append(runnernodes, current)
	})
	// NOTE(review): datapoints accumulated after the last period boundary are
	// never flushed into the result — confirm whether a trailing partial
	// period should also be emitted.
	return nts
}
//Window-based methods (using FindRange for start-end window extraction)

//RollApply provides duration-based rolling window application of the callback
//function. Upon finishing it returns a new CephaloTimeSeries, nearest
//resampled to the provided duration. A window is only emitted when it
//contains at least minn datapoints.
func (cts *CephaloTimeSeries) RollApply(period time.Duration, ctn *CephaloTimeNode, minn int, applied func([]*CephaloTimeNode) CephaloTimeNode) CephaloTimeSeries {
	nts := NewCTS()
	cts.TraversalMap(ctn, func(current *CephaloTimeNode) {
		rollwinstart := current.Datetime.Add(-period) //TODO - left, right and center align
		rollwinend := current.Datetime
		// NOTE(review): the range search starts at `current`, so it only sees
		// current's subtree — earlier in-window datapoints stored above or to
		// the left of `current` in the tree are missed; confirm whether the
		// search should start from the subtree root instead.
		inrange, _ := current.findRange(rollwinstart, rollwinend)
		if len(inrange) >= minn {
			calcnode := applied(inrange)
			nts.Insert(calcnode.Datetime, calcnode.Data)
		}
	})
	return nts
}

//RollMean provides the usual rolling mean (moving average) of the time series,
//but based on the period rather than the number of observations. Each emitted
//datapoint carries the window's last timestamp and the arithmetic mean of the
//window's Data values.
func (cts *CephaloTimeSeries) RollMean(period time.Duration, minn int) CephaloTimeSeries {
	return cts.RollApply(period, cts.Root, minn, func(currents []*CephaloTimeNode) CephaloTimeNode {
		nctt := CephaloTimeNode{Datetime: currents[len(currents)-1].Datetime, Data: 0}
		var valData float64
		for _, cu := range currents {
			valData += cu.Data
		}
		nctt.Data = valData / float64(len(currents))
		return nctt
	})
}
//Node methods considered private (insert, find)

//insert walks down the subtree and attaches a new node in BST order; a node
//with an identical timestamp is left untouched.
func (ctn *CephaloTimeNode) insert(dattime time.Time, data float64) error {
	if dattime.Equal(ctn.Datetime) {
		// Duplicate timestamp: nothing to insert.
		return nil
	}
	if dattime.After(ctn.Datetime) {
		// Later timestamps live in the right subtree.
		if ctn.right != nil {
			return ctn.right.insert(dattime, data)
		}
		ctn.right = &CephaloTimeNode{Datetime: dattime, Data: data}
		return nil
	}
	// Earlier timestamps live in the left subtree.
	if ctn.left != nil {
		return ctn.left.insert(dattime, data)
	}
	ctn.left = &CephaloTimeNode{Datetime: dattime, Data: data}
	return nil
}
//find locates the node with the exact timestamp in the subtree; the boolean
//reports whether it was found.
func (ctn *CephaloTimeNode) find(dattime time.Time) (*CephaloTimeNode, bool) {
	if ctn == nil {
		// Fell off the tree: no such timestamp.
		return &CephaloTimeNode{}, false
	}
	if ctn.Datetime.Equal(dattime) {
		return ctn, true
	}
	if dattime.After(ctn.Datetime) {
		return ctn.right.find(dattime)
	}
	return ctn.left.find(dattime)
}
//findRange collects, in order, every node of the subtree whose Datetime lies
//within the inclusive [start, end] window.
func (ctn *CephaloTimeNode) findRange(start time.Time, end time.Time) ([]*CephaloTimeNode, error) {
	var fop []*CephaloTimeNode
	if end.Before(start) {
		return fop, errors.New("End can't come before start in find range")
	}
	findRangeInner(ctn, start, end, func(ctn *CephaloTimeNode) {
		fop = append(fop, ctn)
	})
	// NOTE(review): debug leftover — prints the match count on every call.
	// It cannot be dropped in isolation because it is the only use of the
	// "fmt" import in this file; remove both together.
	fmt.Println(len(fop))
	return fop, nil
}
//findRangeInner recursively visits the subtree, invoking cb in order on every
//node whose Datetime falls inside the inclusive [start, end] window; subtrees
//that cannot contain in-range values are pruned.
func findRangeInner(ctn *CephaloTimeNode, start time.Time, end time.Time, cb func(ctn *CephaloTimeNode)) {
	if ctn == nil {
		return
	}
	// Node inside the window: both children may still hold in-range values.
	if (start.Before(ctn.Datetime) || start.Equal(ctn.Datetime)) && (end.After(ctn.Datetime) || end.Equal(ctn.Datetime)) {
		findRangeInner(ctn.left, start, end, cb)
		cb(ctn)
		findRangeInner(ctn.right, start, end, cb)
	}
	// Whole window strictly before this node: only the left subtree matters.
	if start.Before(ctn.Datetime) && end.Before(ctn.Datetime) {
		findRangeInner(ctn.left, start, end, cb)
	}
	// Whole window strictly after this node: only the right subtree matters.
	if start.After(ctn.Datetime) && end.After(ctn.Datetime) {
		findRangeInner(ctn.right, start, end, cb)
	}
}
//findMax returns the rightmost (latest) node of the subtree together with its
//parent; for a nil subtree an empty node and the supplied parent are returned.
func (ctn *CephaloTimeNode) findMax(parent *CephaloTimeNode) (*CephaloTimeNode, *CephaloTimeNode) {
	switch {
	case ctn == nil:
		return &CephaloTimeNode{}, parent
	case ctn.right == nil:
		return ctn, parent
	default:
		return ctn.right.findMax(ctn)
	}
}

//findMin returns the leftmost (earliest) node of the subtree together with its
//parent; for a nil subtree an empty node and the supplied parent are returned.
func (ctn *CephaloTimeNode) findMin(parent *CephaloTimeNode) (*CephaloTimeNode, *CephaloTimeNode) {
	switch {
	case ctn == nil:
		return &CephaloTimeNode{}, parent
	case ctn.left == nil:
		return ctn, parent
	default:
		return ctn.left.findMin(ctn)
	}
}
//replaceNode swaps this node out of its parent, attaching replacement in the
//slot (left or right) that currently points at ctn.
func (ctn *CephaloTimeNode) replaceNode(parent, replacement *CephaloTimeNode) {
	if ctn == nil {
		return
	}
	if ctn == parent.left {
		parent.left = replacement
		// Bug fix: return here. Previously control fell through and
		// parent.right was unconditionally overwritten as well, corrupting
		// the tree whenever a left child was replaced.
		return
	}
	parent.right = replacement
}
//delete removes the node carrying dattime from the subtree rooted at ctn.
//parent must be ctn's parent (callers deleting at the root pass a sentinel
//parent whose right child is the root). Returns an error when the timestamp
//is not present.
func (ctn *CephaloTimeNode) delete(dattime time.Time, parent *CephaloTimeNode) error {
	if ctn == nil {
		return errors.New("Can't delete from a nil node")
	}
	switch {
	case dattime.Before(ctn.Datetime):
		return ctn.left.delete(dattime, ctn)
	case dattime.After(ctn.Datetime):
		return ctn.right.delete(dattime, ctn)
	default:
		//If node is leaf node it has no children then remove it from its parent
		if ctn.left == nil && ctn.right == nil {
			ctn.replaceNode(parent, nil)
			return nil
		}
		//If node is half-leaf it has one of the children, so replace node by its child node
		if ctn.left == nil {
			ctn.replaceNode(parent, ctn.right)
			return nil
		}
		if ctn.right == nil {
			ctn.replaceNode(parent, ctn.left)
			return nil
		}
		//If the node is inner then steps are:
		//1. in the left subtree find largest (the in-order predecessor)
		leftmax, leftmaxparent := ctn.left.findMax(ctn)
		//2. replace my value and Data with the predecessor's
		ctn.Datetime = leftmax.Datetime
		ctn.Data = leftmax.Data
		//3. remove replacement node (it has no right child, so this recursion
		//ends in a leaf or half-leaf case)
		return leftmax.delete(leftmax.Datetime, leftmaxparent)
	}
}
//randomCTSID produces a pseudo-random 8-digit series identifier seeded from
//the current time.
func randomCTSID() int {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	return 10000000 + rng.Intn(99999999-10000000)
}
//Utils

//AbsDuration returns the absolute value of the supplied time.Duration
func AbsDuration(duration time.Duration) time.Duration {
	if duration >= 0 {
		return duration
	}
	return -duration
}
package client
import (
"encoding/json"
)
// Volume describes a named storage volume: its driver, host mount path, and
// associated metadata. JSON tags follow the upstream API's PascalCase naming.
type Volume struct {
	// Date/Time the volume was created.
	CreatedAt *string `json:"CreatedAt,omitempty"`
	// Name of the volume driver used by the volume.
	Driver string `json:"Driver"`
	// User-defined key/value metadata.
	Labels map[string]string `json:"Labels"`
	// Mount path of the volume on the host.
	Mountpoint string `json:"Mountpoint"`
	// Name of the volume.
	Name string `json:"Name"`
	// The driver specific options used when creating the volume.
	Options map[string]string `json:"Options"`
	// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
	Scope string `json:"Scope"`
	// Low-level details about the volume, provided by the volume driver. Details are returned as a map with key/value pairs: `{\"key\":\"value\",\"key2\":\"value2\"}`. The `Status` field is optional, and is omitted if the volume driver does not support this feature.
	Status map[string]map[string]interface{} `json:"Status,omitempty"`
	UsageData *VolumeUsageData `json:"UsageData,omitempty"`
}
// NewVolume instantiates a new Volume object with all API-required fields
// populated; the argument list will change if the required set changes.
func NewVolume(driver string, labels map[string]string, mountpoint string, name string, options map[string]string, scope string) *Volume {
	return &Volume{
		Driver:     driver,
		Labels:     labels,
		Mountpoint: mountpoint,
		Name:       name,
		Options:    options,
		Scope:      scope,
	}
}

// NewVolumeWithDefaults instantiates a new Volume object with only defaulted
// properties set; it does not guarantee that API-required properties are set.
func NewVolumeWithDefaults() *Volume {
	return &Volume{}
}
// GetCreatedAt returns the CreatedAt field value if set, zero value otherwise.
func (o *Volume) GetCreatedAt() string {
	if v, ok := o.GetCreatedAtOk(); ok {
		return *v
	}
	return ""
}

// GetCreatedAtOk returns a tuple with the CreatedAt field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Volume) GetCreatedAtOk() (*string, bool) {
	if o == nil || o.CreatedAt == nil {
		return nil, false
	}
	return o.CreatedAt, true
}

// HasCreatedAt returns a boolean if a field has been set.
func (o *Volume) HasCreatedAt() bool {
	return o != nil && o.CreatedAt != nil
}

// SetCreatedAt gets a reference to the given string and assigns it to the CreatedAt field.
func (o *Volume) SetCreatedAt(v string) {
	o.CreatedAt = &v
}
// GetDriver returns the Driver field value
func (o *Volume) GetDriver() string {
	if o == nil {
		return ""
	}
	return o.Driver
}

// GetDriverOk returns a tuple with the Driver field value
// and a boolean to check if the value has been set.
func (o *Volume) GetDriverOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Driver, true
}

// SetDriver sets field value
func (o *Volume) SetDriver(v string) {
	o.Driver = v
}

// GetLabels returns the Labels field value
func (o *Volume) GetLabels() map[string]string {
	if o == nil {
		return nil
	}
	return o.Labels
}

// GetLabelsOk returns a tuple with the Labels field value
// and a boolean to check if the value has been set.
func (o *Volume) GetLabelsOk() (*map[string]string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Labels, true
}

// SetLabels sets field value
func (o *Volume) SetLabels(v map[string]string) {
	o.Labels = v
}

// GetMountpoint returns the Mountpoint field value
func (o *Volume) GetMountpoint() string {
	if o == nil {
		return ""
	}
	return o.Mountpoint
}

// GetMountpointOk returns a tuple with the Mountpoint field value
// and a boolean to check if the value has been set.
func (o *Volume) GetMountpointOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Mountpoint, true
}

// SetMountpoint sets field value
func (o *Volume) SetMountpoint(v string) {
	o.Mountpoint = v
}

// GetName returns the Name field value
func (o *Volume) GetName() string {
	if o == nil {
		return ""
	}
	return o.Name
}

// GetNameOk returns a tuple with the Name field value
// and a boolean to check if the value has been set.
func (o *Volume) GetNameOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Name, true
}

// SetName sets field value
func (o *Volume) SetName(v string) {
	o.Name = v
}

// GetOptions returns the Options field value
func (o *Volume) GetOptions() map[string]string {
	if o == nil {
		return nil
	}
	return o.Options
}

// GetOptionsOk returns a tuple with the Options field value
// and a boolean to check if the value has been set.
func (o *Volume) GetOptionsOk() (*map[string]string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Options, true
}

// SetOptions sets field value
func (o *Volume) SetOptions(v map[string]string) {
	o.Options = v
}

// GetScope returns the Scope field value
func (o *Volume) GetScope() string {
	if o == nil {
		return ""
	}
	return o.Scope
}

// GetScopeOk returns a tuple with the Scope field value
// and a boolean to check if the value has been set.
func (o *Volume) GetScopeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Scope, true
}

// SetScope sets field value
func (o *Volume) SetScope(v string) {
	o.Scope = v
}
// GetStatus returns the Status field value if set, zero value otherwise.
func (o *Volume) GetStatus() map[string]map[string]interface{} {
	if v, ok := o.GetStatusOk(); ok {
		return v
	}
	return nil
}

// GetStatusOk returns a tuple with the Status field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Volume) GetStatusOk() (map[string]map[string]interface{}, bool) {
	if o == nil || o.Status == nil {
		return nil, false
	}
	return o.Status, true
}

// HasStatus returns a boolean if a field has been set.
func (o *Volume) HasStatus() bool {
	return o != nil && o.Status != nil
}

// SetStatus gets a reference to the given map[string]map[string]interface{} and assigns it to the Status field.
func (o *Volume) SetStatus(v map[string]map[string]interface{}) {
	o.Status = v
}

// GetUsageData returns the UsageData field value if set, zero value otherwise.
func (o *Volume) GetUsageData() VolumeUsageData {
	if v, ok := o.GetUsageDataOk(); ok {
		return *v
	}
	var zero VolumeUsageData
	return zero
}

// GetUsageDataOk returns a tuple with the UsageData field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *Volume) GetUsageDataOk() (*VolumeUsageData, bool) {
	if o == nil || o.UsageData == nil {
		return nil, false
	}
	return o.UsageData, true
}

// HasUsageData returns a boolean if a field has been set.
func (o *Volume) HasUsageData() bool {
	return o != nil && o.UsageData != nil
}

// SetUsageData gets a reference to the given VolumeUsageData and assigns it to the UsageData field.
func (o *Volume) SetUsageData(v VolumeUsageData) {
	o.UsageData = &v
}
// MarshalJSON serializes the volume: required fields are always emitted,
// optional pointer/map fields only when non-nil. (Map insertion order is
// irrelevant — encoding/json sorts object keys.)
func (o Volume) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{
		"Driver":     o.Driver,
		"Labels":     o.Labels,
		"Mountpoint": o.Mountpoint,
		"Name":       o.Name,
		"Options":    o.Options,
		"Scope":      o.Scope,
	}
	if o.CreatedAt != nil {
		out["CreatedAt"] = o.CreatedAt
	}
	if o.Status != nil {
		out["Status"] = o.Status
	}
	if o.UsageData != nil {
		out["UsageData"] = o.UsageData
	}
	return json.Marshal(out)
}
type NullableVolume struct {
value *Volume
isSet bool
}
func (v NullableVolume) Get() *Volume {
return v.value
}
func (v *NullableVolume) Set(val *Volume) {
v.value = val
v.isSet = true
}
func (v NullableVolume) IsSet() bool {
return v.isSet
}
func (v *NullableVolume) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableVolume(val *Volume) *NullableVolume {
return &NullableVolume{value: val, isSet: true}
}
func (v NullableVolume) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableVolume) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | clients/kratos/go/model_volume.go | 0.784071 | 0.459682 | model_volume.go | starcoder |
// Package types provides the types that are used internally within the roomserver.
package types
import (
"github.com/matrix-org/gomatrixserverlib"
)
// EventTypeNID is a numeric ID for an event type.
type EventTypeNID int64

// EventStateKeyNID is a numeric ID for an event state_key.
type EventStateKeyNID int64

// EventNID is a numeric ID for an event.
type EventNID int64

// RoomNID is a numeric ID for a room.
type RoomNID int64

// StateSnapshotNID is a numeric ID for the state at an event.
type StateSnapshotNID int64

// StateBlockNID is a numeric ID for a block of state data.
// These blocks of state data are combined to form the actual state.
type StateBlockNID int64

// A StateKeyTuple is a pair of a numeric event type and a numeric state key.
// It is used to lookup state entries.
type StateKeyTuple struct {
	// The numeric ID for the event type.
	EventTypeNID EventTypeNID
	// The numeric ID for the state key.
	EventStateKeyNID EventStateKeyNID
}

// LessThan returns true if this state key is less than the other state key.
// The ordering is arbitrary and is used to implement binary search and to efficiently deduplicate entries.
func (a StateKeyTuple) LessThan(b StateKeyTuple) bool {
	if a.EventTypeNID == b.EventTypeNID {
		return a.EventStateKeyNID < b.EventStateKeyNID
	}
	return a.EventTypeNID < b.EventTypeNID
}
// A StateEntry is an entry in the room state of a matrix room.
type StateEntry struct {
	StateKeyTuple
	// The numeric ID for the event.
	EventNID EventNID
}

// LessThan returns true if this state entry is less than the other state entry.
// The ordering is arbitrary and is used to implement binary search and to efficiently deduplicate entries.
func (a StateEntry) LessThan(b StateEntry) bool {
	if a.StateKeyTuple == b.StateKeyTuple {
		return a.EventNID < b.EventNID
	}
	return a.StateKeyTuple.LessThan(b.StateKeyTuple)
}
// StateAtEvent is the state before and after a matrix event.
type StateAtEvent struct {
	// Should this state overwrite the latest events and memberships of the room?
	// This might be necessary when rejoining a federated room after a period of
	// absence, as our state and latest events will be out of date.
	Overwrite bool
	// The state before the event.
	BeforeStateSnapshotNID StateSnapshotNID
	// The state entry for the event itself, allows us to calculate the state after the event.
	StateEntry
}

// IsStateEvent returns whether the event the state is at is a state event.
// (A zero EventStateKeyNID marks a non-state event.)
func (s StateAtEvent) IsStateEvent() bool {
	return s.EventStateKeyNID != 0
}

// StateAtEventAndReference is StateAtEvent and gomatrixserverlib.EventReference glued together.
// It is used when looking up the latest events in a room in the database.
// The gomatrixserverlib.EventReference is used to check whether a new event references the event.
// The StateAtEvent is used to construct the current state of the room from the latest events.
type StateAtEventAndReference struct {
	StateAtEvent
	gomatrixserverlib.EventReference
}

// An Event is a gomatrixserverlib.Event with the numeric event ID attached.
// It is used when performing bulk event lookup in the database.
type Event struct {
	EventNID EventNID
	gomatrixserverlib.Event
}
// Numeric IDs preassigned to the event types that are special-cased by the
// roomserver.
const (
	// MRoomCreateNID is the numeric ID for the "m.room.create" event type.
	MRoomCreateNID = 1
	// MRoomPowerLevelsNID is the numeric ID for the "m.room.power_levels" event type.
	MRoomPowerLevelsNID = 2
	// MRoomJoinRulesNID is the numeric ID for the "m.room.join_rules" event type.
	MRoomJoinRulesNID = 3
	// MRoomThirdPartyInviteNID is the numeric ID for the "m.room.third_party_invite" event type.
	MRoomThirdPartyInviteNID = 4
	// MRoomMemberNID is the numeric ID for the "m.room.member" event type.
	MRoomMemberNID = 5
	// MRoomRedactionNID is the numeric ID for the "m.room.redaction" event type.
	MRoomRedactionNID = 6
	// MRoomHistoryVisibilityNID is the numeric ID for the "m.room.history_visibility" event type.
	MRoomHistoryVisibilityNID = 7
)

const (
	// EmptyStateKeyNID is the numeric ID for the empty state key.
	EmptyStateKeyNID = 1
)

// StateBlockNIDList is used to return the result of bulk StateBlockNID lookups from the database.
type StateBlockNIDList struct {
	StateSnapshotNID StateSnapshotNID
	StateBlockNIDs   []StateBlockNID
}

// StateEntryList is used to return the result of bulk state entry lookups from the database.
type StateEntryList struct {
	StateBlockNID StateBlockNID
	StateEntries  []StateEntry
}
// A MissingEventError is an error that happened because the roomserver was
// missing requested events from its database.
type MissingEventError string

// Error implements the error interface.
func (e MissingEventError) Error() string {
	return string(e)
}
package conf
// StringSliceVar defines a string slice flag and environment variable with
// the specified name, default value, and usage string. The argument p points
// to the []string receiving the parsed value. Values accumulate, e.g.
//	--string-slice="v1,v2" --string-slice="v3"
//	STRING_SLICE="v1,v2,v3"
// both yield []string{"v1", "v2", "v3"}.
func (c *Configurator) StringSliceVar(p *[]string, name string, value []string, usage string) {
	c.env().StringSliceVar(p, name, value, usage)
	c.flag().StringSliceVar(p, name, value, usage)
}

// StringSlice defines a string slice flag and environment variable with the
// specified name, default value, and usage string, returning the address of
// the []string that receives the parsed value.
func (c *Configurator) StringSlice(name string, value []string, usage string) *[]string {
	var target []string
	c.StringSliceVar(&target, name, value, usage)
	return &target
}

// StringSliceVarE defines a string slice environment variable only (no flag)
// with the specified name, default value, and usage string; p receives the
// parsed value, e.g. STRING_SLICE="v1,v2,v3" yields []string{"v1", "v2", "v3"}.
func (c *Configurator) StringSliceVarE(p *[]string, name string, value []string, usage string) {
	c.env().StringSliceVar(p, name, value, usage)
}

// StringSliceE defines a string slice environment variable only (no flag),
// returning the address of the []string that receives the parsed value.
func (c *Configurator) StringSliceE(name string, value []string, usage string) *[]string {
	var target []string
	c.StringSliceVarE(&target, name, value, usage)
	return &target
}

// StringSliceVarF defines a string slice flag only (no environment variable)
// with the specified name, default value, and usage string; p receives the
// parsed value, with repeated flags accumulating.
func (c *Configurator) StringSliceVarF(p *[]string, name string, value []string, usage string) {
	c.flag().StringSliceVar(p, name, value, usage)
}

// StringSliceF defines a string slice flag only (no environment variable),
// returning the address of the []string that receives the parsed value.
func (c *Configurator) StringSliceF(name string, value []string, usage string) *[]string {
	var target []string
	c.StringSliceVarF(&target, name, value, usage)
	return &target
}
// StringSliceVar defines a string flag and environment variable with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag and/or environment variable.
// For example:
// --string-slice="v1,v2" --string-slice="v3"
// STRING_SLICE="v1,v2,v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSliceVar(p *[]string, name string, value []string, usage string) {
Global.StringSliceVar(p, name, value, usage)
}
// StringSlice defines a string flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag and/or environment variable.
// For example:
// --string-slice="v1,v2" --string-slice="v3"
// STRING_SLICE="v1,v2,v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSlice(name string, value []string, usage string) *[]string {
return Global.StringSlice(name, value, usage)
}
// StringSliceVarE defines a string environment variable with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the environment variable.
// For example:
// STRING_SLICE="v1,v2,v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSliceVarE(p *[]string, name string, value []string, usage string) {
Global.StringSliceVarE(p, name, value, usage)
}
// StringSliceE defines a string environment variable with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the environment variable.
// For example:
// STRING_SLICE="v1,v2,v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSliceE(name string, value []string, usage string) *[]string {
return Global.StringSliceE(name, value, usage)
}
// StringSliceVarF defines a string flag with specified name, default value, and usage string.
// The argument p points to a []string variable in which to store the value of the flag.
// For example:
// --string-slice="v1,v2" --string-slice="v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSliceVarF(p *[]string, name string, value []string, usage string) {
Global.StringSliceVarF(p, name, value, usage)
}
// StringSliceF defines a string flag with specified name, default value, and usage string.
// The return value is the address of a []string variable that stores the value of the flag.
// For example:
// --string-slice="v1,v2" --string-slice="v3"
// will result in
// []string{"v1", "v2", "v3"}
func StringSliceF(name string, value []string, usage string) *[]string {
return Global.StringSliceF(name, value, usage)
} | value_string_slice.go | 0.780871 | 0.534977 | value_string_slice.go | starcoder |
package specs
import (
"errors"
"fmt"
)
// Repeated represents an array type of fixed size.
type Repeated []Template

// Template returns the single Template shared by all elements of the array,
// verifying that every element has the same type. Scalar defaults are
// stripped from the result. All references must be resolved before calling.
func (repeated Repeated) Template() (Template, error) {
	var reference Template
	if len(repeated) > 0 {
		reference = repeated[0]
	}
	// Every element must match the first one.
	// TODO: remove once "oneOf" support is added
	for index := 1; index < len(repeated); index++ {
		if err := reference.Compare(repeated[index]); err != nil {
			return Template{}, fmt.Errorf("all the elements inside the array must have the same type: %w", err)
		}
	}
	// Drop the default value for scalar element types.
	if reference.Scalar != nil {
		reference = reference.Clone()
		reference.Scalar.Default = nil
	}
	return reference, nil
}

// Clone returns a deep copy of the repeated property.
func (repeated Repeated) Clone() Repeated {
	clone := make(Repeated, 0, len(repeated))
	for _, template := range repeated {
		clone = append(clone, template.Clone())
	}
	return clone
}
// Compare given repeated to the provided one returning the first mismatch.
func (repeated Repeated) Compare(expected Repeated) error {
if expected == nil && repeated == nil {
return nil
}
if expected == nil && repeated != nil {
return errors.New("expected to be nil")
}
if expected != nil && repeated == nil {
return fmt.Errorf("expected to be an array, got %v", nil)
}
if len(expected) != len(repeated) {
return fmt.Errorf("expected to have %d elements, got %d", len(expected), len(repeated))
}
left, err := repeated.Template()
if err != nil {
return fmt.Errorf("unkown repeated property template: %w", err)
}
right, err := expected.Template()
if err != nil {
return fmt.Errorf("unkown expected property template: %w", err)
}
err = left.Compare(right)
if err != nil {
return fmt.Errorf("repeated property: %w", err)
}
return nil
} | pkg/specs/repeated.go | 0.57678 | 0.420391 | repeated.go | starcoder |
package collision2d
import (
"math"
)
//PointInCircle returns true if the point is inside the circle (boundary
//inclusive), compared via squared distances to avoid a square root.
func PointInCircle(point Vector, circle Circle) bool {
	offset := NewVector(point.X, point.Y).Sub(circle.Pos)
	return offset.Len2() <= circle.R*circle.R
}

//PointInPolygon returns true if the point is inside a polygon, by testing a
//1x1 box at the point against the polygon.
func PointInPolygon(point Vector, polygon Polygon) bool {
	pointBox := NewBox(point.Clone(), 1, 1).ToPolygon()
	inside, _ := TestPolygonPolygon(pointBox, polygon)
	return inside
}
//TestCircleCircle returns true if the circles collide with each other.
//TestCircleCircle returns true if the circles collide with each other.
//On collision the Response carries the overlap depth, the collision normal
//OverlapN (the normalized vector from circleA's centre towards circleB's),
//the overlap vector, and containment flags.
func TestCircleCircle(circleA, circleB Circle) (isColliding bool, response Response) {
	response = NewResponse()
	// Vector from A's centre to B's centre.
	differenceV := NewVector(circleB.Pos.X, circleB.Pos.Y).Sub(circleA.Pos)
	totalRadius := circleA.R + circleB.R
	totalRadiusSqr := totalRadius * totalRadius
	distanceSqr := differenceV.Len2()
	// Compare squared quantities first so the sqrt only runs on actual hits.
	if distanceSqr > totalRadiusSqr {
		return false, response.NotColliding()
	}
	dist := math.Sqrt(distanceSqr)
	response.A = circleA
	response.B = circleB
	response.Overlap = totalRadius - dist
	response.OverlapN = response.OverlapN.Copy(differenceV.Normalize())
	response.OverlapV = response.OverlapV.Copy(differenceV.Normalize()).Scale(response.Overlap)
	// Containment: one circle lies entirely inside the other.
	response.AInB = circleA.R <= circleB.R && dist <= circleB.R-circleA.R
	response.BInA = circleB.R <= circleA.R && dist <= circleA.R-circleB.R
	return true, response
}
// TestPolygonCircle returns true if the polygon collides with the circle.
// For every polygon edge the circle center is classified into a Voronoi
// region (before the edge start, past its end, or alongside it); the
// smallest penetration found over all edges becomes the response overlap.
func TestPolygonCircle(polygon Polygon, circle Circle) (isColliding bool, response Response) {
	response = NewResponse()
	// Circle center expressed in the polygon's local coordinate frame.
	circlePos := NewVector(circle.Pos.X, circle.Pos.Y).Sub(polygon.Pos)
	radius := circle.R
	radius2 := radius * radius
	calcPoints := polygon.CalcPoints
	// Scratch vectors reused across iterations.
	edge := NewVector(0, 0)
	point := NewVector(0, 0)
	for i := 0; i < len(calcPoints); i++ {
		// Indices of the neighbouring vertices, wrapping around the ring.
		var next int
		var prev int
		if i == len(calcPoints)-1 {
			next = 0
		} else {
			next = i + 1
		}
		if i == 0 {
			prev = len(calcPoints) - 1
		} else {
			prev = i - 1
		}
		overlap := 0.0
		overlapN := NewVector(0, 0)
		changedOverlapN := false
		edge = edge.Copy(polygon.Edges[i])
		// Vector from the current vertex to the circle center.
		point = point.Copy(circlePos).Sub(calcPoints[i])
		// If the center is further than the radius from any vertex, the
		// circle cannot be fully contained in the polygon.
		if point.Len2() > radius2 {
			response.AInB = false
		}
		region := voronoiRegion(edge, point)
		if region == leftVoronoiRegion {
			// Center lies before this edge's start: only relevant when it
			// also lies past the end of the previous edge, i.e. the
			// closest feature is the shared vertex.
			edge = edge.Copy(polygon.Edges[prev])
			point2 := NewVector(circlePos.X, circlePos.Y).Sub(calcPoints[prev])
			region2 := voronoiRegion(edge, point2)
			if region2 == rightVoronoiRegion {
				dist := point.Len()
				if dist > radius {
					// Vertex is outside the circle: no collision at all.
					return false, response.NotColliding()
				}
				response.BInA = false
				overlapN = overlapN.Copy(point.Normalize())
				changedOverlapN = true
				overlap = radius - dist
			}
		} else if region == rightVoronoiRegion {
			// Mirror case: the closest feature is the next vertex.
			edge = edge.Copy(polygon.Edges[next])
			point = point.Copy(circlePos).Sub(calcPoints[next])
			region2 := voronoiRegion(edge, point)
			if region2 == leftVoronoiRegion {
				dist := point.Len()
				if dist > radius {
					return false, response.NotColliding()
				}
				response.BInA = false
				overlapN = overlapN.Copy(point.Normalize())
				changedOverlapN = true
				overlap = radius - dist
			}
		} else {
			// Center lies alongside the edge: measure the signed distance
			// to the edge line along its perpendicular.
			normal := edge.Perp().Normalize()
			dist := point.Dot(normal)
			distAbs := math.Abs(dist)
			if dist > 0 && distAbs > radius {
				// Circle is fully on the outside of this edge.
				return false, response.NotColliding()
			}
			overlapN = overlapN.Copy(normal)
			changedOverlapN = true
			overlap = radius - dist
			if dist >= 0 || overlap < 2*radius {
				response.BInA = false
			}
		}
		// Keep the smallest overlap seen so far as the minimum translation.
		if changedOverlapN && math.Abs(overlap) < math.Abs(response.Overlap) {
			response.Overlap = overlap
			response.OverlapN = response.OverlapN.Copy(overlapN)
		}
	}
	response.A = polygon
	response.B = circle
	response.OverlapV = response.OverlapV.Copy(response.OverlapN).Scale(response.Overlap)
	return true, response
}
// TestCirclePolygon returns true if the circle collides with the polygon.
// It delegates to TestPolygonCircle and, on collision, flips the response
// so that A refers to the circle and B to the polygon.
func TestCirclePolygon(circle Circle, polygon Polygon) (isColliding bool, response Response) {
	isColliding, response = TestPolygonCircle(polygon, circle)
	if !isColliding {
		return isColliding, response
	}
	// Swap the roles of A and B and point the overlap the other way.
	response.OverlapN = response.OverlapN.Reverse()
	response.OverlapV = response.OverlapV.Reverse()
	response.A, response.B = response.B, response.A
	response.AInB, response.BInA = response.BInA, response.AInB
	return isColliding, response
}
// TestPolygonPolygon returns true if the polygons collide with each other.
// It runs the separating-axis test over the face normals of both polygons;
// if no axis separates them they intersect, and response holds the minimum
// translation vector accumulated by isSeparatingAxis.
func TestPolygonPolygon(polygonA, polygonB Polygon) (isColliding bool, response Response) {
	response = NewResponse()
	// Test all of A's normals as candidate separating axes...
	for i := 0; i < len(polygonA.CalcPoints); i++ {
		if isSeparatingAxis(polygonA.Pos, polygonB.Pos, polygonA.CalcPoints, polygonB.CalcPoints, polygonA.Normals[i], &response) {
			return false, response.NotColliding()
		}
	}
	// ...then all of B's.
	for i := 0; i < len(polygonB.CalcPoints); i++ {
		if isSeparatingAxis(polygonA.Pos, polygonB.Pos, polygonA.CalcPoints, polygonB.CalcPoints, polygonB.Normals[i], &response) {
			return false, response.NotColliding()
		}
	}
	response.A = polygonA
	response.B = polygonB
	response.OverlapV = response.OverlapV.Copy(response.OverlapN).Scale(response.Overlap)
	return true, response
}
// voronoiRegion classifies point against the Voronoi regions of the line
// segment (treated as a vector from the origin): before its start (left),
// past its end (right), or alongside it (middle).
func voronoiRegion(line, point Vector) int {
	dp := point.Dot(line)
	switch {
	case dp < 0:
		return leftVoronoiRegion
	case dp > line.Len2():
		return rightVoronoiRegion
	default:
		return middleVoronoiRegion
	}
}
// isSeparatingAxis projects both point sets onto axis and reports whether
// the projections are disjoint (the axis separates the shapes, so there is
// no collision). As a side effect it updates response with the smallest
// overlap seen so far and clears the containment flags AInB/BInA.
func isSeparatingAxis(aPos, bPos Vector, aPoints, bPoints []Vector, axis Vector, response *Response) bool {
	offsetV := NewVector(bPos.X, bPos.Y).Sub(aPos)
	// Shift B's projected interval into A's frame by projecting the
	// position offset onto the axis.
	projectedOffset := offsetV.Dot(axis)
	minA, maxA := flattenPointsOn(aPoints, axis)
	minB, maxB := flattenPointsOn(bPoints, axis)
	minB += projectedOffset
	maxB += projectedOffset
	if minA > maxB || minB > maxA {
		// Disjoint intervals: separating axis found.
		return true
	}
	// Intervals overlap; compute a signed overlap so the direction of the
	// minimum translation is preserved.
	overlap := 0.0
	if minA < minB {
		response.AInB = false
		if maxA < maxB {
			// A sticks out on the low side only.
			overlap = maxA - minB
			response.BInA = false
		} else {
			// B is contained in A: take the cheaper way out, negated when
			// pushing towards the low side.
			option1 := maxA - minB
			option2 := maxB - minA
			if option1 < option2 {
				overlap = option1
			} else {
				overlap = -option2
			}
		}
	} else {
		response.BInA = false
		if maxA > maxB {
			// A sticks out on the high side only.
			overlap = minA - maxB
			response.AInB = false
		} else {
			// A is contained in B: same cheaper-way-out choice as above.
			option1 := maxA - minB
			option2 := maxB - minA
			if option1 < option2 {
				overlap = option1
			} else {
				overlap = -option2
			}
		}
	}
	// Keep the axis with the smallest absolute overlap as the minimum
	// translation direction; flip it when the signed overlap is negative.
	absOverlap := math.Abs(overlap)
	if absOverlap < response.Overlap {
		response.Overlap = absOverlap
		response.OverlapN = response.OverlapN.Copy(axis)
		if overlap < 0 {
			response.OverlapN = response.OverlapN.Reverse()
		}
	}
	return false
}
func flattenPointsOn(points []Vector, normal Vector) (min, max float64) {
min = math.MaxFloat64
max = -math.MaxFloat64
length := len(points)
for i := 0; i < length; i++ {
dot := points[i].Dot(normal)
if dot < min {
min = dot
}
if dot > max {
max = dot
}
}
return min, max
} | collision.go | 0.870184 | 0.675577 | collision.go | starcoder |
package engine
import (
"math/rand"
"github.com/MathieuMoalic/mms/cuda"
"github.com/MathieuMoalic/mms/data"
"github.com/MathieuMoalic/mms/util"
)
// init registers the geometry script hooks (SetGeom, EdgeSmooth) and the
// geometry quantity with the engine's declaration tables at startup.
func init() {
	DeclFunc("SetGeom", SetGeom, "Sets the geometry to a given shape")
	DeclVar("EdgeSmooth", &edgeSmooth, "Geometry edge smoothing with edgeSmooth^3 samples per cell, 0=staircase, ~8=very smooth")
	geometry.init()
}

var (
	// geometry is the package-wide singleton holding the cell fill fractions.
	geometry geom
	// edgeSmooth controls sub-cell sampling: edgeSmooth^3 samples per cell.
	edgeSmooth int = 0 // disabled by default
)

// geom holds the per-cell fill fraction (0..1) and the shape that
// generated it.
type geom struct {
	info
	buffer *data.Slice // GPU fill-fraction buffer; lazily created (see Gpu)
	shape Shape // predicate deciding whether a coordinate lies inside the geometry
}

// init resets the buffer and registers the quantity under the name "geom".
func (g *geom) init() {
	g.buffer = nil
	g.info = info{1, "geom", ""}
	DeclROnly("geom", g, "Cell fill fraction (0..1)")
}
// spaceFill returns the fraction of the mesh that lies inside the
// geometry (1 when no explicit geometry has been set).
func spaceFill() float64 {
	if geometry.Gpu().IsNil() {
		return 1
	}
	return float64(cuda.Sum(geometry.buffer)) / float64(geometry.Mesh().NCell())
}
// Gpu returns the GPU-side fill-fraction buffer, lazily creating a nil
// placeholder slice of the right size on first use.
func (g *geom) Gpu() *data.Slice {
	if g.buffer != nil {
		return g.buffer
	}
	g.buffer = data.NilSlice(1, g.Mesh().Size())
	return g.buffer
}
// Slice returns the fill fractions. When no geometry has been set it hands
// out a freshly allocated, all-ones buffer and reports true so the caller
// knows to recycle it.
func (g *geom) Slice() (*data.Slice, bool) {
	buf := g.Gpu()
	if !buf.IsNil() {
		return buf, false
	}
	ones := cuda.Buffer(g.NComp(), g.Mesh().Size())
	cuda.Memset(ones, 1)
	return ones, true
}
// EvalTo writes the geometry values into dst (Quantity interface).
func (q *geom) EvalTo(dst *data.Slice) { EvalTo(q, dst) }

// Compile-time check: geom implements Quantity.
var _ Quantity = &geometry

// average returns the average fill fraction over the whole universe,
// recycling the temporary buffer when one was allocated by Slice.
func (g *geom) average() []float64 {
	s, r := g.Slice()
	if r {
		defer cuda.Recycle(s)
	}
	return sAverageUniverse(s)
}

// Average returns the scalar average fill fraction.
func (g *geom) Average() float64 { return g.average()[0] }

// SetGeom sets the global geometry to the given shape (script entry point).
func SetGeom(s Shape) {
	geometry.setGeom(s)
}
// setGeom rasterizes shape s onto the mesh: every cell gets a fill
// fraction in [0..1]. Magnetization in newly included cells that was zero
// is seeded with a deterministic random direction, and the final
// normalize zeroes m outside the volume.
func (geometry *geom) setGeom(s Shape) {
	SetBusy(true)
	defer SetBusy(false)
	if s == nil {
		// TODO: would be nice not to save volume if entirely filled
		s = universe
	}
	geometry.shape = s
	if geometry.Gpu().IsNil() {
		geometry.buffer = cuda.NewSlice(1, geometry.Mesh().Size())
	}
	// Rasterize on the host first, then upload.
	host := data.NewSlice(1, geometry.Gpu().Size())
	array := host.Scalars()
	V := host
	v := array
	n := geometry.Mesh().Size()
	c := geometry.Mesh().CellSize()
	cx, cy, cz := c[X], c[Y], c[Z]
	progress, progmax := 0, n[Y]*n[Z]
	var ok bool // becomes true once any cell has a non-zero fill
	for iz := 0; iz < n[Z]; iz++ {
		for iy := 0; iy < n[Y]; iy++ {
			progress++
			util.Progress(progress, progmax, "Initializing geometry")
			for ix := 0; ix < n[X]; ix++ {
				r := Index2Coord(ix, iy, iz)
				x0, y0, z0 := r[X], r[Y], r[Z]
				// check if center and all vertices lie inside or all outside
				allIn, allOut := true, true
				if s(x0, y0, z0) {
					allOut = false
				} else {
					allIn = false
				}
				if edgeSmooth != 0 { // center is sufficient if we're not really smoothing
					// Sample the 8 cell corners as well.
					for _, Δx := range []float64{-cx / 2, cx / 2} {
						for _, Δy := range []float64{-cy / 2, cy / 2} {
							for _, Δz := range []float64{-cz / 2, cz / 2} {
								if s(x0+Δx, y0+Δy, z0+Δz) { // inside
									allOut = false
								} else {
									allIn = false
								}
							}
						}
					}
				}
				switch {
				case allIn:
					v[iz][iy][ix] = 1
					ok = true
				case allOut:
					v[iz][iy][ix] = 0
				default:
					// Cell is cut by the shape boundary: estimate the fill
					// fraction by sub-sampling.
					v[iz][iy][ix] = geometry.cellVolume(ix, iy, iz)
					ok = ok || (v[iz][iy][ix] != 0)
				}
			}
		}
	}
	if !ok {
		util.Fatal("SetGeom: geometry completely empty")
	}
	data.Copy(geometry.buffer, V)
	// M inside geom but previously outside needs to be re-inited
	needupload := false
	geomlist := host.Host()[0]
	mhost := M.Buffer().HostCopy()
	m := mhost.Host()
	// Fixed seed 0 makes the re-initialization deterministic.
	rng := rand.New(rand.NewSource(0))
	for i := range m[0] {
		if geomlist[i] != 0 {
			mx, my, mz := m[X][i], m[Y][i], m[Z][i]
			if mx == 0 && my == 0 && mz == 0 {
				needupload = true
				rnd := randomDir(rng)
				m[X][i], m[Y][i], m[Z][i] = float32(rnd[X]), float32(rnd[Y]), float32(rnd[Z])
			}
		}
	}
	if needupload {
		data.Copy(M.Buffer(), mhost)
	}
	M.normalize() // removes m outside vol
}
// cellVolume estimates the fill fraction of cell (ix, iy, iz) by sampling
// edgeSmooth^3 evenly spaced points inside the cell and counting how many
// fall inside the shape.
//
// Consistency fix: the method now uses its receiver g throughout instead
// of reaching for the package-level `geometry` singleton, matching the
// other *geom methods in this file.
func (g *geom) cellVolume(ix, iy, iz int) float32 {
	r := Index2Coord(ix, iy, iz)
	x0, y0, z0 := r[X], r[Y], r[Z]
	c := g.Mesh().CellSize()
	cx, cy, cz := c[X], c[Y], c[Z]
	s := g.shape
	var vol float32
	N := edgeSmooth
	S := float64(edgeSmooth)
	for dx := 0; dx < N; dx++ {
		Δx := -cx/2 + (cx / (2 * S)) + (cx/S)*float64(dx)
		for dy := 0; dy < N; dy++ {
			Δy := -cy/2 + (cy / (2 * S)) + (cy/S)*float64(dy)
			for dz := 0; dz < N; dz++ {
				Δz := -cz/2 + (cz / (2 * S)) + (cz/S)*float64(dz)
				if s(x0+Δx, y0+Δy, z0+Δz) { // sample is inside the shape
					vol++
				}
			}
		}
	}
	return vol / float32(N*N*N)
}
// shift moves the geometry mask by dx cells along X and then re-rasterizes
// only the columns that slid in from the boundary against the (static)
// shape.
func (g *geom) shift(dx int) {
	// empty mask, nothing to do
	if g == nil || g.buffer.IsNil() {
		return
	}
	// allocated mask: shift
	s := g.buffer
	s2 := cuda.Buffer(1, g.Mesh().Size())
	defer cuda.Recycle(s2)
	newv := float32(1) // initially fill edges with 1's
	cuda.ShiftX(s2, s, dx, newv, newv)
	data.Copy(s, s2)
	n := Mesh().Size()
	// Re-evaluate the shape only in the freshly exposed column range.
	x1, x2 := shiftDirtyRange(dx)
	for iz := 0; iz < n[Z]; iz++ {
		for iy := 0; iy < n[Y]; iy++ {
			for ix := x1; ix < x2; ix++ {
				r := Index2Coord(ix, iy, iz) // includes shift
				if !g.shape(r[X], r[Y], r[Z]) {
					cuda.SetCell(g.buffer, 0, ix, iy, iz, 0) // a bit slowish, but hardly reached
				}
			}
		}
	}
}
// shiftY moves the geometry mask by dy cells along Y and then
// re-rasterizes only the rows that slid in from the boundary against the
// (static) shape.
//
// Fix: the dirty range is now derived from the Y dimension. The previous
// version reused shiftDirtyRange, which is hard-wired to the X size and
// produced a wrong range whenever the mesh is not square in X/Y.
func (g *geom) shiftY(dy int) {
	// empty mask, nothing to do
	if g == nil || g.buffer.IsNil() {
		return
	}
	// allocated mask: shift
	s := g.buffer
	s2 := cuda.Buffer(1, g.Mesh().Size())
	defer cuda.Recycle(s2)
	newv := float32(1) // initially fill edges with 1's
	cuda.ShiftY(s2, s, dy, newv, newv)
	data.Copy(s, s2)
	n := Mesh().Size()
	// y range that needs to be refreshed after a shift over dy.
	util.Argument(dy != 0)
	var y1, y2 int
	if dy < 0 {
		y1, y2 = n[Y]+dy, n[Y]
	} else {
		y1, y2 = 0, dy
	}
	for iz := 0; iz < n[Z]; iz++ {
		for ix := 0; ix < n[X]; ix++ {
			for iy := y1; iy < y2; iy++ {
				r := Index2Coord(ix, iy, iz) // includes shift
				if !g.shape(r[X], r[Y], r[Z]) {
					cuda.SetCell(g.buffer, 0, ix, iy, iz, 0) // a bit slowish, but hardly reached
				}
			}
		}
	}
}
// shiftDirtyRange returns the half-open x index range [x1, x2) that must
// be re-rasterized after shifting the mask over dx cells: the columns
// newly exposed at the boundary (high side for negative dx, low side for
// positive dx).
func shiftDirtyRange(dx int) (x1, x2 int) {
	nx := Mesh().Size()[X]
	util.Argument(dx != 0)
	if dx < 0 {
		x1 = nx + dx
		x2 = nx
	} else {
		x1 = 0
		x2 = dx
	}
	return
}
func (g *geom) Mesh() *data.Mesh { return Mesh() } | engine/geom.go | 0.585931 | 0.535766 | geom.go | starcoder |
package max_slice
import (
"math"
)
/*
A non-empty zero-indexed array A consisting of N integers is given.
A triplet (X, Y, Z), such that 0 ≤ X < Y < Z < N, is called a double slice.
The sum of double slice (X, Y, Z) is the total of
A[X + 1] + A[X + 2] + ... + A[Y − 1] + A[Y + 1] + A[Y + 2] + ... + A[Z − 1].
For example, array A such that:
A[0] = 3
A[1] = 2
A[2] = 6
A[3] = -1
A[4] = 4
A[5] = 5
A[6] = -1
A[7] = 2
contains the following example double slices:
double slice (0, 3, 6), sum is 2 + 6 + 4 + 5 = 17,
double slice (0, 3, 7), sum is 2 + 6 + 4 + 5 − 1 = 16,
double slice (3, 4, 5), sum is 0.
The goal is to find the maximal sum of any double slice.
Write a function:
func Solution(A []int) int
that, given a non-empty zero-indexed array A consisting of N integers,
returns the maximal sum of any double slice.
For example, given:
A[0] = 3
A[1] = 2
A[2] = 6
A[3] = -1
A[4] = 4
A[5] = 5
A[6] = -1
A[7] = 2
the function should return 17, because no double slice of array A has a sum
of greater than 17.
Assume that:
N is an integer within the range [3..100,000];
each element of array A is an integer within the range [−10,000..10,000].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage
(not counting the storage required for input arguments).
*/
// MaxDoubleSliceSum returns the maximal sum of any double slice (X, Y, Z)
// of A, i.e. A[X+1..Y-1] + A[Y+1..Z-1] with 0 <= X < Y < Z < len(A).
//
// Kadane-style dynamic programming: leftSum[i] is the best non-negative
// slice sum ending just left of pivot position i, rightSum[i] the best
// starting just right of it; the answer is the best leftSum[i]+rightSum[i].
// O(n) time, O(n) extra space.
//
// Improvements over the previous version: removed the misleading
// `1 - (1 << 32 - 1)` initializer (which actually parsed as 1 - 2^31) and
// the int -> float64 -> int round-trips through math.Max for what is pure
// integer arithmetic.
func MaxDoubleSliceSum(A []int) int {
	// With fewer than four elements every double slice has an empty interior.
	if len(A) < 4 {
		return 0
	}
	N := len(A) - 2 // number of candidate pivot (Y) positions
	leftSum := make([]int, N)
	rightSum := make([]int, N)
	for i := 0; i < N-1; i++ {
		leftSum[i+1] = maxOfInts(0, A[i+1]+leftSum[i])
		rightSum[N-i-2] = maxOfInts(0, A[N-i]+rightSum[N-i-1])
	}
	best := math.MinInt32 // overwritten immediately: the loop runs at least twice (N >= 2)
	for i := 0; i < N; i++ {
		best = maxOfInts(best, leftSum[i]+rightSum[i])
	}
	return best
}

// maxOfInts returns the larger of two ints.
func maxOfInts(a, b int) int {
	if a > b {
		return a
	}
	return b
}
package world
import (
"fmt"
"github.com/g3n/engine/math32"
)
// Winding is an ordered list of points describing a polygon lying on a
// plane; it is the unit that gets clipped against splitting planes.
type Winding struct {
	Points []*math32.Vector3
}

// NewWinding allocates a winding with the given number of zeroed points.
func NewWinding(points int) *Winding {
	w := &Winding{
		Points: make([]*math32.Vector3, points),
	}
	for i := range w.Points {
		w.Points[i] = &math32.Vector3{}
	}
	return w
}

// splitEpsilon is the distance tolerance below which a point counts as
// lying exactly on a splitting plane.
const splitEpsilon = 0.01

// Classification of a point relative to a splitting plane.
const (
	splitFront = 0
	splitBack = 1
	splitOn = 2
)
// Clip cuts this winding against the given plane in place, keeping only
// the part in front of it: every point is classified against the plane,
// then the edges are walked, emitting kept points and freshly computed
// intersection points where an edge crosses the plane.
func (w *Winding) Clip(split *Plane) {
	// Figure out which side of the split
	// each point of this winding is on
	// This is taken completely wholesale from the valve sdk
	// look in brushops.cpp
	// Counts of how many points are on which side
	windingLength := len(w.Points) + 1
	counts := make([]int, 3)
	// Which side the point is on
	sides := make([]int, windingLength)
	// Distance from the split for each point
	dists := make([]float32, windingLength)
	for i, point := range w.Points {
		// Signed distance of the point from the plane.
		dot := point.Dot(&split.Normal)
		dot -= split.Dist
		dists[i] = dot
		if dot > splitEpsilon {
			sides[i] = splitFront
		} else if dot < -splitEpsilon {
			sides[i] = splitBack
		} else {
			sides[i] = splitOn
		}
		counts[sides[i]]++
	}
	// Sentinel entries so the edge i -> i+1 is valid for the last point.
	sides[len(w.Points)] = sides[0]
	dists[len(w.Points)] = dists[0]
	if counts[splitFront] == 0 && counts[splitBack] == 0 {
		// Nothing to split (everything was on the plane)
		// NOTE(review): the Println calls in this function look like
		// leftover debug logging — consider removing or routing to a logger.
		fmt.Println("All on...")
		return
	}
	if counts[splitFront] == 0 {
		// Everything was behind this plane
		// so we no longer have any points
		fmt.Println("All behind...")
		*w = *NewWinding(0)
		return
	}
	if counts[splitBack] == 0 {
		// Nothing was behind the split
		// so nothing to change
		fmt.Println("All in front...")
		return
	}
	// Worst case a clip adds a few points; 4 extra slots is the classic bound.
	maxPoints := len(w.Points) + 4
	numPoints := 0
	newWinding := NewWinding(maxPoints)
	for i, point := range w.Points {
		mid := newWinding.Points[numPoints]
		if sides[i] == splitOn {
			// On the plane: keep the point verbatim.
			*mid = *point
			numPoints++
			continue
		}
		if sides[i] == splitFront {
			// In front: keep it, and reserve the next slot for a possible
			// intersection point on the outgoing edge.
			*mid = *point
			numPoints++
			mid = newWinding.Points[numPoints]
		}
		if sides[i+1] == splitOn || sides[i+1] == sides[i] {
			// Edge does not cross the plane: no intersection point needed.
			continue
		}
		// Generate a split point
		p2 := w.Points[0]
		if i != len(w.Points)-1 {
			p2 = w.Points[i+1]
		}
		numPoints++
		// Interpolation parameter where the edge meets the plane.
		dot := dists[i] / (dists[i] - dists[i+1])
		for j := 0; j < 3; j++ {
			// avoid round off error when possible
			comp := point.Component(j)
			if split.Normal.Component(j) == 1 {
				mid.SetComponent(j, split.Dist)
			} else if split.Normal.Component(j) == -1 {
				mid.SetComponent(j, -split.Dist)
			} else {
				mid.SetComponent(j, comp+dot*(p2.Component(j)-comp))
			}
		}
	}
	// Keep only the slots actually filled in.
	w.Points = newWinding.Points[:numPoints]
}
// maxTrace is the longest diagonal a map could possibly have; the Source
// engine defines this constant as sqrt(3) * 2 * 16584.
const maxTrace = 56755.8408624

// VectorMAInline computes dest = start + direction*scale (vector
// multiply-add) in place, without allocating an intermediate vector.
func VectorMAInline(start, direction, dest *math32.Vector3, scale float32) {
	dest.X = start.X + direction.X*scale
	dest.Y = start.Y + direction.Y*scale
	dest.Z = start.Z + direction.Z*scale
}
func CreateWindingFromPlane(p *Plane) *Winding {
// https://github.com/emily33901/HammerFromScratch/blob/a0f669718a70632138545fd1a5a493b8299221a0/hammer/brushops.cpp
// Find the major axis
up := &math32.Vector3{}
normalArray := []float32{p.Normal.X, p.Normal.Y, p.Normal.Z}
max := math32.Abs(normalArray[0])
idx := 0
for i := 1; i < 3; i++ {
v := math32.Abs(normalArray[i])
if v > max {
idx = i
}
}
if idx == -1 {
panic("No major axis found...")
}
if idx == 0 || idx == 1 {
up.Z = 1
} else {
up.X = 1
}
// If X or Y are greater than Z
if math32.Abs(p.Normal.X) < math32.Abs(p.Normal.Z) || math32.Abs(p.Normal.Y) < math32.Abs(p.Normal.Z) {
up.Z = 1
} else {
// Z must be the largest
up.X = 1
}
v := up.Dot(&p.Normal)
VectorMAInline(up, &p.Normal, up, -v)
up.Normalize()
org := p.Normal.Clone().MultiplyScalar(p.Dist)
right := up.Clone().Cross(&p.Normal).MultiplyScalar(maxTrace)
up = up.MultiplyScalar(maxTrace)
w := NewWinding(4)
w.Points[0] = org.Clone().Sub(right).Add(up)
w.Points[1] = org.Clone().Add(right).Add(up)
w.Points[2] = org.Clone().Add(right).Sub(up)
w.Points[3] = org.Clone().Sub(right).Sub(up)
return w
} | core/world/winding.go | 0.693369 | 0.483039 | winding.go | starcoder |
package cbftd
import (
	"fmt"
	"io/ioutil"
	"log"
	"path/filepath"
	"sort"
)
const (
	// ARRAY_LIMIT is the number of distinct byte values tracked.
	ARRAY_LIMIT = 256 // assuming english files 256 is enough
)

// ByteHistogram counts how often each byte value has been seen; Count is
// indexed by the byte value itself.
type ByteHistogram struct {
	Count [ARRAY_LIMIT]uint64
}

// byteCountPair couples a byte value with its occurrence count, for sorting.
type byteCountPair struct {
	b byte
	c uint64
}

// byCountAsc sorts pairs by ascending count, breaking ties by byte value.
type byCountAsc []byteCountPair

func (a byCountAsc) Len() int { return len(a) }
func (a byCountAsc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byCountAsc) Less(i, j int) bool {
	return (a[i].c < a[j].c) || (a[i].c == a[j].c && a[i].b < a[j].b)
}

// byCountDesc sorts pairs by descending count, breaking ties by byte value.
type byCountDesc []byteCountPair

func (a byCountDesc) Len() int { return len(a) }
func (a byCountDesc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byCountDesc) Less(i, j int) bool {
	return (a[i].c > a[j].c) || (a[i].c == a[j].c && a[i].b < a[j].b)
}

// NewByteHistogram creates a new ByteHistogram.
func NewByteHistogram() *ByteHistogram {
	return &ByteHistogram{}
}
// Update folds bytes into the histogram, skipping formatting characters
// (space, tab, CR, LF).
func (bh *ByteHistogram) Update(bytes []byte) {
	for _, b := range bytes {
		if isFmtChar(b) {
			continue
		}
		bh.Count[b]++
	}
}
// ByteList returns the bytes that have been counted at least once
// (ascending by byte value) together with a parallel slice of their
// occurrence counts.
func (bh *ByteHistogram) ByteList() ([]byte, []uint64) {
	bytelist := make([]byte, 0, ARRAY_LIMIT)
	bytecount := make([]uint64, 0, ARRAY_LIMIT)
	for i, c := range bh.Count {
		if c > 0 {
			bytelist = append(bytelist, byte(i))
			bytecount = append(bytecount, c)
		}
	}
	return bytelist, bytecount
}
// isFmtChar reports whether c is a formatting/whitespace character that
// the histogram ignores.
func isFmtChar(c byte) bool {
	switch c {
	case ' ', '\t', '\r', '\n':
		return true
	}
	return false
}
// SortedByteList behaves like ByteList but orders the result by count:
// ascending when ascOrder is true, descending otherwise, with ties broken
// by byte value.
func (bh *ByteHistogram) SortedByteList(ascOrder bool) ([]byte, []uint64) {
	pairs := make([]byteCountPair, ARRAY_LIMIT)
	for i, count := range bh.Count {
		pairs[i] = byteCountPair{b: byte(i), c: count}
	}
	if ascOrder {
		sort.Sort(byCountAsc(pairs))
	} else {
		sort.Sort(byCountDesc(pairs))
	}
	bytelist := make([]byte, 0, ARRAY_LIMIT)
	bytecount := make([]uint64, 0, ARRAY_LIMIT)
	for _, pair := range pairs {
		if pair.c > 0 {
			bytelist = append(bytelist, pair.b)
			bytecount = append(bytecount, pair.c)
		}
	}
	return bytelist, bytecount
}
// Train updates the histogram with the contents of every regular file in
// the samples directory, then normalizes the counts via norm.
//
// Fix: file paths are now built with filepath.Join instead of string
// concatenation, which silently produced wrong paths whenever samples
// lacked a trailing path separator.
func (bh *ByteHistogram) Train(samples string) {
	files, err := ioutil.ReadDir(samples)
	if err != nil {
		log.Fatal(err)
	}
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		content, err := ioutil.ReadFile(filepath.Join(samples, file.Name()))
		if err != nil {
			panic(err)
		}
		bh.Update(content)
	}
	bh.norm()
}
// String renders the histogram as one "byte - count" line per seen byte,
// most frequent first, preceded by a newline.
func (bh *ByteHistogram) String() (s string) {
	bytelist, bytecount := bh.SortedByteList(false)
	for i := range bytelist {
		s += fmt.Sprintf("%s - %d\n", string(bytelist[i]), bytecount[i])
	}
	return fmt.Sprintf("\n%s", s)
}
// normalize the values
// assuming the slice is sorted already
func (bh *ByteHistogram) norm() {
top := bh.Count[0]
for i := range bh.Count {
if bh.Count[i] != 0 {
bh.Count[i] = bh.Count[i] / top
}
}
} | cbftd.go | 0.653901 | 0.428473 | cbftd.go | starcoder |
package main
import "fmt"
// Package-level demo state used by main: the slice whose growth is
// observed, and the shared loop index.
var mySlice = make([]uint, 0, 7)
var i uint
// Slice, Maps, Channels are reference types to the underlaying premitive Data Structure of a similar type
// Made/Build with 3 Headder elements, consists of
// 'Address pointing to underlying data-Structure of a Kind'
// 'Length of the Slice'
// 'Length of the underlaying Data-Structure'
// We would end-up having index out of range errors if an element is added at a random index
// best practiece is to use append, 'Go' will identify the existing capacity and double-down
// There is no direct way to delete items of a slice but to re-append as below.
//If i is the index of the element to be removed, then the format of this process would look like the following:
// slice = append(slice[:i], slice[i+1:]...)
//MultiDimensionalSlice demonstrates ways to insert a slice into a slice.
// MultiDimensionalSlice demonstrates three ways to declare a slice of
// string slices and how to populate each safely.
//
// Fixes over the previous version: StudentYear2019 was declared but never
// used, which is a compile error in Go; and the comments wrongly claimed
// that appending to a nil slice errors out (append on a nil slice is
// perfectly valid — it allocates the backing array on first use).
func MultiDimensionalSlice() {
	var student1 = make([]string, 3)
	var student2 = make([]string, 3)
	var student3 = make([]string, 3)
	student1[0] = "Karthik"
	student1[1] = "Nagadevara"
	student1[2] = "17-JAN-1991"
	student2[0] = "Srinivas"
	student2[1] = "Palaka"
	student2[2] = "23-JAN-1985"
	student3[0] = "Randy"
	student3[1] = "Paush"
	student3[2] = "21-DEV-1971"

	// Short-hand literal: an empty (but non-nil) slice of string slices.
	// Indexing StudentYear2018[0] before any append would panic at
	// runtime; append is the safe way to grow it.
	StudentYear2018 := [][]string{}
	StudentYear2018 = append(StudentYear2018, student2)

	// A nil slice: it points to no backing array yet. append still works
	// fine — only direct indexing of a missing element would panic.
	var StudentYear2019 [][]string
	StudentYear2019 = append(StudentYear2019, student1)

	// make with length 10 pre-creates ten nil inner slices; the backing
	// array exists, but StudentYear2020[0][0] = "x" would still panic
	// because the inner slices are nil.
	StudentYear2020 := make([][]string, 10)
	StudentYear2020 = append(StudentYear2020, student3)

	// Idiomatic in-place increment of a slice element.
	ExampleSlice := make([]int, 10, 20)
	ExampleSlice[0] = 1
	ExampleSlice[0]++ // equivalent to ExampleSlice[0] = ExampleSlice[0] + 1
}
func main() {
for i = 1; i <= 100; i++ {
mySlice = append(mySlice, i)
fmt.Println("Length: ", len(mySlice), " Capacity: ", cap(mySlice), " Value: ", i)
}
//To delete the element at the position of 56
mySlice = append(mySlice[:56], mySlice[57:]...)
fmt.Println("Length: ", len(mySlice), " Capacity: ", cap(mySlice), " Value: ", mySlice)
} | 04DataStructures/02Slices.go | 0.54577 | 0.447943 | 02Slices.go | starcoder |
package tile
// Tile is a representation of an uniqe tile. THere are 34 unique tiles.
// Tile values starts from value of `1` and ends with value of `34`.
// Value `0` (TileNull) used for `undefined` (not set).
// Tiles order is the following:
// - `123456789` `Man` (numbers 1-9)
// - `123456789` `Pin` (numbers 10-18)
// - `123456789` `Sou` (numbers 19-27)
// - `East`, `South`, `West`, `East` (numbers 28-31)
// - `White`, `Green`, `Red` (numbers 32-34)
// Take note, that for the last group values starts from `White` (not `Red`, as stated in some manuals)
// See also Instance.
type Tile int

// Tile numberation starts from 1
const (
	TileNull Tile = iota
	Man1
	Man2
	Man3
	Man4
	Man5
	Man6
	Man7
	Man8
	Man9
	Pin1
	Pin2
	Pin3
	Pin4
	Pin5
	Pin6
	Pin7
	Pin8
	Pin9
	Sou1
	Sou2
	Sou3
	Sou4
	Sou5
	Sou6
	Sou7
	Sou8
	Sou9
	East
	South
	West
	North
	White
	Green
	Red
	TileEnd
)

const (
	// TileCount is the number of distinct tiles (34).
	TileCount = int(TileEnd - TileBegin)
	// SequenceBegin/SequenceEnd delimit the tiles that can start a
	// sequence run (suit tiles only; honors begin at East).
	SequenceBegin = TileBegin
	SequenceEnd = East
	// TileBegin is the first valid tile value.
	TileBegin = Man1
)
// Type returns the tile's group (Man/Pin/Sou/Wind/Dragon) by comparing
// the tile value against the first value of each group, or TypeNull for
// values outside the valid range.
func (t Tile) Type() Type {
	tp := Type(t)
	switch {
	case t < TileBegin:
		return TypeNull
	case tp < TypePin:
		return TypeMan
	case tp < TypeSou:
		return TypePin
	case tp < TypeWind:
		return TypeSou
	case tp < TypeDragon:
		return TypeWind
	case t < TileEnd:
		return TypeDragon
	}
	return TypeNull
}
// Number returns the tile's 1-based position within its group.
func (t Tile) Number() int {
	return int(t) - int(t.Type()) + 1
}

// Indicates used for dora indicators to choose dora tile: the next tile
// in the same group, wrapping around to the group's first tile.
func (t Tile) Indicates() Tile {
	next := t + 1
	if t.Type() != next.Type() {
		return Tile(t.Type())
	}
	return next
}

// String renders the tile via the Tiles string representation.
func (t Tile) String() string {
	return Tiles{t}.String()
}

// Instance returns the concrete instance of this tile with copy index c.
func (t Tile) Instance(c CopyID) Instance {
	return newInstance(t, c)
}
// Tiles is a list of tiles.
type Tiles []Tile

// Contains reports whether x occurs in t.
func (t Tiles) Contains(x Tile) bool {
	for _, v := range t {
		if x == v {
			return true
		}
	}
	return false
}
// Clone returns a copy of t backed by fresh storage.
// Uses the builtin copy instead of the previous element-by-element loop.
func (t Tiles) Clone() Tiles {
	x := make(Tiles, len(t))
	copy(x, t)
	return x
}
func (t Tiles) String() string {
return TilesToTenhouString(t)
}
func (t Tiles) Len() int { return len(t) }
func (t Tiles) Less(i, j int) bool { return t[i] < t[j] }
func (t Tiles) Swap(i, j int) { t[i], t[j] = t[j], t[i] } | tile/tile.go | 0.840357 | 0.749294 | tile.go | starcoder |
// series provides helpers for determining the series of
// a host, and translating from os to series.
package series
import (
"strconv"
"strings"
"sync"
"time"
"github.com/juju/errors"
"github.com/juju/os/v2"
)
const (
	// Fallback series/version names for Linux distributions that are not
	// explicitly recognized.
	genericLinuxSeries = "genericlinux"
	genericLinuxVersion = "genericlinux"
)

var (
	// HostSeries returns the series of the machine the current process is
	// running on (overrideable var for testing).
	HostSeries func() (string, error) = hostSeries

	// MustHostSeries calls HostSeries and panics if there is an error.
	MustHostSeries = mustHostSeries

	seriesOnce sync.Once
	// These are filled in by the first call to hostSeries
	series string
	seriesErr error

	// timeNow is time.Now, but overrideable via TimeNow in tests.
	timeNow = time.Now
)
// hostSeries returns the series of the machine the current process is
// running on. The underlying lookup runs exactly once; its result (or the
// annotated error) is cached for all subsequent calls.
func hostSeries() (string, error) {
	seriesOnce.Do(func() {
		var err error
		series, err = readSeries()
		if err != nil {
			seriesErr = errors.Annotate(err, "cannot determine host series")
		}
	})
	return series, seriesErr
}
// mustHostSeries returns the host series, panicking when it cannot be
// determined.
func mustHostSeries() string {
	s, err := HostSeries()
	if err != nil {
		panic(err)
	}
	return s
}
// MustOSFromSeries will panic if the series represents an "unknown"
// operating system; otherwise it returns the OS type resolved by
// GetOSFromSeries.
func MustOSFromSeries(series string) os.OSType {
	operatingSystem, err := GetOSFromSeries(series)
	if err != nil {
		panic("osVersion reported an error: " + err.Error())
	}
	return operatingSystem
}
// kernelToMajor fetches a dotted kernel version via getKernelVersion and
// returns its major component as an int.
func kernelToMajor(getKernelVersion func() (string, error)) (int, error) {
	fullVersion, err := getKernelVersion()
	if err != nil {
		return 0, err
	}
	head := strings.SplitN(fullVersion, ".", 2)[0]
	major, err := strconv.ParseInt(head, 10, 32)
	if err != nil {
		return 0, err
	}
	return int(major), nil
}
// macOSXSeriesFromKernelVersion resolves the Mac OS X series name from
// the Darwin kernel version reported by getKernelVersion, returning
// "unknown" together with the error when the version cannot be determined.
func macOSXSeriesFromKernelVersion(getKernelVersion func() (string, error)) (string, error) {
	majorVersion, err := kernelToMajor(getKernelVersion)
	if err != nil {
		logger.Infof("unable to determine OS version: %v", err)
		return "unknown", err
	}
	return macOSXSeriesFromMajorVersion(majorVersion)
}
// TODO(jam): 2014-05-06 https://launchpad.net/bugs/1316593
// we should have a system file that we can read so this can be updated without
// recompiling Juju. For now, this is a lot easier, and also solves the fact
// that we want to populate HostSeries during init() time, before
// we've potentially read that information from anywhere else
// macOSXSeries maps from the Darwin Kernel Major Version to the Mac OSX
// series.
var macOSXSeries = map[int]string{
21: "monterey",
20: "bigsur",
19: "catalina",
18: "mojave",
17: "highsierra",
16: "sierra",
15: "elcapitan",
14: "yosemite",
13: "mavericks",
12: "mountainlion",
11: "lion",
10: "snowleopard",
9: "leopard",
8: "tiger",
7: "panther",
6: "jaguar",
5: "puma",
}
func macOSXSeriesFromMajorVersion(majorVersion int) (string, error) {
series, ok := macOSXSeries[majorVersion]
if !ok {
return "unknown", errors.Errorf("unknown series version %d", majorVersion)
}
return series, nil
} | cluster-autoscaler/vendor/github.com/juju/os/v2/series/series.go | 0.61057 | 0.422683 | series.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.