// Package meta provides iteration helpers over GeoJSON objects.
package meta
import (
"errors"
"github.com/tomchavakis/turf-go/geojson"
"github.com/tomchavakis/turf-go/geojson/feature"
"github.com/tomchavakis/turf-go/geojson/geometry"
)
// CoordEach iterates over every coordinate in any GeoJSON object and applies
// callbackFn to each, writing the transformed point back into the object.
//
// geojson may be a *FeatureCollection, *Feature, *geometry.Collection or any
// concrete *geometry type. callbackFn takes a point and returns the
// (possibly modified) point. excludeWrapCoord, when non-nil and true, skips
// the final coordinate of LinearRings (the one that closes the ring).
// A nil excludeWrapCoord behaves like false.
//
// It returns the slice of transformed points, or an error when geojson is
// nil or a nested conversion fails. An unrecognized type yields (nil, nil).
func CoordEach(geojson interface{}, callbackFn func(geometry.Point) geometry.Point, excludeWrapCoord *bool) ([]geometry.Point, error) {
	if geojson == nil {
		return nil, errors.New("geojson is empty")
	}
	// Resolve the optional flag once; nil means "include wrap coordinates".
	// (The previous per-case nil checks and the unreachable `case nil` — the
	// interface was already known non-nil — are gone.)
	exclude := false
	if excludeWrapCoord != nil {
		exclude = *excludeWrapCoord
	}
	switch gtp := geojson.(type) {
	case *geometry.Point:
		return callbackEachPoint(gtp, callbackFn), nil
	case *geometry.MultiPoint:
		return coordEachMultiPoint(gtp, callbackFn), nil
	case *geometry.LineString:
		return coordEachLineString(gtp, callbackFn), nil
	case *geometry.Polygon:
		return coordEachPolygon(gtp, exclude, callbackFn), nil
	case *geometry.MultiLineString:
		return coordEachMultiLineString(gtp, callbackFn), nil
	case *geometry.MultiPolygon:
		return coordEachMultiPolygon(gtp, exclude, callbackFn), nil
	case *feature.Feature:
		return coordEachFeature(gtp, exclude, callbackFn)
	case *feature.Collection:
		return coordEachFeatureCollection(gtp, exclude, callbackFn)
	case *geometry.Collection:
		return coordEachGeometryCollection(gtp, exclude, callbackFn)
	}
	// Unknown type: preserve the historical no-error behavior.
	return nil, nil
}
// callbackEachPoint applies callbackFn to a single point, updates the point
// in place, and returns a one-element slice containing the result.
func callbackEachPoint(p *geometry.Point, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	updated := callbackFn(*p)
	// Write the transformed coordinates back into the source point.
	p.Lat = updated.Lat
	p.Lng = updated.Lng
	return []geometry.Point{updated}
}
// coordEachMultiPoint applies callbackFn to every point of the MultiPoint.
func coordEachMultiPoint(m *geometry.MultiPoint, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	return appendCoordsToMultiPoint(make([]geometry.Point, 0), m, callbackFn)
}
// appendCoordsToMultiPoint applies callbackFn to every coordinate of m,
// writes the transformed points back into m, and appends them to coords,
// returning the extended accumulator.
//
// Fix: only the points appended by this call are assigned back to
// m.Coordinates. Previously the whole accumulator was assigned, which would
// corrupt m whenever coords arrived non-empty.
func appendCoordsToMultiPoint(coords []geometry.Point, m *geometry.MultiPoint, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	start := len(coords)
	for _, v := range m.Coordinates {
		coords = append(coords, callbackFn(v))
	}
	m.Coordinates = coords[start:]
	return coords
}
// coordEachLineString applies callbackFn to every point of the LineString.
func coordEachLineString(m *geometry.LineString, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	return appendCoordsToLineString(make([]geometry.Point, 0), m, callbackFn)
}
// appendCoordsToLineString applies callbackFn to every coordinate of l,
// writes the transformed points back into l, and appends them to coords,
// returning the extended accumulator.
//
// Fix: only the points appended by this call are assigned back to
// l.Coordinates. Previously the whole accumulator was assigned, which would
// corrupt l whenever coords arrived non-empty.
func appendCoordsToLineString(coords []geometry.Point, l *geometry.LineString, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	start := len(coords)
	for _, v := range l.Coordinates {
		coords = append(coords, callbackFn(v))
	}
	l.Coordinates = coords[start:]
	return coords
}
// coordEachMultiLineString applies callbackFn to every point of the
// MultiLineString.
func coordEachMultiLineString(m *geometry.MultiLineString, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	return appendCoordToMultiLineString(make([]geometry.Point, 0), m, callbackFn)
}
// appendCoordToMultiLineString applies callbackFn to every coordinate of
// every line string in m, mutating m in place, and appends the transformed
// points to coords.
func appendCoordToMultiLineString(coords []geometry.Point, m *geometry.MultiLineString, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	for i := range m.Coordinates {
		// line shares its backing array with m, so writes below mutate m.
		line := m.Coordinates[i].Coordinates
		for j := range line {
			line[j] = callbackFn(line[j])
			coords = append(coords, line[j])
		}
	}
	return coords
}
// coordEachPolygon applies callbackFn to every point of the Polygon,
// optionally skipping each ring's closing coordinate.
func coordEachPolygon(p *geometry.Polygon, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	return appendCoordsToPolygon(make([]geometry.Point, 0), p, excludeWrapCoord, callbackFn)
}
// appendCoordsToPolygon applies callbackFn to every coordinate of every ring
// in p, mutating p in place, and appends the transformed points to coords.
// When excludeWrapCoord is true the final (wrapping) coordinate of each ring
// is skipped.
func appendCoordsToPolygon(coords []geometry.Point, p *geometry.Polygon, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	trim := 0
	if excludeWrapCoord {
		trim = 1
	}
	for i := range p.Coordinates {
		// ring shares its backing array with p, so writes below mutate p.
		ring := p.Coordinates[i].Coordinates
		for j := 0; j < len(ring)-trim; j++ {
			ring[j] = callbackFn(ring[j])
			coords = append(coords, ring[j])
		}
	}
	return coords
}
// coordEachMultiPolygon applies callbackFn to every point of the
// MultiPolygon, optionally skipping each ring's closing coordinate.
func coordEachMultiPolygon(mp *geometry.MultiPolygon, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	return appendCoordToMultiPolygon(make([]geometry.Point, 0), mp, excludeWrapCoord, callbackFn)
}
// appendCoordToMultiPolygon applies callbackFn to every coordinate of every
// ring of every polygon in mp, mutating mp in place, and appends the
// transformed points to coords. When excludeWrapCoord is true the final
// (wrapping) coordinate of each ring is skipped.
func appendCoordToMultiPolygon(coords []geometry.Point, mp *geometry.MultiPolygon, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) []geometry.Point {
	trim := 0
	if excludeWrapCoord {
		trim = 1
	}
	for i := range mp.Coordinates {
		for j := range mp.Coordinates[i].Coordinates {
			// ring shares its backing array with mp; writes mutate mp.
			ring := mp.Coordinates[i].Coordinates[j].Coordinates
			for k := 0; k < len(ring)-trim; k++ {
				ring[k] = callbackFn(ring[k])
				coords = append(coords, ring[k])
			}
		}
	}
	return coords
}
// coordEachFeature applies callbackFn to every point of the feature's
// geometry.
func coordEachFeature(f *feature.Feature, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) ([]geometry.Point, error) {
	return appendCoordToFeature(make([]geometry.Point, 0), f, excludeWrapCoord, callbackFn)
}
// appendCoordToFeature applies callbackFn to every coordinate of the
// feature's geometry and appends the transformed points to pointList.
//
// A feature wraps exactly one geometry, so this is a direct delegation; the
// previous err-check-and-rewrap added nothing (the callee already returns
// (nil, err) on failure).
func appendCoordToFeature(pointList []geometry.Point, f *feature.Feature, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) ([]geometry.Point, error) {
	return coordsEachFromSingleGeometry(pointList, &f.Geometry, excludeWrapCoord, callbackFn)
}
// coordsEachFromSingleGeometry dispatches on the concrete GeoJSON type of g,
// applies callbackFn to each coordinate, writes the transformed coordinates
// back into g, and appends them to pointList.
//
// GeoJSONType holds a single value, so the previous chain of independent
// `if` statements is expressed as a switch (exactly one branch can match).
func coordsEachFromSingleGeometry(pointList []geometry.Point, g *geometry.Geometry, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) ([]geometry.Point, error) {
	switch g.GeoJSONType {
	case geojson.Point:
		p, err := g.ToPoint()
		if err != nil {
			return nil, err
		}
		np := callbackFn(*p)
		pointList = append(pointList, np)
		g.Coordinates = np
	case geojson.MultiPoint:
		mp, err := g.ToMultiPoint()
		if err != nil {
			return nil, err
		}
		pointList = appendCoordsToMultiPoint(pointList, mp, callbackFn)
		g.Coordinates = mp.Coordinates
	case geojson.LineString:
		ln, err := g.ToLineString()
		if err != nil {
			return nil, err
		}
		pointList = appendCoordsToLineString(pointList, ln, callbackFn)
		g.Coordinates = ln.Coordinates
	case geojson.MiltiLineString: // constant name misspelled upstream in turf-go
		mln, err := g.ToMultiLineString()
		if err != nil {
			return nil, err
		}
		pointList = appendCoordToMultiLineString(pointList, mln, callbackFn)
		g.Coordinates = mln.Coordinates
	case geojson.Polygon:
		poly, err := g.ToPolygon()
		if err != nil {
			return nil, err
		}
		pointList = appendCoordsToPolygon(pointList, poly, excludeWrapCoord, callbackFn)
		g.Coordinates = poly.Coordinates
	case geojson.MultiPolygon:
		multiPoly, err := g.ToMultiPolygon()
		if err != nil {
			return nil, err
		}
		pointList = appendCoordToMultiPolygon(pointList, multiPoly, excludeWrapCoord, callbackFn)
		g.Coordinates = multiPoly.Coordinates
	}
	return pointList, nil
}
// coordEachFeatureCollection applies callbackFn to every coordinate of every
// feature in the collection and returns all transformed points.
//
// Fix: errors from appendCoordToFeature were previously discarded with `_`;
// they are now propagated to the caller.
func coordEachFeatureCollection(c *feature.Collection, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) ([]geometry.Point, error) {
	var finalCoordsList []geometry.Point
	for i := 0; i < len(c.Features); i++ {
		tempCoordsList, err := appendCoordToFeature(nil, &c.Features[i], excludeWrapCoord, callbackFn)
		if err != nil {
			return nil, err
		}
		finalCoordsList = append(finalCoordsList, tempCoordsList...)
	}
	return finalCoordsList, nil
}
func coordEachGeometryCollection(g *geometry.Collection, excludeWrapCoord bool, callbackFn func(geometry.Point) geometry.Point) ([]geometry.Point, error) {
var finalCoordsList []geometry.Point
for i := 0; i < len(g.Geometries); i++ {
var tempCoordsList []geometry.Point
tempCoordsList, _ = coordsEachFromSingleGeometry(tempCoordsList, &g.Geometries[i], excludeWrapCoord, callbackFn)
finalCoordsList = append(finalCoordsList, tempCoordsList...)
}
return finalCoordsList, nil
} | meta/coordEach/coordEach.go | 0.770378 | 0.509093 | coordEach.go | starcoder |
// Package horn provides an implementation of Higher Order Recurrent Neural Networks (HORN).
package horn
import (
"encoding/gob"
mat "github.com/nlpodyssey/spago/pkg/mat32"
"github.com/nlpodyssey/spago/pkg/ml/ag"
"github.com/nlpodyssey/spago/pkg/ml/nn"
"github.com/nlpodyssey/spago/pkg/utils"
"log"
)
var (
_ nn.Model = &Model{}
)
// Model contains the serializable parameters of a HORN layer.
//
// W projects the input, WRec holds one recurrent weight matrix per order of
// feedback, and B is the bias. States accumulates the per-step outputs and
// is processor-scoped (not serialized as a parameter).
type Model struct {
	nn.BaseModel
	W nn.Param `spago:"type:weights"`
	WRec []nn.Param `spago:"type:weights"`
	B nn.Param `spago:"type:biases"`
	States []*State `spago:"scope:processor"`
}
// State represent a state of the Horn recurrent network.
type State struct {
	// Y is the output node of this time step.
	Y ag.Node
}
// init registers the Model with gob so it can be (de)serialized.
func init() {
	gob.Register(&Model{})
}
// New returns a new model with parameters initialized to zeros.
//
// in is the input size, out the output (state) size, and order the number
// of past states fed back into the recurrence (one matrix in WRec each).
func New(in, out, order int) *Model {
	wRec := make([]nn.Param, order) // cap argument equal to len was redundant
	for i := range wRec {
		wRec[i] = nn.NewParam(mat.NewEmptyDense(out, out))
	}
	return &Model{
		W:    nn.NewParam(mat.NewEmptyDense(out, in)),
		WRec: wRec,
		B:    nn.NewParam(mat.NewEmptyVecDense(out)),
	}
}
// SetInitialState seeds the recurrent network with an initial state. It must
// be called before any input is processed; otherwise the process exits via
// log.Fatal.
func (m *Model) SetInitialState(state *State) {
	if len(m.States) != 0 {
		log.Fatal("horn: the initial state must be set before any input")
	}
	m.States = append(m.States, state)
}
// Forward performs the forward step for each input node, recording one new
// state per input, and returns the output nodes in order.
func (m *Model) Forward(xs ...ag.Node) []ag.Node {
	out := make([]ag.Node, len(xs))
	for i := range xs {
		state := m.forward(xs[i])
		m.States = append(m.States, state)
		out[i] = state.Y
	}
	return out
}
// forward builds the graph nodes for one step:
//
//	y = tanh(B + W·x + Σ_i WRec_i · (α_i · y_{t-1-i})
//
// where feedback() supplies the (WRec_i, scaled-state) pairs consumed by
// nn.Affine as matrix/vector arguments.
func (m *Model) forward(x ag.Node) (s *State) {
	g := m.Graph()
	s = new(State)
	h := nn.Affine(g, append([]ag.Node{m.B, m.W, x}, m.feedback()...)...)
	s.Y = g.Tanh(h)
	return
}
func (m *Model) feedback() []ag.Node {
g := m.Graph()
var ys []ag.Node
n := len(m.States)
for i := 0; i < utils.MinInt(len(m.WRec), n); i++ {
alpha := g.NewScalar(mat.Pow(0.6, mat.Float(i+1)))
ys = append(ys, m.WRec[i], g.ProdScalar(m.States[n-1-i].Y, alpha))
}
return ys
} | pkg/ml/nn/recurrent/horn/horn.go | 0.804828 | 0.406391 | horn.go | starcoder |
package neural
import (
"fmt"
"math"
"math/rand"
)
// Function32 defines a function that takes a float32 and returns a float32
type Function32 func(x float32) float32
// FunctionPair32 represents a function, a derivative of the function, and a
// transform used for inference during training
type FunctionPair32 struct {
	// F is the activation, T the training-time transform (e.g. dropout),
	// and DF the derivative of F used by backpropagation.
	F, T, DF Function32
}
// Neural32 is a 32 bit neural network
type Neural32 struct {
	// Layers holds the width of each layer (bias units included after Init).
	Layers []int
	// Weights[l][j][i] is the weight from unit i of layer l to unit j of l+1.
	Weights [][][]float32
	// Changes stores the previous weight deltas for momentum.
	Changes [][][]float32
	// Functions holds one activation triple per weight layer.
	Functions []FunctionPair32
}
// WeightInitializer32 is a function that initializes the neural network weights
// given the fan-in and fan-out of the layer being initialized.
// See: http://stats.stackexchange.com/questions/47590/what-are-good-initial-weights-in-a-neural-network
type WeightInitializer32 func(in, out int) float32
// WeightInitializer32Basic basic weight initialization: uniform in (-1, 1).
func WeightInitializer32Basic(in, out int) float32 {
	return random32(-1, 1)
}
// WeightInitializer32FanIn fan in weight initialization: uniform scaled by
// 1/sqrt(fan-in).
func WeightInitializer32FanIn(in, out int) float32 {
	return random32(-1, 1) / float32(math.Sqrt(float64(in)))
}
// WeightInitializer32FanInFanOut fan in/fan out weight initialization:
// uniform scaled by 4*sqrt(6/(fan-in+fan-out)) (Glorot-style).
func WeightInitializer32FanInFanOut(in, out int) float32 {
	return random32(-1, 1) * float32(4*math.Sqrt(6/float64(in+out)))
}
// Init initializes the neural network with the given weight initializer and
// layer widths. Every layer except the output gains one extra bias unit.
// It panics if fewer than two layers are given.
//
// Fix: the widths are copied before being incremented, so a caller-owned
// slice passed as Init(init, widths...) is no longer mutated.
func (n *Neural32) Init(initializer WeightInitializer32, layers ...int) {
	depth := len(layers) - 1
	if depth < 1 {
		panic("there should be at least 2 layers")
	}
	// Private copy: the bias increments below must not leak to the caller.
	layers = append([]int(nil), layers...)
	n.Layers = layers
	// Add a bias unit to every layer except the output layer.
	for l := range layers[:depth] {
		layers[l]++
	}
	n.Weights = make([][][]float32, depth)
	for l := range layers[:depth] {
		weights := matrix32(layers[l+1], layers[l])
		for i := 0; i < layers[l]; i++ {
			for j := 0; j < layers[l+1]; j++ {
				weights[j][i] = initializer(layers[l], layers[l+1])
			}
		}
		n.Weights[l] = weights
	}
	// Momentum buffers, one per weight layer.
	n.Changes = make([][][]float32, depth)
	for l := range layers[:depth] {
		n.Changes[l] = matrix32(layers[l], layers[l+1])
	}
	// Default to sigmoid activation with an identity inference transform.
	n.Functions = make([]FunctionPair32, depth)
	for f := range n.Functions {
		n.Functions[f] = FunctionPair32{
			F:  sigmoid32,
			T:  identity,
			DF: dsigmoid32,
		}
	}
}
// UseTanh switches every layer's activation (and its derivative) to tanh.
func (n *Neural32) UseTanh() {
	for i := range n.Functions {
		n.Functions[i].F = tanh32
		n.Functions[i].DF = dtanh32
	}
}
// EnableRegression makes the output layer linear (identity activation with
// unit derivative) so the network performs regression.
func (n *Neural32) EnableRegression() {
	last := len(n.Functions) - 1
	n.Functions[last].F = identity
	n.Functions[last].DF = one
}
// EnableDropout enables dropout-based regularization on every hidden layer.
// Each activation is zeroed with the given probability and the survivors are
// scaled up by 1/(1-probability) (inverted dropout).
// See: http://iamtrask.github.io/2015/07/28/dropout/
func (n *Neural32) EnableDropout(probability float32) {
	depth := len(n.Layers) - 1
	for i := range n.Functions[:depth-1] {
		n.Functions[i].T = func(x float32) float32 {
			if rand.Float32() > 1-probability {
				return 0
			}
			return x * (1 / (1 - probability))
		}
	}
}
// NewNeural32 creates a neural network and hands it to config for setup
// (typically a call to Init plus optional feature toggles).
func NewNeural32(config func(neural *Neural32)) *Neural32 {
	n := &Neural32{}
	config(n)
	return n
}
// Context32 is an inference context: it pairs a network with a set of
// per-layer activation buffers so inference/training can run without
// reallocating.
type Context32 struct {
	*Neural32
	// Activations[l] holds the output of layer l; bias units stay at 1.0.
	Activations [][]float32
}
// SetInput copies input into the first activation layer of the context.
// Values beyond the layer width are ignored; the trailing bias unit keeps
// its value of 1.0 as long as input is no longer than the input width.
func (c *Context32) SetInput(input []float32) {
	copy(c.Activations[0], input)
}
// GetOutput returns the activation slice of the final layer (not a copy).
func (c *Context32) GetOutput() []float32 {
	return c.Activations[len(c.Activations)-1]
}
// NewContext creates a new inference context for the network, with every
// activation initialized to 1.0 (so bias units are live from the start).
func (n *Neural32) NewContext() *Context32 {
	activations := make([][]float32, len(n.Layers))
	for i, width := range n.Layers {
		activations[i] = vector32(width, 1.0)
	}
	return &Context32{
		Neural32:    n,
		Activations: activations,
	}
}
// Infer runs a forward pass, filling each activation layer from the
// previous one.
//
// For hidden target layers the last unit is the bias unit and must keep its
// fixed activation of 1.0, so its weight row is skipped; the output layer
// uses every row. The redundant `weights[:len(weights)]` slice on the
// output layer has been removed.
func (c *Context32) Infer() {
	depth := len(c.Layers) - 1
	if depth > 1 {
		for i := range c.Activations[:depth-1] {
			activations, weights := c.Activations[i], c.Weights[i]
			for j := range weights[:len(weights)-1] { // skip bias row
				c.Activations[i+1][j] = c.Functions[i].F(dot32(activations, weights[j]))
			}
		}
	}
	// Output layer: every unit is a real output.
	i := depth - 1
	activations, weights := c.Activations[i], c.Weights[i]
	for j := range weights {
		c.Activations[i+1][j] = c.Functions[i].F(dot32(activations, weights[j]))
	}
}
// InferWithT runs a forward pass like Infer, additionally applying each
// layer's training-time transform T (e.g. dropout) after the activation.
//
// As in Infer, hidden target layers skip their bias row; the redundant
// `weights[:len(weights)]` slice on the output layer has been removed.
func (c *Context32) InferWithT() {
	depth := len(c.Layers) - 1
	if depth > 1 {
		for i := range c.Activations[:depth-1] {
			activations, weights := c.Activations[i], c.Weights[i]
			for j := range weights[:len(weights)-1] { // skip bias row
				c.Activations[i+1][j] = c.Functions[i].T(c.Functions[i].F(dot32(activations, weights[j])))
			}
		}
	}
	// Output layer: every unit is a real output.
	i := depth - 1
	activations, weights := c.Activations[i], c.Weights[i]
	for j := range weights {
		c.Activations[i+1][j] = c.Functions[i].T(c.Functions[i].F(dot32(activations, weights[j])))
	}
}
// BackPropagate runs one step of the backpropagation algorithm against the
// given targets, updating weights with learning rate lRate and momentum
// mFactor, and returns the summed squared error of the output layer.
//
// Assumes a forward pass (InferWithT) has just populated c.Activations.
func (c *Context32) BackPropagate(targets []float32, lRate, mFactor float32) float32 {
	depth, layers := len(c.Layers), c.Layers
	// One delta vector per weight layer.
	deltas := make([][]float32, depth-1)
	for i := range deltas {
		deltas[i] = vector32(layers[i+1], 0)
	}
	// Output layer deltas: derivative of the activation times the error.
	l := depth - 2
	for i := 0; i < layers[l+1]; i++ {
		activation := c.Activations[l+1][i]
		e := targets[i] - activation
		deltas[l][i] = c.Functions[l].DF(activation) * e
	}
	// Propagate deltas backwards through the hidden layers.
	l--
	for l >= 0 {
		for i := 0; i < layers[l+1]; i++ {
			var e float32
			for j := 0; j < layers[l+2]; j++ {
				e += deltas[l+1][j] * c.Weights[l+1][j][i]
			}
			deltas[l][i] = c.Functions[l].DF(c.Activations[l+1][i]) * e
		}
		l--
	}
	// Apply weight updates with momentum:
	// change = lRate*delta*activation + mFactor*previousChange.
	for l := 0; l < depth-1; l++ {
		change := make([]float32, layers[l+1])
		for i := 0; i < layers[l]; i++ {
			copy(change, deltas[l])
			scal32(c.Activations[l][i], change)       // scale deltas by source activation
			scal32(mFactor, c.Changes[l][i])          // decay previous change (momentum)
			axpy32(lRate, change, c.Changes[l][i])    // add learning-rate-scaled gradient
			for j := 0; j < layers[l+1]; j++ {
				c.Weights[l][j][i] = c.Weights[l][j][i] + c.Changes[l][i][j]
			}
			// Remember the raw gradient term for the next momentum step.
			copy(c.Changes[l][i], change)
		}
	}
	// Summed squared error over the output layer.
	var e float32
	for i := 0; i < len(targets); i++ {
		f := targets[i] - c.Activations[depth-1][i]
		e += f * f
	}
	return e
}
// Train trains the neural network for the given number of iterations using
// patterns supplied by source (each pattern is an [input, target] pair) and
// returns the mean squared error per iteration.
//
// Fix: the inner accumulator was declared as `n`, shadowing the receiver
// `n *Neural32`; it is renamed for clarity. Note the per-iteration error is
// NaN if source returns no patterns (division by zero) — unchanged behavior.
func (n *Neural32) Train(source func(iteration int) [][][]float32, iterations int, lRate, mFactor float32) []float32 {
	context, errs := n.NewContext(), make([]float32, iterations)
	for i := 0; i < iterations; i++ {
		var (
			sumErr float32 // accumulated squared error this iteration
			count  int     // number of target values seen
		)
		patterns := source(i)
		for _, p := range patterns {
			context.SetInput(p[0])
			context.InferWithT()
			sumErr += context.BackPropagate(p[1], lRate, mFactor)
			count += len(p[1])
		}
		errs[i] = sumErr / float32(count)
	}
	return errs
}
func (n *Neural32) test(patterns [][][]float32) {
context := n.NewContext()
for _, p := range patterns {
context.SetInput(p[0])
context.Infer()
fmt.Println(p[0], "->", context.GetOutput(), " : ", p[1])
}
} | neural32.go | 0.865679 | 0.672507 | neural32.go | starcoder |
// Convert different RGB colorspaces with their native illuminator to CIE XYZ and back.
// RGB values must be linear and in the nominal range [0.0, 1.0].
// Ref.: [24][30][31].
package rgb
// AdobeToXYZ converts linear Adobe RGB (1998) values (D65 white point,
// nominal range [0,1]) to CIE XYZ.
func AdobeToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.5767309*r + 0.1855540*g + 0.1881852*b,
		0.2973769*r + 0.6273491*g + 0.0752741*b,
		0.0270343*r + 0.0706872*g + 0.9911085*b
}
// XYZToAdobe converts CIE XYZ to linear Adobe RGB (1998) (D65 white point,
// nominal range [0,1]).
func XYZToAdobe(x, y, z float64) (r, g, b float64) {
	return 2.0413690*x - 0.5649464*y - 0.3446944*z,
		-0.9692660*x + 1.8760108*y + 0.0415560*z,
		0.0134474*x - 0.1183897*y + 1.0154096*z
}
// AppleToXYZ converts linear Apple RGB values (D65 white point, nominal
// range [0,1]) to CIE XYZ.
func AppleToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.4497288*r + 0.3162486*g + 0.1844926*b,
		0.2446525*r + 0.6720283*g + 0.0833192*b,
		0.0251848*r + 0.1411824*g + 0.9224628*b
}
// XYZToApple converts CIE XYZ to linear Apple RGB (D65 white point, nominal
// range [0,1]).
func XYZToApple(x, y, z float64) (r, g, b float64) {
	return 2.9515373*x - 1.2894116*y - 0.4738445*z,
		-1.0851093*x + 1.9908566*y + 0.0372026*z,
		0.0854934*x - 0.2694964*y + 1.0912975*z
}
// BestToXYZ converts linear Best RGB values (D50 white point, nominal range
// [0,1]) to CIE XYZ.
func BestToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.6326696*r + 0.2045558*g + 0.1269946*b,
		0.2284569*r + 0.7373523*g + 0.0341908*b,
		0.0000000*r + 0.0095142*g + 0.8156958*b
}
// XYZToBest converts CIE XYZ to linear Best RGB (D50 white point, nominal
// range [0,1]).
func XYZToBest(x, y, z float64) (r, g, b float64) {
	return 1.7552599*x - 0.4836786*y - 0.2530000*z,
		-0.5441336*x + 1.5068789*y + 0.0215528*z,
		0.0063467*x - 0.0175761*y + 1.2256959*z
}
// BetaToXYZ converts linear Beta RGB values (D50 white point, nominal range
// [0,1]) to CIE XYZ.
func BetaToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.6712537*r + 0.1745834*g + 0.1183829*b,
		0.3032726*r + 0.6637861*g + 0.0329413*b,
		0.0000000*r + 0.0407010*g + 0.7845090*b
}
// XYZToBeta converts CIE XYZ to linear Beta RGB (D50 white point, nominal
// range [0,1]).
func XYZToBeta(x, y, z float64) (r, g, b float64) {
	return 1.6832270*x - 0.4282363*y - 0.2360185*z,
		-0.7710229*x + 1.7065571*y + 0.0446900*z,
		0.0400013*x - 0.0885376*y + 1.2723640*z
}
// BruceToXYZ converts linear Bruce RGB values (D65 white point, nominal
// range [0,1]) to CIE XYZ.
func BruceToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.4674162*r + 0.2944512*g + 0.1886026*b,
		0.2410115*r + 0.6835475*g + 0.0754410*b,
		0.0219101*r + 0.0736128*g + 0.9933071*b
}
// XYZToBruce converts CIE XYZ to linear Bruce RGB (D65 white point, nominal
// range [0,1]).
func XYZToBruce(x, y, z float64) (r, g, b float64) {
	return 2.7454669*x - 1.1358136*y - 0.4350269*z,
		-0.9692660*x + 1.8760108*y + 0.0415560*z,
		0.0112723*x - 0.1139754*y + 1.0132541*z
}
// CIEToXYZ converts linear CIE RGB values (illuminant E, nominal range
// [0,1]) to CIE XYZ.
func CIEToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.4887180*r + 0.3106803*g + 0.2006017*b,
		0.1762044*r + 0.8129847*g + 0.0108109*b,
		0.0000000*r + 0.0102048*g + 0.9897952*b
}
// XYZToCIE converts CIE XYZ to linear CIE RGB (illuminant E, nominal range
// [0,1]).
func XYZToCIE(x, y, z float64) (r, g, b float64) {
	return 2.3706743*x - 0.9000405*y - 0.4706338*z,
		-0.5138850*x + 1.4253036*y + 0.0885814*z,
		0.0052982*x - 0.0146949*y + 1.0093968*z
}
// ColorMatchToXYZ converts linear ColorMatch RGB values (D50 white point,
// nominal range [0,1]) to CIE XYZ.
func ColorMatchToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.5093439*r + 0.3209071*g + 0.1339691*b,
		0.2748840*r + 0.6581315*g + 0.0669845*b,
		0.0242545*r + 0.1087821*g + 0.6921735*b
}
// XYZToColorMatch converts CIE XYZ to linear ColorMatch RGB (D50 white
// point, nominal range [0,1]).
func XYZToColorMatch(x, y, z float64) (r, g, b float64) {
	return 2.6422874*x - 1.2234270*y - 0.3930143*z,
		-1.1119763*x + 2.0590183*y + 0.0159614*z,
		0.0821699*x - 0.2807254*y + 1.4559877*z
}
// DonToXYZ converts linear Don RGB-4 values (D50 white point, nominal range
// [0,1]) to CIE XYZ.
func DonToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.6457711*r + 0.1933511*g + 0.1250978*b,
		0.2783496*r + 0.6879702*g + 0.0336802*b,
		0.0037113*r + 0.0179861*g + 0.8035125*b
}
// XYZToDon converts CIE XYZ to linear Don RGB-4 (D50 white point, nominal
// range [0,1]).
func XYZToDon(x, y, z float64) (r, g, b float64) {
	return 1.7603902*x - 0.4881198*y - 0.2536126*z,
		-0.7126288*x + 1.6527432*y + 0.0416715*z,
		0.0078207*x - 0.0347411*y + 1.2447743*z
}
// ECIToXYZ converts linear ECI RGB values (D50 white point, nominal range
// [0,1]) to CIE XYZ.
func ECIToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.6502043*r + 0.1780774*g + 0.1359384*b,
		0.3202499*r + 0.6020711*g + 0.0776791*b,
		0.0000000*r + 0.0678390*g + 0.7573710*b
}
// XYZToECI converts CIE XYZ to linear ECI RGB (D50 white point, nominal
// range [0,1]).
func XYZToECI(x, y, z float64) (r, g, b float64) {
	return 1.7827618*x - 0.4969847*y - 0.2690101*z,
		-0.9593623*x + 1.9477962*y - 0.0275807*z,
		0.0859317*x - 0.1744674*y + 1.3228273*z
}
// EktaSpaceToXYZ converts linear Ekta Space PS5 values (D50 white point,
// nominal range [0,1]) to CIE XYZ.
func EktaSpaceToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.5938914*r + 0.2729801*g + 0.0973485*b,
		0.2606286*r + 0.7349465*g + 0.0044249*b,
		0.0000000*r + 0.0419969*g + 0.7832131*b
}
// XYZToEktaSpace converts CIE XYZ to linear Ekta Space PS5 (D50 white
// point, nominal range [0,1]).
func XYZToEktaSpace(x, y, z float64) (r, g, b float64) {
	return 2.0043819*x - 0.7304844*y - 0.2450052*z,
		-0.7110285*x + 1.6202126*y + 0.0792227*z,
		0.0381263*x - 0.0868780*y + 1.2725438*z
}
// NTSCToXYZ converts linear NTSC RGB values (D50 white point, nominal range
// [0,1]) to CIE XYZ.
func NTSCToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.6068909*r + 0.1735011*g + 0.2003480*b,
		0.2989164*r + 0.5865990*g + 0.1144845*b,
		0.0000000*r + 0.0660957*g + 1.1162243*b
}
// XYZToNTSC converts CIE XYZ to linear NTSC RGB (D50 white point, nominal
// range [0,1]).
func XYZToNTSC(x, y, z float64) (r, g, b float64) {
	return 1.9099961*x - 0.5324542*y - 0.2882091*z,
		-0.9846663*x + 1.9991710*y - 0.0283082*z,
		0.0583056*x - 0.1183781*y + 0.8975535*z
}
// PALToXYZ converts linear PAL/SECAM RGB values (D65 white point, nominal
// range [0,1]) to CIE XYZ.
func PALToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.4306190*r + 0.3415419*g + 0.1783091*b,
		0.2220379*r + 0.7066384*g + 0.0713236*b,
		0.0201853*r + 0.1295504*g + 0.9390944*b
}
// XYZToPAL converts CIE XYZ to linear PAL/SECAM RGB (D65 white point,
// nominal range [0,1]).
func XYZToPAL(x, y, z float64) (r, g, b float64) {
	return 3.0628971*x - 1.3931791*y - 0.4757517*z,
		-0.9692660*x + 1.8760108*y + 0.0415560*z,
		0.0678775*x - 0.2288548*y + 1.0693490*z
}
// ProPhotoToXYZ converts linear ProPhoto RGB values (D50 white point,
// nominal range [0,1]) to CIE XYZ.
func ProPhotoToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.7976749*r + 0.1351917*g + 0.0313534*b,
		0.2880402*r + 0.7118741*g + 0.0000857*b,
		0.0000000*r + 0.0000000*g + 0.8252100*b
}
// XYZToProPhoto converts CIE XYZ to linear ProPhoto RGB (D50 white point,
// nominal range [0,1]).
func XYZToProPhoto(x, y, z float64) (r, g, b float64) {
	return 1.3459433*x - 0.2556075*y - 0.0511118*z,
		-0.5445989*x + 1.5081673*y + 0.0205351*z,
		0.0000000*x + 0.0000000*y + 1.2118128*z
}
// SMPTE_CToXYZ converts linear SMPTE-C RGB values (D65 white point, nominal
// range [0,1]) to CIE XYZ. (The underscore in the name is kept for API
// compatibility.)
func SMPTE_CToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.3935891*r + 0.3652497*g + 0.1916313*b,
		0.2124132*r + 0.7010437*g + 0.0865432*b,
		0.0187423*r + 0.1119313*g + 0.9581563*b
}
// XYZToSMPTE_C converts CIE XYZ to linear SMPTE-C RGB (D65 white point,
// nominal range [0,1]).
func XYZToSMPTE_C(x, y, z float64) (r, g, b float64) {
	return 3.5053960*x - 1.7394894*y - 0.5439640*z,
		-1.0690722*x + 1.9778245*y + 0.0351722*z,
		0.0563200*x - 0.1970226*y + 1.0502026*z
}
// SRGBToXYZ converts linear sRGB values (D65 white point, nominal range
// [0,1]) to CIE XYZ.
func SRGBToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.4124564*r + 0.3575761*g + 0.1804375*b,
		0.2126729*r + 0.7151522*g + 0.0721750*b,
		0.0193339*r + 0.1191920*g + 0.9503041*b
}
// XYZToSRGB converts CIE XYZ to linear sRGB (D65 white point, nominal range
// [0,1]).
func XYZToSRGB(x, y, z float64) (r, g, b float64) {
	return 3.2404542*x - 1.5371385*y - 0.4985314*z,
		-0.9692660*x + 1.8760108*y + 0.0415560*z,
		0.0556434*x - 0.2040259*y + 1.0572252*z
}
// WGamutToXYZ converts linear Wide Gamut RGB values (D50 white point,
// nominal range [0,1]) to CIE XYZ.
func WGamutToXYZ(r, g, b float64) (x, y, z float64) {
	return 0.7161046*r + 0.1009296*g + 0.1471858*b,
		0.2581874*r + 0.7249378*g + 0.0168748*b,
		0.0000000*r + 0.0517813*g + 0.7734287*b
}
// XYZToWGamut converts CIE XYZ to linear Wide Gamut RGB (D50 white point,
// nominal range [0,1]).
func XYZToWGamut(x, y, z float64) (r, g, b float64) {
	return 1.4628067*x - 0.1840623*y - 0.2743606*z,
		-0.5217933*x + 1.4472381*y + 0.0677227*z,
		0.0349342*x - 0.0968930*y + 1.2884099*z
}
package planar
import (
"fmt"
"math"
"github.com/paulmach/orb"
)
// Area returns the area of the geometry in the 2d plane.
func Area(g orb.Geometry) float64 {
	// TODO: make faster non-centroid version.
	_, area := CentroidArea(g)
	return area
}
// CentroidArea returns both the centroid and the area in the 2d plane.
// Since the area is needed for the centroid, both are returned.
// Polygon area will always be >= zero. Ring area may be negative if it has
// a clockwise winding order.
func CentroidArea(g orb.Geometry) (orb.Point, float64) {
	if g == nil {
		return orb.Point{}, 0
	}
	// Dispatch on the concrete geometry type; 0- and 1-dimensional
	// geometries have zero area by definition.
	switch g := g.(type) {
	case orb.Point:
		return multiPointCentroid(orb.MultiPoint{g}), 0
	case orb.MultiPoint:
		return multiPointCentroid(g), 0
	case orb.LineString:
		return multiLineStringCentroid(orb.MultiLineString{g}), 0
	case orb.MultiLineString:
		return multiLineStringCentroid(g), 0
	case orb.Ring:
		return ringCentroidArea(g)
	case orb.Polygon:
		return polygonCentroidArea(g)
	case orb.MultiPolygon:
		return multiPolygonCentroidArea(g)
	case orb.Collection:
		return collectionCentroidArea(g)
	case orb.Bound:
		return CentroidArea(g.ToRing())
	}
	panic(fmt.Sprintf("geometry type not supported: %T", g))
}
// multiPointCentroid returns the arithmetic mean of all points; the zero
// point for an empty input.
func multiPointCentroid(mp orb.MultiPoint) orb.Point {
	if len(mp) == 0 {
		return orb.Point{}
	}
	var sumX, sumY float64
	for _, p := range mp {
		sumX += p[0]
		sumY += p[1]
	}
	count := float64(len(mp))
	return orb.Point{sumX / count, sumY / count}
}
// multiLineStringCentroid returns the length-weighted centroid of all line
// strings in the collection. Empty line strings are skipped; degenerate
// (zero-length) line strings contribute their point with unit weight.
func multiLineStringCentroid(mls orb.MultiLineString) orb.Point {
	point := orb.Point{}
	dist := 0.0
	if len(mls) == 0 {
		return orb.Point{}
	}
	validCount := 0
	for _, ls := range mls {
		c, d := lineStringCentroidDist(ls)
		// lineStringCentroidDist reports +Inf for an empty line string.
		if d == math.Inf(1) {
			continue
		}
		dist += d
		validCount++
		// Zero-length line string: weight its single point by 1.
		if d == 0 {
			d = 1.0
		}
		point[0] += c[0] * d
		point[1] += c[1] * d
	}
	if validCount == 0 {
		return orb.Point{}
	}
	// All contributions degenerate: fall back to the unweighted average of
	// the accumulated points.
	if dist == math.Inf(1) || dist == 0.0 {
		point[0] /= float64(validCount)
		point[1] /= float64(validCount)
		return point
	}
	point[0] /= dist
	point[1] /= dist
	return point
}
// lineStringCentroidDist returns the centroid of the line string weighted
// by segment length, along with its total length. An empty line string
// yields a +Inf distance so callers can detect and skip it.
func lineStringCentroidDist(ls orb.LineString) (orb.Point, float64) {
	dist := 0.0
	point := orb.Point{}
	if len(ls) == 0 {
		return orb.Point{}, math.Inf(1)
	}
	// implicitly move everything to near the origin to help with roundoff
	offset := ls[0]
	for i := 0; i < len(ls)-1; i++ {
		p1 := orb.Point{
			ls[i][0] - offset[0],
			ls[i][1] - offset[1],
		}
		p2 := orb.Point{
			ls[i+1][0] - offset[0],
			ls[i+1][1] - offset[1],
		}
		// Each segment contributes its midpoint weighted by its length.
		d := Distance(p1, p2)
		point[0] += (p1[0] + p2[0]) / 2.0 * d
		point[1] += (p1[1] + p2[1]) / 2.0 * d
		dist += d
	}
	// Zero total length: every vertex coincides with the first point.
	if dist == 0 {
		return ls[0], 0
	}
	point[0] /= dist
	point[1] /= dist
	// Undo the origin offset.
	point[0] += ls[0][0]
	point[1] += ls[0][1]
	return point, dist
}
// ringCentroidArea computes the centroid and signed area of the ring using
// the shoelace formula. The area is negative for clockwise winding. A
// degenerate (zero-area) ring returns its first point with zero area.
func ringCentroidArea(r orb.Ring) (orb.Point, float64) {
	centroid := orb.Point{}
	area := 0.0
	if len(r) == 0 {
		return orb.Point{}, 0
	}
	// implicitly move everything to near the origin to help with roundoff
	offsetX := r[0][0]
	offsetY := r[0][1]
	for i := 1; i < len(r)-1; i++ {
		// Cross product of consecutive (offset) vertices: twice the signed
		// area of the triangle (origin, r[i], r[i+1]).
		a := (r[i][0]-offsetX)*(r[i+1][1]-offsetY) -
			(r[i+1][0]-offsetX)*(r[i][1]-offsetY)
		area += a
		centroid[0] += (r[i][0] + r[i+1][0] - 2*offsetX) * a
		centroid[1] += (r[i][1] + r[i+1][1] - 2*offsetY) * a
	}
	if area == 0 {
		return r[0], 0
	}
	// no need to deal with first and last vertex since we "moved"
	// that point the origin (multiply by 0 == 0)
	area /= 2
	centroid[0] /= 6 * area
	centroid[1] /= 6 * area
	centroid[0] += offsetX
	centroid[1] += offsetY
	return centroid, area
}
// polygonCentroidArea computes the centroid and area of a polygon, with
// hole rings (index >= 1) subtracted from the outer ring (index 0). A
// degenerate polygon falls back to the line-string centroid of its outer
// ring with zero area.
func polygonCentroidArea(p orb.Polygon) (orb.Point, float64) {
	if len(p) == 0 {
		return orb.Point{}, 0
	}
	centroid, area := ringCentroidArea(p[0])
	area = math.Abs(area)
	if len(p) == 1 {
		if area == 0 {
			c, _ := lineStringCentroidDist(orb.LineString(p[0]))
			return c, 0
		}
		return centroid, area
	}
	// Accumulate the holes' area-weighted centroids so they can be
	// subtracted from the outer ring's contribution.
	holeArea := 0.0
	weightedHoleCentroid := orb.Point{}
	for i := 1; i < len(p); i++ {
		hc, ha := ringCentroidArea(p[i])
		ha = math.Abs(ha)
		holeArea += ha
		weightedHoleCentroid[0] += hc[0] * ha
		weightedHoleCentroid[1] += hc[1] * ha
	}
	totalArea := area - holeArea
	// Holes consume the entire outer ring: treat as degenerate.
	if totalArea == 0 {
		c, _ := lineStringCentroidDist(orb.LineString(p[0]))
		return c, 0
	}
	centroid[0] = (area*centroid[0] - weightedHoleCentroid[0]) / totalArea
	centroid[1] = (area*centroid[1] - weightedHoleCentroid[1]) / totalArea
	return centroid, totalArea
}
// multiPolygonCentroidArea returns the area-weighted centroid of all polygons
// in mp together with their total area. A zero total area yields a zero point.
func multiPolygonCentroidArea(mp orb.MultiPolygon) (orb.Point, float64) {
	var (
		weighted  orb.Point
		totalArea float64
	)
	for _, poly := range mp {
		c, a := polygonCentroidArea(poly)
		weighted[0] += c[0] * a
		weighted[1] += c[1] * a
		totalArea += a
	}
	if totalArea == 0 {
		return orb.Point{}, 0
	}
	weighted[0] /= totalArea
	weighted[1] /= totalArea
	return weighted, totalArea
}
// collectionCentroidArea returns the area-weighted centroid and total area of
// the geometries in c. Only geometries of the highest dimension present in
// the collection contribute; lower-dimensional members are skipped.
func collectionCentroidArea(c orb.Collection) (orb.Point, float64) {
	var (
		centroid  orb.Point
		totalArea float64
	)
	highest := maxDim(c)
	for _, geom := range c {
		if geom.Dimensions() != highest {
			continue
		}
		gc, ga := CentroidArea(geom)
		centroid[0] += gc[0] * ga
		centroid[1] += gc[1] * ga
		totalArea += ga
	}
	if totalArea == 0 {
		return orb.Point{}, 0
	}
	centroid[0] /= totalArea
	centroid[1] /= totalArea
	return centroid, totalArea
}
func maxDim(c orb.Collection) int {
max := 0
for _, g := range c {
if d := g.Dimensions(); d > max {
max = d
}
}
return max
} | planar/area.go | 0.628635 | 0.69166 | area.go | starcoder |
package gobacktest
// PortfolioHandler is the combined interface building block for a portfolio.
// It aggregates the single-purpose interfaces below so components can depend
// on only the capability they need.
type PortfolioHandler interface {
	OnSignaler
	OnFiller
	Investor
	Updater
	Casher
	Valuer
	Reseter
}
// OnSignaler is an interface for the OnSignal method, which turns a signal
// into an order proposal.
type OnSignaler interface {
	OnSignal(SignalEvent, DataHandler) (OrderEvent, error)
}
// OnFiller is an interface for the OnFill method, which applies an executed
// fill to the portfolio.
type OnFiller interface {
	OnFill(FillEvent, DataHandler) (*Fill, error)
}
// Investor is an interface to check if a portfolio has a position of a symbol
// (any, long, or short).
type Investor interface {
	IsInvested(string) (Position, bool)
	IsLong(string) (Position, bool)
	IsShort(string) (Position, bool)
}
// Updater handles the updating of the portfolio on data events.
type Updater interface {
	Update(DataEvent)
}
// Casher handles basic portfolio cash info.
type Casher interface {
	InitialCash() float64
	SetInitialCash(float64)
	Cash() float64
	SetCash(float64)
}
// Valuer returns the total value of the portfolio.
type Valuer interface {
	Value() float64
}
// Booker defines methods for handling the order book of the portfolio.
type Booker interface {
	OrderBook() ([]OrderEvent, bool)
	OrdersBySymbol(symbol string) ([]OrderEvent, bool)
}
// Portfolio represent a simple portfolio struct.
type Portfolio struct {
	// initialCash is the configured starting cash value.
	initialCash float64
	// cash is the currently available cash, adjusted on every fill.
	cash float64
	// holdings maps a symbol to its current Position; lazily created in OnFill.
	holdings map[string]Position
	// orderBook holds the orders belonging to this portfolio.
	orderBook []OrderEvent
	// transactions records every fill received via OnFill.
	transactions []FillEvent
	// sizeManager is used to size orders in OnSignal.
	sizeManager SizeHandler
	// riskManager evaluates sized orders in OnSignal before they are returned.
	riskManager RiskHandler
}
// NewPortfolio creates a default portfolio with sensible defaults ready for use:
// 100000 initial cash, a Size manager with DefaultSize 100 / DefaultValue 1000,
// and an empty Risk manager. Note that cash itself stays 0 until SetCash is
// called.
func NewPortfolio() *Portfolio {
	return &Portfolio{
		initialCash: 100000,
		sizeManager: &Size{DefaultSize: 100, DefaultValue: 1000},
		riskManager: &Risk{},
	}
}
// SizeManager returns the size manager of the portfolio.
func (p Portfolio) SizeManager() SizeHandler {
	return p.sizeManager
}
// SetSizeManager sets the size manager to be used with the portfolio.
func (p *Portfolio) SetSizeManager(size SizeHandler) {
	p.sizeManager = size
}
// RiskManager returns the risk manager of the portfolio.
func (p Portfolio) RiskManager() RiskHandler {
	return p.riskManager
}
// SetRiskManager sets the risk manager to be used with the portfolio.
func (p *Portfolio) SetRiskManager(risk RiskHandler) {
	p.riskManager = risk
}
// Reset the portfolio into a clean state with set initial cash.
//
// NOTE(review): cash is reset to 0 rather than to initialCash, despite the
// comment above — confirm whether callers are expected to call SetCash
// afterwards. The orderBook is also left untouched here; verify that is
// intentional.
func (p *Portfolio) Reset() error {
	p.cash = 0
	p.holdings = nil
	p.transactions = nil
	return nil
}
// OnSignal handles an incoming signal event: it builds a basic market order
// from the signal, has the size manager size it, and passes the sized order
// through the risk manager, which produces the final order (or an error).
func (p *Portfolio) OnSignal(signal SignalEvent, data DataHandler) (OrderEvent, error) {
	// fmt.Printf("Portfolio receives Signal: %#v \n", signal)
	// set order type
	orderType := MarketOrder // default Market, should be set by risk manager
	var limit float64
	initialOrder := &Order{
		Event: Event{
			timestamp: signal.Time(),
			symbol: signal.Symbol(),
		},
		direction: signal.Direction(),
		// Qty should be set by PositionSizer
		orderType: orderType,
		limitPrice: limit,
	}
	// fetch latest known price for the symbol
	latest := data.Latest(signal.Symbol())
	sizedOrder, err := p.sizeManager.SizeOrder(initialOrder, latest, p)
	if err != nil {
		// NOTE(review): the partially sized order is returned alongside the
		// error — confirm callers ignore it when err != nil.
		return sizedOrder, err
	}
	return p.riskManager.EvaluateOrder(sizedOrder, latest, p.holdings)
}
// OnFill handles an incoming fill event: it updates (or creates) the position
// for the fill's symbol, adjusts cash by the fill's net value, and records
// the fill in the transaction history.
//
// NOTE(review): the trailing type assertion fill.(*Fill) panics if any
// FillEvent implementation other than *Fill is ever passed — confirm *Fill is
// the only implementation in use.
func (p *Portfolio) OnFill(fill FillEvent, data DataHandler) (*Fill, error) {
	// Check for nil map, else initialise the map
	if p.holdings == nil {
		p.holdings = make(map[string]Position)
	}
	// check if portfolio has already a holding of the symbol from this fill
	if pos, ok := p.holdings[fill.Symbol()]; ok {
		// update existing Position (value copy, so write it back below)
		pos.Update(fill)
		p.holdings[fill.Symbol()] = pos
	} else {
		// create new position
		pos := Position{}
		pos.Create(fill)
		p.holdings[fill.Symbol()] = pos
	}
	// update cash: buying consumes cash, selling releases it
	if fill.Direction() == BOT {
		p.cash = p.cash - fill.NetValue()
	} else {
		// direction is "SLD"
		p.cash = p.cash + fill.NetValue()
	}
	// add fill to transactions
	p.transactions = append(p.transactions, fill)
	f := fill.(*Fill)
	return f, nil
}
// IsInvested checks if the portfolio has an open position on the given symbol
func (p Portfolio) IsInvested(symbol string) (pos Position, ok bool) {
	pos, held := p.holdings[symbol]
	if !held || pos.qty == 0 {
		return pos, false
	}
	return pos, true
}
// IsLong checks if the portfolio has an open long position on the given symbol
func (p Portfolio) IsLong(symbol string) (pos Position, ok bool) {
	pos, held := p.holdings[symbol]
	if !held || pos.qty <= 0 {
		return pos, false
	}
	return pos, true
}
// IsShort checks if the portfolio has an open short position on the given symbol
func (p Portfolio) IsShort(symbol string) (pos Position, ok bool) {
	pos, held := p.holdings[symbol]
	if !held || pos.qty >= 0 {
		return pos, false
	}
	return pos, true
}
// Update updates the holding on a data event
func (p *Portfolio) Update(d DataEvent) {
	pos, ok := p.IsInvested(d.Symbol())
	if !ok {
		return
	}
	// Position is held by value; store the updated copy back into the map.
	pos.UpdateValue(d)
	p.holdings[d.Symbol()] = pos
}
// SetInitialCash sets the initial cash value of the portfolio.
func (p *Portfolio) SetInitialCash(initial float64) {
	p.initialCash = initial
}
// InitialCash returns the initial cash value of the portfolio.
func (p Portfolio) InitialCash() float64 {
	return p.initialCash
}
// SetCash sets the current cash value of the portfolio.
func (p *Portfolio) SetCash(cash float64) {
	p.cash = cash
}
// Cash returns the current cash value of the portfolio.
func (p Portfolio) Cash() float64 {
	return p.cash
}
// Value return the current total value of the portfolio: available cash plus
// the market value of all holdings.
func (p Portfolio) Value() float64 {
	var holdingValue float64
	for _, pos := range p.holdings {
		holdingValue += pos.marketValue
	}
	value := p.cash + holdingValue
	return value
}
// Holdings returns the holdings of the portfolio. Note the returned map is
// the portfolio's internal map, not a copy.
func (p Portfolio) Holdings() map[string]Position {
	return p.holdings
}
// OrderBook returns the order book of the portfolio
func (p Portfolio) OrderBook() ([]OrderEvent, bool) {
	// The bool signals whether the book holds any orders.
	return p.orderBook, len(p.orderBook) > 0
}
// OrdersBySymbol returns the order of a specific symbol from the order book.
func (p Portfolio) OrdersBySymbol(symbol string) ([]OrderEvent, bool) {
var orders = []OrderEvent{}
for _, order := range p.orderBook {
if order.Symbol() == symbol {
orders = append(orders, order)
}
}
if len(orders) == 0 {
return orders, false
}
return orders, true
} | portfolio.go | 0.733738 | 0.550245 | portfolio.go | starcoder |
package binarysearchtree
import (
"fmt"
)
// Node is a single node of a binary search tree, holding an int value and
// links to its left (smaller) and right (larger) subtrees.
type Node struct {
	Value int
	Left  *Node
	Right *Node
}

// NewBinarySearchTree returns the root of a new tree containing value.
func NewBinarySearchTree(value int) *Node {
	return &Node{Value: value}
}

// Insert adds value to the subtree rooted at n, preserving BST ordering.
// It returns an error if the value already exists anywhere in the subtree.
func (n *Node) Insert(value int) error {
	if value == n.Value {
		return fmt.Errorf("You can't insert duplicate value %d", value)
	}
	if value < n.Value {
		if n.Left == nil {
			n.Left = &Node{Value: value}
			return nil
		}
		// BUGFIX: propagate the recursive result; previously the error for a
		// duplicate deeper in the tree was silently discarded.
		return n.Left.Insert(value)
	}
	if n.Right == nil {
		n.Right = &Node{Value: value}
		return nil
	}
	return n.Right.Insert(value)
}

// Search returns the node holding value, or nil if it is not present.
func (n *Node) Search(value int) *Node {
	if n == nil {
		return nil
	}
	switch {
	case value == n.Value:
		return n
	case value < n.Value:
		return n.Left.Search(value)
	default:
		return n.Right.Search(value)
	}
}

// Delete removes value from the subtree rooted at n and returns the new
// subtree root (which may be nil). Deleting a missing value is a no-op.
func (n *Node) Delete(value int) *Node {
	if n == nil {
		return nil
	}
	if value < n.Value {
		n.Left = n.Left.Delete(value)
		return n
	}
	if value > n.Value {
		n.Right = n.Right.Delete(value)
		return n
	}
	// n holds the value: splice it out.
	switch {
	case n.Left == nil && n.Right == nil:
		return nil
	case n.Right == nil:
		return n.Left
	case n.Left == nil:
		return n.Right
	default:
		// Two children: replace with the in-order successor (minimum of the
		// right subtree), then remove that successor from the right subtree.
		minNode := n.Right.FindMinNode()
		n.Value = minNode.Value
		n.Right = n.Right.Delete(minNode.Value)
		return n
	}
}

// FindMinNode returns the left-most (smallest) node of the subtree.
func (n *Node) FindMinNode() *Node {
	minNode := n
	for minNode.Left != nil {
		minNode = minNode.Left
	}
	return minNode
}

// FindMaxNode returns the right-most (largest) node of the subtree.
func (n *Node) FindMaxNode() *Node {
	maxNode := n
	for maxNode.Right != nil {
		maxNode = maxNode.Right
	}
	return maxNode
}

// PreOrderTraverse returns the nodes in node-left-right order.
func (n *Node) PreOrderTraverse() []*Node {
	nodes := []*Node{n}
	if n.Left != nil {
		nodes = append(nodes, n.Left.PreOrderTraverse()...)
	}
	if n.Right != nil {
		nodes = append(nodes, n.Right.PreOrderTraverse()...)
	}
	return nodes
}

// PostOrderTraverse returns the nodes in left-right-node order.
func (n *Node) PostOrderTraverse() []*Node {
	var nodes []*Node
	if n.Left != nil {
		nodes = append(nodes, n.Left.PostOrderTraverse()...)
	}
	if n.Right != nil {
		nodes = append(nodes, n.Right.PostOrderTraverse()...)
	}
	// BUGFIX: the current node must come last; it was previously appended
	// between the subtrees, which produced an in-order sequence instead.
	nodes = append(nodes, n)
	return nodes
}

// InOrderTraverse returns the nodes in left-node-right (sorted) order.
func (n *Node) InOrderTraverse() []*Node {
	var nodes []*Node
	if n.Left != nil {
		nodes = append(nodes, n.Left.InOrderTraverse()...)
	}
	// BUGFIX: the current node belongs between the subtrees; it was
	// previously appended after the right subtree (post-order behavior).
	nodes = append(nodes, n)
	if n.Right != nil {
		nodes = append(nodes, n.Right.InOrderTraverse()...)
	}
	return nodes
}
package iter
// IterableForInt describes a struct that can be iterated over.
// NOTE(review): the ForInt/ForString families appear to be generated
// specializations of the same template — keep them in sync.
type IterableForInt interface {
	Next() OptionForInt
}
// IteratorForInt embeds an Iterable and provides util functions for it.
type IteratorForInt struct {
	iter IterableForInt
}
// Iterator implements Iterable (compile-time check).
var _ IterableForInt = IteratorForInt{}
// Next returns the next element of the Iterator.
func (i IteratorForInt) Next() OptionForInt {
	return i.iter.Next()
}
// AdvanceBy calls Next n times.
// It returns an error if it reached the end of the Iterator
// before it finished to iterate.
func (i IteratorForInt) AdvanceBy(n uint) error {
	for k := uint(0); k < n; k++ {
		if i.Next().IsNone() {
			return &errAdvanceBy{}
		}
	}
	return nil
}
// Nth returns the nth element of the Iterator.
// A too-short iterator simply yields None, so AdvanceBy's error is discarded.
func (i IteratorForInt) Nth(n uint) OptionForInt {
	i.AdvanceBy(n)
	return i.Next()
}
// Skip the next n iterations.
// Skipping past the end is not an error; the iterator is just exhausted.
func (i IteratorForInt) Skip(n uint) IteratorForInt {
	i.AdvanceBy(n)
	return i
}
// Collect returns a slice containing the remaining elements of the Iterator
// (always non-nil, possibly empty).
func (i IteratorForInt) Collect() []int {
	collected := []int{}
	item := i.Next()
	for item.IsSome() {
		collected = append(collected, item.Unwrap())
		item = i.Next()
	}
	return collected
}
// FoldFirst folds over the Iterator, using its first element as the accumulator initial value.
// An empty iterator yields None.
func (i IteratorForInt) FoldFirst(reducer func(acc, item int) int) OptionForInt {
	first := i.Next()
	if first.IsNone() {
		return NoneInt()
	}
	return SomeInt(i.FoldForInt(first.Unwrap(), reducer))
}
// Count returns the number of elements in the Iterator, consuming it.
func (i IteratorForInt) Count() uint {
	return i.FoldForUint(uint(0), func(acc uint, item int) uint {
		return acc + 1
	})
}
// Last returns the last element of the Iterator (None when empty).
func (i IteratorForInt) Last() OptionForInt {
	return i.FoldForOptionForInt(NoneInt(), func(acc OptionForInt, item int) OptionForInt {
		return SomeInt(item)
	})
}
// ForEach runs a callback for every element of the iterator.
func (i IteratorForInt) ForEach(callback func(item int)) {
	i.FoldForEmpty(Empty{}, func(acc Empty, item int) Empty {
		callback(item)
		return acc
	})
}
// All checks if all the elements of the Iterator validates a predicate.
// TryFold's bool short-circuits iteration on the first failure.
func (i IteratorForInt) All(predicate func(item int) bool) bool {
	_, ok := i.TryFoldForEmpty(Empty{}, func(acc Empty, item int) (Empty, bool) {
		return acc, predicate(item)
	})
	return ok
}
// Any checks if at least one element of the Iterator validates a predicate.
func (i IteratorForInt) Any(predicate func(item int) bool) bool {
	_, ok := i.TryFoldForEmpty(Empty{}, func(acc Empty, item int) (Empty, bool) {
		return acc, !predicate(item)
	})
	return !ok
}
// Find returns the first element of the Iterator that validates a predicate
// (None when nothing matches).
func (i IteratorForInt) Find(predicate func(item int) bool) OptionForInt {
	r, ok := i.TryFoldForOptionForInt(NoneInt(), func(acc OptionForInt, item int) (OptionForInt, bool) {
		return SomeInt(item), !predicate(item)
	})
	if ok {
		return NoneInt()
	}
	return r
}
// Position returns the position of the first element of the Iterator that validates a predicate.
func (i IteratorForInt) Position(predicate func(item int) bool) OptionForUint {
	r, ok := i.TryFoldForUint(uint(0), func(acc uint, item int) (uint, bool) {
		if predicate(item) {
			return acc, false
		}
		return acc + 1, true
	})
	if ok {
		return NoneUint()
	}
	return SomeUint(r)
}
// SkipWhile skips the next elements until it reaches one which validates predicate.
// The matching element itself is consumed by Find.
func (i IteratorForInt) SkipWhile(predicate func(item int) bool) IteratorForInt {
	i.Find(predicate)
	return i
}
// Map returns a new Iterator applying a mapper function to every element.
func (i IteratorForInt) Map(mapper func(item int) int) IteratorForInt {
	return IteratorForInt{iter: &mapIterableForInt{mapper: mapper, iter: i.iter}}
}
// Chain returns a new Iterator sequentially joining the two it was built on.
func (i IteratorForInt) Chain(iter IteratorForInt) IteratorForInt {
	return IteratorForInt{iter: &chainForInt{first: i.iter, second: iter.iter, flag: false}}
}
// TakeWhile returns a new Iterator yielding elements until predicate becomes false.
func (i IteratorForInt) TakeWhile(predicate func(item int) bool) IteratorForInt {
	return IteratorForInt{iter: &takeWhileForInt{iter: i.iter, predicate: predicate, flag: false}}
}
// Take returns a new Iterator yielding only the n next elements.
func (i IteratorForInt) Take(n uint) IteratorForInt {
	return IteratorForInt{iter: &takeForInt{iter: i.iter, count: 0, max: n, flag: false}}
}
// Filter returns a new Iterator yielding only elements validating a predicate.
func (i IteratorForInt) Filter(predicate func(item int) bool) IteratorForInt {
	return IteratorForInt{iter: &filterForInt{iter: i, predicate: predicate}}
}
// IterableForString describes a struct that can be iterated over.
// NOTE(review): this section mirrors the ForInt family above — keep in sync.
type IterableForString interface {
	Next() OptionForString
}
// IteratorForString embeds an Iterable and provides util functions for it.
type IteratorForString struct {
	iter IterableForString
}
// Iterator implements Iterable (compile-time check).
var _ IterableForString = IteratorForString{}
// Next returns the next element of the Iterator.
func (i IteratorForString) Next() OptionForString {
	return i.iter.Next()
}
// AdvanceBy calls Next n times.
// It returns an error if it reached the end of the Iterator
// before it finished to iterate.
func (i IteratorForString) AdvanceBy(n uint) error {
	for k := uint(0); k < n; k++ {
		if i.Next().IsNone() {
			return &errAdvanceBy{}
		}
	}
	return nil
}
// Nth returns the nth element of the Iterator.
// A too-short iterator simply yields None, so AdvanceBy's error is discarded.
func (i IteratorForString) Nth(n uint) OptionForString {
	i.AdvanceBy(n)
	return i.Next()
}
// Skip the next n iterations.
func (i IteratorForString) Skip(n uint) IteratorForString {
	i.AdvanceBy(n)
	return i
}
// Collect returns a slice containing the remaining elements of the Iterator
// (always non-nil, possibly empty).
func (i IteratorForString) Collect() []string {
	collected := []string{}
	item := i.Next()
	for item.IsSome() {
		collected = append(collected, item.Unwrap())
		item = i.Next()
	}
	return collected
}
// FoldFirst folds over the Iterator, using its first element as the accumulator initial value.
// An empty iterator yields None.
func (i IteratorForString) FoldFirst(reducer func(acc, item string) string) OptionForString {
	first := i.Next()
	if first.IsNone() {
		return NoneString()
	}
	return SomeString(i.FoldForString(first.Unwrap(), reducer))
}
// Count returns the number of elements in the Iterator, consuming it.
func (i IteratorForString) Count() uint {
	return i.FoldForUint(uint(0), func(acc uint, item string) uint {
		return acc + 1
	})
}
// Last returns the last element of the Iterator (None when empty).
func (i IteratorForString) Last() OptionForString {
	return i.FoldForOptionForString(NoneString(), func(acc OptionForString, item string) OptionForString {
		return SomeString(item)
	})
}
// ForEach runs a callback for every element of the iterator.
func (i IteratorForString) ForEach(callback func(item string)) {
	i.FoldForEmpty(Empty{}, func(acc Empty, item string) Empty {
		callback(item)
		return acc
	})
}
// All checks if all the elements of the Iterator validates a predicate.
func (i IteratorForString) All(predicate func(item string) bool) bool {
	_, ok := i.TryFoldForEmpty(Empty{}, func(acc Empty, item string) (Empty, bool) {
		return acc, predicate(item)
	})
	return ok
}
// Any checks if at least one element of the Iterator validates a predicate.
func (i IteratorForString) Any(predicate func(item string) bool) bool {
	_, ok := i.TryFoldForEmpty(Empty{}, func(acc Empty, item string) (Empty, bool) {
		return acc, !predicate(item)
	})
	return !ok
}
// Find returns the first element of the Iterator that validates a predicate
// (None when nothing matches).
func (i IteratorForString) Find(predicate func(item string) bool) OptionForString {
	r, ok := i.TryFoldForOptionForString(NoneString(), func(acc OptionForString, item string) (OptionForString, bool) {
		return SomeString(item), !predicate(item)
	})
	if ok {
		return NoneString()
	}
	return r
}
// Position returns the position of the first element of the Iterator that validates a predicate.
func (i IteratorForString) Position(predicate func(item string) bool) OptionForUint {
	r, ok := i.TryFoldForUint(uint(0), func(acc uint, item string) (uint, bool) {
		if predicate(item) {
			return acc, false
		}
		return acc + 1, true
	})
	if ok {
		return NoneUint()
	}
	return SomeUint(r)
}
// SkipWhile skips the next elements until it reaches one which validates predicate.
// The matching element itself is consumed by Find.
func (i IteratorForString) SkipWhile(predicate func(item string) bool) IteratorForString {
	i.Find(predicate)
	return i
}
// Map returns a new Iterator applying a mapper function to every element.
func (i IteratorForString) Map(mapper func(item string) string) IteratorForString {
	return IteratorForString{iter: &mapIterableForString{mapper: mapper, iter: i.iter}}
}
// Chain returns a new Iterator sequentially joining the two it was built on.
func (i IteratorForString) Chain(iter IteratorForString) IteratorForString {
	return IteratorForString{iter: &chainForString{first: i.iter, second: iter.iter, flag: false}}
}
// TakeWhile returns a new Iterator yielding elements until predicate becomes false.
func (i IteratorForString) TakeWhile(predicate func(item string) bool) IteratorForString {
	return IteratorForString{iter: &takeWhileForString{iter: i.iter, predicate: predicate, flag: false}}
}
// Take returns a new Iterator yielding only the n next elements.
func (i IteratorForString) Take(n uint) IteratorForString {
	return IteratorForString{iter: &takeForString{iter: i.iter, count: 0, max: n, flag: false}}
}
// Filter returns a new Iterator yielding only elements validating a predicate.
func (i IteratorForString) Filter(predicate func(item string) bool) IteratorForString {
return IteratorForString{iter: &filterForString{iter: i, predicate: predicate}}
} | examples/iterator.go | 0.856797 | 0.448426 | iterator.go | starcoder |
package schema
import (
"github.com/graphql-go/graphql"
"github.com/ob-vss-ss18/ppl-stock/models"
)
var stickType = graphql.NewObject(graphql.ObjectConfig{
Name: "Stick",
Description: "A stick.",
Fields: graphql.Fields{
"id": &graphql.Field{
Type: graphql.NewNonNull(graphql.Int),
Description: "The id of the stick",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Id, nil
}
return nil, nil
},
},
"usage": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The use case of the stick",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Usage, nil
}
return nil, nil
},
},
"usertype": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The usertype of the stick",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Usertype, nil
}
return nil, nil
},
},
"gender": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The gender by which the stick is intended to be used.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Gender, nil
}
return nil, nil
},
},
"manufacturer": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The manufacturer of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Manufacturer, nil
}
return nil, nil
},
},
"modell": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The model of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Model, nil
}
return nil, nil
},
},
"length": &graphql.Field{
Type: graphql.NewNonNull(graphql.Int),
Description: "The length of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Length, nil
}
return nil, nil
},
},
"bodyheight": &graphql.Field{
Type: graphql.NewNonNull(graphql.Int),
Description: "The best bodyheight for using this stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Bodyheight, nil
}
return nil, nil
},
},
"grip_kind": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The grip_kind of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.GripKind, nil
}
return nil, nil
},
},
"color": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The color of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Color, nil
}
return nil, nil
},
},
"price_new": &graphql.Field{
Type: graphql.NewNonNull(graphql.Float),
Description: "The new price of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.PriceNew, nil
}
return nil, nil
},
},
"condition": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The condition of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Condition, nil
}
return nil, nil
},
},
"availability": &graphql.Field{
Type: graphql.NewNonNull(graphql.String),
Description: "The status/availability of the stick.",
Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
if stick, ok := parameter.Source.(models.Stick); ok {
return stick.Status, nil
}
return nil, nil
},
},
},
}) | schema/stick.go | 0.538498 | 0.412708 | stick.go | starcoder |
package plaid
import (
"encoding/json"
)
// NumbersACHNullable struct for NumbersACHNullable.
// NOTE(review): this file has the shape of OpenAPI-generator output; prefer
// regenerating over hand-editing.
type NumbersACHNullable struct {
	// The Plaid account ID associated with the account numbers
	AccountId string `json:"account_id"`
	// The ACH account number for the account. Note that when using OAuth with Chase Bank (`ins_56`), Chase will issue \"tokenized\" routing and account numbers, which are not the user's actual account and routing numbers. These tokenized numbers should work identically to normal account and routing numbers. The digits returned in the `mask` field will continue to reflect the actual account number, rather than the tokenized account number; for this reason, when displaying account numbers to the user to help them identify their account in your UI, always use the `mask` rather than truncating the `account` number. If a user revokes their permissions to your app, the tokenized numbers will continue to work for ACH deposits, but not withdrawals.
	Account string `json:"account"`
	// The ACH routing number for the account. If the institution is `ins_56`, this may be a tokenized routing number. For more information, see the description of the `account` field.
	Routing string `json:"routing"`
	// The wire transfer routing number for the account, if available
	WireRouting NullableString `json:"wire_routing"`
}
// NewNumbersACHNullable instantiates a new NumbersACHNullable object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewNumbersACHNullable(accountId string, account string, routing string, wireRouting NullableString) *NumbersACHNullable {
	this := NumbersACHNullable{}
	this.AccountId = accountId
	this.Account = account
	this.Routing = routing
	this.WireRouting = wireRouting
	return &this
}
// NewNumbersACHNullableWithDefaults instantiates a new NumbersACHNullable object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// (all fields start at their zero values here).
func NewNumbersACHNullableWithDefaults() *NumbersACHNullable {
	this := NumbersACHNullable{}
	return &this
}
// GetAccountId returns the AccountId field value.
// The nil-receiver guard (returning the zero value) is the generated-code
// convention used by every accessor below.
func (o *NumbersACHNullable) GetAccountId() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.AccountId
}
// GetAccountIdOk returns a tuple with the AccountId field value
// and a boolean to check if the value has been set.
func (o *NumbersACHNullable) GetAccountIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.AccountId, true
}
// SetAccountId sets field value
func (o *NumbersACHNullable) SetAccountId(v string) {
	o.AccountId = v
}
// GetAccount returns the Account field value
func (o *NumbersACHNullable) GetAccount() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Account
}
// GetAccountOk returns a tuple with the Account field value
// and a boolean to check if the value has been set.
func (o *NumbersACHNullable) GetAccountOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Account, true
}
// SetAccount sets field value
func (o *NumbersACHNullable) SetAccount(v string) {
	o.Account = v
}
// GetRouting returns the Routing field value
func (o *NumbersACHNullable) GetRouting() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Routing
}
// GetRoutingOk returns a tuple with the Routing field value
// and a boolean to check if the value has been set.
func (o *NumbersACHNullable) GetRoutingOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Routing, true
}
// SetRouting sets field value
func (o *NumbersACHNullable) SetRouting(v string) {
	o.Routing = v
}
// GetWireRouting returns the WireRouting field value
// If the value is explicit nil, the zero value for string will be returned
func (o *NumbersACHNullable) GetWireRouting() string {
	if o == nil || o.WireRouting.Get() == nil {
		var ret string
		return ret
	}
	return *o.WireRouting.Get()
}
// GetWireRoutingOk returns a tuple with the WireRouting field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *NumbersACHNullable) GetWireRoutingOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return o.WireRouting.Get(), o.WireRouting.IsSet()
}
// SetWireRouting sets field value and marks it as set.
func (o *NumbersACHNullable) SetWireRouting(v string) {
	o.WireRouting.Set(&v)
}
// MarshalJSON serializes every field of the struct. WireRouting marshals as
// its inner value, which may be JSON null when unset.
func (o NumbersACHNullable) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"account_id":   o.AccountId,
		"account":      o.Account,
		"routing":      o.Routing,
		"wire_routing": o.WireRouting.Get(),
	}
	return json.Marshal(toSerialize)
}
// NullableNumbersACHNullable wraps a NumbersACHNullable pointer together with
// an explicit "set" flag so that JSON null can be distinguished from "absent".
type NullableNumbersACHNullable struct {
	value *NumbersACHNullable
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableNumbersACHNullable) Get() *NumbersACHNullable {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableNumbersACHNullable) Set(val *NumbersACHNullable) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableNumbersACHNullable) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableNumbersACHNullable) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableNumbersACHNullable returns a wrapper already marked as set.
func NewNullableNumbersACHNullable(val *NumbersACHNullable) *NullableNumbersACHNullable {
	return &NullableNumbersACHNullable{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableNumbersACHNullable) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_numbers_ach_nullable.go | 0.766992 | 0.440048 | model_numbers_ach_nullable.go | starcoder |
package floatutils
import (
"github.com/nlpodyssey/spago/pkg/mat64/internal/asm/f64"
"math"
"strconv"
"strings"
)
// Copy creates and return a copy of the given slice.
// The result never aliases the input's backing array.
func Copy(in []float64) []float64 {
	dup := make([]float64, len(in))
	for i, v := range in {
		dup[i] = v
	}
	return dup
}
// FillFloatSlice fills the given slice's elements with value, in place.
func FillFloatSlice(slice []float64, value float64) {
	for i := 0; i < len(slice); i++ {
		slice[i] = value
	}
}
// Sign returns +1 if a is positive, -1 if a is negative, or 0 otherwise
// (including for 0 itself).
func Sign(a float64) int {
	if a < 0 {
		return -1
	}
	if a > 0 {
		return +1
	}
	return 0
}
// Max returns the maximum value from the given slice, which MUST NOT be empty.
func Max(v []float64) float64 {
	// Seed from the last element (the original convention) then scan all.
	best := v[len(v)-1]
	for _, e := range v {
		if e >= best {
			best = e
		}
	}
	return best
}
// Sum returns the sum of all values from the given slice (0 for empty/nil).
func Sum(v []float64) float64 {
	total := 0.0
	for _, e := range v {
		total += e
	}
	return total
}
// ArgMinMax finds the indices of the minimum and maximum elements of v.
// Ties keep the earliest index; an empty slice yields (0, 0).
func ArgMinMax(v []float64) (imin, imax int) {
	if len(v) == 0 {
		return 0, 0
	}
	lo, hi := v[0], v[0]
	for j := 1; j < len(v); j++ {
		if v[j] < lo {
			lo, imin = v[j], j
		}
		if v[j] > hi {
			hi, imax = v[j], j
		}
	}
	return imin, imax
}
// ArgMax finds the index of the max argument.
func ArgMax(v []float64) int {
	_, imax := ArgMinMax(v)
	return imax
}
// ArgMin finds the index of the min argument.
func ArgMin(v []float64) int {
	imin, _ := ArgMinMax(v)
	return imin
}
// MakeFloatMatrix returns a new rows x cols 2-dimensional slice, zero-filled.
func MakeFloatMatrix(rows, cols int) [][]float64 {
	matrix := make([][]float64, rows)
	for i := range matrix {
		matrix[i] = make([]float64, cols)
	}
	return matrix
}
// StrToFloatSlice parses a whitespace-separated string of float64 values.
// It returns the first parse error encountered, if any.
func StrToFloatSlice(str string) ([]float64, error) {
	fields := strings.Fields(str)
	out := make([]float64, len(fields))
	for i, f := range fields {
		num, err := strconv.ParseFloat(f, 64)
		if err != nil {
			return nil, err
		}
		out[i] = num
	}
	return out, nil
}
// SoftMax returns the results of the softmax function.
func SoftMax(v []float64) (sm []float64) {
c := Max(v)
var sum float64 = 0
for _, e := range v {
sum += math.Exp(e - c)
}
sm = make([]float64, len(v))
for i, v := range v {
sm[i] = math.Exp(v-c) / sum
}
return sm
}
// CumSum computes the cumulative sum of src into dst, and returns dst.
func CumSum(dst, src []float64) []float64 {
return f64.CumSum(dst, src)
} | pkg/mat64/floatutils/utils.go | 0.828592 | 0.489198 | utils.go | starcoder |
package quality
import (
"github.com/biogo/biogo/alphabet"
"github.com/biogo/biogo/seq"
)
// A slice of quality scores that satisfies the alphabet.Slice interface.
type Qphreds []alphabet.Qphred

// Make returns a new Qphreds with the given length and capacity.
func (q Qphreds) Make(len, cap int) alphabet.Slice { return make(Qphreds, len, cap) }
// Len returns the number of scores.
func (q Qphreds) Len() int { return len(q) }
// Cap returns the capacity of the underlying slice.
func (q Qphreds) Cap() int { return cap(q) }
// Slice returns the subslice covering [start, end).
func (q Qphreds) Slice(start, end int) alphabet.Slice { return q[start:end] }
// Append concatenates a onto q; a must be a Qphreds (the type assertion
// panics otherwise).
func (q Qphreds) Append(a alphabet.Slice) alphabet.Slice {
	return append(q, a.(Qphreds)...)
}
// Copy copies scores from a (which must be a Qphreds) into q and returns the
// number of elements copied.
func (q Qphreds) Copy(a alphabet.Slice) int { return copy(q, a.(Qphreds)) }
// Phred is a quality score sequence: a slice of Phred scores together with
// sequence annotation and the letter encoding used for (de)serialisation.
type Phred struct {
	seq.Annotation
	Qual Qphreds // raw scores; sequence position i maps to Qual[i-Offset]
	Encode alphabet.Encoding // encoding used by QEncode, QDecode and String
}

// NewPhred creates a new Phred score sequence with the given id, a copy of
// the scores in q, and the given letter encoding.
func NewPhred(id string, q []alphabet.Qphred, encode alphabet.Encoding) *Phred {
	return &Phred{
		Annotation: seq.Annotation{ID: id},
		// Copy q so that later mutation of the caller's slice cannot
		// alias Qual.
		Qual: append([]alphabet.Qphred(nil), q...),
		Encode: encode,
	}
}
// Slice returns the underlying quality score slice.
func (q *Phred) Slice() alphabet.Slice { return q.Qual }

// SetSlice sets the underlying quality score slice; sl must be a Qphreds.
func (q *Phred) SetSlice(sl alphabet.Slice) { q.Qual = sl.(Qphreds) }

// Append appends the given scores.
func (q *Phred) Append(a ...alphabet.Qphred) { q.Qual = append(q.Qual, a...) }

// At returns the raw score at position i. Positions are absolute and are
// translated by the sequence Offset before indexing Qual.
func (q *Phred) At(i int) alphabet.Qphred { return q.Qual[i-q.Offset] }

// EAt returns the error probability at position i.
func (q *Phred) EAt(i int) float64 { return q.Qual[i-q.Offset].ProbE() }

// Set sets the raw score at position i to qual. It never fails.
func (q *Phred) Set(i int, qual alphabet.Qphred) error { q.Qual[i-q.Offset] = qual; return nil }

// SetE sets the score at position i from the error probability e. It never
// fails.
func (q *Phred) SetE(i int, e float64) error {
	q.Qual[i-q.Offset] = alphabet.Ephred(e)
	return nil
}

// QEncode encodes the quality at position i to a letter based on the
// sequence Encode setting.
func (q *Phred) QEncode(i int) byte {
	return q.Qual[i-q.Offset].Encode(q.Encode)
}

// QDecode decodes quality letter l to a Phred score based on the sequence
// Encode setting.
func (q *Phred) QDecode(l byte) alphabet.Qphred { return q.Encode.DecodeToQphred(l) }

// Encoding returns the quality Encode type.
func (q *Phred) Encoding() alphabet.Encoding { return q.Encode }

// SetEncoding sets the quality Encode type to e. It never fails.
func (q *Phred) SetEncoding(e alphabet.Encoding) error { q.Encode = e; return nil }

// Len returns the length of the score sequence.
func (q *Phred) Len() int { return len(q.Qual) }

// Start returns the start position of the score sequence (the Offset).
func (q *Phred) Start() int { return q.Offset }

// End returns the exclusive end position of the score sequence.
func (q *Phred) End() int { return q.Offset + q.Len() }
// Copy returns a deep copy of the quality sequence: the Qual slice is
// duplicated so the copy and the original do not share storage.
func (q *Phred) Copy() seq.Quality {
	c := *q
	c.Qual = append([]alphabet.Qphred(nil), q.Qual...)
	return &c
}
// Reverse reverses the order of the quality scores in place.
func (q *Phred) Reverse() {
	s := q.Qual
	n := len(s)
	for i := 0; i < n/2; i++ {
		s[i], s[n-1-i] = s[n-1-i], s[i]
	}
}
func (q *Phred) String() string {
qs := make([]byte, 0, len(q.Qual))
for _, s := range q.Qual {
qs = append(qs, s.Encode(q.Encode))
}
return string(qs)
} | seq/quality/phred.go | 0.792705 | 0.50238 | phred.go | starcoder |
package geom
// A MultiPoint is a collection of Points.
type MultiPoint struct {
	// To represent an MultiPoint that allows EMPTY elements, e.g.
	// MULTIPOINT ( EMPTY, POINT(1.0 1.0), EMPTY), we have to allow
	// record ends. If there is an empty point, ends[i] == ends[i-1].
	// The flat coordinates, stride, layout, srid and ends live in the
	// embedded geom2.
	geom2
}
// NewMultiPoint returns a new, empty, MultiPoint.
func NewMultiPoint(layout Layout) *MultiPoint {
	return NewMultiPointFlat(layout, nil)
}

// NewMultiPointFlatOption represents an option that can be passed into
// NewMultiPointFlat.
type NewMultiPointFlatOption func(*MultiPoint)

// NewMultiPointFlatOptionWithEnds allows passing ends to NewMultiPointFlat,
// which allows the representation of empty points
// (an empty point i has ends[i] == ends[i-1]).
func NewMultiPointFlatOptionWithEnds(ends []int) NewMultiPointFlatOption {
	return func(mp *MultiPoint) {
		mp.ends = ends
	}
}
// NewMultiPointFlat returns a new MultiPoint with the given flat coordinates.
// Assumes no points are empty by default. Use `NewMultiPointFlatOptionWithEnds`
// to specify empty points.
func NewMultiPointFlat(
	layout Layout, flatCoords []float64, opts ...NewMultiPointFlatOption,
) *MultiPoint {
	mp := &MultiPoint{}
	mp.layout = layout
	mp.stride = layout.Stride()
	mp.flatCoords = flatCoords
	for _, applyOpt := range opts {
		applyOpt(mp)
	}
	// Without explicit ends, treat every stride-sized chunk of flatCoords
	// as one non-empty point.
	if mp.ends == nil && len(mp.flatCoords) > 0 {
		n := 0
		if mp.stride > 0 {
			n = len(flatCoords) / mp.stride
		}
		ends := make([]int, n)
		for i := range ends {
			ends[i] = (i + 1) * mp.stride
		}
		mp.ends = ends
	}
	return mp
}
// Area returns the area of g, i.e. zero: points have no extent.
func (g *MultiPoint) Area() float64 {
	return 0
}

// Clone returns a deep copy, using the generated deep-copy helper.
func (g *MultiPoint) Clone() *MultiPoint {
	return deriveCloneMultiPoint(g)
}

// Length returns zero: points have no extent.
func (g *MultiPoint) Length() float64 {
	return 0
}

// MustSetCoords sets the coordinates and panics on any error.
func (g *MultiPoint) MustSetCoords(coords []Coord) *MultiPoint {
	Must(g.SetCoords(coords))
	return g
}
// Coord returns the ith coord of g, or nil if the ith point is empty.
func (g *MultiPoint) Coord(i int) Coord {
	start := 0
	if i > 0 {
		start = g.ends[i-1]
	}
	end := g.ends[i]
	// A zero-width range encodes an empty point.
	if end == start {
		return nil
	}
	return g.flatCoords[start:end]
}
// SetCoords sets the coordinates. A nil Coord represents an empty point:
// no flat coordinates are appended for it, so its recorded end equals the
// previous end.
func (g *MultiPoint) SetCoords(coords []Coord) (*MultiPoint, error) {
	g.flatCoords = nil
	g.ends = nil
	for _, c := range coords {
		if c != nil {
			var err error
			g.flatCoords, err = deflate0(g.flatCoords, c, g.stride)
			if err != nil {
				return nil, err
			}
		}
		// Record an end for every point, including empty ones.
		g.ends = append(g.ends, len(g.flatCoords))
	}
	return g, nil
}
// Coords unpacks and returns all of g's coordinates. Empty points are
// returned as nil Coords.
func (g *MultiPoint) Coords() []Coord {
	out := make([]Coord, len(g.ends))
	offset, prev := 0, 0
	for i, end := range g.ends {
		if end == prev {
			// Empty point: leave out[i] nil and consume no coordinates.
			continue
		}
		out[i] = inflate0(g.flatCoords, offset, offset+g.stride, g.stride)
		offset += g.stride
		prev = end
	}
	return out
}
// NumCoords returns the number of coordinates in g (one per point,
// including empty points).
func (g *MultiPoint) NumCoords() int {
	return len(g.ends)
}

// SetSRID sets the SRID of g and returns g for chaining.
func (g *MultiPoint) SetSRID(srid int) *MultiPoint {
	g.srid = srid
	return g
}

// NumPoints returns the number of Points.
func (g *MultiPoint) NumPoints() int {
	return len(g.ends)
}

// Point returns the ith Point. An empty point is returned as an empty
// Point of g's layout.
func (g *MultiPoint) Point(i int) *Point {
	coord := g.Coord(i)
	if coord == nil {
		return NewPointEmpty(g.layout)
	}
	return NewPointFlat(g.layout, coord)
}
// Push appends a point to g. It returns an ErrLayoutMismatch if the
// point's layout differs from g's.
func (g *MultiPoint) Push(p *Point) error {
	if g.layout != p.layout {
		return ErrLayoutMismatch{Got: p.layout, Want: g.layout}
	}
	if !p.Empty() {
		g.flatCoords = append(g.flatCoords, p.flatCoords...)
	}
	// An end is recorded even for an empty point, producing
	// ends[i] == ends[i-1], which is how emptiness is represented.
	g.ends = append(g.ends, len(g.flatCoords))
	return nil
}
// Swap swaps the values of g and g2.
func (g *MultiPoint) Swap(g2 *MultiPoint) {
*g, *g2 = *g2, *g
} | multipoint.go | 0.847432 | 0.551151 | multipoint.go | starcoder |
package mixins
import (
"io"
"github.com/ipld/go-ipld-prime/datamodel"
)
// FloatTraits provides the Node-method code generation for types whose
// data-model kind is Float.
type FloatTraits struct {
	PkgName string
	TypeName string // see doc in kindTraitsGenerator
	TypeSymbol string // see doc in kindTraitsGenerator
}

// Kind reports the data-model kind these traits generate for: Float.
func (FloatTraits) Kind() datamodel.Kind {
	return datamodel.Kind_Float
}

// EmitNodeMethodKind emits a Kind method that returns datamodel.Kind_Float.
func (g FloatTraits) EmitNodeMethodKind(w io.Writer) {
	doTemplate(`
func ({{ .TypeSymbol }}) Kind() datamodel.Kind {
return datamodel.Kind_Float
}
`, w, g)
}
// The remaining EmitNodeMethod* methods all delegate to
// kindTraitsGenerator, which emits the standard rejection bodies for the
// Node methods that do not apply to a Float kind (lookups, iterators,
// length, absence/null checks, and the mismatched As* conversions).
func (g FloatTraits) EmitNodeMethodLookupByString(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodLookupByString(w)
}
func (g FloatTraits) EmitNodeMethodLookupByNode(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodLookupByNode(w)
}
func (g FloatTraits) EmitNodeMethodLookupByIndex(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodLookupByIndex(w)
}
func (g FloatTraits) EmitNodeMethodLookupBySegment(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodLookupBySegment(w)
}
func (g FloatTraits) EmitNodeMethodMapIterator(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodMapIterator(w)
}
func (g FloatTraits) EmitNodeMethodListIterator(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodListIterator(w)
}
func (g FloatTraits) EmitNodeMethodLength(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodLength(w)
}
func (g FloatTraits) EmitNodeMethodIsAbsent(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodIsAbsent(w)
}
func (g FloatTraits) EmitNodeMethodIsNull(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodIsNull(w)
}
func (g FloatTraits) EmitNodeMethodAsBool(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodAsBool(w)
}
func (g FloatTraits) EmitNodeMethodAsInt(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodAsInt(w)
}
func (g FloatTraits) EmitNodeMethodAsString(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodAsString(w)
}
func (g FloatTraits) EmitNodeMethodAsBytes(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodAsBytes(w)
}
func (g FloatTraits) EmitNodeMethodAsLink(w io.Writer) {
	kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_Float}.emitNodeMethodAsLink(w)
}
// FloatAssemblerTraits provides the NodeAssembler-method code generation
// for types whose data-model kind is Float.
type FloatAssemblerTraits struct {
	PkgName string
	TypeName string // see doc in kindAssemblerTraitsGenerator
	AppliedPrefix string // see doc in kindAssemblerTraitsGenerator
}

// Kind reports the data-model kind these traits generate for: Float.
func (FloatAssemblerTraits) Kind() datamodel.Kind {
	return datamodel.Kind_Float
}

// The EmitNodeAssemblerMethod* methods all delegate to
// kindAssemblerTraitsGenerator, which emits the standard rejection bodies
// for assembler methods that do not apply to a Float kind.
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodBeginMap(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodBeginMap(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodBeginList(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodBeginList(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodAssignNull(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodAssignNull(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodAssignBool(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodAssignBool(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodAssignInt(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodAssignInt(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodAssignString(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodAssignString(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodAssignBytes(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodAssignBytes(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodAssignLink(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodAssignLink(w)
}
func (g FloatAssemblerTraits) EmitNodeAssemblerMethodPrototype(w io.Writer) {
	kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_Float}.emitNodeAssemblerMethodPrototype(w)
} | schema/gen/go/mixins/floatGenMixin.go | 0.540681 | 0.427038 | floatGenMixin.go | starcoder |
package flags
import (
"fmt"
"strings"
"github.com/aquasecurity/tracee/tracee-ebpf/tracee"
)
// FilterHelp returns the usage text for the --trace flag: the expression
// grammar understood by PrepareFilter together with worked examples.
// Fix: the pidns example's description previously said "not equal to
// 4026531840" while the example expression uses 4026531836; the
// description now matches the expression.
func FilterHelp() string {
	return `Select which events to trace by defining trace expressions that operate on events or process metadata.
Only events that match all trace expressions will be traced (trace flags are ANDed).
The following types of expressions are supported:
Numerical expressions which compare numbers and allow the following operators: '=', '!=', '<', '>'.
Available numerical expressions: uid, pid, mntns, pidns.
String expressions which compares text and allow the following operators: '=', '!='.
Available string expressions: event, set, uts, comm.
Boolean expressions that check if a boolean is true and allow the following operator: '!'.
Available boolean expressions: container.
Event arguments can be accessed using 'event_name.event_arg' and provide a way to filter an event by its arguments.
Event arguments allow the following operators: '=', '!='.
Strings can be compared as a prefix if ending with '*'.
Event return value can be accessed using 'event_name.retval' and provide a way to filter an event by its return value.
Event return value expression has the same syntax as a numerical expression.
Non-boolean expressions can compare a field to multiple values separated by ','.
Multiple values are ORed if used with equals operator '=', but are ANDed if used with any other operator.
The field 'container' and 'pid' also support the special value 'new' which selects new containers or pids, respectively.
The field 'set' selects a set of events to trace according to predefined sets, which can be listed by using the 'list' flag.
The special 'follow' expression declares that not only processes that match the criteria will be traced, but also their descendants.
Examples:
--trace pid=new | only trace events from new processes
--trace pid=510,1709 | only trace events from pid 510 or pid 1709
--trace p=510 --trace p=1709 | only trace events from pid 510 or pid 1709 (same as above)
--trace container=new | only trace events from newly created containers
--trace container | only trace events from containers
--trace c | only trace events from containers (same as above)
--trace '!container' | only trace events from the host
--trace uid=0 | only trace events from uid 0
--trace mntns=4026531840 | only trace events from mntns id 4026531840
--trace pidns!=4026531836 | only trace events from pidns id not equal to 4026531836
--trace tree=476165 | only trace events that descend from the process with pid 476165
--trace tree!=5023 | only trace events if they do not descend from the process with pid 5023
--trace tree=3213,5200 --trace tree!=3215 | only trace events if they descend from 3213 or 5200, but not 3215
--trace 'uid>0' | only trace events from uids greater than 0
--trace 'pid>0' --trace 'pid<1000' | only trace events from pids between 0 and 1000
--trace 'u>0' --trace u!=1000 | only trace events from uids greater than 0 but not 1000
--trace event=execve,open | only trace execve and open events
--trace event=open* | only trace events prefixed by "open"
--trace event!=open*,dup* | don't trace events prefixed by "open" or "dup"
--trace set=fs | trace all file-system related events
--trace s=fs --trace e!=open,openat | trace all file-system related events, but not open(at)
--trace uts!=ab356bc4dd554 | don't trace events from uts name ab356bc4dd554
--trace comm=ls | only trace events from ls command
--trace close.fd=5 | only trace 'close' events that have 'fd' equals 5
--trace openat.pathname=/tmp* | only trace 'openat' events that have 'pathname' prefixed by "/tmp"
--trace openat.pathname!=/tmp/1,/bin/ls | don't trace 'openat' events that have 'pathname' equals /tmp/1 or /bin/ls
--trace comm=bash --trace follow | trace all events that originated from bash or from one of the processes spawned by bash
Note: some of the above operators have special meanings in different shells.
To 'escape' those operators, please use single quotes, e.g.: 'uid>0'
`
}
// PrepareFilter parses the --trace filter expressions and assembles the
// tracee.Filter configuration. Each expression is split at the first
// occurrence of an operator character ("=!<>") into a filter name and an
// operator-plus-values suffix, then dispatched to the matching sub-filter
// parser. An error is returned for any expression no parser accepts.
func PrepareFilter(filters []string) (tracee.Filter, error) {
	// Start with all sub-filters present but disabled/empty; the parsers
	// below enable and populate them as expressions are consumed.
	filter := tracee.Filter{
		UIDFilter: &tracee.UintFilter{
			Equal: []uint64{},
			NotEqual: []uint64{},
			Less: tracee.LessNotSetUint,
			Greater: tracee.GreaterNotSetUint,
			Is32Bit: true,
		},
		PIDFilter: &tracee.UintFilter{
			Equal: []uint64{},
			NotEqual: []uint64{},
			Less: tracee.LessNotSetUint,
			Greater: tracee.GreaterNotSetUint,
			Is32Bit: true,
		},
		NewPidFilter: &tracee.BoolFilter{},
		MntNSFilter: &tracee.UintFilter{
			Equal: []uint64{},
			NotEqual: []uint64{},
			Less: tracee.LessNotSetUint,
			Greater: tracee.GreaterNotSetUint,
		},
		PidNSFilter: &tracee.UintFilter{
			Equal: []uint64{},
			NotEqual: []uint64{},
			Less: tracee.LessNotSetUint,
			Greater: tracee.GreaterNotSetUint,
		},
		UTSFilter: &tracee.StringFilter{
			Equal: []string{},
			NotEqual: []string{},
		},
		CommFilter: &tracee.StringFilter{
			Equal: []string{},
			NotEqual: []string{},
		},
		ContFilter: &tracee.BoolFilter{},
		NewContFilter: &tracee.BoolFilter{},
		ContIDFilter: &tracee.ContIDFilter{
			Equal: []string{},
			NotEqual: []string{},
		},
		RetFilter: &tracee.RetFilter{
			Filters: make(map[int32]tracee.IntFilter),
		},
		ArgFilter: &tracee.ArgFilter{
			Filters: make(map[int32]map[string]tracee.ArgFilterVal),
		},
		ProcessTreeFilter: &tracee.ProcessTreeFilter{
			PIDs: make(map[uint32]bool),
		},
		EventsToTrace: []int32{},
	}
	// event= and set= expressions are accumulated here and resolved into
	// event IDs at the end by prepareEventsToTrace.
	eventFilter := &tracee.StringFilter{Equal: []string{}, NotEqual: []string{}}
	setFilter := &tracee.StringFilter{Equal: []string{}, NotEqual: []string{}}
	eventsNameToID := make(map[string]int32, len(tracee.EventsDefinitions))
	for id, event := range tracee.EventsDefinitions {
		eventsNameToID[event.Name] = id
	}
	for _, f := range filters {
		filterName := f
		operatorAndValues := ""
		// Split "name<op>values" at the first operator character; an
		// operator-less expression (e.g. "container") keeps the whole
		// string as the name.
		operatorIndex := strings.IndexAny(f, "=!<>")
		if operatorIndex > 0 {
			filterName = f[0:operatorIndex]
			operatorAndValues = f[operatorIndex:]
		}
		if strings.Contains(f, ".retval") {
			err := filter.RetFilter.Parse(filterName, operatorAndValues, eventsNameToID)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.Contains(f, ".") {
			err := filter.ArgFilter.Parse(filterName, operatorAndValues, eventsNameToID)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		// The filters which are more common (container, event, pid, set, uid) can be given using a prefix of them.
		// Other filters should be given using their full name.
		// To avoid collisions between filters that share the same prefix, put the filters which should have an exact match first!
		// Note the reversed HasPrefix arguments below: they accept any
		// prefix of the full filter name (e.g. "c" or "cont" for
		// "container").
		if filterName == "comm" {
			err := filter.CommFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.HasPrefix("container", f) || (strings.HasPrefix("!container", f) && len(f) > 1) {
			err := filter.ContFilter.Parse(f)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.HasPrefix("container", filterName) {
			if operatorAndValues == "=new" {
				filter.NewContFilter.Enabled = true
				filter.NewContFilter.Value = true
				continue
			}
			if operatorAndValues == "!=new" {
				// "not new" still implies tracing containers.
				filter.ContFilter.Enabled = true
				filter.ContFilter.Value = true
				filter.NewContFilter.Enabled = true
				filter.NewContFilter.Value = false
				continue
			}
			err := filter.ContIDFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.HasPrefix("event", filterName) {
			err := eventFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if filterName == "mntns" {
			err := filter.MntNSFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if filterName == "pidns" {
			err := filter.PidNSFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if filterName == "tree" {
			err := filter.ProcessTreeFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.HasPrefix("pid", filterName) {
			if operatorAndValues == "=new" {
				filter.NewPidFilter.Enabled = true
				filter.NewPidFilter.Value = true
				continue
			}
			if operatorAndValues == "!=new" {
				filter.NewPidFilter.Enabled = true
				filter.NewPidFilter.Value = false
				continue
			}
			err := filter.PIDFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.HasPrefix("set", filterName) {
			err := setFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if filterName == "uts" {
			err := filter.UTSFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.HasPrefix("uid", filterName) {
			err := filter.UIDFilter.Parse(operatorAndValues)
			if err != nil {
				return tracee.Filter{}, err
			}
			continue
		}
		if strings.HasPrefix("follow", f) {
			filter.Follow = true
			continue
		}
		return tracee.Filter{}, fmt.Errorf("invalid filter option specified, use '--trace help' for more info")
	}
	// Resolve the accumulated event/set expressions into event IDs.
	var err error
	filter.EventsToTrace, err = prepareEventsToTrace(eventFilter, setFilter, eventsNameToID)
	if err != nil {
		return tracee.Filter{}, err
	}
	return filter, nil
}
// prepareEventsToTrace resolves the parsed event and set filters into the
// final list of event IDs to trace. Event names may end in '*' to match by
// prefix. Excluded events (event!=...) are only removed from set
// expansions; they do not remove explicitly requested events. If neither
// events nor sets were requested, the "default" set is traced.
func prepareEventsToTrace(eventFilter *tracee.StringFilter, setFilter *tracee.StringFilter, eventsNameToID map[string]int32) ([]int32, error) {
	eventFilter.Enabled = true
	eventsToTrace := eventFilter.Equal
	excludeEvents := eventFilter.NotEqual
	setsToTrace := setFilter.Equal
	var res []int32
	setsToEvents := make(map[string][]int32)
	isExcluded := make(map[int32]bool)
	// Build the set-name -> event-IDs index.
	for id, event := range tracee.EventsDefinitions {
		for _, set := range event.Sets {
			setsToEvents[set] = append(setsToEvents[set], id)
		}
	}
	// Mark excluded events; a wildcard that matches nothing is an error.
	for _, name := range excludeEvents {
		// Handle event prefixes with wildcards
		if strings.HasSuffix(name, "*") {
			found := false
			prefix := name[:len(name)-1]
			for event, id := range eventsNameToID {
				if strings.HasPrefix(event, prefix) {
					isExcluded[id] = true
					found = true
				}
			}
			if !found {
				return nil, fmt.Errorf("invalid event to exclude: %s", name)
			}
		} else {
			id, ok := eventsNameToID[name]
			if !ok {
				return nil, fmt.Errorf("invalid event to exclude: %s", name)
			}
			isExcluded[id] = true
		}
	}
	if len(eventsToTrace) == 0 && len(setsToTrace) == 0 {
		setsToTrace = append(setsToTrace, "default")
	}
	res = make([]int32, 0, len(tracee.EventsDefinitions))
	// Resolve explicitly requested events (exclusions do not apply here).
	for _, name := range eventsToTrace {
		// Handle event prefixes with wildcards
		if strings.HasSuffix(name, "*") {
			var ids []int32
			found := false
			prefix := name[:len(name)-1]
			for event, id := range eventsNameToID {
				if strings.HasPrefix(event, prefix) {
					ids = append(ids, id)
					found = true
				}
			}
			if !found {
				return nil, fmt.Errorf("invalid event to trace: %s", name)
			}
			res = append(res, ids...)
		} else {
			id, ok := eventsNameToID[name]
			if !ok {
				return nil, fmt.Errorf("invalid event to trace: %s", name)
			}
			res = append(res, id)
		}
	}
	// Expand requested sets, honouring the exclusions gathered above.
	for _, set := range setsToTrace {
		setEvents, ok := setsToEvents[set]
		if !ok {
			return nil, fmt.Errorf("invalid set to trace: %s", set)
		}
		for _, id := range setEvents {
			if !isExcluded[id] {
				res = append(res, id)
			}
		}
	}
	return res, nil
} | cmd/tracee-ebpf/internal/flags/flags-filter.go | 0.610686 | 0.444444 | flags-filter.go | starcoder |
package indicators
import (
"container/list"
"errors"
"github.com/thetruetrade/gotrade"
)
// A Linear Regression Indicator (LinReg), no storage, for use in other indicators
// A Linear Regression Indicator (LinReg), no storage, for use in other indicators
type LinRegWithoutStorage struct {
	*baseIndicator
	*baseFloatBounds
	// private variables
	periodCounter int // starts at -timePeriod; results are produced once it reaches zero
	periodHistory *list.List // the most recent (timePeriod-1) input values
	sumX float64 // Σx over x = 0..timePeriod-1; constant for a fixed period
	sumXSquare float64 // Σx² over the same range; constant for a fixed period
	divisor float64 // sumX² - timePeriod·sumXSquare, the regression denominator
	valueAvailableAction ValueAvailableActionLinearReg // callback invoked with each (value, slope, intercept)
	timePeriod int // the regression window length
}
// NewLinRegWithoutStorage creates a Linear Regression Indicator (LinReg)
// without storage. A value available action is mandatory, and timePeriod
// must be in [2, MaximumLookbackPeriod].
// Fix: removed the redundant re-assignment of valueAvailableAction after
// the struct literal had already set the same field.
func NewLinRegWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionLinearReg) (indicator *LinRegWithoutStorage, err error) {
	// an indicator without storage MUST have a value available action
	if valueAvailableAction == nil {
		return nil, ErrValueAvailableActionIsNil
	}
	// the minimum timeperiod for this indicator is 2
	if timePeriod < 2 {
		return nil, errors.New("timePeriod is less than the minimum (2)")
	}
	// check the maximum timeperiod
	if timePeriod > MaximumLookbackPeriod {
		return nil, errors.New("timePeriod is greater than the maximum (100000)")
	}
	lookback := timePeriod - 1
	ind := LinRegWithoutStorage{
		baseIndicator:        newBaseIndicator(lookback),
		baseFloatBounds:      newBaseFloatBounds(),
		periodCounter:        (timePeriod) * -1,
		periodHistory:        list.New(),
		valueAvailableAction: valueAvailableAction,
		timePeriod:           timePeriod,
	}
	// Precompute the regression constants over x = 0..n-1 (n = timePeriod):
	//   sumX       = Σx  = n(n-1)/2
	//   sumXSquare = Σx² = n(n-1)(2n-1)/6
	//   divisor    = sumX² - n·sumXSquare
	timePeriodF := float64(timePeriod)
	timePeriodFMinusOne := timePeriodF - 1.0
	ind.sumX = timePeriodF * timePeriodFMinusOne * 0.5
	ind.sumXSquare = timePeriodF * timePeriodFMinusOne * (2.0*timePeriodF - 1.0) / 6.0
	ind.divisor = ind.sumX*ind.sumX - timePeriodF*ind.sumXSquare
	return &ind, nil
}
// A Linear Regression Indicator (LinReg)
type LinReg struct {
	*LinRegWithoutStorage
	selectData gotrade.DOHLCVDataSelectionFunc // selects which DOHLCV field feeds the regression
	// public variables
	Data []float64 // one regression value per bar once the lookback period is filled
}
// NewLinReg creates a Linear Regression Indicator (LinReg) for online usage.
// selectData is mandatory and chooses the DOHLCV field used as input.
func NewLinReg(timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinReg, err error) {
	if selectData == nil {
		return nil, ErrDOHLCVDataSelectFuncIsNil
	}
	ind := LinReg{
		selectData: selectData,
	}
	// The storage-less core calls back here with each computed value,
	// which is appended to Data and folded into the indicator's bounds.
	ind.LinRegWithoutStorage, err = NewLinRegWithoutStorage(timePeriod,
		func(dataItem float64, slope float64, intercept float64, streamBarIndex int) {
			ind.Data = append(ind.Data, dataItem)
			ind.UpdateMinMax(dataItem, dataItem)
		})
	return &ind, err
}
// NewDefaultLinReg creates a Linear Regression Indicator (LinReg) for online usage with default parameters
// - timePeriod: 14
func NewDefaultLinReg() (indicator *LinReg, err error) {
	const defaultTimePeriod = 14
	return NewLinReg(defaultTimePeriod, gotrade.UseClosePrice)
}
// NewLinRegWithSrcLen creates a Linear Regression Indicator (LinReg) for
// offline usage, pre-allocating Data for the expected number of results.
// Fixes: the construction error is now checked before the indicator is
// used (previously a bad timePeriod caused a nil-pointer dereference on
// GetLookbackPeriod), and the size check compares before subtracting so
// a sourceLength smaller than the lookback no longer wraps the uint
// subtraction into a huge capacity.
func NewLinRegWithSrcLen(sourceLength uint, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinReg, err error) {
	ind, err := NewLinReg(timePeriod, selectData)
	if err != nil {
		return nil, err
	}
	// only initialise the storage if there is enough source data to require it
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, nil
}
// NewDefaultLinRegWithSrcLen creates a Linear Regression Indicator (LinReg)
// for offline usage with default parameters, pre-allocating Data for the
// expected number of results.
// Fixes: the construction error is checked before the indicator is used,
// and the size check compares before subtracting to avoid uint underflow
// when sourceLength is smaller than the lookback period.
func NewDefaultLinRegWithSrcLen(sourceLength uint) (indicator *LinReg, err error) {
	ind, err := NewDefaultLinReg()
	if err != nil {
		return nil, err
	}
	// only initialise the storage if there is enough source data to require it
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
	return ind, nil
}
// NewLinRegForStream creates a Linear Regression Indicator (LinReg) for online usage with a source data stream.
// NOTE(review): err is not checked before subscribing, so a failed
// construction still subscribes the indicator to the stream — confirm
// this is intended.
func NewLinRegForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinReg, err error) {
	ind, err := NewLinReg(timePeriod, selectData)
	priceStream.AddTickSubscription(ind)
	return ind, err
}

// NewDefaultLinRegForStream creates a Linear Regression Indicator (LinReg) for online usage with a source data stream,
// using the default parameters. The same unchecked-err caveat as
// NewLinRegForStream applies.
func NewDefaultLinRegForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *LinReg, err error) {
	ind, err := NewDefaultLinReg()
	priceStream.AddTickSubscription(ind)
	return ind, err
}

// NewLinRegForStreamWithSrcLen creates a Linear Regression Indicator (LinReg) for offline usage with a source data
// stream, pre-allocating storage. The same unchecked-err caveat applies.
func NewLinRegForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *LinReg, err error) {
	ind, err := NewLinRegWithSrcLen(sourceLength, timePeriod, selectData)
	priceStream.AddTickSubscription(ind)
	return ind, err
}

// NewDefaultLinRegForStreamWithSrcLen creates a Linear Regression Indicator (LinReg) for offline usage with a source
// data stream and default parameters. The same unchecked-err caveat applies.
func NewDefaultLinRegForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *LinReg, err error) {
	ind, err := NewDefaultLinRegWithSrcLen(sourceLength)
	priceStream.AddTickSubscription(ind)
	return ind, err
}
// ReceiveDOHLCVTick consumes a source data DOHLCV price tick, applying the
// configured data selection function before forwarding the scalar value to
// the storage-less core.
func (ind *LinReg) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
	var selectedData = ind.selectData(tickData)
	ind.ReceiveTick(selectedData, streamBarIndex)
}
// ReceiveTick consumes one input value. Once timePeriod values have been
// seen it fits a least-squares regression line over the last timePeriod
// values and notifies the configured callback with the line value, slope
// and intercept.
func (ind *LinRegWithoutStorage) ReceiveTick(tickData float64, streamBarIndex int) {
	ind.periodCounter += 1
	if ind.periodCounter >= 0 {
		sumXY := 0.0
		sumY := 0.0
		i := ind.timePeriod
		var value float64 = 0.0
		// periodHistory holds the previous timePeriod-1 values. i is
		// decremented before use, so the oldest stored value receives
		// x = timePeriod-1 and the newest stored value x = 1; the current
		// tick implicitly has x = 0 and contributes nothing to sumXY.
		// NOTE(review): with this indexing the oldest sample carries the
		// largest x, i.e. the slope sign is relative to reverse-
		// chronological order — confirm this matches the intended LinReg
		// semantics.
		for e := ind.periodHistory.Front(); e != nil; e = e.Next() {
			i--
			value = e.Value.(float64)
			sumY += value
			sumXY += (float64(i) * value)
		}
		sumY += tickData
		timePeriod := float64(ind.timePeriod)
		// Standard least-squares slope (m) and intercept (b) using the
		// constants precomputed at construction (sumX, divisor).
		m := (timePeriod*sumXY - ind.sumX*sumY) / ind.divisor
		b := (sumY - m*ind.sumX) / timePeriod
		// The reported value is the line evaluated at x = timePeriod-1.
		result := b + m*float64(timePeriod-1.0)
		// increment the number of results this indicator can be expected to return
		ind.IncDataLength()
		// set the streamBarIndex from which this indicator returns valid results
		ind.SetValidFromBar(streamBarIndex)
		// notify of a new result value though the value available action
		ind.valueAvailableAction(result, m, b, streamBarIndex)
	}
	// Record the tick and trim the history so it never holds more than
	// timePeriod-1 values.
	ind.periodHistory.PushBack(tickData)
	if ind.periodHistory.Len() >= ind.timePeriod {
		first := ind.periodHistory.Front()
		ind.periodHistory.Remove(first)
	}
} | indicators/linreg.go | 0.702122 | 0.523238 | linreg.go | starcoder |
package amcl
import (
r "crypto/rand"
"crypto/sha256"
"io"
"math/big"
"regexp"
"strings"
"github.com/IBM/mathlib/driver"
"github.com/IBM/mathlib/driver/common"
"github.com/hyperledger/fabric-amcl/amcl"
"github.com/hyperledger/fabric-amcl/amcl/FP256BN"
"github.com/pkg/errors"
)
/*********************************************************************/
// fp256bnZr wraps FP256BN.BIG to implement driver.Zr (an element of the
// scalar field of the FP256BN curve).
type fp256bnZr struct {
	*FP256BN.BIG
}

// Plus returns b + a as a new element; neither operand is modified.
func (b *fp256bnZr) Plus(a driver.Zr) driver.Zr {
	return &fp256bnZr{b.BIG.Plus(a.(*fp256bnZr).BIG)}
}

// PowMod returns b^x mod q, where q is the curve's group order.
func (b *fp256bnZr) PowMod(x driver.Zr) driver.Zr {
	q := FP256BN.NewBIGints(FP256BN.CURVE_Order)
	return &fp256bnZr{b.BIG.Powmod(x.(*fp256bnZr).BIG, q)}
}

// Mod reduces b modulo a, in place.
func (b *fp256bnZr) Mod(a driver.Zr) {
	b.BIG.Mod(a.(*fp256bnZr).BIG)
}

// InvModP sets b to its multiplicative inverse modulo p, in place.
func (b *fp256bnZr) InvModP(p driver.Zr) {
	b.BIG.Invmodp(p.(*fp256bnZr).BIG)
}

// Bytes returns the fixed-width (MODBYTES-long) encoding of b.
func (b *fp256bnZr) Bytes() []byte {
	by := make([]byte, int(FP256BN.MODBYTES))
	b.BIG.ToBytes(by)
	return by
}

// Equals reports whether b and p hold the same value, by comparing the
// underlying BIG limb arrays.
func (b *fp256bnZr) Equals(p driver.Zr) bool {
	return *b.BIG == *(p.(*fp256bnZr).BIG)
}

// Copy returns an independent copy of b.
func (b *fp256bnZr) Copy() driver.Zr {
	return &fp256bnZr{FP256BN.NewBIGcopy(b.BIG)}
}

// Clone sets b to an independent copy of a.
func (b *fp256bnZr) Clone(a driver.Zr) {
	c := a.Copy()
	b.BIG = c.(*fp256bnZr).BIG
}

// String renders b via BIG.ToString with leading zeros stripped.
// NOTE(review): a zero value renders as the empty string — confirm that
// callers accept this.
func (b *fp256bnZr) String() string {
	return strings.TrimLeft(b.BIG.ToString(), "0")
}
/*********************************************************************/
// fp256bnGt wraps FP256BN.FP12 to implement driver.Gt (an element of the
// pairing target group).
type fp256bnGt struct {
	*FP256BN.FP12
}

// Equals reports whether a and b are the same Gt element.
func (a *fp256bnGt) Equals(b driver.Gt) bool {
	return a.FP12.Equals(b.(*fp256bnGt).FP12)
}

// IsUnity reports whether a is the identity element.
func (a *fp256bnGt) IsUnity() bool {
	return a.FP12.Isunity()
}

// Inverse inverts a in place.
func (a *fp256bnGt) Inverse() {
	a.FP12.Inverse()
}

// Mul multiplies a by b in place.
func (a *fp256bnGt) Mul(b driver.Gt) {
	a.FP12.Mul(b.(*fp256bnGt).FP12)
}

// ToString renders a in FP12 textual form.
func (b *fp256bnGt) ToString() string {
	return b.FP12.ToString()
}

// Bytes returns the 12*MODBYTES-long serialisation of the element.
func (b *fp256bnGt) Bytes() []byte {
	bytes := make([]byte, 12*int(FP256BN.MODBYTES))
	b.FP12.ToBytes(bytes)
	return bytes
}
/*********************************************************************/
// Fp256bn implements the mathlib curve driver over the FP256BN
// Barreto-Naehrig pairing-friendly curve from the amcl library.
type Fp256bn struct {
}

// Pairing computes the Ate pairing of a and b. The result is unreduced:
// pass it through FExp before comparing or serialising (see GenGt).
func (*Fp256bn) Pairing(a driver.G2, b driver.G1) driver.Gt {
	return &fp256bnGt{FP256BN.Ate(a.(*fp256bnG2).ECP2, b.(*fp256bnG1).ECP)}
}

// Pairing2 computes the product of two Ate pairings, e(p2a,p1a)·e(p2b,p1b),
// also unreduced.
func (*Fp256bn) Pairing2(p2a, p2b driver.G2, p1a, p1b driver.G1) driver.Gt {
	return &fp256bnGt{FP256BN.Ate2(p2a.(*fp256bnG2).ECP2, p1a.(*fp256bnG1).ECP, p2b.(*fp256bnG2).ECP2, p1b.(*fp256bnG1).ECP)}
}

// FExp applies the final exponentiation to an unreduced pairing result.
func (*Fp256bn) FExp(e driver.Gt) driver.Gt {
	return &fp256bnGt{FP256BN.Fexp(e.(*fp256bnGt).FP12)}
}

// ModMul returns a1·b1 mod m.
func (*Fp256bn) ModMul(a1, b1, m driver.Zr) driver.Zr {
	return &fp256bnZr{FP256BN.Modmul(a1.(*fp256bnZr).BIG, b1.(*fp256bnZr).BIG, m.(*fp256bnZr).BIG)}
}

// ModNeg returns -a1 mod m.
func (*Fp256bn) ModNeg(a1, m driver.Zr) driver.Zr {
	return &fp256bnZr{FP256BN.Modneg(a1.(*fp256bnZr).BIG, m.(*fp256bnZr).BIG)}
}

// GenG1 returns the curve's standard G1 generator.
func (*Fp256bn) GenG1() driver.G1 {
	return &fp256bnG1{FP256BN.NewECPbigs(FP256BN.NewBIGints(FP256BN.CURVE_Gx), FP256BN.NewBIGints(FP256BN.CURVE_Gy))}
}

// GenG2 returns the curve's standard G2 generator.
func (*Fp256bn) GenG2() driver.G2 {
	return &fp256bnG2{FP256BN.NewECP2fp2s(
		FP256BN.NewFP2bigs(FP256BN.NewBIGints(FP256BN.CURVE_Pxa), FP256BN.NewBIGints(FP256BN.CURVE_Pxb)),
		FP256BN.NewFP2bigs(FP256BN.NewBIGints(FP256BN.CURVE_Pya), FP256BN.NewBIGints(FP256BN.CURVE_Pyb)))}
}

// GenGt returns the Gt generator, computed as the reduced pairing of the
// G2 and G1 generators.
func (p *Fp256bn) GenGt() driver.Gt {
	return &fp256bnGt{FP256BN.Fexp(FP256BN.Ate(p.GenG2().(*fp256bnG2).ECP2, p.GenG1().(*fp256bnG1).ECP))}
}

// GroupOrder returns the group order as a Zr element.
func (p *Fp256bn) GroupOrder() driver.Zr {
	return &fp256bnZr{FP256BN.NewBIGints(FP256BN.CURVE_Order)}
}

// FieldBytes returns the byte length of a field element (MODBYTES).
func (p *Fp256bn) FieldBytes() int {
	return int(FP256BN.MODBYTES)
}

// NewG1 returns a fresh G1 point.
func (p *Fp256bn) NewG1() driver.G1 {
	return &fp256bnG1{FP256BN.NewECP()}
}

// NewG2 returns a fresh G2 point.
func (p *Fp256bn) NewG2() driver.G2 {
	return &fp256bnG2{FP256BN.NewECP2()}
}

// NewG1FromCoords constructs a G1 point from affine coordinates.
func (p *Fp256bn) NewG1FromCoords(ix, iy driver.Zr) driver.G1 {
	return &fp256bnG1{FP256BN.NewECPbigs(ix.(*fp256bnZr).BIG, iy.(*fp256bnZr).BIG)}
}

// NewZrFromBytes deserialises a Zr element from its fixed-width encoding.
func (p *Fp256bn) NewZrFromBytes(b []byte) driver.Zr {
	return &fp256bnZr{FP256BN.FromBytes(b)}
}
// NewZrFromInt converts a signed int64 into a Zr element.
//
// The absolute value of i is serialized into a 32-byte big-endian buffer
// (via common.BigToBytes), which is then split into four 7-byte low limbs
// plus the remaining high bytes, matching the NLEN-chunk internal
// representation expected by FP256BN.NewBIGints. Negative inputs are
// handled by negating the resulting BIG (0 - |i|).
//
// NOTE(review): for i == math.MinInt64 the product i*sign overflows int64 —
// confirm callers never pass that value.
func (p *Fp256bn) NewZrFromInt(i int64) driver.Zr {
	var i0, i1, i2, i3, i4 int64
	sign := int64(1)
	if i < 0 {
		sign = -1
	}
	// Big-endian bytes of |i|, padded to 32 bytes.
	b := common.BigToBytes(big.NewInt(i * sign))
	pos := 32
	// Peel off 7-byte limbs from the least-significant end.
	i0 = new(big.Int).SetBytes(b[pos-7 : pos]).Int64()
	pos -= 7
	i1 = new(big.Int).SetBytes(b[pos-7 : pos]).Int64()
	pos -= 7
	i2 = new(big.Int).SetBytes(b[pos-7 : pos]).Int64()
	pos -= 7
	i3 = new(big.Int).SetBytes(b[pos-7 : pos]).Int64()
	pos -= 7
	// Whatever remains becomes the most-significant chunk.
	i4 = new(big.Int).SetBytes(b[0:pos]).Int64()
	zr := FP256BN.NewBIGints([FP256BN.NLEN]FP256BN.Chunk{FP256BN.Chunk(i0), FP256BN.Chunk(i1), FP256BN.Chunk(i2), FP256BN.Chunk(i3), FP256BN.Chunk(i4)})
	if sign < 0 {
		zr = FP256BN.NewBIGint(0).Minus(zr)
	}
	return &fp256bnZr{zr}
}
// NewG1FromBytes deserializes a G1 point.
func (p *Fp256bn) NewG1FromBytes(b []byte) driver.G1 {
	return &fp256bnG1{FP256BN.ECP_fromBytes(b)}
}

// NewG2FromBytes deserializes a G2 point.
func (p *Fp256bn) NewG2FromBytes(b []byte) driver.G2 {
	return &fp256bnG2{FP256BN.ECP2_fromBytes(b)}
}

// NewGtFromBytes deserializes a Gt element.
func (p *Fp256bn) NewGtFromBytes(b []byte) driver.Gt {
	return &fp256bnGt{FP256BN.FP12_fromBytes(b)}
}

// ModAdd returns (a + b) mod m.
func (p *Fp256bn) ModAdd(a, b, m driver.Zr) driver.Zr {
	c := a.Plus(b)
	c.Mod(m)
	return c
}

// ModSub returns (a - b) mod m, computed as (a + (-b mod m)) mod m.
func (p *Fp256bn) ModSub(a, b, m driver.Zr) driver.Zr {
	return p.ModAdd(a, p.ModNeg(b, m), m)
}

// HashToZr hashes data with SHA-256 and reduces the digest modulo the
// group order, yielding a Zr element.
func (p *Fp256bn) HashToZr(data []byte) driver.Zr {
	digest := sha256.Sum256(data)
	digestBig := FP256BN.FromBytes(digest[:])
	digestBig.Mod(FP256BN.NewBIGints(FP256BN.CURVE_Order))
	return &fp256bnZr{digestBig}
}

// HashToG1 maps data onto a G1 point using AMCL's BLS hash-to-curve routine.
func (p *Fp256bn) HashToG1(data []byte) driver.G1 {
	return &fp256bnG1{FP256BN.Bls_hash(string(data))}
}

// Rand returns an AMCL PRNG wrapped as an io.Reader, seeded with 32 bytes
// read from the package-level randomness source r.
func (p *Fp256bn) Rand() (io.Reader, error) {
	seedLength := 32
	b := make([]byte, seedLength)
	_, err := r.Read(b)
	if err != nil {
		return nil, errors.Wrap(err, "error getting randomness for seed")
	}
	rng := amcl.NewRAND()
	rng.Clean()
	rng.Seed(seedLength, b)
	return &rand{rng}, nil
}

// NewRandomZr samples a random element of Zq from rng, which must be a
// reader previously produced by Rand.
func (p *Fp256bn) NewRandomZr(rng io.Reader) driver.Zr {
	// curve order q
	q := FP256BN.NewBIGints(FP256BN.CURVE_Order)
	// Take random element in Zq
	return &fp256bnZr{FP256BN.Randomnum(q, rng.(*rand).R)}
}
/*********************************************************************/
// fp256bnG1 is a driver.G1 implementation backed by an AMCL ECP point.
type fp256bnG1 struct {
	*FP256BN.ECP
}

// Clone overwrites e with a copy of a.
func (e *fp256bnG1) Clone(a driver.G1) {
	e.ECP.Copy(a.(*fp256bnG1).ECP)
}

// Copy returns an independent copy of e.
func (e *fp256bnG1) Copy() driver.G1 {
	c := FP256BN.NewECP()
	c.Copy(e.ECP)
	return &fp256bnG1{c}
}

// Add adds a to e, in place.
func (e *fp256bnG1) Add(a driver.G1) {
	e.ECP.Add(a.(*fp256bnG1).ECP)
}

// Mul returns the scalar multiple a*e.
func (e *fp256bnG1) Mul(a driver.Zr) driver.G1 {
	return &fp256bnG1{FP256BN.G1mul(e.ECP, a.(*fp256bnZr).BIG)}
}

// Mul2 returns the double scalar multiplication ee*e + f*Q.
func (e *fp256bnG1) Mul2(ee driver.Zr, Q driver.G1, f driver.Zr) driver.G1 {
	return &fp256bnG1{e.ECP.Mul2(ee.(*fp256bnZr).BIG, Q.(*fp256bnG1).ECP, f.(*fp256bnZr).BIG)}
}

// Equals reports whether e and a are the same point.
func (e *fp256bnG1) Equals(a driver.G1) bool {
	return e.ECP.Equals(a.(*fp256bnG1).ECP)
}

// IsInfinity reports whether e is the point at infinity.
func (e *fp256bnG1) IsInfinity() bool {
	return e.ECP.Is_infinity()
}

// Bytes serializes e uncompressed: a leading format byte plus both
// coordinates (2*MODBYTES + 1 bytes total).
func (e *fp256bnG1) Bytes() []byte {
	b := make([]byte, 2*int(FP256BN.MODBYTES)+1)
	e.ECP.ToBytes(b, false)
	return b
}

// Sub subtracts a from e, in place.
func (e *fp256bnG1) Sub(a driver.G1) {
	e.ECP.Sub(a.(*fp256bnG1).ECP)
}

// g1StrRegexp matches AMCL's "(xhex,yhex)" point rendering so the
// coordinates can be extracted and normalized.
var g1StrRegexp *regexp.Regexp = regexp.MustCompile(`^\(([0-9a-f]+),([0-9a-f]+)\)$`)

// String renders the point as "(x,y)" with leading zeroes stripped from
// each hex coordinate.
func (b *fp256bnG1) String() string {
	rawstr := b.ECP.ToString()
	m := g1StrRegexp.FindAllStringSubmatch(rawstr, -1)
	return "(" + strings.TrimLeft(m[0][1], "0") + "," + strings.TrimLeft(m[0][2], "0") + ")"
}
/*********************************************************************/
// fp256bnG2 is a driver.G2 implementation backed by an AMCL ECP2 point.
type fp256bnG2 struct {
	*FP256BN.ECP2
}

// Equals reports whether e and a are the same point.
func (e *fp256bnG2) Equals(a driver.G2) bool {
	return e.ECP2.Equals(a.(*fp256bnG2).ECP2)
}

// Clone overwrites e with a copy of a.
func (e *fp256bnG2) Clone(a driver.G2) {
	e.ECP2.Copy(a.(*fp256bnG2).ECP2)
}

// Copy returns an independent copy of e.
func (e *fp256bnG2) Copy() driver.G2 {
	c := FP256BN.NewECP2()
	c.Copy(e.ECP2)
	return &fp256bnG2{c}
}

// Add adds a to e, in place.
func (e *fp256bnG2) Add(a driver.G2) {
	e.ECP2.Add(a.(*fp256bnG2).ECP2)
}

// Sub subtracts a from e, in place.
func (e *fp256bnG2) Sub(a driver.G2) {
	e.ECP2.Sub(a.(*fp256bnG2).ECP2)
}

// Mul returns the scalar multiple a*e.
func (e *fp256bnG2) Mul(a driver.Zr) driver.G2 {
	return &fp256bnG2{e.ECP2.Mul(a.(*fp256bnZr).BIG)}
}

// Affine converts e's internal representation to affine coordinates.
func (e *fp256bnG2) Affine() {
	e.ECP2.Affine()
}

// Bytes serializes e into a fixed-size buffer of 4*MODBYTES bytes
// (two Fp2 coordinates of two limbs each).
func (e *fp256bnG2) Bytes() []byte {
	b := make([]byte, 4*int(FP256BN.MODBYTES))
	e.ECP2.ToBytes(b)
	return b
}

// String returns AMCL's textual rendering of the point.
func (b *fp256bnG2) String() string {
	return b.ECP2.ToString()
}
/*********************************************************************/
// rand adapts an AMCL PRNG to io.Reader so it can be carried through
// interfaces that expect a reader. Consumers in this package type-assert
// back to *rand and use R directly; Read itself is never invoked.
type rand struct {
	R *amcl.RAND
}

// Read is required by io.Reader but intentionally unimplemented: the PRNG is
// always accessed via the R field (see NewRandomZr).
func (*rand) Read(p []byte) (n int, err error) {
	panic("not used")
}
/*********************************************************************/
func bigToBytes(big *FP256BN.BIG) []byte {
ret := make([]byte, int(FP256BN.MODBYTES))
big.ToBytes(ret)
return ret
} | vendor/github.com/IBM/mathlib/driver/amcl/fp256bn.go | 0.570212 | 0.410993 | fp256bn.go | starcoder |
package randvar
import (
"math"
"sync"
"github.com/cockroachdb/errors"
"golang.org/x/exp/rand"
)
const (
	// Defaults borrowed from YCSB's ScrambledZipfianGenerator:
	// https://github.com/brianfrankcooper/YCSB/blob/f886c1e7988f8f4965cb88a1fe2f6bad2c61b56d/core/src/main/java/com/yahoo/ycsb/generator/ScrambledZipfianGenerator.java#L33-L35
	defaultMax   = 10000000000
	defaultTheta = 0.99
	// defaultZetaN is zeta(defaultMax, defaultTheta), precomputed because
	// summing 10^10 terms from scratch is prohibitively slow (see
	// computeZetaFromScratch).
	defaultZetaN = 26.46902820178302
)
// Zipf is a random number generator that generates random numbers from a Zipf
// distribution. Unlike rand.Zipf, this generator supports incrementing the max
// parameter without performing an expensive recomputation of the underlying
// hidden parameters, which is a pattern used in [1] for efficiently generating
// large volumes of Zipf-distributed records for synthetic data. Second,
// rand.Zipf only supports theta <= 1; we support all values of theta.
type Zipf struct {
	// Supplied constants.
	theta float64 // skew of the distribution
	min   uint64  // smallest value the generator can return
	// Internally computed constants (see NewZipf for the formulas).
	alpha, zeta2 float64
	halfPowTheta float64
	// Mutable state, guarded by the embedded mutex: max can grow via IncMax,
	// which forces eta and zetaN to be recomputed.
	mu struct {
		sync.Mutex
		rng   *rand.Rand
		max   uint64
		eta   float64
		zetaN float64
	}
}

// NewDefaultZipf constructs a new Zipf generator with the default parameters.
func NewDefaultZipf(rng *rand.Rand) (*Zipf, error) {
	return NewZipf(rng, 1, defaultMax, defaultTheta)
}
// NewZipf constructs a new Zipf generator with the given parameters. Returns
// an error if the parameters are outside the accepted range: min <= max and
// 0 < theta with theta != 1 (the zeta computation has a pole at theta == 1).
func NewZipf(rng *rand.Rand, min, max uint64, theta float64) (*Zipf, error) {
	if min > max {
		return nil, errors.Errorf("min %d > max %d", errors.Safe(min), errors.Safe(max))
	}
	// The error message documents the contract "0 < theta"; the previous
	// check (theta < 0.0) incorrectly admitted theta == 0.
	if theta <= 0.0 || theta == 1.0 {
		return nil, errors.New("0 < theta, and theta != 1")
	}
	z := &Zipf{
		min:   min,
		theta: theta,
	}
	z.mu.rng = ensureRand(rng)
	z.mu.max = max
	// Compute the hidden parameters of the inverse-CDF approximation.
	z.zeta2 = computeZetaFromScratch(2, theta)
	z.halfPowTheta = 1.0 + math.Pow(0.5, z.theta)
	z.mu.zetaN = computeZetaFromScratch(max+1-min, theta)
	z.alpha = 1.0 / (1.0 - theta)
	z.mu.eta = (1 - math.Pow(2.0/float64(z.mu.max+1-z.min), 1.0-theta)) / (1.0 - z.zeta2/z.mu.zetaN)
	return z, nil
}
// computeZetaIncrementally extends a previously computed zeta sum: given
// sum = zeta(oldMax, theta), it returns zeta(max, theta) by adding only the
// terms for oldMax+1 .. max. max must be >= oldMax; shrinking is unsupported.
func computeZetaIncrementally(oldMax, max uint64, theta float64, sum float64) float64 {
	if max < oldMax {
		panic("unable to decrement max!")
	}
	total := sum
	for k := oldMax + 1; k <= max; k++ {
		total += 1.0 / math.Pow(float64(k), theta)
	}
	return total
}
// computeZetaFromScratch computes the value
// zeta(n, theta) = (1/1)^theta + (1/2)^theta + (1/3)^theta + ... + (1/n)^theta
// by summing from the first term.
func computeZetaFromScratch(n uint64, theta float64) float64 {
	if n == defaultMax && theta == defaultTheta {
		// Precomputed value, borrowed from ScrambledZipfianGenerator.java. This is
		// quite slow to calculate from scratch due to the large n value.
		return defaultZetaN
	}
	return computeZetaIncrementally(0, n, theta, 0.0)
}
// IncMax increments max by delta and incrementally recomputes the internal
// values (zetaN, eta) that depend on it.
//
// NOTE(review): the previous doc comment claimed an error return, but the
// function returns nothing; computeZetaIncrementally panics if max would
// decrease. A negative delta wraps around via the uint64 conversion —
// confirm callers only pass non-negative deltas.
func (z *Zipf) IncMax(delta int) {
	z.mu.Lock()
	oldMax := z.mu.max
	z.mu.max += uint64(delta)
	// Extend zeta with only the newly added terms, then refresh eta.
	z.mu.zetaN = computeZetaIncrementally(oldMax+1-z.min, z.mu.max+1-z.min, z.theta, z.mu.zetaN)
	z.mu.eta = (1 - math.Pow(2.0/float64(z.mu.max+1-z.min), 1.0-z.theta)) / (1.0 - z.zeta2/z.mu.zetaN)
	z.mu.Unlock()
}
// Uint64 draws a new value between min and max, with probabilities according
// to the Zipf distribution.
func (z *Zipf) Uint64() uint64 {
z.mu.Lock()
u := z.mu.rng.Float64()
uz := u * z.mu.zetaN
var result uint64
if uz < 1.0 {
result = z.min
} else if uz < z.halfPowTheta {
result = z.min + 1
} else {
spread := float64(z.mu.max + 1 - z.min)
result = z.min + uint64(spread*math.Pow(z.mu.eta*u-z.mu.eta+1.0, z.alpha))
}
z.mu.Unlock()
return result
} | internal/randvar/zipf.go | 0.805785 | 0.412234 | zipf.go | starcoder |
package curve
import (
"errors"
"fmt"
"math/big"
GF "github.com/armfazh/hash-to-curve-ref/go-h2c/field"
)
// MTCurve is a Montgomery curve: the set of points satisfying
// By^2 = x^3 + Ax^2 + x over the field F, plus the point at infinity.
type MTCurve struct{ *params }

// M is a shorthand alias for a Montgomery-curve pointer.
type M = *MTCurve

// String returns the curve equation followed by the parameter listing.
func (e *MTCurve) String() string { return "By^2=x^3+Ax^2+x\n" + e.params.String() }

// NewMontgomery returns a Montgomery curve with the given parameters.
// It panics if the parameters describe a singular curve (see IsValid).
func NewMontgomery(id CurveID, f GF.Field, a, b GF.Elt, r, h *big.Int) *MTCurve {
	if e := (&MTCurve{&params{
		Id: id, F: f, A: a, B: b, R: r, H: h,
	}}); e.IsValid() {
		return e
	}
	panic(errors.New("can't instantiate a Montgomery curve"))
}

// NewPoint returns the affine point (x, y) on the curve.
// It panics if (x, y) does not satisfy the curve equation.
func (e *MTCurve) NewPoint(x, y GF.Elt) (P Point) {
	if P = (&ptMt{e, &afPoint{x: x, y: y}}); e.IsOnCurve(P) {
		return P
	}
	panic(fmt.Errorf("p:%v not on %v", P, e))
}
// IsValid reports whether the parameters define a nonsingular Montgomery
// curve, i.e. B(A^2-4) != 0.
func (e *MTCurve) IsValid() bool {
	F := e.F
	t0 := F.Sqr(e.A)         // A^2
	t0 = F.Sub(t0, F.Elt(4)) // A^2-4
	t0 = F.Mul(t0, e.B)      // B(A^2-4)
	return !F.IsZero(t0)     // B(A^2-4) != 0
}

// IsEqual reports whether ec is a Montgomery curve over the same field with
// the same A and B coefficients. It panics if ec is not an *MTCurve.
func (e *MTCurve) IsEqual(ec EllCurve) bool {
	e0 := ec.(*MTCurve)
	return e.F.IsEqual(e0.F) && e.F.AreEqual(e.A, e0.A) && e.F.AreEqual(e.B, e0.B)
}

// IsOnCurve reports whether p satisfies By^2 = ((x+A)x+1)x. The point at
// infinity is on the curve by definition.
func (e *MTCurve) IsOnCurve(p Point) bool {
	if _, isZero := p.(*infPoint); isZero {
		return isZero
	}
	P := p.(*ptMt)
	F := e.F
	var t0, t1 GF.Elt
	t0 = F.Add(P.x, e.A)    // x+A
	t0 = F.Mul(t0, P.x)     // (x+A)x
	t0 = F.Add(t0, F.One()) // (x+A)x+1
	t0 = F.Mul(t0, P.x)     // ((x+A)x+1)x
	t1 = F.Sqr(P.y)         // y^2
	t1 = F.Mul(t1, e.B)     // By^2
	return F.AreEqual(t0, t1)
}
// Identity returns the point at infinity, the group's neutral element.
func (e *MTCurve) Identity() Point { return &infPoint{} }

// Add returns p+q, dispatching to the relevant case: an identity operand,
// inverse operands, doubling, or the generic chord addition.
func (e *MTCurve) Add(p, q Point) Point {
	switch {
	case p.IsIdentity():
		return q.Copy()
	case q.IsIdentity():
		return p.Copy()
	case p.IsEqual(e.Neg(q)):
		return e.Identity()
	case p.IsEqual(q):
		return e.Double(p)
	default:
		return e.add(p, q)
	}
}
// Neg returns -p, the reflection of p across the x-axis; the identity is
// its own negation.
func (e *MTCurve) Neg(p Point) Point {
	if _, isZero := p.(*infPoint); isZero {
		return e.Identity()
	}
	P := p.(*ptMt)
	return &ptMt{e, &afPoint{x: P.x.Copy(), y: e.F.Neg(P.y)}}
}

// add performs the generic chord addition of two distinct, non-inverse
// affine points using the slope l = (y2-y1)/(x2-x1). Callers (Add) must
// have already excluded the identity/doubling/inverse cases; equal
// x-coordinates would make the slope undefined, hence the panic.
func (e *MTCurve) add(p, q Point) Point {
	P := p.(*ptMt)
	Q := q.(*ptMt)
	F := e.F

	if F.AreEqual(P.x, Q.x) {
		panic("wrong inputs")
	}

	var t0, t1, ll GF.Elt
	t0 = F.Sub(Q.y, P.y) // (y2-y1)
	t1 = F.Sub(Q.x, P.x) // (x2-x1)
	t1 = F.Inv(t1)       // 1/(x2-x1)
	ll = F.Mul(t0, t1)   // l = (y2-y1)/(x2-x1)

	t0 = F.Sqr(ll)       // l^2
	t0 = F.Mul(t0, e.B)  // Bl^2
	t0 = F.Sub(t0, e.A)  // Bl^2-A
	t0 = F.Sub(t0, P.x)  // Bl^2-A-x1
	x := F.Sub(t0, Q.x)  // x' = Bl^2-A-x1-x2

	t0 = F.Sub(P.x, x)   // x1-x3
	t0 = F.Mul(t0, ll)   // l(x1-x3)
	y := F.Sub(t0, P.y)  // y3 = l(x1-x3)-y1

	return &ptMt{e, &afPoint{x: x, y: y}}
}

// Double returns 2p using the tangent-line slope
// l = (3x^2+2Ax+1)/(2By). Two-torsion points (y == 0) double to the
// identity, which also avoids the division by zero.
func (e *MTCurve) Double(p Point) Point {
	if _, ok := p.(*infPoint); ok {
		return e.Identity()
	}
	P := p.(*ptMt)
	if P.IsTwoTorsion() {
		return e.Identity()
	}

	F := e.F
	var t0, t1, ll GF.Elt
	t0 = F.Mul(F.Elt(3), P.x) // 3x
	t1 = F.Mul(F.Elt(2), e.A) // 2A
	t0 = F.Add(t0, t1)        // 3x+2A
	t0 = F.Mul(t0, P.x)       // (3x+2A)x
	t1 = F.Add(t0, F.One())   // (3x+2A)x+1
	t0 = F.Mul(F.Elt(2), e.B) // 2B
	t0 = F.Mul(t0, P.y)       // 2By
	t0 = F.Inv(t0)            // 1/2By
	ll = F.Mul(t1, t0)        // l = (3x^2+2Ax+1)/(2By)

	t0 = F.Sqr(ll)      // l^2
	t0 = F.Mul(t0, e.B) // Bl^2
	t0 = F.Sub(t0, e.A) // Bl^2-A
	t0 = F.Sub(t0, P.x) // Bl^2-A-x
	x := F.Sub(t0, P.x) // x' = Bl^2-A-2x

	t0 = F.Sub(P.x, x)  // x-x'
	t0 = F.Mul(t0, ll)  // l(x-x')
	y := F.Sub(t0, P.y) // y3 = l(x-x')-y1

	return &ptMt{e, &afPoint{x: x, y: y}}
}

// ScalarMult returns k*p via left-to-right double-and-add.
//
// NOTE(review): this is a variable-time algorithm (the add depends on each
// scalar bit) and assumes k >= 0 — confirm it is never used with secret
// scalars or negative k.
func (e *MTCurve) ScalarMult(p Point, k *big.Int) Point {
	Q := e.Identity()
	for i := k.BitLen() - 1; i >= 0; i-- {
		Q = e.Double(Q)
		if k.Bit(i) != 0 {
			Q = e.Add(Q, p)
		}
	}
	return Q
}

// ClearCofactor multiplies p by the curve's cofactor H, mapping it into the
// prime-order subgroup.
func (e *MTCurve) ClearCofactor(p Point) Point { return e.ScalarMult(p, e.H) }
// ptMt is an affine point on a Montgomery curve.
type ptMt struct {
*MTCurve
*afPoint
}
func (p *ptMt) String() string { return p.afPoint.String() }
func (p *ptMt) Copy() Point { return &ptMt{p.MTCurve, p.copy()} }
func (p *ptMt) IsEqual(q Point) bool {
qq := q.(*ptMt)
return p.MTCurve.IsEqual(qq.MTCurve) && p.isEqual(p.F, qq.afPoint)
}
func (p *ptMt) IsIdentity() bool { return false }
func (p *ptMt) IsTwoTorsion() bool { return p.F.IsZero(p.y) } | go-h2c/curve/montgomery.go | 0.790813 | 0.463141 | montgomery.go | starcoder |
package utils
import (
"fmt"
"reflect"
"strconv"
)
// NewValue allocates a fresh value of type t via reflection and returns a
// pointer to it (reflect.New(t)).
//
// When t is itself a (possibly nested) pointer type, every level of
// indirection is allocated so the whole chain is non-nil. If the final
// element is a map, it is initialized with reflect.MakeMap so it is
// immediately writable.
func NewValue(t reflect.Type) (v reflect.Value) {
	v = reflect.New(t)
	ov := v // remember the outermost pointer; it is what we return
	for t.Kind() == reflect.Ptr {
		// Allocate the next level of indirection and hook it in.
		v = v.Elem()
		t = t.Elem()
		e := reflect.New(t)
		v.Set(e)
	}
	if e := v.Elem(); e.Kind() == reflect.Map && e.IsNil() {
		v.Elem().Set(reflect.MakeMap(v.Elem().Type()))
	}
	return ov
}
// ToArray converts value into a string slice. For a []string input, blank
// entries are dropped (and an empty, non-nil slice is returned for empty
// input); []interface{} elements are stringified as-is; any other value is
// stringified and wrapped in a one-element slice unless it is blank.
func ToArray(value interface{}) (values []string) {
	switch v := value.(type) {
	case []string:
		values = []string{}
		for _, s := range v {
			if s == "" {
				continue
			}
			values = append(values, s)
		}
	case []interface{}:
		for _, item := range v {
			values = append(values, fmt.Sprint(item))
		}
	default:
		if s := fmt.Sprint(value); s != "" {
			values = []string{s}
		}
	}
	return
}
// ToString converts value to a string. If value is a slice ([]string or
// []interface{}), the first element is used; an empty slice yields "".
// Any other type is formatted with %v.
//
// Rewritten as a type switch instead of a chain of if/else type assertions.
func ToString(value interface{}) string {
	switch v := value.(type) {
	case []string:
		if len(v) > 0 {
			return v[0]
		}
		return ""
	case string:
		return v
	case []interface{}:
		if len(v) > 0 {
			return fmt.Sprintf("%v", v[0])
		}
		return ""
	default:
		return fmt.Sprintf("%v", value)
	}
}
// ToInt get int from value, if passed value is empty string, result will be 0
func ToInt(value interface{}) int64 {
if result := ToString(value); result == "" {
return 0
} else if i, err := strconv.ParseInt(result, 10, 64); err == nil {
return i
} else {
panic("failed to parse int: " + result)
}
}
// ToUint get uint from value, if passed value is empty string, result will be 0
func ToUint(value interface{}) uint64 {
if result := ToString(value); result == "" {
return 0
} else if i, err := strconv.ParseUint(result, 10, 64); err == nil {
return i
} else {
panic("failed to parse uint: " + result)
}
}
// ToFloat get float from value, if passed value is empty string, result will be 0
func ToFloat(value interface{}) float64 {
if result := ToString(value); result == "" {
return 0
} else if i, err := strconv.ParseFloat(result, 64); err == nil {
return i
} else {
panic("failed to parse float: " + result)
}
} | utils/meta.go | 0.561696 | 0.401365 | meta.go | starcoder |
package main
import r "github.com/lachee/raylib-goplus/raylib"
import "math"
// orbitSpeed is the per-frame orbit increment in radians (initially 1 degree);
// it is adjustable at runtime via the "speed" slider in main.
var orbitSpeed = float64(r.Deg2Rad)

// zoomSpeed is the per-frame distance change applied by the W/S keys.
var zoomSpeed = 1.0

// camera is the shared 3D camera that main orbits around the scene origin.
var camera r.Camera
// main opens a raylib window and runs the render loop: each frame it applies
// keyboard/auto orbit input, repositions the camera on a circle around the
// origin, draws the static 3D shapes, and presents sliders/checkbox to tune
// distance, orbit angle, orbit speed, and auto-orbit mode.
func main() {
	screenWidth := 800
	screenHeight := 450
	r.InitWindow(screenWidth, screenHeight, "Raylib Go Plus - 3D Primatives")
	camera = r.NewCamera(r.NewVector3(0.0, 10.0, 10.0), r.NewVector3(0.0, 0.0, 0.0), r.NewVector3(0.0, 1.0, 0.0), 45, r.CameraTypePerspective)
	r.SetTargetFPS(60)
	selfOrbit := true
	orbit := 0.0    // current orbit angle in radians
	distance := 10.0 // camera distance from the origin
	for !r.WindowShouldClose() {
		//Modify the orbits
		odif, ddif := GetOrbitInput()
		orbit += odif
		distance += ddif
		if selfOrbit {
			orbit += orbitSpeed
		}
		//Loop the orbit: wrap the angle at +/- 2*pi so it stays bounded.
		if orbit > 2*math.Pi {
			orbit = -2 * math.Pi
		} else if orbit < -2*math.Pi {
			orbit = 2 * math.Pi
		}
		//Apply the orbit modification: place the camera on a circle of the
		//chosen radius in the XZ plane.
		camera.Position.X = float32(math.Sin(orbit) * distance)
		camera.Position.Z = float32(math.Cos(orbit) * distance)
		r.BeginDrawing()
		//Draw the static shapes
		drawShapes()
		//Draw a UI to update the orbits
		r.GuiLabel(r.NewRectangle(10, 25, 100, 20), "distance")
		distance = float64(r.GuiSlider(r.NewRectangle(100, 25, 150, 20), "0%", "100%", float32(distance), 2, 100))
		r.GuiLabel(r.NewRectangle(10, 50, 100, 20), "orbit")
		orbit = float64(r.GuiSlider(r.NewRectangle(100, 50, 150, 20), "-360", "360", float32(orbit)*r.Rad2Deg, -360, 360) * r.Deg2Rad)
		r.GuiLabel(r.NewRectangle(10, 75, 100, 20), "speed")
		orbitSpeed = float64(r.GuiSlider(r.NewRectangle(100, 75, 150, 20), "-100%", "100%", float32(orbitSpeed)*r.Rad2Deg, -10, 10) * r.Deg2Rad)
		//Enable / Disable the auto fly mode
		selfOrbit = r.GuiCheckBox(r.NewRectangle(100, 100, 20, 20), "Auto Orbit", selfOrbit)
		r.EndDrawing()
	}
	r.CloseWindow()
}
// drawShapes clears the frame and renders a fixed arrangement of 3D
// primitives (cubes, spheres, cylinders and a ground grid) through the
// shared camera, plus an FPS counter overlay.
func drawShapes() {
	//Shape Source: https://github.com/gen2brain/raylib-go/blob/master/examples/models/geometric_shapes/main.go
	r.ClearBackground(r.RayWhite)
	r.BeginMode3D(camera)
	r.DrawCube(r.NewVector3(-4.0, 0.0, 2.0), 2.0, 5.0, 2.0, r.Red)
	r.DrawCubeWires(r.NewVector3(-4.0, 0.0, 2.0), 2.0, 5.0, 2.0, r.Gold)
	r.DrawCubeWires(r.NewVector3(-4.0, 0.0, -2.0), 3.0, 6.0, 2.0, r.Maroon)
	r.DrawSphere(r.NewVector3(-1.0, 0.0, -2.0), 1.0, r.Green)
	r.DrawSphereWires(r.NewVector3(1.0, 0.0, 2.0), 2.0, 16, 16, r.Lime)
	r.DrawCylinder(r.NewVector3(4.0, 0.0, -2.0), 1.0, 2.0, 3.0, 4, r.GopherBlue)
	r.DrawCylinderWires(r.NewVector3(4.0, 0.0, -2.0), 1.0, 2.0, 3.0, 4, r.DarkBlue)
	r.DrawCylinderWires(r.NewVector3(4.5, -1.0, 2.0), 1.0, 1.0, 2.0, 6, r.Brown)
	r.DrawCylinder(r.NewVector3(1.0, 0.0, -4.0), 0.0, 1.5, 3.0, 8, r.Gold)
	r.DrawCylinderWires(r.NewVector3(1.0, 0.0, -4.0), 0.0, 1.5, 3.0, 8, r.Pink)
	r.DrawGrid(10, 1.0) // Draw a grid
	r.EndMode3D()
	r.DrawFPS(10, 10)
}
func GetOrbitInput() (float64, float64) {
orbit := 0.0
dist := 0.0
if r.IsKeyDown(r.KeyA) {
orbit -= orbitSpeed
}
if r.IsKeyDown(r.KeyD) {
orbit += orbitSpeed
}
if r.IsKeyDown(r.KeyW) {
dist -= zoomSpeed
}
if r.IsKeyDown(r.KeyS) {
dist += zoomSpeed
}
return orbit, dist
} | raylib-example/3dprimitives/3dprimitives.go | 0.701202 | 0.57093 | 3dprimitives.go | starcoder |
package dual
import (
"bytes"
"log"
"math/rand"
"time"
"github.com/pkg/errors"
G "gorgonia.org/gorgonia"
"gorgonia.org/tensor"
"gorgonia.org/tensor/native"
)
// Train is a basic trainer: it runs `iterations` epochs of `batches`
// mini-batch SGD steps (vanilla solver, learning rate 0.1) over the provided
// inputs (Xs), policy targets, and value targets, reshuffling the data after
// every epoch. Sliced batch tensors are returned to the tensor pool after use.
func Train(d *Dual, Xs, policies, values *tensor.Dense, batches, iterations int) error {
	m := G.NewTapeMachine(d.g, G.BindDualValues(d.Model()...))
	model := G.NodesToValueGrads(d.Model())
	solver := G.NewVanillaSolver(G.WithLearnRate(0.1))
	var s slicer
	for i := 0; i < iterations; i++ {
		// var cost float32
		for bat := 0; bat < batches; bat++ {
			// Bind the current batch slice to the network inputs/targets.
			batchStart := bat * d.Config.BatchSize
			batchEnd := batchStart + d.Config.BatchSize
			Xs2 := s.Slice(Xs, sli(batchStart, batchEnd))
			π := s.Slice(policies, sli(batchStart, batchEnd))
			v := s.Slice(values, sli(batchStart, batchEnd))
			G.Let(d.planes, Xs2)
			G.Let(d.Π, π)
			G.Let(d.V, v)
			if err := m.RunAll(); err != nil {
				return err
			}
			// cost = d.cost.Data().(float32)
			if err := solver.Step(model); err != nil {
				return err
			}
			m.Reset()
			// Hand the batch views back to the tensor pool.
			tensor.ReturnTensor(Xs2)
			tensor.ReturnTensor(π)
			tensor.ReturnTensor(v)
		}
		if err := shuffleBatch(Xs, policies, values); err != nil {
			return err
		}
		// TODO: add a channel to send training cost data down
		// log.Printf("%d\t%v", i, cost/float32(batches))
	}
	return nil
}
// shuffleBatch shuffles the rows of Xs, π, and v in unison, so that
// (input, policy, value) triples stay aligned. The tensors are temporarily
// reshaped to 2D so native row views can be swapped, then restored to their
// original shapes.
//
// NOTE(review): the swap buffer tmp is sized to Xs's row width but is also
// used to swap π rows — if a π row is ever wider than an Xs row the swap
// would be truncated by copy; confirm the policy dimension never exceeds
// the flattened input dimension.
func shuffleBatch(Xs, π, v *tensor.Dense) (err error) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	oriXs := Xs.Shape().Clone()
	oriPis := π.Shape().Clone()
	defer func() {
		if r := recover(); r != nil {
			log.Printf("%v %v", Xs.Shape(), π.Shape())
			panic(r)
		}
	}()
	// Flatten everything past the batch dimension so each row is contiguous.
	Xs.Reshape(as2D(Xs.Shape())...)
	π.Reshape(as2D(π.Shape())...)
	var matXs, matPis [][]float32
	if matXs, err = native.MatrixF32(Xs); err != nil {
		return errors.Wrapf(err, "shuffle batch failed - matX")
	}
	if matPis, err = native.MatrixF32(π); err != nil {
		return errors.Wrapf(err, "shuffle batch failed - pi")
	}
	vs := v.Data().([]float32)
	tmp := make([]float32, Xs.Shape()[1])
	// Fisher–Yates: swap row i with a random row j <= i in all three tensors.
	for i := range matXs {
		j := r.Intn(i + 1)
		rowI := matXs[i]
		rowJ := matXs[j]
		copy(tmp, rowI)
		copy(rowI, rowJ)
		copy(rowJ, tmp)
		piI := matPis[i]
		piJ := matPis[j]
		copy(tmp, piI)
		copy(piI, piJ)
		copy(piJ, tmp)
		vs[i], vs[j] = vs[j], vs[i]
	}
	// Restore the original shapes.
	Xs.Reshape(oriXs...)
	π.Reshape(oriPis...)
	return nil
}
// as2D collapses a shape to two dimensions: the leading (batch) dimension is
// kept and every remaining dimension is folded into the second.
func as2D(s tensor.Shape) tensor.Shape {
	flat := tensor.BorrowInts(2)
	flat[0], flat[1] = s[0], s[1]
	for _, d := range s[2:] {
		flat[1] *= d
	}
	return flat
}
// Inferencer holds the state for a *Dual and a VM so that repeated
// inference calls can reuse the same machine and preallocated input tensor
// instead of rebuilding them on every call.
type Inferencer struct {
	d     *Dual          // forward-only copy of the trained network
	m     G.VM           // reusable tape machine
	input *tensor.Dense  // preallocated input, reused across Infer calls
	buf   *bytes.Buffer  // captures VM log output when logging is enabled
}
// Infer takes a trained *Dual and creates an inference data structure so that
// repeated inference is cheap. It builds a forward-only clone of the network
// with batch size actionSpace, copies the trained weights into it, and wires
// up a tape machine (optionally with tracing/logging when toLog is true).
func Infer(d *Dual, actionSpace int, toLog bool) (*Inferencer, error) {
	// Clone the config, but forward-only and sized to the action space.
	conf := d.Config
	conf.FwdOnly = true
	conf.BatchSize = actionSpace

	newShape := d.planes.Shape().Clone()
	newShape[0] = actionSpace

	retVal := &Inferencer{
		d:     New(conf),
		input: tensor.New(tensor.WithShape(newShape...), tensor.Of(Float)),
	}
	if err := retVal.d.Init(); err != nil {
		return nil, err
	}
	retVal.d.SetTesting()

	// G.WithInit(G.Zeroes())(retVal.d.planes)

	// Copy the trained weights into the forward-only clone.
	infModel := retVal.d.Model()
	for i, n := range d.Model() {
		original := n.Value().Data().([]float32)
		cloned := infModel[i].Value().Data().([]float32)
		copy(cloned, original)
	}

	retVal.buf = new(bytes.Buffer)
	if toLog {
		// Trace execution into buf so ExecLog can expose it.
		logger := log.New(retVal.buf, "", 0)
		retVal.m = G.NewTapeMachine(retVal.d.g,
			G.WithLogger(logger),
			G.WithWatchlist(),
			G.TraceExec(),
			G.WithValueFmt("%+1.1v"),
			G.WithNaNWatch(),
		)
	} else {
		retVal.m = G.NewTapeMachine(retVal.d.g)
	}
	return retVal, nil
}
// Dual implements Dualer by exposing the forward-only network clone.
func (m *Inferencer) Dual() *Dual { return m.d }

// Infer runs a forward pass over the board (a flat []float32 copied into the
// preallocated input tensor) and returns the policy head (truncated to the
// action space) and the scalar value head.
//
// NOTE(review): buf is reset twice (before and after the op/VM resets) —
// possibly intentional, to discard any log output produced by m.m.Reset();
// confirm before consolidating.
func (m *Inferencer) Infer(board []float32) (policy []float32, value float32, err error) {
	m.buf.Reset()
	for _, op := range m.d.ops {
		op.Reset()
	}

	// copy board to the provided preallocated input tensor
	m.input.Zero()
	data := m.input.Data().([]float32)
	copy(data, board)
	m.m.Reset()
	// log.Printf("Let planes %p be input %v", m.d.planes, board)
	m.buf.Reset()
	G.Let(m.d.planes, m.input)
	if err = m.m.RunAll(); err != nil {
		return nil, 0, err
	}
	policy = m.d.policyValue.Data().([]float32)
	value = m.d.value.Data().([]float32)[0]
	// log.Printf("\t%v", policy)
	return policy[:m.d.ActionSpace], value, nil
}
// ExecLog returns the execution log. If Infer was called with toLog = false, then it will return an empty string
func (m *Inferencer) ExecLog() string { return m.buf.String() }
// Close implements a closer, because well, a gorgonia VM is a resource.
func (m *Inferencer) Close() error { return m.m.Close() } | dualnet/meta.go | 0.614278 | 0.456349 | meta.go | starcoder |
package polygo
/*
This file contains a small graphing library built on top of the polygo core.
*/
import (
"errors"
"fmt"
"image/color"
"math"
"math/rand"
"time"
"github.com/fogleman/gg" // For graphics.
)
// A RealPolynomialGraph represents the graph of a set of polynomials.
type RealPolynomialGraph struct {
	// The polynomials to be plotted.
	elements []*RealPolynomial

	// The following slices are used to store computed values so that no re-computation is needed.
	intersections []Point
	yIntercepts   []float64
	roots         []float64

	// Rendering options.
	center      Point
	xResolution int
	yResolution int
	viewX       float64
	viewY       float64
	hViewX      float64 // viewX / 2: half the horizontal extent
	hViewY      float64 // viewY / 2: half the vertical extent
	xRenderStep float64
	gridStep    float64

	// Visual options.
	options *GraphOptions

	// Context which handles the actual graphics.
	context *gg.Context
}

// GraphOptions specify some visual options that the RealPolynomialGraph can have.
//
// The options are:
//	ShowAxis               // show the x = 0 and y = 0 axis lines.
//	ShowGrid               // show the grid.
//	ShowIntersections      // highlight the intersection points of all polynomials.
//	ShowRoots              // highlight the roots of all polynomials.
//	ShowYintercepts        // highlight the y-intercepts of all polynomials.
//	ShowAxisLabels         // label axis values.
//	ShowIntersectionLabels // label intersection points.
//	ShowRootLabels         // label roots.
//	ShowYinterceptLabels   // label y-intercepts.
//	DarkMode               // produce a dark-themed graph.
type GraphOptions struct {
	ShowAxis               bool
	ShowGrid               bool
	ShowIntersections      bool
	ShowRoots              bool
	ShowYintercepts        bool
	ShowAxisLabels         bool
	ShowIntersectionLabels bool
	ShowRootLabels         bool
	ShowYinterceptLabels   bool
	DarkMode               bool
}
// Some colour definitions used by the renderer.
var colBlack = color.RGBA{0x0, 0x0, 0x0, 0xFF}
var colBlackTrans = color.RGBA{0x0, 0x0, 0x0, 0x10} // translucent black
var colWhite = color.RGBA{0xFF, 0xFF, 0xFF, 0xFF}
var colGray = color.RGBA{0xCC, 0xCC, 0xCC, 0xFF} // grid lines
var colBlue = color.RGBA{0x0, 0x0, 0xFF, 0xFF}   // root markers
var colGreen = color.RGBA{0x0, 0xFF, 0x0, 0xFF}  // y-intercept markers
var colRed = color.RGBA{0xFF, 0x0, 0x0, 0xFF}
var colMagenta = color.RGBA{0xFF, 0x0, 0xFF, 0xFF} // intersection markers
// NewGraph returns a new *RealPolynomialGraph instance with the provided settings.
//
// If any settings are invalid, an appropriate error is set.
//
// The settings are:
//	center      // the point at which the graph is centered.
//	xResolution // the width of the graph in pixels.
//	yResolution // the height of the graph in pixels.
//	viewX       // the total width of the viewing area. For example, a viewX of 1.0 provides a graph spanning the horizontally closed interval [center.X - 0.5, center.X + 0.5]. (The renderer works with the half-extent viewX/2 on each side of center.)
//	viewY       // the total height of the viewing area. For example, a viewY of 1.0 provides a graph spanning the vertically closed interval [center.Y - 0.5, center.Y + 0.5].
//	xRenderStep // the detail the polynomial curves are rendered at. The closer this positive value is to 0.0, the more precise the curves will be (recommended to be 0.01).
//	gridStep    // the gap between consecutive axis lines.
//	options     // a *GraphOptions instance.
func NewGraph(elements []*RealPolynomial, center Point, xResolution, yResolution int, viewX, viewY, xRenderStep, gridStep float64, options *GraphOptions) (*RealPolynomialGraph, error) {
	if xResolution <= 0 || yResolution <= 0 {
		return nil, errors.New("xResolution and yResolution must be positive")
	}

	if viewX <= 0.0 || viewY <= 0.0 {
		return nil, errors.New("viewX and viewY must be positive")
	}

	if xRenderStep <= 0.0 {
		return nil, errors.New("xRenderStep must be positive")
	}

	if gridStep <= 0.0 {
		return nil, errors.New("gridStep must be positive")
	}

	var newGraph RealPolynomialGraph
	newGraph.elements = elements
	newGraph.center = center
	newGraph.xResolution = xResolution
	newGraph.yResolution = yResolution
	newGraph.viewX = viewX
	newGraph.viewY = viewY
	newGraph.xRenderStep = xRenderStep
	newGraph.gridStep = gridStep
	newGraph.options = options
	newGraph.context = gg.NewContext(xResolution, yResolution)

	// Cache the half-extents; all rendering is done relative to these.
	newGraph.hViewX = viewX / 2.0
	newGraph.hViewY = viewY / 2.0

	return &newGraph, nil
}
// SaveAsPNG renders and saves the current instance as a PNG image file to the provided path.
//
// Rendering happens in layers: background/grid/axes first, then the
// polynomial curves, then point markers, then labels on top.
//
// If any rendering errors occur, an error addressing the problem is set.
func (g *RealPolynomialGraph) SaveAsPNG(path string) error {
	var err error
	if err = g.renderAxisAndGrid(); err != nil {
		return err
	}

	if err = g.renderPolynomials(); err != nil {
		return err
	}

	if err = g.renderPointIndicators(); err != nil {
		return err
	}

	if err = g.renderLabels(); err != nil {
		return err
	}

	if err = g.context.SavePNG(path); err != nil {
		return err
	}

	fmt.Printf("Your graph has been successfully saved as %s!\n", path)
	return nil
}
// renderAxisAndGrid paints the background, the optional grid lines, and the
// optional x/y axis lines onto the drawing context. Grid lines are drawn
// first; the darker axis lines are drawn last so they sit on top.
func (g *RealPolynomialGraph) renderAxisAndGrid() error {
	ctx := g.context

	// Set background colour.
	if g.options.DarkMode {
		ctx.SetColor(colBlack)
	} else {
		ctx.SetColor(colWhite)
	}
	ctx.Clear()

	if g.options.ShowGrid {
		ctx.SetColor(colGray)

		// Left center to left x.
		for x := math.Floor(g.center.X); x > g.center.X-g.hViewX; x -= g.gridStep {
			tmpPt := g.mapPointToViewport(Point{x, 0.0})
			// Skip x == 0.0 (if the axis lines are actually being drawn) since we want the axis lines to go on top of everything.
			if !g.options.ShowAxis || roundToNearestUnit(x, g.gridStep) != 0.0 {
				drawLineBresenham(ctx, int(tmpPt.X), 0, int(tmpPt.X), g.yResolution)
			}
		}

		// Right center to right x.
		for x := math.Ceil(g.center.X); x < g.center.X+g.hViewX; x += g.gridStep {
			tmpPt := g.mapPointToViewport(Point{x, 0.0})
			if !g.options.ShowAxis || roundToNearestUnit(x, g.gridStep) != 0.0 {
				drawLineBresenham(ctx, int(tmpPt.X), 0, int(tmpPt.X), g.yResolution)
			}
		}

		// Bottom center to bottom y.
		for y := math.Floor(g.center.Y); y > g.center.Y-g.hViewY; y -= g.gridStep {
			tmpPt := g.mapPointToViewport(Point{0.0, y})
			if !g.options.ShowAxis || roundToNearestUnit(y, g.gridStep) != 0.0 {
				drawLineBresenham(ctx, 0, int(tmpPt.Y), g.xResolution, int(tmpPt.Y))
			}
		}

		// Top center to top y.
		// FIX: this loop previously ran to g.center.Y+g.viewY (the full view
		// height above center) while the three sibling loops use the
		// half-extents; use hViewY so all four directions cover exactly the
		// visible viewport and no off-screen lines are processed.
		for y := math.Ceil(g.center.Y); y < g.center.Y+g.hViewY; y += g.gridStep {
			tmpPt := g.mapPointToViewport(Point{0.0, y})
			// Skip y == 0.0 (if the axis lines are actually being drawn) since we want the axis lines to go on top of everything.
			if !g.options.ShowAxis || roundToNearestUnit(y, g.gridStep) != 0.0 {
				drawLineBresenham(ctx, 0, int(tmpPt.Y), g.xResolution, int(tmpPt.Y))
			}
		}
	}

	if g.options.ShowAxis {
		// If the closed interval [a, b] contains or touches 0, we must have that ab <= 0.
		xAxisInViewport := (g.center.X-g.hViewX)*(g.center.X+g.hViewX) <= 0.0
		yAxisInViewport := (g.center.Y-g.hViewY)*(g.center.Y+g.hViewY) <= 0.0

		origin := g.mapPointToViewport(Point{0.0, 0.0})
		if g.options.DarkMode {
			ctx.SetColor(colWhite)
		} else {
			ctx.SetColor(colBlack) // Axis lines will be darker
		}

		if xAxisInViewport { // Draw x-axis if visible
			drawLineBresenham(ctx, int(origin.X), 0, int(origin.X), g.yResolution)
		}

		if yAxisInViewport { // Draw y-axis if visible
			drawLineBresenham(ctx, 0, int(origin.Y), g.xResolution, int(origin.Y))
		}
	}

	return nil
}
// renderPolynomials draws each polynomial's curve by sampling it every
// xRenderStep across the horizontal viewport and connecting consecutive
// samples with line segments. Each polynomial gets a random colour (dark on
// light backgrounds, light in dark mode). Segments are drawn only when at
// least one endpoint is inside the vertical viewport, so curves extend up
// to one point past the edge.
//
// NOTE(review): rand.Seed reseeds the global source on every call (and is
// deprecated since Go 1.20); colours are therefore non-deterministic between
// renders — consider a local rand.Rand if reproducible palettes are wanted.
func (g *RealPolynomialGraph) renderPolynomials() error {
	ctx := g.context
	rand.Seed(time.Now().UnixNano())

	var currGraphY, prevGraphY float64
	var prevPt Point
	var currIsInView, prevIsInView bool

	for _, p := range g.elements {
		if g.options.DarkMode {
			ctx.SetRGB(
				0.8+rand.Float64()*(0.3),
				0.8+rand.Float64()*(0.3),
				0.8+rand.Float64()*(0.3),
			)
		} else { // We'll use a random dark colour for each polynomial.
			ctx.SetRGB(
				rand.Float64()*0.8,
				rand.Float64()*0.8,
				rand.Float64()*0.8,
			)
		}

		// Start from the left edge of the viewport.
		prevGraphY = p.At(g.center.X - g.hViewX)
		prevPt = g.mapPointToViewport(Point{g.center.X - g.hViewX, prevGraphY})

		for x := g.center.X - g.hViewX + g.xRenderStep; x < g.center.X+g.hViewX; x += g.xRenderStep {
			currGraphY = p.At(x)
			currIsInView = (g.center.Y-g.hViewY <= currGraphY && currGraphY <= g.center.Y+g.hViewY)
			prevIsInView = (g.center.Y-g.hViewY <= prevGraphY && prevGraphY <= g.center.Y+g.hViewY)
			currPt := g.mapPointToViewport(Point{x, currGraphY})

			// Render up to one point out of view.
			if (currIsInView && prevIsInView) || (currIsInView && !prevIsInView) || (!currIsInView && prevIsInView) {
				drawLineBresenham(ctx, int(prevPt.X), int(prevPt.Y), int(currPt.X), int(currPt.Y))
			}

			prevPt = currPt
			prevGraphY = currGraphY
		}
	}

	return nil
}
// renderPointIndicators draws circular markers for the optional point
// decorations: roots (blue), y-intercepts (green), and pairwise curve
// intersections (magenta). The computed points are also cached on the graph
// (g.roots, g.yIntercepts, g.intersections) so the label pass does not have
// to recompute them.
func (g *RealPolynomialGraph) renderPointIndicators() error {
	ctx := g.context

	// A "hacky" way of scaling distance based on resolution.
	// We want our intersection markers to be of constant unit radius 0.08.
	markerRad := g.mapPointToViewport(Point{g.center.X - (g.hViewX) + 0.08, 0.0}).X

	for _, p := range g.elements {
		if g.options.ShowRoots {
			ctx.SetColor(colBlue)
			roots, err := p.FindRootsWithin(g.center.X-g.hViewX, g.center.X+g.hViewX)
			if err != nil {
				return err
			}
			g.roots = append(g.roots, roots...)
			for _, rt := range roots {
				tmpPt := g.mapPointToViewport(Point{rt, 0.0})
				ctx.DrawCircle(tmpPt.X, tmpPt.Y, markerRad)
			}
			ctx.Stroke()
		}

		if g.options.ShowYintercepts {
			ctx.SetColor(colGreen)
			yInt := p.At(0.0)
			g.yIntercepts = append(g.yIntercepts, yInt)
			tmpPt := g.mapPointToViewport(Point{0.0, yInt})
			ctx.DrawCircle(tmpPt.X, tmpPt.Y, markerRad)
			ctx.Stroke()
		}

		if g.options.ShowIntersections {
			ctx.SetColor(colMagenta)
			var pois []Point
			// Intersect p with every other polynomial in the set.
			for _, p2 := range g.elements {
				if !p.Equal(p2) {
					tmp, err := p.FindIntersectionsWithin(g.center.X-g.hViewX, g.center.X+g.hViewX, p2)
					if err != nil {
						return err
					}
					pois = append(pois, tmp...)
				}
			}
			g.intersections = append(g.intersections, pois...)
			for _, poi := range pois {
				tmpPt := g.mapPointToViewport(Point{poi.X, poi.Y})
				ctx.DrawCircle(tmpPt.X, tmpPt.Y, markerRad)
			}
			ctx.Stroke()
		}
	}

	return nil
}
// Render the axis and/or point indicator labels.
//
// renderLabels draws, depending on g.options: numeric tick labels along
// both axes, and "(x, y)" coordinate labels next to intersections,
// roots and y-intercepts. If the corresponding point slices were not
// already populated by renderPointIndicators, they are computed here.
func (g *RealPolynomialGraph) renderLabels() error {
	ctx := g.context
	if g.options.DarkMode {
		ctx.SetColor(colWhite)
	} else {
		ctx.SetColor(colBlack)
	}
	if g.options.ShowAxisLabels {
		// Successive x labels alternate above/below the axis to reduce
		// overlap. ("alernate" [sic] is a pre-existing local-name typo.)
		alernate := true
		zeroDrawn := false
		var tmpPt Point
		// Center to left x.
		for x := math.Floor(g.center.X); x > g.center.X-g.hViewX-g.gridStep; x -= g.gridStep {
			if x == 0.0 {
				zeroDrawn = true
			}
			tmpPt = g.mapPointToViewport(Point{x, 0.0})
			if alernate {
				// The "%g" format removes all trailing zeroes. I hope who ever came up with that lives a long, healthy life.
				ctx.DrawStringAnchored(fmt.Sprintf("%g", x), tmpPt.X+1, tmpPt.Y-1, 0.0, 0.0)
			} else {
				ctx.DrawStringAnchored(fmt.Sprintf("%g", x), tmpPt.X+1, tmpPt.Y+ctx.FontHeight()+1, 0.0, 0.0)
			}
			alernate = !alernate
		}
		alernate = !alernate
		// Draw the origin label if the leftward sweep never landed on 0.
		if !zeroDrawn {
			tmpPt = g.mapPointToViewport(Point{0.0, 0.0})
			if alernate {
				ctx.DrawStringAnchored(fmt.Sprintf("%g", 0.0), tmpPt.X+1, tmpPt.Y-1, 0.0, 0.0)
			} else {
				ctx.DrawStringAnchored(fmt.Sprintf("%g", 0.0), tmpPt.X+1, tmpPt.Y+ctx.FontHeight()+1, 0.0, 0.0)
			}
		}
		// Center to right x.
		for x := math.Ceil(g.center.X); x < g.center.X+g.hViewX; x += g.gridStep {
			if x != 0.0 {
				tmpPt = g.mapPointToViewport(Point{x, 0.0})
				if alernate {
					ctx.DrawStringAnchored(fmt.Sprintf("%g", x), tmpPt.X+1, tmpPt.Y-1, 0.0, 0.0)
				} else {
					ctx.DrawStringAnchored(fmt.Sprintf("%g", x), tmpPt.X+1, tmpPt.Y+ctx.FontHeight()+1, 0.0, 0.0)
				}
			}
			alernate = !alernate
		}
		// Center to top y.
		// NOTE(review): this bound uses g.viewY (full view height) while the
		// downward loop below uses g.hViewY (half height); the asymmetry
		// looks unintentional — confirm which is correct.
		for y := math.Ceil(g.center.Y); y < g.center.Y+g.viewY; y += g.gridStep {
			if roundToNearestUnit(y, g.gridStep) != 0.0 { // Ignore 0.
				tmpPt := g.mapPointToViewport(Point{0.0, y})
				ctx.DrawStringAnchored(fmt.Sprintf("%g", y), tmpPt.X+1, tmpPt.Y-ctx.FontHeight()-1, 0.0, 1.0)
			}
		}
		// Center to bottom y.
		for y := math.Floor(g.center.Y); y > g.center.Y-g.hViewY-g.gridStep; y -= g.gridStep {
			if roundToNearestUnit(y, g.gridStep) != 0.0 { // Ignore 0.
				tmpPt := g.mapPointToViewport(Point{0.0, y})
				ctx.DrawStringAnchored(fmt.Sprintf("%g", y), tmpPt.X+1, tmpPt.Y-ctx.FontHeight()-1, 0.0, 1.0)
			}
		}
	}
	if g.options.ShowIntersectionLabels {
		// Process significant points if they have not already been processed via renderPointIndicators.
		if g.intersections == nil {
			for _, p := range g.elements {
				for _, p2 := range g.elements {
					if !p.Equal(p2) {
						tmp, err := p.FindIntersectionsWithin(g.center.X-g.hViewX, g.center.X+g.hViewX, p2)
						if err != nil {
							return err
						}
						g.intersections = append(g.intersections, tmp...)
					}
				}
			}
		}
		ctx.SetColor(colMagenta)
		// Offset each label slightly up-right of its point.
		for _, pt := range g.intersections {
			tmpPt := g.mapPointToViewport(Point{pt.X + 0.05, pt.Y + 0.05})
			ctx.DrawStringAnchored(fmt.Sprintf("(%.2f, %.2f)", pt.X, pt.Y), tmpPt.X, tmpPt.Y, 0.0, 0.0)
		}
	}
	if g.options.ShowRootLabels {
		if g.roots == nil {
			for _, p := range g.elements {
				roots, err := p.FindRootsWithin(g.center.X-g.hViewX, g.center.X+g.hViewX)
				if err != nil {
					return err
				}
				g.roots = append(g.roots, roots...)
			}
		}
		ctx.SetColor(colBlue)
		alternate := true
		for _, x := range g.roots {
			tmpPt := g.mapPointToViewport(Point{x + 0.05, 0.05})
			if alternate { // Alternate the root labels to minimize cluster.
				ctx.DrawStringAnchored(fmt.Sprintf("(%.2f, %.2f)", x, 0.0), tmpPt.X, tmpPt.Y, 0.0, 0.0)
			} else {
				ctx.DrawStringAnchored(fmt.Sprintf("(%.2f, %.2f)", x, 0.0), tmpPt.X, tmpPt.Y, 0.0, 1.0)
			}
			alternate = !alternate
		}
	}
	if g.options.ShowYinterceptLabels {
		if g.yIntercepts == nil {
			for _, p := range g.elements {
				yInt := p.At(0.0)
				g.yIntercepts = append(g.yIntercepts, yInt)
			}
		}
		ctx.SetColor(colGreen)
		for _, y := range g.yIntercepts {
			tmpPt := g.mapPointToViewport(Point{0.05, y + 0.05})
			ctx.DrawStringAnchored(fmt.Sprintf("(%.2f, %.2f)", 0.0, y), tmpPt.X, tmpPt.Y, 0.0, 0.0)
		}
	}
	return nil
}
// Map a point on the graph coordinate system to the corresponding pixel coordinate.
//
// The X translation shifts the viewport's left edge to zero before
// scaling graph units to pixels; Y is negated because pixel rows grow
// downward while the graph's Y axis grows upward.
func (g *RealPolynomialGraph) mapPointToViewport(p Point) Point {
	px := (p.X + g.hViewX - g.center.X) * float64(g.xResolution) / g.viewX
	py := (-p.Y + g.hViewY + g.center.Y) * float64(g.yResolution) / g.viewY
	return Point{px, py}
}
// Credit to https://github.com/StephaneBunel/bresenham/blob/master/drawline.go for the implementation.
//
// drawLineBresenham rasterizes the segment (x1,y1)-(x2,y2) onto ctx one
// pixel at a time using Bresenham's integer line algorithm. Point,
// horizontal, vertical and 45-degree diagonal segments are special-cased;
// the two general cases split on whether the segment is wider than tall.
func drawLineBresenham(ctx *gg.Context, x1, y1, x2, y2 int) {
	var dx, dy, e, slope int
	// Because drawing p1 -> p2 is equivalent to draw p2 -> p1,
	// I sort points in x-axis order to handle only half of possible cases.
	if x1 > x2 {
		x1, y1, x2, y2 = x2, y2, x1, y1
	}
	dx, dy = x2-x1, y2-y1
	// Because point is x-axis ordered, dx cannot be negative
	if dy < 0 {
		dy = -dy
	}
	switch {
	// Is line a point ?
	case x1 == x2 && y1 == y2:
		ctx.SetPixel(x1, y1)
	// Is line an horizontal ?
	case y1 == y2:
		for ; dx != 0; dx-- {
			ctx.SetPixel(x1, y1)
			x1++
		}
		ctx.SetPixel(x1, y1)
	// Is line a vertical ?
	case x1 == x2:
		// Walk upward from the lower endpoint.
		if y1 > y2 {
			y1 = y2
		}
		for ; dy != 0; dy-- {
			ctx.SetPixel(x1, y1)
			y1++
		}
		ctx.SetPixel(x1, y1)
	// Is line a diagonal ?
	case dx == dy:
		if y1 < y2 {
			for ; dx != 0; dx-- {
				ctx.SetPixel(x1, y1)
				x1++
				y1++
			}
		} else {
			for ; dx != 0; dx-- {
				ctx.SetPixel(x1, y1)
				x1++
				y1--
			}
		}
		ctx.SetPixel(x1, y1)
	// wider than high ?
	case dx > dy:
		// e is the accumulated error; y steps when it goes negative.
		if y1 < y2 {
			dy, e, slope = 2*dy, dx, 2*dx
			for ; dx != 0; dx-- {
				ctx.SetPixel(x1, y1)
				x1++
				e -= dy
				if e < 0 {
					y1++
					e += slope
				}
			}
		} else {
			dy, e, slope = 2*dy, dx, 2*dx
			for ; dx != 0; dx-- {
				ctx.SetPixel(x1, y1)
				x1++
				e -= dy
				if e < 0 {
					y1--
					e += slope
				}
			}
		}
		ctx.SetPixel(x2, y2)
	// higher than wide.
	default:
		// Same scheme with the roles of x and y swapped.
		if y1 < y2 {
			dx, e, slope = 2*dx, dy, 2*dy
			for ; dy != 0; dy-- {
				ctx.SetPixel(x1, y1)
				y1++
				e -= dx
				if e < 0 {
					x1++
					e += slope
				}
			}
		} else {
			dx, e, slope = 2*dx, dy, 2*dy
			for ; dy != 0; dy-- {
				ctx.SetPixel(x1, y1)
				y1--
				e -= dx
				if e < 0 {
					x1++
					e += slope
				}
			}
		}
		ctx.SetPixel(x2, y2)
	}
} | graph.go | 0.637482 | 0.401629 | graph.go | starcoder |
// +build gofuzz
package roaring
import (
"encoding/binary"
"fmt"
"io/ioutil"
"reflect"
)
// FuzzBitmapUnmarshalBinary fuzz tests the unmarshaling of binary
// to both Pilosa and official roaring formats.
//
// It returns 1 for inputs that decode successfully (interesting corpus
// entries) and 0 for inputs that were rejected.
func FuzzBitmapUnmarshalBinary(data []byte) int {
	bm := NewBitmap()
	if err := bm.UnmarshalBinary(data); err != nil {
		return 0
	}
	return 1
}
// FuzzRoaringOps fuzz tests different operations on roaring bitmaps,
// comparing the results to a naive implementation of the operations
// on uint64 slices.
//
// The raw input is reinterpreted as a []uint64. The first four values
// are control parameters (a range start/end, a split index and a probe
// value); the remainder is split at the index into two value sets, each
// backing one sorted/deduplicated oracle slice and one bitmap. Any
// divergence between the slice oracle and the bitmap implementation
// panics, which go-fuzz reports as a crash. Returns 1 for interesting
// inputs and 0 for inputs too short to exercise anything.
func FuzzRoaringOps(data []byte) int {
	// number of uint64s not to include
	const reserved = 4
	// flipping is too inefficient for large values of end - start
	// and will cause go-fuzz to hang if not controlled.
	const maxFlips = 1000000
	arr := bytesToUint64s(data)
	if len(arr) <= reserved {
		return 0
	}
	// start > end is possible. This will test correctness in unexpected conditions.
	start, end, split, rand := arr[0], arr[1], int(arr[2]), arr[3]
	// don't include start, end, split, rand in the slices
	if split < reserved {
		split = reserved
	}
	// ensure slice in bounds
	if split > len(arr) {
		split = len(arr)
	}
	// using removeSliceDuplicates guarantees that the slice inputs of the
	// following functions do not have duplicates and are sorted, just as
	// the Roaring Bitmap implementations are.
	s1 := removeSliceDuplicates(arr[reserved:split])
	s2 := removeSliceDuplicates(arr[split:])
	if len(s1) == 0 {
		s1 = nil
	}
	if len(s2) == 0 {
		s2 = nil
	}
	bm1 := NewBitmap(arr[reserved:split]...)
	bm2 := NewBitmap(arr[split:]...)
	// Sanity check: each bitmap must round-trip to its oracle slice.
	expected := s1
	actual := bm1.Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("first slice:\n expected: %v\n got: %v", expected, actual))
	}
	expected = s2
	actual = bm2.Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("second slice:\n expected: %v\n got: %v", expected, actual))
	}
	// Pure functions
	expected = []uint64{maxInSlice(s1), maxInSlice(s2)}
	actual = []uint64{bm1.Max(), bm2.Max()}
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("max values:\n expected: %v\n got: %v", expected, actual))
	}
	expected = intersectSlice(s1, s2)
	actual = bm1.Intersect(bm2).Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("intersection:\n expected: %v\n got: %v", expected, actual))
	}
	expected = unionSlice(s1, s2)
	actual = bm1.Union(bm2).Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("union:\n expected: %v\n got: %v", expected, actual))
	}
	expected = differenceSlice(s1, s2)
	actual = bm1.Difference(bm2).Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("difference:\n expected: %v\n got: %v", expected, actual))
	}
	expected = xorSlice(s1, s2)
	actual = bm1.Xor(bm2).Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("XOR:\n expected: %v\n got: %v", expected, actual))
	}
	if (len(s1) > 0) != bm1.Any() {
		panic(fmt.Sprintf("any:\n %v has %v values but got %v which has %v values", s1, len(s1), bm1.Slice(), bm1.Any()))
	}
	if (len(s2) > 0) != bm2.Any() {
		panic(fmt.Sprintf("any:\n %v has %v values but got %v which has %v values", s2, len(s2), bm2.Slice(), bm2.Any()))
	}
	expected = []uint64{uint64(len(s1)), uint64(len(s2))}
	actual = []uint64{bm1.Count(), bm2.Count()}
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("count:\n expected: %v\n got: %v", expected, actual))
	}
	expect := countRangeSlice(s1, start, end)
	got := bm1.CountRange(start, end)
	if expect != got {
		panic(fmt.Sprintf("count range:\n count from %v to %v in slice %v and bitmap %v:\n expected %v got %v",
			start, end, s1, bm1.Slice(), expect, got))
	}
	expect = countRangeSlice(s2, start, end)
	got = bm2.CountRange(start, end)
	if expect != got {
		panic(fmt.Sprintf("count range:\n count from %v to %v in slice %v and bitmap %v:\n expected %v got %v",
			start, end, s2, bm2.Slice(), expect, got))
	}
	expected = rangeSlice(s1, start, end)
	actual = bm1.SliceRange(start, end)
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("slice range:\n from %v to %v in slice %v and bitmap %v:\n expected %v\n got %v",
			start, end, s1, bm1.Slice(), expected, actual))
	}
	expected = rangeSlice(s2, start, end)
	actual = bm2.SliceRange(start, end)
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("slice range:\n from %v to %v in slice %v and bitmap %v:\n expected %v\n got %v",
			start, end, s2, bm2.Slice(), expected, actual))
	}
	expect = uint64(len(intersectSlice(s1, s2)))
	got = bm1.IntersectionCount(bm2)
	if expect != got {
		panic(fmt.Sprintf("intersection count:\n expected %v got %v", expect, got))
	}
	_, found := containedInSlice(s1, rand)
	if found != bm1.Contains(rand) {
		panic(fmt.Sprintf("contains:\n %v contains %v: %v\n %v contains %v: %v", s1, rand, found,
			bm1.Slice(), rand, bm1.Contains(rand)))
	}
	_, found = containedInSlice(s2, rand)
	if found != bm2.Contains(rand) {
		panic(fmt.Sprintf("contains:\n %v contains %v: %v\n %v contains %v: %v", s2, rand, found,
			bm2.Slice(), rand, bm2.Contains(rand)))
	}
	// Flip only when the range is small enough to stay fast (see maxFlips).
	if end-start < maxFlips {
		expected = flipSlice(s1, start, end)
		actual = bm1.Flip(start, end).Slice()
		if !reflect.DeepEqual(expected, actual) {
			panic(fmt.Sprintf("flip:\n from %v to %v in slice %v and bitmap %v\n expected %v\n got %v",
				start, end, s1, bm1.Slice(), expected, actual))
		}
		expected = flipSlice(s2, start, end)
		actual = bm2.Flip(start, end).Slice()
		if !reflect.DeepEqual(expected, actual) {
			panic(fmt.Sprintf("flip:\n from %v to %v in slice %v and bitmap %v\n expected %v\n got %v",
				start, end, s2, bm2.Slice(), expected, actual))
		}
	}
	expected = make([]uint64, 0)
	actual = make([]uint64, 0)
	forEachInSlice(s1, func(v uint64) { expected = append(expected, v) })
	bm1.ForEach(func(v uint64) { actual = append(actual, v) })
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("for each:\n expected %v\n got %v", expected, actual))
	}
	expected = make([]uint64, 0)
	actual = make([]uint64, 0)
	forEachInSlice(s2, func(v uint64) { expected = append(expected, v) })
	bm2.ForEach(func(v uint64) { actual = append(actual, v) })
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("for each:\n expected %v\n got %v", expected, actual))
	}
	expected = make([]uint64, 0)
	actual = make([]uint64, 0)
	forEachInRangeSlice(s1, start, end, func(v uint64) { expected = append(expected, v) })
	bm1.ForEachRange(start, end, func(v uint64) { actual = append(actual, v) })
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("for each in range:\n expected %v\n got %v", expected, actual))
	}
	expected = make([]uint64, 0)
	actual = make([]uint64, 0)
	forEachInRangeSlice(s2, start, end, func(v uint64) { expected = append(expected, v) })
	bm2.ForEachRange(start, end, func(v uint64) { actual = append(actual, v) })
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("for each in range:\n expected %v\n got %v", expected, actual))
	}
	// Impure functions
	// The following tests operations that mutate bitmaps.
	// Each mutation runs on a fresh clone so bm1/bm2 stay pristine.
	nbm1, nbm2 := bm1.Clone(), bm2.Clone()
	expected = shiftSlice(s1, 1)
	tempBM, _ := nbm1.Shift(1)
	actual = tempBM.Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("shift:\n in slice %v and bitmap %v \n expected %v\n got %v",
			s1, bm1.Slice(), expected, actual))
	}
	expected = shiftSlice(s2, 1)
	tempBM, _ = nbm2.Shift(1)
	actual = tempBM.Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("shift:\n in slice %v and bitmap %v \n expected %v\n got %v",
			s2, bm2.Slice(), expected, actual))
	}
	// reuse start and end as random values
	rand2 := start
	rand3 := end
	nbm1 = bm1.Clone()
	expected, echanged := addNToSlice(s1, rand)
	achanged := nbm1.DirectAddN(rand)
	actual = nbm1.Slice()
	if echanged != achanged || !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("directAddN:\n adding %v in slice %v and bitmap %v \n expected %v and %v changed \n got %v and %v changed",
			rand, s1, bm1.Slice(), expected, echanged, actual, achanged))
	}
	nbm2 = bm2.Clone()
	expected, echanged = addNToSlice(s2, rand2, rand3)
	achanged = nbm2.DirectAddN(rand2, rand3)
	actual = nbm2.Slice()
	if echanged != achanged || !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("directAddN:\n adding %v and %v in slice %v and bitmap %v \n expected %v and %v changed \n got %v and %v changed",
			rand2, rand3, s2, bm2.Slice(), expected, echanged, actual, achanged))
	}
	nbm1 = bm1.Clone()
	expected, echanged = removeNFromSlice(s1, rand2, rand3)
	achanged = nbm1.DirectRemoveN(rand2, rand3)
	actual = nbm1.Slice()
	if echanged != achanged || !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("directRemoveN\n removing %v and %v in slice %v and bitmap %v \n expected %v and %v changed \n got %v and %v changed",
			rand2, rand3, s1, bm1.Slice(), expected, echanged, actual, achanged))
	}
	nbm2 = bm2.Clone()
	expected, echanged = removeNFromSlice(s2, rand)
	achanged = nbm2.DirectRemoveN(rand)
	actual = nbm2.Slice()
	if echanged != achanged || !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("directRemoveN:\n removing %v in slice %v and bitmap %v \n expected %v and %v changed \n got %v and %v changed",
			rand, s2, bm2.Slice(), expected, echanged, actual, achanged))
	}
	nbm1, nbm2 = bm1.Clone(), bm2.Clone()
	expected = unionSlice(s1, s2)
	nbm1.UnionInPlace(nbm2)
	actual = nbm1.Slice()
	if !reflect.DeepEqual(expected, actual) {
		panic(fmt.Sprintf("union in place:\n expected %v\n got %v", expected, actual))
	}
	return 1
}
// bytesToUint64s reinterprets data as a sequence of little-endian
// uint64 values. Trailing bytes that do not fill a whole word are
// ignored. The result is non-nil (but empty) when data holds fewer
// than eight bytes.
func bytesToUint64s(data []byte) []uint64 {
	const uint64Size = 8
	size := len(data) / uint64Size
	// Pre-size the slice: the element count is known up front, so the
	// append loop never has to grow/copy the backing array.
	slice := make([]uint64, 0, size)
	for i := 0; i < size; i++ {
		offset := i * uint64Size
		num := binary.LittleEndian.Uint64(data[offset : offset+uint64Size])
		slice = append(slice, num)
	}
	return slice
}
// copy and paste the following to a main file to run.
// path should be the absolute path to the corpus.
// make sure filename is not already in the corpus.
//
// addSliceToCorpus serializes slice and writes it as a go-fuzz seed
// file; write failures are reported on stdout rather than returned.
// NOTE(review): io/ioutil has been deprecated since Go 1.16 in favor
// of os.WriteFile.
func addSliceToCorpus(slice []uint64, filename, path string) {
	payload := uint64sToBytes(slice)
	if err := ioutil.WriteFile(path+"/"+filename, payload, 0777); err != nil {
		fmt.Printf("could not write to file: %v\n", err)
	}
}
// uint64sToBytes serializes slice into its little-endian byte
// representation, eight bytes per element; it is the inverse of
// bytesToUint64s for whole-word inputs.
func uint64sToBytes(slice []uint64) []byte {
	const wordSize = 8
	data := make([]byte, len(slice)*wordSize)
	for i, v := range slice {
		binary.LittleEndian.PutUint64(data[i*wordSize:(i+1)*wordSize], v)
	}
	return data
}
package pixelate
import (
"image"
"image/color"
"math"
"github.com/fogleman/gg"
)
// context wraps a gg drawing context so package-local drawing helpers
// (drawCell, getAvgColor, convertToNRGBA64) can be attached as methods.
type context struct {
	*gg.Context
}

// Brightness factor: drawCell multiplies each RGB channel by this
// value, brightening cells very slightly (0.05%).
var bf = 1.0005
// Draw creates uniform cells with the quantified cell color of the source image.
//
// numOfColors is the palette size handed to the quantizer; csize is the
// cell edge length in pixels, where 0 selects an automatic size derived
// from the image dimensions; useNoise applies a noise pass to the
// finished image before returning it.
func (quant *Quant) Draw(img image.Image, numOfColors int, csize int, useNoise bool) image.Image {
	var cellSize int
	dx, dy := img.Bounds().Dx(), img.Bounds().Dy()
	// Heuristic used to auto-derive a cell size from the image shape.
	// NOTE(review): w/h is integer division, so e.g. 1920x1080 yields
	// (1)*1920 rather than 1.78*1920 — confirm the truncation is wanted.
	// NOTE(review): an empty source image (w or h == 0) divides by zero here.
	imgRatio := func(w, h int) float64 {
		var ratio float64
		if w > h {
			ratio = float64((w / h) * w)
		} else {
			ratio = float64((h / w) * h)
		}
		return ratio
	}
	if csize == 0 {
		cellSize = int(round(imgRatio(dx, dy) * 0.015))
	} else {
		cellSize = csize
	}
	// Quantize first so every cell averages colors from the reduced palette.
	qimg := quant.Quantize(img, numOfColors)
	ctx := &context{gg.NewContext(dx, dy)}
	ctx.SetRGB(1, 1, 1)
	ctx.Clear()
	ctx.SetRGB(0, 0, 0)
	rgba := ctx.convertToNRGBA64(qimg)
	for x := 0; x < dx; x += cellSize {
		for y := 0; y < dy; y += cellSize {
			// Clip each cell to the image bounds before sampling.
			rect := image.Rect(x, y, x+cellSize, y+cellSize)
			rect = rect.Intersect(qimg.Bounds())
			if rect.Empty() {
				rect = image.ZR
			}
			subImg := rgba.SubImage(rect).(*image.NRGBA64)
			cellColor := ctx.getAvgColor(subImg)
			ctx.drawCell(float64(x), float64(y), float64(cellSize), cellColor)
		}
	}
	ctxImg := ctx.Image()
	if useNoise {
		return noise(ctxImg, dx, dy, 10)
	}
	return ctxImg
}
// drawCell draws the cell filling up with the quantified color
//
// drawCell fills the cell whose top-left corner is (x, y).
// NOTE(review): gg's DrawRectangle takes (x, y, width, height), but
// x+cellSize / y+cellSize are passed as the last two arguments, so the
// drawn rectangle grows with its position — verify this is intentional.
// NOTE(review): Go parses c.R/255^0xff as (c.R/255)^0xff because "/"
// binds tighter than "^". With 16-bit channels, c.R/255 lies in 0..257
// before the XOR, so the invert-then-scale transform looks suspect —
// confirm the intended color math before touching it.
func (ctx *context) drawCell(x, y, cellSize float64, c color.NRGBA64) {
	ctx.DrawRectangle(x, y, x+cellSize, y+cellSize)
	ctx.SetRGBA(float64(c.R/255^0xff)*bf, float64(c.G/255^0xff)*bf, float64(c.B/255^0xff)*bf, 1)
	ctx.Fill()
}
// getAvgColor returns the arithmetic mean color of all pixels in img.
//
// Each channel sum fits in an int (at most pixels*65535) and the mean
// is taken over the pixel count, so the result always fits back into a
// uint16 — the previous min/max clamping against 0 and 65535 was a
// provable no-op and has been dropped.
//
// An empty sub-image (a degenerate cell rectangle) previously caused an
// integer division by zero; it now returns black instead.
//
// NOTE(review): A is 255, not 65535 — in 16-bit color that is almost
// fully transparent. drawCell ignores the alpha channel, so this has no
// visible effect today; confirm before relying on the alpha value.
func (ctx *context) getAvgColor(img *image.NRGBA64) color.NRGBA64 {
	bounds := img.Bounds()
	count := bounds.Dx() * bounds.Dy()
	if count <= 0 {
		// Guard the division below against empty input.
		return color.NRGBA64{A: 255}
	}
	var r, g, b int
	for x := bounds.Min.X; x < bounds.Max.X; x++ {
		for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
			c := img.NRGBA64At(x, y)
			r += int(c.R)
			g += int(c.G)
			b += int(c.B)
		}
	}
	return color.NRGBA64{
		R: uint16(r / count),
		G: uint16(g / count),
		B: uint16(b / count),
		A: 255,
	}
}
// convertToNRGBA64 returns a pixel-for-pixel copy of img in the
// NRGBA64 color model, covering exactly img's bounds.
func (ctx *context) convertToNRGBA64(img image.Image) *image.NRGBA64 {
	bounds := img.Bounds()
	out := image.NewNRGBA64(bounds)
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			out.Set(x, y, img.At(x, y))
		}
	}
	return out
}
// round truncates toward negative infinity (a floor, despite the name).
func round(v float64) float64 {
	return math.Floor(v)
}
// minUint16 reports the smaller of the two uint16 values.
func minUint16(x, y uint16) uint16 {
	if y < x {
		return y
	}
	return x
}
// maxUint16 reports the larger of the two uint16 values.
func maxUint16(x, y uint16) uint16 {
	if y > x {
		return y
	}
	return x
}
package pgsql
import (
"database/sql"
"database/sql/driver"
"strconv"
)
// The constructors below pair each supported Go two-element array type
// with a driver.Valuer (Go -> int8range) or an sql.Scanner
// (int8range -> Go). Values are always emitted as the half-open range
// "[lo,hi)" with element 0 as the lower bound and element 1 as the
// upper bound.

// Int8RangeFromIntArray2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]int.
func Int8RangeFromIntArray2(val [2]int) driver.Valuer {
	return int8RangeFromIntArray2{val: val}
}

// Int8RangeToIntArray2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]int and sets it to val.
func Int8RangeToIntArray2(val *[2]int) sql.Scanner {
	return int8RangeToIntArray2{val: val}
}

// Int8RangeFromInt8Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]int8.
func Int8RangeFromInt8Array2(val [2]int8) driver.Valuer {
	return int8RangeFromInt8Array2{val: val}
}

// Int8RangeToInt8Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]int8 and sets it to val.
func Int8RangeToInt8Array2(val *[2]int8) sql.Scanner {
	return int8RangeToInt8Array2{val: val}
}

// Int8RangeFromInt16Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]int16.
func Int8RangeFromInt16Array2(val [2]int16) driver.Valuer {
	return int8RangeFromInt16Array2{val: val}
}

// Int8RangeToInt16Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]int16 and sets it to val.
func Int8RangeToInt16Array2(val *[2]int16) sql.Scanner {
	return int8RangeToInt16Array2{val: val}
}

// Int8RangeFromInt32Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]int32.
func Int8RangeFromInt32Array2(val [2]int32) driver.Valuer {
	return int8RangeFromInt32Array2{val: val}
}

// Int8RangeToInt32Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]int32 and sets it to val.
func Int8RangeToInt32Array2(val *[2]int32) sql.Scanner {
	return int8RangeToInt32Array2{val: val}
}

// Int8RangeFromInt64Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]int64.
func Int8RangeFromInt64Array2(val [2]int64) driver.Valuer {
	return int8RangeFromInt64Array2{val: val}
}

// Int8RangeToInt64Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]int64 and sets it to val.
func Int8RangeToInt64Array2(val *[2]int64) sql.Scanner {
	return int8RangeToInt64Array2{val: val}
}

// Int8RangeFromUintArray2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]uint.
func Int8RangeFromUintArray2(val [2]uint) driver.Valuer {
	return int8RangeFromUintArray2{val: val}
}

// Int8RangeToUintArray2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]uint and sets it to val.
func Int8RangeToUintArray2(val *[2]uint) sql.Scanner {
	return int8RangeToUintArray2{val: val}
}

// Int8RangeFromUint8Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]uint8.
func Int8RangeFromUint8Array2(val [2]uint8) driver.Valuer {
	return int8RangeFromUint8Array2{val: val}
}

// Int8RangeToUint8Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]uint8 and sets it to val.
func Int8RangeToUint8Array2(val *[2]uint8) sql.Scanner {
	return int8RangeToUint8Array2{val: val}
}

// Int8RangeFromUint16Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]uint16.
func Int8RangeFromUint16Array2(val [2]uint16) driver.Valuer {
	return int8RangeFromUint16Array2{val: val}
}

// Int8RangeToUint16Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]uint16 and sets it to val.
func Int8RangeToUint16Array2(val *[2]uint16) sql.Scanner {
	return int8RangeToUint16Array2{val: val}
}

// Int8RangeFromUint32Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]uint32.
func Int8RangeFromUint32Array2(val [2]uint32) driver.Valuer {
	return int8RangeFromUint32Array2{val: val}
}

// Int8RangeToUint32Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]uint32 and sets it to val.
func Int8RangeToUint32Array2(val *[2]uint32) sql.Scanner {
	return int8RangeToUint32Array2{val: val}
}

// Int8RangeFromUint64Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]uint64.
func Int8RangeFromUint64Array2(val [2]uint64) driver.Valuer {
	return int8RangeFromUint64Array2{val: val}
}

// Int8RangeToUint64Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]uint64 and sets it to val.
func Int8RangeToUint64Array2(val *[2]uint64) sql.Scanner {
	return int8RangeToUint64Array2{val: val}
}

// Int8RangeFromFloat32Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]float32.
func Int8RangeFromFloat32Array2(val [2]float32) driver.Valuer {
	return int8RangeFromFloat32Array2{val: val}
}

// Int8RangeToFloat32Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]float32 and sets it to val.
func Int8RangeToFloat32Array2(val *[2]float32) sql.Scanner {
	return int8RangeToFloat32Array2{val: val}
}

// Int8RangeFromFloat64Array2 returns a driver.Valuer that produces a PostgreSQL int8range from the given Go [2]float64.
func Int8RangeFromFloat64Array2(val [2]float64) driver.Valuer {
	return int8RangeFromFloat64Array2{val: val}
}

// Int8RangeToFloat64Array2 returns an sql.Scanner that converts a PostgreSQL int8range into a Go [2]float64 and sets it to val.
func Int8RangeToFloat64Array2(val *[2]float64) sql.Scanner {
	return int8RangeToFloat64Array2{val: val}
}
// int8RangeFromIntArray2 adapts a [2]int to driver.Valuer.
type int8RangeFromIntArray2 struct {
	val [2]int
}

// Value encodes val as the PostgreSQL half-open int8range "[lo,hi)".
func (v int8RangeFromIntArray2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int8RangeToIntArray2 adapts a *[2]int to sql.Scanner.
type int8RangeToIntArray2 struct {
	val *[2]int
}

// Scan parses an int8range such as "[1,10)" into v.val. A nil source
// leaves val untouched; a missing (infinite) bound is stored as zero.
func (v int8RangeToIntArray2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	// bitSize 0 means "must fit in a native int": unlike the previous
	// hard-coded 64, this reports an out-of-range error on 32-bit
	// platforms instead of silently truncating in the int() casts below.
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 0); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 0); err != nil {
			return err
		}
	}
	v.val[0] = int(lo)
	v.val[1] = int(hi)
	return nil
}
// int8/int16/int32/int64 converters: the same shape as the int
// variants, with Scan parsing at the matching strconv bit size so that
// out-of-range range bounds surface as parse errors rather than
// silently truncated values.

type int8RangeFromInt8Array2 struct {
	val [2]int8
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromInt8Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToInt8Array2 struct {
	val *[2]int8
}

// Scan parses an int8range into v.val; bounds must fit in int8.
func (v int8RangeToInt8Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 8); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 8); err != nil {
			return err
		}
	}
	v.val[0] = int8(lo)
	v.val[1] = int8(hi)
	return nil
}

type int8RangeFromInt16Array2 struct {
	val [2]int16
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromInt16Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToInt16Array2 struct {
	val *[2]int16
}

// Scan parses an int8range into v.val; bounds must fit in int16.
func (v int8RangeToInt16Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 16); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 16); err != nil {
			return err
		}
	}
	v.val[0] = int16(lo)
	v.val[1] = int16(hi)
	return nil
}

type int8RangeFromInt32Array2 struct {
	val [2]int32
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromInt32Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToInt32Array2 struct {
	val *[2]int32
}

// Scan parses an int8range into v.val; bounds must fit in int32.
func (v int8RangeToInt32Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = int32(lo)
	v.val[1] = int32(hi)
	return nil
}

type int8RangeFromInt64Array2 struct {
	val [2]int64
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromInt64Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, v.val[0], 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, v.val[1], 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToInt64Array2 struct {
	val *[2]int64
}

// Scan parses an int8range into v.val; int64 needs no narrowing.
func (v int8RangeToInt64Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 64); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 64); err != nil {
			return err
		}
	}
	v.val[0] = lo
	v.val[1] = hi
	return nil
}
// int8RangeFromUintArray2 adapts a [2]uint to driver.Valuer.
type int8RangeFromUintArray2 struct {
	val [2]uint
}

// Value encodes val as the PostgreSQL half-open int8range "[lo,hi)".
func (v int8RangeFromUintArray2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int8RangeToUintArray2 adapts a *[2]uint to sql.Scanner.
type int8RangeToUintArray2 struct {
	val *[2]uint
}

// Scan parses an int8range such as "[1,10)" into v.val. A nil source
// leaves val untouched; a missing (infinite) bound is stored as zero.
func (v int8RangeToUintArray2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	// bitSize 0 means "must fit in a native uint": unlike the previous
	// hard-coded 64, this reports an out-of-range error on 32-bit
	// platforms instead of silently truncating in the uint() casts below.
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 0); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 0); err != nil {
			return err
		}
	}
	v.val[0] = uint(lo)
	v.val[1] = uint(hi)
	return nil
}
// uint8/uint16/uint32/uint64 converters: same shape as the signed
// variants, using ParseUint at the matching bit size; a negative range
// bound therefore surfaces as a parse error.

type int8RangeFromUint8Array2 struct {
	val [2]uint8
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromUint8Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToUint8Array2 struct {
	val *[2]uint8
}

// Scan parses an int8range into v.val; bounds must fit in uint8.
func (v int8RangeToUint8Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 8); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 8); err != nil {
			return err
		}
	}
	v.val[0] = uint8(lo)
	v.val[1] = uint8(hi)
	return nil
}

type int8RangeFromUint16Array2 struct {
	val [2]uint16
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromUint16Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToUint16Array2 struct {
	val *[2]uint16
}

// Scan parses an int8range into v.val; bounds must fit in uint16.
func (v int8RangeToUint16Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 16); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 16); err != nil {
			return err
		}
	}
	v.val[0] = uint16(lo)
	v.val[1] = uint16(hi)
	return nil
}

type int8RangeFromUint32Array2 struct {
	val [2]uint32
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromUint32Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, uint64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, uint64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToUint32Array2 struct {
	val *[2]uint32
}

// Scan parses an int8range into v.val; bounds must fit in uint32.
func (v int8RangeToUint32Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 32); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 32); err != nil {
			return err
		}
	}
	v.val[0] = uint32(lo)
	v.val[1] = uint32(hi)
	return nil
}

type int8RangeFromUint64Array2 struct {
	val [2]uint64
}

// Value encodes val as the half-open int8range "[lo,hi)".
func (v int8RangeFromUint64Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendUint(out, v.val[0], 10)
	out = append(out, ',')
	out = strconv.AppendUint(out, v.val[1], 10)
	out = append(out, ')')
	return out, nil
}

type int8RangeToUint64Array2 struct {
	val *[2]uint64
}

// Scan parses an int8range into v.val; uint64 needs no narrowing.
func (v int8RangeToUint64Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi uint64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseUint(string(elems[0]), 10, 64); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseUint(string(elems[1]), 10, 64); err != nil {
			return err
		}
	}
	v.val[0] = lo
	v.val[1] = hi
	return nil
}
// int8RangeFromFloat32Array2 wraps a [2]float32 for storage as a
// postgres int8range literal "[lo,hi)".
type int8RangeFromFloat32Array2 struct {
	val [2]float32
}

// Value implements driver.Valuer.
// NOTE(review): the float-to-int64 conversion truncates any fractional
// part toward zero — int8range holds integers only; confirm this is the
// intended rounding for callers storing non-integral floats.
func (v int8RangeFromFloat32Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}

// int8RangeToFloat32Array2 scans a postgres int8range into a [2]float32.
type int8RangeToFloat32Array2 struct {
	val *[2]float32
}

// Scan implements sql.Scanner. Bounds are parsed as signed 64-bit
// integers (int8range elements are integral) and then converted to
// float32. A nil source is a no-op; empty bounds stay zero.
func (v int8RangeToFloat32Array2) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	var lo, hi int64
	elems := pgParseRange(data)
	if len(elems[0]) > 0 {
		if lo, err = strconv.ParseInt(string(elems[0]), 10, 64); err != nil {
			return err
		}
	}
	if len(elems[1]) > 0 {
		if hi, err = strconv.ParseInt(string(elems[1]), 10, 64); err != nil {
			return err
		}
	}
	// NOTE(review): values beyond 2^24 lose precision in float32.
	v.val[0] = float32(lo)
	v.val[1] = float32(hi)
	return nil
}

// int8RangeFromFloat64Array2 wraps a [2]float64 for storage as a
// postgres int8range literal "[lo,hi)".
type int8RangeFromFloat64Array2 struct {
	val [2]float64
}

// Value implements driver.Valuer. As with the float32 variant, the
// fractional part is truncated toward zero.
func (v int8RangeFromFloat64Array2) Value() (driver.Value, error) {
	out := []byte{'['}
	out = strconv.AppendInt(out, int64(v.val[0]), 10)
	out = append(out, ',')
	out = strconv.AppendInt(out, int64(v.val[1]), 10)
	out = append(out, ')')
	return out, nil
}
type int8RangeToFloat64Array2 struct {
val *[2]float64
}
func (v int8RangeToFloat64Array2) Scan(src interface{}) error {
data, err := srcbytes(src)
if err != nil {
return err
} else if data == nil {
return nil
}
var lo, hi int64
elems := pgParseRange(data)
if len(elems[0]) > 0 {
if lo, err = strconv.ParseInt(string(elems[0]), 10, 64); err != nil {
return err
}
}
if len(elems[1]) > 0 {
if hi, err = strconv.ParseInt(string(elems[1]), 10, 64); err != nil {
return err
}
}
v.val[0] = float64(lo)
v.val[1] = float64(hi)
return nil
} | pgsql/int8range.go | 0.798698 | 0.497009 | int8range.go | starcoder |
package m64
import (
"math/big"
"math/bits"
"github.com/mmcloughlin/ec3/arith/eval"
"github.com/mmcloughlin/ec3/arith/ir"
"github.com/mmcloughlin/ec3/internal/bigint"
"github.com/mmcloughlin/ec3/internal/errutil"
)
// Word is a 64-bit machine word.
type Word uint64

// Bits returns the number of bits required to represent x; Bits(0) is 0.
func (x Word) Bits() uint {
	length := bits.Len64(uint64(x))
	return uint(length)
}
// Evaluator for arithmetic programs with 64-bit limbs. It wraps the
// generic eval.Evaluator with Word-typed register access.
type Evaluator struct {
	eval *eval.Evaluator
}

// NewEvaluator builds an evaluator backed by a fresh 64-bit Processor.
func NewEvaluator() *Evaluator {
	return &Evaluator{
		eval: eval.NewEvaluator(New()),
	}
}

// SetRegister sets register r to value x.
func (e *Evaluator) SetRegister(r ir.Register, x uint64) {
	e.eval.SetRegister(r, Word(x))
}

// Register returns the value in the given register, or an error if the
// register is unset or holds a non-Word value.
func (e *Evaluator) Register(r ir.Register) (uint64, error) {
	v, err := e.eval.Register(r)
	if err != nil {
		return 0, err
	}
	return u64(v)
}

// SetInt sets registers to the 64-bit limbs of x (little-endian limb
// order per bigint.Uint64s); any registers beyond the limbs of x are
// zeroed.
func (e *Evaluator) SetInt(z ir.Registers, x *big.Int) {
	limbs := bigint.Uint64s(x)
	for i, limb := range limbs {
		e.SetRegister(z[i], limb)
	}
	for i := len(limbs); i < len(z); i++ {
		e.SetRegister(z[i], 0)
	}
}

// Int returns the integer represented by the 64-bit limbs in the given registers.
func (e *Evaluator) Int(z ir.Registers) (*big.Int, error) {
	words := make([]uint64, len(z))
	for i, r := range z {
		word, err := e.Register(r)
		if err != nil {
			return nil, err
		}
		words[i] = word
	}
	return bigint.FromUint64s(words), nil
}

// Execute the program p.
func (e *Evaluator) Execute(p *ir.Program) error {
	return e.eval.Execute(p)
}
// Processor is a 64-bit arithmetic evaluator. Errors encountered while
// executing are accumulated rather than returned per-operation.
type Processor struct {
	errs errutil.Errors
}

// New builds a new 64-bit arithmetic processor.
func New() *Processor {
	return &Processor{}
}

// Errors returns any errors encountered during execution.
func (p *Processor) Errors() []error {
	return p.errs
}

// Bits returns the word size.
func (Processor) Bits() uint { return 64 }

// Const builds an n-bit constant. The width hint n is ignored here:
// every constant is represented as a full 64-bit Word.
func (Processor) Const(x uint64, n uint) eval.Value { return Word(x) }

// ITE returns x if l≡r else y.
func (p *Processor) ITE(l, r, x, y eval.Value) eval.Value {
	if p.u64(l) == p.u64(r) {
		return x
	}
	return y
}

// ADD executes an add with carry instruction.
func (p *Processor) ADD(x, y, cin eval.Value) (sum, cout eval.Value) {
	s, c := bits.Add64(p.u64(x), p.u64(y), p.u64(cin))
	return Word(s), Word(c)
}

// SUB executes a subtract with borrow instruction.
func (p *Processor) SUB(x, y, bin eval.Value) (diff, bout eval.Value) {
	d, b := bits.Sub64(p.u64(x), p.u64(y), p.u64(bin))
	return Word(d), Word(b)
}

// MUL executes a multiply instruction, returning the full 128-bit
// product as (hi, lo) words.
func (p *Processor) MUL(x, y eval.Value) (hi, lo eval.Value) {
	h, l := bits.Mul64(p.u64(x), p.u64(y))
	return Word(h), Word(l)
}

// SHL executes a shift left instruction.
func (p *Processor) SHL(x eval.Value, s uint) eval.Value {
	return Word(p.u64(x) << s)
}

// SHR executes a shift right instruction.
func (p *Processor) SHR(x eval.Value, s uint) eval.Value {
	return Word(p.u64(x) >> s)
}

// u64 casts v to uint64, recording an error and returning 0 if v is not
// a Word.
func (p *Processor) u64(v eval.Value) uint64 {
	x, err := u64(v)
	if err != nil {
		p.errs.Add(errutil.UnexpectedType(v))
		return 0
	}
	return x
}
// u64 type asserts v to a uint64.
func u64(v eval.Value) (uint64, error) {
if x, ok := v.(Word); ok {
return uint64(x), nil
}
return 0, errutil.UnexpectedType(v)
} | arith/eval/m64/m64.go | 0.723114 | 0.416144 | m64.go | starcoder |
package sunspec
// SunSpec register addresses:
// https://www.solaredge.com/sites/default/files/sunspec-implementation-technical-note.pdf
const (
	// Data-type discriminators for ModbusAddress.Type, describing how the
	// registers at an address should be decoded.
	Dt_uint16 = iota
	Dt_uint32
	Dt_int16
	Dt_string
	Dt_acc32
)

// ModbusAddress describes where and how a SunSpec value is stored in
// the modbus register map.
type ModbusAddress struct {
	// E.g. 40000
	Address uint16
	// Only needed for 'Type: string'
	Size uint16
	/**
	 * <b>Dt_uint16</b>
	 * <b>Dt_uint32</b>
	 * <b>Dt_int16</b>
	 * <b>Dt_string</b>
	 * <b>Dt_acc32</b>
	 */
	Type int
	Value interface{}
}

// Inverter Statuses (values of the I_Status register).
const (
	Ivs_I_STATUS_OFF = 1
	Ivs_I_STATUS_SLEEPING = 2
	Ivs_I_STATUS_STARTING = 3
	Ivs_I_STATUS_MPPT = 4
	Ivs_I_STATUS_THROTTLED = 5
	Ivs_I_STATUS_SHUTTING_DOWN = 6
	Ivs_I_STATUS_FAULT = 7
	Ivs_I_STATUS_STANDBY = 8
)
/*func GetModbusRegisters() map[string]ModbusAddress {
return Registers
}*/
// const registers map[string]ModbusAddress =
var Registers = map[string]ModbusAddress{
/** [SUNSPEC : COMMON BLOCK] **/
/*
// Value = "SunS" (0x53756e53). Uniquely identifies this as a SunSpec MODBUS Map
"C_SunSpec_ID": {Address: 40000, Type: Dt_uint32},
// Value = 0x0001. Uniquely identifies this as a SunSpec Common Model Block
"C_SunSpec_DID": {Address: 40002, Type: Dt_uint16},
// 65 = Length of block in 16-bit registers
"C_SunSpec_Length": {Address: 40003, Type: Dt_uint16},
// Value Registered with SunSpec = "SolarEdge"
"C_Manufacturer": {Address: 40004, Size: 32, Type: Dt_string},
// SolarEdge Specific Value
"C_Model": {Address: 40020, Size: 32, Type: Dt_string},
// SolarEdge Specific Value
"C_Version": {Address: 40044, Size: 16, Type: Dt_string},
// SolarEdge Unique Value
"C_SerialNumber": {Address: 40052, Size: 32, Type: Dt_string},
// MODBUS Unit ID
"C_DeviceAddress": {Address: 40068, Type: Dt_uint16},
/*
/** END of [SUNSPEC : COMMON BLOCK] **/
"C_SerialNumber": {Address: 40052, Size: 4, Type: Dt_string}, // Default: Size: 32
/** [SolarEdge Specific Registers] **/
// 101 = single phase, 102 = split phase, 103 = three phase
"C_SunSpec_DID": {Address: 40069, Type: Dt_uint16},
// 50 = Length of model block
"C_SunSpec_Length": {Address: 40070, Type: Dt_uint16},
// AC Total Current value
"I_AC_Current": {Address: 40071, Type: Dt_uint16},
// AC Phase A Current value
"I_AC_CurrentA": {Address: 40072, Type: Dt_uint16},
// AC Phase B Current value
"I_AC_CurrentB": {Address: 40073, Type: Dt_uint16},
// AC Phase C Current value
"I_AC_CurrentC": {Address: 40074, Type: Dt_uint16},
// AC Current scale factor
"I_AC_Current_SF": {Address: 40075, Type: Dt_int16},
// AC Voltage Phase AB value
"I_AC_VoltageAB": {Address: 40076, Type: Dt_uint16},
// AC Voltage Phase BC value
"I_AC_VoltageBC": {Address: 40077, Type: Dt_uint16},
// AC Voltage Phase CA value
"I_AC_VoltageCA": {Address: 40078, Type: Dt_uint16},
// AC Voltage Phase A to N value
"I_AC_VoltageAN": {Address: 40079, Type: Dt_uint16},
// AC Voltage Phase B to N value
"I_AC_VoltageBN": {Address: 40080, Type: Dt_uint16},
// AC Voltage Phase C to N value
"I_AC_VoltageCN": {Address: 40081, Type: Dt_uint16},
// AC Voltage scale factor
"I_AC_Voltage_SF": {Address: 40082, Type: Dt_int16},
// AC Power value
"I_AC_Power": {Address: 40083, Type: Dt_int16},
// AC Power scale factor
"I_AC_Power_SF": {Address: 40084, Type: Dt_int16},
// AC Frequency value
"I_AC_Frequency": {Address: 40085, Type: Dt_uint16},
// Scale factor
"I_AC_Frequency_SF": {Address: 40086, Type: Dt_int16},
// Apparent Power
"I_AC_VA": {Address: 40087, Type: Dt_int16},
// Scale factor
"I_AC_VA_SF": {Address: 40088, Type: Dt_int16},
// Reactive Power
"I_AC_VAR": {Address: 40089, Type: Dt_int16},
// Scale factor
"I_AC_VAR_SF": {Address: 40090, Type: Dt_int16},
// Power Factor (%)
"I_AC_PF": {Address: 40091, Type: Dt_int16},
// Scale factor
"I_AC_PF_SF": {Address: 40092, Type: Dt_int16},
// AC Lifetime Energy production (WattHours)
"I_AC_Energy_WH": {Address: 40093, Type: Dt_acc32},
// Scale factor
"I_AC_Energy_WH_SF": {Address: 40095, Type: Dt_int16}, // Data type typo 'uint16' where it _should_ be 'int16'
// DC Current value (Amps)
"I_DC_Current": {Address: 40096, Type: Dt_uint16},
// Scale factor
"I_DC_Current_SF": {Address: 40097, Type: Dt_int16},
// DC Voltage value (Volts)
"I_DC_Voltage": {Address: 40098, Type: Dt_uint16},
// Scale factor
"I_DC_Voltage_SF": {Address: 40099, Type: Dt_int16},
// DC Power value (Watts)
"I_DC_Power": {Address: 40100, Type: Dt_int16},
// Scale factor
"I_DC_Power_SF": {Address: 40101, Type: Dt_int16},
// Heat Sink Temperature (Degrees C)
"I_Temp_Sink": {Address: 40103, Type: Dt_int16},
// Scale factor
"I_Temp_SF": {Address: 40106, Type: Dt_int16},
// Operating State
"I_Status": {Address: 40107, Type: Dt_uint16},
// Vendor-defined operating state and error codes. For error description, meaning and troubleshooting, refer to the SolarEdge Installation Guide.
"I_Status_Vendor": {Address: 40108, Type: Dt_uint16},
/** End of [SolarEdge Specific Registers] **/
} | datamodels/sunspec/sunspec.go | 0.755276 | 0.447641 | sunspec.go | starcoder |
package encoder
import (
errors "golang.org/x/xerrors"
)
const (
	// Penalty weights from section 6.8.2.1
	// N1: runs of same-colored modules, N2: 2x2 same-colored blocks,
	// N3: finder-like 1:1:3:1:1 patterns, N4: dark/light imbalance.
	maskUtilN1 = 3
	maskUtilN2 = 3
	maskUtilN3 = 40
	maskUtilN4 = 10
)
// MaskUtil_applyMaskPenaltyRule1 Apply mask penalty rule 1 and return the penalty.
// Find repetitive cells with the same color and give penalty to them. Example: 00000 or 11111.
// The total is the sum of the horizontal (row-wise) and vertical
// (column-wise) run penalties.
func MaskUtil_applyMaskPenaltyRule1(matrix *ByteMatrix) int {
	return applyMaskPenaltyRule1Internal(matrix, true) + applyMaskPenaltyRule1Internal(matrix, false)
}
// MaskUtil_applyMaskPenaltyRule2 Apply mask penalty rule 2 and return the penalty.
// Find 2x2 blocks with the same color and give penalty to them.
// This is actually equivalent to the spec's rule, which is to find MxN blocks and give a penalty
// proportional to (M-1)x(N-1), because this is the number of 2x2 blocks inside such a block.
func MaskUtil_applyMaskPenaltyRule2(matrix *ByteMatrix) int {
	penalty := 0
	array := matrix.GetArray()
	width := matrix.GetWidth()
	height := matrix.GetHeight()
	// Each cell is compared with its right, lower, and lower-right
	// neighbors, so iteration stops one short of each edge.
	for y := 0; y < height-1; y++ {
		arrayY := array[y]
		for x := 0; x < width-1; x++ {
			value := arrayY[x]
			if value == arrayY[x+1] && value == array[y+1][x] && value == array[y+1][x+1] {
				penalty++
			}
		}
	}
	return maskUtilN2 * penalty
}
// MaskUtil_applyMaskPenaltyRule3 Apply mask penalty rule 3 and return the penalty.
// Find consecutive runs of 1:1:3:1:1:4 starting with black, or 4:1:1:3:1:1 starting with white,
// and give penalty to them. If we find patterns like 000010111010000, we give penalty once.
// These runs resemble the finder pattern and would confuse scanners.
func MaskUtil_applyMaskPenaltyRule3(matrix *ByteMatrix) int {
	numPenalties := 0
	array := matrix.GetArray()
	width := matrix.GetWidth()
	height := matrix.GetHeight()
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			arrayY := array[y] // We can at least optimize this access
			// Horizontal 1011101 core with 4 white modules on either side.
			if x+6 < width &&
				arrayY[x] == 1 &&
				arrayY[x+1] == 0 &&
				arrayY[x+2] == 1 &&
				arrayY[x+3] == 1 &&
				arrayY[x+4] == 1 &&
				arrayY[x+5] == 0 &&
				arrayY[x+6] == 1 &&
				(isWhiteHorizontal(arrayY, x-4, x) || isWhiteHorizontal(arrayY, x+7, x+11)) {
				numPenalties++
			}
			// Same check along the vertical axis.
			if y+6 < height &&
				array[y][x] == 1 &&
				array[y+1][x] == 0 &&
				array[y+2][x] == 1 &&
				array[y+3][x] == 1 &&
				array[y+4][x] == 1 &&
				array[y+5][x] == 0 &&
				array[y+6][x] == 1 &&
				(isWhiteVertical(array, x, y-4, y) || isWhiteVertical(array, x, y+7, y+11)) {
				numPenalties++
			}
		}
	}
	return numPenalties * maskUtilN3
}
// isWhiteHorizontal reports whether every module of rowArray in the
// half-open window [from, to) is white (0). The window is clamped to
// the row's bounds, so out-of-range endpoints are tolerated.
func isWhiteHorizontal(rowArray []int8, from, to int) bool {
	lo, hi := from, to
	if lo < 0 {
		lo = 0
	}
	if n := len(rowArray); hi > n {
		hi = n
	}
	if lo >= hi {
		return true // empty window after clamping
	}
	for _, cell := range rowArray[lo:hi] {
		if cell == 1 {
			return false
		}
	}
	return true
}
// isWhiteVertical reports whether every module of column col in the
// half-open row window [from, to) is white (0). The window is clamped
// to the matrix height, so out-of-range endpoints are tolerated.
func isWhiteVertical(array [][]int8, col, from, to int) bool {
	lo, hi := from, to
	if lo < 0 {
		lo = 0
	}
	if n := len(array); hi > n {
		hi = n
	}
	for row := lo; row < hi; row++ {
		if array[row][col] == 1 {
			return false
		}
	}
	return true
}
// MaskUtil_applyMaskPenaltyRule4 Apply mask penalty rule 4 and return the penalty.
// Calculate the ratio of dark cells and give penalty if the ratio is far from 50%.
// It gives 10 penalty for 5% distance.
func MaskUtil_applyMaskPenaltyRule4(matrix *ByteMatrix) int {
	numDarkCells := 0
	array := matrix.GetArray()
	width := matrix.GetWidth()
	height := matrix.GetHeight()
	for y := 0; y < height; y++ {
		arrayY := array[y]
		for x := 0; x < width; x++ {
			if arrayY[x] == 1 {
				numDarkCells++
			}
		}
	}
	numTotalCells := matrix.GetHeight() * matrix.GetWidth()
	// distance = |dark - light| in cell counts; integer math keeps the
	// 5%-step quantization of the spec.
	distance := numDarkCells*2 - numTotalCells
	if distance < 0 {
		distance = -distance
	}
	fivePercentVariances := distance * 10 / numTotalCells
	return fivePercentVariances * maskUtilN4
}
// MaskUtil_getDataMaskBit Return the mask bit for "getMaskPattern" at "x" and "y".
// See 8.8 of JISX0510:2004 for mask pattern conditions. The module is
// masked (bit set) when the pattern's condition evaluates to zero.
// An error is returned for mask patterns outside 0..7.
//
// This revision removes the redundant `break` statements left over from
// the Java port — Go switch cases do not fall through.
func MaskUtil_getDataMaskBit(maskPattern, x, y int) (bool, error) {
	var intermediate int
	switch maskPattern {
	case 0:
		intermediate = (y + x) & 0x1
	case 1:
		intermediate = y & 0x1
	case 2:
		intermediate = x % 3
	case 3:
		intermediate = (y + x) % 3
	case 4:
		intermediate = ((y / 2) + (x / 3)) & 0x1
	case 5:
		temp := y * x
		intermediate = (temp & 0x1) + (temp % 3)
	case 6:
		temp := y * x
		intermediate = ((temp & 0x1) + (temp % 3)) & 0x1
	case 7:
		temp := y * x
		intermediate = ((temp % 3) + ((y + x) & 0x1)) & 0x1
	default:
		return false, errors.Errorf("IllegalArgumentException: Invalid mask pattern: %d", maskPattern)
	}
	return intermediate == 0, nil
}
func applyMaskPenaltyRule1Internal(matrix *ByteMatrix, isHorizontal bool) int {
penalty := 0
iLimit := matrix.GetWidth()
jLimit := matrix.GetHeight()
if isHorizontal {
iLimit, jLimit = jLimit, iLimit
}
array := matrix.GetArray()
for i := 0; i < iLimit; i++ {
numSameBitCells := 0
prevBit := -1
for j := 0; j < jLimit; j++ {
var bit int
if isHorizontal {
bit = int(array[i][j])
} else {
bit = int(array[j][i])
}
if bit == prevBit {
numSameBitCells++
} else {
if numSameBitCells >= 5 {
penalty += maskUtilN1 + (numSameBitCells - 5)
}
numSameBitCells = 1 // Include the cell itself.
prevBit = bit
}
}
if numSameBitCells >= 5 {
penalty += maskUtilN1 + (numSameBitCells - 5)
}
}
return penalty
} | qrcode/encoder/mask_util.go | 0.677794 | 0.578418 | mask_util.go | starcoder |
package main
import (
"fmt"
"image/color"
"math/rand"
"os"
"time"
"github.com/hajimehoshi/ebiten"
"github.com/hajimehoshi/ebiten/ebitenutil"
)
// World is a dense width x height grid of cells stored row-major in a
// flat slice (index = x + y*width).
type World struct {
	cells []Cell
	width int
	height int
}

// NewWorld allocates a world of dead (zero-value) cells.
func NewWorld(width, height int) *World {
	return &World{
		cells: make([]Cell, width*height),
		width: width,
		height: height,
	}
}

// isInside reports whether (x, y) lies within the grid bounds.
func (wl World) isInside(x, y int) bool {
	return x >= 0 && x < wl.width && y >= 0 && y < wl.height
}

// Look reports whether the neighbor of (x, y) in direction dir is
// alive; out-of-bounds origins and neighbors count as dead.
func (wl World) Look(x, y int, dir Vector) bool {
	if !wl.isInside(x, y) {
		return false
	}
	return wl.Plus(x, y, dir)
}

// getCell returns the aliveness of (x, y). Callers must ensure the
// coordinates are in bounds.
func (wl World) getCell(x, y int) bool {
	return wl.cells[x+(y*wl.width)].Alive
}

// Plus reports whether the cell at (x, y) offset by vec is alive,
// treating out-of-bounds positions as dead.
func (wl World) Plus(x, y int, vec Vector) bool {
	return wl.isInside(x+vec.x, y+vec.y) && wl.getCell(x+vec.x, y+vec.y)
}
// setCell replaces the cell at (x, y) with a fresh cell of the given
// aliveness.
// NOTE(review): out-of-range coordinates print a message and terminate
// the whole process via os.Exit(1) — consider returning an error or
// panicking instead so callers/tests can recover.
func (wl *World) setCell(x, y int, alive bool) {
	if !wl.isInside(x, y) {
		fmt.Printf("Coordinates %v and %v are not in range! \n", x, y)
		os.Exit(1)
	}
	wl.cells[x+(y*wl.width)] = NewCell(alive)
}
// GenerateGrid marks `percent` percent of the cells alive and then
// shuffles the grid (Fisher–Yates) so the alive cells land at random
// positions; the remaining cells keep their zero (dead) value.
//
// Fixes in this revision:
//   - the fill loop previously ran over indices 1..n, skipping index 0
//     and writing one past the end (a panic) when percent == 100;
//   - `cellsClone := wl.cells` aliased the same backing array, so the
//     clone bookkeeping was a no-op and has been dropped.
func (wl *World) GenerateGrid(percent int) {
	alive := percent * len(wl.cells) / 100
	for i := 0; i < alive; i++ {
		wl.cells[i] = NewCell(true)
	}
	// Seeding from the clock preserves the original behavior of a
	// different random grid on each run.
	rng := rand.New(rand.NewSource(time.Now().Unix()))
	for i := len(wl.cells) - 1; i > 0; i-- {
		j := rng.Intn(i + 1)
		wl.cells[i], wl.cells[j] = wl.cells[j], wl.cells[i]
	}
}
// Next advances the world one generation in place. The previous
// generation is copied first so every cell's next state is computed
// from the same snapshot.
func (wl *World) Next() {
	oldWorld := NewWorld(wl.width, wl.height)
	copy(oldWorld.cells, wl.cells)
	for y := 0; y < oldWorld.height; y++ {
		for x := 0; x < oldWorld.width; x++ {
			cell := NewCell(oldWorld.getCell(x, y))
			count := oldWorld.findNeighbours(x, y)
			// NextState presumably applies the life rules given the
			// neighbor count — see Cell's definition.
			cell.NextState(count)
			wl.setCell(x, y, cell.Alive)
		}
	}
}

// findNeighbours counts the live neighbors of (x, y) across all eight
// directions; off-grid neighbors count as dead.
func (wl World) findNeighbours(x, y int) (count int) {
	for _, direction := range DirectionNames {
		if wl.Look(x, y, Directions[direction]) {
			count++
		}
	}
	return
}
func (wl World) Print(background *ebiten.Image) {
for y := 0; y < wl.height; y++ {
for x := 0; x < wl.width; x++ {
renderCharacter(x, y, wl.getCell(x, y), background)
}
}
}
func renderCharacter(x, y int, isAlive bool, background *ebiten.Image) {
if isAlive {
ebitenutil.DrawRect(background, float64(x), float64(y), 1, 1, color.RGBA{255, 0, 0, 255})
}
} | world.go | 0.588416 | 0.435661 | world.go | starcoder |
package main
import (
"github.com/xidongc/go-leetcode/utils"
"math"
)
/*
55 jump game
Given an array of non-negative integers nums, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
co-ordinate dp, O(n*n) where dp[i] if you can reach i-th pos,
dp[i] = dp[j] && OR(nums[j] + j >= i), dp[0] = True
Example 1:
Input: nums = [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: nums = [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum jump length is 0, which makes it impossible to reach the last index.
*/
// canJump reports whether the last index of nums is reachable starting
// from index 0, where nums[i] is the maximum jump length from i.
// An empty slice is unreachable by definition (matches the previous
// behavior).
//
// Replaces the O(n^2) DP with the standard O(n)/O(1) greedy: track the
// furthest index reachable so far; if the scan ever passes it, the end
// is unreachable.
func canJump(nums []int) bool {
	if len(nums) == 0 {
		return false
	}
	reach := 0
	for i, step := range nums {
		if i > reach {
			return false
		}
		if i+step > reach {
			reach = i + step
		}
	}
	return true
}
/*
45 jump game II
Given an array of non-negative integers nums, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
You can assume that you can always reach the last index.
dp[i] = Min(dp[j]+1 && nums[j] + j >= i && j < i)
dp[i] = Integer.MaxValue if can't reach pos i
dp[0] = 0
Example 1:
Input: nums = [2,3,1,1,4]
Output: 2
Explanation: The minimum number of jumps to reach the last index is 2. Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: nums = [2,3,0,1,4]
Output: 2
*/
func jump(nums []int) int {
if len(nums) <= 1 {
return 0
}
dp := make([]int, len(nums), len(nums))
dp[0] = 0
for i := 1; i < len(dp); i++ {
dp[i] = math.MaxInt64 // assume 64 bit server
}
for i := 1; i < len(nums); i++ {
for j := 0; j < i; j++ {
if dp[j] != math.MaxInt64 && nums[j] + j >= i {
dp[i] = utils.Min(dp[i], dp[j]+1)
}
}
}
return dp[len(dp)-1]
} | dp/55-jump-game.go | 0.753376 | 0.521045 | 55-jump-game.go | starcoder |
package pktline
// Utility functions for working with the Git pkt-line format. See
// https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
)
const (
	// maxPktSize is the largest total pkt-line length encodable in the
	// 4-hex-digit header (header included).
	maxPktSize = 0xffff
	// pktDelim is the delim-pkt marker.
	pktDelim = "0001"
)

var (
	// flush is the flush-pkt marker.
	flush = []byte("0000")
)

// NewScanner returns a bufio.Scanner that splits on Git pktline boundaries.
// Tokens include the 4-byte length prefix; use Data to strip it.
func NewScanner(r io.Reader) *bufio.Scanner {
	scanner := bufio.NewScanner(r)
	scanner.Buffer(make([]byte, maxPktSize), maxPktSize)
	scanner.Split(pktLineSplitter)
	return scanner
}
// Data returns the payload of pkt, i.e. the bytes after the 4-byte
// length header. The header is not validated. For magic packets such
// as '0000' the result is empty.
func Data(pkt []byte) []byte {
	const headerLen = 4
	return pkt[headerLen:]
}
// IsFlush detects the special flush packet '0000'.
func IsFlush(pkt []byte) bool {
	return bytes.Equal(pkt, flush)
}
// WriteString writes str to w with pkt-line framing: a 4-hex-digit
// length header (counting itself) followed by the payload. The
// returned count is the payload length. Strings whose framed size
// exceeds the pkt-line maximum are rejected.
func WriteString(w io.Writer, str string) (int, error) {
	pktLen := len(str) + 4
	if pktLen > maxPktSize {
		return 0, fmt.Errorf("string too large: %d bytes", len(str))
	}
	if _, err := fmt.Fprintf(w, "%04x%s", pktLen, str); err != nil {
		return len(str), err
	}
	return len(str), nil
}
// WriteFlush writes a pkt flush packet ("0000") to w.
func WriteFlush(w io.Writer) error {
	_, err := w.Write(flush)
	return err
}

// WriteDelim writes a pkt delim packet ("0001") to w.
func WriteDelim(w io.Writer) error {
	_, err := fmt.Fprint(w, pktDelim)
	return err
}
// pktLineSplitter is a bufio.SplitFunc for pkt-line framing: each
// packet begins with a 4-hex-digit length that counts the header
// itself. Tokens keep the length prefix. Lengths 0–3 are "magic"
// packets (flush 0000, delim 0001, ...) returned as the bare header.
//
// This revision only removes stray non-Go text appended to the closing
// brace; the parsing is unchanged.
func pktLineSplitter(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if len(data) < 4 {
		if atEOF && len(data) > 0 {
			return 0, nil, fmt.Errorf("pktLineSplitter: incomplete length prefix on %q", data)
		}
		return 0, nil, nil // want more data
	}
	// We have at least 4 bytes, so we can decode the 4-hex-digit
	// length prefix of the packet line.
	pktLength64, err := strconv.ParseInt(string(data[:4]), 16, 0)
	if err != nil {
		return 0, nil, fmt.Errorf("pktLineSplitter: decode length: %v", err)
	}
	// Cast is safe because we requested an int-size number from strconv.ParseInt
	pktLength := int(pktLength64)
	if pktLength < 0 {
		return 0, nil, fmt.Errorf("pktLineSplitter: invalid length: %d", pktLength)
	}
	if pktLength < 4 {
		// Special case: magic empty packet 0000, 0001, 0002 or 0003.
		return 4, data[:4], nil
	}
	if len(data) < pktLength {
		// data contains incomplete packet
		if atEOF {
			return 0, nil, fmt.Errorf("pktLineSplitter: less than %d bytes in input %q", pktLength, data)
		}
		return 0, nil, nil // want more data
	}
	return pktLength, data[:pktLength], nil
}
package pg_query
// MakeStrNode wraps a Go string in a parse-tree String node.
func MakeStrNode(str string) *Node {
	return &Node{Node: &Node_String_{String_: &String{Str: str}}}
}

// MakeAConstStrNode builds a string constant (A_Const) node.
func MakeAConstStrNode(str string, location int32) *Node {
	return &Node{
		Node: &Node_AConst{
			AConst: &A_Const{
				Val: MakeStrNode(str),
				Location: location,
			},
		},
	}
}

// MakeIntNode wraps an integer in a parse-tree Integer node.
// Note the narrowing to int32, matching the protobuf field width.
func MakeIntNode(ival int64) *Node {
	return &Node{Node: &Node_Integer{Integer: &Integer{Ival: int32(ival)}}}
}

// MakeAConstIntNode builds an integer constant (A_Const) node.
func MakeAConstIntNode(ival int64, location int32) *Node {
	return &Node{
		Node: &Node_AConst{
			AConst: &A_Const{
				Val: MakeIntNode(ival),
				Location: location,
			},
		},
	}
}

// MakeListNode wraps a slice of nodes in a List node.
func MakeListNode(items []*Node) *Node {
	return &Node{Node: &Node_List{List: &List{Items: items}}}
}

// MakeResTargetNodeWithName builds a ResTarget carrying only a name.
func MakeResTargetNodeWithName(name string, location int32) *Node {
	return &Node{Node: &Node_ResTarget{ResTarget: &ResTarget{Name: name, Location: location}}}
}

// MakeResTargetNodeWithVal builds a ResTarget carrying only a value.
func MakeResTargetNodeWithVal(val *Node, location int32) *Node {
	return &Node{Node: &Node_ResTarget{ResTarget: &ResTarget{Val: val, Location: location}}}
}

// MakeResTargetNodeWithNameAndVal builds a ResTarget with both name and value.
func MakeResTargetNodeWithNameAndVal(name string, val *Node, location int32) *Node {
	return &Node{Node: &Node_ResTarget{ResTarget: &ResTarget{Name: name, Val: val, Location: location}}}
}

// MakeSimpleRangeVar builds a RangeVar with inheritance enabled and
// permanent persistence ("p"), the defaults for an ordinary table.
func MakeSimpleRangeVar(relname string, location int32) *RangeVar {
	return &RangeVar{
		Relname: relname,
		Inh: true,
		Relpersistence: "p",
		Location: location,
	}
}

// MakeSimpleRangeVarNode wraps MakeSimpleRangeVar in a Node.
func MakeSimpleRangeVarNode(relname string, location int32) *Node {
	return &Node{
		Node: &Node_RangeVar{
			RangeVar: MakeSimpleRangeVar(relname, location),
		},
	}
}

// MakeFullRangeVar builds a schema-qualified, aliased RangeVar for an
// ordinary (permanent, inheriting) table.
func MakeFullRangeVar(schemaname string, relname string, alias string, location int32) *RangeVar {
	return &RangeVar{
		Schemaname: schemaname,
		Relname: relname,
		Inh: true,
		Relpersistence: "p",
		Alias: &Alias{
			Aliasname: alias,
		},
		Location: location,
	}
}

// MakeFullRangeVarNode wraps MakeFullRangeVar in a Node.
func MakeFullRangeVarNode(schemaname string, relname string, alias string, location int32) *Node {
	return &Node{
		Node: &Node_RangeVar{
			RangeVar: MakeFullRangeVar(schemaname, relname, alias, location),
		},
	}
}
// MakeParamRefNode builds a parameter reference ($n) node.
func MakeParamRefNode(number int32, location int32) *Node {
	return &Node{
		Node: &Node_ParamRef{
			ParamRef: &ParamRef{Number: number, Location: location},
		},
	}
}

// MakeColumnRefNode builds a column reference from its field nodes
// (e.g. schema/table/column name strings or A_Star).
func MakeColumnRefNode(fields []*Node, location int32) *Node {
	return &Node{
		Node: &Node_ColumnRef{
			ColumnRef: &ColumnRef{Fields: fields, Location: location},
		},
	}
}

// MakeAStarNode builds the "*" node used inside column references.
func MakeAStarNode() *Node {
	return &Node{
		Node: &Node_AStar{
			AStar: &A_Star{},
		},
	}
}

// MakeCaseExprNode builds a CASE expression; arg may be nil for the
// searched form, and args holds the CaseWhen branches.
func MakeCaseExprNode(arg *Node, args []*Node, location int32) *Node {
	return &Node{
		Node: &Node_CaseExpr{
			CaseExpr: &CaseExpr{
				Arg: arg,
				Args: args,
				Location: location,
			},
		},
	}
}

// MakeCaseWhenNode builds one WHEN expr THEN result branch.
func MakeCaseWhenNode(expr *Node, result *Node, location int32) *Node {
	return &Node{
		Node: &Node_CaseWhen{
			CaseWhen: &CaseWhen{
				Expr: expr,
				Result: result,
				Location: location,
			},
		},
	}
}

// MakeFuncCallNode builds a function call from its (possibly qualified)
// name nodes and argument nodes.
func MakeFuncCallNode(funcname []*Node, args []*Node, location int32) *Node {
	return &Node{
		Node: &Node_FuncCall{
			FuncCall: &FuncCall{
				Funcname: funcname,
				Args: args,
				Location: location,
			},
		},
	}
}
// MakeJoinExprNode builds a JOIN between larg and rarg with the given
// join type and ON-clause qualifications.
func MakeJoinExprNode(jointype JoinType, larg *Node, rarg *Node, quals *Node) *Node {
	return &Node{
		Node: &Node_JoinExpr{
			JoinExpr: &JoinExpr{
				Jointype: jointype,
				Larg: larg,
				Rarg: rarg,
				Quals: quals,
			},
		},
	}
}

// MakeAExprNode builds an operator expression (A_Expr) such as
// "lexpr <op> rexpr"; name holds the operator name node(s).
func MakeAExprNode(kind A_Expr_Kind, name []*Node, lexpr *Node, rexpr *Node, location int32) *Node {
	return &Node{
		Node: &Node_AExpr{
			AExpr: &A_Expr{
				Kind: kind,
				Name: name,
				Lexpr: lexpr,
				Rexpr: rexpr,
				Location: location,
			},
		},
	}
}

// MakeBoolExprNode builds an AND/OR/NOT expression over args.
func MakeBoolExprNode(boolop BoolExprType, args []*Node, location int32) *Node {
	return &Node{
		Node: &Node_BoolExpr{
			BoolExpr: &BoolExpr{
				Boolop: boolop,
				Args: args,
				Location: location,
			},
		},
	}
}

// MakeSortByNode builds an ORDER BY item with direction and NULLS
// ordering.
func MakeSortByNode(node *Node, sortbyDir SortByDir, sortbyNulls SortByNulls, location int32) *Node {
	return &Node{
		Node: &Node_SortBy{
			SortBy: &SortBy{
				Node: node,
				SortbyDir: sortbyDir,
				SortbyNulls: sortbyNulls,
				Location: location,
			},
		},
	}
}

// MakeSimpleDefElemNode builds a definition element (name = arg) with
// the unspecified default action.
func MakeSimpleDefElemNode(defname string, arg *Node, location int32) *Node {
	return &Node{
		Node: &Node_DefElem{
			DefElem: &DefElem{
				Defname: defname,
				Arg: arg,
				Defaction: DefElemAction_DEFELEM_UNSPEC,
				Location: location,
			},
		},
	}
}

// MakeSimpleColumnDefNode builds a locally-defined column with the
// given type and constraints.
func MakeSimpleColumnDefNode(colname string, typeName *TypeName, constraints []*Node, location int32) *Node {
	return &Node{
		Node: &Node_ColumnDef{
			ColumnDef: &ColumnDef{
				Colname: colname,
				TypeName: typeName,
				Constraints: constraints,
				IsLocal: true,
				Location: location,
			},
		},
	}
}
func MakePrimaryKeyConstraintNode(location int32) *Node {
return &Node{
Node: &Node_Constraint{
Constraint: &Constraint{
Contype: ConstrType_CONSTR_PRIMARY,
Location: location,
},
},
}
}
func MakeNotNullConstraintNode(location int32) *Node {
return &Node{
Node: &Node_Constraint{
Constraint: &Constraint{
Contype: ConstrType_CONSTR_NOTNULL,
Location: location,
},
},
}
}
func MakeDefaultConstraintNode(rawExpr *Node, location int32) *Node {
return &Node{
Node: &Node_Constraint{
Constraint: &Constraint{
Contype: ConstrType_CONSTR_DEFAULT,
RawExpr: rawExpr,
Location: location,
},
},
}
}
func MakeSimpleRangeFunctionNode(functions []*Node) *Node {
return &Node{
Node: &Node_RangeFunction{
RangeFunction: &RangeFunction{
Functions: functions,
},
},
}
} | vendor/github.com/pganalyze/pg_query_go/v2/makefuncs.go | 0.679604 | 0.428532 | makefuncs.go | starcoder |
package iso20022
// Creation/cancellation of investment units on the books of the fund or its designated agent, as a result of executing an investment fund order.
// Pointer-typed fields with an omitempty tag are optional in the XML
// representation; use the Set*/Add* helper methods to populate them.
type InvestmentFundTransaction2 struct {
	// Type of investment fund transaction.
	TransactionType *TransactionType1CodeChoice `xml:"TxTp"`
	// Type of corporate action event.
	CorporateActionEventType *CorporateActionEventType1CodeChoice `xml:"CorpActnEvtTp"`
	// Status of an investment fund transaction.
	BookingStatus *TransactionStatus1Code `xml:"BookgSts,omitempty"`
	// Reference assigned to a set of orders or trades in order to link them together.
	MasterReference *Max35Text `xml:"MstrRef,omitempty"`
	// Unique identifier for an order, as assigned by the sell-side. The identifier must be unique within a single trading day.
	OrderReference *Max35Text `xml:"OrdrRef,omitempty"`
	// Unique and unambiguous identifier for an order execution, as assigned by a confirming party.
	DealReference *Max35Text `xml:"DealRef,omitempty"`
	// Unique technical identifier for an instance of a leg within a switch.
	LegIdentification *Max35Text `xml:"LegId,omitempty"`
	// Unique identifier for an instance of a leg execution within a switch confirmation.
	LegExecutionIdentification *Max35Text `xml:"LegExctnId,omitempty"`
	// Date and time at which the order was placed by the investor.
	OrderDateTime *ISODateTime `xml:"OrdrDtTm,omitempty"`
	// Indicates whether the cash payment with respect to the executed order is settled.
	SettledTransactionIndicator *YesNoIndicator `xml:"SttldTxInd"`
	// Indicates whether the executed order has a registered status on the books of the transfer agent.
	RegisteredTransactionIndicator *YesNoIndicator `xml:"RegdTxInd"`
	// Number of investment funds units.
	UnitsQuantity *FinancialInstrumentQuantity1 `xml:"UnitsQty"`
	// Direction of the transaction being reported, ie, securities are received (credited) or delivered (debited).
	CreditDebit *CreditDebitCode `xml:"CdtDbt"`
	// Transaction being reported is a reversal of previously reported transaction.
	Reversal *ReversalCode `xml:"Rvsl,omitempty"`
	// Amount of money to be moved between the debtor and creditor, before deduction of charges, expressed in the currency as ordered by the initiating party.
	GrossSettlementAmount *ActiveCurrencyAndAmount `xml:"GrssSttlmAmt,omitempty"`
	// Date on which the debtor expects the amount of money to be available to the creditor.
	SettlementDate *ISODate `xml:"SttlmDt,omitempty"`
	// Date and time at which a price is applied, according to the terms stated in the prospectus.
	TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"`
	// Indicates whether the dividend is included, ie, cum-dividend, in the executed price. When the dividend is not included, the price will be ex-dividend.
	CumDividendIndicator *YesNoIndicator `xml:"CumDvddInd"`
	// Indicates whether the order has been partially executed, ie, the confirmed quantity does not match the ordered quantity for a given financial instrument.
	PartiallyExecutedIndicator *YesNoIndicator `xml:"PrtlyExctdInd"`
	// Price at which the order was executed.
	PriceDetails *UnitPrice1 `xml:"PricDtls,omitempty"`
}
// AddTransactionType allocates and returns the transaction type choice.
func (i *InvestmentFundTransaction2) AddTransactionType() *TransactionType1CodeChoice {
	i.TransactionType = new(TransactionType1CodeChoice)
	return i.TransactionType
}

// AddCorporateActionEventType allocates and returns the corporate action event type choice.
func (i *InvestmentFundTransaction2) AddCorporateActionEventType() *CorporateActionEventType1CodeChoice {
	i.CorporateActionEventType = new(CorporateActionEventType1CodeChoice)
	return i.CorporateActionEventType
}

// SetBookingStatus sets the transaction's booking status code.
func (i *InvestmentFundTransaction2) SetBookingStatus(value string) {
	i.BookingStatus = (*TransactionStatus1Code)(&value)
}

// SetMasterReference sets the linking master reference.
func (i *InvestmentFundTransaction2) SetMasterReference(value string) {
	i.MasterReference = (*Max35Text)(&value)
}

// SetOrderReference sets the sell-side order identifier.
func (i *InvestmentFundTransaction2) SetOrderReference(value string) {
	i.OrderReference = (*Max35Text)(&value)
}

// SetDealReference sets the order execution identifier.
func (i *InvestmentFundTransaction2) SetDealReference(value string) {
	i.DealReference = (*Max35Text)(&value)
}

// SetLegIdentification sets the switch-leg identifier.
func (i *InvestmentFundTransaction2) SetLegIdentification(value string) {
	i.LegIdentification = (*Max35Text)(&value)
}

// SetLegExecutionIdentification sets the switch-leg execution identifier.
func (i *InvestmentFundTransaction2) SetLegExecutionIdentification(value string) {
	i.LegExecutionIdentification = (*Max35Text)(&value)
}

// SetOrderDateTime sets when the investor placed the order.
func (i *InvestmentFundTransaction2) SetOrderDateTime(value string) {
	i.OrderDateTime = (*ISODateTime)(&value)
}

// SetSettledTransactionIndicator records whether the cash payment is settled.
func (i *InvestmentFundTransaction2) SetSettledTransactionIndicator(value string) {
	i.SettledTransactionIndicator = (*YesNoIndicator)(&value)
}

// SetRegisteredTransactionIndicator records whether the order is registered with the transfer agent.
func (i *InvestmentFundTransaction2) SetRegisteredTransactionIndicator(value string) {
	i.RegisteredTransactionIndicator = (*YesNoIndicator)(&value)
}

// AddUnitsQuantity allocates and returns the units quantity.
func (i *InvestmentFundTransaction2) AddUnitsQuantity() *FinancialInstrumentQuantity1 {
	i.UnitsQuantity = new(FinancialInstrumentQuantity1)
	return i.UnitsQuantity
}

// SetCreditDebit sets the credit/debit direction code.
func (i *InvestmentFundTransaction2) SetCreditDebit(value string) {
	i.CreditDebit = (*CreditDebitCode)(&value)
}

// SetReversal marks the transaction as a reversal.
func (i *InvestmentFundTransaction2) SetReversal(value string) {
	i.Reversal = (*ReversalCode)(&value)
}

// SetGrossSettlementAmount sets the pre-charge settlement amount and its currency.
func (i *InvestmentFundTransaction2) SetGrossSettlementAmount(value, currency string) {
	i.GrossSettlementAmount = NewActiveCurrencyAndAmount(value, currency)
}
func (i *InvestmentFundTransaction2) SetSettlementDate(value string) {
i.SettlementDate = (*ISODate)(&value)
}
func (i *InvestmentFundTransaction2) AddTradeDateTime() *DateAndDateTimeChoice {
i.TradeDateTime = new(DateAndDateTimeChoice)
return i.TradeDateTime
}
func (i *InvestmentFundTransaction2) SetCumDividendIndicator(value string) {
i.CumDividendIndicator = (*YesNoIndicator)(&value)
}
func (i *InvestmentFundTransaction2) SetPartiallyExecutedIndicator(value string) {
i.PartiallyExecutedIndicator = (*YesNoIndicator)(&value)
}
func (i *InvestmentFundTransaction2) AddPriceDetails() *UnitPrice1 {
i.PriceDetails = new(UnitPrice1)
return i.PriceDetails
} | InvestmentFundTransaction2.go | 0.809238 | 0.457985 | InvestmentFundTransaction2.go | starcoder |
package canvas
import (
"math"
)
// Path2D accumulates the points of a 2D path for a Canvas, together with
// bookkeeping used to track winding direction, convexity and self-intersection.
type Path2D struct {
	cv    *Canvas     // canvas this path was created from
	p     []pathPoint // recorded path points, in order
	move  vec         // start point of the current subpath (set by MoveTo)
	cwSum float64     // running shoelace-style sum used for the clockwise test in lineTo
}

// pathPoint is a single point of a path plus flags describing its role.
type pathPoint struct {
	pos   vec           // position of this point
	next  vec           // position of the following point (valid only when pathAttach is set)
	flags pathPointFlag // properties of the subpath up to and including this point
}

// pathPointFlag is a bit set of per-point path properties.
type pathPointFlag uint8

const (
	pathMove           pathPointFlag = 1 << iota // point starts a new subpath
	pathAttach                                   // the next field is valid
	pathIsRect                                   // subpath is an axis-aligned rectangle
	pathIsConvex                                 // subpath is convex so far
	pathIsClockwise                              // subpath winds clockwise so far
	pathSelfIntersects                           // subpath intersects itself
)
// NewPath2D creates a new, empty Path2D bound to this canvas and returns it.
// The point slice is pre-sized to avoid reallocation for short paths.
func (cv *Canvas) NewPath2D() *Path2D {
	path := &Path2D{cv: cv}
	path.p = make([]pathPoint, 0, 20)
	return path
}
// func (p *Path2D) AddPath(p2 *Path2D) {
// }
// MoveTo (see equivalent function on canvas type)
//
// MoveTo starts a new subpath at (x, y). A move to (nearly) the same point
// as the last recorded point is ignored.
func (p *Path2D) MoveTo(x, y float64) {
	if len(p.p) > 0 && isSamePoint(p.p[len(p.p)-1].pos, vec{x, y}, 0.1) {
		return
	}
	p.p = append(p.p, pathPoint{pos: vec{x, y}, flags: pathMove}) // todo more flags probably
	// Reset the winding sum and remember the subpath start; both are used by
	// lineTo's clockwise test and by ClosePath.
	p.cwSum = 0
	p.move = vec{x, y}
}
// LineTo (see equivalent function on canvas type)
//
// LineTo appends a straight segment from the current point to (x, y).
// The public entry point always enables the self-intersection check.
func (p *Path2D) LineTo(x, y float64) {
	p.lineTo(x, y, true)
}
// lineTo appends a segment to (x, y) and incrementally updates the winding,
// convexity and self-intersection flags of the new point.
// checkSelfIntersection may be disabled by callers (e.g. Arc) that generate
// many tiny segments and know the check is unnecessary or too expensive.
func (p *Path2D) lineTo(x, y float64, checkSelfIntersection bool) {
	count := len(p.p)
	// Ignore degenerate segments to (nearly) the same point.
	if count > 0 && isSamePoint(p.p[len(p.p)-1].pos, vec{x, y}, 0.1) {
		return
	}
	// A line on an empty path acts like a move.
	if count == 0 {
		p.MoveTo(x, y)
		return
	}
	prev := &p.p[count-1]
	prev.next = vec{x, y}
	prev.flags |= pathAttach
	// NOTE(review): append may reallocate p.p, after which prev points into the
	// old backing array; it is only read (not written) below, so the stale copy
	// still holds the correct values.
	p.p = append(p.p, pathPoint{pos: vec{x, y}})
	newp := &p.p[count]
	// Shoelace-style running sum; closing the loop back to p.move gives the
	// signed area, whose sign determines the winding direction.
	px, py := prev.pos[0], prev.pos[1]
	p.cwSum += (x - px) * (y + py)
	cwTotal := p.cwSum
	cwTotal += (p.move[0] - x) * (p.move[1] + y)
	if cwTotal <= 0 {
		newp.flags |= pathIsClockwise
	}
	// Self-intersection is sticky: once set it propagates to all later points.
	if prev.flags&pathSelfIntersects > 0 {
		newp.flags |= pathSelfIntersects
	}
	// Fewer than 3 segments are always convex; convexity can also be assumed
	// via the Performance switch.
	if len(p.p) < 4 || Performance.AssumeConvex {
		newp.flags |= pathIsConvex
	} else if prev.flags&pathIsConvex > 0 {
		// Check whether the new segment cuts any earlier segment.
		cuts := false
		var cutPoint vec
		if checkSelfIntersection && !Performance.IgnoreSelfIntersections {
			b0, b1 := prev.pos, vec{x, y}
			for i := 1; i < count; i++ {
				a0, a1 := p.p[i-1].pos, p.p[i].pos
				var r1, r2 float64
				cutPoint, r1, r2 = lineIntersection(a0, a1, b0, b1)
				// Strict interior intersection on both segments.
				if r1 > 0 && r1 < 1 && r2 > 0 && r2 < 1 {
					cuts = true
					break
				}
			}
		}
		if cuts && !isSamePoint(cutPoint, vec{x, y}, samePointTolerance) {
			newp.flags |= pathSelfIntersects
		} else {
			// Convexity test: the new point must lie on the expected side of
			// the normal of the previous segment, given the winding direction.
			prev2 := &p.p[len(p.p)-3]
			cw := (newp.flags & pathIsClockwise) > 0
			ln := prev.pos.sub(prev2.pos)
			lo := vec{ln[1], -ln[0]}
			dot := newp.pos.sub(prev2.pos).dot(lo)
			if (cw && dot <= 0) || (!cw && dot >= 0) {
				newp.flags |= pathIsConvex
			}
		}
	}
}
// Arc (see equivalent function on canvas type)
//
// Arc approximates a circular arc around (x, y) with straight segments of
// 4 degrees each, from startAngle to endAngle (radians).
func (p *Path2D) Arc(x, y, radius, startAngle, endAngle float64, anticlockwise bool) {
	// Only check self-intersections when attaching to an existing path.
	checkSelfIntersection := len(p.p) > 0
	lastWasMove := len(p.p) == 0 || p.p[len(p.p)-1].flags&pathMove != 0
	// Zero-length arc: emit only the single point on the circle.
	if endAngle == startAngle {
		s, c := math.Sincos(endAngle)
		p.lineTo(x+radius*c, y+radius*s, checkSelfIntersection)
		if lastWasMove {
			p.p[len(p.p)-1].flags |= pathIsConvex
		}
		return
	}
	// Normalize the angles so the sweep direction matches the requested
	// orientation.
	if !anticlockwise && endAngle < startAngle {
		endAngle = startAngle + (2*math.Pi - math.Mod(startAngle-endAngle, math.Pi*2))
	} else if anticlockwise && endAngle > startAngle {
		endAngle = startAngle - (2*math.Pi - math.Mod(endAngle-startAngle, math.Pi*2))
	}
	// Clamp sweeps of two or more full turns to at most one extra turn.
	if !anticlockwise {
		diff := endAngle - startAngle
		if diff >= math.Pi*4 {
			diff = math.Mod(diff, math.Pi*2) + math.Pi*2
			endAngle = startAngle + diff
		}
	} else {
		diff := startAngle - endAngle
		if diff >= math.Pi*4 {
			diff = math.Mod(diff, math.Pi*2) + math.Pi*2
			endAngle = startAngle - diff
		}
	}
	// 90 segments per full circle.
	const step = math.Pi * 2 / 90
	if !anticlockwise {
		for a := startAngle; a < endAngle; a += step {
			s, c := math.Sincos(a)
			p.lineTo(x+radius*c, y+radius*s, checkSelfIntersection)
		}
	} else {
		for a := startAngle; a > endAngle; a -= step {
			s, c := math.Sincos(a)
			p.lineTo(x+radius*c, y+radius*s, checkSelfIntersection)
		}
	}
	// Always land exactly on the end angle.
	s, c := math.Sincos(endAngle)
	p.lineTo(x+radius*c, y+radius*s, checkSelfIntersection)
	if lastWasMove {
		p.p[len(p.p)-1].flags |= pathIsConvex
	}
}
// ArcTo (see equivalent function on canvas type)
//
// ArcTo draws an arc of the given radius tangent to the two lines
// (current point)->(x1,y1) and (x1,y1)->(x2,y2). Does nothing on an empty
// path; falls back to a straight line when the two lines are parallel.
func (p *Path2D) ArcTo(x1, y1, x2, y2, radius float64) {
	if len(p.p) == 0 {
		return
	}
	p0, p1, p2 := p.p[len(p.p)-1].pos, vec{x1, y1}, vec{x2, y2}
	v0, v1 := p0.sub(p1).norm(), p2.sub(p1).norm()
	angle := math.Acos(v0.dot(v1))
	// should be in the range [0-pi]. if parallel, use a straight line
	if angle <= 0 || angle >= math.Pi {
		p.LineTo(x2, y2)
		return
	}
	// cv0 and cv1 are vectors that point to the center of the circle
	cv0 := vec{-v0[1], v0[0]}
	cv1 := vec{v1[1], -v1[0]}
	x := cv1.sub(cv0).div(v0.sub(v1))[0] * radius
	// Negative x means the center lies on the other side; flip the normals.
	if x < 0 {
		cv0 = cv0.mulf(-1)
		cv1 = cv1.mulf(-1)
	}
	center := p1.add(v0.mulf(math.Abs(x))).add(cv0.mulf(radius))
	// Angles from the center towards the two tangent points.
	a0, a1 := cv0.mulf(-1).atan2(), cv1.mulf(-1).atan2()
	if x > 0 {
		if a1-a0 > 0 {
			a0 += math.Pi * 2
		}
	} else {
		if a0-a1 > 0 {
			a1 += math.Pi * 2
		}
	}
	p.Arc(center[0], center[1], radius, a0, a1, x > 0)
}
// QuadraticCurveTo (see equivalent function on canvas type)
//
// QuadraticCurveTo approximates a quadratic Bézier curve from the current
// point via control point (x1,y1) to (x2,y2) with 100 line segments, using
// repeated linear interpolation (de Casteljau). No-op on an empty path.
func (p *Path2D) QuadraticCurveTo(x1, y1, x2, y2 float64) {
	if len(p.p) == 0 {
		return
	}
	p0 := p.p[len(p.p)-1].pos
	p1 := vec{x1, y1}
	p2 := vec{x2, y2}
	v0 := p1.sub(p0)
	v1 := p2.sub(p1)
	const step = 0.01
	for r := 0.0; r < 1; r += step {
		// First-level interpolations, then interpolate between them.
		i0 := v0.mulf(r).add(p0)
		i1 := v1.mulf(r).add(p1)
		pt := i1.sub(i0).mulf(r).add(i0)
		p.LineTo(pt[0], pt[1])
	}
	// Land exactly on the end point.
	p.LineTo(x2, y2)
}
// BezierCurveTo (see equivalent function on canvas type)
//
// BezierCurveTo approximates a cubic Bézier curve from the current point via
// control points (x1,y1) and (x2,y2) to (x3,y3) with 100 line segments,
// using two levels of de Casteljau interpolation. No-op on an empty path.
func (p *Path2D) BezierCurveTo(x1, y1, x2, y2, x3, y3 float64) {
	if len(p.p) == 0 {
		return
	}
	p0 := p.p[len(p.p)-1].pos
	p1 := vec{x1, y1}
	p2 := vec{x2, y2}
	p3 := vec{x3, y3}
	v0 := p1.sub(p0)
	v1 := p2.sub(p1)
	v2 := p3.sub(p2)
	const step = 0.01
	for r := 0.0; r < 1; r += step {
		// Level 1: interpolate along the three control legs.
		i0 := v0.mulf(r).add(p0)
		i1 := v1.mulf(r).add(p1)
		i2 := v2.mulf(r).add(p2)
		// Level 2: interpolate between the level-1 points.
		iv0 := i1.sub(i0)
		iv1 := i2.sub(i1)
		j0 := iv0.mulf(r).add(i0)
		j1 := iv1.mulf(r).add(i1)
		pt := j1.sub(j0).mulf(r).add(j0)
		p.LineTo(pt[0], pt[1])
	}
	// Land exactly on the end point.
	p.LineTo(x3, y3)
}
// Ellipse (see equivalent function on canvas type)
//
// Ellipse approximates an elliptic arc around (x, y) with radii radiusX and
// radiusY, rotated by rotation (radians), from startAngle to endAngle. The
// structure mirrors Arc, with each point rotated by the ellipse rotation.
func (p *Path2D) Ellipse(x, y, radiusX, radiusY, rotation, startAngle, endAngle float64, anticlockwise bool) {
	checkSelfIntersection := len(p.p) > 0
	// Precompute the rotation of the whole ellipse.
	rs, rc := math.Sincos(rotation)
	lastWasMove := len(p.p) == 0 || p.p[len(p.p)-1].flags&pathMove != 0
	// Zero-length arc: emit only the single point on the ellipse.
	if endAngle == startAngle {
		s, c := math.Sincos(endAngle)
		rx, ry := radiusX*c, radiusY*s
		rx, ry = rx*rc-ry*rs, rx*rs+ry*rc
		p.lineTo(x+rx, y+ry, checkSelfIntersection)
		if lastWasMove {
			p.p[len(p.p)-1].flags |= pathIsConvex
		}
		return
	}
	// Normalize the sweep direction, as in Arc.
	if !anticlockwise && endAngle < startAngle {
		endAngle = startAngle + (2*math.Pi - math.Mod(startAngle-endAngle, math.Pi*2))
	} else if anticlockwise && endAngle > startAngle {
		endAngle = startAngle - (2*math.Pi - math.Mod(endAngle-startAngle, math.Pi*2))
	}
	// Clamp sweeps of two or more full turns to at most one extra turn.
	if !anticlockwise {
		diff := endAngle - startAngle
		if diff >= math.Pi*4 {
			diff = math.Mod(diff, math.Pi*2) + math.Pi*2
			endAngle = startAngle + diff
		}
	} else {
		diff := startAngle - endAngle
		if diff >= math.Pi*4 {
			diff = math.Mod(diff, math.Pi*2) + math.Pi*2
			endAngle = startAngle - diff
		}
	}
	// 90 segments per full turn.
	const step = math.Pi * 2 / 90
	if !anticlockwise {
		for a := startAngle; a < endAngle; a += step {
			s, c := math.Sincos(a)
			rx, ry := radiusX*c, radiusY*s
			rx, ry = rx*rc-ry*rs, rx*rs+ry*rc
			p.lineTo(x+rx, y+ry, checkSelfIntersection)
		}
	} else {
		for a := startAngle; a > endAngle; a -= step {
			s, c := math.Sincos(a)
			rx, ry := radiusX*c, radiusY*s
			rx, ry = rx*rc-ry*rs, rx*rs+ry*rc
			p.lineTo(x+rx, y+ry, checkSelfIntersection)
		}
	}
	// Always land exactly on the end angle.
	s, c := math.Sincos(endAngle)
	rx, ry := radiusX*c, radiusY*s
	rx, ry = rx*rc-ry*rs, rx*rs+ry*rc
	p.lineTo(x+rx, y+ry, checkSelfIntersection)
	if lastWasMove {
		p.p[len(p.p)-1].flags |= pathIsConvex
	}
}
// ClosePath (see equivalent function on canvas type)
//
// ClosePath draws a line back to the start of the current subpath. No-op on
// paths with fewer than two points or when the path already ends at its
// start point.
func (p *Path2D) ClosePath() {
	if len(p.p) < 2 {
		return
	}
	if isSamePoint(p.p[len(p.p)-1].pos, p.p[0].pos, 0.1) {
		return
	}
	// Find the start of the current (i.e. last) subpath.
	closeIdx := 0
	for i := len(p.p) - 1; i >= 0; i-- {
		if p.p[i].flags&pathMove != 0 {
			closeIdx = i
			break
		}
	}
	p.LineTo(p.p[closeIdx].pos[0], p.p[closeIdx].pos[1])
	// Attach the closing point to the subpath start's successor so stroking
	// joins the loop seamlessly.
	p.p[len(p.p)-1].next = p.p[closeIdx].next
	p.p[len(p.p)-1].flags |= pathAttach
}
// Rect (see equivalent function on canvas type)
//
// Rect appends a closed axis-aligned rectangle subpath with top-left corner
// (x, y), width w and height h.
func (p *Path2D) Rect(x, y, w, h float64) {
	lastWasMove := len(p.p) == 0 || p.p[len(p.p)-1].flags&pathMove != 0
	p.MoveTo(x, y)
	p.LineTo(x+w, y)
	p.LineTo(x+w, y+h)
	p.LineTo(x, y+h)
	p.LineTo(x, y)
	// Mark the subpath as a rectangle so rendering can take fast paths.
	if lastWasMove {
		p.p[len(p.p)-1].flags |= pathIsRect
		p.p[len(p.p)-1].flags |= pathIsConvex
	}
}
// runSubPaths splits path at its pathMove markers and invokes fn for each
// subpath of at least 3 points. fn returning true stops the iteration.
// If close is set, each subpath is closed before being passed to fn.
func runSubPaths(path []pathPoint, close bool, fn func(subPath []pathPoint) bool) {
	start := 0
	for i, p := range path {
		if p.flags&pathMove == 0 {
			continue
		}
		// A move marks the end of the previous subpath; only process it if it
		// has at least 3 points.
		if i >= start+3 {
			end := i
			if runSubPath(path[start:end], close, fn) {
				return
			}
		}
		start = i
	}
	// Trailing subpath after the last move.
	if len(path) >= start+3 {
		runSubPath(path[start:], close, fn)
	}
}
// runSubPath invokes fn on a single subpath, closing it first when close is
// set and the subpath does not already end at its start point. Returns fn's
// result (true means "stop iterating").
func runSubPath(path []pathPoint, close bool, fn func(subPath []pathPoint) bool) bool {
	if !close || path[0].pos == path[len(path)-1].pos {
		return fn(path)
	}
	// Build a closed copy without mutating the caller's path; the stack
	// buffer avoids a heap allocation for typical subpath sizes.
	var buf [64]pathPoint
	path2 := Path2D{
		p:    append(buf[:0], path...),
		move: path[0].pos,
	}
	path2.lineTo(path[0].pos[0], path[0].pos[1], true)
	return fn(path2.p)
}
// pathRule selects the fill rule used by IsPointInPath.
type pathRule uint8

// Path rule constants. See https://en.wikipedia.org/wiki/Nonzero-rule
// and https://en.wikipedia.org/wiki/Even%E2%80%93odd_rule
const (
	NonZero pathRule = iota
	EvenOdd
)
// IsPointInPath returns true if the point is in the path according
// to the given rule.
func (p *Path2D) IsPointInPath(x, y float64, rule pathRule) bool {
	inside := false
	runSubPaths(p.p, false, func(sp []pathPoint) bool {
		// Cast a ray from the point and accumulate signed edge crossings.
		// BUG FIX: iterate over sp (the current subpath), not p.p — prev wraps
		// around using sp, and mixing in points from other subpaths corrupted
		// the crossing count for multi-subpath paths.
		num := 0
		prev := sp[len(sp)-1].pos
		for _, pt := range sp {
			r, dir := pointIsRightOfLine(prev, pt.pos, vec{x, y})
			prev = pt.pos
			if !r {
				continue
			}
			if dir {
				num++
			} else {
				num--
			}
		}
		if rule == NonZero {
			// Nonzero winding rule: inside when the winding number is nonzero.
			inside = num != 0
		} else {
			// BUG FIX: the even-odd rule classifies a point as inside when the
			// number of crossings is ODD (previously tested `num%2 == 0`).
			// num is a signed sum, but its parity equals the crossing count's
			// parity because every crossing contributes exactly ±1.
			inside = num%2 != 0
		}
		// Returning true stops the subpath iteration once a containing
		// subpath is found.
		return inside
	})
	return inside
}
// IsPointInStroke returns true if the point is in the stroke.
//
// The stroke is triangulated with the path's canvas settings and the point
// is tested against each resulting triangle.
func (p *Path2D) IsPointInStroke(x, y float64) bool {
	if len(p.p) == 0 {
		return false
	}
	// Stack buffer avoids a heap allocation for typical stroke sizes.
	var triBuf [500][2]float64
	tris := p.cv.strokeTris(p, mat{}, false, triBuf[:0])
	pt := vec{x, y}
	// tris is a flat list of triangle vertices, three per triangle.
	for i := 0; i < len(tris); i += 3 {
		a := vec{tris[i][0], tris[i][1]}
		b := vec{tris[i+1][0], tris[i+1][1]}
		c := vec{tris[i+2][0], tris[i+2][1]}
		if triangleContainsPoint(a, b, c, pt) {
			return true
		}
	}
	return false
}
package modular
import (
"errors"
)
// Matrix is a dense matrix of modular integers stored in row-major order.
type Matrix struct {
	nRow   int    // number of rows
	nCol   int    // number of columns
	values []*Int // row-major cell values; length is nRow*nCol
}
// NewMatrix creates an r x c matrix initialized from vals (row-major).
// If vals holds fewer than r*c entries, the remainder is padded with zeros.
func NewMatrix(r, c int, vals []*Int) *Matrix {
	for len(vals) < r*c {
		vals = append(vals, NewInt(0))
	}
	return &Matrix{nRow: r, nCol: c, values: vals}
}
// GetRow returns a row of a matrix as a slice aliasing the underlying
// storage (mutations are visible in the matrix). Rows are numbered from 1.
func (m *Matrix) GetRow(r int) []*Int {
	return m.values[(r-1)*m.nCol : r*m.nCol]
}
// SetRow replaces row r (1-based) with the first nCol entries of row and
// returns the matrix for chaining.
func (m *Matrix) SetRow(r int, row []*Int) *Matrix {
	base := (r - 1) * m.nCol
	for i := 0; i < m.nCol; i++ {
		m.values[base+i] = row[i]
	}
	return m
}
// GetCol returns a copy of the pointers in column c (1-based). The slice
// itself is fresh, but the *Int entries alias the matrix cells.
func (m *Matrix) GetCol(c int) []*Int {
	c--
	res := make([]*Int, m.nRow)
	for i := range res {
		res[i] = m.GetRow(i+1)[c]
	}
	return res
}
// SetCol replaces column c (1-based) with the first nRow entries of col and
// returns the matrix for chaining.
func (m *Matrix) SetCol(c int, col []*Int) *Matrix {
	for i := 0; i < m.nRow; i++ {
		m.values[i*m.nCol+c-1] = col[i]
	}
	return m
}
// ScalarMul multiplies a matrix by a scalar.
// The matrix is mutated in place; the receiver is returned for chaining.
func (m *Matrix) ScalarMul(i *Int) *Matrix {
	for _, v := range m.values {
		v.Mul(v, i)
	}
	return m
}
// Represent2D returns the matrix as a fresh two-dimensional slice of rows.
// Every cell value is deep-copied, so the result does not alias the matrix.
func (m *Matrix) Represent2D() [][]*Int {
	mat := make([][]*Int, m.nRow)
	for i := range mat {
		row := m.GetRow(i + 1)
		mat[i] = make([]*Int, len(row))
		for j := range mat[i] {
			mat[i][j] = IntFromBig(row[j].AsBig())
		}
	}
	return mat
}
// Copy returns a deep copy of the matrix: the value slice and every cell
// are freshly allocated.
func (m *Matrix) Copy() *Matrix {
	vals := make([]*Int, len(m.values))
	for i := range vals {
		vals[i] = IntFromBig(m.values[i].AsBig())
	}
	return NewMatrix(m.nRow, m.nCol, vals)
}
// Mul does matrix multiplication, returning a new matrix when the dimensions
// are compatible (x.nCol == y.nRow), otherwise an error. The receiver is not
// used as an operand.
func (m *Matrix) Mul(x, y *Matrix) (*Matrix, error) {
	if x.nCol != y.nRow {
		return nil, errors.New("mismatched dimensions, cannot multiply")
	}
	// Fill the product row-major: entry (i, j) is the dot product of row i
	// of x with column j of y.
	vals := make([]*Int, x.nRow*y.nCol)
	idx := 0
	for i := 1; i <= x.nRow; i++ {
		for j := 1; j <= y.nCol; j++ {
			vals[idx] = new(Int).LinearCombination(x.GetRow(i), y.GetCol(j))
			idx++
		}
	}
	// BUG FIX: the product of an (x.nRow x x.nCol) matrix with an
	// (y.nRow x y.nCol) matrix is (x.nRow x y.nCol). The previous version had
	// the dimensions swapped (nRow: y.nCol, nCol: x.nRow), which only worked
	// by coincidence for square matrices.
	return &Matrix{
		nRow:   x.nRow,
		nCol:   y.nCol,
		values: vals,
	}, nil
}
// Inverse computes the inverse of a square matrix by solving m*v = e_i with
// Gauss-Jordan elimination for every column e_i of the identity matrix.
// Returns an error for non-square matrices or when elimination fails
// (e.g. the matrix is singular).
func (m *Matrix) Inverse() (*Matrix, error) {
	if m.nRow != m.nCol {
		return nil, errors.New("only square matrices are invertible")
	}
	inverse := NewMatrix(m.nRow, m.nCol, []*Int{})
	i := 1
	for i < m.nRow+1 {
		// Column i of the inverse solves m * col = i-th identity column.
		col, err := GaussJordan(m.Represent2D(), GetI(m.nRow).GetRow(i))
		if err != nil {
			return nil, err
		}
		inverse.SetCol(i, col)
		i++
	}
	return inverse, nil
}
func GetI(c int) *Matrix {
I := NewMatrix(c,c, []*Int{})
i := 0
idex := 0
for i < c {
idex = c*i + i
I.values[idex] = NewInt(1)
i++
}
return I
} | modular/matrix.go | 0.833934 | 0.446555 | matrix.go | starcoder |
package gamerules
import (
"encoding/json"
"fmt"
"io"
"os"
. "chunkymonkey/types"
)
// FurnaceData contains data on furnace reactions.
type FurnaceData struct {
	// FuelDuration contains a map of fuel types to number of ticks that the fuel
	// lasts for.
	Fuels map[ItemTypeId]Ticks
	// Reactions contains a map of input item type to output item type and data.
	Reactions map[ItemTypeId]Reaction
}

// Reaction describes the output of a furnace reaction.
type Reaction struct {
	Output     ItemTypeId // item type produced by the reaction
	OutputData ItemData   // item data value of the produced item
}

// furnaceDataDef is used in unmarshalling data from the JSON definition of
// FurnaceData.
type furnaceDataDef struct {
	Fuels []struct {
		Id        ItemTypeId
		FuelTicks Ticks
	}
	Reactions []struct {
		Comment string // free-form label used only in error messages
		Input   ItemTypeId
		Output  ItemTypeId
		OutputData ItemData
	}
}
// LoadFurnaceData reads FurnaceData from the reader. Every fuel and reaction
// item type is validated against the known Items table; the first unknown ID
// aborts loading with a descriptive error.
func LoadFurnaceData(reader io.Reader) (furnaceData FurnaceData, err error) {
	var dataDef furnaceDataDef
	if err = json.NewDecoder(reader).Decode(&dataDef); err != nil {
		return
	}
	// Build the fuel table, validating each fuel item type.
	furnaceData.Fuels = make(map[ItemTypeId]Ticks)
	for _, fuel := range dataDef.Fuels {
		if _, known := Items[fuel.Id]; !known {
			err = fmt.Errorf("Furnace fuel type %d is unknown item type ID", fuel.Id)
			return
		}
		furnaceData.Fuels[fuel.Id] = fuel.FuelTicks
	}
	// Build the reaction table, validating both sides of every reaction.
	furnaceData.Reactions = make(map[ItemTypeId]Reaction)
	for _, reaction := range dataDef.Reactions {
		if _, known := Items[reaction.Input]; !known {
			err = fmt.Errorf(
				"Furnace reaction %q has unknown input item type ID %d",
				reaction.Comment, reaction.Input)
			return
		}
		if _, known := Items[reaction.Output]; !known {
			err = fmt.Errorf(
				"Furnace reaction %q has unknown output item type ID %d",
				reaction.Comment, reaction.Output)
			return
		}
		furnaceData.Reactions[reaction.Input] = Reaction{
			Output:     reaction.Output,
			OutputData: reaction.OutputData,
		}
	}
	return
}
// LoadFurnaceDataFromFile reads FurnaceData from the named file.
func LoadFurnaceDataFromFile(filename string) (furnaceData FurnaceData, err error) {
file, err := os.Open(filename)
if err != nil {
return
}
defer file.Close()
return LoadFurnaceData(file)
} | src/chunkymonkey/gamerules/furnace_data.go | 0.642432 | 0.422683 | furnace_data.go | starcoder |
package option
import (
"math"
"github.com/konimarti/fixedincome/pkg/term"
)
// Option type constants: Call buys the right to purchase the underlying,
// Put the right to sell it.
const (
	Call int = iota
	Put
)

// European is the implementation of plain vanilla European option
type European struct {
	// Type is the type of the option (call=0, put=1)
	Type int
	// S is the price of the underlying asset
	S float64
	// K is the strike price
	K float64
	// T is remaining maturity in years
	T float64
	// Q is the dividend yield in percent
	Q float64
	// Vola is the volatility of the underlying asset
	Vola float64
}
// PresentValue implements the Black-Scholes pricing formula for European
// call and put options, discounting with the given term structure.
func (e *European) PresentValue(ts term.Structure) float64 {
	d1 := D1(e.S, e.K, e.T, e.Q, e.Vola, ts)
	d2 := D2(d1, e.T, e.Vola)
	z := ts.Z(e.T)
	switch e.Type {
	case Call:
		return e.S*math.Exp(-e.Q/100.0*e.T)*N(d1) - e.K*z*N(d2)
	case Put:
		return -e.S*math.Exp(-e.Q/100.0*e.T)*N(-d1) + e.K*z*N(-d2)
	}
	// Unknown option type: no value.
	return 0
}
// SetVola sets the volatility (needed for the calculation of the implied volatility).
func (e *European) SetVola(newVola float64) {
	e.Vola = newVola
}
// implement the 'Greeks'

// Delta returns the sensitivity of the option price to changes in the price
// of the underlying asset (negative for puts).
func (e *European) Delta(ts term.Structure) float64 {
	d1 := D1(e.S, e.K, e.T, e.Q, e.Vola, ts)
	sign := 1.0
	if e.Type == Put {
		sign = -1.0
	}
	// BUG FIX: Q is a dividend yield in percent (see the struct doc,
	// PresentValue and D1, which all use Q/100); the dividend discount factor
	// previously used the raw Q, inflating the discount by a factor of 100.
	return sign * math.Exp(-e.Q/100.0*e.T) * N(sign*d1)
}
// Gamma returns the rate of change of Delta with respect to the price of the
// underlying asset.
func (e *European) Gamma(ts term.Structure) float64 {
	d1 := D1(e.S, e.K, e.T, e.Q, e.Vola, ts)
	// BUG FIX: Q is in percent; divide by 100 as in PresentValue and D1.
	return math.Exp(-e.Q/100.0*e.T) * Napostroph(d1) / (e.S * e.Vola * math.Sqrt(e.T))
}
// Rho returns the sensitivity of the option price to changes in the interest
// rate (negative for puts). The rate from the term structure is in percent
// and therefore divided by 100.
func (e *European) Rho(ts term.Structure) float64 {
	d1 := D1(e.S, e.K, e.T, e.Q, e.Vola, ts)
	d2 := D2(d1, e.T, e.Vola)
	sign := 1.0
	if e.Type == Put {
		sign = -1.0
	}
	return sign * e.K * e.T * math.Exp(-(ts.Rate(e.T)/100.0)*e.T) * N(sign*d2)
}
// Vega returns the sensitivity of the option price to changes in the
// volatility of the underlying asset.
func (e *European) Vega(ts term.Structure) float64 {
	d1 := D1(e.S, e.K, e.T, e.Q, e.Vola, ts)
	// BUG FIX: Q is in percent; divide by 100 as in PresentValue and D1.
	return math.Exp(-e.Q/100.0*e.T) * e.S * math.Sqrt(e.T) * Napostroph(d1)
}
// helper function for Black Scholes formula

// D1 computes the d1 term of the Black-Scholes formula. The rate from the
// term structure and the dividend yield Q are both in percent.
func D1(S, K, T, Q, Vola float64, ts term.Structure) float64 {
	return (math.Log(S/K) + (ts.Rate(T)/100.0-Q/100.0+math.Pow(Vola, 2.0)/2.0)*T) / (Vola * math.Sqrt(T))
}
// D2 computes the d2 term of the Black-Scholes formula from d1, the maturity
// T (years) and the volatility.
func D2(d1, T, Vola float64) float64 {
	adjustment := Vola * math.Sqrt(T)
	return d1 - adjustment
}
// N is the cumulative distribution function of the standard normal
// distribution.
func N(x float64) float64 {
	// 0.5*erfc(-x/sqrt(2)) is valid and accurate for all x: math.Erfc computes
	// small tail values directly. The previous reflection `1 - N(-x)` for
	// negative x suffered catastrophic cancellation in the far left tail.
	return 0.5 * math.Erfc(-x/math.Sqrt2)
}
// Napostroph is the probability density function of the standard normal
// distribution (the derivative of N).
func Napostroph(x float64) float64 {
	normalization := math.SqrtPi * math.Sqrt2
	return math.Exp(-(x*x)/2.0) / normalization
}
package operator
import (
"github.com/matrixorigin/matrixcube/components/prophet/core"
"github.com/matrixorigin/matrixcube/components/prophet/limit"
"github.com/matrixorigin/matrixcube/pb/metapb"
)
// OpInfluence records the influence of the cluster.
type OpInfluence struct {
	// StoresInfluence maps store ID to the influence pending operators have on it.
	StoresInfluence map[uint64]*StoreInfluence
}

// GetStoreInfluence gets the StoreInfluence of a specific store, lazily
// creating (and caching) an empty one on first access.
func (m OpInfluence) GetStoreInfluence(id uint64) *StoreInfluence {
	containerInfluence, ok := m.StoresInfluence[id]
	if !ok {
		containerInfluence = &StoreInfluence{
			InfluenceStats: map[string]InfluenceStats{},
		}
		m.StoresInfluence[id] = containerInfluence
	}
	return containerInfluence
}
// InfluenceStats holds per-group size/count deltas that pending operators
// will apply to a store.
type InfluenceStats struct {
	ShardSize   int64 // delta of total shard size
	ShardCount  int64 // delta of shard count
	LeaderSize  int64 // delta of total leader size
	LeaderCount int64 // delta of leader count
}

// StoreInfluence records influences that pending operators will make.
type StoreInfluence struct {
	InfluenceStats map[string]InfluenceStats // keyed by group
	StepCost       map[limit.Type]int64      // accumulated step cost per limit type
}
// ShardProperty returns the delta of the leader/shard property selected by
// the schedule kind for the given group. Unrecognized kinds or policies
// yield 0.
func (s StoreInfluence) ShardProperty(kind core.ScheduleKind, groupKey string) int64 {
	switch kind.ShardKind {
	case metapb.ShardType_LeaderOnly:
		// Leader scheduling can be driven by count or by size.
		switch kind.Policy {
		case core.ByCount:
			return s.InfluenceStats[groupKey].LeaderCount
		case core.BySize:
			return s.InfluenceStats[groupKey].LeaderSize
		default:
			return 0
		}
	case metapb.ShardType_AllShards:
		return s.InfluenceStats[groupKey].ShardSize
	default:
		return 0
	}
}
// GetStepCost returns the accumulated step cost for the given limit type,
// or 0 when none has been recorded.
func (s StoreInfluence) GetStepCost(limitType limit.Type) int64 {
	// Reading from a nil map safely yields the zero value in Go, so the
	// previous explicit `s.StepCost == nil` guard was redundant.
	return s.StepCost[limitType]
}
// addStepCost accumulates cost for the given limit type, lazily allocating
// the StepCost map on first use (writing to a nil map would panic).
func (s *StoreInfluence) addStepCost(limitType limit.Type, cost int64) {
	if s.StepCost == nil {
		s.StepCost = make(map[limit.Type]int64)
	}
	s.StepCost[limitType] += cost
}
// AdjustStepCost adjusts the step cost of specific type container limit according to resource size
func (s *StoreInfluence) AdjustStepCost(limitType limit.Type, resourceSize int64) {
if resourceSize > limit.SmallShardThreshold {
s.addStepCost(limitType, limit.ShardInfluence[limitType])
} else if resourceSize <= limit.SmallShardThreshold && resourceSize > limit.EmptyShardApproximateSize {
s.addStepCost(limitType, limit.SmallShardInfluence[limitType])
}
} | components/prophet/schedule/operator/influence.go | 0.644001 | 0.422743 | influence.go | starcoder |
package reactnative
// deployWorkflowDescription is the markdown guide shown to users that
// explains how to configure code signing for the Android and iOS parts of
// the generated React Native deploy workflow and how to trigger it.
const deployWorkflowDescription = `## Configure Android part of the deploy workflow
To generate a signed APK:
1. Open the **Workflow** tab of your project on Bitrise.io
1. Add **Sign APK step right after Android Build step**
1. Click on **Code Signing** tab
1. Find the **ANDROID KEYSTORE FILE** section
1. Click or drop your file on the upload file field
1. Fill the displayed 3 input fields:
1. **Keystore password**
1. **Keystore alias**
1. **Private key password**
1. Click on **[Save metadata]** button
That's it! From now on, **Sign APK** step will receive your uploaded files.
## Configure iOS part of the deploy workflow
To generate IPA:
1. Open the **Workflow** tab of your project on Bitrise.io
1. Click on **Code Signing** tab
1. Find the **PROVISIONING PROFILE** section
1. Click or drop your file on the upload file field
1. Find the **CODE SIGNING IDENTITY** section
1. Click or drop your file on the upload file field
1. Click on **Workflows** tab
1. Select deploy workflow
1. Select **Xcode Archive & Export for iOS** step
1. Open **Force Build Settings** input group
1. Specify codesign settings
Set **Force code signing with Development Team**, **Force code signing with Code Signing Identity**
and **Force code signing with Provisioning Profile** inputs regarding to the uploaded codesigning files
1. Specify manual codesign style
If the codesigning files, are generated manually on the Apple Developer Portal,
you need to explicitly specify to use manual coedsign settings
(as ejected rn projects have xcode managed codesigning turned on).
To do so, add 'CODE_SIGN_STYLE="Manual"' to 'Additional options for xcodebuild call' input
## To run this workflow
If you want to run this workflow manually:
1. Open the app's build list page
2. Click on **[Start/Schedule a Build]** button
3. Select **deploy** in **Workflow** dropdown input
4. Click **[Start Build]** button
Or if you need this workflow to be started by a GIT event:
1. Click on **Triggers** tab
2. Setup your desired event (push/tag/pull) and select **deploy** workflow
3. Click on **[Done]** and then **[Save]** buttons
The next change in your repository that matches any of your trigger map event will start **deploy** workflow.
`
Package bitutil contains common function for bit-level operations.
Pack and Unpack functions are used to pack and unpack a list of non-zero numbers
very efficiently.
*/
package bitutil
import (
"bytes"
"fmt"
"math"
)
/*
CompareByteArray compares the contents of two byte array slices. Returns true
if both slices are equivalent in terms of size and content. The capacity may
be different.
*/
func CompareByteArray(arr1 []byte, arr2 []byte) bool {
	// bytes.Equal performs exactly the same length+content comparison as the
	// previous hand-rolled loop, using an optimized implementation.
	return bytes.Equal(arr1, arr2)
}
/*
ByteSizeString takes a numeric byte size and returns it in human readable form.
The useISU parameter determines which units to use:

	useISU = true  -> decimal units (SI):  kB, MB, GB, TB, PB, EB (powers of 1000)
	useISU = false -> binary units (IEC): KiB, MiB, GiB, TiB, PiB, EiB (powers of 1024)

Sizes below one unit are returned as plain bytes, e.g. "500 B"; larger sizes
are formatted with one decimal place, e.g. "1.5 KiB".
*/
func ByteSizeString(size int64, useISU bool) string {
	unit := 1024.0
	if useISU {
		unit = 1000.0
	}
	value := float64(size)
	if value < unit {
		return fmt.Sprintf("%d B", int(value))
	}
	// magnitude selects the prefix: 1 -> kilo/kibi, 2 -> mega/mebi, ...
	magnitude := math.Floor(math.Log(value) / math.Log(unit))
	prefix := string("kMGTPE"[int(magnitude-1)])
	if !useISU {
		prefix = fmt.Sprintf("%vi", string("KMGTPE"[int(magnitude-1)]))
	}
	scaled := value / math.Pow(unit, magnitude)
	return fmt.Sprintf("%.1f %sB", scaled, prefix)
}
/*
HexDump produces a more-or-less human readable hex dump from a given byte array
slice: 10 bytes per line as two-digit uppercase hex, padded so the raw
character rendering of each line is right-aligned, framed by "====" markers,
with each line prefixed by its 6-digit hex offset.
*/
func HexDump(data []byte) string {
	var out bytes.Buffer
	var ascii bytes.Buffer
	fmt.Fprintf(&out, "====\n%06x ", 0)
	for idx, value := range data {
		// Start a new line every 10 bytes, flushing the character column first.
		if idx > 0 && idx%10 == 0 {
			fmt.Fprintf(&out, " %s\n%06x ", ascii.String(), idx)
			ascii.Reset()
		}
		fmt.Fprintf(&out, "%02X ", value)
		ascii.WriteRune(rune(value))
	}
	// Pad a short final line so the character column lines up.
	if rest := len(data) % 10; rest != 0 {
		for pad := rest; pad < 10; pad++ {
			out.WriteString("   ")
		}
	}
	fmt.Fprintf(&out, " %s\n====\n", ascii.String())
	return out.String()
}
package quickhull
import (
"github.com/golang/geo/r3"
)
// HalfEdgeMesh is a mesh consisting of half edges.
// See: https://www.openmesh.org/media/Documentations/OpenMesh-6.3-Documentation/a00010.html
type HalfEdgeMesh struct {
	Vertices  []r3.Vector // vertex positions; indexed by HalfEdge.EndVertex
	Faces     []Face      // faces; indexed by HalfEdge.Face
	HalfEdges []HalfEdge  // half edges; indexed by Opp/Next and Face.HalfEdge
}

// HalfEdge is a half edge.
// See: https://www.openmesh.org/media/Documentations/OpenMesh-6.3-Documentation/a00010.html
type HalfEdge struct {
	EndVertex int // Index of end vertex
	Opp       int // Index of opposite HalfEdge
	Face      int // Index of Face it belongs to
	Next      int // Index of next HalfEdge
}
// disabledInt is the sentinel value (all bits set, i.e. -1) marking a half
// edge as disabled.
const disabledInt = ^int(0)

// disable marks the half edge as unused by setting its end vertex to the sentinel.
func (he *HalfEdge) disable() {
	he.EndVertex = disabledInt
}

// isDisabled reports whether the half edge has been disabled.
func (he HalfEdge) isDisabled() bool {
	return he.EndVertex == disabledInt
}
// Face of a half edge.
// See: https://www.openmesh.org/media/Documentations/OpenMesh-6.3-Documentation/a00010.html
type Face struct {
	HalfEdge int // Index of a bounding HalfEdge
}
// newHalfEdgeMesh compacts the (possibly partially disabled) state of a
// meshBuilder into a HalfEdgeMesh: disabled faces and half edges are dropped
// and all cross-references (face, opposite, next, vertex indices) are
// remapped to the compacted index spaces.
func newHalfEdgeMesh(builder meshBuilder, vertices []r3.Vector) HalfEdgeMesh {
	var heMesh HalfEdgeMesh
	// old index -> new index maps for each entity kind.
	faceMapping := make(map[int]int)
	halfEdgeMapping := make(map[int]int)
	vertexMapping := make(map[int]int)
	// Copy live faces and the vertices they reference.
	for i, f := range builder.faces {
		if f.isDisabled() {
			continue
		}
		heMesh.Faces = append(heMesh.Faces, Face{HalfEdge: f.halfEdgeIndex})
		faceMapping[i] = len(heMesh.Faces) - 1
		heIndicies := builder.halfEdgeIndicesOfFace(f)
		for _, heIndex := range heIndicies {
			vertexIndex := builder.halfEdges[heIndex].EndVertex
			// Copy each referenced vertex exactly once.
			if _, contains := vertexMapping[vertexIndex]; !contains {
				heMesh.Vertices = append(heMesh.Vertices, vertices[vertexIndex])
				vertexMapping[vertexIndex] = len(heMesh.Vertices) - 1
			}
		}
	}
	// Copy live half edges.
	for i, he := range builder.halfEdges {
		if he.isDisabled() {
			continue
		}
		heMesh.HalfEdges = append(heMesh.HalfEdges, he)
		halfEdgeMapping[i] = len(heMesh.HalfEdges) - 1
	}
	// Remap face -> half edge references.
	for i := range heMesh.Faces {
		_, contains := halfEdgeMapping[heMesh.Faces[i].HalfEdge]
		assertTrue(contains)
		heMesh.Faces[i].HalfEdge = halfEdgeMapping[heMesh.Faces[i].HalfEdge]
	}
	// Remap half edge -> face/opposite/next/vertex references.
	for i := range heMesh.HalfEdges {
		heMesh.HalfEdges[i].Face = faceMapping[heMesh.HalfEdges[i].Face]
		heMesh.HalfEdges[i].Opp = halfEdgeMapping[heMesh.HalfEdges[i].Opp]
		heMesh.HalfEdges[i].Next = halfEdgeMapping[heMesh.HalfEdges[i].Next]
		heMesh.HalfEdges[i].EndVertex = vertexMapping[heMesh.HalfEdges[i].EndVertex]
	}
	return heMesh
}
package board
// CanPlace determines whether block can be placed with its pattern's
// top-left corner at board position (x, y): every pattern cell must be on
// the board and, where the pattern bit is set, unoccupied.
func (b *Board) CanPlace(x int, y int, block TetrisBlock) (canPlace bool) {
	pattern := block.Pattern
	//Given this block's pattern can we place it on the board?
	for w := 0; w < len(pattern); w++ {
		for h := 0; h < len(pattern[w]); h++ {
			// Out of bounds checking - shape overlaying on the board
			// NOTE(review): this rejects placement even when the out-of-bounds
			// pattern cell is false (empty); confirm that a bounding box fully
			// on the board is the intended rule.
			if x+w >= len(b.Bits) {
				return false
			}
			if y+h >= len(b.Bits[x+w]) {
				return false
			}
			// If the space is already occupied we cannot place there
			if b.Bits[x+w][y+h].occupied == true && pattern[w][h] == true {
				return false
			}
		}
	}
	return true
}
// PlaceBlock anchors a piece to the board at spot (x, y) given the block's
// pattern. Returns false (leaving the board unchanged) when the block does
// not fit.
func (b *Board) PlaceBlock(x int, y int, block TetrisBlock) (canFit bool) {
	if !b.CanPlace(x, y, block) {
		return false
	}
	pattern := block.Pattern
	// If we are with-in boundaries, apply the OccupyPatten to the Board
	for w := 0; w < len(pattern); w++ {
		for h := 0; h < len(pattern[w]); h++ {
			// We set the Board bit to the block, and occupied pattern
			b.set(x+w, y+h, pattern[w][h], block)
		}
	}
	return true
}
// MoveBlock takes the block anchored at (x, y) and tries to move it to
// (x1, y1). The piece is removed first; if it cannot be placed at the
// destination it is restored at its original position. Returns whether the
// move succeeded.
func (b *Board) MoveBlock(x, y, x1, y1 int) (wasMoved bool) {
	// 1. Boudary checking for x,y
	// TODO: Add more boundary checks for x,y >=0 and x1+patwidth<b.width...
	if y >= b.Height || x >= b.Width {
		return false
	}
	bit := b.Bits[x][y]
	block := bit.block
	pattern := block.Pattern
	// A nil or empty pattern means there is no block anchored here.
	if pattern == nil {
		return false
	}
	patwidth := len(pattern)
	if patwidth == 0 {
		return false
	}
	if x+patwidth >= b.Width {
		return false
	}
	patheight := len(pattern[0])
	if patheight == 0 {
		return false
	}
	// NOTE(review): this compares x+patheight against b.Height; presumably
	// y+patheight was intended — confirm.
	if x+patheight >= b.Height {
		return false
	}
	// 2. Remove piece and try to place at x1,y1 - otherwise put back at x,y
	b.RemovePiece(x, y)
	wasMoved = b.PlaceBlock(x1, y1, block)
	if !wasMoved {
		b.PlaceBlock(x, y, block)
	}
	return wasMoved
}
// DropToBottom looks up the block at (x, y) and tries to 'move it' to the
// bottom of the board by recursively shifting it down one row at a time.
// In Tetris this is when you push the 'down' arrow on the currently moving
// block. Returns false if the block cannot move down at all.
func (b *Board) DropToBottom(x, y int) (wasMoved bool) {
	if y >= b.Height || x >= b.Width {
		return false
	}
	bit := b.Bits[x][y]
	block := bit.block
	pattern := block.Pattern
	if pattern == nil {
		return false
	}
	patwidth := len(pattern)
	patheight := len(pattern[0])
	// Check every occupied pattern cell on the block's lower edge: the board
	// cell directly below it must be free (or still inside the block itself).
	for w := 0; w < patwidth; w++ {
		for h := 0; h < patheight; h++ {
			if pattern[w][h] == true {
				//A bit of this block is occupy the space below, so we could move down.
				if h+1 < patheight && pattern[w][h+1] == true {
					continue
				}
				//The bit below is not occupied
				if h+1+y < b.Height && b.Bits[x+w][h+1+y].occupied == false {
					continue
				}
				//The bit below is occupied and our pattern has bit that needs the spot
				return false
			}
		}
	}
	//Remove the block from the board
	for w := 0; w < patwidth; w++ {
		for h := 0; h < patheight; h++ {
			b.unset(x+w, y+h, pattern[w][h])
		}
	}
	//Move the block down one row, which we know is inbounds from above
	b.PlaceBlock(x, y+1, block)
	// RECURSE!! :-)
	b.DropToBottom(x, y+1)
	return true
}
// RemovePiece looks up the piece anchored at (x, y) and unsets each bit of
// its pattern on the board. Returns false when the coordinates are out of
// range or no block is anchored there.
func (b *Board) RemovePiece(x, y int) (delete bool) {
	if y >= b.Height || x >= b.Width {
		return false
	}
	bit := b.Bits[x][y]
	block := bit.block
	pattern := block.Pattern
	if pattern == nil {
		return false
	}
	patwidth := len(pattern)
	patheight := len(pattern[0])
	// Remove from board
	for w := 0; w < patwidth; w++ {
		for h := 0; h < patheight; h++ {
			b.unset(x+w, y+h, pattern[w][h])
		}
	}
	return true
}
// RotatePiece rotates the active piece at (x, y) one step to the right
// (Up->Right->Down->Left->Up...). Returns true when the rotated piece was
// placed back on the board successfully.
func (b *Board) RotatePiece(x, y int) (rotated bool) {
	// Bounds check the anchor cell before any lookups.
	if x >= b.Width || y >= b.Height {
		return false
	}
	block := b.Bits[x][y].block
	if block.Pattern == nil {
		return false
	}
	// Lift the piece off the board, rotate its pattern, then re-place it.
	b.RemovePiece(x, y)
	block.Rotate()
	return b.PlaceBlock(x, y, block)
}
// TetrisMatch scans every row of the board; each fully-occupied row is
// cleared and reported through the optional onTetris callback.
func (b *Board) TetrisMatch(onTetris func(row int)) {
rowScan:
	for row := 0; row < b.Height; row++ {
		// A single empty cell disqualifies the row.
		for col := 0; col < b.Width; col++ {
			if !b.Bits[col][row].occupied {
				continue rowScan
			}
		}
		// Every cell is occupied — Tetris! Clear the whole row.
		for col := 0; col < b.Width; col++ {
			b.unset(col, row, true)
		}
		if onTetris != nil {
			onTetris(row)
		}
	}
}
// TetrisReduce implements classic Tetris rule of 'all zero rows replace with the row above'.
// Starting at startrow and moving toward the bottom, any row that is entirely
// unoccupied is filled by shifting every row above it down by one; the top
// row (index 0) ends up cleared.
// NOTE(review): after a shift, row h holds what was row h-1 and is not
// re-checked, so consecutive empty rows may need repeated calls — confirm
// whether that is the intended behavior.
func (b *Board) TetrisReduce(startrow int) {
skiprow:
	for h := startrow; h < b.Height; h++ {
		//1. Check each bit in the row
		for w := 0; w < b.Width; w++ {
			// If any bit is occupied the row cannot reduce
			if b.Bits[w][h].occupied == true {
				continue skiprow
			}
		}
		// The h row is all unoccupied and we work to row 0 zero copying rows
		for s := h; s > 0; s-- {
			for w := 0; w < b.Width; w++ {
				//1. Assign current row the bit from the row 'above' (closer to zero)
				b.Bits[w][s] = b.Bits[w][s-1]
				//2. Unoccupy the bit and remove block reference
				b.Bits[w][s-1].occupied = false
				b.Bits[w][s-1].block = TetrisBlock{}
			}
		}
	}
	return
}
package stmt
import (
"fmt"
"strings"
"github.com/lindb/lindb/aggregation/function"
)
// Expr represents the interface implemented by all expression types.
type Expr interface {
	// Rewrite rewrites the expr after parse
	Rewrite() string
}

// TagFilter represents a tag filter for searching time series.
type TagFilter interface {
	// TagKey returns the filter's tag key
	TagKey() string
}

// SelectItem represents a select item from a select statement,
// optionally carrying an alias ("expr as alias").
type SelectItem struct {
	Expr  Expr
	Alias string
}

// FieldExpr represents a field name in the select list.
type FieldExpr struct {
	Name string
}

// CallExpr represents a function call expression with its parameters.
type CallExpr struct {
	FuncType function.FuncType
	Params   []Expr
}

// ParenExpr represents a parenthesized expression.
type ParenExpr struct {
	Expr Expr
}

// BinaryExpr represents an operation combining two expressions.
type BinaryExpr struct {
	Left, Right Expr
	Operator    BinaryOP
}

// EqualsExpr represents an equals expression (key=value).
type EqualsExpr struct {
	Key   string
	Value string
}

// InExpr represents an in expression (key in (v1,v2,...)).
type InExpr struct {
	Key    string
	Values []string
}

// LikeExpr represents a like expression (key like value).
type LikeExpr struct {
	Key   string
	Value string
}

// RegexExpr represents a regular expression match (key=~regexp).
type RegexExpr struct {
	Key    string
	Regexp string
}

// NotExpr represents a not expression (negation of the wrapped expr).
type NotExpr struct {
	Expr Expr
}
// Rewrite renders the select item, appending " as <alias>" when an alias is set.
func (e *SelectItem) Rewrite() string {
	expr := e.Expr.Rewrite()
	if e.Alias == "" {
		return expr
	}
	return expr + " as " + e.Alias
}
// Rewrite rewrites the field expr after parse; a field renders as its name.
func (e *FieldExpr) Rewrite() string {
	return e.Name
}
// Rewrite renders the call expression as funcName(p1,p2,...).
func (e *CallExpr) Rewrite() string {
	params := make([]string, len(e.Params))
	for i, p := range e.Params {
		params[i] = p.Rewrite()
	}
	return function.FuncTypeString(e.FuncType) + "(" + strings.Join(params, ",") + ")"
}
// Rewrite renders the wrapped expression inside parentheses.
func (e *ParenExpr) Rewrite() string {
	return "(" + e.Expr.Rewrite() + ")"
}
// Rewrite renders the binary expression as left, operator, right with no
// surrounding whitespace.
func (e *BinaryExpr) Rewrite() string {
	return e.Left.Rewrite() + BinaryOPString(e.Operator) + e.Right.Rewrite()
}
// Rewrite renders the negation as "not <expr>".
func (e *NotExpr) Rewrite() string {
	return "not " + e.Expr.Rewrite()
}
// Rewrite renders the equals filter as "key=value".
func (e *EqualsExpr) Rewrite() string {
	return e.Key + "=" + e.Value
}
// Rewrite renders the in filter as "key in (v1,v2,...)".
func (e *InExpr) Rewrite() string {
	return e.Key + " in (" + strings.Join(e.Values, ",") + ")"
}
// Rewrite renders the like filter as "key like value".
func (e *LikeExpr) Rewrite() string {
	return e.Key + " like " + e.Value
}
// Rewrite renders the regex filter as "key=~regexp".
func (e *RegexExpr) Rewrite() string {
	return e.Key + "=~" + e.Regexp
}
// TagKey returns the equals filter's tag key.
func (e *EqualsExpr) TagKey() string { return e.Key }

// TagKey returns the in filter's tag key.
func (e *InExpr) TagKey() string { return e.Key }

// TagKey returns the like filter's tag key.
func (e *LikeExpr) TagKey() string { return e.Key }

// TagKey returns the regex filter's tag key.
func (e *RegexExpr) TagKey() string { return e.Key }
package brotli
import "encoding/binary"
/* Copyright 2015 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function for fast encoding of an input fragment, independently from the input
history. This function uses one-pass processing: when we find a backward
match, we immediately emit the corresponding command and literal codes to
the bit stream.
Adapted from the CompressFragment() function in
https://github.com/google/snappy/blob/master/snappy.cc */
// maxDistance_compress_fragment is the largest backward distance this
// one-pass encoder will emit (262128 = 2^18 - 16, i.e. window - margin).
const maxDistance_compress_fragment = 262128

// hash5 hashes the first 5 bytes of p (the low 40 bits of the little-endian
// load, selected by the <<24 before the multiply) into a table index of
// (64 - shift) bits.
func hash5(p []byte, shift uint) uint32 {
	var h uint64 = (binary.LittleEndian.Uint64(p) << 24) * uint64(kHashMul32)
	return uint32(h >> shift)
}
// hashBytesAtOffset5 computes the same hash as hash5 but from a pre-loaded
// 64-bit word v, using the 5 bytes starting at byte offset 0..3 within v.
func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 {
	assert(offset >= 0)
	assert(offset <= 3)
	{
		var h uint64 = ((v >> uint(8*offset)) << 24) * uint64(kHashMul32)
		return uint32(h >> shift)
	}
}
// isMatch5 reports whether the first 5 bytes of p1 and p2 are identical
// (a 4-byte little-endian word compare followed by the fifth byte).
func isMatch5(p1 []byte, p2 []byte) bool {
	if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) {
		return false
	}
	return p1[4] == p2[4]
}
/* Builds a literal prefix code into "depths" and "bits" based on the statistics
   of the "input" string and stores it into the bit stream.
   Note that the prefix code here is built from the pre-LZ77 input, therefore
   we can only approximate the statistics of the actual literal stream.
   Moreover, for long inputs we build a histogram from a sample of the input
   and thus have to assign a non-zero depth for each literal.
   Returns estimated compression ratio millibytes/char for encoding given input
   with generated code. */
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint {
	var histogram = [256]uint32{0}
	var histogram_total uint
	var i uint
	// Small inputs are counted exactly; larger inputs are sampled every
	// kSampleRate bytes to bound the cost of histogram building.
	if input_size < 1<<15 {
		for i = 0; i < input_size; i++ {
			histogram[input[i]]++
		}

		histogram_total = input_size
		for i = 0; i < 256; i++ {
			/* We weigh the first 11 samples with weight 3 to account for the
			   balancing effect of the LZ77 phase on the histogram. */
			var adjust uint32 = 2 * brotli_min_uint32_t(histogram[i], 11)
			histogram[i] += adjust
			histogram_total += uint(adjust)
		}
	} else {
		const kSampleRate uint = 29
		for i = 0; i < input_size; i += kSampleRate {
			histogram[input[i]]++
		}

		histogram_total = (input_size + kSampleRate - 1) / kSampleRate
		for i = 0; i < 256; i++ {
			/* We add 1 to each population count to avoid 0 bit depths (since this is
			   only a sample and we don't know if the symbol appears or not), and we
			   weigh the first 11 samples with weight 3 to account for the balancing
			   effect of the LZ77 phase on the histogram (more frequent symbols are
			   more likely to be in backward references instead as literals). */
			var adjust uint32 = 1 + 2*brotli_min_uint32_t(histogram[i], 11)
			histogram[i] += adjust
			histogram_total += uint(adjust)
		}
	}

	buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
		8, depths, bits, storage_ix, storage)
	{
		var literal_ratio uint = 0
		for i = 0; i < 256; i++ {
			if histogram[i] != 0 {
				literal_ratio += uint(histogram[i] * uint32(depths[i]))
			}
		}

		/* Estimated encoding ratio, millibytes per symbol. */
		return (literal_ratio * 125) / histogram_total
	}
}
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
   "bits" based on "histogram" and stores it into the bit stream. */
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
	var tree [129]huffmanTree
	var cmd_depth = [numCommandSymbols]byte{0}
	/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
	var cmd_bits [64]uint16

	// Two separate 64-symbol trees: commands (depth limit 15) and
	// distances (depth limit 14).
	createHuffmanTree(histogram, 64, 15, tree[:], depth)
	createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:])

	/* We have to jump through a few hoops here in order to compute
	   the command bits because the symbols are in a different order than in
	   the full alphabet. This looks complicated, but having the symbols
	   in this order in the command bits saves a few branches in the Emit*
	   functions. */
	copy(cmd_depth[:], depth[:24])

	copy(cmd_depth[24:][:], depth[40:][:8])
	copy(cmd_depth[32:][:], depth[24:][:8])
	copy(cmd_depth[40:][:], depth[48:][:8])
	copy(cmd_depth[48:][:], depth[32:][:8])
	copy(cmd_depth[56:][:], depth[56:][:8])
	convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:])
	copy(bits, cmd_bits[:24])
	copy(bits[24:], cmd_bits[32:][:8])
	copy(bits[32:], cmd_bits[48:][:8])
	copy(bits[40:], cmd_bits[24:][:8])
	copy(bits[48:], cmd_bits[40:][:8])
	copy(bits[56:], cmd_bits[56:][:8])
	convertBitDepthsToSymbols(depth[64:], 64, bits[64:])
	{
		/* Create the bit length array for the full command alphabet. */
		var i uint
		for i := 0; i < int(64); i++ {
			cmd_depth[i] = 0
		} /* only 64 first values were used */
		copy(cmd_depth[:], depth[:8])

		copy(cmd_depth[64:][:], depth[8:][:8])
		copy(cmd_depth[128:][:], depth[16:][:8])
		copy(cmd_depth[192:][:], depth[24:][:8])
		copy(cmd_depth[384:][:], depth[32:][:8])
		for i = 0; i < 8; i++ {
			cmd_depth[128+8*i] = depth[40+i]
			cmd_depth[256+8*i] = depth[48+i]
			cmd_depth[448+8*i] = depth[56+i]
		}

		storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
	}

	storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
}
/* REQUIRES: insertlen < 6210 */
// emitInsertLen1 writes the insert-length command code for insertlen to the
// bit stream and records it in histo. Buckets (from the code below):
// <6 direct, <130 with (nbits<<1)+prefix+42, <2114 with nbits+50,
// otherwise code 61 plus 12 extra bits.
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
	if insertlen < 6 {
		var code uint = insertlen + 40
		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
		histo[code]++
	} else if insertlen < 130 {
		var tail uint = insertlen - 2
		var nbits uint32 = log2FloorNonZero(tail) - 1
		var prefix uint = tail >> nbits
		var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
		writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage)
		writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
		histo[inscode]++
	} else if insertlen < 2114 {
		var tail uint = insertlen - 66
		var nbits uint32 = log2FloorNonZero(tail)
		var code uint = uint(nbits + 50)
		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
		writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
		histo[code]++
	} else {
		writeBits(uint(depth[61]), uint64(bits[61]), storage_ix, storage)
		writeBits(12, uint64(insertlen)-2114, storage_ix, storage)
		histo[61]++
	}
}
// emitLongInsertLen handles insert lengths >= 6210 (the range emitInsertLen1
// cannot encode): code 62 with 14 extra bits up to 22594, else code 63 with
// 24 extra bits.
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
	if insertlen < 22594 {
		writeBits(uint(depth[62]), uint64(bits[62]), storage_ix, storage)
		writeBits(14, uint64(insertlen)-6210, storage_ix, storage)
		histo[62]++
	} else {
		writeBits(uint(depth[63]), uint64(bits[63]), storage_ix, storage)
		writeBits(24, uint64(insertlen)-22594, storage_ix, storage)
		histo[63]++
	}
}
// emitCopyLen1 writes the copy-length command code for copylen to the bit
// stream and records it in histo. Buckets: <10 direct (code copylen+14),
// <134 with (nbits<<1)+prefix+20, <2118 with nbits+28, else code 39 with
// 24 extra bits.
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
	if copylen < 10 {
		writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]), storage_ix, storage)
		histo[copylen+14]++
	} else if copylen < 134 {
		var tail uint = copylen - 6
		var nbits uint32 = log2FloorNonZero(tail) - 1
		var prefix uint = tail >> nbits
		var code uint = uint((nbits << 1) + uint32(prefix) + 20)
		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
		writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
		histo[code]++
	} else if copylen < 2118 {
		var tail uint = copylen - 70
		var nbits uint32 = log2FloorNonZero(tail)
		var code uint = uint(nbits + 28)
		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
		writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
		histo[code]++
	} else {
		writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
		writeBits(24, uint64(copylen)-2118, storage_ix, storage)
		histo[39]++
	}
}
// emitCopyLenLastDistance1 writes the copy-length code for a copy that reuses
// the last distance. For copylen >= 12 an explicit "last distance" symbol
// (code 64) is also emitted after the length code.
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
	if copylen < 12 {
		writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]), storage_ix, storage)
		histo[copylen-4]++
	} else if copylen < 72 {
		var tail uint = copylen - 8
		var nbits uint32 = log2FloorNonZero(tail) - 1
		var prefix uint = tail >> nbits
		var code uint = uint((nbits << 1) + uint32(prefix) + 4)
		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
		writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
		histo[code]++
	} else if copylen < 136 {
		var tail uint = copylen - 8
		var code uint = (tail >> 5) + 30
		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
		writeBits(5, uint64(tail)&31, storage_ix, storage)
		writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
		histo[code]++
		histo[64]++
	} else if copylen < 2120 {
		var tail uint = copylen - 72
		var nbits uint32 = log2FloorNonZero(tail)
		var code uint = uint(nbits + 28)
		writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
		writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
		writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
		histo[code]++
		histo[64]++
	} else {
		writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
		writeBits(24, uint64(copylen)-2120, storage_ix, storage)
		writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
		histo[39]++
		histo[64]++
	}
}
// emitDistance1 writes the distance code for "distance" (offset by 3 per the
// short-code scheme) followed by its extra bits, and records it in histo.
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
	var d uint = distance + 3
	var nbits uint32 = log2FloorNonZero(d) - 1
	var prefix uint = (d >> nbits) & 1
	var offset uint = (2 + prefix) << nbits
	var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
	writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage)
	writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage)
	histo[distcode]++
}
// emitLiterals writes the prefix code for each of the first len bytes of
// input to the bit stream, using the given literal depths and bits.
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
	for _, lit := range input[:len] {
		writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage)
	}
}
/* REQUIRES: len <= 1 << 24. */
// storeMetaBlockHeader1 writes a non-last meta-block header: the ISLAST bit,
// the MNIBBLES field (4/5/6 nibbles depending on len), MLEN-1, and the
// ISUNCOMPRESSED flag.
func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
	var nibbles uint = 6

	/* ISLAST */
	writeBits(1, 0, storage_ix, storage)

	if len <= 1<<16 {
		nibbles = 4
	} else if len <= 1<<20 {
		nibbles = 5
	}

	writeBits(2, uint64(nibbles)-4, storage_ix, storage)
	writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)

	/* ISUNCOMPRESSED */
	writeSingleBit(is_uncompressed, storage_ix, storage)
}
// updateBits overwrites n_bits bits of "array" starting at bit position pos
// with the low bits of "bits", leaving all surrounding bits untouched. Used
// to patch an already-written MLEN field when a meta-block is extended.
func updateBits(n_bits uint, bits uint32, pos uint, array []byte) {
	for n_bits > 0 {
		var byte_pos uint = pos >> 3
		var n_unchanged_bits uint = pos & 7
		// Write at most the remainder of the current byte per iteration.
		var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
		var total_bits uint = n_unchanged_bits + n_changed_bits
		// Mask keeps the bits below and above the changed span.
		var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
		var unchanged_bits uint32 = uint32(array[byte_pos]) & mask
		var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
		array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
		n_bits -= n_changed_bits
		bits >>= n_changed_bits
		pos += n_changed_bits
	}
}
// rewindBitPosition1 moves the bit-stream position back to new_storage_ix,
// zeroing any already-written bits at or above that position within its byte.
func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) {
	// Keep only the bits below the new position inside its byte.
	keep := byte((1 << (new_storage_ix & 7)) - 1)
	storage[new_storage_ix>>3] &= keep
	*storage_ix = new_storage_ix
}
// shouldMergeBlock_kSampleRate is the sampling stride used when estimating
// whether the next chunk compresses well under the current literal code.
var shouldMergeBlock_kSampleRate uint = 43

// shouldMergeBlock estimates, from a sampled histogram of data[:len], whether
// encoding it with the existing literal depths beats starting a fresh
// meta-block (r >= 0 means merging is expected to be profitable).
func shouldMergeBlock(data []byte, len uint, depths []byte) bool {
	var histo = [256]uint{0}
	var i uint
	for i = 0; i < len; i += shouldMergeBlock_kSampleRate {
		histo[data[i]]++
	}
	{
		var total uint = (len + shouldMergeBlock_kSampleRate - 1) / shouldMergeBlock_kSampleRate
		var r float64 = (fastLog2(total)+0.5)*float64(total) + 200
		for i = 0; i < 256; i++ {
			r -= float64(histo[i]) * (float64(depths[i]) + fastLog2(histo[i]))
		}

		return r >= 0.0
	}
}
// shouldUseUncompressedMode decides whether the current meta-block should be
// restarted as an uncompressed block: only when at least 2% of it would be a
// single long literal run (compressed*50 <= insertlen) and the estimated
// literal encoding ratio exceeds 980 millibytes per symbol.
func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertlen uint, literal_ratio uint) bool {
	// Bytes consumed since the meta-block start, recovered from the two
	// slices' capacities into the shared backing array.
	compressed := uint(cap(metablock_start) - cap(next_emit))
	if compressed*50 > insertlen {
		return false
	}
	return literal_ratio > 980
}
// emitUncompressedMetaBlock1 rewinds the bit stream to storage_ix_start
// (discarding everything written for the current meta-block), then writes an
// uncompressed meta-block containing the raw bytes begin[:len]. The slice
// length is recovered from the capacities of the two views into the shared
// backing array.
func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) {
	var len uint = uint(-cap(end) + cap(begin))
	rewindBitPosition1(storage_ix_start, storage_ix, storage)
	storeMetaBlockHeader1(uint(len), true, storage_ix, storage)
	// Raw bytes start on a byte boundary.
	*storage_ix = (*storage_ix + 7) &^ 7
	copy(storage[*storage_ix>>3:], begin[:len])
	*storage_ix += uint(len << 3)
	// Clear the byte after the copied data for subsequent bit writes.
	storage[*storage_ix>>3] = 0
}
// kCmdHistoSeed seeds the command/distance histogram at the start of each
// meta-block: most symbols start at count 1 so they keep a non-zero depth in
// the next prefix code, while a handful start at 0. Rows of 16, indices 0-127.
var kCmdHistoSeed = [128]uint32{
	0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
}
// Initial meta-block size, and the chunk size used when a meta-block is
// extended via merging (see compressFragmentFastImpl).
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
// compressFragmentFastImpl compresses in[:input_size] as one or more complete
// meta-blocks in a single pass: commands are emitted to the bit stream as
// soon as a match is found (snappy-style scan with skip heuristic). It may
// extend the current meta-block (goto emit_commands after a merge decision)
// or rewind and re-emit it as an uncompressed meta-block when literals do not
// compress well.
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
	var cmd_histo [128]uint32
	var ip_end int
	var next_emit int = 0
	var base_ip int = 0
	var input int = 0
	const kInputMarginBytes uint = windowGap
	const kMinMatchLen uint = 5
	var metablock_start int = input
	var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
	var total_block_size uint = block_size
	var mlen_storage_ix uint = *storage_ix + 3
	var lit_depth [256]byte
	var lit_bits [256]uint16
	var literal_ratio uint
	var ip int
	var last_distance int
	var shift uint = 64 - table_bits

	/* "next_emit" is a pointer to the first byte that is not covered by a
	   previous copy. Bytes between "next_emit" and the start of the next copy or
	   the end of the input will be emitted as literal bytes. */

	/* Save the start of the first block for position and distance computations.
	*/

	/* Save the bit position of the MLEN field of the meta-block header, so that
	   we can update it later if we decide to extend this meta-block. */
	storeMetaBlockHeader1(block_size, false, storage_ix, storage)

	/* No block splits, no contexts. */
	writeBits(13, 0, storage_ix, storage)

	literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
	{
		/* Store the pre-compressed command and distance prefix codes. */
		var i uint
		for i = 0; i+7 < *cmd_code_numbits; i += 8 {
			writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage)
		}
	}

	writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage)

	/* Initialize the command and distance histograms. We will gather
	   statistics of command and distance codes during the processing
	   of this block and use it to update the command and distance
	   prefix codes for the next block. */
emit_commands:
	copy(cmd_histo[:], kCmdHistoSeed[:])

	/* "ip" is the input pointer. */
	ip = input

	last_distance = -1
	ip_end = int(uint(input) + block_size)

	if block_size >= kInputMarginBytes {
		var len_limit uint = brotli_min_size_t(block_size-kMinMatchLen, input_size-kInputMarginBytes)
		var ip_limit int = int(uint(input) + len_limit)
		/* For the last block, we need to keep a 16 bytes margin so that we can be
		   sure that all distances are at most window size - 16.
		   For all other blocks, we only need to keep a margin of 5 bytes so that
		   we don't go over the block size with a copy. */

		var next_hash uint32
		ip++
		for next_hash = hash5(in[ip:], shift); ; {
			var skip uint32 = 32
			var next_ip int = ip
			/* Step 1: Scan forward in the input looking for a 5-byte-long match.
			   If we get close to exhausting the input then goto emit_remainder.

			   Heuristic match skipping: If 32 bytes are scanned with no matches
			   found, start looking only at every other byte. If 32 more bytes are
			   scanned, look at every third byte, etc.. When a match is found,
			   immediately go back to looking at every byte. This is a small loss
			   (~5% performance, ~0.1% density) for compressible data due to more
			   bookkeeping, but for non-compressible data (such as JPEG) it's a huge
			   win since the compressor quickly "realizes" the data is incompressible
			   and doesn't bother looking for matches everywhere.

			   The "skip" variable keeps track of how many bytes there are since the
			   last match; dividing it by 32 (i.e. right-shifting by five) gives the
			   number of bytes to move ahead for each iteration. */

			var candidate int
			assert(next_emit < ip)

		trawl:
			for {
				var hash uint32 = next_hash
				var bytes_between_hash_lookups uint32 = skip >> 5
				skip++
				assert(hash == hash5(in[next_ip:], shift))
				ip = next_ip
				next_ip = int(uint32(ip) + bytes_between_hash_lookups)
				if next_ip > ip_limit {
					goto emit_remainder
				}

				next_hash = hash5(in[next_ip:], shift)
				// First try the previous distance (cheap repeat-match check).
				candidate = ip - last_distance
				if isMatch5(in[ip:], in[candidate:]) {
					if candidate < ip {
						table[hash] = int(ip - base_ip)
						break
					}
				}

				candidate = base_ip + table[hash]
				assert(candidate >= base_ip)
				assert(candidate < ip)

				table[hash] = int(ip - base_ip)
				if !(!isMatch5(in[ip:], in[candidate:])) {
					break
				}
			}

			/* Check copy distance. If candidate is not feasible, continue search.
			   Checking is done outside of hot loop to reduce overhead. */
			if ip-candidate > maxDistance_compress_fragment {
				goto trawl
			}

			/* Step 2: Emit the found match together with the literal bytes from
			   "next_emit" to the bit stream, and then see if we can find a next match
			   immediately afterwards. Repeat until we find no match for the input
			   without emitting some literal bytes. */
			{
				var base int = ip
				/* > 0 */
				var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5)
				var distance int = int(base - candidate)
				/* We have a 5-byte match at ip, and we need to emit bytes in
				   [next_emit, ip). */

				var insert uint = uint(base - next_emit)
				ip += int(matched)
				if insert < 6210 {
					emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
				} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
					// Literals compress badly: abandon this meta-block and
					// restart it as uncompressed data.
					emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage)
					input_size -= uint(base - input)
					input = base
					next_emit = input
					goto next_block
				} else {
					emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
				}

				emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
				if distance == last_distance {
					writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage)
					cmd_histo[64]++
				} else {
					emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
					last_distance = distance
				}

				emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)

				next_emit = ip
				if ip >= ip_limit {
					goto emit_remainder
				}

				/* We could immediately start working at ip now, but to improve
				   compression we first update "table" with the hashes of some positions
				   within the last copy. */
				{
					var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:])
					var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift)
					var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift)
					table[prev_hash] = int(ip - base_ip - 3)
					prev_hash = hashBytesAtOffset5(input_bytes, 1, shift)
					table[prev_hash] = int(ip - base_ip - 2)
					prev_hash = hashBytesAtOffset5(input_bytes, 2, shift)
					table[prev_hash] = int(ip - base_ip - 1)

					candidate = base_ip + table[cur_hash]
					table[cur_hash] = int(ip - base_ip)
				}
			}

			// Keep emitting back-to-back copies while the position right after
			// the last copy also matches (no intervening literals needed).
			for isMatch5(in[ip:], in[candidate:]) {
				var base int = ip
				/* We have a 5-byte match at ip, and no need to emit any literal bytes
				   prior to ip. */

				var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5)
				if ip-candidate > maxDistance_compress_fragment {
					break
				}
				ip += int(matched)
				last_distance = int(base - candidate) /* > 0 */
				emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
				emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)

				next_emit = ip
				if ip >= ip_limit {
					goto emit_remainder
				}

				/* We could immediately start working at ip now, but to improve
				   compression we first update "table" with the hashes of some positions
				   within the last copy. */
				{
					var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:])
					var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift)
					var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift)
					table[prev_hash] = int(ip - base_ip - 3)
					prev_hash = hashBytesAtOffset5(input_bytes, 1, shift)
					table[prev_hash] = int(ip - base_ip - 2)
					prev_hash = hashBytesAtOffset5(input_bytes, 2, shift)
					table[prev_hash] = int(ip - base_ip - 1)

					candidate = base_ip + table[cur_hash]
					table[cur_hash] = int(ip - base_ip)
				}
			}

			ip++
			next_hash = hash5(in[ip:], shift)
		}
	}

emit_remainder:
	assert(next_emit <= ip_end)
	input += int(block_size)
	input_size -= block_size
	block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kMergeBlockSize)

	/* Decide if we want to continue this meta-block instead of emitting the
	   last insert-only command. */
	if input_size > 0 && total_block_size+block_size <= 1<<20 && shouldMergeBlock(in[input:], block_size, lit_depth[:]) {
		assert(total_block_size > 1<<16)

		/* Update the size of the current meta-block and continue emitting commands.
		   We can do this because the current size and the new size both have 5
		   nibbles. */
		total_block_size += block_size

		updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage)
		goto emit_commands
	}

	/* Emit the remaining bytes as literals. */
	if next_emit < ip_end {
		var insert uint = uint(ip_end - next_emit)
		if insert < 6210 {
			emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
			emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
		} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
			emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage)
		} else {
			emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
			emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
		}
	}

	next_emit = ip_end

	/* If we have more data, write a new meta-block header and prefix codes and
	   then continue emitting commands. */
next_block:
	if input_size > 0 {
		metablock_start = input
		block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
		total_block_size = block_size

		/* Save the bit position of the MLEN field of the meta-block header, so that
		   we can update it later if we decide to extend this meta-block. */
		mlen_storage_ix = *storage_ix + 3

		storeMetaBlockHeader1(block_size, false, storage_ix, storage)

		/* No block splits, no contexts. */
		writeBits(13, 0, storage_ix, storage)

		literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
		buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage)
		goto emit_commands
	}

	if !is_last {
		/* If this is not the last block, update the command and distance prefix
		   codes for the next block and store the compressed forms. */
		cmd_code[0] = 0

		*cmd_code_numbits = 0
		buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code)
	}
}
/* Compresses "input" string to the "*storage" buffer as one or more complete
   meta-blocks, and updates the "*storage_ix" bit position.

   If "is_last" is 1, emits an additional empty last meta-block.

   "cmd_depth" and "cmd_bits" contain the command and distance prefix codes
   (see comment in encode.h) used for the encoding of this input fragment.
   If "is_last" is 0, they are updated to reflect the statistics
   of this input fragment, to be used for the encoding of the next fragment.

   "*cmd_code_numbits" is the number of bits of the compressed representation
   of the command and distance prefix codes, and "cmd_code" is an array of
   at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed
   command and distance prefix codes. If "is_last" is 0, these are also
   updated to represent the updated "cmd_depth" and "cmd_bits".

   REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
   REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
   REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
   REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
   OUTPUT: maximal copy distance <= |input_size|
   OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
	var initial_storage_ix uint = *storage_ix
	var table_bits uint = uint(log2FloorNonZero(table_size))

	// Empty input: emit only the empty last meta-block.
	if input_size == 0 {
		assert(is_last)
		writeBits(1, 1, storage_ix, storage) /* islast */
		writeBits(1, 1, storage_ix, storage) /* isempty */
		*storage_ix = (*storage_ix + 7) &^ 7
		return
	}

	compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage)

	/* If output is larger than single uncompressed block, rewrite it. */
	if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
		emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage)
	}

	if is_last {
		writeBits(1, 1, storage_ix, storage) /* islast */
		writeBits(1, 1, storage_ix, storage) /* isempty */
		*storage_ix = (*storage_ix + 7) &^ 7
	}
}
package validator
import (
"bytes"
"crypto/sha256"
"fmt"
"net"
"net/url"
"os"
"reflect"
"strconv"
"strings"
"time"
"unicode/utf8"
)
// timeType caches the reflect.Type of time.Time for fast type comparisons.
var timeType = reflect.TypeOf(time.Time{})
// IsURLEncoded reports whether the value matches uRLEncodedRegex
// (URL percent-encoding).
func (t *KValidator) IsURLEncoded() bool {
	return uRLEncodedRegex.MatchString(t.data.String())
}

// IsHTMLEncoded reports whether the value matches hTMLEncodedRegex
// (HTML entity encoding).
func (t *KValidator) IsHTMLEncoded() bool {
	return hTMLEncodedRegex.MatchString(t.data.String())
}

// IsHTML reports whether the value matches hTMLRegex (HTML markup).
func (t *KValidator) IsHTML() bool {
	return hTMLRegex.MatchString(t.data.String())
}
// IsMAC is the validation function for validating if the field's value is a
// valid MAC address, as accepted by net.ParseMAC.
func (t *KValidator) IsMAC() bool {
	if _, err := net.ParseMAC(t.data.String()); err != nil {
		return false
	}
	return true
}
// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
// (To4() != nil distinguishes IPv4 from IPv6.)
func (t *KValidator) IsCIDRv4() bool {
	ip, _, err := net.ParseCIDR(t.data.String())

	return err == nil && ip.To4() != nil
}

// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
func (t *KValidator) IsCIDRv6() bool {
	ip, _, err := net.ParseCIDR(t.data.String())

	return err == nil && ip.To4() == nil
}

// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
func (t *KValidator) IsCIDR() bool {
	_, _, err := net.ParseCIDR(t.data.String())

	return err == nil
}
// IsIPv4 is the validation function for validating if a value is a valid v4 IP address.
func (t *KValidator) IsIPv4() bool {
ip := net.ParseIP(t.data.String())
return ip != nil && ip.To4() != nil
}
// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
func (t *KValidator) IsIPv6() bool {
ip := net.ParseIP(t.data.String())
return ip != nil && ip.To4() == nil
}
// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
func (t *KValidator) IsIP() bool {
ip := net.ParseIP(t.data.String())
return ip != nil
}
// IsSSN is the validation function for validating if the field's value is a valid SSN.
// The value must be exactly 11 characters (the dashed NNN-NN-NNNN form,
// as required by sSNRegex — confirm against the regex definition).
func (t *KValidator) IsSSN() bool {
	if t.data.Len() != 11 {
		return false
	}
	return sSNRegex.MatchString(t.data.String())
}
// isLongitude is the validation function for validating if the field's value
// is a valid longitude coordinate. Numeric kinds are first rendered to their
// decimal string form and then matched against longitudeRegex; any other
// kind panics.
func (t *KValidator) isLongitude() bool {
	fv := t.data
	var str string
	switch fv.Kind() {
	case reflect.String:
		str = fv.String()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		str = strconv.FormatInt(fv.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		str = strconv.FormatUint(fv.Uint(), 10)
	case reflect.Float32:
		str = strconv.FormatFloat(fv.Float(), 'f', -1, 32)
	case reflect.Float64:
		str = strconv.FormatFloat(fv.Float(), 'f', -1, 64)
	default:
		panic(fmt.Sprintf("Bad field type %T", fv.Interface()))
	}
	return longitudeRegex.MatchString(str)
}
// isLatitude is the validation function for validating if the field's value
// is a valid latitude coordinate. Numeric kinds are first rendered to their
// decimal string form and then matched against latitudeRegex; any other
// kind panics.
func (t *KValidator) isLatitude() bool {
	fv := t.data
	var str string
	switch fv.Kind() {
	case reflect.String:
		str = fv.String()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		str = strconv.FormatInt(fv.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		str = strconv.FormatUint(fv.Uint(), 10)
	case reflect.Float32:
		str = strconv.FormatFloat(fv.Float(), 'f', -1, 32)
	case reflect.Float64:
		str = strconv.FormatFloat(fv.Float(), 'f', -1, 64)
	default:
		panic(fmt.Sprintf("Bad field type %T", fv.Interface()))
	}
	return latitudeRegex.MatchString(str)
}
// isDataURI is the validation function for validating if the field's value
// is a valid data URI: a "data:..." header and a base64 payload separated by
// the first comma.
func (t *KValidator) isDataURI() bool {
	parts := strings.SplitN(t.data.String(), ",", 2)
	if len(parts) < 2 {
		return false
	}
	if dataURIRegex.MatchString(parts[0]) {
		return base64Regex.MatchString(parts[1])
	}
	return false
}
// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
// NOTE(review): an empty value returns true — confirm that "no content"
// should count as passing this check.
func (t *KValidator) HasMultiByteCharacter() bool {
	field := t.data
	if field.Len() == 0 {
		return true
	}
	return multibyteRegex.MatchString(field.String())
}

// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
func (t *KValidator) IsPrintableASCII() bool {
	return printableASCIIRegex.MatchString(t.data.String())
}

// IsASCII is the validation function for validating if the field's value is a valid ASCII character.
func (t *KValidator) IsASCII() bool {
	return aSCIIRegex.MatchString(t.data.String())
}
// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
// (All UUID regexes are declared elsewhere in this package.)
func (t *KValidator) IsUUID5() bool {
	return uUID5Regex.MatchString(t.data.String())
}

// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
func (t *KValidator) IsUUID4() bool {
	return uUID4Regex.MatchString(t.data.String())
}

// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
func (t *KValidator) IsUUID3() bool {
	return uUID3Regex.MatchString(t.data.String())
}

// IsUUID is the validation function for validating if the field's value is a valid UUID of any version.
func (t *KValidator) IsUUID() bool {
	return uUIDRegex.MatchString(t.data.String())
}

// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID.
func (t *KValidator) IsUUID5RFC4122() bool {
	return uUID5RFC4122Regex.MatchString(t.data.String())
}

// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID.
func (t *KValidator) IsUUID4RFC4122() bool {
	return uUID4RFC4122Regex.MatchString(t.data.String())
}

// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID.
func (t *KValidator) IsUUID3RFC4122() bool {
	return uUID3RFC4122Regex.MatchString(t.data.String())
}

// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version.
func (t *KValidator) IsUUIDRFC4122() bool {
	return uUIDRFC4122Regex.MatchString(t.data.String())
}
// isISBN is the validation function for validating if the field's value is a
// valid ISBN in either the 10-digit or 13-digit form.
func (t *KValidator) isISBN() bool {
	if t.IsISBN10() {
		return true
	}
	return t.IsISBN13()
}
// IsISBN13 is the validation function for validating if the field's value is
// a valid v13 ISBN. Dashes and spaces (up to four of each) are stripped, the
// format is checked against iSBN13Regex, and the alternating 1/3-weighted
// checksum over the first 12 digits must reproduce the final check digit.
func (t *KValidator) IsISBN13() bool {
	s := strings.Replace(strings.Replace(t.data.String(), "-", "", 4), " ", "", 4)
	if !iSBN13Regex.MatchString(s) {
		return false
	}
	weights := [2]int32{1, 3}
	var sum int32
	for i := int32(0); i < 12; i++ {
		sum += weights[i%2] * int32(s[i]-'0')
	}
	check := (10 - sum%10) % 10
	return int32(s[12]-'0') == check
}
// IsISBN10 is the validation function for validating if the field's value is
// a valid v10 ISBN. Dashes and spaces (up to three of each) are stripped, the
// format is checked against iSBN10Regex, and the position-weighted checksum
// (where a trailing 'X' stands for 10) must be divisible by 11.
func (t *KValidator) IsISBN10() bool {
	s := strings.Replace(strings.Replace(t.data.String(), "-", "", 3), " ", "", 3)
	if !iSBN10Regex.MatchString(s) {
		return false
	}
	var sum int32
	for pos := int32(0); pos < 9; pos++ {
		sum += (pos + 1) * int32(s[pos]-'0')
	}
	if s[9] == 'X' {
		sum += 100 // check digit X == 10, weighted by position 10
	} else {
		sum += 10 * int32(s[9]-'0')
	}
	return sum%11 == 0
}
// IsEthereumAddress is the validation function for validating if the field's value is a valid ethereum address based currently only on the format
// NOTE(review): once the format check passes this always returns true — the
// all-upper/all-lower branch is redundant with the final return. It is kept
// as a placeholder for the (currently disabled) EIP-55 checksum validation.
func (t *KValidator) IsEthereumAddress() bool {
	address := t.data.String()
	if !ethAddressRegex.MatchString(address) {
		return false
	}
	// Addresses that are entirely one case carry no checksum information.
	if ethaddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) {
		return true
	}
	// checksum validation is blocked by https://github.com/golang/crypto/pull/28
	return true
}
// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address
// After a format pre-check, the address is base58-decoded into a 25-byte
// big-endian value and the trailing 4-byte checksum is compared against the
// double SHA-256 of the leading 21 bytes (version byte + payload).
func (t *KValidator) IsBitcoinAddress() bool {
	address := t.data.String()
	if !btcAddressRegex.MatchString(address) {
		return false
	}
	// Base58 alphabet (no 0, O, I, l). The regex above has already rejected
	// characters outside this set, so IndexByte never returns -1 here.
	alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")
	decode := [25]byte{}
	for _, n := range []byte(address) {
		// Multiply the running 25-byte value by 58 and add the new digit,
		// propagating carries from the least-significant (last) byte up.
		d := bytes.IndexByte(alphabet, n)
		for i := 24; i >= 0; i-- {
			d += 58 * int(decode[i])
			decode[i] = byte(d % 256)
			d /= 256
		}
	}
	// First SHA-256 over version byte + payload.
	h := sha256.New()
	_, _ = h.Write(decode[:21])
	d := h.Sum([]byte{})
	// Second SHA-256 over the first digest; h.Sum(d[:0]) reuses d's storage.
	h = sha256.New()
	_, _ = h.Write(d)
	validchecksum := [4]byte{}
	computedchecksum := [4]byte{}
	copy(computedchecksum[:], h.Sum(d[:0]))
	copy(validchecksum[:], decode[21:])
	return validchecksum == computedchecksum
}
// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
// Implements the BIP 173 checks: charset/format, bech32 polymod checksum over
// the expanded "bc" human-readable part plus the data part, witness version
// range, and witness program length.
func (t *KValidator) IsBitcoinBech32Address() bool {
	address := t.data.String()
	// Mixed-case addresses are invalid: the value must match either the
	// all-lowercase or the all-uppercase pattern.
	if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) {
		return false
	}
	// Overall lengths congruent to 0, 3 or 5 mod 8 cannot occur for valid
	// segwit addresses (per BIP 173).
	am := len(address) % 8
	if am == 0 || am == 3 || am == 5 {
		return false
	}
	address = strings.ToLower(address)
	// Bech32 charset; each character encodes a 5-bit value by its index.
	alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
	hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc
	addr := address[3:]        // skip "bc1" (hrp + separator)
	dp := make([]int, 0, len(addr))
	for _, c := range addr {
		dp = append(dp, strings.IndexRune(alphabet, c))
	}
	// First data value is the witness version; must be 0..16.
	ver := dp[0]
	if ver < 0 || ver > 16 {
		return false
	}
	if ver == 0 {
		// Version-0 programs are fixed-size, yielding exactly these lengths.
		if len(address) != 42 && len(address) != 62 {
			return false
		}
	}
	// Bech32 polymod (BCH code) over hrp expansion + data; must end at 1.
	values := append(hr, dp...)
	GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
	p := 1
	for _, v := range values {
		b := p >> 25
		p = (p&0x1ffffff)<<5 ^ v
		for i := 0; i < 5; i++ {
			if (b>>uint(i))&1 == 1 {
				p ^= GEN[i]
			}
		}
	}
	if p != 1 {
		return false
	}
	// Regroup the 5-bit values (version and the 6 checksum symbols excluded)
	// into 8-bit groups; only the resulting length is checked below.
	b := uint(0)
	acc := 0
	mv := (1 << 5) - 1
	var sw []int
	for _, v := range dp[1 : len(dp)-6] {
		acc = (acc << 5) | v
		b += 5
		for b >= 8 {
			b -= 8
			sw = append(sw, (acc>>b)&mv)
		}
	}
	// Witness program must be between 2 and 40 bytes.
	if len(sw) < 2 || len(sw) > 40 {
		return false
	}
	return true
}
// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
func (t *KValidator) excludesRune() bool {
	return !t.ContainsRune()
}

// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
func (t *KValidator) ExcludesAll() bool {
	return !t.ContainsAny()
}

// Excludes is the validation function for validating that the field's value does not contain the text specified within the param.
func (t *KValidator) Excludes() bool {
	return !t.Contains()
}

// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param.
// NOTE(review): this checks the value against its own first rune, so it is
// true for every non-empty string — there is no separate param on this type.
// Confirm whether a parameter (as in FieldContains) was intended.
func (t *KValidator) ContainsRune() bool {
	r, _ := utf8.DecodeRuneInString(t.data.String())
	return strings.ContainsRune(t.data.String(), r)
}

// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
// NOTE(review): compares the value against itself — always true unless empty.
func (t *KValidator) ContainsAny() bool {
	return strings.ContainsAny(t.data.String(), t.data.String())
}

// Contains is the validation function for validating that the field's value contains the text specified within the param.
// NOTE(review): compares the value against itself — always true.
func (t *KValidator) Contains() bool {
	return strings.Contains(t.data.String(), t.data.String())
}
// FieldContains is the validation function for validating if the current
// field's value contains the given substring s.
func (t *KValidator) FieldContains(s string) bool {
	return strings.Contains(t.data.String(), s)
}
// FieldExcludes is the validation function for validating if the current
// field's value does not contain the given substring s.
func (t *KValidator) FieldExcludes(s string) bool {
	return !strings.Contains(t.data.String(), s)
}
// IsNe is the validation function for validating that the field's value does
// not equal the provided param value; it is the negation of IsEq.
func (t *KValidator) IsNe() bool {
	if t.IsEq() {
		return false
	}
	return true
}
// IsEq is the validation function for validating if the current field's value is equal to the param's value.
// NOTE(review): param is derived from the field itself (field.String()), not
// from an external parameter — the String case is therefore always true, and
// numeric cases compare a value to the parse of its own string form. Confirm
// whether a real parameter was intended.
func (t *KValidator) IsEq() bool {
	field := t.data
	param := field.String()
	switch field.Kind() {
	case reflect.String:
		return field.String() == param
	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)
		return int64(field.Len()) == p
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asInt(param)
		return field.Int() == p
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)
		return field.Uint() == p
	case reflect.Float32, reflect.Float64:
		p := asFloat(param)
		return field.Float() == p
	}
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// IsBase64 is the validation function for validating if the current field's value is a valid base 64.
func (t *KValidator) IsBase64() bool {
	return base64Regex.MatchString(t.data.String())
}

// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
func (t *KValidator) IsBase64URL() bool {
	return base64URLRegex.MatchString(t.data.String())
}
// IsURI is the validation function for validating if the current field's
// value is a valid URI. Non-string kinds panic. Following browser behavior
// (see validator issue #237), any '#' fragment is stripped before parsing.
func (t *KValidator) IsURI() bool {
	if t.data.Kind() != reflect.String {
		panic(fmt.Sprintf("Bad field type %T", t.data.Interface()))
	}
	s := t.data.String()
	// Drop the fragment, as browsers do, before handing off to net/url.
	if idx := strings.Index(s, "#"); idx > -1 {
		s = s[:idx]
	}
	if s == "" {
		return false
	}
	_, err := url.ParseRequestURI(s)
	return err == nil
}
// IsURL is the validation function for validating if the current field's
// value is a valid URL: it must parse via url.ParseRequestURI and carry a
// non-empty scheme. Non-string kinds panic.
//
// Fixes: the parsed result was named "url", shadowing the imported net/url
// package, and the trailing "return err == nil" was dead code (err is known
// to be nil at that point).
func (t *KValidator) IsURL() bool {
	field := t.data
	switch field.Kind() {
	case reflect.String:
		s := field.String()
		// checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
		// emulate browser and strip the '#' suffix prior to validation. see issue-#237
		if i := strings.Index(s, "#"); i > -1 {
			s = s[:i]
		}
		if len(s) == 0 {
			return false
		}
		u, err := url.ParseRequestURI(s)
		if err != nil || u.Scheme == "" {
			return false
		}
		return true
	}
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// IsFile is the validation function for validating if the current field's
// value is a path to an existing regular (non-directory) file. Non-string
// kinds panic.
func (t *KValidator) IsFile() bool {
	if t.data.Kind() != reflect.String {
		panic(fmt.Sprintf("Bad field type %T", t.data.Interface()))
	}
	info, err := os.Stat(t.data.String())
	return err == nil && !info.IsDir()
}
// IsEmail is the validation function for validating if the current field's value is a valid email address.
// (All regexes below are declared elsewhere in this package.)
func (t *KValidator) IsEmail() bool {
	return emailRegex.MatchString(t.data.String())
}

// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color.
func (t *KValidator) IsHSLA() bool {
	return hslaRegex.MatchString(t.data.String())
}

// IsHSL is the validation function for validating if the current field's value is a valid HSL color.
func (t *KValidator) IsHSL() bool {
	return hslRegex.MatchString(t.data.String())
}

// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color.
func (t *KValidator) IsRGBA() bool {
	return rgbaRegex.MatchString(t.data.String())
}

// IsRGB is the validation function for validating if the current field's value is a valid RGB color.
func (t *KValidator) IsRGB() bool {
	return rgbRegex.MatchString(t.data.String())
}

// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color.
func (t *KValidator) IsHEXColor() bool {
	return hexcolorRegex.MatchString(t.data.String())
}

// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
func (t *KValidator) IsHexadecimal() bool {
	return hexadecimalRegex.MatchString(t.data.String())
}
// Required is the validation function for validating if the current field's
// value is not the default static value.
func (t *KValidator) Required() bool {
	switch t.data.Kind() {
	case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
		// Nilable kinds: "required" means non-nil.
		return !t.data.IsNil()
	default:
		// NOTE(review): for non-nilable kinds Interface() never returns nil,
		// so this branch returns true even for zero values (0, "", false),
		// and the zero-value comparison below looks unreachable. Confirm
		// this is the intended semantics.
		if t.data.Interface() != nil {
			return true
		}
		return t.data.IsValid() && t.data.Interface() != reflect.Zero(t.data.Type()).Interface()
	}
}
// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
// NOTE(review): param is derived from the field itself (t.data.String()),
// not from an external parameter — e.g. a string field is compared against
// the integer parse of its own contents. Confirm this is intended.
func (t *KValidator) IsGte() bool {
	field := t.data
	param := t.data.String()
	switch field.Kind() {
	case reflect.String:
		// String comparison is by rune count, not byte length.
		p := asInt(param)
		return int64(utf8.RuneCountInString(field.String())) >= p
	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)
		return int64(field.Len()) >= p
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asInt(param)
		return field.Int() >= p
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)
		return field.Uint() >= p
	case reflect.Float32, reflect.Float64:
		p := asFloat(param)
		return field.Float() >= p
	case reflect.Struct:
		if field.Type() == timeType {
			// For time.Time, "gte" means now-or-later (UTC).
			now := time.Now().UTC()
			t := field.Interface().(time.Time)
			return t.After(now) || t.Equal(now)
		}
	}
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}

// IsGt is the validation function for validating if the current field's value is greater than the param's value.
// NOTE(review): same param-derivation caveat as IsGte.
func (t *KValidator) IsGt() bool {
	field := t.data
	param := t.data.String()
	switch field.Kind() {
	case reflect.String:
		p := asInt(param)
		return int64(utf8.RuneCountInString(field.String())) > p
	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)
		return int64(field.Len()) > p
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asInt(param)
		return field.Int() > p
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)
		return field.Uint() > p
	case reflect.Float32, reflect.Float64:
		p := asFloat(param)
		return field.Float() > p
	case reflect.Struct:
		if field.Type() == timeType {
			// For time.Time, "gt" means strictly in the future (UTC).
			return field.Interface().(time.Time).After(time.Now().UTC())
		}
	}
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
// NOTE(review): param is the field's own string form — see the note on IsGte.
func (t *KValidator) HasLengthOf() bool {
	field := t.data
	param := t.data.String()
	switch field.Kind() {
	case reflect.String:
		// "Length" of a string is its rune count.
		p := asInt(param)
		return int64(utf8.RuneCountInString(field.String())) == p
	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)
		return int64(field.Len()) == p
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asInt(param)
		return field.Int() == p
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)
		return field.Uint() == p
	case reflect.Float32, reflect.Float64:
		p := asFloat(param)
		return field.Float() == p
	}
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}

// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
// It is an alias for IsGte.
func (t *KValidator) HasMinOf() bool {
	return t.IsGte()
}
// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value.
// NOTE(review): param is the field's own string form — see the note on IsGte.
func (t *KValidator) IsLte() bool {
	field := t.data
	param := t.data.String()
	switch field.Kind() {
	case reflect.String:
		p := asInt(param)
		return int64(utf8.RuneCountInString(field.String())) <= p
	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)
		return int64(field.Len()) <= p
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asInt(param)
		return field.Int() <= p
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)
		return field.Uint() <= p
	case reflect.Float32, reflect.Float64:
		p := asFloat(param)
		return field.Float() <= p
	case reflect.Struct:
		if field.Type() == timeType {
			// For time.Time, "lte" means now-or-earlier (UTC).
			now := time.Now().UTC()
			t := field.Interface().(time.Time)
			return t.Before(now) || t.Equal(now)
		}
	}
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// IsLt is the validation function for validating if the current field's value is less than the param's value.
// NOTE(review): param is the field's own string form — see the note on IsGte.
func (t *KValidator) IsLt() bool {
	field := t.data
	param := t.data.String()
	switch field.Kind() {
	case reflect.String:
		p := asInt(param)
		return int64(utf8.RuneCountInString(field.String())) < p
	case reflect.Slice, reflect.Map, reflect.Array:
		p := asInt(param)
		return int64(field.Len()) < p
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asInt(param)
		return field.Int() < p
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)
		return field.Uint() < p
	case reflect.Float32, reflect.Float64:
		p := asFloat(param)
		return field.Float() < p
	case reflect.Struct:
		if field.Type() == timeType {
			// For time.Time, "lt" means strictly in the past (UTC).
			return field.Interface().(time.Time).Before(time.Now().UTC())
		}
	}
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}

// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
// It is an alias for IsLte.
func (t *KValidator) HasMaxOf() bool {
	return t.IsLte()
}
// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
// Each *Resolvable check below pre-validates the literal form (IsIP4Addr /
// IsIP6Addr / IsIP) before asking the net package to resolve it.
func (t *KValidator) IsTCP4AddrResolvable() bool {
	if !t.IsIP4Addr() {
		return false
	}
	_, err := net.ResolveTCPAddr("tcp4", t.data.String())
	return err == nil
}

// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
func (t *KValidator) IsTCP6AddrResolvable() bool {
	if !t.IsIP6Addr() {
		return false
	}
	_, err := net.ResolveTCPAddr("tcp6", t.data.String())
	return err == nil
}

// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
func (t *KValidator) IsTCPAddrResolvable() bool {
	if !t.IsIP4Addr() && !t.IsIP6Addr() {
		return false
	}
	_, err := net.ResolveTCPAddr("tcp", t.data.String())
	return err == nil
}

// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
func (t *KValidator) IsUDP4AddrResolvable() bool {
	if !t.IsIP4Addr() {
		return false
	}
	_, err := net.ResolveUDPAddr("udp4", t.data.String())
	return err == nil
}

// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
func (t *KValidator) IsUDP6AddrResolvable() bool {
	if !t.IsIP6Addr() {
		return false
	}
	_, err := net.ResolveUDPAddr("udp6", t.data.String())
	return err == nil
}

// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
func (t *KValidator) IsUDPAddrResolvable() bool {
	if !t.IsIP4Addr() && !t.IsIP6Addr() {
		return false
	}
	_, err := net.ResolveUDPAddr("udp", t.data.String())
	return err == nil
}

// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
func (t *KValidator) IsIP4AddrResolvable() bool {
	if !t.IsIPv4() {
		return false
	}
	_, err := net.ResolveIPAddr("ip4", t.data.String())
	return err == nil
}

// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
func (t *KValidator) IsIP6AddrResolvable() bool {
	if !t.IsIPv6() {
		return false
	}
	_, err := net.ResolveIPAddr("ip6", t.data.String())
	return err == nil
}

// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
func (t *KValidator) IsIPAddrResolvable() bool {
	if !t.IsIP() {
		return false
	}
	_, err := net.ResolveIPAddr("ip", t.data.String())
	return err == nil
}

// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
func (t *KValidator) IsUnixAddrResolvable() bool {
	_, err := net.ResolveUnixAddr("unix", t.data.String())
	return err == nil
}
// IsIP4Addr reports whether the value is an IPv4 address, optionally
// followed by a ":port" suffix, which is stripped before parsing.
func (t *KValidator) IsIP4Addr() bool {
	host := t.data.String()
	if i := strings.LastIndex(host, ":"); i != -1 {
		host = host[:i]
	}
	parsed := net.ParseIP(host)
	return parsed != nil && parsed.To4() != nil
}
// IsIP6Addr reports whether the value is an IPv6 address, optionally in
// bracketed "[addr]:port" form, in which case the brackets and port are
// stripped before parsing. A bare "addr" (no brackets) is parsed as-is.
func (t *KValidator) IsIP6Addr() bool {
	val := t.data.String()
	if idx := strings.LastIndex(val, ":"); idx != -1 {
		// Only strip when the last ':' is preceded by ']', i.e. a port
		// suffix on a bracketed address; a bare "::1" is left untouched.
		if idx != 0 && val[idx-1:idx] == "]" {
			val = val[1 : idx-1]
		}
	}
	ip := net.ParseIP(val)
	return ip != nil && ip.To4() == nil
}
// IsHostnameRFC952 reports whether the value matches the RFC 952 hostname
// pattern (regex declared elsewhere in this package).
func (t *KValidator) IsHostnameRFC952() bool {
	return hostnameRegexRFC952.MatchString(t.data.String())
}

// IsHostnameRFC1123 reports whether the value matches the RFC 1123 hostname
// pattern.
func (t *KValidator) IsHostnameRFC1123() bool {
	return hostnameRegexRFC1123.MatchString(t.data.String())
}
// IsFQDN reports whether the value is a fully qualified domain name: a
// non-empty RFC 952 hostname that contains at least one dot. One trailing
// root dot, if present, is ignored.
func (t *KValidator) IsFQDN() bool {
	host := t.data.String()
	if host == "" {
		return false
	}
	host = strings.TrimSuffix(host, ".")
	return strings.Contains(host, ".") &&
		hostnameRegexRFC952.MatchString(host)
}
// IsDir is the validation function for validating if the current field's value is a valid directory.
func (t *KValidator) IsDir() bool {
if t.data.Kind() == reflect.String {
fileInfo, err := os.Stat(t.data.String())
if err != nil {
return false
}
return fileInfo.IsDir()
}
panic(fmt.Sprintf("Bad field type %T", t.data.Interface()))
} | internal/validator/baked_in.go | 0.74055 | 0.411879 | baked_in.go | starcoder |
package restruct
import (
"encoding/binary"
"fmt"
"math"
"reflect"
)
// Unpacker is a type capable of unpacking a binary representation of itself
// into a native representation. The Unpack function is expected to consume
// a number of bytes from the buffer, then return a slice of the remaining
// bytes in the buffer. You may use a pointer receiver even if the type is
// used by value.
type Unpacker interface {
	// Unpack consumes this value's encoding from the front of buf using the
	// given byte order and returns the unread remainder of buf.
	Unpack(buf []byte, order binary.ByteOrder) ([]byte, error)
}
// decoder carries the mutable state of a single decoding walk over a value.
type decoder struct {
	order      binary.ByteOrder // active byte order; may be overridden per field
	buf        []byte           // remaining input, consumed from the front
	struc      reflect.Value    // innermost struct currently being filled (for sizeof back-references)
	sfields    []field          // cached field descriptors of struc
	bitCounter uint8            // bits already consumed from buf[0] (0-7)
}
// readBits extracts the next f.BitSize bits from the front of d.buf and
// returns them right-aligned in a big-endian byte slice of outputLength
// bytes, advancing d.buf and d.bitCounter past the consumed bits.
// A zero f.BitSize is treated as a full read of 8*outputLength bits.
func (d *decoder) readBits(f field, outputLength uint8) []byte {
	output := make([]byte, outputLength)
	if f.BitSize == 0 {
		// Having problems with complex64 type ... so we asume we want to read all
		// f.BitSize = uint8(f.Type.Bits())
		f.BitSize = 8 * outputLength
	}
	// originPos: Original position of the first bit in the first byte
	originPos := 8 - d.bitCounter
	// destPos: Destination position ( in the result ) of the first bit in the first byte
	destPos := f.BitSize % 8
	if destPos == 0 {
		destPos = 8
	}
	// numBytes: number of complete bytes to hold the result
	numBytes := f.BitSize / 8
	// numBits: number of remaining bits in the first non-complete byte of the result
	numBits := f.BitSize % 8
	// number of positions we have to shift the bytes to get the result
	// (uint8 arithmetic: the %8 result is correct even when the subtraction wraps)
	shift := (originPos - destPos) % 8
	outputInitialIdx := outputLength - numBytes
	if numBits > 0 {
		outputInitialIdx = outputInitialIdx - 1
	}
	o := output[outputInitialIdx:]
	if originPos < destPos { // shift left
		for idx := range o {
			// TODO: Control the number of bytes of d.buf ... we need to read ahead
			carry := d.buf[idx+1] >> (8 - shift)
			o[idx] = (d.buf[idx] << shift) | carry
		}
	} else { // originPos >= destPos => shift right
		// carry : is a little bit tricky in this case because of the first case
		// when idx == 0 and there is no carry at all
		carry := func(idx int) uint8 {
			if idx == 0 {
				return 0x00
			}
			return (d.buf[idx-1] << (8 - shift))
		}
		for idx := range o {
			o[idx] = (d.buf[idx] >> shift) | carry(idx)
		}
	}
	// here the output is calculated ... but the first byte may have some extra bits
	// therefore we apply a mask to erase those unaddressable bits
	output[outputInitialIdx] &= ((0x01 << destPos) - 1)
	// now we need to update the head of the incoming buffer and the bitCounter
	d.bitCounter = (d.bitCounter + f.BitSize) % 8
	// move the head to the next non-complete byte used
	headerUpdate := func() uint8 {
		if (d.bitCounter == 0) && ((f.BitSize % 8) != 0) {
			return (numBytes + 1)
		}
		return numBytes
	}
	d.buf = d.buf[headerUpdate():]
	return output
}
// read8 reads the field's bits (up to 8) and returns them as a single byte.
func (d *decoder) read8(f field) uint8 {
	rawdata := d.readBits(f, 1)
	return uint8(rawdata[0])
}

// read16 reads the field's bits (up to 16) and assembles them with the
// active byte order.
func (d *decoder) read16(f field) uint16 {
	rawdata := d.readBits(f, 2)
	return d.order.Uint16(rawdata)
}

// read32 reads the field's bits (up to 32) and assembles them with the
// active byte order.
func (d *decoder) read32(f field) uint32 {
	rawdata := d.readBits(f, 4)
	return d.order.Uint32(rawdata)
}

// read64 reads the field's bits (up to 64) and assembles them with the
// active byte order.
func (d *decoder) read64(f field) uint64 {
	rawdata := d.readBits(f, 8)
	return d.order.Uint64(rawdata)
}

// Signed variants reinterpret the unsigned reads bit-for-bit.
func (d *decoder) readS8(f field) int8   { return int8(d.read8(f)) }
func (d *decoder) readS16(f field) int16 { return int16(d.read16(f)) }
func (d *decoder) readS32(f field) int32 { return int32(d.read32(f)) }
func (d *decoder) readS64(f field) int64 { return int64(d.read64(f)) }
// readn removes and returns the next count bytes of the buffer.
// The returned slice aliases the input buffer; it is not a copy.
func (d *decoder) readn(count int) []byte {
	x := d.buf[0:count]
	d.buf = d.buf[count:]
	return x
}

// skipn discards the next count bytes.
func (d *decoder) skipn(count int) {
	d.buf = d.buf[count:]
}

// skip discards the encoded size of field f for value v.
func (d *decoder) skip(f field, v reflect.Value) {
	d.skipn(f.SizeOf(v))
}
// unpacker returns the Unpacker implementation for v, if any: the value
// itself is checked first, then its address (to catch pointer-receiver
// implementations on addressable values).
func (d *decoder) unpacker(v reflect.Value) (Unpacker, bool) {
	if u, ok := v.Interface().(Unpacker); ok {
		return u, true
	}
	if v.CanAddr() {
		if u, ok := v.Addr().Interface().(Unpacker); ok {
			return u, true
		}
	}
	return nil, false
}
// read decodes one field described by f from d.buf into v, dispatching on
// the field's encoded kind. Custom Unpacker implementations take precedence;
// blank fields ("_") are skipped without decoding. After decoding, if this
// field is registered as a size source for a sibling (f.SIndex != -1), that
// sibling slice/string is re-initialized to the decoded length.
func (d *decoder) read(f field, v reflect.Value) {
	if f.Name != "_" {
		if s, ok := d.unpacker(v); ok {
			var err error
			d.buf, err = s.Unpack(d.buf, d.order)
			if err != nil {
				panic(err)
			}
			return
		}
	} else {
		// Padding field: consume its bytes without storing anything.
		d.skipn(f.SizeOf(v))
		return
	}
	// Save decoder state that nested structs may overwrite.
	struc := d.struc
	sfields := d.sfields
	order := d.order
	if f.Order != nil {
		// Per-field byte-order override, restored on exit.
		d.order = f.Order
		defer func() { d.order = order }()
	}
	if f.Skip != 0 {
		d.skipn(f.Skip)
	}
	switch f.Type.Kind() {
	case reflect.Array:
		l := f.Type.Len()
		// If the underlying value is a slice, initialize it.
		if f.DefType.Kind() == reflect.Slice {
			v.Set(reflect.MakeSlice(reflect.SliceOf(f.Type.Elem()), l, l))
		}
		switch f.DefType.Kind() {
		case reflect.String:
			v.SetString(string(d.readn(f.SizeOf(v))))
		case reflect.Slice, reflect.Array:
			// Decode each element with the element's field descriptor.
			ef := f.Elem()
			for i := 0; i < l; i++ {
				d.read(ef, v.Index(i))
			}
		default:
			panic(fmt.Errorf("invalid array cast type: %s", f.DefType.String()))
		}
	case reflect.Struct:
		// Recurse into the struct with its own cached field list.
		d.struc = v
		d.sfields = cachedFieldsFromStruct(f.Type)
		l := len(d.sfields)
		for i := 0; i < l; i++ {
			f := d.sfields[i]
			v := v.Field(f.Index)
			if v.CanSet() {
				d.read(f, v)
			} else {
				// Unexported fields cannot be set; still consume their bytes.
				d.skip(f, v)
			}
		}
		d.sfields = sfields
		d.struc = struc
	case reflect.Slice, reflect.String:
		switch f.DefType.Kind() {
		case reflect.String:
			l := v.Len()
			v.SetString(string(d.readn(l)))
		case reflect.Slice, reflect.Array:
			switch f.DefType.Elem().Kind() {
			case reflect.Uint8:
				// Byte slices are copied wholesale.
				v.SetBytes(d.readn(f.SizeOf(v)))
			default:
				l := v.Len()
				ef := f.Elem()
				for i := 0; i < l; i++ {
					d.read(ef, v.Index(i))
				}
			}
		default:
			panic(fmt.Errorf("invalid array cast type: %s", f.DefType.String()))
		}
	case reflect.Int8:
		v.SetInt(int64(d.readS8(f)))
	case reflect.Int16:
		v.SetInt(int64(d.readS16(f)))
	case reflect.Int32:
		v.SetInt(int64(d.readS32(f)))
	case reflect.Int64:
		v.SetInt(d.readS64(f))
	case reflect.Uint8:
		v.SetUint(uint64(d.read8(f)))
	case reflect.Uint16:
		v.SetUint(uint64(d.read16(f)))
	case reflect.Uint32:
		v.SetUint(uint64(d.read32(f)))
	case reflect.Uint64:
		v.SetUint(d.read64(f))
	case reflect.Float32:
		v.SetFloat(float64(math.Float32frombits(d.read32(f))))
	case reflect.Float64:
		v.SetFloat(math.Float64frombits(d.read64(f)))
	case reflect.Complex64:
		// Real part first, then imaginary.
		v.SetComplex(complex(
			float64(math.Float32frombits(d.read32(f))),
			float64(math.Float32frombits(d.read32(f))),
		))
	case reflect.Complex128:
		v.SetComplex(complex(
			math.Float64frombits(d.read64(f)),
			math.Float64frombits(d.read64(f)),
		))
	}
	if f.SIndex != -1 {
		// This field carries the length of a sibling; find that sibling's
		// descriptor and pre-size it from the value just decoded.
		sv := struc.Field(f.SIndex)
		l := len(sfields)
		for i := 0; i < l; i++ {
			if sfields[i].Index != f.SIndex {
				continue
			}
			sf := sfields[i]
			sl := 0
			// Must use different codepath for signed/unsigned.
			switch f.DefType.Kind() {
			case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				sl = int(v.Int())
			case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				sl = int(v.Uint())
			default:
				panic(fmt.Errorf("unsupported sizeof type %s", f.DefType.String()))
			}
			// Strings are immutable, but we make a blank one so that we can
			// figure out the size later. It might be better to do something
			// more hackish, like writing the length into the string...
			switch sf.DefType.Kind() {
			case reflect.Slice:
				sv.Set(reflect.MakeSlice(sf.Type, sl, sl))
			case reflect.String:
				sv.SetString(string(make([]byte, sl)))
			default:
				panic(fmt.Errorf("unsupported sizeof target %s", sf.DefType.String()))
			}
		}
	}
}
package utils
import (
"fmt"
"github.com/axelarnetwork/utils/math"
)
// NewCircularBuffer is the constructor of CircularBuffer.
// The backing slice always starts with 32 slots regardless of maxSize: Add
// doubles it on demand up to MaxSize, or shrinks it on first use when
// maxSize < 32.
func NewCircularBuffer(maxSize int) *CircularBuffer {
	return &CircularBuffer{
		CumulativeValue: make([]uint64, 32),
		Index:           0,
		MaxSize:         int32(maxSize),
	}
}
// Add appends a new value into the CircularBuffer.
// The buffer stores cumulative sums: each slot holds the previous slot's value
// plus the newly added value, so a windowed count is a difference of two slots
// (see Count). Before writing, the backing slice is resized lazily towards
// MaxSize: shrunk if MaxSize was lowered, doubled if full but still below it.
func (m *CircularBuffer) Add(value uint32) {
	if m.isGTMaxSize() {
		m.shrink()
	}
	if m.isFull() && m.isLTMaxSize() {
		m.grow()
	}
	prevValue := m.CumulativeValue[m.Index]
	// advance the ring index and store the new running total there
	m.Index = m.addToIndex(1)
	m.CumulativeValue[m.Index] = prevValue + uint64(value)
}
// isGTMaxSize reports whether the backing slice has outgrown the configured
// MaxSize (possible after SetMaxSize lowered the limit).
func (m CircularBuffer) isGTMaxSize() bool {
	return int(m.MaxSize) < len(m.CumulativeValue)
}
// shrink copies the most recent MaxSize cumulative values into a smaller
// buffer, preserving their order. The newest value ends up in the last slot,
// so Index is reset to point at it.
func (m *CircularBuffer) shrink() {
	newBuffer := make([]uint64, int(m.MaxSize))
	// walk backwards from the current index, filling the new buffer back-to-front
	for i := 0; i < len(newBuffer); i++ {
		newBuffer[len(newBuffer)-1-i] = m.CumulativeValue[m.addToIndex(int32(-i))]
	}
	m.Index = int32(len(newBuffer) - 1)
	m.CumulativeValue = newBuffer
}
// Count returns the cumulative value over the most recent windowRange
// additions. It panics if windowRange is not smaller than the configured
// MaxSize. If the requested window is at least as large as the current
// (not yet fully grown) buffer, the total over the whole buffer is returned.
func (m CircularBuffer) Count(windowRange int) uint64 {
	if windowRange >= int(m.MaxSize) {
		// fixed typo in the panic message ("to large" -> "too large")
		panic(fmt.Errorf("window range too large"))
	}
	if windowRange >= len(m.CumulativeValue) {
		// whole-buffer total: newest value minus the oldest stored value
		return m.CumulativeValue[m.Index] - m.CumulativeValue[m.addToIndex(1)]
	}
	return m.CumulativeValue[m.Index] - m.CumulativeValue[m.addToIndex(int32(-windowRange))]
}
// addToIndex returns the ring-buffer index i steps away from the current
// Index, wrapping around in either direction (i may be negative).
func (m CircularBuffer) addToIndex(i int32) int32 {
	length := int32(len(m.CumulativeValue))
	return ((m.Index+i)%length + length) % length
}
// isFull reports whether every slot of the buffer is occupied: either the
// index has reached the last slot, or the slot after it (the oldest, after
// wrap-around) has already been written (non-zero cumulative value).
// NOTE(review): a wrapped slot whose cumulative value is legitimately 0
// (only zero values added) would be misread as empty — confirm acceptable.
func (m CircularBuffer) isFull() bool {
	return int(m.Index)+1 == len(m.CumulativeValue) || m.CumulativeValue[m.addToIndex(1)] != 0
}
// isLTMaxSize reports whether the backing slice can still grow towards MaxSize.
func (m CircularBuffer) isLTMaxSize() bool {
	return int(m.MaxSize) > len(m.CumulativeValue)
}
// grow doubles the buffer size until it reaches max size. If max size is not
// a power of 2, the last increase is capped at max size.
func (m *CircularBuffer) grow() {
	newBuffer := make([]uint64, math.Min(len(m.CumulativeValue)<<1, int(m.MaxSize)))
	// There is no information about the count outside the buffer range, so when
	// the new buffer gets padded with zeroes the oldest value also needs to be
	// reset to zero, otherwise windows larger than the old buffer size would
	// produce a wrong count.
	zeroValue := m.CumulativeValue[m.addToIndex(1)]
	// copy oldest-to-newest, rebasing every cumulative value on the oldest one
	for i := 0; i < len(m.CumulativeValue); i++ {
		newBuffer[i] = m.CumulativeValue[m.addToIndex(1+int32(i))] - zeroValue
	}
	m.Index = int32(len(m.CumulativeValue) - 1)
	m.CumulativeValue = newBuffer
}
// SetMaxSize sets the max size of the buffer to the given value.
// The buffer size gets updated accordingly the next time a value is added.
func (m *CircularBuffer) SetMaxSize(size int) {
m.MaxSize = int32(size)
} | utils/circular_buffer.go | 0.718989 | 0.448245 | circular_buffer.go | starcoder |
package main
import (
"fmt"
"math"
"strings"
)
// try panics if err is non-nil; convenience wrapper for fatal setup errors.
func try(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// main runs both puzzle parts of Advent of Code 2019 day 12.
// (File-based input parsing is disabled; the moon coordinates are hard-coded
// inside Part1/Part2.)
func main() {
	Part1()
	Part2()
}
// System is the set of moons being simulated.
type System []Body

// Step advances the simulation by one tick: every body's velocity is updated
// by the gravity of every other body, then each position is advanced by its
// velocity. A new System is returned; the receiver is left unchanged.
func (s System) Step() System {
	next := make(System, len(s))
	for i := range s {
		updated := s[i]
		for j := range s {
			if j == i {
				continue
			}
			updated = updated.ApplyGravity(s[j])
		}
		next[i] = updated.ApplyVelocity()
	}
	return next
}

// Energy returns the total energy of the system (each body's potential times
// kinetic energy, summed).
func (s System) Energy() int {
	sum := 0
	for _, body := range s {
		sum += body.Energy()
	}
	return sum
}
// Vector is a 3D integer vector used for both positions and velocities.
type Vector struct {
	X int
	Y int
	Z int
}

// Add returns the component-wise sum of v and v2.
func (v Vector) Add(v2 Vector) Vector {
	v.X += v2.X
	v.Y += v2.Y
	v.Z += v2.Z
	return v
}

// AbsSum returns the sum of the absolute values of the three components
// (the Manhattan norm).
func (v Vector) AbsSum() int {
	sum := 0
	for _, c := range []int{v.X, v.Y, v.Z} {
		sum += int(math.Abs(float64(c)))
	}
	return sum
}
// Body is a moon with a position and a velocity.
type Body struct {
	Position Vector
	Velocity Vector
}

// ApplyGravity returns a copy of m whose velocity has been nudged one unit
// towards m2 on each axis independently; the position is unchanged.
func (m Body) ApplyGravity(m2 Body) Body {
	pull := Vector{
		X: compareAxis(m.Position.X, m2.Position.X),
		Y: compareAxis(m.Position.Y, m2.Position.Y),
		Z: compareAxis(m.Position.Z, m2.Position.Z),
	}
	m.Velocity = m.Velocity.Add(pull)
	return m
}

// ApplyVelocity returns a copy of m moved by its current velocity.
func (m Body) ApplyVelocity() Body {
	m.Position = m.Position.Add(m.Velocity)
	return m
}

// Energy is the product of potential and kinetic energy.
func (m Body) Energy() int {
	return m.PotentialEnergy() * m.KineticEnergy()
}

// PotentialEnergy is the Manhattan norm of the position.
func (m Body) PotentialEnergy() int {
	return m.Position.AbsSum()
}

// KineticEnergy is the Manhattan norm of the velocity.
func (m Body) KineticEnergy() int {
	return m.Velocity.AbsSum()
}
// compareAxis returns the unit step that moves v1 towards v2:
// +1 if v1 < v2, -1 if v1 > v2, and 0 if they are equal.
func compareAxis(v1, v2 int) int {
	switch {
	case v1 < v2:
		return 1
	case v1 > v2:
		return -1
	default:
		return 0
	}
}
// Part1 simulates the four hard-coded moons for 1000 steps and prints the
// resulting total energy of the system (the day-12 part-1 answer).
func Part1() {
	s := System{
		Body{
			Position: Vector{-4, 3, 15},
			Velocity: Vector{0, 0, 0},
		},
		Body{
			Position: Vector{-11, -10, 13},
			Velocity: Vector{0, 0, 0},
		},
		Body{
			Position: Vector{2, 2, 18},
			Velocity: Vector{0, 0, 0},
		},
		Body{
			Position: Vector{7, -1, 0},
			Velocity: Vector{0, 0, 0},
		},
	}
	for i := 0; i < 1000; i++ {
		s = s.Step()
	}
	fmt.Printf("Energy after 1000 iterations: %d\n", s.Energy())
}
// GCD computes the greatest common divisor of a and b via the Euclidean
// algorithm.
func GCD(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

// LCM computes the least common multiple of two or more integers via the GCD.
func LCM(a, b int, integers ...int) int {
	result := a * b / GCD(a, b)
	for _, n := range integers {
		result = LCM(result, n)
	}
	return result
}
// Part2 finds the number of simulation steps until the system returns to its
// initial state. The three axes evolve independently, so the answer is the
// least common multiple of the per-axis cycle lengths.
func Part2() {
	start := System{
		Body{
			Position: Vector{-4, 3, 15},
			Velocity: Vector{0, 0, 0},
		},
		Body{
			Position: Vector{-11, -10, 13},
			Velocity: Vector{0, 0, 0},
		},
		Body{
			Position: Vector{2, 2, 18},
			Velocity: Vector{0, 0, 0},
		},
		Body{
			Position: Vector{7, -1, 0},
			Velocity: Vector{0, 0, 0},
		},
	}
	iterations := 0
	current := start
	var periodX int
	var periodY int
	var periodZ int
	for {
		iterations++
		current = current.Step()
		// record the first iteration at which each axis matches the start state
		if periodX == 0 && repeatedX(start, current) {
			periodX = iterations
		}
		if periodY == 0 && repeatedY(start, current) {
			periodY = iterations
		}
		if periodZ == 0 && repeatedZ(start, current) {
			periodZ = iterations
		}
		if periodX != 0 && periodY != 0 && periodZ != 0 {
			break
		}
	}
	fmt.Printf("%d iterations for X\n", periodX)
	fmt.Printf("%d iterations for Y\n", periodY)
	fmt.Printf("%d iterations for Z\n", periodZ)
	lcm := LCM(periodX, periodY, periodZ)
	fmt.Printf("LCM for periods: %d\n", lcm)
}
// repeated reports whether pred holds for every pair of corresponding bodies
// in the two system states.
func repeated(s1, s2 System, pred func(Body, Body) bool) bool {
	for i := 0; i < len(s1); i++ {
		if !pred(s1[i], s2[i]) {
			return false
		}
	}
	return true
}

// repeatedX reports whether every body's X position and X velocity match
// between the two states.
func repeatedX(s1, s2 System) bool {
	xEqual := func(b1, b2 Body) bool {
		return b1.Position.X == b2.Position.X && b1.Velocity.X == b2.Velocity.X
	}
	return repeated(s1, s2, xEqual)
}

// repeatedY is the Y-axis analogue of repeatedX.
func repeatedY(s1, s2 System) bool {
	yEqual := func(b1, b2 Body) bool {
		return b1.Position.Y == b2.Position.Y && b1.Velocity.Y == b2.Velocity.Y
	}
	return repeated(s1, s2, yEqual)
}

// repeatedZ is the Z-axis analogue of repeatedX.
func repeatedZ(s1, s2 System) bool {
	zEqual := func(b1, b2 Body) bool {
		return b1.Position.Z == b2.Position.Z && b1.Velocity.Z == b2.Velocity.Z
	}
	return repeated(s1, s2, zEqual)
}
var (
puzzle = strings.TrimSpace(
`
<x=-4, y=3, z=15>
<x=-11, y=-10, z=13>
<x=2, y=2, z=18>
<x=7, y=-1, z=0>
`)
) | cmd/day12/day12.go | 0.622804 | 0.42179 | day12.go | starcoder |
package gfx
import (
"fmt"
"math"
"github.com/go-gl/mathgl/mgl32"
"github.com/goxjs/gl"
)
// SpriteBatch is a collection of images/quads/textures all drawn with a single draw call.
type SpriteBatch struct {
	size        int           // maximum number of sprites the batch can hold
	count       int           // number of sprites added so far
	color       []float32     // Current color. This color, if present, will be applied to the next added sprite.
	arrayBuf    *vertexBuffer // vertex data: 4 vertices * 8 floats (pos, texcoord, rgba) per sprite
	quadIndices *quadIndices  // shared index buffer describing two triangles per quad
	usage       Usage         // buffer usage hint handed to the GL layer
	texture     ITexture      // texture sampled when the batch is drawn
	rangeMin    int           // first sprite to draw, or -1 for "from the start"
	rangeMax    int           // last sprite to draw, or -1 for "to the end"
}
// NewSpriteBatch creates a batch that can hold up to size sprites drawn with
// the given texture; usage is the buffer usage hint (static/dynamic/stream).
func NewSpriteBatch(texture ITexture, size int, usage Usage) *SpriteBatch {
	return &SpriteBatch{
		size:        size,
		texture:     texture,
		usage:       usage,
		color:       []float32{1, 1, 1, 1},                      // default: opaque white (no tint)
		arrayBuf:    newVertexBuffer(size*4*8, []float32{}, usage), // 4 vertices * 8 floats per sprite
		quadIndices: newQuadIndices(size),
		rangeMin:    -1, // -1 means no draw-range restriction
		rangeMax:    -1,
	}
}
// Add adds a sprite to the batch. Sprites are drawn in the order they are added.
// x, y The position to draw the object
// r rotation of the object
// sx, sy scale of the object
// ox, oy offset of the object
// kx, ky shear of the object
func (spriteBatch *SpriteBatch) Add(args ...float32) error {
	return spriteBatch.addv(spriteBatch.texture.getVerticies(), generateModelMatFromArgs(args), -1)
}

// Addq adds a Quad to the batch. This is very useful for something like a
// tilemap. It takes the same transform arguments as Add.
func (spriteBatch *SpriteBatch) Addq(quad *Quad, args ...float32) error {
	return spriteBatch.addv(quad.getVertices(), generateModelMatFromArgs(args), -1)
}

// Set overwrites the sprite at index in the batch, with the same transform
// arguments as Add.
func (spriteBatch *SpriteBatch) Set(index int, args ...float32) error {
	return spriteBatch.addv(spriteBatch.texture.getVerticies(), generateModelMatFromArgs(args), index)
}

// Setq overwrites the sprite at index in the batch, with the same arguments
// as Addq.
func (spriteBatch *SpriteBatch) Setq(index int, quad *Quad, args ...float32) error {
	return spriteBatch.addv(quad.getVertices(), generateModelMatFromArgs(args), index)
}
// Clear removes all sprites from the batch by allocating a fresh, empty
// vertex buffer.
func (spriteBatch *SpriteBatch) Clear() {
	spriteBatch.arrayBuf = newVertexBuffer(spriteBatch.size*4*8, []float32{}, spriteBatch.usage)
	spriteBatch.count = 0
}

// flush ensures the accumulated vertex data is uploaded to the buffer.
func (spriteBatch *SpriteBatch) flush() {
	spriteBatch.arrayBuf.bufferData()
}

// SetTexture changes the texture of the batch to a new one.
// NOTE(review): previously added sprites keep the texture coordinates that
// were computed from the old texture at add time — confirm intended.
func (spriteBatch *SpriteBatch) SetTexture(newtexture ITexture) {
	spriteBatch.texture = newtexture
}

// GetTexture returns the currently bound texture of this sprite batch.
func (spriteBatch *SpriteBatch) GetTexture() ITexture {
	return spriteBatch.texture
}
// SetColor sets the tint applied to sprites added afterwards. It accepts
// r, g, b[, a]; any missing components default to 1. The values are copied,
// so the caller's arguments are not retained.
func (spriteBatch *SpriteBatch) SetColor(vals ...float32) {
	// addv reads exactly four components per vertex; pad short argument lists
	// with 1s so e.g. SetColor(r, g, b) cannot trigger an index-out-of-range
	// panic there.
	color := []float32{1, 1, 1, 1}
	copy(color, vals)
	spriteBatch.color = color
}

// ClearColor resets the tint back to opaque white.
func (spriteBatch *SpriteBatch) ClearColor() {
	spriteBatch.color = []float32{1, 1, 1, 1}
}

// GetColor returns the currently used color as [r, g, b, a].
func (spriteBatch *SpriteBatch) GetColor() []float32 {
	return spriteBatch.color
}

// GetCount returns the number of sprites already added to the batch.
func (spriteBatch *SpriteBatch) GetCount() int {
	return spriteBatch.count
}
// SetBufferSize resizes the buffer, changing the limit of sprites this batch
// can hold. Returns an error for sizes <= 0; a no-op when the size is
// unchanged.
// NOTE(review): when shrinking, the old vertex data is handed to the new,
// smaller buffer unchanged — confirm newVertexBuffer truncates it safely.
func (spriteBatch *SpriteBatch) SetBufferSize(newsize int) error {
	if newsize <= 0 {
		return fmt.Errorf("invalid SpriteBatch size")
	} else if newsize == spriteBatch.size {
		return nil
	}
	spriteBatch.arrayBuf = newVertexBuffer(newsize*4*8, spriteBatch.arrayBuf.data, spriteBatch.usage)
	spriteBatch.quadIndices = newQuadIndices(newsize)
	spriteBatch.size = newsize
	return nil
}

// GetBufferSize returns the maximum number of sprites this batch can hold.
func (spriteBatch *SpriteBatch) GetBufferSize() int {
	return spriteBatch.size
}
// addv writes one sprite (4 vertices) into the vertex buffer. verts holds the
// untransformed vertex data (x, y, u, v per vertex) and mat the model
// transform applied to the positions. If index is -1 the sprite is appended;
// otherwise the sprite at that index is overwritten in place.
func (spriteBatch *SpriteBatch) addv(verts []float32, mat *mgl32.Mat4, index int) error {
	if index == -1 && spriteBatch.count >= spriteBatch.size {
		return fmt.Errorf("Sprite Batch Buffer Full")
	}
	sprite := make([]float32, 8*4)
	for i := 0; i < 32; i += 8 {
		// verts uses 4 floats per vertex, the output uses 8
		j := (i / 2)
		// transform the position by the 2D part of the column-major model matrix
		sprite[i+0] = (mat[0] * verts[j+0]) + (mat[4] * verts[j+1]) + mat[12]
		sprite[i+1] = (mat[1] * verts[j+0]) + (mat[5] * verts[j+1]) + mat[13]
		// texture coordinates pass through unchanged
		sprite[i+2] = verts[j+2]
		sprite[i+3] = verts[j+3]
		// the current batch color is baked into every vertex
		sprite[i+4] = spriteBatch.color[0]
		sprite[i+5] = spriteBatch.color[1]
		sprite[i+6] = spriteBatch.color[2]
		sprite[i+7] = spriteBatch.color[3]
	}
	if index == -1 {
		spriteBatch.arrayBuf.fill(spriteBatch.count*4*8, sprite)
		spriteBatch.count++
	} else {
		spriteBatch.arrayBuf.fill(index*4*8, sprite)
	}
	return nil
}
// SetDrawRange restricts drawing to the sprites with indices in [min, max].
// This is useful if you only need to render a portion of the batch.
func (spriteBatch *SpriteBatch) SetDrawRange(min, max int) error {
	if min < 0 || max < 0 || min > max {
		return fmt.Errorf("invalid draw range")
	}
	spriteBatch.rangeMin = min
	spriteBatch.rangeMax = max
	return nil
}

// ClearDrawRange resets the draw range so the whole batch is drawn again.
func (spriteBatch *SpriteBatch) ClearDrawRange() {
	spriteBatch.rangeMin = -1
	spriteBatch.rangeMax = -1
}

// GetDrawRange returns the effective [min, max] sprite range that Draw will
// render, clamped to the number of sprites currently in the batch. With no
// range set it spans the whole batch.
func (spriteBatch *SpriteBatch) GetDrawRange() (int, int) {
	min := 0
	max := spriteBatch.count - 1
	if spriteBatch.rangeMax >= 0 {
		max = int(math.Min(float64(spriteBatch.rangeMax), float64(max)))
	}
	if spriteBatch.rangeMin >= 0 {
		// clamp min to max so the returned range can never be inverted
		min = int(math.Min(float64(spriteBatch.rangeMin), float64(max)))
	}
	return min, max
}
// Draw satisfies the Drawable interface. Inputs are as follows
// x, y, r, sx, sy, ox, oy, kx, ky
// x, y are position
// r is rotation
// sx, sy is the scale, if sy is not given sy will equal sx
// ox, oy are offset
// kx, ky are the shear. If ky is not given ky will equal kx
func (spriteBatch *SpriteBatch) Draw(args ...float32) {
if spriteBatch.count == 0 {
return
}
prepareDraw(generateModelMatFromArgs(args))
bindTexture(spriteBatch.texture.getHandle())
useVertexAttribArrays(shaderPos, shaderTexCoord, shaderColor)
spriteBatch.arrayBuf.bind()
defer spriteBatch.arrayBuf.unbind()
gl.VertexAttribPointer(gl.Attrib{Value: 0}, 2, gl.FLOAT, false, 8*4, 0)
gl.VertexAttribPointer(gl.Attrib{Value: 1}, 2, gl.FLOAT, false, 8*4, 2*4)
gl.VertexAttribPointer(gl.Attrib{Value: 2}, 4, gl.FLOAT, false, 8*4, 4*4)
min, max := spriteBatch.GetDrawRange()
spriteBatch.quadIndices.drawElements(gl.TRIANGLES, min, max-min+1)
} | gfx/sprite_batch.go | 0.786869 | 0.450903 | sprite_batch.go | starcoder |
package fragments
const Bits = `
{{- define "BitsForwardDeclaration" }}
class {{ .Name }} final {
public:
constexpr {{ .Name }}() : value_(0u) {}
explicit constexpr {{ .Name }}({{ .Type }} value) : value_(value) {}
{{- range .Members }}
const static {{ $.Name }} {{ .Name }};
{{- end }}
const static {{ .Name }} mask;
explicit constexpr inline operator {{ .Type }}() const { return value_; }
explicit constexpr inline operator bool() const { return static_cast<bool>(value_); }
constexpr inline bool operator==(const {{ .Name }}& other) const { return value_ == other.value_; }
constexpr inline bool operator!=(const {{ .Name }}& other) const { return value_ != other.value_; }
constexpr inline {{ .Name }} operator~() const;
constexpr inline {{ .Name }} operator|(const {{ .Name }}& other) const;
constexpr inline {{ .Name }} operator&(const {{ .Name }}& other) const;
constexpr inline {{ .Name }} operator^(const {{ .Name }}& other) const;
constexpr inline void operator|=(const {{ .Name }}& other);
constexpr inline void operator&=(const {{ .Name }}& other);
constexpr inline void operator^=(const {{ .Name }}& other);
private:
{{ .Type }} value_;
};
{{- range $member := .Members }}
constexpr const {{ $.Namespace }}::{{ $.Name }} {{ $.Name }}::{{ $member.Name }} = {{ $.Namespace }}::{{ $.Name }}({{ $member.Value }});
{{- end }}
constexpr const {{ .Namespace }}::{{ .Name }} {{ .Name }}::mask = {{ $.Namespace }}::{{ $.Name }}({{ .Mask }}u);
constexpr inline {{ .Namespace }}::{{ .Name }} {{ .Name }}::operator~() const {
return {{ $.Namespace }}::{{ $.Name }}(static_cast<{{ .Type }}>(~this->value_ & mask.value_));
}
constexpr inline {{ .Namespace }}::{{ .Name }} {{ .Name }}::operator|(
const {{ .Namespace }}::{{ .Name }}& other) const {
return {{ $.Namespace }}::{{ $.Name }}(static_cast<{{ .Type }}>(this->value_ | other.value_));
}
constexpr inline {{ .Namespace }}::{{ .Name }} {{ .Name }}::operator&(
const {{ .Namespace }}::{{ .Name }}& other) const {
return {{ $.Namespace }}::{{ $.Name }}(static_cast<{{ .Type }}>(this->value_ & other.value_));
}
constexpr inline {{ .Namespace }}::{{ .Name }} {{ .Name }}::operator^(
const {{ .Namespace }}::{{ .Name }}& other) const {
return {{ $.Namespace }}::{{ $.Name }}(static_cast<{{ .Type }}>(this->value_ ^ other.value_));
}
constexpr inline void {{ .Name }}::operator|=(
const {{ .Namespace }}::{{ .Name }}& other) {
this->value_ |= other.value_;
}
constexpr inline void {{ .Name }}::operator&=(
const {{ .Namespace }}::{{ .Name }}& other) {
this->value_ &= other.value_;
}
constexpr inline void {{ .Name }}::operator^=(
const {{ .Namespace }}::{{ .Name }}& other) {
this->value_ ^= other.value_;
}
{{ end }}
{{- define "BitsTraits" }}
template <>
struct IsFidlType<{{ .Namespace }}::{{ .Name }}> : public std::true_type {};
static_assert(std::is_standard_layout_v<{{ .Namespace }}::{{ .Name }}>);
static_assert(sizeof({{ .Namespace }}::{{ .Name }}) == sizeof({{ .Type }}));
{{- end }}
`
package qrprng
import (
"fmt"
"math"
"math/big"
"math/bits"
)
const (
	// INT63_MASK clears the sign bit so Int63 can reuse Uint64's output.
	INT63_MASK = (1 << 63) - 1
	// Largest prime (3 mod 4) less than 2^64, permutes [0, 2^64-189)
	DEFAULT_PRIME = uint64(math.MaxUint64 - 188)
	// DEFAULT_INTERMEDIATE_OFFSET is an arbitrary default seed.
	DEFAULT_INTERMEDIATE_OFFSET = 5_577_006_791_947_779_410
)
// QuadraticResiduePRNG is a thread-unsafe PRNG based on Preshing's method using quadratic residues.
// The PRNG has the unique advantage of generating a permutation: when `offset` is 0, the output will cycle through
// all numbers less than `prime` without repeats (until all have been output and the cycle restarts).
// It implements both rand.Source and rand.Source64, and can be used via rand.New() to generate various random data.
type QuadraticResiduePRNG struct {
	prime              uint64 // modulus; must be prime and congruent to 3 mod 4
	intermediateOffset uint64 // seed added between the two permutation rounds
	offset             uint64 // constant added to every output (floor of the range)
	maxMask            uint64 // largest value eligible for XOR-masking
	mask               uint64 // XOR mask applied to intermediate values <= maxMask
	idx                uint64 // next sequence position consumed by Uint64
}
// New creates a new PRNG instance with the given parameters, which are validated for correctness before creation.
// The chosen prime must be 3 mod 4, and the intermediate offset (seed) can be any number less than the prime. The
// offset will be added to all output, effectively placing a floor on the output values.
func New(prime, intermediateOffset, offset uint64) (*QuadraticResiduePRNG, error) {
	if err := validate(prime, offset, intermediateOffset); err != nil {
		// return a usable zero struct alongside the error for callers that ignore it
		return &QuadraticResiduePRNG{}, err
	}
	maxMask := calculateMaxMask(prime)
	return &QuadraticResiduePRNG{
		prime:              prime,
		intermediateOffset: intermediateOffset,
		offset:             offset,
		maxMask:            maxMask,
		mask:               calculateMask(prime, intermediateOffset, maxMask),
	}, nil
}
// Default returns a new PRNG instance suitable for general-purpose use.
// It uses the largest possible prime to permute 99.999999999999999% of possible uint64 values.
func Default() *QuadraticResiduePRNG {
	prng, err := New(DEFAULT_PRIME, DEFAULT_INTERMEDIATE_OFFSET, 0)
	if err != nil {
		// the default constants are known-valid, so this is unreachable
		panic(err)
	}
	return prng
}
// Index generates the ith element of the permutation described by the generator.
// If i >= prime, then an error is returned. However, it can be ignored if desired;
// the sequence will simply cycle.
func (prng *QuadraticResiduePRNG) Index(i uint64) (uint64, error) {
	if i >= prng.prime {
		return i, fmt.Errorf("invalid index %d: must be less than chosen prime", i)
	}
	// Two rounds of the QPR permutation with an offset and XOR mask in
	// between de-correlate consecutive indices (Preshing's construction).
	intermediate := prng.permuteQPR(i) + prng.intermediateOffset
	masked := prng.applyMask(intermediate % prng.prime)
	return prng.offset + prng.permuteQPR(masked), nil
}

// applyMask XORs values at or below maxMask with the mask; larger values pass
// through unchanged, which keeps the overall mapping a bijection.
func (prng *QuadraticResiduePRNG) applyMask(i uint64) uint64 {
	if i <= prng.maxMask {
		return i ^ prng.mask
	} else {
		return i
	}
}
// permuteQPR maps i in [0, prime) to a unique value below prime using the
// quadratic residue i^2 mod prime. For primes congruent to 3 mod 4 each
// residue is produced by exactly one input in the lower half and one in the
// upper half; mirroring the upper half (prime - residue) makes the map a
// bijection.
func (prng *QuadraticResiduePRNG) permuteQPR(i uint64) uint64 {
	// i*i exceeds 64 bits for large primes (e.g. DEFAULT_PRIME); the previous
	// plain multiplication wrapped around and silently broke the permutation
	// property. Use a full 128-bit multiply and reduce exactly.
	hi, lo := bits.Mul64(i, i)
	residue := bits.Rem64(hi, lo, prng.prime)
	if i <= prng.prime/2 {
		return residue
	}
	return prng.prime - residue
}
// Int63 implements math/rand.Source by masking the sign bit off Uint64's output.
func (prng *QuadraticResiduePRNG) Int63() int64 {
	return int64(prng.Uint64() & INT63_MASK)
}

// Seed changes the seed of the PRNG instance and resets the internal state of the generator.
// Negative seeds are mapped into the upper half of the uint64 range.
func (prng *QuadraticResiduePRNG) Seed(seed int64) {
	if seed >= 0 {
		prng.intermediateOffset = uint64(seed)
	} else {
		// NOTE(review): for seed == math.MinInt64, -1*seed wraps around; the
		// result is still deterministic but may not be the intended mapping —
		// confirm.
		prng.intermediateOffset = math.MaxUint64 - uint64(-1*seed)
	}
	prng.idx = 0
}

// Uint64 implements math/rand.Source64 by returning consecutive elements of
// the permutation; the error from Index is deliberately ignored.
// NOTE(review): idx grows without wrapping; once idx >= prime, Index returns
// idx unchanged, degrading output to a counter. Practically unreachable with
// DEFAULT_PRIME, but confirm for small primes.
func (prng *QuadraticResiduePRNG) Uint64() uint64 {
	n, _ := prng.Index(prng.idx)
	prng.idx++
	return n
}
// ===== Private functions =====
// validate checks the PRNG parameters: prime must be congruent to 3 mod 4,
// actually prime, and larger than intermediateOffset. The offset parameter is
// accepted for symmetry but currently has no constraint.
func validate(prime, offset, intermediateOffset uint64) error {
	if prime%4 != 3 {
		return fmt.Errorf("invalid prime %d: must be 3 mod 4", prime)
	} else if intermediateOffset >= prime {
		return fmt.Errorf("invalid intermediate offset %d: must be less than chosen prime", intermediateOffset)
	} else if p := bigIntFromUint64(prime); !p.ProbablyPrime(0) {
		// ProbablyPrime(0) applies the Baillie-PSW test, which is documented
		// to be 100% accurate for inputs below 2^64
		return fmt.Errorf("invalid prime %d: number is not prime", prime)
	}
	return nil
}
// calculateMaxMask returns a mask of all ones that is one bit narrower than
// the minimum width needed to represent prime-1.
func calculateMaxMask(prime uint64) uint64 {
	width := bits.Len64(prime - 1)
	return 1<<uint(width-1) - 1
}
// calculateMask derives an XOR mask from the prime and seed whose top bit
// (within maxMask's width) is always set, keeping the mask below maxMask.
func calculateMask(prime, intermediateOffset, maxMask uint64) uint64 {
	low := uint64(1) << uint(bits.Len64(maxMask)-1)
	return low + (prime+intermediateOffset)%(maxMask-low)
}
func bigIntFromUint64(n uint64) *big.Int {
var result *big.Int
if n <= math.MaxInt64 {
result = big.NewInt(int64(n))
} else {
result = big.NewInt(math.MaxInt64)
result.Add(result, big.NewInt(int64(n-math.MaxInt64)))
}
return result
} | qrprng.go | 0.722821 | 0.542621 | qrprng.go | starcoder |
package main
import (
"fmt"
"log"
)
// SnailfishNumber is a binary tree node: either a pair (left/right non-nil,
// value ignored) or a regular number (left/right nil, value set).
type SnailfishNumber struct {
	left  *SnailfishNumber
	right *SnailfishNumber
	value int
}

// SnailfishParse is a cursor over the textual representation being parsed.
type SnailfishParse struct {
	data string
	pos  int
}
// next returns the byte at the cursor and advances past it.
func (parse *SnailfishParse) next() byte {
	parse.pos += 1
	return parse.data[parse.pos-1]
}

// expect consumes one byte and aborts the program if it is not c.
func (parse *SnailfishParse) expect(c byte) {
	if found := parse.next(); found != c {
		err := fmt.Sprintf("Expected '%c', found '%c' at pos %v in %v", c, found, parse.pos, parse.data)
		log.Fatalln(err)
	}
}
// number parses one snailfish number starting at the cursor: either a pair
// "[a,b]" or a single digit. Only single-digit regular numbers appear in the
// textual input; multi-digit values arise solely in tree form during reduce,
// so they never need to be parsed. Aborts the program on malformed input.
func (parse *SnailfishParse) number() SnailfishNumber {
	ch := parse.next()
	if ch == '[' {
		left := parse.number()
		parse.expect(',')
		right := parse.number()
		parse.expect(']')
		return SnailfishNumber{&left, &right, 0}
	} else if ch >= '0' && ch <= '9' {
		return SnailfishNumber{nil, nil, int(ch - '0')}
	}
	// fixed typo in the fatal message ("Cant" -> "can't")
	log.Fatalln("can't parse")
	return SnailfishNumber{}
}
// parseSnailfish parses a complete snailfish number from data, aborting the
// program if trailing characters remain after the top-level pair.
func parseSnailfish(data string) *SnailfishNumber {
	parse := SnailfishParse{data, 0}
	n := parse.number()
	if parse.pos != len(data) {
		log.Fatalln("not reached end")
	}
	return &n
}
// isRegular reports whether n is a leaf (plain number) rather than a pair.
func (n SnailfishNumber) isRegular() bool {
	return n.left == nil && n.right == nil
}

// str renders the number in the puzzle's bracket notation, e.g. "[1,[2,3]]".
func (n SnailfishNumber) str() string {
	if n.isRegular() {
		return fmt.Sprint(n.value)
	}
	return "[" + n.left.str() + "," + n.right.str() + "]"
}
// regulars returns pointers to all leaf (regular) numbers in the tree, in
// left-to-right reading order. n itself must be a pair: a bare leaf would
// dereference its nil children.
func (n SnailfishNumber) regulars() []*SnailfishNumber {
	lst := []*SnailfishNumber{}
	if n.left.isRegular() {
		lst = append(lst, n.left)
	} else {
		lst = append(lst, n.left.regulars()...)
	}
	if n.right.isRegular() {
		lst = append(lst, n.right)
	} else {
		lst = append(lst, n.right.regulars()...)
	}
	return lst
}
// explode finds the left-most pair nested four levels deep, adds its halves
// to the nearest regular numbers on each side, replaces it with a literal 0,
// and reports whether an explosion happened. parents is the ancestor chain of
// number (root first), used both as the depth counter and to reach the root
// for locating neighbours.
func (number *SnailfishNumber) explode(parents []*SnailfishNumber) bool {
	if number.isRegular() {
		return false
	}
	if len(parents) == 4 {
		if !number.left.isRegular() || !number.right.isRegular() {
			// NOTE(review): assumed unreachable when reduction keeps nesting
			// bounded — confirm against the day-18 rules
			log.Fatal("explode left/right not regular")
			return true
		}
		// collect the regular numbers of the whole tree in reading order and
		// locate this pair's left element among them
		regulars := parents[0].regulars()
		i := 0
		for regulars[i] != number.left {
			i++
		}
		if i > 0 {
			regulars[i-1].value += number.left.value
		}
		// +1 is number.right
		if i < len(regulars)-2 {
			regulars[i+2].value += number.right.value
		}
		// the exploded pair itself becomes the regular number 0
		number.left = nil
		number.right = nil
		number.value = 0
		return true
	}
	// descend, extending the ancestor chain (copied so siblings don't share it)
	stack := make([]*SnailfishNumber, len(parents))
	copy(stack, parents)
	stack = append(stack, number)
	return number.left.explode(stack) || number.right.explode(stack)
}
// split replaces the left-most regular number greater than 9 with the pair
// [floor(v/2), ceil(v/2)] and reports whether a split happened.
func (n *SnailfishNumber) split() bool {
	for _, reg := range n.regulars() {
		if reg.value <= 9 {
			continue
		}
		half := reg.value / 2
		reg.left = &SnailfishNumber{nil, nil, half}
		reg.right = &SnailfishNumber{nil, nil, reg.value - half}
		return true
	}
	return false
}
// reduce repeatedly applies the day-18 rules until the number is stable:
// explode takes priority over split on every pass.
func (n *SnailfishNumber) reduce() {
	for n.explode([]*SnailfishNumber{}) || n.split() {
	}
}

// magnitude is 3*left + 2*right for a pair, or the value itself for a leaf.
func (n *SnailfishNumber) magnitude() int {
	if n.isRegular() {
		return n.value
	}
	return 3*n.left.magnitude() + 2*n.right.magnitude()
}
// copy returns a deep copy of the tree rooted at n.
func (n SnailfishNumber) copy() SnailfishNumber {
	if n.isRegular() {
		return SnailfishNumber{value: n.value}
	}
	l, r := n.left.copy(), n.right.copy()
	return SnailfishNumber{left: &l, right: &r}
}
// parseNumbers parses one snailfish number per input line.
func parseNumbers(data []string) []*SnailfishNumber {
	numbers := make([]*SnailfishNumber, 0, len(data))
	for _, line := range data {
		numbers = append(numbers, parseSnailfish(line))
	}
	return numbers
}

// addNumbers folds the list left-to-right: each addition wraps the running
// sum and the next number in a new pair and reduces it. The operands are
// mutated in the process.
func addNumbers(numbers []*SnailfishNumber) *SnailfishNumber {
	sum := numbers[0]
	for _, next := range numbers[1:] {
		sum = &SnailfishNumber{sum, next, 0}
		sum.reduce()
	}
	return sum
}
// largestMagnitude returns the biggest magnitude obtainable by adding any two
// distinct input numbers. Snailfish addition is not commutative, so both
// orders of every pair are tried; operands are deep-copied because
// addNumbers/reduce mutate them.
func largestMagnitude(numbers []*SnailfishNumber) int {
	largest := 0
	for _, left := range numbers {
		for _, right := range numbers {
			if left == right {
				// pointer identity: skip adding a number to itself
				continue
			}
			leftC := left.copy()
			rightC := right.copy()
			n := addNumbers([]*SnailfishNumber{&leftC, &rightC}).magnitude()
			if n > largest {
				largest = n
			}
		}
	}
	return largest
}
func main18() {
data, err := ReadInputFrom("18.inp")
if err != nil {
log.Fatal(err)
return
}
numbers := parseNumbers(data)
n := addNumbers(numbers)
log.Println(n.magnitude())
numbers = parseNumbers(data)
log.Println(largestMagnitude(numbers))
} | 2021/18.go | 0.642096 | 0.443721 | 18.go | starcoder |
package fee
// feePairs maps 30-day trading volume brackets (min exclusive, max inclusive)
// to the trade fee fraction. A maxVolume of 0.0 marks the unbounded top
// bracket. Source: https://www.btcmarkets.net/fees
// NOTE(review): the top bracket's 0.0035 breaks the otherwise monotonically
// decreasing pattern (previous bracket is 0.0013) — confirm against the
// current fee schedule.
var (
	feePairs = []feePair{
		{minVolume: 0.0, maxVolume: 500.00, feePercentage: 0.0085},
		{minVolume: 500.00, maxVolume: 1000.00, feePercentage: 0.0083},
		{minVolume: 1000.00, maxVolume: 3000.00, feePercentage: 0.0080},
		{minVolume: 3000.00, maxVolume: 9000.00, feePercentage: 0.0075},
		{minVolume: 9000.00, maxVolume: 18000.00, feePercentage: 0.0070},
		{minVolume: 18000.00, maxVolume: 40000.00, feePercentage: 0.0065},
		{minVolume: 40000.00, maxVolume: 60000.00, feePercentage: 0.0060},
		{minVolume: 60000.00, maxVolume: 70000.00, feePercentage: 0.0055},
		{minVolume: 70000.00, maxVolume: 80000.00, feePercentage: 0.0050},
		{minVolume: 80000.00, maxVolume: 90000.00, feePercentage: 0.0045},
		{minVolume: 90000.00, maxVolume: 115000.00, feePercentage: 0.0040},
		{minVolume: 115000.00, maxVolume: 125000.00, feePercentage: 0.0035},
		{minVolume: 125000.00, maxVolume: 200000.00, feePercentage: 0.0030},
		{minVolume: 200000.00, maxVolume: 400000.00, feePercentage: 0.0025},
		{minVolume: 400000.00, maxVolume: 650000.00, feePercentage: 0.0023},
		{minVolume: 650000.00, maxVolume: 850000.00, feePercentage: 0.0020},
		{minVolume: 850000.00, maxVolume: 1000000.00, feePercentage: 0.0018},
		{minVolume: 1000000.00, maxVolume: 3000000.00, feePercentage: 0.0015},
		{minVolume: 3000000.00, maxVolume: 5000000.00, feePercentage: 0.0013},
		{minVolume: 5000000.00, maxVolume: 0.0, feePercentage: 0.0035},
	}
)

// fees are based on 30 day volume. https://www.btcmarkets.net/fees
type feePair struct {
	minVolume     float64 // exclusive lower bound of the bracket
	maxVolume     float64 // inclusive upper bound; 0.0 means unbounded
	feePercentage float64 // fee as a fraction of the trade cost
}
// getTradeFeePercentage returns the fee fraction for the given 30-day volume,
// falling back to the highest fee when no bracket matches (e.g. zero volume).
func getTradeFeePercentage(volume float64) float64 {
	for _, f := range feePairs {
		// A maxVolume of 0.0 marks the unbounded top bracket. Previously such
		// brackets could never match (the condition required maxVolume != 0),
		// so volumes above 5,000,000 incorrectly fell through to the default
		// (highest) fee instead of the top-bracket fee.
		if volume > f.minVolume && (f.maxVolume == 0.0 || volume <= f.maxVolume) {
			return f.feePercentage
		}
	}
	// default to highest fee
	return 0.0085
}
func CalculateTradeFee(cost float64, volume float64) float64 {
feePercentage := getTradeFeePercentage(volume)
return cost * feePercentage
} | pkg/fee/fee.go | 0.628293 | 0.403214 | fee.go | starcoder |
package math
import (
"fmt"
"sort"
"github.com/bitflow-stream/go-bitflow/bitflow"
"github.com/bitflow-stream/go-bitflow/script/reg"
log "github.com/sirupsen/logrus"
)
// Graham Scan for computing the convex hull of a point set
// https://en.wikipedia.org/wiki/Graham_scan
// Point is a 2D point.
type Point struct {
	X, Y float64
}

// Distance returns the *squared* euclidean distance to other. Callers only
// compare distances against each other, so the square root is skipped.
func (p Point) Distance(other Point) float64 {
	dx := p.X - other.X
	dy := p.Y - other.Y
	return dx*dx + dy*dy
}
// ConvexHull is a list of points. As produced by ComputeConvexHull it holds
// the hull vertices in scan order, closed by appending the starting (lowest)
// point at the end.
type ConvexHull []Point

// ComputeConvexHull runs the Graham scan over the given points. Inputs with
// fewer than three points are returned unchanged.
func ComputeConvexHull(points []Point) ConvexHull {
	p := ConvexHull(points)
	if len(p) < 3 {
		return p
	}
	// sort all points by angle around the lowest point (the scan pivot)
	lowest := p.ComputeLowestPoint()
	sort.Sort(AngleBasedSort{Reference: lowest, Points: p})
	hull := make(ConvexHull, 0, len(p)+1)
	p1 := p[0]
	p2 := p[1]
	p3 := p[2]
	hull = append(hull, p1)
	if lowest != p1 {
		// the angle sort must place the pivot first; anything else is a bug
		panic(fmt.Errorf("Lowest point not at beginning of hull. Lowest %v, first: %v", lowest, p1))
	}
	// scan: keep p2 while (p1,p2,p3) turns left, otherwise backtrack one point
	for i := 2; i < len(p); {
		if isLeftTurn(p1, p2, p3) {
			hull = append(hull, p2)
			i++
			if i >= len(p) {
				if isLeftTurn(p2, p3, lowest) {
					hull = append(hull, p3)
				}
				break
			}
			p1, p2, p3 = p2, p3, p[i]
		} else {
			if len(hull) <= 1 {
				// TODO this is probably a bug, debug and fix
				log.Warnln("Illegal convex hull with", len(p), "points")
				return p
			}
			// pop the last hull point and retry with the previous corner
			p2 = p1
			p1 = hull[len(hull)-2]
			hull = hull[:len(hull)-1]
		}
	}
	hull = append(hull, lowest) // Close the "circle"
	return hull
}
// ComputeLowestPoint returns the point with the smallest Y coordinate,
// breaking ties by the smallest X (the canonical Graham-scan pivot).
func (p ConvexHull) ComputeLowestPoint() Point {
	low := p[0]
	for _, candidate := range p[1:] {
		if candidate.Y < low.Y || (candidate.Y == low.Y && candidate.X < low.X) {
			low = candidate
		}
	}
	return low
}
// isLeftTurn reports whether the path a -> b -> c makes a counter-clockwise
// turn at b.
func isLeftTurn(a, b, c Point) bool {
	// > 0: left turn
	// == 0: collinear points
	// < 0: right turn
	return crossProductZ(a, b, c) > 0
}

// crossProductZ returns the z component of the cross product of the vectors
// (b-a) and (c-a); its sign gives the orientation of the triple (a, b, c).
func crossProductZ(a, b, c Point) float64 {
	return (b.X-a.X)*(c.Y-a.Y) - (c.X-a.X)*(b.Y-a.Y)
}
// ==================== Sort by the angle from a reference point ====================
func SortByAngle(points []Point) []Point {
p := ConvexHull(points)
l := p.ComputeLowestPoint()
sort.Sort(AngleBasedSort{Reference: l, Points: p})
return p
}
type AngleBasedSort struct {
Reference Point
Points []Point
}
func (s AngleBasedSort) Len() int {
return len(s.Points)
}
func (s AngleBasedSort) Swap(i, j int) {
p := s.Points
p[i], p[j] = p[j], p[i]
}
func (s AngleBasedSort) Less(i, j int) bool {
a, b := s.Points[i], s.Points[j]
z := crossProductZ(s.Reference, a, b)
if z == 0 {
// Collinear points: use distance to reference as second argument
return s.Reference.Distance(a) < s.Reference.Distance(b)
}
return z > 0
}
// ====================================== Batch processor ======================================
func BatchConvexHull(sortOnly bool) bitflow.BatchProcessingStep {
desc := "convex hull"
if sortOnly {
desc += " sort"
}
return &bitflow.SimpleBatchProcessingStep{
Description: desc,
Process: func(header *bitflow.Header, samples []*bitflow.Sample) (*bitflow.Header, []*bitflow.Sample, error) {
if len(header.Fields) != 2 {
return nil, nil, fmt.Errorf(
"Cannot compute convex hull for %v dimension(s), only 2-dimensional data is allowed", len(header.Fields))
}
points := make([]Point, len(samples))
for i, sample := range samples {
points[i].X = float64(sample.Values[0])
points[i].Y = float64(sample.Values[1])
}
var hull ConvexHull
if sortOnly {
hull = SortByAngle(points)
} else {
hull = ComputeConvexHull(points)
}
for i, point := range hull {
samples[i].Values[0] = bitflow.Value(point.X)
samples[i].Values[1] = bitflow.Value(point.Y)
}
log.Println("Convex hull reduced samples from", len(samples), "to", len(hull))
return header, samples[:len(hull)], nil
},
}
}
func RegisterConvexHull(b reg.ProcessorRegistry) {
b.RegisterBatchStep("convex_hull",
func(_ map[string]interface{}) (bitflow.BatchProcessingStep, error) {
return BatchConvexHull(false), nil
},
"Filter out the convex hull for a two-dimensional batch of samples")
}
func RegisterConvexHullSort(b reg.ProcessorRegistry) {
b.RegisterBatchStep("convex_hull_sort",
func(_ map[string]interface{}) (bitflow.BatchProcessingStep, error) {
return BatchConvexHull(true), nil
},
"Sort a two-dimensional batch of samples in order around their center")
} | steps/math/convex_hull.go | 0.586286 | 0.515742 | convex_hull.go | starcoder |
package main
import (
"math"
"fmt"
)
// node is a Dijkstra search node for one grid position.
type node struct {
	pos      Position // grid coordinate this node represents
	prev     *node    // predecessor on the currently best-known path
	distance int      // best-known distance from start; MaxInt32 = unvisited
}

// newNode creates an unvisited node at pos.
func newNode(pos Position) node {
	return node{pos: pos, distance: math.MaxInt32}
}
// grid is the set of not-yet-visited Dijkstra nodes.
type grid struct {
	nodes []*node
}

// addNode appends a node and returns the grid for chaining.
func (g *grid) addNode(n *node) *grid {
	g.nodes = append(g.nodes, n)
	return g
}

// getShortest returns the unvisited node with the smallest tentative
// distance, breaking ties by reading order (top-to-bottom, left-to-right).
func (g *grid) getShortest() *node {
	shortestDistance := g.nodes[0].distance
	// bucket nodes by distance while tracking the minimum distance seen
	paths := make(map[int][]*node)
	for _, n := range g.nodes {
		if n.distance < shortestDistance {
			shortestDistance = n.distance
		}
		paths[n.distance] = append(paths[n.distance], n)
	}
	var bestNode *node
	for _, n := range paths[shortestDistance] {
		// pick by reading order
		if bestNode == nil || bestNode.pos.y > n.pos.y || (bestNode.pos.y == n.pos.y && bestNode.pos.x > n.pos.x) {
			bestNode = n
		}
	}
	return bestNode
}

// remove deletes all nodes whose position equals d's position.
// NOTE(review): mutating g.nodes while ranging over it is fragile; this only
// works cleanly if at most one element matches — confirm that invariant.
func (g *grid) remove(d *node) *grid {
	for i, n := range g.nodes {
		if n.pos.IsEqual(d.pos) {
			if i >= len(g.nodes)-1 { // TODO: Check how > can occur in this condition...
				g.nodes = g.nodes[:i]
			} else {
				g.nodes = append(g.nodes[:i], g.nodes[i+1:]...)
			}
		}
	}
	return g
}

// getNeighbors returns all unvisited nodes at Manhattan distance 1 from d
// (i.e. the orthogonal neighbours still in the grid).
func (g *grid) getNeighbors(d *node) []*node {
	var neighbors []*node
	for _, n := range g.nodes {
		// note: this local d shadows the parameter d after assignment
		d := manhattanDistance(d.pos.x, d.pos.y, n.pos.x, n.pos.y)
		if d == 1 {
			neighbors = append(neighbors, n)
		}
	}
	return neighbors
}
// Dijkstra2D runs Dijkstra's algorithm over the given walkable positions,
// allowing only simple 2D movement (non diagonal) with uniform cost 1.
// It returns whether dest is reachable from start and, if so, the path from
// the first step after start up to and including dest. On failure it returns
// false and a single sentinel position {-1, -1}.
// https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm#Pseudocode
func Dijkstra2D(walkable []Position, start Position, dest Position) (bool, []Position) {
var Q grid
for _, p := range walkable {
n := newNode(p)
if p.IsEqual(start) {
n.distance = 0
}
Q.addNode(&n)
}
for {
u := Q.getShortest()
Q.remove(u)
if u.pos.IsEqual(dest) {
steps := []Position{u.pos}
if u.prev == nil {
// Dest popped without a predecessor chain; treated as unreachable.
// TODO: Check this case, why does this occur..
return false, []Position{Position{-1, -1}}
}
// Walk the predecessor chain back to (but excluding) the start node,
// prepending so steps end up in travel order.
for p := u.prev; p.prev != nil; p = p.prev {
steps = append([]Position{p.pos}, steps...)
}
return true, steps
}
for _, n := range Q.getNeighbors(u) {
var newDist int
// Uniform move cost of 1. (An earlier comment claimed verticals were
// penalised by 2; no such penalty is implemented here.)
newDist = u.distance + 1
if newDist < n.distance {
n.distance = newDist
n.prev = u
}
}
if len(Q.nodes) == 0 {
return false, []Position{Position{-1, -1}}
}
}
}
// manhattanDistance returns |x1-x2| + |y1-y2|, the taxicab distance
// between the points (x1, y1) and (x2, y2).
func manhattanDistance(x1 int, y1 int, x2 int, y2 int) int {
	dx := x1 - x2
	if dx < 0 {
		dx = -dx
	}
	dy := y1 - y2
	if dy < 0 {
		dy = -dy
	}
	return dx + dy
}
// asdf is an unused leftover helper that prints an empty line.
// TODO(review): nothing in this file calls it — confirm it can be deleted.
func asdf() {
fmt.Println("")
} | day15/go/anoff/src/dijkstra.go | 0.517571 | 0.487124 | dijkstra.go | starcoder |
package starwars
//Based on https://github.com/facebook/relay/blob/master/examples/star-wars/data/database.js
/**
* This defines a basic set of data for our Star Wars Schema.
*
* This data is hard coded for the sake of the demo, but you could imagine
* fetching this data from a backend service rather than from hardcoded
* JSON objects in a more complex demo.
*/
// Hard-coded Ship records; each Id below doubles as the key in data["Ship"].
var xwing = Ship{
Id: "1",
Name: "X-Wing",
}
var ywing = Ship{
Id: "2",
Name: "Y-Wing",
}
var awing = Ship{
Id: "3",
Name: "A-Wing",
}
// Yeah, technically it's Corellian. But it flew in the service of the rebels,
// so for the purposes of this demo it's a rebel ship.
var falcon = Ship{
Id: "4",
Name: "Millenium Falcon",
}
var homeOne = Ship{
Id: "5",
Name: "Home One",
}
var tieFighter = Ship{
Id: "6",
Name: "TIE Fighter",
}
var tieInterceptor = Ship{
Id: "7",
Name: "TIE Interceptor",
}
var executor = Ship{
Id: "8",
Name: "Executor",
}
// Factions are stored as loose maps; "ships" lists Ship ids, resolved by GetShips.
var rebels = map[string]interface{}{
"id": "1",
"name": "Alliance to Restore the Republic",
"ships": []string{"1", "2", "3", "4", "5"},
}
var empire = map[string]interface{}{
"id": "2",
"name": "Galactic Empire",
"ships": []string{"6", "7", "8"},
}
// data is the fake in-memory database: top-level "tables" keyed by record id.
var data = map[string]map[string]interface{}{
"Faction": map[string]interface{}{
"1": rebels,
"2": empire,
},
"Ship": map[string]interface{}{
"1": xwing,
"2": ywing,
"3": awing,
"4": falcon,
"5": homeOne,
"6": tieFighter,
"7": tieInterceptor,
"8": executor,
},
}
// nextShipId/nextShipName mirror the createShip state from the original JS
// (see the commented-out reference below); currently unused in this Go port.
var nextShipId = "9"
var nextShipName = "New ship"
// GetShips returns the Ship values belonging to the faction with the given
// id, in the order listed in the faction's "ships" slice.
// It panics (failed type assertion / nil map entry) if factionId is unknown.
func GetShips(factionId string) []interface{} {
ships := []interface{}{}
for _, s := range data["Faction"][factionId].(map[string]interface{})["ships"].([]string) {
ships = append(ships, data["Ship"][s].(Ship))
}
return ships
}
// GetFaction builds a Faction value from the stored faction map with the
// given id. It panics if Id is unknown (type assertion on a nil entry).
func GetFaction(Id string) Faction {
f := data["Faction"][Id].(map[string]interface{})
return Faction{
Id: f["id"].(string),
Name: f["name"].(string),
}
}
/*
export function createShip(shipName, factionId) {
const newShip = {
id: '' + (nextShip++),
name: shipName,
};
data.Ship[newShip.id] = newShip;
data.Faction[factionId].ships.push(newShip.id);
return newShip;
}
export function getShip(id) {
return data.Ship[id];
}
export function getShips(id) {
return data.Faction[id].ships.map(shipId => data.Ship[shipId]);
}
*/ | examples/starwars/database.go | 0.654895 | 0.450118 | database.go | starcoder |
// Package horn provides an implementation of Higher Order Recurrent Neural Networks (HORN).
package horn
import (
"encoding/gob"
"math"
"github.com/nlpodyssey/spago/ag"
"github.com/nlpodyssey/spago/mat"
"github.com/nlpodyssey/spago/mat/float"
"github.com/nlpodyssey/spago/nn"
)
// Compile-time check that *Model implements nn.Model.
var _ nn.Model = &Model{}
// Model contains the serializable parameters.
type Model struct {
nn.Module
W nn.Param `spago:"type:weights"` // input-to-hidden weights (out x in)
WRec []nn.Param `spago:"type:weights"` // one recurrent matrix per order step (out x out)
B nn.Param `spago:"type:biases"` // hidden bias (out)
}
// State represents a single time-step state of the Horn recurrent network.
type State struct {
Y ag.Node // output activation of this step
}
// init registers the model with gob so it can be (de)serialized.
func init() {
gob.Register(&Model{})
}
// New returns a new model with parameters initialized to zeros.
// in and out are the input/output sizes; order is the number of past states
// fed back into each step (one recurrent weight matrix per past state).
func New[T float.DType](in, out, order int) *Model {
wRec := make([]nn.Param, order)
for i := 0; i < order; i++ {
wRec[i] = nn.NewParam(mat.NewEmptyDense[T](out, out))
}
return &Model{
W: nn.NewParam(mat.NewEmptyDense[T](out, in)),
WRec: wRec,
B: nn.NewParam(mat.NewEmptyVecDense[T](out)),
}
}
// Forward performs the forward step for each input node and returns the result.
// The i-th output is produced from the i-th input plus all previously
// produced states (up to the model's order; see Next/feedback).
func (m *Model) Forward(xs ...ag.Node) []ag.Node {
	ys := make([]ag.Node, len(xs))
	// Pre-size: exactly one state is appended per input.
	states := make([]*State, 0, len(xs))
	for i, x := range xs {
		s := m.Next(states, x)
		states = append(states, s)
		ys[i] = s.Y
	}
	return ys
}
// Next performs a single forward step, producing a new state:
// y = tanh(B + W·x + feedback terms from the most recent states).
func (m *Model) Next(states []*State, x ag.Node) (s *State) {
s = new(State)
h := ag.Affine(append([]ag.Node{m.B, m.W, x}, m.feedback(states)...)...)
s.Y = ag.Tanh(h)
return
}
// feedback builds the (weight, value) argument pairs for ag.Affine from the
// most recent states. The i-th most recent state is damped by the fixed
// factor 0.6^(i+1), so older states contribute exponentially less.
func (m *Model) feedback(states []*State) []ag.Node {
var ys []ag.Node
n := len(states)
for i := 0; i < min(len(m.WRec), n); i++ {
alpha := ag.Var(m.WRec[i].Value().NewScalar(math.Pow(0.6, float64(i+1))))
ys = append(ys, m.WRec[i], ag.ProdScalar(states[n-1-i].Y, alpha))
}
return ys
}
// min returns the smaller of a and b.
// (Local helper predating the Go 1.21 built-in min.)
func min(a, b int) int {
if a < b {
return a
}
return b
} | nn/recurrent/horn/horn.go | 0.881066 | 0.470189 | horn.go | starcoder |
package sources
import (
"math"
"github.com/crnbaker/gostringsynth/numeric"
)
// stringSource provides attributes that define a finite-difference simulation of a vibrating string
type stringSource struct {
fdtdSource
sampleRate float64 // audio sample rate in Hz
stringLengthM float64 // string length in metres
physics stringSettings // physical string parameters
pluck pluckSettings // initial pluck excitation parameters
}
// calculateLossFactor returns a loss factor used to attenuate the string
// vibration during synthesis, derived from the configured decay time.
func (s *stringSource) calculateLossFactor() float64 {
g := s.physics.DecayTimeS * s.sampleRate / (s.physics.DecayTimeS*s.sampleRate + 6*math.Log(10)) // Stefan Bilbao's loss factor
return g
}
// calculateVoiceLifetime determines the lifetime to give to the exported
// Voice, in samples (decay time converted at the current sample rate).
func (s *stringSource) calculateVoiceLifetime() int {
	// Round after converting to samples. The previous version rounded the
	// seconds first, so sub-half-second decays truncated to a 0-sample
	// lifetime (e.g. 0.4 s -> 0).
	return int(math.Round(s.physics.DecayTimeS * s.sampleRate))
}
// createVoice packages the synthesis function as a Voice with the computed
// sample lifetime. (The previous comment referred to an older PublishVoice API.)
func (s *stringSource) createVoice() *Voice {
return &Voice{s.synthesize, 0, s.calculateVoiceLifetime(), false}
}
// synthesize advances the finite-difference simulation by one time step and
// returns the audio sample read at the pickup position. The grid row is
// shifted afterwards (deferred stepGrid), so row indices 0/1/2 are the
// previous, current and next string states respectively.
func (s *stringSource) synthesize() float32 {
defer s.stepGrid()
dt2 := math.Pow(1/s.sampleRate, 2)
a2 := math.Pow(s.physics.WaveSpeedMpS, 2)
dx2 := math.Pow(s.stringLengthM/float64(s.numSpatialSections), 2)
// coeff is the squared Courant number (c·dt/dx)^2 of the FDTD update.
coeff := (dt2 * a2) / dx2
g := s.calculateLossFactor()
// Update interior points only; the endpoints stay fixed (clamped string).
for m := 1; m < s.numSpatialSections; m++ {
s.fdtdGrid[2][m] = g *
(coeff*(s.fdtdGrid[1][m+1]-2*s.fdtdGrid[1][m]+s.fdtdGrid[1][m-1]) +
2*s.fdtdGrid[1][m] - (2-1/g)*s.fdtdGrid[0][m])
}
return s.readPickup()
}
// readPickup is used by synthesize to generate an output sample from the
// configured point on the string. The pickup index is rounded towards the
// nearer string end so it never lands exactly on a fixed endpoint side.
func (s *stringSource) readPickup() float32 {
var pickupPoint int
if s.physics.PickupPosReStringLen < 0.5 {
pickupPoint = int(math.Ceil(float64(s.numSpatialSections) * s.physics.PickupPosReStringLen))
} else {
pickupPoint = int(math.Floor(float64(s.numSpatialSections) * s.physics.PickupPosReStringLen))
}
return float32(s.fdtdGrid[2][pickupPoint])
}
// stepGrid advances the finite-difference grid by one timestep: it appends a
// fresh (zeroed) row for the next state and drops the oldest row, keeping a
// sliding window of three string states for synthesize.
func (s *stringSource) stepGrid() {
s.fdtdGrid = append(s.fdtdGrid, make([]float64, s.numSpatialSections+1))
s.fdtdGrid = s.fdtdGrid[1:]
}
// pluckSettings describes the initial pluck excitation of the string.
type pluckSettings struct {
PosReStrLen float64 // pluck position as a fraction of string length [0,1]
WidthReStrLen float64 // finger width as a fraction of string length [0,1]
Amplitude float64 // peak displacement of the pluck
}
// pluckString initialises the first two grid rows with a triangular pluck
// shape, optionally smoothed with a moving average whose window models the
// finger width. Returns the resulting initial string state.
func (s *stringSource) pluckString() []float64 {
pluckShape := createTrianglePluck(s.pluck.Amplitude, s.numSpatialSections+1, s.pluck.PosReStrLen)
if s.pluck.WidthReStrLen < 1.0 {
stringLengthInPoints := s.numSpatialSections + 1
fingerWidthInSections := s.pluck.WidthReStrLen * float64(s.numSpatialSections)
fingerHalfWidthInPoints := int(math.Round(fingerWidthInSections+1) / 2)
fingerWidthInPoints := fingerHalfWidthInPoints * 2
if fingerWidthInPoints > 2 {
var start int
var stop int
// Moving average over the finger window; note pluckShape is smoothed
// in place, so later windows read already-smoothed values.
for i := fingerHalfWidthInPoints; i < stringLengthInPoints-fingerHalfWidthInPoints; i++ {
start = i - fingerHalfWidthInPoints
stop = i + fingerHalfWidthInPoints
pluckShape[i] = mean(pluckShape[start:stop])
}
}
}
// Both history rows get the same shape => string starts at rest (zero velocity).
s.fdtdGrid[0] = pluckShape
s.fdtdGrid[1] = pluckShape
return s.fdtdGrid[0]
}
// stringSettings holds the physical parameters of the simulated string.
type stringSettings struct {
WaveSpeedMpS float64 // wave propagation speed in metres per second
DecayTimeS float64 // decay time of the vibration in seconds
PickupPosReStringLen float64 // pickup position as a fraction of string length [0,1]
}
// newStringSource constructs a stringSource from the physical properties of
// a string. Fractional positions are clamped to [0,1], and the number of
// spatial sections is chosen from the CFL stability condition
// dx >= c*dt for the FDTD scheme.
func newStringSource(sampleRate float64, lengthM float64, physics stringSettings,
pluck pluckSettings) stringSource {
physics.PickupPosReStringLen = numeric.Clip(physics.PickupPosReStringLen, 0, 1)
pluck.PosReStrLen = numeric.Clip(pluck.PosReStrLen, 0, 1)
pluck.WidthReStrLen = numeric.Clip(pluck.WidthReStrLen, 0, 1)
numSpatialSections := int(math.Floor(lengthM / (physics.WaveSpeedMpS * (1 / sampleRate)))) // Stability condition
s := stringSource{
newFtdtSource(3, numSpatialSections),
sampleRate,
lengthM,
physics,
pluck,
}
return s
}
// mean returns the arithmetic mean of slice.
// The previous version seeded the sum with slice[0] and then added every
// element again, so the first element was counted twice. It also indexed
// slice[0] unconditionally; an empty slice now yields NaN (0/0) instead of
// panicking.
func mean(slice []float64) float64 {
	var sum float64
	for _, value := range slice {
		sum += value
	}
	return sum / float64(len(slice))
}
// createTrianglePluck fills a slice of the given length with a triangular
// displacement profile of the given amplitude, peaking at
// floor(length * pluckPosFraction), clamped to [1, length-1].
// Lengths below 2 yield an all-zero slice (no room for a triangle).
func createTrianglePluck(amplitude float64, length int, pluckPosFraction float64) []float64 {
	pluck := make([]float64, length)
	if length < 2 {
		return pluck
	}
	pluckPoint := int(math.Floor(float64(length) * pluckPosFraction))
	if pluckPoint < 1 {
		pluckPoint = 1
	} else if pluckPoint >= length {
		pluckPoint = length - 1
	}
	// Rising edge up to and including the peak.
	for point := 0; point <= pluckPoint; point++ {
		pluck[point] = amplitude * float64(point) / float64(pluckPoint)
	}
	// Falling edge. Starting at pluckPoint+1 (the peak is already set) avoids
	// the 0/0 = NaN the old code produced at the last sample when
	// pluckPoint == length-1.
	down := float64(length - pluckPoint - 1)
	for point := pluckPoint + 1; point < length; point++ {
		pluck[point] = amplitude * float64(length-point-1) / down
	}
	return pluck
}
// Package maputil includes some functions to manipulate map.
package maputil
import "reflect"
// Keys returns a slice of the map's keys
func Keys[K comparable, V any](m map[K]V) []K {
keys := make([]K, 0, len(m))
for k := range m {
keys = append(keys, k)
}
return keys
}
// Values returns a slice of the map's values
func Values[K comparable, V any](m map[K]V) []V {
values := make([]V, 0, len(m))
for _, v := range m {
values = append(values, v)
}
return values
}
// Merge maps, next key will overwrite previous key
func Merge[K comparable, V any](maps ...map[K]V) map[K]V {
res := make(map[K]V, 0)
for _, m := range maps {
for k, v := range m {
res[k] = v
}
}
return res
}
// ForEach executes iteratee funcation for every key and value pair in map
func ForEach[K comparable, V any](m map[K]V, iteratee func(key K, value V)) {
for k, v := range m {
iteratee(k, v)
}
}
// Filter iterates over map, return a new map contains all key and value pairs pass the predicate function
func Filter[K comparable, V any](m map[K]V, predicate func(key K, value V) bool) map[K]V {
res := make(map[K]V)
for k, v := range m {
if predicate(k, v) {
res[k] = v
}
}
return res
}
// Intersect iterates over maps, return a new map of key and value pairs in all given maps
func Intersect[K comparable, V any](maps ...map[K]V) map[K]V {
if len(maps) == 0 {
return map[K]V{}
}
if len(maps) == 1 {
return maps[0]
}
var res map[K]V
reducer := func(m1, m2 map[K]V) map[K]V {
m := make(map[K]V)
for k, v1 := range m1 {
if v2, ok := m2[k]; ok && reflect.DeepEqual(v1, v2) {
m[k] = v1
}
}
return m
}
reduceMaps := make([]map[K]V, 2, 2)
res = reducer(maps[0], maps[1])
for i := 2; i < len(maps); i++ {
reduceMaps[0] = res
reduceMaps[1] = maps[i]
res = reducer(reduceMaps[0], reduceMaps[1])
}
return res
}
// Minus creates an map of whose key in mapA but not in mapB
func Minus[K comparable, V any](mapA, mapB map[K]V) map[K]V {
res := make(map[K]V)
for k, v := range mapA {
if _, ok := mapB[k]; !ok {
res[k] = v
}
}
return res
} | maputil/map.go | 0.826957 | 0.514034 | map.go | starcoder |
package utils
import (
"image"
"image/color"
)
// ForEachPixel loops through the image and calls f functions for each [x, y] position.
func ForEachPixel(size image.Point, f func(x int, y int)) {
for y := 0; y < size.Y; y++ {
for x := 0; x < size.X; x++ {
f(x, y)
}
}
}
// ForEachGrayPixel loops through the image and calls f for each gray pixel.
func ForEachGrayPixel(img *image.Gray, f func(pixel color.Gray)) {
ForEachPixel(img.Bounds().Size(), func(x, y int) {
pixel := img.GrayAt(x, y)
f(pixel)
})
}
// ForEachRGBAPixel loops through the image and calls f for each RGBA pixel.
func ForEachRGBAPixel(img *image.RGBA, f func(pixel color.RGBA)) {
ForEachPixel(img.Bounds().Size(), func(x, y int) {
pixel := img.RGBAAt(x, y)
f(pixel)
})
}
// ForEachRGBARedPixel loops through the image and calls f with the red component of each RGBA pixel.
func ForEachRGBARedPixel(img *image.RGBA, f func(r uint8)) {
ForEachRGBAPixel(img, func(pixel color.RGBA) {
f(pixel.R)
})
}
// ForEachRGBAGreenPixel loops through the image and calls f with the green component of each RGBA pixel.
func ForEachRGBAGreenPixel(img *image.RGBA, f func(r uint8)) {
ForEachRGBAPixel(img, func(pixel color.RGBA) {
f(pixel.G)
})
}
// ForEachRGBABluePixel loops through the image and calls f with the blue component of each RGBA pixel.
func ForEachRGBABluePixel(img *image.RGBA, f func(r uint8)) {
ForEachRGBAPixel(img, func(pixel color.RGBA) {
f(pixel.B)
})
}
// ForEachRGBAAlphaPixel loops through the image and calls f with the alpha component of each RGBA pixel.
func ForEachRGBAAlphaPixel(img *image.RGBA, f func(r uint8)) {
ForEachRGBAPixel(img, func(pixel color.RGBA) {
f(pixel.A)
})
}
// ClampInt limits value to the inclusive range [min, max]: it returns min
// when value lies below the range, max when it lies above, and value itself
// otherwise.
func ClampInt(value int, min int, max int) int {
	switch {
	case value < min:
		return min
	case value > max:
		return max
	default:
		return value
	}
}
// ClampF64 limits value to the inclusive range [min, max]: it returns min
// when value lies below the range, max when it lies above, and value itself
// otherwise.
func ClampF64(value float64, min float64, max float64) float64 {
	switch {
	case value < min:
		return min
	case value > max:
		return max
	default:
		return value
	}
}
// GetMax returns the maximum value from a slice.
// It returns 0 for an empty (or nil) slice instead of panicking on v[0]
// as the previous version did.
func GetMax(v []uint64) uint64 {
	if len(v) == 0 {
		return 0
	}
	max := v[0]
	for _, value := range v[1:] {
		if value > max {
			max = value
		}
	}
	return max
}
package velocypack
import "fmt"
// Type returns the vpack type of the slice, derived from its head byte.
func (s Slice) Type() ValueType {
return typeMap[s.head()]
}
// IsType returns true when the vpack type of the slice is equal to the given type.
// Returns false otherwise.
func (s Slice) IsType(t ValueType) bool {
return typeMap[s.head()] == t
}
// AssertType returns an error when the vpack type of the slice is different from the given type.
// Returns nil otherwise.
func (s Slice) AssertType(t ValueType) error {
if found := typeMap[s.head()]; found != t {
return WithStack(InvalidTypeError{Message: fmt.Sprintf("expected type '%s', got '%s'", t, found)})
}
return nil
}
// AssertTypeAny returns an error when the vpack type of the slice is different from all of the given types.
// Returns nil otherwise.
func (s Slice) AssertTypeAny(t ...ValueType) error {
found := typeMap[s.head()]
for _, x := range t {
if x == found {
return nil
}
}
return WithStack(InvalidTypeError{Message: fmt.Sprintf("expected types '%q', got '%s'", t, found)})
}
// The predicates below classify a slice either via typeMap (IsType) or by
// comparing its head byte against fixed VelocyPack format markers.
// IsNone returns true if slice is a None object
func (s Slice) IsNone() bool { return s.IsType(None) }
// IsIllegal returns true if slice is an Illegal object
func (s Slice) IsIllegal() bool { return s.IsType(Illegal) }
// IsNull returns true if slice is a Null object
func (s Slice) IsNull() bool { return s.IsType(Null) }
// IsBool returns true if slice is a Bool object
func (s Slice) IsBool() bool { return s.IsType(Bool) }
// IsTrue returns true if slice is the Boolean value true (head byte 0x1a)
func (s Slice) IsTrue() bool { return s.head() == 0x1a }
// IsFalse returns true if slice is the Boolean value false (head byte 0x19)
func (s Slice) IsFalse() bool { return s.head() == 0x19 }
// IsArray returns true if slice is an Array object
func (s Slice) IsArray() bool { return s.IsType(Array) }
// IsEmptyArray tests whether the Slice is an empty array (head byte 0x01)
func (s Slice) IsEmptyArray() bool { return s.head() == 0x01 }
// IsObject returns true if slice is an Object object
func (s Slice) IsObject() bool { return s.IsType(Object) }
// IsEmptyObject tests whether the Slice is an empty object (head byte 0x0a)
func (s Slice) IsEmptyObject() bool { return s.head() == 0x0a }
// IsDouble returns true if slice is a Double object
func (s Slice) IsDouble() bool { return s.IsType(Double) }
// IsUTCDate returns true if slice is a UTCDate object
func (s Slice) IsUTCDate() bool { return s.IsType(UTCDate) }
// IsExternal returns true if slice is an External object
func (s Slice) IsExternal() bool { return s.IsType(External) }
// IsMinKey returns true if slice is a MinKey object
func (s Slice) IsMinKey() bool { return s.IsType(MinKey) }
// IsMaxKey returns true if slice is a MaxKey object
func (s Slice) IsMaxKey() bool { return s.IsType(MaxKey) }
// IsInt returns true if slice is an Int object
func (s Slice) IsInt() bool { return s.IsType(Int) }
// IsUInt returns true if slice is a UInt object
func (s Slice) IsUInt() bool { return s.IsType(UInt) }
// IsSmallInt returns true if slice is a SmallInt object
func (s Slice) IsSmallInt() bool { return s.IsType(SmallInt) }
// IsString returns true if slice is a String object
func (s Slice) IsString() bool { return s.IsType(String) }
// IsBinary returns true if slice is a Binary object
func (s Slice) IsBinary() bool { return s.IsType(Binary) }
// IsBCD returns true if slice is a BCD
func (s Slice) IsBCD() bool { return s.IsType(BCD) }
// IsCustom returns true if slice is a Custom type
func (s Slice) IsCustom() bool { return s.IsType(Custom) }
// IsInteger returns true if a slice is any decimal number type
func (s Slice) IsInteger() bool { return s.IsInt() || s.IsUInt() || s.IsSmallInt() }
// IsNumber returns true if slice is any Number-type object
func (s Slice) IsNumber() bool { return s.IsInteger() || s.IsDouble() }
// IsSorted returns true if slice is an object with table offsets, sorted by attribute name
// (object head bytes 0x0b..0x0e)
func (s Slice) IsSorted() bool {
h := s.head()
return (h >= 0x0b && h <= 0x0e)
} | deps/github.com/arangodb/go-velocypack/slice_type.go | 0.890628 | 0.492188 | slice_type.go | starcoder |
package bit
import (
	"fmt"
	"math"
	"math/bits"
	"os"
)
// Array is an array in in which elements are packed with a width of
// b < 64 bits. It allows for space-efficient storage when integers have
// well-knownvalue ranges that don't correspond to exactly 64, 32, 16, or 8
// bits.
type Array struct {
Length int
Bits byte
Data []byte
}
// PrecisionNeeded returns the number of bits needed to represent any value
// in [0, max], i.e. ceil(log2(max+1)).
//
// Implemented with bits.Len64 instead of floating-point Log2: the float
// version lost precision for values near 2^53 and overflowed max+1 to 0
// when max == MaxUint64.
func PrecisionNeeded(max uint64) int {
	return bits.Len64(max)
}
// ArrayBytes returns the number of bytes needed to store length elements of
// bits bits each, rounded up to a whole byte. Uses integer arithmetic
// instead of the previous float64 Ceil round-trip.
func ArrayBytes(bits, length int) int {
	return (bits*length + 7) / 8
}
// Slice converts the contents of an Array into a standard uint64 slice,
// unpacking arr.Length values of arr.Bits bits each into out.
// len(out) must be at least arr.Length (the check is >=, although the panic
// message says "equal").
func (arr *Array) Slice(out []uint64) {
if len(out) < arr.Length {
panic(fmt.Sprintf("Array has length %d, but out buffer has " +
"length %d.", arr.Length, len(out)))
}
// Set up buffers and commonly-used values.
bits := int(arr.Bits)
buf, tBuf := [8]byte{ }, [9]byte{ }
// bufBytes = ceil(Bits / 8): bytes per unpacked element.
bufBytes := uint64(arr.Bits / 8)
if bufBytes * 8 < uint64(arr.Bits) { bufBytes++ }
for i := 0; i < arr.Length; i++ {
// Find where we are in the array (bit and byte offsets of element i).
startBit := uint64(i*bits % 8)
nextStartBit := (startBit + uint64(bits)) % 8
startByte := int(i*bits / 8)
endByte := int(((i + 1)*bits - 1) / 8)
tBufBytes := endByte - startByte + 1
// Pull bytes out into a buffer.
for j := 0; j < tBufBytes; j++ {
tBuf[j] = arr.Data[startByte + j]
}
// Mask unrelated edges (bits belonging to neighboring elements).
startMask := (^byte(0)) << startBit
endMask := (^byte(0)) >> (8 - nextStartBit)
if nextStartBit == 0 { endMask = ^byte(0) }
tBuf[0] &= startMask
tBuf[tBufBytes - 1] &= endMask
// Transfer shifted bytes into unshifted buffer.
for j := uint64(0); j < bufBytes; j++ {
buf[j] = tBuf[j] >> startBit
}
for j := uint64(0); j < bufBytes; j++ {
buf[j] |= tBuf[j+1] << (8-startBit)
}
// Clear tBuf for next loop.
for i := 0; i < tBufBytes; i++ { tBuf[i] = 0 }
// Convert to uint64 (little-endian byte order).
xi := uint64(0)
for j := uint64(0); j < bufBytes; j++ {
xi |= uint64(buf[j]) << (8*j)
}
out[i] = xi
}
}
// BufferedArray packs the low `bits` bits of every element of x into the
// caller-supplied buffer b and returns an Array backed by it.
// b must have exactly ArrayBytes(bits, len(x)) bytes and is zeroed first;
// the function panics on a size mismatch or bits > 64.
func BufferedArray(bits int, x []uint64, b []byte) *Array {
if bits > 64 {
panic("Cannot pack more than 64 bits per element into a bit.Array")
}
nBytes := ArrayBytes(bits, len(x))
if len(b) != nBytes {
panic(fmt.Sprintf("bit.BufferedArray given buffer of length %d, " +
"but length %d was required.", len(b), nBytes))
}
for i := range b { b[i] = 0 }
arr := &Array{
Length: len(x), Bits: byte(bits), Data: b,
}
// bufBytes = ceil(bits / 8): bytes per packed element.
buf, tBuf := [8]byte{ }, [9]byte{ }
bufBytes := uint64(bits / 8)
if bufBytes * 8 < uint64(bits) { bufBytes++ }
// mask keeps only the low `bits` bits of each element.
// NOTE(review): for bits == 0 this shifts by 64, giving mask 0 in Go.
mask := (^uint64(0)) >> uint64(64 - bits)
for i, xi := range x {
xi &= mask
currBit := uint64(i*bits % 8)
// Move to byte-wise buffer.
for j := uint64(0); j < bufBytes; j++ {
buf[j] = byte(xi >> (8*j))
}
// Shift and move to the transfer buffer
tBuf[bufBytes] = 0
for j := uint64(0); j < bufBytes; j++ {
tBuf[j] = buf[j] << currBit
}
for j := uint64(0); j < bufBytes; j++ {
tBuf[j + 1] |= buf[j] >> (8-currBit)
}
// Transfer bits into the Array (OR preserves neighbors' bits).
startByte := i * bits / 8
endByte := ((i + 1)*bits - 1) / 8
for j := 0; j < (endByte - startByte) + 1; j++ {
arr.Data[startByte + j] |= tBuf[j]
}
}
return arr
}
// NewArray creates a new Array which stores only the `bits` least
// significant bits of every element in x, allocating a fresh buffer.
func NewArray(bits int, x []uint64) *Array {
// Set up buffers and commonly used values.
nBytes := ArrayBytes(bits, len(x))
return BufferedArray(bits, x, make([]byte, nBytes))
}
// ArrayBuffer allows for Arrays to be read from and written to files without
// excess heap allocation: both scratch buffers are grown once and reused.
type ArrayBuffer struct {
byteBuf []byte // packed bytes, reused by Write/Read
uint64Buf []uint64 // unpacked values, reused by Read/Uint64
}
// Bits returns the number of bits needed to store the largest value in x,
// or 0 for an empty slice.
func (ab *ArrayBuffer) Bits(x []uint64) int {
	if len(x) == 0 {
		return 0
	}
	max := x[0]
	for _, v := range x[1:] {
		if v > max {
			max = v
		}
	}
	// max is already uint64; the old uint64(max) re-conversion was redundant.
	return PrecisionNeeded(max)
}
// Write packs x at the given bit width and writes the packed bytes to f.
// A width of 0 writes nothing.
// NOTE(review): the error returned by f.Write is ignored.
func (ab *ArrayBuffer) Write(f *os.File, x []uint64, bits int) {
if bits == 0 { return }
ab.setByteSize(ArrayBytes(bits, len(x)))
arr := BufferedArray(bits, x, ab.byteBuf)
f.Write(arr.Data)
}
// Read reads n values of the given bit width from f into an internal,
// reused buffer; the returned slice is only valid until the next call.
// A width of 0 yields n zeros without touching f.
// NOTE(review): the error/short-read result of f.Read is ignored.
func (ab *ArrayBuffer) Read(f *os.File, bits, n int) []uint64 {
ab.setUint64Size(n)
if bits == 0 {
for i := range ab.uint64Buf { ab.uint64Buf[i] = 0 }
return ab.uint64Buf
}
ab.setByteSize(ArrayBytes(bits, n))
arr :=Array{ Length: n, Bits: byte(bits), Data: ab.byteBuf }
f.Read(ab.byteBuf)
arr.Slice(ab.uint64Buf)
return ab.uint64Buf
}
// Uint64 returns the internal uint64 buffer resized to length n.
// The contents are whatever the previous Read left behind.
func (ab *ArrayBuffer) Uint64(n int) []uint64 {
ab.setUint64Size(n)
return ab.uint64Buf
}
// setByteSize resizes byteBuf to length n, growing the backing array only
// when the current capacity is insufficient so it can be reused across calls.
func (ab *ArrayBuffer) setByteSize(n int) {
if n <= cap(ab.byteBuf) {
ab.byteBuf = ab.byteBuf[:n]
return
}
ab.byteBuf = ab.byteBuf[:cap(ab.byteBuf)]
nAdd := n - len(ab.byteBuf)
ab.byteBuf = append(ab.byteBuf, make([]byte, nAdd)...)
}
// setUint64Size resizes uint64Buf to length n, growing the backing array
// only when the current capacity is insufficient.
func (ab *ArrayBuffer) setUint64Size(n int) {
if n <= cap(ab.uint64Buf) {
ab.uint64Buf = ab.uint64Buf[:n]
return
}
ab.uint64Buf = ab.uint64Buf[:cap(ab.uint64Buf)]
nAdd := n - len(ab.uint64Buf)
ab.uint64Buf = append(ab.uint64Buf, make([]uint64, nAdd)...)
} | go/bit/bit.go | 0.650134 | 0.42668 | bit.go | starcoder |
package compiler
import (
"github.com/llir/llvm/ir/constant"
"github.com/llir/llvm/ir/enum"
"github.com/llir/llvm/ir/types"
"github.com/llir/llvm/ir/value"
)
// AddEval generates IR for add: integer add for I32 operands, floating-point
// add for Double operands. Returns nil for unsupported operand types.
// Both operands are assumed to have the same type.
func AddEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewAdd(value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFAdd(value1, value2)
}
return nil
}
// MinusEval generates IR for subtraction (value1 - value2).
// Returns nil for unsupported operand types.
func MinusEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewSub(value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFSub(value1, value2)
}
return nil
}
// MultipleEval generates IR for multiplication.
// Returns nil for unsupported operand types.
func MultipleEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewMul(value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFMul(value1, value2)
}
return nil
}
// DivideEval generates IR for division: signed division (SDiv) for I32,
// floating-point division for Double. Returns nil for unsupported types.
func DivideEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewSDiv(value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFDiv(value1, value2)
}
return nil
}
// OppositeEval generates IR for arithmetic negation (0 - v).
// Returns nil for unsupported operand types.
func OppositeEval(scope *Scope, v value.Value) value.Value {
	// The parameter was renamed from `value` to avoid shadowing the llir
	// `value` package inside the body.
	if v.Type() == types.I32 {
		return scope.Block.NewSub(constant.NewInt(types.I32, 0), v)
	} else if v.Type() == types.Double {
		// The zero constant must match the operand type: the old code built it
		// with types.Float, producing an LLVM type mismatch against a Double
		// operand (FSub requires both operands to have the same type).
		return scope.Block.NewFSub(constant.NewFloat(types.Double, 0.0), v)
	}
	return nil
}
// The comparison helpers below emit an i1 predicate: icmp with signed
// predicates for I32 operands, fcmp with ordered predicates for Double
// operands, and nil for unsupported types.
// CmpEQEval generates IR for =
func CmpEQEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewICmp(enum.IPredEQ, value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFCmp(enum.FPredOEQ, value1, value2)
}
return nil
}
// CmpNEEval generates IR for <>
func CmpNEEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewICmp(enum.IPredNE, value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFCmp(enum.FPredONE, value1, value2)
}
return nil
}
// CmpLTEval generates IR for <
func CmpLTEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewICmp(enum.IPredSLT, value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFCmp(enum.FPredOLT, value1, value2)
}
return nil
}
// CmpLEEval generates IR for <=
func CmpLEEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewICmp(enum.IPredSLE, value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFCmp(enum.FPredOLE, value1, value2)
}
return nil
}
// CmpGTEval generates IR for >
func CmpGTEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewICmp(enum.IPredSGT, value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFCmp(enum.FPredOGT, value1, value2)
}
return nil
}
// CmpGEEval generates IR for >=
func CmpGEEval(scope *Scope, value1 value.Value, value2 value.Value) value.Value {
if value1.Type() == types.I32 {
return scope.Block.NewICmp(enum.IPredSGE, value1, value2)
} else if value1.Type() == types.Double {
return scope.Block.NewFCmp(enum.FPredOGE, value1, value2)
}
return nil
} | internal/compiler/operation.go | 0.541651 | 0.406862 | operation.go | starcoder |
package val
import "github.com/dolthub/dolt/go/store/pool"
// TupleBuilder accumulates encoded field values into a fixed scratch buffer
// and materializes them into a Tuple via Build.
type TupleBuilder struct {
Desc TupleDesc // schema: per-field encodings and nullability
buf [MaxTupleDataSize]byte // scratch storage backing all field slices
pos ByteSize // next free offset within buf
fields [MaxTupleFields][]byte // per-field views into buf; nil means NULL
}
// NewTupleBuilder returns an empty builder for the given tuple schema.
func NewTupleBuilder(desc TupleDesc) *TupleBuilder {
return &TupleBuilder{Desc: desc}
}
// Build materializes a Tuple from the fields written to the TupleBuilder.
// It panics if a non-nullable field was never written, and recycles the
// builder afterwards so it can be reused.
func (tb *TupleBuilder) Build(pool pool.BuffPool) (tup Tuple) {
for i, typ := range tb.Desc.Types {
if !typ.Nullable && tb.fields[i] == nil {
panic("cannot write NULL to non-NULL field")
}
}
values := tb.fields[:tb.Desc.Count()]
tup = NewTuple(pool, values...)
tb.Recycle()
return
}
// Recycle resets the TupleBuilder so it can build a new Tuple.
func (tb *TupleBuilder) Recycle() {
tb.pos = 0
}
// Each Put* method below carves the next encoded-size slice out of tb.buf,
// records it as field i, and advances tb.pos. expectEncoding validates that
// the schema declares the matching encoding for column i.
// PutBool writes a bool to the ith field of the Tuple being built.
// (Booleans share the int8 encoding slot.)
func (tb *TupleBuilder) PutBool(i int, v bool) {
tb.Desc.expectEncoding(i, Int8Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+int8Size]
writeBool(tb.fields[i], v)
tb.pos += int8Size
}
// PutInt8 writes an int8 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutInt8(i int, v int8) {
tb.Desc.expectEncoding(i, Int8Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+int8Size]
WriteInt8(tb.fields[i], v)
tb.pos += int8Size
}
// PutUint8 writes a uint8 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutUint8(i int, v uint8) {
tb.Desc.expectEncoding(i, Uint8Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+uint8Size]
WriteUint8(tb.fields[i], v)
tb.pos += uint8Size
}
// PutInt16 writes an int16 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutInt16(i int, v int16) {
tb.Desc.expectEncoding(i, Int16Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+int16Size]
WriteInt16(tb.fields[i], v)
tb.pos += int16Size
}
// PutUint16 writes a uint16 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutUint16(i int, v uint16) {
tb.Desc.expectEncoding(i, Uint16Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+uint16Size]
WriteUint16(tb.fields[i], v)
tb.pos += uint16Size
}
// PutInt32 writes an int32 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutInt32(i int, v int32) {
tb.Desc.expectEncoding(i, Int32Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+int32Size]
WriteInt32(tb.fields[i], v)
tb.pos += int32Size
}
// PutUint32 writes a uint32 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutUint32(i int, v uint32) {
tb.Desc.expectEncoding(i, Uint32Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+uint32Size]
WriteUint32(tb.fields[i], v)
tb.pos += uint32Size
}
// PutInt64 writes an int64 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutInt64(i int, v int64) {
tb.Desc.expectEncoding(i, Int64Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+int64Size]
WriteInt64(tb.fields[i], v)
tb.pos += int64Size
}
// PutUint64 writes a uint64 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutUint64(i int, v uint64) {
tb.Desc.expectEncoding(i, Uint64Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+uint64Size]
WriteUint64(tb.fields[i], v)
tb.pos += uint64Size
}
// PutFloat32 writes a float32 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutFloat32(i int, v float32) {
tb.Desc.expectEncoding(i, Float32Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+float32Size]
WriteFloat32(tb.fields[i], v)
tb.pos += float32Size
}
// PutFloat64 writes a float64 to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutFloat64(i int, v float64) {
tb.Desc.expectEncoding(i, Float64Enc)
tb.fields[i] = tb.buf[tb.pos : tb.pos+float64Size]
WriteFloat64(tb.fields[i], v)
tb.pos += float64Size
}
// PutString writes a string to the ith field of the Tuple being built,
// using the column's collation.
func (tb *TupleBuilder) PutString(i int, v string) {
tb.Desc.expectEncoding(i, StringEnc)
sz := ByteSize(len(v))
tb.fields[i] = tb.buf[tb.pos : tb.pos+sz]
writeString(tb.fields[i], v, tb.Desc.Types[i].Coll)
tb.pos += sz
}
// PutBytes writes a []byte to the ith field of the Tuple being built.
func (tb *TupleBuilder) PutBytes(i int, v []byte) {
tb.Desc.expectEncoding(i, BytesEnc)
sz := ByteSize(len(v))
tb.fields[i] = tb.buf[tb.pos : tb.pos+sz]
writeBytes(tb.fields[i], v, tb.Desc.Types[i].Coll)
tb.pos += sz
}
// PutField writes an interface{} to the ith field of the Tuple being built,
// dispatching on the column's declared encoding and coercing the dynamic
// value via convInt/convUint. It panics for 24-bit encodings (unsupported)
// and for unknown encodings.
func (tb *TupleBuilder) PutField(i int, v interface{}) {
switch tb.Desc.Types[i].Enc {
case Int8Enc:
tb.PutInt8(i, int8(convInt(v)))
case Uint8Enc:
tb.PutUint8(i, uint8(convUint(v)))
case Int16Enc:
tb.PutInt16(i, int16(convInt(v)))
case Uint16Enc:
tb.PutUint16(i, uint16(convUint(v)))
case Int24Enc:
panic("24 bit")
case Uint24Enc:
panic("24 bit")
case Int32Enc:
tb.PutInt32(i, int32(convInt(v)))
case Uint32Enc:
tb.PutUint32(i, uint32(convUint(v)))
case Int64Enc:
tb.PutInt64(i, int64(convInt(v)))
case Uint64Enc:
tb.PutUint64(i, uint64(convUint(v)))
case Float32Enc:
tb.PutFloat32(i, v.(float32))
case Float64Enc:
tb.PutFloat64(i, v.(float64))
case StringEnc:
tb.PutString(i, v.(string))
case BytesEnc:
tb.PutBytes(i, v.([]byte))
default:
panic("unknown encoding")
}
}
// convInt widens any fixed-width integer value to int, panicking on every
// other dynamic type. Unsigned values outside the int range wrap per Go's
// conversion rules.
func convInt(v interface{}) int {
	if x, ok := v.(int8); ok {
		return int(x)
	}
	if x, ok := v.(uint8); ok {
		return int(x)
	}
	if x, ok := v.(int16); ok {
		return int(x)
	}
	if x, ok := v.(uint16); ok {
		return int(x)
	}
	if x, ok := v.(int32); ok {
		return int(x)
	}
	if x, ok := v.(uint32); ok {
		return int(x)
	}
	if x, ok := v.(int64); ok {
		return int(x)
	}
	if x, ok := v.(uint64); ok {
		return int(x)
	}
	panic("impossible conversion")
}
// convUint widens any fixed-width integer value to uint, panicking on every
// other dynamic type. Negative signed inputs wrap per Go's conversion rules.
func convUint(v interface{}) uint {
	if x, ok := v.(int8); ok {
		return uint(x)
	}
	if x, ok := v.(uint8); ok {
		return uint(x)
	}
	if x, ok := v.(int16); ok {
		return uint(x)
	}
	if x, ok := v.(uint16); ok {
		return uint(x)
	}
	if x, ok := v.(int32); ok {
		return uint(x)
	}
	if x, ok := v.(uint32); ok {
		return uint(x)
	}
	if x, ok := v.(int64); ok {
		return uint(x)
	}
	if x, ok := v.(uint64); ok {
		return uint(x)
	}
	panic("impossible conversion")
}
package pulse
import (
"fmt"
"strings"
"time"
"github.com/insolar/insolar/longbits"
"github.com/insolar/insolar/network/consensus/common/cryptkit"
)
// InvalidPulseEpoch marks pulse data that carries no valid epoch.
const InvalidPulseEpoch uint32 = 0

// EphemeralPulseEpoch marks ephemeral pulses (see newEphemeralData).
const EphemeralPulseEpoch = InvalidPulseEpoch + 1

// Compile-time check that Data satisfies DataReader.
var _ DataReader = &Data{}

// Data is a pulse number together with its fixed-size payload (DataExt).
type Data struct {
	PulseNumber Number
	DataExt
}

// DataHolder exposes a pulse plus the digest of its serialized form.
type DataHolder interface {
	GetPulseNumber() Number
	GetPulseData() Data
	GetPulseDataDigest() cryptkit.DigestHolder
}

// DataExt is the payload of a pulse: epoch, entropy, links to the
// neighboring pulses, and a Unix timestamp.
type DataExt struct {
	// ByteSize=44
	PulseEpoch uint32
	PulseEntropy longbits.Bits256
	// NextPulseDelta is the distance to the next pulse number; zero marks
	// an "expected" (not yet arrived) pulse.
	NextPulseDelta uint16
	// PrevPulseDelta is the distance to the previous pulse number; zero
	// marks a first pulse.
	PrevPulseDelta uint16
	Timestamp uint32
}

// DataReader is a read-only view over pulse data.
type DataReader interface {
	GetPulseNumber() Number
	GetStartOfEpoch() Number
	// GetPulseEntropy() [4]uint64
	GetNextPulseDelta() uint16
	GetPrevPulseDelta() uint16
	GetTimestamp() uint64
	IsExpectedPulse() bool
	IsFromEphemeral() bool
}
// NewFirstPulsarData builds the first pulsar pulse at the current time.
// PrevPulseDelta stays zero because a first pulse has no predecessor.
func NewFirstPulsarData(delta uint16, entropy longbits.Bits256) Data {
	return newPulsarData(OfNow(), delta, entropy)
}

// NewPulsarData builds a pulsar pulse at pn linked both ways via the
// next/prev deltas.
func NewPulsarData(pn Number, deltaNext uint16, deltaPrev uint16, entropy longbits.Bits256) Data {
	r := newPulsarData(pn, deltaNext, entropy)
	r.PrevPulseDelta = deltaPrev
	return r
}

// NewFirstEphemeralData builds the first ephemeral pulse at MinTimePulse.
func NewFirstEphemeralData() Data {
	return newEphemeralData(MinTimePulse)
}

// EntropyFunc supplies the entropy for a newly created pulsar pulse.
type EntropyFunc func() longbits.Bits256
// String renders the pulse as "<number>[@epoch][,±d | ,+next,-prev]".
// The epoch is shown only when it differs from the pulse number and is
// non-zero; a shared delta is collapsed into the "±" form.
func (r Data) String() string {
	var b strings.Builder
	fmt.Fprint(&b, r.PulseNumber)
	if ep := OfUint32(r.PulseEpoch); ep != r.PulseNumber && ep != 0 {
		fmt.Fprintf(&b, "@%d", ep)
	}
	switch {
	case r.NextPulseDelta == r.PrevPulseDelta:
		fmt.Fprintf(&b, ",±%d", r.NextPulseDelta)
	default:
		if r.NextPulseDelta > 0 {
			fmt.Fprintf(&b, ",+%d", r.NextPulseDelta)
		}
		if r.PrevPulseDelta > 0 {
			fmt.Fprintf(&b, ",-%d", r.PrevPulseDelta)
		}
	}
	return b.String()
}
// newPulsarData assembles a pulsar pulse at pn with the current wall-clock
// timestamp. The epoch is the pulse number itself; PrevPulseDelta is left
// zero for the caller to fill in when there is a predecessor.
func newPulsarData(pn Number, delta uint16, entropy longbits.Bits256) Data {
	if delta == 0 {
		panic("delta cant be zero")
	}
	return Data{
		PulseNumber: pn,
		DataExt: DataExt{
			PulseEpoch: pn.AsUint32(),
			PulseEntropy: entropy,
			Timestamp: uint32(time.Now().Unix()),
			NextPulseDelta: delta,
			PrevPulseDelta: 0,
		},
	}
}

// newEphemeralData assembles an ephemeral pulse at pn: the epoch is
// EphemeralPulseEpoch, the timestamp is zero, the next delta is fixed at 1,
// and the entropy is derived deterministically from the pulse number.
func newEphemeralData(pn Number) Data {
	s := Data{
		PulseNumber: pn,
		DataExt: DataExt{
			PulseEpoch: EphemeralPulseEpoch,
			Timestamp: 0,
			NextPulseDelta: 1,
			PrevPulseDelta: 0,
		},
	}
	fixedPulseEntropy(&s.PulseEntropy, s.PulseNumber)
	return s
}

/* This function has a fixed implementation and MUST remain unchanged as some elements of Consensus rely on identical behavior of this functions. */
func fixedPulseEntropy(v *longbits.Bits256, pn Number) {
	longbits.FillBitsWithStaticNoise(uint32(pn), (*v)[:])
}
// EnsurePulseData panics unless r is a well-formed pulse: a time pulse
// number, a special-or-time epoch, and a non-zero next-delta.
func (r Data) EnsurePulseData() {
	switch {
	case !r.PulseNumber.IsTimePulse():
		panic("incorrect pulse number")
	case !OfUint32(r.PulseEpoch).IsSpecialOrTimePulse():
		panic("incorrect pulse epoch")
	case r.NextPulseDelta == 0:
		panic("next delta can't be zero")
	}
}
// IsValidPulseData is the non-panicking counterpart of EnsurePulseData:
// a time pulse number, a special-or-time epoch, and a non-zero next-delta.
func (r Data) IsValidPulseData() bool {
	return r.PulseNumber.IsTimePulse() &&
		OfUint32(r.PulseEpoch).IsSpecialOrTimePulse() &&
		r.NextPulseDelta != 0
}
// IsEmpty reports whether the pulse number is unknown.
func (r Data) IsEmpty() bool {
	return r.PulseNumber.IsUnknown()
}

// IsEmptyWithEpoch reports whether the pulse number is unknown while the
// epoch matches the given value.
func (r Data) IsEmptyWithEpoch(epoch uint32) bool {
	return r.PulseNumber.IsUnknown() && r.PulseEpoch == epoch
}
// IsValidExpectedPulseData reports whether r is a well-formed "expected"
// pulse: a time pulse number, a special-or-time epoch, and no predecessor
// link yet (zero prev-delta).
func (r Data) IsValidExpectedPulseData() bool {
	return r.PulseNumber.IsTimePulse() &&
		OfUint32(r.PulseEpoch).IsSpecialOrTimePulse() &&
		r.PrevPulseDelta == 0
}
// EnsurePulsarData panics unless r is a valid pulse issued by a pulsar:
// the epoch must itself be a time pulse, on top of EnsurePulseData's checks.
func (r Data) EnsurePulsarData() {
	if !OfUint32(r.PulseEpoch).IsTimePulse() {
		panic("incorrect pulse epoch by pulsar")
	}
	r.EnsurePulseData()
}

// IsValidPulsarData is the non-panicking counterpart of EnsurePulsarData.
func (r Data) IsValidPulsarData() bool {
	if !OfUint32(r.PulseEpoch).IsTimePulse() {
		return false
	}
	return r.IsValidPulseData()
}

// EnsureEphemeralData panics unless r is a valid ephemeral pulse
// (epoch equal to EphemeralPulseEpoch, plus EnsurePulseData's checks).
func (r Data) EnsureEphemeralData() {
	if r.PulseEpoch != EphemeralPulseEpoch {
		panic("incorrect pulse epoch")
	}
	r.EnsurePulseData()
}

// IsValidEphemeralData is the non-panicking counterpart of EnsureEphemeralData.
func (r Data) IsValidEphemeralData() bool {
	if r.PulseEpoch != EphemeralPulseEpoch {
		return false
	}
	return r.IsValidPulseData()
}

// IsFromPulsar reports whether both the pulse number and the epoch are time
// pulses, i.e. the pulse was produced by a pulsar.
func (r Data) IsFromPulsar() bool {
	return r.PulseNumber.IsTimePulse() && OfUint32(r.PulseEpoch).IsTimePulse()
}

// IsFromEphemeral reports whether r is a time pulse carrying the ephemeral
// epoch.
func (r Data) IsFromEphemeral() bool {
	return r.PulseNumber.IsTimePulse() && r.PulseEpoch == EphemeralPulseEpoch
}
// GetStartOfEpoch returns the epoch (as a Number) for time pulses, and the
// pulse number itself otherwise.
func (r Data) GetStartOfEpoch() Number {
	ep := OfUint32(r.PulseEpoch)
	if r.PulseNumber.IsTimePulse() {
		return ep
	}
	return r.PulseNumber
}

// CreateNextPulse produces the immediate successor of r: ephemeral pulses
// chain deterministically, while pulsar pulses reuse r's next-delta and draw
// fresh entropy from entropyGen.
func (r Data) CreateNextPulse(entropyGen EntropyFunc) Data {
	if r.IsFromEphemeral() {
		return r.createNextEphemeralPulse()
	}
	return r.createNextPulsarPulse(r.NextPulseDelta, entropyGen)
}

// IsValidNext reports whether n is a well-formed immediate successor of r:
// the pulse numbers and deltas must interlock, and n must be valid for r's
// origin (pulsar or ephemeral). An expected pulse cannot have a successor.
func (r Data) IsValidNext(n Data) bool {
	if r.IsExpectedPulse() || r.GetNextPulseNumber() != n.PulseNumber || r.NextPulseDelta != n.PrevPulseDelta {
		return false
	}
	switch {
	case r.IsFromPulsar():
		return n.IsValidPulsarData()
	case r.IsFromEphemeral():
		return n.IsValidEphemeralData()
	}
	return n.IsValidPulseData()
}

// IsValidPrev is the mirror of IsValidNext: it reports whether p is a
// well-formed immediate predecessor of r. A first pulse has no predecessor.
func (r Data) IsValidPrev(p Data) bool {
	switch {
	case r.IsFirstPulse() || p.IsExpectedPulse() || p.GetNextPulseNumber() != r.PulseNumber || p.NextPulseDelta != r.PrevPulseDelta:
		return false
	case r.IsFromPulsar():
		return p.IsValidPulsarData()
	case r.IsFromEphemeral():
		return p.IsValidEphemeralData()
	default:
		return p.IsValidPulseData()
	}
}
// GetNextPulseNumber returns the number the next pulse is expected at.
// It panics when r is itself an expected pulse (zero next-delta).
func (r Data) GetNextPulseNumber() Number {
	if r.IsExpectedPulse() {
		panic("illegal state")
	}
	return r.PulseNumber.Next(r.NextPulseDelta)
}

// GetPrevPulseNumber returns the number of the preceding pulse.
// It panics when r is a first pulse (zero prev-delta).
func (r Data) GetPrevPulseNumber() Number {
	if r.IsFirstPulse() {
		panic("illegal state")
	}
	return r.PulseNumber.Prev(r.PrevPulseDelta)
}

// CreateNextExpected builds a placeholder for the not-yet-arrived successor:
// its next-delta is zero (the marker of an expected pulse) and the ephemeral
// epoch is carried over when r is ephemeral.
func (r Data) CreateNextExpected() Data {
	s := Data{
		PulseNumber: r.GetNextPulseNumber(),
		DataExt: DataExt{
			PrevPulseDelta: r.NextPulseDelta,
			NextPulseDelta: 0,
		},
	}
	if r.IsFromEphemeral() {
		s.PulseEpoch = r.PulseEpoch
	}
	return s
}

// CreateNextEphemeralPulse builds the ephemeral successor of r; it panics
// when r is not ephemeral.
func (r Data) CreateNextEphemeralPulse() Data {
	if !r.IsFromEphemeral() {
		panic("prev is not ephemeral")
	}
	return r.createNextEphemeralPulse()
}

// createNextEphemeralPulse is the unchecked ephemeral successor builder.
func (r Data) createNextEphemeralPulse() Data {
	s := newEphemeralData(r.GetNextPulseNumber())
	s.PrevPulseDelta = r.NextPulseDelta
	return s
}

// CreateNextPulsarPulse builds the pulsar successor of r; it panics when r
// is ephemeral.
func (r Data) CreateNextPulsarPulse(delta uint16, entropyGen EntropyFunc) Data {
	if r.IsFromEphemeral() {
		panic("prev is ephemeral")
	}
	return r.createNextPulsarPulse(delta, entropyGen)
}

// createNextPulsarPulse is the unchecked pulsar successor builder.
func (r Data) createNextPulsarPulse(delta uint16, entropyGen EntropyFunc) Data {
	s := newPulsarData(r.GetNextPulseNumber(), delta, entropyGen())
	s.PrevPulseDelta = r.NextPulseDelta
	return s
}
// GetPulseNumber returns the pulse number.
func (r Data) GetPulseNumber() Number {
	return r.PulseNumber
}

// GetNextPulseDelta returns the distance to the next pulse (0 if expected).
func (r Data) GetNextPulseDelta() uint16 {
	return r.NextPulseDelta
}

// GetPrevPulseDelta returns the distance to the previous pulse (0 if first).
func (r Data) GetPrevPulseDelta() uint16 {
	return r.PrevPulseDelta
}

// GetTimestamp returns the pulse's Unix timestamp widened to uint64.
func (r Data) GetTimestamp() uint64 {
	return uint64(r.Timestamp)
}

// IsExpectedPulse reports whether r is a placeholder for a pulse that has
// not arrived yet: a time pulse with a zero next-delta.
func (r Data) IsExpectedPulse() bool {
	return r.PulseNumber.IsTimePulse() && r.NextPulseDelta == 0
}

// IsFirstPulse reports whether r has no predecessor: a time pulse with a
// zero prev-delta.
func (r Data) IsFirstPulse() bool {
	return r.PulseNumber.IsTimePulse() && r.PrevPulseDelta == 0
}
package core
import (
"math"
)
// PointToLineCartesianDistance returns the perpendicular (minimum) distance
// from point p to the infinite line through l[0] and l[1] in Cartesian
// coordinates, using
//
//	|(y2-y1)x0 - (x2-x1)y0 + x2*y1 - y2*x1| / sqrt((y2-y1)^2 + (x2-x1)^2)
//
// https://en.m.wikipedia.org/wiki/Distance_from_a_point_to_a_line
//
// Bug fix: the denominator previously computed
// ((l[1].Y - l[1].Y) * 2) + (l[1].X - l[0].X*2) — a zero Y term doubled and
// an un-squared X term with a misplaced multiplier — instead of the sum of
// the squared coordinate deltas.
func PointToLineCartesianDistance(p Point, l Line) float64 {
	dy := l[1].Y - l[0].Y
	dx := l[1].X - l[0].X
	top := math.Abs(dy*p.X - dx*p.Y + l[1].X*l[0].Y - l[1].Y*l[0].X)
	bottom := math.Sqrt(dy*dy + dx*dx)
	return top / bottom
}
// PointToLineGeographicDistance gets the minimum distance from a point to a
// line segment in geographic coordinates (degrees in; distance in the units
// returned by PointToPointDistanceCosine).
// It projects the point onto the great circle through the segment endpoints;
// if the nearest great-circle point falls on the segment it is used,
// otherwise the closer endpoint wins.
func PointToLineGeographicDistance(p LatLng, l Line) float64 {
	// Segment endpoints converted from degrees to radians.
	var a = LatLng{
		Lat: DegToRad(l[0].Y),
		Lng: DegToRad(l[0].X),
	}
	var b = LatLng{
		Lat: DegToRad(l[1].Y),
		Lng: DegToRad(l[1].X),
	}
	var c = p.ConvertToRadian()
	// t is the point on the great circle through a and b nearest to c.
	var t = nearestPointGreatCircle(a, b, c)
	aPoint := a.ConvertToPoint()
	bPoint := b.ConvertToPoint()
	cPoint := c.ConvertToPoint()
	tPoint := t.ConvertToPoint()
	//If closest point is on the line use that.
	if onSegment(aPoint, bPoint, tPoint) {
		return PointToPointDistanceCosine(tPoint, cPoint)
	}
	//Otherwise just use start or end point whichever is closer.
	var distanceAC = PointToPointDistanceCosine(aPoint, cPoint)
	var distanceBC = PointToPointDistanceCosine(bPoint, cPoint)
	if distanceAC < distanceBC {
		return distanceAC
	}
	return distanceBC
}
// nearestPointGreatCircle returns the point on the great circle through a
// and b that is nearest to c (inputs in radians).
// G = a×b is the normal of the great-circle plane; F = c×G is normal to the
// plane through c and that pole; T = G×F points toward the nearest point,
// which is then normalized and scaled onto the Earth sphere.
func nearestPointGreatCircle(a, b, c LatLng) LatLng {
	var aCartesian = a.ConvertToXYZ()
	var bCartesian = b.ConvertToXYZ()
	var cCartesian = c.ConvertToXYZ()
	var G = vectorProduct(aCartesian, bCartesian)
	var F = vectorProduct(cCartesian, G)
	var t = vectorProduct(G, F)
	var norm = normalize(t)
	var multi = multiplyByScalar(norm, EarthRadius)
	var cart = multi.ConvertToLatLng()
	return cart
}
// vectorProduct returns the cross product a × b.
func vectorProduct(a, b Point3D) Point3D {
	var result = Point3D{
		X: a.Y*b.Z - a.Z*b.Y,
		Y: a.Z*b.X - a.X*b.Z,
		Z: a.X*b.Y - a.Y*b.X,
	}
	return result
}

// normalize scales t to unit length.
// NOTE(review): divides by zero for the zero vector — callers must not pass
// degenerate (parallel) inputs.
func normalize(t Point3D) Point3D {
	var length = math.Sqrt((t.X * t.X) + (t.Y * t.Y) + (t.Z * t.Z))
	var result = Point3D{
		X: t.X / length,
		Y: t.Y / length,
		Z: t.Z / length,
	}
	return result
}

// multiplyByScalar returns the vector scaled by k.
func multiplyByScalar(normalize Point3D, k float64) Point3D {
	var result = Point3D{
		X: normalize.X * k,
		Y: normalize.Y * k,
		Z: normalize.Z * k,
	}
	return result
}
//Needs Radians -- Checks if point is on the line by substracting distances to check if total lenght - two sub lenghts are near enough to be zero.
func onSegment(a, b, t Point) bool {
var diff = math.Abs(PointToPointDistanceCosine(a, b) - PointToPointDistanceCosine(a, t) - PointToPointDistanceCosine(b, t))
if diff < 0.1 {
return true
}
return false
} | core/GetPointToLineDistance.go | 0.882713 | 0.695926 | GetPointToLineDistance.go | starcoder |
package function
import (
"errors"
"kanzi"
)
// Zero Length Encoding is a simple encoding algorithm by Wheeler, closely
// related to Run Length Encoding. The main difference is that only runs of
// zero bytes are processed, and the run length is encoded one bit per byte.
// This algorithm is well adapted to post BWT/MTFT data.

const (
	ZRLT_MAX_RUN = int(1<<31) - 1
)

// ZRLT carries the configured block size; zero means "use the full input".
type ZRLT struct {
	size uint
}

// NewZRLT creates a ZRLT transform for blocks of sz bytes.
func NewZRLT(sz uint) (*ZRLT, error) {
	return &ZRLT{size: sz}, nil
}

// Size returns the configured block size.
func (this *ZRLT) Size() uint {
	return this.size
}
// Forward applies zero-length encoding to src, writing the result to dst.
// Runs of zero bytes are replaced by the run length emitted bit by bit (one
// byte per bit, most significant bit implied); every other byte is shifted
// up by one, with original values 0xFE/0xFF escaped as a 0xFF-prefixed pair.
// Returns the number of bytes consumed and produced.
func (this *ZRLT) Forward(src, dst []byte) (uint, uint, error) {
	if src == nil {
		return uint(0), uint(0), errors.New("Invalid null source buffer")
	}
	if dst == nil {
		return uint(0), uint(0), errors.New("Invalid null destination buffer")
	}
	if kanzi.SameByteSlices(src, dst, false) {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	srcEnd := this.size
	if this.size == 0 {
		srcEnd = uint(len(src))
	}
	dstEnd := uint(len(dst))
	// dstEnd2 is the room check for the two-byte 0xFE/0xFF escape sequence.
	dstEnd2 := dstEnd - 2
	// runLength is kept as 1 + the number of zeros seen, so the emitted bit
	// pattern always starts with a (dropped) 1 bit.
	runLength := 1
	srcIdx := uint(0)
	dstIdx := uint(0)
	for srcIdx < srcEnd && dstIdx < dstEnd {
		val := src[srcIdx]
		if val == 0 {
			runLength++
			srcIdx++
			// Keep accumulating the run until input ends or the cap is hit.
			if srcIdx < srcEnd && runLength < ZRLT_MAX_RUN {
				continue
			}
		}
		if runLength > 1 {
			// Encode length
			log2 := uint(1)
			for runLength>>log2 > 1 {
				log2++
			}
			if dstIdx >= dstEnd-log2 {
				break
			}
			// Write every bit as a byte except the most significant one
			for log2 > 0 {
				log2--
				dst[dstIdx] = byte((runLength >> log2) & 1)
				dstIdx++
			}
			runLength = 1
			continue
		}
		if val >= 0xFE {
			// Escape: 0xFE -> FF 00, 0xFF -> FF 01.
			if dstIdx >= dstEnd2 {
				break
			}
			dst[dstIdx] = 0xFF
			dstIdx++
			dst[dstIdx] = val - 0xFE
			dstIdx++
		} else {
			// Shift up by one so 0 and 1 stay reserved for run-length bits.
			dst[dstIdx] = val + 1
			dstIdx++
		}
		srcIdx++
	}
	// Incomplete input consumption or a pending run means dst ran out.
	if srcIdx != srcEnd || runLength != 1 {
		return srcIdx, dstIdx, errors.New("Output buffer is too small")
	}
	return srcIdx, dstIdx, nil
}
// Inverse reverses Forward: byte values 0 and 1 are run-length bits that
// rebuild a zero-run count (with the most significant 1 bit re-applied),
// 0xFF starts a two-byte escape restoring 0xFE/0xFF, and every other byte
// is shifted down by one.
// Returns the number of bytes consumed and produced.
func (this *ZRLT) Inverse(src, dst []byte) (uint, uint, error) {
	if src == nil {
		return uint(0), uint(0), errors.New("Invalid null source buffer")
	}
	if dst == nil {
		return uint(0), uint(0), errors.New("Invalid null destination buffer")
	}
	if kanzi.SameByteSlices(src, dst, false) {
		return 0, 0, errors.New("Input and output buffers cannot be equal")
	}
	srcEnd := this.size
	if this.size == 0 {
		srcEnd = uint(len(src))
	}
	dstEnd := uint(len(dst))
	// runLength > 1 means (runLength - 1) zero bytes still owed to dst.
	runLength := 1
	srcIdx := uint(0)
	dstIdx := uint(0)
	for srcIdx < srcEnd && dstIdx < dstEnd {
		if runLength > 1 {
			runLength--
			dst[dstIdx] = 0
			dstIdx++
			continue
		}
		val := src[srcIdx]
		if val <= 1 {
			// Generate the run length bit by bit (but force MSB)
			runLength = 1
			for {
				runLength = (runLength << 1) | int(val)
				srcIdx++
				if srcIdx >= srcEnd {
					break
				}
				val = src[srcIdx]
				if val > 1 {
					break
				}
			}
			continue
		}
		// Regular data processing
		if val == 0xFF {
			// Two-byte escape: FF 00 -> 0xFE, FF 01 -> 0xFF.
			srcIdx++
			if srcIdx >= srcEnd {
				break
			}
			dst[dstIdx] = 0xFE + src[srcIdx]
		} else {
			dst[dstIdx] = val - 1
		}
		dstIdx++
		srcIdx++
	}
	// If runLength is not 1, add trailing 0s
	end := dstIdx + uint(runLength) - 1
	if end > dstEnd {
		return srcIdx, dstIdx, errors.New("Output buffer is too small")
	}
	for dstIdx < end {
		dst[dstIdx] = 0
		dstIdx++
	}
	if srcIdx < srcEnd {
		return srcIdx, dstIdx, errors.New("Output buffer is too small")
	}
	return srcIdx, dstIdx, nil
}
// MaxEncodedLen returns -1: the required encoding output buffer size is not
// known in advance for this transform.
func (this ZRLT) MaxEncodedLen(srcLen int) int {
	return -1
}
package stat
import "math"
// Stat accumulates streaming ("online") summary statistics: each value is
// observed exactly once, and every update costs O(1) time and memory.
type Stat struct {
	n    int     // count of observed values
	min  float64 // smallest value seen so far
	max  float64 // largest value seen so far
	sum  float64 // running sum
	sum2 float64 // running sum of squares
}

// NewStat returns an empty Stat ready to accept values.
func NewStat() *Stat {
	s := Stat{min: math.MaxFloat64, max: -math.MaxFloat64}
	return &s
}

// Add folds a single value into the summary statistics.
func (s *Stat) Add(x float64) {
	s.n++
	s.sum += x
	s.sum2 += x * x
	if x < s.min {
		s.min = x
	}
	if x > s.max {
		s.max = x
	}
}

// AddMany folds each of the given values into the summary statistics.
func (s *Stat) AddMany(xs ...float64) {
	for _, v := range xs {
		s.Add(v)
	}
}

// GetCount reports how many values have been observed.
func (s *Stat) GetCount() int {
	return s.n
}

// GetSum reports the sum of all observed values.
func (s *Stat) GetSum() float64 {
	return s.sum
}

// GetMean reports the arithmetic mean, or 0 when no values were observed.
func (s *Stat) GetMean() float64 {
	if s.n > 0 {
		return s.sum / float64(s.n)
	}
	return 0.0
}

// GetMin reports the minimum observed value, or 0 when empty.
func (s *Stat) GetMin() float64 {
	if s.n > 0 {
		return s.min
	}
	return 0.0
}

// GetMax reports the maximum observed value, or 0 when empty.
func (s *Stat) GetMax() float64 {
	if s.n > 0 {
		return s.max
	}
	return 0.0
}

// GetVarP reports the population variance (dividing by n), or 0 when empty.
// It uses the sum-of-squares ("naive") formula; see Welford's online
// algorithm for a numerically sturdier alternative:
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
func (s *Stat) GetVarP() float64 {
	if s.n > 0 {
		n := float64(s.n)
		return (s.sum2 - (s.sum*s.sum)/n) / (n)
	}
	return 0.0
}

// GetVarS reports the sample variance (dividing by n-1), or 0 with fewer
// than two observations.
func (s *Stat) GetVarS() float64 {
	if s.n > 1 {
		n := float64(s.n)
		return (s.sum2 - (s.sum*s.sum)/n) / (n - 1)
	}
	return 0.0
}

// GetStdDevP reports the population standard deviation.
func (s *Stat) GetStdDevP() float64 {
	return math.Sqrt(s.GetVarP())
}

// GetStdDevS reports the sample standard deviation.
func (s *Stat) GetStdDevS() float64 {
	return math.Sqrt(s.GetVarS())
}
package MSFStore
import (
"bytes"
"encoding/gob"
"fmt"
"sort"
"strconv"
)
// Histogram is, for now, a map store implementing something akin to the
// HDRHistogram idea, meaning it's high-accuracy and has few restrictions.
// It can be an issue for space, as it will get bigger as more keys are added.
type Histogram struct {
	// Resolution is the number of significant digits kept when projecting
	// a value onto its register key.
	Resolution int
	// Registers maps projected value strings to accumulated counts; string
	// keys avoid comparing floats for equality.
	Registers map[string]float64
}

// New returns an empty Histogram at the given resolution.
// (Comment previously referred to a non-existent "NewHistogram".)
func New(Resolution int) Histogram {
	return Histogram{
		Resolution: Resolution,
		Registers:  make(map[string]float64),
	}
}

// project maps a value onto its register key: the value rendered in
// scientific notation with Resolution digits after the decimal point.
func (m Histogram) project(x float64) string {
	return fmt.Sprintf("%."+strconv.Itoa(m.Resolution)+"e", x)
}

// Insert adds count observations at value val and returns the updated
// Histogram, lazily allocating the register map when needed.
// The previous zero-initializing lookup branch was redundant: adding to a
// missing key's zero value is exactly what += does.
func (m Histogram) Insert(val float64, count float64) Histogram {
	if m.Registers == nil {
		m.Registers = make(map[string]float64)
	}
	m.Registers[m.project(val)] += count
	return m
}

// Read returns the accumulated count at val's register (0 when absent).
func (m Histogram) Read(val float64) float64 {
	return m.Registers[m.project(val)]
}
// Min returns the pointwise min of the two histograms. Keys absent from
// either side are omitted from the result (their min against a zero count
// is zero).
func (m Histogram) Min(o Histogram) Histogram {
	out := New(m.Resolution)
	for key, mv := range m.Registers {
		ov, ok := o.Registers[key]
		if !ok {
			continue
		}
		if ov < mv {
			out.Registers[key] = ov
		} else {
			out.Registers[key] = mv
		}
	}
	return out
}
// RawHist contains a gonum-compatible array pair.
type RawHist struct {
	Location, Weight []float64
}

// ToRawHist gives an interchangeable format for histograms as a pair of
// arrays: register values in ascending order and their matching weights.
// Keys are produced by project and therefore always parse; a failed parse
// is skipped, leaving a zero slot at the tail of both arrays.
func (m Histogram) ToRawHist() RawHist {
	locs := make([]float64, len(m.Registers))
	wts := make([]float64, len(m.Registers))
	index := 0
	for key := range m.Registers {
		parsed, err := strconv.ParseFloat(key, 64)
		if err != nil {
			continue
		}
		locs[index] = parsed
		index++
	}
	sort.Float64s(locs)
	// Re-project each sorted location to recover its register key; project
	// round-trips values that were themselves produced by project.
	for ii, xx := range locs {
		wts[ii] = m.Registers[m.project(xx)]
	}
	return RawHist{locs, wts}
}
// FromRawHist rebuilds a Histogram at the given resolution from an
// interchange pair, re-projecting each location onto its register.
// Insert's returned copy is discarded; the update still lands because the
// Registers map allocated by New is shared with the copy.
func FromRawHist(x RawHist, Resolution int) Histogram {
	m := New(Resolution)
	locs := x.Location
	wts := x.Weight
	for ii, xx := range locs {
		m.Insert(xx, wts[ii])
	}
	return m
}
// Combine adds two hists together into a fresh Histogram carrying m's
// resolution. Keys are parsed back to floats and re-projected, so combining
// histograms of different resolutions re-buckets x's entries at m's
// resolution.
func (m Histogram) Combine(x Histogram) Histogram {
	out := New(m.Resolution)
	for key, val := range x.Registers {
		usekey, _ := strconv.ParseFloat(key, 64)
		out.Insert(usekey, val)
	}
	for key, val := range m.Registers {
		usekey, _ := strconv.ParseFloat(key, 64)
		out.Insert(usekey, val)
	}
	return out
}
// Total gives the total of all elements in the histogram.
func (m Histogram) Total() float64 {
	var sum float64
	for _, count := range m.Registers {
		sum += count
	}
	return sum
}
// Cancel subtracts the argument from the base. It's called cancel and not
// diff or subtract because enjoy having negative values in your hists if
// you're not careful: registers present in x but not in m go negative.
func (m Histogram) Cancel(x Histogram) Histogram {
	out := New(m.Resolution)
	for key, val := range m.Registers {
		usekey, _ := strconv.ParseFloat(key, 64)
		out.Insert(usekey, val)
	}
	for key, val := range x.Registers {
		usekey, _ := strconv.ParseFloat(key, 64)
		out.Insert(usekey, -val)
	}
	return out
}
// Serialize turns a Histogram into bytes by gob-encoding its RawHist form.
// NOTE(review): the gob encode error is deliberately discarded here —
// consider surfacing it.
func (m Histogram) Serialize() []byte {
	var outbytes bytes.Buffer
	enc := gob.NewEncoder(&outbytes)
	_ = enc.Encode(m.ToRawHist())
	return outbytes.Bytes()
}

// Deserialize is the inverse of Serialize, rebuilding a Histogram at the
// given resolution. On a decode error the (partial or empty) histogram is
// still returned alongside the error.
func Deserialize(input []byte, res int) (Histogram, error) {
	var inbytes bytes.Buffer
	var h RawHist
	inbytes.Write(input)
	dec := gob.NewDecoder(&inbytes)
	err := dec.Decode(&h)
	out := FromRawHist(h, res)
	return out, err
}
// Package image implements a basic 2-D image library.
package image
// A Config consists of an image's color model and dimensions.
type Config struct {
	ColorModel ColorModel
	// Width and Height are in pixels.
	Width, Height int
}

// An Image is a finite rectangular grid of Colors drawn from a ColorModel.
type Image interface {
	// ColorModel returns the Image's ColorModel.
	ColorModel() ColorModel
	// Bounds returns the domain for which At can return non-zero color.
	// The bounds do not necessarily contain the point (0, 0).
	Bounds() Rectangle
	// At returns the color of the pixel at (x, y).
	// At(Bounds().Min.X, Bounds().Min.Y) returns the upper-left pixel of the grid.
	// At(Bounds().Max.X-1, Bounds().Max.Y-1) returns the lower-right one.
	At(x, y int) Color
}
// An RGBA is an in-memory image of RGBAColor values.
type RGBA struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []RGBAColor
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *RGBA) ColorModel() ColorModel { return RGBAColorModel }

func (p *RGBA) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *RGBA) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return RGBAColor{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the RGBA color model.
// Out-of-bounds writes are silently dropped.
func (p *RGBA) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toRGBAColor(c).(RGBAColor)
}

// SetRGBA stores c at (x, y) without a color-model conversion.
func (p *RGBA) SetRGBA(x, y int, c RGBAColor) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *RGBA) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	// Walk one row at a time: [i0, i1) spans the row's pixels within Pix.
	base := p.Rect.Min.Y * p.Stride
	i0, i1 := base+p.Rect.Min.X, base+p.Rect.Max.X
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, c := range p.Pix[i0:i1] {
			if c.A != 0xff {
				return false
			}
		}
		i0 += p.Stride
		i1 += p.Stride
	}
	return true
}

// NewRGBA returns a new RGBA with the given width and height.
func NewRGBA(w, h int) *RGBA {
	buf := make([]RGBAColor, w*h)
	return &RGBA{buf, w, Rectangle{ZP, Point{w, h}}}
}
// An RGBA64 is an in-memory image of RGBA64Color values.
type RGBA64 struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []RGBA64Color
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *RGBA64) ColorModel() ColorModel { return RGBA64ColorModel }

func (p *RGBA64) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *RGBA64) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return RGBA64Color{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the RGBA64 color model.
// Out-of-bounds writes are silently dropped.
func (p *RGBA64) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toRGBA64Color(c).(RGBA64Color)
}

// SetRGBA64 stores c at (x, y) without a color-model conversion.
func (p *RGBA64) SetRGBA64(x, y int, c RGBA64Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *RGBA64) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	// Walk one row at a time: [i0, i1) spans the row's pixels within Pix.
	base := p.Rect.Min.Y * p.Stride
	i0, i1 := base+p.Rect.Min.X, base+p.Rect.Max.X
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, c := range p.Pix[i0:i1] {
			if c.A != 0xffff {
				return false
			}
		}
		i0 += p.Stride
		i1 += p.Stride
	}
	return true
}

// NewRGBA64 returns a new RGBA64 with the given width and height.
func NewRGBA64(w, h int) *RGBA64 {
	pix := make([]RGBA64Color, w*h)
	return &RGBA64{pix, w, Rectangle{ZP, Point{w, h}}}
}
// An NRGBA is an in-memory image of NRGBAColor values.
type NRGBA struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []NRGBAColor
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *NRGBA) ColorModel() ColorModel { return NRGBAColorModel }

func (p *NRGBA) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *NRGBA) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return NRGBAColor{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the NRGBA color model.
// Out-of-bounds writes are silently dropped.
func (p *NRGBA) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toNRGBAColor(c).(NRGBAColor)
}

// SetNRGBA stores c at (x, y) without a color-model conversion.
func (p *NRGBA) SetNRGBA(x, y int, c NRGBAColor) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *NRGBA) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	// Walk one row at a time: [i0, i1) spans the row's pixels within Pix.
	base := p.Rect.Min.Y * p.Stride
	i0, i1 := base+p.Rect.Min.X, base+p.Rect.Max.X
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, c := range p.Pix[i0:i1] {
			if c.A != 0xff {
				return false
			}
		}
		i0 += p.Stride
		i1 += p.Stride
	}
	return true
}

// NewNRGBA returns a new NRGBA with the given width and height.
func NewNRGBA(w, h int) *NRGBA {
	pix := make([]NRGBAColor, w*h)
	return &NRGBA{pix, w, Rectangle{ZP, Point{w, h}}}
}
// An NRGBA64 is an in-memory image of NRGBA64Color values.
type NRGBA64 struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []NRGBA64Color
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *NRGBA64) ColorModel() ColorModel { return NRGBA64ColorModel }

func (p *NRGBA64) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *NRGBA64) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return NRGBA64Color{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the NRGBA64 color model.
// Out-of-bounds writes are silently dropped.
func (p *NRGBA64) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toNRGBA64Color(c).(NRGBA64Color)
}

// SetNRGBA64 stores c at (x, y) without a color-model conversion.
func (p *NRGBA64) SetNRGBA64(x, y int, c NRGBA64Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *NRGBA64) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	// Walk one row at a time: [i0, i1) spans the row's pixels within Pix.
	base := p.Rect.Min.Y * p.Stride
	i0, i1 := base+p.Rect.Min.X, base+p.Rect.Max.X
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, c := range p.Pix[i0:i1] {
			if c.A != 0xffff {
				return false
			}
		}
		i0 += p.Stride
		i1 += p.Stride
	}
	return true
}

// NewNRGBA64 returns a new NRGBA64 with the given width and height.
func NewNRGBA64(w, h int) *NRGBA64 {
	pix := make([]NRGBA64Color, w*h)
	return &NRGBA64{pix, w, Rectangle{ZP, Point{w, h}}}
}
// An Alpha is an in-memory image of AlphaColor values.
type Alpha struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []AlphaColor
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *Alpha) ColorModel() ColorModel { return AlphaColorModel }

func (p *Alpha) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *Alpha) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return AlphaColor{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the Alpha color model.
// Out-of-bounds writes are silently dropped.
func (p *Alpha) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toAlphaColor(c).(AlphaColor)
}

// SetAlpha stores c at (x, y) without a color-model conversion.
func (p *Alpha) SetAlpha(x, y int, c AlphaColor) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Alpha) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	// Walk one row at a time: [i0, i1) spans the row's pixels within Pix.
	base := p.Rect.Min.Y * p.Stride
	i0, i1 := base+p.Rect.Min.X, base+p.Rect.Max.X
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, c := range p.Pix[i0:i1] {
			if c.A != 0xff {
				return false
			}
		}
		i0 += p.Stride
		i1 += p.Stride
	}
	return true
}

// NewAlpha returns a new Alpha with the given width and height.
func NewAlpha(w, h int) *Alpha {
	pix := make([]AlphaColor, w*h)
	return &Alpha{pix, w, Rectangle{ZP, Point{w, h}}}
}
// An Alpha16 is an in-memory image of Alpha16Color values.
type Alpha16 struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []Alpha16Color
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *Alpha16) ColorModel() ColorModel { return Alpha16ColorModel }

func (p *Alpha16) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *Alpha16) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return Alpha16Color{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the Alpha16 color model.
// Out-of-bounds writes are silently dropped.
func (p *Alpha16) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toAlpha16Color(c).(Alpha16Color)
}

// SetAlpha16 stores c at (x, y) without a color-model conversion.
func (p *Alpha16) SetAlpha16(x, y int, c Alpha16Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
func (p *Alpha16) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	// Walk one row at a time: [i0, i1) spans the row's pixels within Pix.
	base := p.Rect.Min.Y * p.Stride
	i0, i1 := base+p.Rect.Min.X, base+p.Rect.Max.X
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, c := range p.Pix[i0:i1] {
			if c.A != 0xffff {
				return false
			}
		}
		i0 += p.Stride
		i1 += p.Stride
	}
	return true
}

// NewAlpha16 returns a new Alpha16 with the given width and height.
func NewAlpha16(w, h int) *Alpha16 {
	pix := make([]Alpha16Color, w*h)
	return &Alpha16{pix, w, Rectangle{ZP, Point{w, h}}}
}
// A Gray is an in-memory image of GrayColor values.
type Gray struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []GrayColor
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *Gray) ColorModel() ColorModel { return GrayColorModel }

func (p *Gray) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *Gray) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return GrayColor{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the Gray color model.
// Out-of-bounds writes are silently dropped.
func (p *Gray) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toGrayColor(c).(GrayColor)
}

// SetGray stores c at (x, y) without a color-model conversion.
func (p *Gray) SetGray(x, y int, c GrayColor) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
// Gray pixels carry no alpha, so a Gray image is always opaque.
func (p *Gray) Opaque() bool {
	return true
}

// NewGray returns a new Gray with the given width and height.
func NewGray(w, h int) *Gray {
	pix := make([]GrayColor, w*h)
	return &Gray{pix, w, Rectangle{ZP, Point{w, h}}}
}
// A Gray16 is an in-memory image of Gray16Color values.
type Gray16 struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []Gray16Color
	// Stride is the Pix index distance between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
}

func (p *Gray16) ColorModel() ColorModel { return Gray16ColorModel }

func (p *Gray16) Bounds() Rectangle { return p.Rect }

// At returns the pixel at (x, y); out-of-bounds reads yield the zero color.
func (p *Gray16) At(x, y int) Color {
	if !p.Rect.Contains(Point{x, y}) {
		return Gray16Color{}
	}
	return p.Pix[y*p.Stride+x]
}

// Set stores c at (x, y) after converting it to the Gray16 color model.
// Out-of-bounds writes are silently dropped.
func (p *Gray16) Set(x, y int, c Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = toGray16Color(c).(Gray16Color)
}

// SetGray16 stores c at (x, y) without a color-model conversion.
func (p *Gray16) SetGray16(x, y int, c Gray16Color) {
	if !p.Rect.Contains(Point{x, y}) {
		return
	}
	p.Pix[y*p.Stride+x] = c
}

// Opaque scans the entire image and returns whether or not it is fully opaque.
// Gray16 pixels carry no alpha, so a Gray16 image is always opaque.
func (p *Gray16) Opaque() bool {
	return true
}

// NewGray16 returns a new Gray16 with the given width and height.
func NewGray16(w, h int) *Gray16 {
	pix := make([]Gray16Color, w*h)
	return &Gray16{pix, w, Rectangle{ZP, Point{w, h}}}
}
// A PalettedColorModel represents a fixed palette of colors.
type PalettedColorModel []Color
// diff returns the absolute difference |a - b| of two unsigned values.
func diff(a, b uint32) uint32 {
	if a < b {
		return b - a
	}
	return a - b
}
// Convert returns the palette color closest to c in Euclidean R,G,B space,
// or nil for an empty palette. Alpha is ignored in the comparison.
func (p PalettedColorModel) Convert(c Color) Color {
	if len(p) == 0 {
		return nil
	}
	cr, cg, cb, _ := c.RGBA()
	// Shift by 1 bit to avoid potential uint32 overflow in sum-squared-difference.
	cr >>= 1
	cg >>= 1
	cb >>= 1
	result := Color(nil)
	bestSSD := uint32(1<<32 - 1)
	for _, v := range p {
		vr, vg, vb, _ := v.RGBA()
		vr >>= 1
		vg >>= 1
		vb >>= 1
		dr, dg, db := diff(cr, vr), diff(cg, vg), diff(cb, vb)
		ssd := (dr * dr) + (dg * dg) + (db * db)
		if ssd < bestSSD {
			bestSSD = ssd
			result = v
		}
	}
	return result
}
// A Paletted is an in-memory image backed by a 2-D slice of uint8 values and a PalettedColorModel.
type Paletted struct {
	// Pix holds the image's pixels. The pixel at (x, y) is Pix[y*Stride+x].
	Pix []uint8
	// Stride is the Pix offset between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect Rectangle
	// Palette is the image's palette.
	Palette PalettedColorModel
}

// ColorModel returns the image's palette as its color model.
func (p *Paletted) ColorModel() ColorModel { return p.Palette }

// Bounds returns the domain for which At can return a meaningful color.
func (p *Paletted) Bounds() Rectangle { return p.Rect }
// At returns the palette color of the pixel at (x, y). An empty palette
// yields nil; out-of-bounds points yield the first palette entry.
func (p *Paletted) At(x, y int) Color {
	switch {
	case len(p.Palette) == 0:
		return nil
	case !p.Rect.Contains(Point{x, y}):
		return p.Palette[0]
	}
	return p.Palette[p.Pix[y*p.Stride+x]]
}
// ColorIndexAt returns the palette index of the pixel at (x, y), or 0
// for out-of-bounds points.
func (p *Paletted) ColorIndexAt(x, y int) uint8 {
	if p.Rect.Contains(Point{x, y}) {
		return p.Pix[y*p.Stride+x]
	}
	return 0
}

// SetColorIndex stores index as the pixel at (x, y). Out-of-bounds
// points are silently ignored.
func (p *Paletted) SetColorIndex(x, y int, index uint8) {
	if p.Rect.Contains(Point{x, y}) {
		p.Pix[y*p.Stride+x] = index
	}
}
// Opaque scans the palette and reports whether every entry is fully
// opaque (alpha == 0xffff), which makes the whole image opaque.
func (p *Paletted) Opaque() bool {
	for _, entry := range p.Palette {
		if _, _, _, alpha := entry.RGBA(); alpha != 0xffff {
			return false
		}
	}
	return true
}
// NewPaletted returns a new Paletted with the given width, height and palette.
func NewPaletted(w, h int, m PalettedColorModel) *Paletted {
pix := make([]uint8, w*h)
return &Paletted{pix, w, Rectangle{ZP, Point{w, h}}, m}
} | src/pkg/image/image.go | 0.890011 | 0.759448 | image.go | starcoder |
package main
import (
"fmt"
"math"
"time"
)
// maxDelay is the largest collect-to-report delay, in days, tracked in
// its own histogram bucket; longer delays share the overflow bucket.
const maxDelay = 28 // max collect-to-report delay to track in days

// stats accumulates test counts and reporting-delay histograms for one
// reporting period.
type stats struct {
	pos, neg, other int              // number of molecular tests by result
	ab, ag, unk     int              // number of serological, antigen, and unknown tests
	agePos, ageNeg  map[ageRange]int // molecular results grouped by patient age
	delays, posDelays, negDelays *hist // delays for total, positive, and negative molecular results
}
// newStats returns an empty stats value with its maps and histograms
// initialized and ready for update and add.
func newStats() *stats {
	s := &stats{
		agePos: map[ageRange]int{},
		ageNeg: map[ageRange]int{},
	}
	s.delays = newHist(maxDelay)
	s.posDelays = newHist(maxDelay)
	s.negDelays = newHist(maxDelay)
	return s
}
// String formats the counts as "pos neg other pct% [p0 p25 p50 p75 p100]",
// where pct is the positivity rate and the bracketed values are delay
// percentiles in days. With no conclusive results the rate is shown as
// 0.0% instead of the NaN the raw division would produce.
func (s stats) String() string {
	pct := 0.0
	if total := s.total(); total > 0 {
		pct = 100 * float64(s.pos) / float64(total)
	}
	str := fmt.Sprintf("%4d %4d %2d %4.1f%%", s.pos, s.neg, s.other, pct)
	str += fmt.Sprintf(" [%d %d %d %d %d]",
		s.delayPct(0), s.delayPct(25), s.delayPct(50), s.delayPct(75), s.delayPct(100))
	return str
}
// update incorporates a single test into s. Molecular tests are broken
// out by result and patient age; other test types are only counted.
func (s *stats) update(t testType, res result, ar ageRange, delay int) {
	switch t {
	case serological:
		s.ab++
	case antigen:
		s.ag++
	case unknownType:
		s.unk++
	case molecular:
		if res == positive {
			s.pos++
			s.agePos[ar]++
			s.posDelays.inc(delay)
		} else if res == negative {
			s.neg++
			s.ageNeg[ar]++
			s.negDelays.inc(delay)
		} else {
			s.other++
		}
		// The combined delay histogram counts every molecular test,
		// conclusive or not.
		s.delays.inc(delay)
	}
}
// total returns the number of conclusive molecular results (positive
// plus negative); inconclusive "other" results are excluded.
func (s *stats) total() int {
	return s.pos + s.neg // ignore useless 'other' results
}

// delayPct returns the pct-th percentile of the collect-to-report delay
// (in days) over all molecular tests.
func (s *stats) delayPct(pct float64) int {
	return s.delays.percentile(pct)
}

// posDelayPct returns the pct-th delay percentile over positive tests.
func (s *stats) posDelayPct(pct float64) int {
	return s.posDelays.percentile(pct)
}

// negDelayPct returns the pct-th delay percentile over negative tests.
func (s *stats) negDelayPct(pct float64) int {
	return s.negDelays.percentile(pct)
}
// estInf returns the estimated number of new infections using Youyang Gu's method
// described at https://covid19-projections.com/estimating-true-infections/.
// With no conclusive results it returns 0; the raw formula would compute
// 0/0 and feed NaN through math.Round.
func (s *stats) estInf() int {
	total := s.total()
	if total == 0 {
		return 0
	}
	posRate := float64(s.pos) / float64(total)
	return int(math.Round(float64(s.pos) * (16*math.Pow(posRate, 0.5) + 2.5)))
}
// add incorporates o into s. A nil o is a no-op. Missing age buckets in
// o read as zero, so every bucket in the full range is safe to access.
func (s *stats) add(o *stats) {
	if o == nil {
		return
	}
	s.pos += o.pos
	s.neg += o.neg
	s.other += o.other
	s.ab += o.ab
	s.ag += o.ag
	s.unk += o.unk
	s.delays.add(o.delays)
	s.posDelays.add(o.posDelays)
	s.negDelays.add(o.negDelays)
	for ar := ageMin; ar <= ageMax; ar++ {
		s.agePos[ar] += o.agePos[ar]
		s.ageNeg[ar] += o.ageNeg[ar]
	}
}

// scale multiplies s's values by sc, rounding every count to the
// nearest integer.
func (s *stats) scale(sc float64) {
	// rs rounds-and-scales a single integer count.
	rs := func(v int) int { return int(math.Round(sc * float64(v))) }
	s.pos = rs(s.pos)
	s.neg = rs(s.neg)
	s.other = rs(s.other)
	s.ab = rs(s.ab)
	s.ag = rs(s.ag)
	s.unk = rs(s.unk)
	s.delays.scale(sc)
	s.posDelays.scale(sc)
	s.negDelays.scale(sc)
	for ar := ageMin; ar <= ageMax; ar++ {
		s.agePos[ar] = rs(s.agePos[ar])
		s.ageNeg[ar] = rs(s.ageNeg[ar])
	}
}
// hist is a fixed-range integer histogram of delay values in days.
type hist struct {
	counts []int // bucketed counts
	total  int   // total number of tests in counts
}

// newHist returns a histogram with buckets for values 0..max plus one
// overflow bucket for larger values.
func newHist(max int) *hist {
	return &hist{counts: make([]int, max+2)} // extra for 0 and for overflow
}
// inc adds one observation of v to the histogram. Negative values are
// ignored; values past the last bucket land in the overflow bucket.
func (h *hist) inc(v int) {
	if v < 0 {
		return
	}
	last := len(h.counts) - 1
	if v > last {
		v = last
	}
	h.counts[v]++
	h.total++
}
// percentile returns the bucket value at the p-th percentile of the
// recorded observations. It returns 0 for an empty histogram or for p
// outside [0, 100].
func (h *hist) percentile(p float64) int {
	if h.total == 0 || p < 0 || p > 100 {
		return 0
	}
	seen := 0
	// target is the 1-based rank of the observation that marks the
	// requested percentile.
	target := 1 + int(math.Round(p*float64(h.total-1)/100))
	for i := 0; i < len(h.counts); i++ {
		if seen += h.counts[i]; seen >= target {
			return i
		}
	}
	panic("didn't find value for percentile") // shouldn't be reached
}
// add folds o's buckets into h. Both histograms must have the same
// number of buckets; mismatched sizes are a programming error and panic.
func (h *hist) add(o *hist) {
	if len(h.counts) != len(o.counts) {
		panic(fmt.Sprintf("can't add histograms with %v and %v bucket(s)", len(h.counts), len(o.counts)))
	}
	for i, c := range o.counts {
		h.counts[i] += c
	}
	h.total += o.total
}
// scale scales h's counts by sc, rounding each bucket to the nearest
// integer, and recomputes the total from the rounded buckets.
func (h *hist) scale(sc float64) {
	h.total = 0
	for i := range h.counts {
		h.counts[i] = int(math.Round(sc * float64(h.counts[i])))
		h.total += h.counts[i]
	}
}
// statsMap holds stats indexed by time (typically days).
type statsMap map[time.Time]*stats
// get returns the stats object for t, creating and storing a fresh one
// if t has no entry yet.
func (m statsMap) get(t time.Time) *stats {
	s, ok := m[t]
	if !ok {
		s = newStats()
		m[t] = s
	}
	return s
}
// weeklyStats aggregates the stats in dm by week (starting on Sundays).
// It uses statsMap.get rather than duplicating the create-if-missing
// logic by hand, keeping the two code paths consistent.
func weeklyStats(dm statsMap) statsMap {
	wm := make(statsMap)
	for d, s := range dm {
		week := d.AddDate(0, 0, -1*int(d.Weekday())) // subtract to sunday
		wm.get(week).add(s)
	}
	return wm
}
// averageStats returns a new map with a numDays-day rolling average for each day in dm.
// Each output day averages that day plus up to numDays-1 preceding days;
// near the start of the series fewer days are available and the divisor
// shrinks accordingly.
func averageStats(dm statsMap, numDays int) statsMap {
	am := make(statsMap)
	days := sortedTimes(dm)
	for i, d := range days {
		as := am.get(d)
		nd := 0 // number of days actually folded into this window
		for j := 0; j < numDays && i-j >= 0; j++ {
			as.add(dm[days[i-j]])
			nd++
		}
		as.scale(1 / float64(nd))
	}
	return am
}
package main
import (
"encoding/binary"
"fmt"
"io"
)
// SoundFontHydra holds the nine decoded pdta ("hydra") record lists of a
// SoundFont compatible file.
type SoundFontHydra struct {
	// Headers is a listing of all presets within the SoundFont compatible file.
	// It always contains a minimum of two records, one record for each preset and one for a terminal record.
	Headers []PresetHeader
	// PBag is a listing of all preset zones within the SoundFont compatible file.
	// It always contains a minimum of two records, one record for each preset and one for a terminal record.
	PBag []struct {
		GenIndex, ModIndex uint16
	}
	// PresetModulators is a listing of all preset zone modulators within the SoundFont compatible file.
	PresetModulators []Modulator
	// PresetGenerators is a required listing of preset zone generators for each preset zone within the SoundFont compatible file.
	PresetGenerators []Generator
	// Instuments is a required listing of instrument zones for each instrument within the SoundFont.
	// NOTE(review): the field name is misspelled ("Instruments"); renaming it
	// would break external callers, so it is preserved as is.
	Instuments []Instrument
	// IBag is a listing of all instrument zones within the SoundFont compatible file.
	// It contains one record for each instrument zone plus one for a terminal record.
	IBag []struct {
		InstGenIndex, InstModIndex uint16
	}
	// InstrumentModulators is a listing of all instrument zone modulators within the SoundFont compatible file.
	InstrumentModulators []Modulator
	// InstrumentGenerators is a required listing of zone generators for each instrument zone within the SoundFont compatible file.
	InstrumentGenerators []Generator
	// Samples is a required listing of all samples within the smpl sub-chunk and any referenced ROM samples.
	Samples []SampleHeader
}
// PresetHeader is one 38-byte record of the PHDR sub-chunk, describing a
// single preset.
type PresetHeader struct {
	// PresetName contains the name of the preset expressed in ASCII, with unused terminal characters filled with zero valued bytes.
	PresetName [20]byte
	// Preset contains the MIDI Preset Number.
	Preset uint16
	// Bank contains the MIDI Bank Number which applies to this preset.
	// The special case of a General MIDI percussion bank is handled conventionally by a Bank value of 128.
	Bank uint16
	// PresetBagNdx is an index to the preset's zone list in the PBAG sub-chunk.
	PresetBagNdx uint16
	// Library is reserved for future implementation in a preset library
	// management function and should be preserved as read, and created as zero.
	Library uint32
	// Genre is reserved for future implementation in a preset library
	// management function and should be preserved as read, and created as zero.
	Genre uint32
	// Morphology is reserved for future implementation in a preset library
	// management function and should be preserved as read, and created as zero.
	Morphology uint32
}
// String returns a debug representation of the preset header.
// The fixed-size name array is converted to a string first so that %q
// prints one quoted value (matching Instrument.String and
// SampleHeader.String) instead of an array of quoted bytes.
func (p PresetHeader) String() string {
	return fmt.Sprintf("PresetHeader{PresetName: %q, Preset: %d, Bank: %d, PresetBagNdx: %d, Library: %d, Genre: %d, Morphology: %d}",
		string(p.PresetName[:]), p.Preset, p.Bank, p.PresetBagNdx, p.Library, p.Genre, p.Morphology)
}
// SFModulator is a 16-bit SoundFont modulator enumeration value.
type SFModulator uint16

// SFGenerator is a 16-bit SoundFont generator enumeration value.
type SFGenerator uint16

// SFTransform is a 16-bit SoundFont transform enumeration value.
type SFTransform uint16
// Modulator is one 10-byte record of the PMOD/IMOD sub-chunks.
type Modulator struct {
	// ModSrcOper is a value of one of the SFModulator enumeration type values. Unknown or undefined values are
	// ignored. Modulators with sfModAmtSrcOper set to 'link' which have no other modulator linked to it are ignored.
	ModSrcOper SFModulator
	// ModDestOper indicates the destination of the modulator. The destination is either a value of one of the SFGenerator
	// enumeration type values or a link to the sfModSrcOper of another modulator block. The latter is indicated by the top bit of
	// the sfModDestOper field being set, the other 15 bits designates the index value of the modulator whose source should be the
	// output of the current modulator RELATIVE TO the first modulator in the instrument zone. Unknown or undefined values
	// are ignored. Modulators with links that point to a modulator index that exceeds the total number of modulators for a given
	// zone are ignored. Linked modulators that are part of circular links are ignored.
	ModDestOper SFGenerator
	// ModAmount is a signed value indicating the degree to which the source modulates the destination. A zero
	// value indicates there is no fixed amount.
	ModAmount int16
	// ModAmtSrcOper is a value of one of the SFModulator enumeration type values. Unknown or undefined values are
	// ignored. Modulators with sfModAmtSrcOper set to 'link' are ignored. This value indicates the degree to which the source
	// modulates the destination is to be controlled by the specified modulation source. Note that this enumeration is two bytes in
	// length.
	ModAmtSrcOper SFModulator
	// ModTransOper is a value of one of the SFTransform enumeration type values. Unknown or undefined values are
	// ignored. This value indicates that a transform of the specified type will be applied to the modulation source before
	// application to the modulator. Note that this enumeration is two bytes in length.
	ModTransOper SFTransform
}

// Generator is one 4-byte record of the PGEN/IGEN sub-chunks.
type Generator struct {
	// GenOper is a value of one of the SFGenerator enumeration type values. Unknown or undefined values are
	// ignored.
	GenOper SFGenerator
	// GenAmount is the value to be assigned to the specified generator. Note that this can be of three formats. Certain
	// generators specify a range of MIDI key numbers of MIDI velocities, with a minimum and maximum value. Other
	// generators specify an unsigned WORD value. Most generators, however, specify a signed 16 bit SHORT value.
	// NOTE(review): only the signed SHORT form is modeled here; range and
	// unsigned-WORD generators must be reinterpreted from these bits — confirm
	// callers handle that.
	GenAmount int16
}
// Instrument is one 22-byte record of the INST sub-chunk.
type Instrument struct {
	// Name is the instrument name expressed in ASCII, with unused terminal characters filled with zero valued bytes.
	Name [20]byte
	// InstBagNdx is an index to the instrument's zone list in the IBAG sub-chunk.
	InstBagNdx uint16
}
// String returns a debug representation of the instrument. The label now
// matches the type name; the previous "PresetInstrument" label did not
// correspond to any type in this package.
func (inst Instrument) String() string {
	return fmt.Sprintf("Instrument{Name: %s, InstBagNdx: %d}", string(inst.Name[:]), inst.InstBagNdx)
}
// SfSampleType identifies the channel role and storage location (RAM vs
// ROM, bit 0x8000) of a sample.
type SfSampleType uint16

const (
	SampleType_Mono      SfSampleType = 1
	SampleType_Right     SfSampleType = 2
	SampleType_Left      SfSampleType = 4
	SampleType_Link      SfSampleType = 8
	SampleType_Rom_Mono  SfSampleType = 0x8001
	SampleType_Rom_Right SfSampleType = 0x8002
	SampleType_Rom_Left  SfSampleType = 0x8004
	SampleType_Rom_Link  SfSampleType = 0x8008
)
// String returns the human-readable name of the sample type, or
// "Unknown(n)" for an unrecognized value.
func (s SfSampleType) String() string {
	names := map[SfSampleType]string{
		SampleType_Mono:      "Mono",
		SampleType_Right:     "Right",
		SampleType_Left:      "Left",
		SampleType_Link:      "Link",
		SampleType_Rom_Mono:  "Rom_Mono",
		SampleType_Rom_Right: "Rom_Right",
		SampleType_Rom_Left:  "Rom_Left",
		SampleType_Rom_Link:  "Rom_Link",
	}
	if name, ok := names[s]; ok {
		return name
	}
	return fmt.Sprintf("Unknown(%d)", s)
}
// SampleHeader is one 46-byte record of the SHDR sub-chunk, describing a
// single sample.
type SampleHeader struct {
	// SampleName is the name of the sample expressed in ASCII, with unused terminal characters filled with zero valued bytes.
	SampleName [20]byte
	// Start contains the index, in sample data points, from the beginning of the sample data field to the first data
	// point of this sample.
	Start uint32
	// End contains the index, in sample data points, from the beginning of the sample data field to the first of
	// the set of 46 zero valued data points following this sample.
	End uint32
	// Startloop contains the index, in sample data points, from the beginning of the sample data field to the first
	// data point in the loop of this sample.
	Startloop uint32
	// Endloop contains the index, in sample data points, from the beginning of the sample data field to the first
	// data point following the loop of this sample. Note that this is the data point "equivalent to" the first loop data point, and that
	// to produce portable artifact free loops, the eight proximal data points surrounding both the Startloop and Endloop points
	// should be identical.
	Endloop uint32
	// SampleRate contains the sample rate, in hertz, at which this sample was acquired or to which it was most recently converted.
	SampleRate uint32
	// OriginalPitch contains the MIDI key number of the recorded pitch of the sample.
	// Values between 128 and 254 are illegal. Whenever an illegal value or a value of 255 is encountered, the value 60 should be used.
	OriginalPitch uint8
	// PitchCorrection contains a pitch correction in cents that should be applied to the sample on playback. The
	// purpose of this field is to compensate for any pitch errors during the sample recording process. The correction value is that
	// of the correction to be applied.
	PitchCorrection int8
	// SampleLink is presumably the sample-header index of the other channel
	// of a stereo pair — TODO confirm against the SoundFont 2 specification.
	SampleLink uint16
	// SampleType is a value of one of the SampleType enumeration type values.
	SampleType SfSampleType
}
// String returns a debug representation of the sample header.
func (s SampleHeader) String() string {
	return fmt.Sprintf("SampleHeader{SampleName: %s, Start: %d, End: %d, Startloop: %d, Endloop: %d, SampleRate: %d, OriginalPitch: %d, PitchCorrection: %d, SampleLink: %d, SampleType: %v}",
		string(s.SampleName[:]),
		s.Start,
		s.End,
		s.Startloop,
		s.Endloop,
		s.SampleRate,
		s.OriginalPitch,
		s.PitchCorrection,
		s.SampleLink,
		s.SampleType)
}
// ReadSoundFontHydra decodes the nine pdta ("hydra") sub-chunks of a
// SoundFont compatible file from r. Unknown chunks are skipped with a
// diagnostic print; all nine known chunks must appear before EOF or an
// error is returned, as are malformed chunk sizes and short reads.
func ReadSoundFontHydra(r io.Reader) (*SoundFontHydra, error) {
	sound := &SoundFontHydra{}
	// Required chunk IDs mapped to whether each has been seen yet.
	pdtaChunks := make(map[[4]byte]bool)
	pdtaChunks[[4]byte{'p', 'h', 'd', 'r'}] = false
	pdtaChunks[[4]byte{'p', 'b', 'a', 'g'}] = false
	pdtaChunks[[4]byte{'p', 'm', 'o', 'd'}] = false
	pdtaChunks[[4]byte{'p', 'g', 'e', 'n'}] = false
	pdtaChunks[[4]byte{'i', 'n', 's', 't'}] = false
	pdtaChunks[[4]byte{'i', 'b', 'a', 'g'}] = false
	pdtaChunks[[4]byte{'i', 'm', 'o', 'd'}] = false
	pdtaChunks[[4]byte{'i', 'g', 'e', 'n'}] = false
	pdtaChunks[[4]byte{'s', 'h', 'd', 'r'}] = false
	for {
		// parse a chunk
		var chunk chunk
		if err := chunk.parse(r); err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		_, ok := pdtaChunks[chunk.id]
		if !ok {
			// skip unknown chunks
			fmt.Println("unknown chunk", string(chunk.id[:]))
			continue
		}
		pdtaChunks[chunk.id] = true
		fmt.Println("found chunk", string(chunk.id[:]))
		// make sense of the chunk
		switch chunk.id {
		case [4]byte{'p', 'h', 'd', 'r'}:
			// each preset header is 38 bytes long
			if chunk.size%38 != 0 {
				return nil, fmt.Errorf("invalid preset header size %d", chunk.size)
			}
			sound.Headers = make([]PresetHeader, chunk.size/38)
			chunkReader := chunk.newReader()
			for i := 0; i < len(sound.Headers); i++ {
				if err := binary.Read(chunkReader, binary.LittleEndian, &sound.Headers[i]); err != nil {
					return nil, err
				}
			}
		case [4]byte{'p', 'b', 'a', 'g'}:
			// each preset bag is 4 bytes long
			if chunk.size%4 != 0 {
				return nil, fmt.Errorf("invalid preset bag size %d", chunk.size)
			}
			sound.PBag = make([]struct {
				GenIndex, ModIndex uint16
			}, chunk.size/4)
			for i := 0; i < len(sound.PBag); i++ {
				// first two bytes: little-endian index into the PGEN list
				sound.PBag[i].GenIndex = uint16(chunk.data[4*i+1])<<8 | uint16(chunk.data[4*i])
				// last two bytes: little-endian index into the PMOD list
				sound.PBag[i].ModIndex = uint16(chunk.data[4*i+3])<<8 | uint16(chunk.data[4*i+2])
			}
		case [4]byte{'p', 'm', 'o', 'd'}:
			// each preset modulator is 10 bytes long
			if chunk.size%10 != 0 {
				return nil, fmt.Errorf("invalid preset modulator size %d", chunk.size)
			}
			sound.PresetModulators = make([]Modulator, chunk.size/10)
			chunkReader := chunk.newReader()
			for i := 0; i < len(sound.PresetModulators); i++ {
				if err := binary.Read(chunkReader, binary.LittleEndian, &sound.PresetModulators[i]); err != nil {
					return nil, err
				}
			}
		case [4]byte{'p', 'g', 'e', 'n'}:
			// each preset generator is 4 bytes long
			if chunk.size%4 != 0 {
				return nil, fmt.Errorf("invalid preset generator size %d", chunk.size)
			}
			sound.PresetGenerators = make([]Generator, chunk.size/4)
			chunkReader := chunk.newReader()
			for i := 0; i < len(sound.PresetGenerators); i++ {
				if err := binary.Read(chunkReader, binary.LittleEndian, &sound.PresetGenerators[i]); err != nil {
					return nil, err
				}
			}
		case [4]byte{'i', 'n', 's', 't'}:
			// each instrument header is 22 bytes long
			if chunk.size%22 != 0 {
				return nil, fmt.Errorf("invalid instrument header size %d", chunk.size)
			}
			sound.Instuments = make([]Instrument, chunk.size/22)
			chunkReader := chunk.newReader()
			for i := 0; i < len(sound.Instuments); i++ {
				if err := binary.Read(chunkReader, binary.LittleEndian, &sound.Instuments[i]); err != nil {
					return nil, err
				}
			}
		case [4]byte{'i', 'b', 'a', 'g'}:
			// each instrument bag is 4 bytes long
			// NOTE(review): the error text below says "preset bag" but this is
			// the instrument bag.
			if chunk.size%4 != 0 {
				return nil, fmt.Errorf("invalid preset bag size %d", chunk.size)
			}
			sound.IBag = make([]struct {
				InstGenIndex, InstModIndex uint16
			}, chunk.size/4)
			for i := 0; i < len(sound.IBag); i++ {
				// first two bytes: little-endian index into the IGEN list
				sound.IBag[i].InstGenIndex = uint16(chunk.data[4*i+1])<<8 | uint16(chunk.data[4*i])
				// last two bytes: little-endian index into the IMOD list
				sound.IBag[i].InstModIndex = uint16(chunk.data[4*i+3])<<8 | uint16(chunk.data[4*i+2])
			}
		case [4]byte{'i', 'm', 'o', 'd'}:
			// each instrument modulator is 10 bytes long
			// NOTE(review): the error text below says "preset" for this
			// instrument chunk.
			if chunk.size%10 != 0 {
				return nil, fmt.Errorf("invalid preset modulator size %d", chunk.size)
			}
			sound.InstrumentModulators = make([]Modulator, chunk.size/10)
			chunkReader := chunk.newReader()
			for i := 0; i < len(sound.InstrumentModulators); i++ {
				if err := binary.Read(chunkReader, binary.LittleEndian, &sound.InstrumentModulators[i]); err != nil {
					return nil, err
				}
			}
		case [4]byte{'i', 'g', 'e', 'n'}:
			// each instrument generator is 4 bytes long
			// NOTE(review): the error text below says "preset" for this
			// instrument chunk.
			if chunk.size%4 != 0 {
				return nil, fmt.Errorf("invalid preset generator size %d", chunk.size)
			}
			sound.InstrumentGenerators = make([]Generator, chunk.size/4)
			chunkReader := chunk.newReader()
			for i := 0; i < len(sound.InstrumentGenerators); i++ {
				if err := binary.Read(chunkReader, binary.LittleEndian, &sound.InstrumentGenerators[i]); err != nil {
					return nil, err
				}
			}
		case [4]byte{'s', 'h', 'd', 'r'}:
			// each sample header is 46 bytes long
			if chunk.size%46 != 0 {
				return nil, fmt.Errorf("invalid sample header size %d", chunk.size)
			}
			sound.Samples = make([]SampleHeader, chunk.size/46)
			chunkReader := chunk.newReader()
			for i := 0; i < len(sound.Samples); i++ {
				if err := binary.Read(chunkReader, binary.LittleEndian, &sound.Samples[i]); err != nil {
					return nil, err
				}
			}
		}
	}
	// All chunks must be present
	for ck, ok := range pdtaChunks {
		if !ok {
			return nil, fmt.Errorf("missing chunk %v", string(ck[:]))
		}
	}
	return sound, nil
}
package dots
import (
"image"
"image/color"
"unsafe"
)
// DotImage is a black-and-white image stored as a grid of braille
// CodePoints; each code point covers a blockWidth x blockHeight cell of
// pixels (dots).
type DotImage struct {
	// CpRect is the image's bounds in code-point coordinates.
	CpRect image.Rectangle
	// Stride is the Cps offset between vertically adjacent code points.
	Stride int
	// Cps holds the code points; the cell at (x, y) is at CpOffset(x, y).
	Cps []CodePoint
}
// NewImage creates an empty image with the given size in code points.
func NewImage(r image.Rectangle) *DotImage {
	w := r.Dx()
	return &DotImage{
		Cps:    make([]CodePoint, w*r.Dy()),
		CpRect: r,
		Stride: w,
	}
}
// Bounds returns the image size in pixels. Each code point is a
// blockWidth x blockHeight cell of dots; using the same constants as At
// keeps the two methods from drifting apart (the previous literals 2 and
// 4 duplicated those constants by hand).
func (p *DotImage) Bounds() image.Rectangle {
	r := p.CpRect
	r.Min.X *= blockWidth
	r.Max.X *= blockWidth
	r.Min.Y *= blockHeight
	r.Max.Y *= blockHeight
	return r
}
// At returns the color of the pixel at (px, py): white when the
// corresponding braille dot is set, black otherwise.
func (p *DotImage) At(px, py int) color.Color {
	// Split the pixel coordinate into a code-point cell and a dot
	// position within that cell.
	x, cpx := px/blockWidth, px%blockWidth
	y, cpy := py/blockHeight, py%blockHeight
	if p.Cps[p.CpOffset(x, y)].IsOn(cpx, cpy) {
		return color.White
	}
	return color.Black
}

// ColorModel reports the image's color model; dots are effectively
// black or white, represented in 16-bit grayscale.
func (p *DotImage) ColorModel() color.Model {
	return color.Gray16Model
}

// CpOffset returns the index into Cps of the code point at code-point
// coordinates (x, y) relative to CpRect.
func (p *DotImage) CpOffset(x, y int) int {
	return (y-p.CpRect.Min.Y)*p.Stride + (x - p.CpRect.Min.X)
}

// CpAt returns the code point at code-point coordinates (x, y).
func (p *DotImage) CpAt(x, y int) CodePoint {
	return p.Cps[p.CpOffset(x, y)]
}
// Clear zeroes every code point, producing a blank canvas, and returns
// the receiver for chaining.
func (p *DotImage) Clear() *DotImage {
	return p.Fill(0)
}

// Fill sets every code point of the image to cp and returns the
// receiver for chaining.
func (p *DotImage) Fill(cp CodePoint) *DotImage {
	for i := 0; i < len(p.Cps); i++ {
		p.Cps[i] = cp
	}
	return p
}
// SubImage returns the picture inside of r (clipped to the image's
// bounds). The returned value shares storage with the original picture.
func (p *DotImage) SubImage(r image.Rectangle) *DotImage {
	r = r.Intersect(p.CpRect)
	i := p.CpOffset(r.Min.X, r.Min.Y)
	return &DotImage{
		Cps:    p.Cps[i:],
		Stride: p.Stride,
		CpRect: r,
	}
}
// tx and maskx build UTF-8 continuation bytes when rendering code points
// as braille runes (see read): each cell is emitted as the 3-byte UTF-8
// encoding of U+2800+cp, i.e. 0xE2 followed by two continuation bytes.
const (
	tx    = 0b10000000 // continuation-byte marker (10xxxxxx)
	maskx = 0b00111111 // low six payload bits of a continuation byte
)
// DrawImageTransform combines p2 into p over the intersection of their
// code-point rectangles, replacing each code point of p with
// transform(existing, incoming).
func (p *DotImage) DrawImageTransform(
	p2 *DotImage,
	transform func(CodePoint, CodePoint) CodePoint) {
	r := p.CpRect.Intersect(p2.CpRect)
	for y := r.Min.Y; y < r.Max.Y; y++ {
		for x := r.Min.X; x < r.Max.X; x++ {
			ix0 := p.CpOffset(x, y)
			ix2 := p2.CpOffset(x, y)
			p.Cps[ix0] = transform(p.Cps[ix0], p2.Cps[ix2])
		}
	}
}

// DrawImage draws p2 onto p using the NEWONLY combining rule.
// NOTE(review): the x and y parameters are currently ignored; the images
// are aligned by their absolute CpRect coordinates instead. Confirm
// whether an offset translation was intended here.
func (p *DotImage) DrawImage(x, y int, p2 *DotImage) {
	p.DrawImageTransform(p2, NEWONLY)
}
// FlipBits inverts every dot in the image (set dots become unset and
// vice versa) and returns the receiver for chaining.
func (p *DotImage) FlipBits() *DotImage {
	for i, cp := range p.Cps {
		p.Cps[i] = ^cp
	}
	return p
}
// ReverseByX mirrors the image horizontally: code points are swapped
// across the vertical center line, then every code point's dots are
// mirrored within their cell via CodePoint.RevX. Returns the receiver
// for chaining.
func (p *DotImage) ReverseByX() *DotImage {
	r := p.CpRect
	centerX := (r.Min.X + r.Max.X) / 2
	for y := r.Min.Y; y < r.Max.Y; y++ {
		for x := r.Min.X; x < centerX; x++ {
			ix1 := p.CpOffset(x, y)
			ix2 := p.CpOffset(r.Max.X-x-1, y)
			p.Cps[ix1], p.Cps[ix2] = p.Cps[ix2], p.Cps[ix1]
		}
	}
	// Every cell (including an unswapped middle column when the width is
	// odd) still needs its internal dots mirrored.
	for i := range p.Cps {
		p.Cps[i] = p.Cps[i].RevX()
	}
	return p
}

// ReverseByY mirrors the image vertically: code points are swapped
// across the horizontal center line, then each cell's dots are mirrored
// via CodePoint.RevY. Returns the receiver for chaining.
func (p *DotImage) ReverseByY() *DotImage {
	r := p.CpRect
	centerY := (r.Min.Y + r.Max.Y) / 2
	for y := r.Min.Y; y < centerY; y++ {
		for x := r.Min.X; x < r.Max.X; x++ {
			ix1 := p.CpOffset(x, y)
			ix2 := p.CpOffset(x, r.Max.Y-y-1)
			p.Cps[ix1], p.Cps[ix2] = p.Cps[ix2], p.Cps[ix1]
		}
	}
	for i := range p.Cps {
		p.Cps[i] = p.Cps[i].RevY()
	}
	return p
}
// ByteLen returns the number of bytes required to render the image:
// three UTF-8 bytes per code point plus one newline per row.
func (p *DotImage) ByteLen() int {
	return (3*p.CpRect.Dx() + 1) * p.CpRect.Dy()
}

// String renders the image as braille text, one line per code-point row.
func (p *DotImage) String() string {
	buf := make([]byte, p.ByteLen())
	p.read(buf)
	// using unsafe code for better performance: buf is never referenced
	// again after this point, so aliasing it as a string avoids a copy.
	return *(*string)(unsafe.Pointer(&buf))
}
// read renders the image into buf as UTF-8 braille text: each code point
// becomes the 3-byte encoding of rune U+2800+cp, and each row of cells
// is terminated by '\n'. buf must be at least ByteLen() bytes.
func (p *DotImage) read(buf []byte) {
	r := p.CpRect
	dx := r.Dx()
	for y := r.Min.Y; y < r.Max.Y; y++ {
		line := y - r.Min.Y
		for x := r.Min.X; x < r.Max.X; x++ {
			col := x - r.Min.X
			cpb := byte(p.Cps[p.CpOffset(x, y)])
			ix := line*(3*dx+1) + 3*col
			buf[ix+0] = 226                       // 0xE2: lead byte of U+28xx
			buf[ix+1] = tx | (160|(cpb>>6))&maskx // 0xA0 plus high bits of cp
			buf[ix+2] = tx | cpb&maskx            // low six bits of cp
		}
		buf[line*(3*dx+1)+3*dx] = '\n'
	}
}
package stream
import "sync"
// filterErrors records errors accumulated during the execution of a filter.
// Only the first non-nil error recorded is kept; later errors are dropped.
// It is safe for concurrent use.
type filterErrors struct {
	mu  sync.Mutex
	err error
}

// record stores err as the filter error if err is non-nil and no error
// has been recorded yet.
func (e *filterErrors) record(err error) {
	if err != nil {
		e.mu.Lock()
		if e.err == nil {
			e.err = err
		}
		e.mu.Unlock()
	}
}

// getError returns the first recorded error, or nil if none occurred.
func (e *filterErrors) getError() error {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.err
}
// Arg contains the data passed to Filter.Run. Arg.In is a channel that
// produces the input to the filter, and Arg.Out is a channel that
// receives the output from the filter.
type Arg struct {
	In    <-chan string
	Out   chan<- string
	dummy bool // To allow later expansion without breaking struct literals
}

// The Filter interface represents a process that takes as input a
// sequence of strings from a channel and produces a sequence on
// another channel.
type Filter interface {
	// RunFilter reads a sequence of items from Arg.In and produces a
	// sequence of items on Arg.Out. RunFilter returns nil on success,
	// an error otherwise. RunFilter must *not* close the Arg.Out
	// channel.
	RunFilter(Arg) error
}

// FilterFunc is an adapter type that allows the use of ordinary
// functions as Filters. If f is a function with the appropriate
// signature, FilterFunc(f) is a Filter that calls f.
type FilterFunc func(Arg) error

// RunFilter calls this function. It implements the Filter interface.
func (f FilterFunc) RunFilter(arg Arg) error { return f(arg) }

// channelBuffer is the capacity of the channels that link adjacent
// filters; buffering decouples producers from consumers.
const channelBuffer = 1000
// Sequence returns a filter that is the concatenation of all filter arguments.
// The output of a filter is fed as input to the next filter.
func Sequence(filters ...Filter) Filter {
	if len(filters) == 1 {
		return filters[0]
	}
	return FilterFunc(func(arg Arg) error {
		e := &filterErrors{}
		in := arg.In
		// Launch one goroutine per filter, chaining each stage's output
		// channel to the next stage's input; runFilter closes each
		// channel when its filter finishes.
		for _, f := range filters {
			c := make(chan string, channelBuffer)
			go runFilter(f, Arg{In: in, Out: c}, e)
			in = c
		}
		// Forward the final stage's output to the caller.
		for s := range in {
			arg.Out <- s
		}
		return e.getError()
	})
}
// Run executes the sequence of filters and discards all output.
// It returns nil on success, or the first error reported by any filter.
func Run(filters ...Filter) error {
	return ForEach(Sequence(filters...), func(s string) {})
}

// ForEach calls fn(s) for every item s in the output of filter and
// returns nil, or the first error reported by the execution of the filter.
func ForEach(filter Filter, fn func(s string)) error {
	in := make(chan string)
	close(in) // the filter receives an empty input sequence
	out := make(chan string, channelBuffer)
	e := &filterErrors{}
	go runFilter(filter, Arg{In: in, Out: out}, e)
	for s := range out {
		fn(s)
	}
	return e.getError()
}
// Contents returns a slice containing every item produced by the
// sequence of filters, or nil together with the first error reported.
func Contents(filters ...Filter) ([]string, error) {
	var items []string
	collect := func(s string) { items = append(items, s) }
	if err := ForEach(Sequence(filters...), collect); err != nil {
		// Discard partial results on error.
		return nil, err
	}
	return items, nil
}
// runFilter executes f, records any error it returns, closes the output
// channel, and drains any input the filter left unconsumed so upstream
// senders are never blocked forever.
func runFilter(f Filter, arg Arg, e *filterErrors) {
	e.record(f.RunFilter(arg))
	close(arg.Out)
	for range arg.In { // Discard all unhandled input
	}
}
package helpers
import (
"fmt"
"math"
"math/bits"
"k8s.io/apimachinery/pkg/api/resource"
)
/*
The Cloud Provider's volume plugins provision disks for corresponding
PersistentVolumeClaims. Cloud Providers use different allocation unit for their
disk sizes. AWS allows you to specify the size as an integer amount of GiB,
while Portworx expects bytes for example. On AWS, if you want a volume of
1500MiB, the actual call to the AWS API should therefore be for a 2GiB disk.
This file contains functions that help round a storage request up to a
Cloud Provider's allocation unit.
*/
// Byte-count constants for the decimal (SI) and binary (IEC) size units
// used by the rounding helpers below.
const (
	// GB - GigaByte size
	GB = 1000 * 1000 * 1000
	// GiB - GibiByte size
	GiB = 1024 * 1024 * 1024
	// MB - MegaByte size
	MB = 1000 * 1000
	// MiB - MebiByte size
	MiB = 1024 * 1024
	// KB - KiloByte size
	KB = 1000
	// KiB - KibiByte size
	KiB = 1024
)
// RoundUpToGiB rounds up given quantity upto chunks of GiB.
func RoundUpToGiB(size resource.Quantity) (int64, error) {
	return roundUpSizeInt64(size, GiB)
}

// RoundUpToMB rounds up given quantity to chunks of MB.
func RoundUpToMB(size resource.Quantity) (int64, error) {
	return roundUpSizeInt64(size, MB)
}

// RoundUpToMiB rounds up given quantity upto chunks of MiB.
func RoundUpToMiB(size resource.Quantity) (int64, error) {
	return roundUpSizeInt64(size, MiB)
}

// RoundUpToKB rounds up given quantity to chunks of KB.
func RoundUpToKB(size resource.Quantity) (int64, error) {
	return roundUpSizeInt64(size, KB)
}

// RoundUpToKiB rounds up given quantity to chunks of KiB.
func RoundUpToKiB(size resource.Quantity) (int64, error) {
	return roundUpSizeInt64(size, KiB)
}

// RoundUpToB rounds up given quantity to chunks of bytes.
func RoundUpToB(size resource.Quantity) (int64, error) {
	return roundUpSizeInt64(size, 1)
}

// RoundUpToGiBInt rounds up given quantity upto chunks of GiB. It returns an
// int instead of an int64 and an error if there's overflow.
func RoundUpToGiBInt(size resource.Quantity) (int, error) {
	return roundUpSizeInt(size, GiB)
}

// RoundUpToMBInt rounds up given quantity to chunks of MB. It returns an
// int instead of an int64 and an error if there's overflow.
func RoundUpToMBInt(size resource.Quantity) (int, error) {
	return roundUpSizeInt(size, MB)
}

// RoundUpToMiBInt rounds up given quantity upto chunks of MiB. It returns an
// int instead of an int64 and an error if there's overflow.
func RoundUpToMiBInt(size resource.Quantity) (int, error) {
	return roundUpSizeInt(size, MiB)
}

// RoundUpToKBInt rounds up given quantity to chunks of KB. It returns an
// int instead of an int64 and an error if there's overflow.
func RoundUpToKBInt(size resource.Quantity) (int, error) {
	return roundUpSizeInt(size, KB)
}

// RoundUpToKiBInt rounds up given quantity upto chunks of KiB. It returns an
// int instead of an int64 and an error if there's overflow.
func RoundUpToKiBInt(size resource.Quantity) (int, error) {
	return roundUpSizeInt(size, KiB)
}

// RoundUpToGiBInt32 rounds up given quantity up to chunks of GiB. It returns an
// int32 instead of an int64 and an error if there's overflow.
func RoundUpToGiBInt32(size resource.Quantity) (int32, error) {
	return roundUpSizeInt32(size, GiB)
}
// roundUpSizeInt calculates how many allocation units are needed to accommodate
// a volume of a given size. It returns an int and an error if there's overflow.
// On 32-bit platforms the int32 path is used so overflow of the native int is
// detected; otherwise the int64 path applies.
func roundUpSizeInt(size resource.Quantity, allocationUnitBytes int64) (int, error) {
	if bits.UintSize == 32 {
		res, err := roundUpSizeInt32(size, allocationUnitBytes)
		return int(res), err
	}
	res, err := roundUpSizeInt64(size, allocationUnitBytes)
	return int(res), err
}
// roundUpSizeInt32 calculates how many allocation units are needed to
// accommodate a volume of a given size. It returns an int32 and an error
// if the rounded count overflows int32 (or the underlying int64 math fails).
func roundUpSizeInt32(size resource.Quantity, allocationUnitBytes int64) (int32, error) {
	rounded, err := roundUpSizeInt64(size, allocationUnitBytes)
	switch {
	case err != nil:
		return 0, err
	case rounded > math.MaxInt32:
		return 0, fmt.Errorf("quantity %s is too great, overflows int32", size.String())
	}
	return int32(rounded), nil
}
// roundUpSizeInt64 calculates how many allocation units are needed to accommodate
// a volume of a given size (ceiling division). It returns an int64 and an
// error if the quantity itself overflows int64.
func roundUpSizeInt64(size resource.Quantity, allocationUnitBytes int64) (int64, error) {
	// Use CmpInt64() to find out if the value of "size" would overflow an
	// int64 and therefore have Value() return a wrong result. Then, retrieve
	// the value as int64 and perform the rounding.
	// It's not convenient to use AsScale() and related functions as they don't
	// support BinarySI format, nor can we use AsInt64() directly since it's
	// only implemented for int64 scaled numbers (int64Amount).
	// CmpInt64() actually returns 0 when comparing an amount bigger than MaxInt64.
	if size.CmpInt64(math.MaxInt64) >= 0 {
		return 0, fmt.Errorf("quantity %s is too great, overflows int64", size.String())
	}
	volumeSizeBytes := size.Value()
	// Ceiling division: round up whenever there is a remainder.
	roundedUp := volumeSizeBytes / allocationUnitBytes
	if volumeSizeBytes%allocationUnitBytes > 0 {
		roundedUp++
	}
	return roundedUp, nil
}
package search
import (
"github.com/gzg1984/golucene/core/index"
)
// search/similarities/Similarity.java
/*
Similarity defines the components of Lucene scoring.
Expert: Scoring API.
This is a low-level API, you should only extend this API if you want
to implement an information retrieval model. If you are instead
looking for a convenient way to alter Lucene's scoring, consider
extending a high-level implementation such as TFIDFSimilarity, which
implements the vector space model with this API, or just tweaking the
default implementation: DefaultSimilarity.
Similarity determines how Lucene weights terms, and Lucene interacts
with this class at both index-time and query-time.
######Index-time
At indexing time, the indexer calls computeNorm(), allowing the
Similarity implementation to set a per-document value for the field
that will be later accessible via AtomicReader.NormValues(). Lucene
makes no assumption about what is in this norm, but it is most useful
for encoding length normalization information.
Implementations should carefully consider how the normalization is
encoded: while Lucene's classical TFIDFSimilarity encodes a
combination of index-time boost and length normalization information
with SmallFloat into a single byte, this might not be suitable for all
purposes.
Many formulas require the use of average document length, which can
be computed via a combination of CollectionStatistics.SumTotalTermFreq()
and CollectionStatistics.MaxDoc() or CollectionStatistics.DocCount(),
depending upon whether the average should reflect field sparsity.
Additional scoring factors can be stored in named NumericDocValuesFields
and accessed at query-time with AtomicReader.NumericDocValues().
Finally, using index-time boosts (either via folding into the
normalization byte or via DocValues), is an inefficient way to boost
the scores of different fields if the boost will be the same for
every document, instead the Similarity can simply take a constant
boost parameter C, and PerFieldSimilarityWrapper can return different
instances with different boosts depending upon field name.
######Query-time
At query-time, Queries interact with the Similarity via these steps:
1. The computeWeight() method is called a single time, allowing the
implementation to compute any statistics (such as IDF, average
document length, etc) across the entire collection. The TermStatistics
and CollectionStatistics passed in already contain all of the raw
statistics involved, so a Similarity can freely use any combination
of statistics without causing any additional I/O. Lucene makes no
assumption about what is stored in the returned SimWeight object.
2. The query normalization process occurs a single time:
SimWeight.ValueForNormalization() is called for each query leaf node,
queryNorm() is called for the top-level query, and finally
SimWeight.Normalize() passes down the normalization value and any
top-level boosts (e.g. from enclosing BooleanQuerys).
3. For each segment in the index, the Query creates a SimScorer. The
score() method is called for each matching document.
######Explain-time
When IndexSearcher.explain() is called, queries consult the
Similarity's DocScorer for an explanation of how it computed its
score. The query passes in the document id and an explanation of
how the frequency was computed.
*/
// Similarity defines how terms are weighted at index time and scored at
// query time; see the extensive discussion in the comment block above.
type Similarity interface {
	// Coord returns a coordination score factor. The two int parameters
	// are presumably (overlap, maxOverlap) as in Lucene — confirm against
	// implementations.
	Coord(int, int) float32
	// Computes the normalization value for a query given the sum of
	// the normalized weights SimWeight.ValueForNormalization of each
	// of the query terms. This value is passed back to the weight
	// (SimWeight.normalize()) of each query term, to provide a hook
	// to attempt to make scores from different queries comparable.
	QueryNorm(valueForNormalization float32) float32
	/*
	Computes the normalization value for a field, given the
	accumulated state of term processing for this field (see
	FieldInvertState).
	Matches in longer fields are less precise, so implementations
	of this method usually set smaller values when state.Length() is
	larger, and larger values when state.Length() is smaller.
	*/
	ComputeNorm(state *index.FieldInvertState) int64
	// Compute any collection-level weight (e.g. IDF, average document
	// length, etc) needed for scoring a query.
	computeWeight(queryBoost float32, collectionStats CollectionStatistics, termStats ...TermStatistics) SimWeight
	// Creates a new SimScorer to score matching documents from a
	// segment of the inverted index.
	simScorer(w SimWeight, ctx *index.AtomicReaderContext) (ss SimScorer, err error)
}
// similarities/PerFieldSimilarityWrapper
// PerFieldSimilarityWrapperSPI supplies the per-field Similarity used by
// PerFieldSimilarityWrapper.
type PerFieldSimilarityWrapperSPI interface {
	// Get returns the Similarity to use for the named field.
	Get(name string) Similarity
}
/*
Provides the ability to use a different Similarity for different
fields.
Subclasses should implement Get() to return an appropriate Similarity
(for example, using field-specific parameter values) for the field.
*/
type PerFieldSimilarityWrapper struct {
	// spi resolves the field name to the Similarity used for that field.
	spi PerFieldSimilarityWrapperSPI
}
// NewPerFieldSimilarityWrapper creates a wrapper that delegates all
// similarity computations to the Similarity returned by spi.Get(field).
func NewPerFieldSimilarityWrapper(spi PerFieldSimilarityWrapperSPI) *PerFieldSimilarityWrapper {
	return &PerFieldSimilarityWrapper{spi: spi}
}
// ComputeNorm delegates norm computation to the Similarity registered for
// the field being inverted (looked up by state.Name()).
func (wrapper *PerFieldSimilarityWrapper) ComputeNorm(state *index.FieldInvertState) int64 {
	return wrapper.spi.Get(state.Name()).ComputeNorm(state)
}
// computeWeight resolves the field's Similarity and wraps its weight so that
// simScorer can later find the same delegate again.
func (wrapper *PerFieldSimilarityWrapper) computeWeight(queryBoost float32,
	collectionStats CollectionStatistics, termStats ...TermStatistics) SimWeight {
	sim := wrapper.spi.Get(collectionStats.field)
	return &PerFieldSimWeight{sim, sim.computeWeight(queryBoost, collectionStats, termStats...)}
}
// simScorer is not implemented yet; it should unwrap the PerFieldSimWeight
// and delegate to the wrapped Similarity's simScorer.
func (wrapper *PerFieldSimilarityWrapper) simScorer(w SimWeight, ctx *index.AtomicReaderContext) (ss SimScorer, err error) {
	panic("not implemented yet")
}
// PerFieldSimWeight pairs a field's Similarity with the SimWeight it produced,
// so scoring can later be routed back to the same per-field delegate.
type PerFieldSimWeight struct {
	delegate       Similarity
	delegateWeight SimWeight
}
// ValueForNormalization forwards to the wrapped field-specific weight.
func (w *PerFieldSimWeight) ValueForNormalization() float32 {
	return w.delegateWeight.ValueForNormalization()
}
// Normalize forwards query normalization to the wrapped field-specific weight.
func (w *PerFieldSimWeight) Normalize(queryNorm, topLevelBoost float32) {
	w.delegateWeight.Normalize(queryNorm, topLevelBoost)
} | core/search/similarities.go | 0.826852 | 0.607896 | similarities.go | starcoder |
package opentsdb
import (
"errors"
"fmt"
"strconv"
"time"
)
// GetRelativeStart - returns a start time based on an end time and a duration string
func GetRelativeStart(end time.Time, s string) (time.Time, error) {
if string(s[len(s)-2:]) == "ms" {
d, err := time.ParseDuration(s)
return end.Add(-d), err
}
switch s[len(s)-1:] {
case "s", "m", "h":
d, err := time.ParseDuration(s)
return end.Add(-d), err
case "d":
i, err := strconv.Atoi(string(s[:len(s)-1]))
return end.AddDate(0, 0, -i), err
case "w":
i, err := strconv.Atoi(string(s[:len(s)-1]))
return end.AddDate(0, 0, -i*7), err
case "n":
i, err := strconv.Atoi(string(s[:len(s)-1]))
return end.AddDate(0, -i, 0), err
case "y":
i, err := strconv.Atoi(string(s[:len(s)-1]))
return end.AddDate(-i, 0, 0), err
}
return time.Time{}, fmt.Errorf("unknown time unit: %s", s[len(s)-1:])
}
// parseParams splits the argument list of an expression such as
// "f(a,b(c,d),e{x=y})" into its top-level comma-separated parameters.
// Scanning starts at index 1, i.e. exp[0] is assumed to be the opening "(".
// Nested "(...)" groups and "{...}" tag filters are copied through intact so
// that commas inside them do not split a parameter. A ")" that is not the
// final character ends the top-level list early and returns what was
// collected so far (without the in-progress parameter).
func parseParams(exp string) []string {
	var param []byte
	params := []string{}
	for i := 1; i < len(exp); i++ {
		if string(exp[i]) == "(" {
			// Copy a nested parenthesized group verbatim, tracking depth f.
			param = append(param, exp[i])
			f := 1
			for j := i + 1; j < len(exp); j++ {
				if string(exp[j]) == "(" {
					f++
				}
				if string(exp[j]) == ")" {
					f--
				}
				param = append(param, exp[j])
				if f == 0 {
					// Resume the outer scan just past the group.
					i = j + 1
					if i == len(exp) {
						return params
					}
					break
				}
			}
		}
		if string(exp[i]) == "{" {
			// Copy a "{...}" group verbatim (no nesting handled for braces).
			param = append(param, exp[i])
			for j := i + 1; j < len(exp); j++ {
				param = append(param, exp[j])
				if string(exp[j]) == "}" {
					i = j + 1
					break
				}
			}
		}
		if string(exp[i]) == "," {
			// Top-level comma: finish the current parameter.
			params = append(params, string(param))
			param = []byte{}
			continue
		}
		if string(exp[i]) == ")" {
			if i+1 == len(exp) {
				// Closing paren of the whole list: flush the last parameter.
				params = append(params, string(param))
				break
			}
			// NOTE(review): a non-final ")" abandons the current parameter;
			// confirm callers never pass trailing text after the close.
			return params
		}
		param = append(param, exp[i])
	}
	return params
}
// parseMap parses a "{k=v,k2=v2,...}" expression into a multimap from key to
// the list of values seen for that key. The input must start with "{"; keys
// and values must be non-empty. Values are terminated by "," or "}".
func parseMap(exp string) (map[string][]string, error) {
	if len(exp) == 0 {
		return nil, errors.New(`empty map`)
	}
	if string(exp[0]) != "{" {
		return nil, errors.New(`missing '{' at the beginning of map`)
	}
	var key, value []byte
	m := map[string][]string{}
	for i := 1; i < len(exp); i++ {
		if string(exp[i]) == "=" {
			if len(key) == 0 {
				return nil, errors.New(`map key cannot be empty`)
			}
			if _, ok := m[string(key)]; !ok {
				m[string(key)] = []string{}
			}
			// Consume the value up to the next "," or "}" terminator,
			// then resume the outer scan at the terminator.
			for j := i + 1; j < len(exp); j++ {
				if string(exp[j]) == "," || string(exp[j]) == "}" {
					if len(value) == 0 {
						return nil, errors.New(`map value cannot be empty`)
					}
					m[string(key)] = append(m[string(key)], string(value))
					key = []byte{}
					value = []byte{}
					i = j
					break
				}
				value = append(value, exp[j])
			}
			continue
		}
		// A separator appearing while a key is being read means there was
		// no "=" for the previous token.
		if string(exp[i]) == "," || string(exp[i]) == "}" {
			return nil, errors.New(`bad map format`)
		}
		key = append(key, exp[i])
	}
	return m, nil
}
} | opentsdb/parse.go | 0.583322 | 0.400779 | parse.go | starcoder |
package kmeans
import (
"bytes"
"encoding/binary"
"fmt"
"image/color"
"math"
"math/cmplx"
"math/rand"
//"code.google.com/p/lzma"
"github.com/gonum/plot"
"github.com/gonum/plot/plotter"
"github.com/gonum/plot/vg"
"github.com/gonum/plot/vg/draw"
"github.com/mjibson/go-dsp/fft"
"github.com/pointlander/compress"
)
// Observation: Data Abstraction for an N-dimensional
// observation
type Observation []float64
// ClusteredObservation abstracts the Observation with a cluster number.
// Update and computation becomes more efficient
type ClusteredObservation struct {
	ClusterNumber int
	Observation
}
// DistanceFunction computes the distance between two observations.
// NOTE(review): near() takes the square root of the result, which suggests
// implementations are expected to return a *squared* distance — confirm.
type DistanceFunction func(first, second []float64) (float64, error)
/*
func (observation Observation) Sqd(otherObservation Observation) (ssq float64) {
for ii, jj := range observation {
d := jj - otherObservation[ii]
ssq += d * d
}
return ssq
}
*/
// Add accumulates otherObservation into the receiver element-wise, in place.
func (observation Observation) Add(otherObservation Observation) {
	for i, v := range otherObservation {
		observation[i] += v
	}
}
// Mul scales every component of the receiver by scalar, in place.
func (observation Observation) Mul(scalar float64) {
	for i, v := range observation {
		observation[i] = v * scalar
	}
}
// InnerProduct multiplies the receiver by otherObservation element-wise, in
// place. NOTE(review): despite the name, no summation is performed — this is
// a Hadamard (element-wise) product, not a true dot product.
func (observation Observation) InnerProduct(otherObservation Observation) {
	for ii := range observation {
		observation[ii] *= otherObservation[ii]
	}
}
// OuterProduct returns the outer product of the two vectors as a
// len(observation) x len(otherObservation) matrix.
// TODO: Need to be tested
func (observation Observation) OuterProduct(otherObservation Observation) [][]float64 {
	result := make([][]float64, len(observation))
	for i, a := range observation {
		row := make([]float64, len(otherObservation))
		for j, b := range otherObservation {
			row[j] = a * b
		}
		result[i] = row
	}
	return result
}
// near returns the index of the mean closest to p and the (square-rooted)
// distance to it.
// NOTE(review): errors returned by distanceFunction are silently ignored.
// NOTE(review): math.Sqrt at the end assumes distanceFunction returns a
// squared distance — confirm against the distance functions callers supply.
func near(p ClusteredObservation, mean []Observation, distanceFunction DistanceFunction) (int, float64) {
	indexOfCluster := 0
	minSquaredDistance, _ := distanceFunction(p.Observation, mean[0])
	for i := 1; i < len(mean); i++ {
		squaredDistance, _ := distanceFunction(p.Observation, mean[i])
		if squaredDistance < minSquaredDistance {
			minSquaredDistance = squaredDistance
			indexOfCluster = i
		}
	}
	return indexOfCluster, math.Sqrt(minSquaredDistance)
}
// seed chooses k initial centres with the k-means++ strategy instead of
// uniformly at random: the first centre is sampled uniformly, and each
// subsequent centre is sampled with probability proportional to its squared
// distance from the nearest centre already chosen. Uses the global math/rand
// source, so results depend on the global seed.
func seed(data []ClusteredObservation, k int, distanceFunction DistanceFunction) []Observation {
	s := make([]Observation, k)
	s[0] = data[rand.Intn(len(data))].Observation
	d2 := make([]float64, len(data))
	for ii := 1; ii < k; ii++ {
		var sum float64
		for jj, p := range data {
			// Squared distance from each point to its nearest chosen centre.
			_, dMin := near(p, s[:ii], distanceFunction)
			d2[jj] = dMin * dMin
			sum += d2[jj]
		}
		// Sample an index with probability proportional to d2 (roulette wheel).
		target := rand.Float64() * sum
		jj := 0
		for sum = d2[0]; sum < target; sum += d2[jj] {
			jj++
		}
		s[ii] = data[jj].Observation
	}
	return s
}
// kmeans runs Lloyd's algorithm: assign each point to its nearest mean, then
// recompute each mean as the centroid of its cluster, until no assignment
// changes or more than threshold iterations have run. Both data (cluster
// numbers) and mean (centroids) are updated in place; the (possibly
// non-converged) assignment is returned. The error result is always nil.
func kmeans(data []ClusteredObservation, mean []Observation, distanceFunction DistanceFunction, threshold int) ([]ClusteredObservation, error) {
	counter := 0
	// Initial assignment of every point to its nearest seed.
	for ii, jj := range data {
		closestCluster, _ := near(jj, mean, distanceFunction)
		data[ii].ClusterNumber = closestCluster
	}
	mLen := make([]int, len(mean))
	for n := len(data[0].Observation); ; {
		// Recompute centroids: zero them, sum member points, then divide.
		for ii := range mean {
			mean[ii] = make(Observation, n)
			mLen[ii] = 0
		}
		for _, p := range data {
			mean[p.ClusterNumber].Add(p.Observation)
			mLen[p.ClusterNumber]++
		}
		for ii := range mean {
			// NOTE(review): if a cluster loses all its members, mLen[ii] is 0
			// and this multiplies by +Inf, poisoning the centroid with NaN/Inf.
			mean[ii].Mul(1 / float64(mLen[ii]))
		}
		// Reassign points; count how many changed cluster.
		var changes int
		for ii, p := range data {
			if closestCluster, _ := near(p, mean, distanceFunction); closestCluster != p.ClusterNumber {
				changes++
				data[ii].ClusterNumber = closestCluster
			}
		}
		counter++
		if changes == 0 || counter > threshold {
			return data, nil
		}
	}
	// Unreachable: the loop above only exits via return.
	return data, nil
}
// Kmeans clusters rawData into k clusters using k-means++ seeding followed by
// Lloyd iterations (at most threshold of them). It returns the per-point
// cluster labels and the final centroids. Note that kmeans mutates the seed
// slice in place, so the returned seeds are the converged means.
func Kmeans(rawData [][]float64, k int, distanceFunction DistanceFunction, threshold int) ([]int, []Observation, error) {
	observations := make([]ClusteredObservation, len(rawData))
	for i, row := range rawData {
		observations[i].Observation = row
	}
	centers := seed(observations, k, distanceFunction)
	clustered, err := kmeans(observations, centers, distanceFunction, threshold)
	labels := make([]int, len(clustered))
	for i, obs := range clustered {
		labels[i] = obs.ClusterNumber
	}
	return labels, centers, err
}
// KCmeans runs k-means for every cluster count from 1 to k and selects the
// clustering whose serialized model (counts, centroids, residuals, plus
// gain noise samples per residual) compresses to the fewest bytes — a
// minimum-description-length style model selection. Returns labels and a
// copy of the winning centroids. Side effect: prints one line per candidate
// cluster count. The error result is always nil in practice.
func KCmeans(rawData [][]float64, k int, distanceFunction DistanceFunction, threshold, gain int) ([]int, []Observation, error) {
	var minClusteredData []ClusteredObservation
	var means []Observation
	var err error
	min := int64(math.MaxInt64)
	for clusters := 1; clusters <= k; clusters++ {
		data := make([]ClusteredObservation, len(rawData))
		for ii, jj := range rawData {
			data[ii].Observation = jj
		}
		seeds := seed(data, clusters, distanceFunction)
		clusteredData, _ := kmeans(data, seeds, distanceFunction, threshold)
		counts := make([]int, clusters)
		for _, jj := range clusteredData {
			counts[jj.ClusterNumber]++
		}
		// Serialize the model for this cluster count into input.
		input := &bytes.Buffer{}
		for c := 0; c < clusters; c++ {
			err := binary.Write(input, binary.LittleEndian, rand.Float64())
			if err != nil {
				panic(err)
			}
			err = binary.Write(input, binary.LittleEndian, int64(counts[c]))
			if err != nil {
				panic(err)
			}
			for _, jj := range seeds[c] {
				err = binary.Write(input, binary.LittleEndian, jj)
				if err != nil {
					panic(err)
				}
			}
			/*sigma := make([]float64, len(seeds[c]))*/
			for _, j := range clusteredData {
				if j.ClusterNumber == c {
					// Residuals of each member point from its centroid.
					for ii, jj := range j.Observation {
						x := jj - seeds[c][ii]
						//sigma[ii] += x * x
						err = binary.Write(input, binary.LittleEndian, x)
						if err != nil {
							panic(err)
						}
					}
					// gain noisy samples derived from each residual.
					for ii, jj := range j.Observation {
						x := math.Exp(jj - seeds[c][ii])
						for i := 0; i < gain; i++ {
							err = binary.Write(input, binary.LittleEndian, x*rand.Float64())
							if err != nil {
								panic(err)
							}
						}
					}
				}
			}
		}
		/*N := float64(counts[c])
		for i, j := range sigma {
			sigma[i] = math.Sqrt(j / N)
		}
		for i := 0; i < gain * counts[c]; i++ {
			for _, jj := range sigma {
				err = binary.Write(input, binary.LittleEndian, 3 * jj * rand.NormFloat64())
				if err != nil {
					panic(err)
				}
			}
		}*/
		// Compress the serialized model; its size is the complexity score.
		in, output := make(chan []byte, 1), &bytes.Buffer{}
		in <- input.Bytes()
		close(in)
		compress.BijectiveBurrowsWheelerCoder(in).MoveToFrontRunLengthCoder().AdaptiveCoder().Code(output)
		/*output := &bytes.Buffer{}
		writer := lzma.NewWriterLevel(output, lzma.BestCompression)
		writer.Write(input.Bytes())
		writer.Close()*/
		complexity := int64(output.Len())
		fmt.Printf("%v %v\n", clusters, complexity)
		// Keep the clustering with the smallest compressed size; deep-copy
		// the centroids since kmeans reuses/mutates the seed storage.
		if complexity < min {
			min, minClusteredData, means = complexity, clusteredData, make([]Observation, len(seeds))
			for ii := range seeds {
				means[ii] = make([]float64, len(seeds[ii]))
				for jj := range seeds[ii] {
					means[ii][jj] = seeds[ii][jj]
				}
			}
		}
	}
	labels := make([]int, len(minClusteredData))
	for ii, jj := range minClusteredData {
		labels[ii] = jj.ClusterNumber
	}
	return labels, means, err
}
// kc returns the compressed size in bytes (as a float64) of a copy of a,
// using the BWT + move-to-front/RLE + adaptive-coder pipeline. It is used as
// a computable stand-in for Kolmogorov complexity. The input is copied first
// because the coder consumes the slice it receives.
func kc(a []byte) float64 {
	input, in, output := make([]byte, len(a)), make(chan []byte, 1), &bytes.Buffer{}
	copy(input, a)
	in <- input
	close(in)
	compress.BijectiveBurrowsWheelerCoder(in).MoveToFrontRunLengthCoder().AdaptiveCoder().Code(output)
	return float64(output.Len())
}
// KC2means selects the cluster count (1..k) by a normalized compression
// distance (NCD) criterion: for each candidate clustering it serializes the
// real member points (input) and a synthetic Gaussian sample drawn from each
// cluster's per-dimension standard deviation (synth), then picks the
// clustering whose real and synthetic data compress most alike (lowest NCD).
// Side effect: prints one line per candidate. NOTE(review): the gain
// parameter is currently unused. The error result is always nil in practice.
func KC2means(rawData [][]float64, k int, distanceFunction DistanceFunction, threshold, gain int) ([]int, []Observation, error) {
	var minClusteredData []ClusteredObservation
	var means []Observation
	var err error
	min := math.MaxFloat64
	for clusters := 1; clusters <= k; clusters++ {
		data := make([]ClusteredObservation, len(rawData))
		for ii, jj := range rawData {
			data[ii].Observation = jj
		}
		seeds := seed(data, clusters, distanceFunction)
		clusteredData, _ := kmeans(data, seeds, distanceFunction, threshold)
		counts := make([]int, clusters)
		for _, jj := range clusteredData {
			counts[jj.ClusterNumber]++
		}
		input, synth := &bytes.Buffer{}, &bytes.Buffer{}
		for c := 0; c < clusters; c++ {
			/*err := binary.Write(input, binary.LittleEndian, int64(counts[c]))
			if err != nil {
				panic(err)
			}
			for _, jj := range seeds[c] {
				err = binary.Write(input, binary.LittleEndian, jj)
				if err != nil {
					panic(err)
				}
			}
			err = binary.Write(synth, binary.LittleEndian, int64(counts[c]))
			if err != nil {
				panic(err)
			}
			for _, jj := range seeds[c] {
				err = binary.Write(synth, binary.LittleEndian, jj)
				if err != nil {
					panic(err)
				}
			}*/
			// Accumulate per-dimension squared residuals while serializing
			// the raw member points of cluster c.
			sigma := make([]float64, len(seeds[c]))
			for _, j := range clusteredData {
				if j.ClusterNumber == c {
					for ii, jj := range j.Observation {
						x := jj - seeds[c][ii]
						sigma[ii] += x * x
						err := binary.Write(input, binary.LittleEndian, jj)
						if err != nil {
							panic(err)
						}
					}
				}
			}
			// Convert to standard deviations.
			N := float64(counts[c])
			for i, j := range sigma {
				sigma[i] = math.Sqrt(j / N)
			}
			// Emit 2*count synthetic Gaussian points around the centroid.
			for i := 0; i < 2*counts[c]; i++ {
				for ii, jj := range sigma {
					err := binary.Write(synth, binary.LittleEndian, jj*rand.NormFloat64()+seeds[c][ii])
					if err != nil {
						panic(err)
					}
				}
			}
		}
		// NCD(x, y) = (K(xy) - min(K(x), K(y))) / max(K(x), K(y)).
		x, y := kc(input.Bytes()), kc(synth.Bytes())
		input.Write(synth.Bytes())
		xy := kc(input.Bytes())
		NCD := (xy - math.Min(x, y)) / math.Max(x, y)
		fmt.Printf("%v %v\n", clusters, NCD)
		if NCD < min {
			min, minClusteredData, means = NCD, clusteredData, make([]Observation, len(seeds))
			for ii := range seeds {
				means[ii] = make([]float64, len(seeds[ii]))
				for jj := range seeds[ii] {
					means[ii][jj] = seeds[ii][jj]
				}
			}
		}
	}
	labels := make([]int, len(minClusteredData))
	for ii, jj := range minClusteredData {
		labels[ii] = jj.ClusterNumber
	}
	return labels, means, err
}
// KCMmeans runs k-means for each cluster count 1..k, scores each candidate by
// the compressed size of its serialized model, keeps the clustering with the
// LARGEST compressed size, and additionally records the size trace, computes
// its FFT, and writes three scatter-plot PNGs (fft_real.png, fft_phase.png,
// fft_complex.png) to the working directory. Side effects: file writes and
// one printed line per candidate. The error result is always nil in practice.
func KCMmeans(rawData [][]float64, k int, distanceFunction DistanceFunction, threshold int) ([]int, []Observation, error) {
	var minClusteredData []ClusteredObservation
	var means []Observation
	var err error
	max, trace := int64(0), make([]float64, k)
	for clusters := 1; clusters <= k; clusters++ {
		data := make([]ClusteredObservation, len(rawData))
		for ii, jj := range rawData {
			data[ii].Observation = jj
		}
		seeds := seed(data, clusters, distanceFunction)
		clusteredData, _ := kmeans(data, seeds, distanceFunction, threshold)
		counts := make([]int, clusters)
		for _, jj := range clusteredData {
			counts[jj.ClusterNumber]++
		}
		// Serialize counts, centroids and residuals for this candidate.
		input := &bytes.Buffer{}
		for c := 0; c < clusters; c++ {
			/*err := binary.Write(input, binary.LittleEndian, rand.Float64())
			if err != nil {
				panic(err)
			}*/
			err := binary.Write(input, binary.LittleEndian, int64(counts[c]))
			if err != nil {
				panic(err)
			}
			for _, jj := range seeds[c] {
				err = binary.Write(input, binary.LittleEndian, jj)
				if err != nil {
					panic(err)
				}
			}
			for _, j := range clusteredData {
				if j.ClusterNumber == c {
					for ii, jj := range j.Observation {
						err = binary.Write(input, binary.LittleEndian, jj-seeds[c][ii])
						if err != nil {
							panic(err)
						}
					}
				}
			}
		}
		in, output := make(chan []byte, 1), &bytes.Buffer{}
		in <- input.Bytes()
		close(in)
		compress.BijectiveBurrowsWheelerCoder(in).MoveToFrontRunLengthCoder().AdaptiveCoder().Code(output)
		/*output := &bytes.Buffer{}
		writer := lzma.NewWriterLevel(output, lzma.BestCompression)
		writer.Write(input.Bytes())
		writer.Close()*/
		complexity := int64(output.Len())
		trace[clusters-1] = float64(complexity)
		fmt.Printf("%v %v\n", clusters, complexity)
		// NOTE(review): unlike KCmeans this keeps the *largest* compressed
		// size — confirm this inversion is intentional.
		if complexity > max {
			max, minClusteredData, means = complexity, clusteredData, make([]Observation, len(seeds))
			for ii := range seeds {
				means[ii] = make([]float64, len(seeds[ii]))
				for jj := range seeds[ii] {
					means[ii][jj] = seeds[ii][jj]
				}
			}
		}
	}
	// Plot the FFT of the complexity-vs-cluster-count trace (DC bin skipped
	// for the magnitude/phase plots). Note: `complex` shadows the builtin.
	f := fft.FFTReal(trace)
	points, phase, complex := make(plotter.XYs, len(f)-1), make(plotter.XYs, len(f)-1), make(plotter.XYs, len(f))
	for i, j := range f[1:] {
		points[i].X, points[i].Y = float64(i), cmplx.Abs(j)
		phase[i].X, phase[i].Y = float64(i), cmplx.Phase(j)
		complex[i].X, complex[i].Y = real(j), imag(j)
	}
	p, err := plot.New()
	if err != nil {
		panic(err)
	}
	p.Title.Text = "FFT Real"
	p.X.Label.Text = "X"
	p.Y.Label.Text = "Y"
	scatter, err := plotter.NewScatter(points)
	if err != nil {
		panic(err)
	}
	scatter.Shape = draw.CircleGlyph{}
	scatter.Radius = vg.Points(1)
	p.Add(scatter)
	if err := p.Save(8, 8, "fft_real.png"); err != nil {
		panic(err)
	}
	p, err = plot.New()
	if err != nil {
		panic(err)
	}
	p.Title.Text = "FFT Phase"
	p.X.Label.Text = "X"
	p.Y.Label.Text = "Y"
	scatter, err = plotter.NewScatter(phase)
	if err != nil {
		panic(err)
	}
	scatter.Shape = draw.CircleGlyph{}
	scatter.Radius = vg.Points(1)
	scatter.Color = color.RGBA{0, 0, 255, 255}
	p.Add(scatter)
	if err := p.Save(8, 8, "fft_phase.png"); err != nil {
		panic(err)
	}
	p, err = plot.New()
	if err != nil {
		panic(err)
	}
	p.Title.Text = "FFT Complex"
	p.X.Label.Text = "X"
	p.Y.Label.Text = "Y"
	scatter, err = plotter.NewScatter(complex)
	if err != nil {
		panic(err)
	}
	scatter.Shape = draw.CircleGlyph{}
	scatter.Radius = vg.Points(1)
	scatter.Color = color.RGBA{0, 0, 255, 255}
	p.Add(scatter)
	if err := p.Save(8, 8, "fft_complex.png"); err != nil {
		panic(err)
	}
	labels := make([]int, len(minClusteredData))
	for ii, jj := range minClusteredData {
		labels[ii] = jj.ClusterNumber
	}
	return labels, means, err
}
// KCSmeans runs k-means for each cluster count 1..k and, for every candidate,
// prints the compressed size of a serialization in which member residuals are
// written in hyperspherical coordinates (radius plus width-1 angles) rather
// than Cartesian ones. No selection is performed: the labels of the LAST
// (k-cluster) run are returned. Side effect: prints one line per candidate.
// NOTE(review): assumes every observation has at least 2 dimensions on the
// width != 1 path (x[1] is indexed unconditionally) — confirm callers.
func KCSmeans(rawData [][]float64, k int, distanceFunction DistanceFunction, threshold int) ([]int, error) {
	var clusteredData []ClusteredObservation
	var err error
	for clusters := 1; clusters <= k; clusters++ {
		data := make([]ClusteredObservation, len(rawData))
		for ii, jj := range rawData {
			data[ii].Observation = jj
		}
		seeds := seed(data, clusters, distanceFunction)
		clusteredData, err = kmeans(data, seeds, distanceFunction, threshold)
		counts := make([]int, clusters)
		for _, jj := range clusteredData {
			counts[jj.ClusterNumber]++
		}
		input, width := &bytes.Buffer{}, len(seeds[0])
		x := make([]float64, width)
		for c := 0; c < clusters; c++ {
			err := binary.Write(input, binary.LittleEndian, rand.Float64())
			if err != nil {
				panic(err)
			}
			err = binary.Write(input, binary.LittleEndian, int64(counts[c]))
			if err != nil {
				panic(err)
			}
			for _, jj := range seeds[c] {
				err = binary.Write(input, binary.LittleEndian, jj)
				if err != nil {
					panic(err)
				}
			}
			for _, j := range clusteredData {
				if j.ClusterNumber == c {
					/*distance, _ := distanceFunction(j.Observation, seeds[c])
					err = binary.Write(input, binary.LittleEndian, distance)
					if err != nil {
						panic(err)
					}*/
					// Cartesian residual of the point from its centroid.
					for ii, jj := range j.Observation {
						x[ii] = jj - seeds[c][ii]
					}
					if width == 1 {
						err = binary.Write(input, binary.LittleEndian, x[0])
						if err != nil {
							panic(err)
						}
					} else {
						// Radius of the residual vector.
						r := 0.0
						for _, i := range x {
							r += i * i
						}
						err = binary.Write(input, binary.LittleEndian, math.Sqrt(r))
						if err != nil {
							panic(err)
						}
						// First angle, folded into [0, 2*pi).
						t := math.Acos(x[1] / math.Sqrt(x[0]*x[0]+x[1]*x[1]))
						if t < 0 {
							t = 2*math.Pi - t
						}
						err = binary.Write(input, binary.LittleEndian, t)
						if err != nil {
							panic(err)
						}
						// Remaining hyperspherical angles.
						for i := 2; i < width; i++ {
							r = 0.0
							for _, j := range x[:i+1] {
								r += j * j
							}
							err = binary.Write(input, binary.LittleEndian, math.Acos(x[i]/math.Sqrt(r)))
							if err != nil {
								panic(err)
							}
						}
					}
				}
			}
		}
		in, output := make(chan []byte, 1), &bytes.Buffer{}
		in <- input.Bytes()
		close(in)
		compress.BijectiveBurrowsWheelerCoder(in).MoveToFrontRunLengthCoder().AdaptiveCoder().Code(output)
		fmt.Printf("%v %v\n", clusters, output.Len())
	}
	labels := make([]int, len(clusteredData))
	for ii, jj := range clusteredData {
		labels[ii] = jj.ClusterNumber
	}
	return labels, err
}
} | kmeans.go | 0.594787 | 0.529263 | kmeans.go | starcoder |
package util
import (
"math"
"time"
)
// DateDiffResult holds a calendar difference between two instants, broken
// down into component units (as produced by DateDiff).
type DateDiffResult struct {
	Year,
	Month,
	Day,
	Hour,
	Min,
	Sec int
}
// AgeAt gets the age of an entity at a certain time.
func AgeAt(birthDate time.Time, now time.Time) int {
years := now.Year() - birthDate.Year()
birthDay := getAdjustedBirthDay(birthDate, now)
if now.YearDay() < birthDay {
years -= 1
}
return years
}
// Age is shorthand for AgeAt(birthDate, time.Now()), and carries the same usage and limitations.
func Age(birthDate time.Time) int {
return AgeAt(birthDate, time.Now())
}
// Gets the adjusted date of birth to work around leap year differences.
func getAdjustedBirthDay(birthDate time.Time, now time.Time) int {
birthDay := birthDate.YearDay()
currentDay := now.YearDay()
if isLeap(birthDate) && !isLeap(now) && birthDay >= 60 {
return birthDay - 1
}
if isLeap(now) && !isLeap(birthDate) && currentDay >= 60 {
return birthDay + 1
}
return birthDay
}
// Works out if a time.Time is in a leap year.
func isLeap(date time.Time) bool {
year := date.Year()
if year%400 == 0 {
return true
} else if year%100 == 0 {
return false
} else if year%4 == 0 {
return true
}
return false
}
// BirthdayInfo reports, for the given date of birth, the next occurrence of
// the birthday (midnight in bornAt's location), the number of whole days
// until it, and the current age.
//
// Fix: the original bumped the year only when the current month was strictly
// later than the birth month, so a birthday earlier in the *current* month
// yielded a nextAt in the past (and a bogus daysLeft).
func BirthdayInfo(bornAt time.Time) (nextAt time.Time, daysLeft, currentAge int) {
	_, bMonth, bDay := bornAt.Date()
	now := time.Now()
	loc := bornAt.Location()
	// This year's occurrence of the birthday.
	nextAt = time.Date(now.Year(), bMonth, bDay, 0, 0, 0, 0, loc)
	// If it has already passed (strictly before today's midnight), the next
	// occurrence is next year.
	// NOTE(review): today's midnight is built from local Y/M/D but placed in
	// bornAt's location; for a bornAt in a far-away zone this can be off by a
	// day at zone boundaries — confirm callers always use local births.
	today := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, loc)
	if nextAt.Before(today) {
		nextAt = time.Date(now.Year()+1, bMonth, bDay, 0, 0, 0, 0, loc)
	}
	daysLeft = DaysBetween(nextAt, now)
	currentAge = Age(bornAt)
	return
}
func DaysBetween(a, b time.Time) int {
if a.After(b) {
a, b = b, a
}
if b.Sub(a).Hours()/24.0 < 1 {
return 0
}
return int(math.Ceil(b.Sub(a).Hours() / 24.0))
}
// DateDiff returns the calendar difference between a and b broken down into
// year/month/day/hour/min/sec components. Arguments may be given in either
// order; the result is always the non-negative span between them.
func DateDiff(a, b time.Time) DateDiffResult {
	// Compare both instants in the same location so Date()/Clock() agree.
	if a.Location() != b.Location() {
		b = b.In(a.Location())
	}
	// Normalize so that a <= b.
	if a.After(b) {
		a, b = b, a
	}
	y1, mo1, d1 := a.Date()
	y2, mo2, d2 := b.Date()
	h1, m1, s1 := a.Clock()
	h2, m2, s2 := b.Clock()
	year := y2 - y1
	month := int(mo2 - mo1)
	day := d2 - d1
	hour := h2 - h1
	min := m2 - m1
	sec := s2 - s1
	// Borrow from the next-larger unit wherever a component went negative.
	if sec < 0 {
		sec += 60
		min--
	}
	if min < 0 {
		min += 60
		hour--
	}
	if hour < 0 {
		hour += 24
		day--
	}
	if day < 0 {
		// days in month:
		// Day 32 of month mo1 normalizes into the next month, so 32 minus
		// the normalized day-of-month equals the number of days in mo1.
		t := time.Date(y1, mo1, 32, 0, 0, 0, 0, time.UTC)
		day += 32 - t.Day()
		month--
	}
	if month < 0 {
		month += 12
		year--
	}
	return DateDiffResult{
		Year: year,
		Month: month,
		Day: day,
		Hour: hour,
		Min: min,
		Sec: sec,
	}
}
} | util/date.go | 0.645567 | 0.521715 | date.go | starcoder |
// Package shist provides functions for computing a histogram of values of an
// image, and for computing and rendering a 2-dimensional histogram of values of
// a complex or ComplexInt32 gradient image.
package shist
import (
"fmt"
"image"
"math"
"math/bits"
)
import (
. "github.com/Causticity/sipp/scomplex"
. "github.com/Causticity/sipp/simage"
)
// SippHist is a 2-dimensional histogram of the values in a complex gradient
// image. One dimension spans the real axis, the other the imaginary axis.
type SippHist struct {
	// A reference to the gradient image we are computing from
	Grad *ComplexImage
	// These should be odd so that there is always a centre point.
	width, height uint32
	// The histogram data, in row-major order (width*height bins).
	// NOTE(review): remains nil when Hist takes the sparse path.
	Bin []uint32
	// The index of the histogram bin for each gradient image pixel.
	BinIndex []int
	// The maximum bin value in the histogram.
	Max uint32
	// A suppressed version of the histogram, stored as floats for subsequent
	// computation. Populated lazily by suppress().
	suppressed []float64
	// The maximum suppressed value, stored as a float for subsequent
	// computation.
	maxSuppressed float64
}
// Size returns the histogram's width and height, in bins.
func (hist *SippHist) Size() (uint32, uint32) {
	return hist.width, hist.height
}
// Bin counts for greyscale histograms at 8 and 16 bits per pixel.
const greyHistSize8BPP = 256
const greyHistSize16BPP = 65536
// GreyHist computes a 1D histogram of the greyscale values in the image.
// 16-bpp images get 65536 bins (big-endian sample pairs); everything else
// gets 256 bins.
func GreyHist(im SippImage) (hist []uint32) {
	bins := greyHistSize8BPP
	sixteen := im.Bpp() == 16
	if sixteen {
		bins = greyHistSize16BPP
	}
	hist = make([]uint32, bins)
	pix := im.Pix()
	bounds := im.Bounds()
	for y := 0; y < bounds.Dy(); y++ {
		for x := 0; x < bounds.Dx(); x++ {
			offset := im.PixOffset(x, y)
			val := uint16(pix[offset])
			if sixteen {
				// High byte first, then the following low byte.
				val = val<<8 | uint16(pix[offset+1])
			}
			hist[val]++
		}
	}
	return
}
// sparseHistogramEntrySize is the number of uint32s per histogram entry.
// The size in bytes (or uint32s) of a Go map is not easy to determine, but the
// number of buckets is always a power of 2, so as a rough estimate, we'll take
// the minimum size of an entry to be the size of the complex128 index (4 uint32s)
// plus the count (1 uint32) plus a 64-bit pointer for overhead (2 uint32s). The
// last of these is just a wild guess. Then we multiply this entry size by the
// number of pixels rounded up to the next power of 2 to get an estimate of the
// sparse histogram size.
const sparseHistogramEntrySize = 4 + 1 + 2 // See above
// flatBinSize is the cost of one bin in a flat histogram, in uint32s.
const flatBinSize = 1
// upToNextPowerOf2 returns the smallest power of 2 that is >= n, or panics
// for inputs that are invalid (0 or 1) or whose next power of 2 would
// overflow a uint32 (top bit already set).
func upToNextPowerOf2(n uint32) uint32 {
	// Inputs of 0 and 1 should never reach this function.
	if n <= 1 {
		panic(1)
	}
	// A single set bit means n already is a power of 2.
	if bits.OnesCount32(n) == 1 {
		return n
	}
	// One bit position above the highest set bit of n.
	shift := 32 - bits.LeadingZeros32(n)
	if shift == 32 {
		// Highest bit already set: the next power of 2 would overflow.
		panic(1)
	}
	return 1 << shift
}
// All images with excursions <= this will use the flat version, in order to
// avoid the computational overhead of sparse histograms for 8-bit and
// low-excursion 16-bit images, even though the sparse histogram would usually
// be smaller. Note that excursion, the distance from 0 along either axis of the
// complex plane, is always positive.
// Later, this will be used to scale down sparse histograms for rendering, so
// that histogram renderings will never be more that double this value on a side.
const minSparseExcursion = 1024
// Hist computes the 2D histogram from the given gradient image. Bin (u, v)
// counts gradient pixels whose floored real/imaginary parts land there; the
// axes are offset by the maximum excursions so the origin sits at the centre.
func Hist(grad *ComplexImage) (hist *SippHist) {
	hist = new(SippHist)
	hist.Grad = grad
	// The size of a flat histogram is one flatBinSize per bin. The number
	// of bins is the product of the histogram width and height. The width and
	// height are twice the maximum excursion on the real and imaginary axes,
	// respectively, plus one to ensure that the width and height are odd so
	// that there is always a single central bin in both dimensions.
	maxRealExcursion := uint32(math.Max(math.Abs(grad.MaxRe), math.Abs(grad.MinRe)))
	maxImagExcursion := uint32(math.Max(math.Abs(grad.MaxIm), math.Abs(grad.MinIm)))
	maxExcursion := uint32(math.Max(float64(maxRealExcursion), float64(maxImagExcursion)))
	hist.width = maxRealExcursion * 2 + 1 // Ensure both are odd
	hist.height = maxImagExcursion * 2 + 1
	flatHistSize := hist.width * hist.height * flatBinSize
	nPix := uint32(len(grad.Pix))
	// Compute the size of the regular histogram and the maximum size of a sparse
	// histogram and use the smaller version
	// The maximum size of a sparse histogram is one sparseHistogramEntry per
	// gradient pixel, but Go maps always have a power of 2 number of entries.
	// See the comment for sparseHistogramEntrySize above.
	numMapEntries := upToNextPowerOf2(nPix)
	maxSparseSize := uint32(sparseHistogramEntrySize * numMapEntries)
	//fmt.Println("flat histogram width, height: ", width, height)
	//fmt.Println("maxSparseSize:", maxSparseSize, ", flatHistSize:", flatHistSize)
	if maxExcursion > minSparseExcursion && maxSparseSize < flatHistSize {
		// Use a sparse histogram
		fmt.Println("Using sparse histogram")
		// TODO: No other code uses this yet.
		// A sparse histogram is a map of actually occurring values.
		// NOTE(review): the sparse map is a local and is discarded when this
		// function returns — only hist.Max survives. On this path hist.Bin
		// and hist.BinIndex stay nil, so e.g. suppress() would panic.
		// This branch appears incomplete; confirm before relying on it.
		sparse := make(map[complex128]uint32)
		for _, pixel := range grad.Pix {
			v := sparse[pixel]
			v++ // v is 0 for the empty initial case, so this always works
			sparse[pixel] = v
			if v > hist.Max {
				hist.Max = v
			}
		}
	} else {
		// Use a flat histogram
		fmt.Println("Using flat histogram")
		histDataSize := int(hist.width) * int(hist.height) // Always odd
		hist.Bin = make([]uint32, histDataSize)
		hist.BinIndex = make([]int, nPix)
		// Walk through the image, computing the bin address from the gradient
		// values storing the bin address in BinIndex and incrementing the bin.
		// Save the maximum bin value as well.
		for i, pixel := range grad.Pix {
			u := int(math.Floor(real(pixel))) + int(maxRealExcursion)
			v := int(math.Floor(imag(pixel))) + int(maxImagExcursion)
			hist.BinIndex[i] = v*int(hist.width) + u
			hist.Bin[hist.BinIndex[i]]++
			if hist.Bin[hist.BinIndex[i]] > hist.Max {
				hist.Max = hist.Bin[hist.BinIndex[i]]
			}
		}
		//fmt.Println("Histogram complete. Maximum bin value:", hist.Max)
	}
	return
}
// supScale returns the ratio of the Euclidean distance from (x, y) to the
// centre of a width-by-height grid, over maxDist. Width and height are
// assumed to be odd so that the grid has an exact central cell.
func supScale(x, y, width, height int, maxDist float64) float64 {
	cx, cy := (width-1)/2, (height-1)/2
	return math.Hypot(float64(x-cx), float64(y-cy)) / maxDist
}
// suppress lazily computes a distance-weighted copy of the histogram bins,
// damping the spike near the origin: each bin value is scaled by its
// distance from the central bin relative to hist.Grad.MaxMod. The result
// and its maximum are cached in hist.suppressed and hist.maxSuppressed;
// repeated calls are no-ops.
func (hist *SippHist) suppress() {
	if hist.suppressed != nil {
		return // already computed
	}
	w, h := int(hist.width), int(hist.height)
	hist.suppressed = make([]float64, w*h)
	hist.maxSuppressed = 0
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			i := y*w + x
			weighted := float64(hist.Bin[i]) * supScale(x, y, w, h, hist.Grad.MaxMod)
			hist.suppressed[i] = weighted
			if weighted > hist.maxSuppressed {
				hist.maxSuppressed = weighted
			}
		}
	}
}
// RenderSuppressed returns the distance-suppressed histogram as an 8-bit
// grayscale image of the same size as the histogram, linearly rescaled so
// that the largest suppressed value maps to 255.
func (hist *SippHist) RenderSuppressed() SippImage {
	hist.suppress()
	width, height := hist.Size()
	img := new(SippGray)
	img.Gray = image.NewGray(image.Rect(0, 0, int(width), int(height)))
	pix := img.Pix()
	scale := 255.0 / hist.maxSuppressed
	for i, v := range hist.suppressed {
		pix[i] = uint8(v * scale)
	}
	return img
}
// Render renders the histogram by clipping all values to 255. Returns an 8-bit
// grayscale image.
func (hist *SippHist) Render() SippImage {
// Here we will generate an 8-bit output image of the same size as the
// histogram, clipped to 255.
width, height := hist.Size()
//var scale float64 = 255.0 / float64(hist.Max)
//fmt.Println("Render scale factor:", scale)
rnd := new(SippGray)
rnd.Gray = image.NewGray(image.Rect(0, 0, int(width), int(height)))
rndPix := rnd.Pix()
for index, val := range hist.Bin {
if val > 255 {
val = 255
}
rndPix[index] = uint8(val)
}
return rnd
} | shist/shist.go | 0.794624 | 0.788685 | shist.go | starcoder |
package main
import (
"bytes"
. "specify"
t "./_test/specify"
)
// init registers specs for the example matchers. Describe/It come from the
// dot-imported specify package; the matchers under test (Be, BeNil, BeFalse,
// BeTrue, BeEqualTo) come from the locally aliased package t.
func init() {
	// Be: pointer (reference) identity, independent of the pointed-to values.
	Describe("Be", func() {
		It("should match reference equality", func(e Example) {
			var a, b int
			e.Value(&a).Should(t.Be(&a))
			e.Value(&a).ShouldNot(t.Be(&b))
		})
		It("should not care about the value", func(e Example) {
			a := 42
			b := 42
			e.Value(&a).ShouldNot(t.Be(&b))
		})
	})
	// BeNil: matches only a literal nil, never zero values such as 0 or false.
	Describe("BeNil", func() {
		It("should match nil", func(e Example) { e.Value(nil).Should(t.BeNil()) })
		It("should not match non-nil values", func(e Example) { e.Value(42).ShouldNot(t.BeNil()) })
		It("should not match zero", func(e Example) { e.Value(0).ShouldNot(t.BeNil()) })
		It("should not match false", func(e Example) { e.Value(false).ShouldNot(t.BeNil()) })
	})
	// BeFalse / BeTrue: strict boolean identity, no truthiness coercion.
	Describe("BeFalse", func() {
		It("should match false", func(e Example) { e.Value(false).Should(t.BeFalse()) })
		It("should not match true", func(e Example) { e.Value(true).ShouldNot(t.BeFalse()) })
		It("should not match nil", func(e Example) { e.Value(nil).ShouldNot(t.BeFalse()) })
		It("should not match zero", func(e Example) { e.Value(0).ShouldNot(t.BeFalse()) })
		It("should not match other values", func(e Example) { e.Value(42).ShouldNot(t.BeFalse()) })
	})
	Describe("BeTrue", func() {
		It("should match true", func(e Example) { e.Value(true).Should(t.BeTrue()) })
		It("should not match false", func(e Example) { e.Value(false).ShouldNot(t.BeTrue()) })
		It("should not match other values", func(e Example) { e.Value(42).ShouldNot(t.BeTrue()) })
	})
	// BeEqualTo: value equality, deferring to an EqualTo method when the
	// expected value provides one (see bslice in this file).
	Describe("BeEqualTo", func() {
		It("should match numbers", func(e Example) {
			e.Value(1).Should(t.BeEqualTo(1))
			e.Value(1.2).ShouldNot(t.BeEqualTo(2.1))
		})
		It("should match strings", func(e Example) {
			e.Value("foo").Should(t.BeEqualTo("foo"))
			e.Value("Doctor").ShouldNot(t.BeEqualTo("Donna"))
		})
		It("should match things with EqualTo()", func(e Example) { e.Value([]byte{1, 2}).Should(t.BeEqualTo(bslice([]byte{1, 2}))) })
	})
}
// bslice is a []byte wrapper that supplies an EqualTo method, used by the
// BeEqualTo matcher spec above.
type bslice []byte

// EqualTo reports whether value is a []byte with the same contents.
func (b bslice) EqualTo(value interface{}) bool {
	other, ok := value.([]byte)
	return ok && bytes.Equal([]byte(b), other)
}
package main
import (
"fmt"
"io"
)
// SoundFontInfo holds the metadata parsed from a SoundFont file's INFO-list
// chunk. String fields are stored exactly as read, including their trailing
// zero-byte terminators.
type SoundFontInfo struct {
	// SfVersion identifies the SoundFont specification version level to which the file complies.
	// e.g. 2.1
	SfVersion struct {
		Major, Minor uint16
	} // made from the ifil subchunk
	// Engine is a mandatory field identifying the wavetable sound engine for which the file was optimized.
	// It contains an ASCII string of 256 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even.
	Engine string // made from the isng subchunk
	// Name is a mandatory field providing the name of the SoundFont compatible bank.
	// It contains an ASCII string of 256 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even.
	// e.g. "General MIDI\0\0"
	Name string // made from the INAM subchunk
	// ROM is an optional field identifying a particular wavetable sound data ROM to which any ROM samples refer.
	// It contains an ASCII string of 256 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even. Both ROM and ROMVer must be present if either is present.
	ROM string // made from the IROM subchunk
	// ROMVer is an optional field identifying the particular wavetable sound data ROM revision to which any
	// ROM samples refer. Both ROM and ROMVer must be present if either is present.
	// e.g. 1.0
	ROMVer struct {
		Major, Minor uint16
	} // made from the IVER subchunk
	// CreationDate is an optional field identifying the creation date of the SoundFont compatible bank.
	// It contains an ASCII string of 256 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even.
	// Conventionally, the format of the string is “Month Day, Year”
	// e.g. "January 1, 2000"
	CreationDate string // made from the ICRD subchunk
	// Engineers is an optional field identifying the engineers who created the SoundFont compatible bank.
	// It contains an ASCII string of 256 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even.
	// e.g. "<NAME>\0\0"
	Engineers string // made from the IENG subchunk
	// Product is an optional field identifying any specific product for which the SoundFont compatible bank is intended.
	// It contains an ASCII string of 256 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even.
	// e.g. "SBAWE32\0\0"
	Product string // made from the IPRD subchunk
	// Copyright is an optional field containing any copyright assertion string associated with the SoundFont compatible bank.
	// It contains an ASCII string of 256 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even.
	// e.g. "Copyright (c) 1994-95, <NAME>. All rights reserved.\0"
	Copyright string // made from the ICOP subchunk
	// Comments is an optional field containing any comments associated with the SoundFont compatible bank.
	// It contains an ASCII string of 65,536 or fewer bytes including one or two terminators of value zero, so as to make
	// the total byte count even.
	// e.g. "This space unintentionally left blank.\0\0"
	Comments string // made from the ICMT subchunk
	// Software is an optional field identifying the SoundFont compatible tools used to create and most recently
	// modify the SoundFont compatible bank. It contains an ASCII string of 256 or fewer bytes including one or two
	// terminators of value zero, so as to make the total byte count even.
	// e.g. "Sonic Foundry's SoundFont Editor v2.01\0\0"
	Software string // made from the IFST subchunk
}
// String implements fmt.Stringer, rendering every INFO field on its own line
// for debugging. Note that the ROMVer field is labelled "IVER" after the
// subchunk it is parsed from.
func (info SoundFontInfo) String() string {
	return fmt.Sprintf("SoundFontInfo{\n\tSfVersion: %d.%d\n\tEngine: %q\n\tName: %q\n\tROM: %q\n\tIVER: %d.%d\n\tCreationDate: %q\n\tEngineers: %q\n\tProduct: %q\n\tCopyright: %q\n\tComments: %q\n\tSoftware: %q\n\t}",
		info.SfVersion.Major,
		info.SfVersion.Minor,
		info.Engine,
		info.Name,
		info.ROM,
		info.ROMVer.Major,
		info.ROMVer.Minor,
		info.CreationDate,
		info.Engineers,
		info.Product,
		info.Copyright,
		info.Comments,
		info.Software)
}
// ReadSoundFontInfo parses a SoundFont info list.
func ReadSoundFontInfo(r io.Reader) (*SoundFontInfo, error) {
info := &SoundFontInfo{}
// TODO refactor this out
// read "INFO" from the "LIST" header
ok, err := Expect(r, []byte{'I', 'N', 'F', 'O'})
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("expected \"INFO\"")
}
// Keep track of known chunks and if we've seen them already
infoChunks := make(map[[4]byte]bool)
infoChunks[[4]byte{'i', 'f', 'i', 'l'}] = false
infoChunks[[4]byte{'i', 's', 'n', 'g'}] = false
infoChunks[[4]byte{'I', 'N', 'A', 'M'}] = false
infoChunks[[4]byte{'i', 'r', 'o', 'm'}] = false
infoChunks[[4]byte{'i', 'v', 'e', 'r'}] = false
infoChunks[[4]byte{'I', 'C', 'R', 'D'}] = false
infoChunks[[4]byte{'I', 'E', 'N', 'G'}] = false
infoChunks[[4]byte{'I', 'P', 'R', 'D'}] = false
infoChunks[[4]byte{'I', 'C', 'O', 'P'}] = false
infoChunks[[4]byte{'I', 'C', 'M', 'T'}] = false
infoChunks[[4]byte{'I', 'S', 'F', 'T'}] = false
for {
// parse a chunk
var chunk chunk
if err := chunk.parse(r); err != nil {
if err == io.EOF {
break
}
return nil, err
}
// check if we know how to parse this chunk and if we've seen it already
seen, ok := infoChunks[chunk.id]
if !ok {
// skip unknown chunks
fmt.Println("unknown chunk", chunk.id)
continue
}
if seen {
return nil, fmt.Errorf("duplicate chunk %v", chunk.id)
}
infoChunks[chunk.id] = true
// make sense of the chunk
switch chunk.id {
case [4]byte{'i', 'f', 'i', 'l'}:
// must contain 4 bytes
if chunk.size != 4 {
return nil, fmt.Errorf("ifil subchunk must contain 4 bytes")
}
// first 2 bytes represent the major version number
info.SfVersion.Major = uint16(chunk.data[1])<<8 | uint16(chunk.data[0])
// last 2 bytes represent the minor version number
info.SfVersion.Minor = uint16(chunk.data[3])<<8 | uint16(chunk.data[2])
case [4]byte{'i', 's', 'n', 'g'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("isng subchunk must contain 256 or fewer bytes")
}
info.Engine = string(chunk.data)
case [4]byte{'I', 'N', 'A', 'M'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("Inam subchunk must contain 256 or fewer bytes")
}
info.Name = string(chunk.data)
case [4]byte{'i', 'r', 'o', 'm'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("irom subchunk must contain 256 or fewer bytes")
}
info.ROM = string(chunk.data)
case [4]byte{'i', 'v', 'e', 'r'}:
// must contain 4 bytes
if chunk.size != 4 {
return nil, fmt.Errorf("iver subchunk must contain 4 bytes")
}
// first 2 bytes represent the major version number
info.ROMVer.Major = uint16(chunk.data[1])<<8 | uint16(chunk.data[0])
// last 2 bytes represent the minor version number
info.ROMVer.Minor = uint16(chunk.data[3])<<8 | uint16(chunk.data[2])
case [4]byte{'I', 'C', 'R', 'D'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("ICRD subchunk must contain 256 or fewer bytes")
}
info.CreationDate = string(chunk.data)
case [4]byte{'I', 'E', 'N', 'G'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("IENG subchunk must contain 256 or fewer bytes")
}
info.Engineers = string(chunk.data)
case [4]byte{'I', 'P', 'R', 'D'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("IPRD subchunk must contain 256 or fewer bytes")
}
info.Product = string(chunk.data)
case [4]byte{'I', 'C', 'O', 'P'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("ICOP subchunk must contain 256 or fewer bytes")
}
info.Copyright = string(chunk.data)
case [4]byte{'I', 'C', 'M', 'T'}:
// must contain 65536 of fewer bytes
if chunk.size > 65536 {
return nil, fmt.Errorf("ICMT subchunk must contain 65536 or fewer bytes")
}
info.Comments = string(chunk.data)
case [4]byte{'I', 'S', 'F', 'T'}:
// must contain 256 of fewer bytes
if chunk.size > 256 {
return nil, fmt.Errorf("ISFT subchunk must contain 256 or fewer bytes")
}
info.Software = string(chunk.data)
}
}
// If the ifil sub-chunk is missing, or its size is not four bytes, the file should be rejected as structurally unsound.
if ok := infoChunks[[4]byte{'i', 'f', 'i', 'l'}]; !ok {
return nil, fmt.Errorf("ifil chunk is missing")
}
// If the isng sub-chunk is missing, or is not terminated with a zero valued byte, or its contents are an unknown sound engine,
// the field should be ignored and EMU8000 assumed.
if ok := infoChunks[[4]byte{'i', 's', 'n', 'g'}]; !ok {
info.Engine = "EMU8000"
}
return info, nil
} | info.go | 0.531939 | 0.482856 | info.go | starcoder |
package aoc2019
import (
"context"
"fmt"
"io/ioutil"
"math"
"strings"
"github.com/pkg/errors"
)
// day15TileType describes the contents of one map cell. The first three
// values double as the repair droid's status codes as returned by the
// intcode program (0 = hit a wall, 1 = moved onto floor, 2 = moved onto the
// oxygen system); unknown marks unexplored cells.
type day15TileType int64

const (
	day15TileTypeWall day15TileType = iota
	day15TileTypeFloor
	day15TileTypeOxygen
	day15TileTypeUnknown
)
// day15Tile is one explored cell of the ship map.
type day15Tile struct {
	X, Y int64
	Type day15TileType
	// distFromStart is the shortest known distance from the flood-fill
	// origin; initialised to math.MaxInt64 until annotateDistance runs.
	distFromStart int64
}

// key returns the grid map key ("x:y") for the tile's coordinates.
func (d day15Tile) key() string { return fmt.Sprintf("%d:%d", d.X, d.Y) }

// day15Grid maps "x:y" coordinate keys to explored tiles.
type day15Grid map[string]*day15Tile
// annotateDistance recursively flood-fills shortest-path distances (in
// steps) from the tile at (x, y) into distFromStart. Only floor and oxygen
// tiles are annotated; wall tiles terminate the recursion. Tiles must have
// been created with distFromStart == math.MaxInt64 for the relaxation check
// below to work.
//
// NOTE(review): getTile is required to return a non-nil tile here, so the
// grid must be fully enclosed by recorded wall tiles before calling this —
// otherwise the recursion panics at the map edge.
func (d day15Grid) annotateDistance(x, y, dist int64) {
	var t = d.getTile(x, y)
	if t == nil {
		panic("Access to non-existent tile")
	}
	if t.Type != day15TileTypeFloor && t.Type != day15TileTypeOxygen {
		// Do not annotate distance on walls
		return
	}
	if t.distFromStart <= dist {
		// A shorter (or equal) path already reached this tile; stop.
		return
	}
	// Record the improved distance.
	t.distFromStart = dist
	// Recurse into the four neighbours with one extra step.
	d.annotateDistance(x-1, y, dist+1)
	d.annotateDistance(x+1, y, dist+1)
	d.annotateDistance(x, y-1, dist+1)
	d.annotateDistance(x, y+1, dist+1)
}
// bounds returns the inclusive bounding box of all tiles in the grid.
//
// Fix: the maxima previously started at their zero value 0, so a grid lying
// entirely in negative coordinates would wrongly report a maximum of 0. They
// now start at math.MinInt64, mirroring the minima. For an empty grid the
// returned minima exceed the maxima, so min..max loops simply do nothing.
func (d day15Grid) bounds() (minX, minY, maxX, maxY int64) {
	minX, minY = math.MaxInt64, math.MaxInt64
	maxX, maxY = math.MinInt64, math.MinInt64
	for _, t := range d {
		if t.X < minX {
			minX = t.X
		}
		if t.X > maxX {
			maxX = t.X
		}
		if t.Y < minY {
			minY = t.Y
		}
		if t.Y > maxY {
			maxY = t.Y
		}
	}
	return
}
// getTile returns the tile at (x, y), or nil if that position has not been
// recorded yet.
func (d day15Grid) getTile(x, y int64) *day15Tile {
	// A missing map key yields the zero value, which for *day15Tile is nil,
	// so the explicit comma-ok lookup is unnecessary.
	return d[day15Tile{X: x, Y: y}.key()]
}

// getTileType returns the type of the tile at (x, y), or
// day15TileTypeUnknown for unexplored positions.
func (d day15Grid) getTileType(x, y int64) day15TileType {
	if v := d.getTile(x, y); v != nil {
		return v.Type
	}
	return day15TileTypeUnknown
}
// print writes an ASCII rendering of the explored grid to stdout: '@' marks
// the droid's start at the origin, '.' floor, 'X' the oxygen system, a full
// block a wall, and a shaded block an unexplored cell.
func (d day15Grid) print() {
	minX, minY, maxX, maxY := d.bounds()
	for y := minY; y <= maxY; y++ {
		for x := minX; x <= maxX; x++ {
			if x == 0 && y == 0 {
				fmt.Printf("@")
				continue
			}
			t := d.getTileType(x, y)
			switch t {
			case day15TileTypeFloor:
				fmt.Printf(".")
			case day15TileTypeWall:
				fmt.Printf("\u2588") // full block
			case day15TileTypeOxygen:
				fmt.Printf("X")
			case day15TileTypeUnknown:
				fmt.Printf("\u2593") // dark shade
			}
		}
		fmt.Println()
	}
}
// day15ScanGrid runs the repair-droid intcode program and explores the ship
// by repeatedly issuing movement commands and recording the reported tile,
// following walls until the droid arrives back at the origin (0, 0). It
// returns the resulting map of explored tiles.
func day15ScanGrid(code []int64) (day15Grid, error) {
	var (
		grid       = make(day15Grid)
		posX, posY int64
		rotation   int64 = 1 // next movement command to issue; starts with direction 1
	)
	// recordPosition stores the tile one step in the current direction. On a
	// successful move it also commits the new droid position (via the
	// deferred assignment). It reports whether that tile was already known
	// with the same type.
	recordPosition := func(success bool, tile day15TileType) bool {
		var nPosX, nPosY = posX, posY
		if success {
			defer func() { posX, posY = nPosX, nPosY }()
		}
		// Direction encoding as used here: commands 1/2 step along X,
		// commands 3/4 step along Y.
		switch rotation {
		case 1:
			nPosX -= 1
		case 2:
			nPosX += 1
		case 3:
			nPosY -= 1
		case 4:
			nPosY += 1
		}
		if t := grid.getTileType(nPosX, nPosY); t == tile {
			return true
		}
		nT := day15Tile{X: nPosX, Y: nPosY, Type: tile, distFromStart: math.MaxInt64}
		grid[nT.key()] = &nT
		return false
	}
	// rotate advances the direction through one of two fixed turn rings —
	// one used after hitting a wall, the other after a successful move —
	// which together implement the wall-following exploration.
	// NOTE(review): the boolean is named "forward" but the wall case below
	// passes false and the move case true; the ring tables encode the turn
	// direction — verify naming against intent.
	rotate := func(forward bool) {
		var (
			fr = []int64{1, 4, 2, 3}
			br = []int64{1, 3, 2, 4}
			r  []int64
		)
		if forward {
			r = fr
		} else {
			r = br
		}
		nP := int64IndexOf(r, rotation) + 1
		if nP == len(r) {
			nP = 0
		}
		rotation = r[nP]
	}
	var (
		in  = make(chan int64)
		out = make(chan int64)
	)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go executeIntcodeWithParams(intcodeParams{
		Code:    code,
		Context: ctx,
		In:      in,
		Out:     out,
	})
	// Start by moving
	in <- rotation
	for res := range out {
		var alreadyKnown bool
		switch day15TileType(res) {
		case day15TileTypeWall:
			// Ran into wall, not a successful move; turn and try again.
			alreadyKnown = recordPosition(false, day15TileType(res))
			rotate(false)
		case day15TileTypeFloor, day15TileTypeOxygen:
			// Moved to new tile, successful move; turn to keep hugging the wall.
			alreadyKnown = recordPosition(true, day15TileType(res))
			rotate(true)
		default:
			// Unexpected status code from the program.
			return grid, errors.Errorf("Invalid tile type detected: %d", res)
		}
		_ = alreadyKnown // currently unused
		if posX == 0 && posY == 0 {
			// We've reached a position twice, let's quit the program but not
			// yet the function as the input then will hang
			cancel()
		}
		in <- rotation
	}
	return grid, nil
}
// int64IndexOf returns the index of the first occurrence of e in s, or -1
// when e is not present.
func int64IndexOf(s []int64, e int64) int {
	for i := range s {
		if s[i] == e {
			return i
		}
	}
	return -1
}
// solveDay15Part1 answers part 1: the number of movement commands needed to
// reach the oxygen system from the droid's start. It maps the ship with the
// intcode program read from inFile, flood-fills shortest-path distances from
// the origin, and returns the distance recorded at the oxygen tile.
func solveDay15Part1(inFile string) (int64, error) {
	raw, err := ioutil.ReadFile(inFile)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to read input")
	}
	code, err := parseIntcode(strings.TrimSpace(string(raw)))
	if err != nil {
		return 0, errors.Wrap(err, "Unable to parse Intcode")
	}
	grid, err := day15ScanGrid(code)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to scan grid")
	}
	grid.annotateDistance(0, 0, 0)
	var oxygen *day15Tile
	for _, t := range grid {
		if t.Type == day15TileTypeOxygen {
			oxygen = t
			break
		}
	}
	// If the scan never found the oxygen system this dereferences a nil
	// pointer; the scan is assumed to always discover it.
	return oxygen.distFromStart, nil
}
// solveDay15Part2 answers part 2: the number of minutes for oxygen to fill
// the entire ship. Distances are flood-filled starting from the oxygen
// system; the answer is the largest distance recorded on any floor tile.
func solveDay15Part2(inFile string) (int64, error) {
	raw, err := ioutil.ReadFile(inFile)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to read input")
	}
	code, err := parseIntcode(strings.TrimSpace(string(raw)))
	if err != nil {
		return 0, errors.Wrap(err, "Unable to parse Intcode")
	}
	grid, err := day15ScanGrid(code)
	if err != nil {
		return 0, errors.Wrap(err, "Unable to scan grid")
	}
	// Locate the oxygen system; it is the flood-fill origin this time.
	var oxygenSystem *day15Tile
	for _, t := range grid {
		if t.Type == day15TileTypeOxygen {
			oxygenSystem = t
			break
		}
	}
	grid.annotateDistance(oxygenSystem.X, oxygenSystem.Y, 0)
	// The farthest floor tile from the oxygen system fills last.
	var farthestTile = oxygenSystem
	for _, t := range grid {
		if t.distFromStart > farthestTile.distFromStart && t.Type == day15TileTypeFloor {
			farthestTile = t
		}
	}
	return farthestTile.distFromStart, nil
}
package ringbuffer
import (
"github.com/pkg/errors"
)
const (
	// defaultRingBufferCapacity is used when NewRingBuffer is called with a
	// capacity of 0.
	defaultRingBufferCapacity = 8192
)

// A RingBuffer implements a cyclical buffer, that maintains the last cap bytes written to it, where cap is the capacity
// of the ring buffer.
// This type is not safe to be used concurrently. When using it from multiple goroutines, all accesses have to be
// synchronized externally.
type RingBuffer struct {
	buf      []byte // fixed-size backing storage
	startOfs int    // index of the oldest stored byte
	fill     int    // number of valid bytes currently stored
}
// NewRingBuffer creates and returns a new ring buffer with the given
// capacity. A cap of 0 selects defaultRingBufferCapacity; a negative cap
// panics.
func NewRingBuffer(cap int) *RingBuffer {
	if cap < 0 {
		panic(errors.Errorf("invalid ring buffer capacity %d", cap))
	} else if cap == 0 {
		cap = defaultRingBufferCapacity
	}
	return &RingBuffer{
		buf: make([]byte, cap),
	}
}
// Capacity returns the capacity of the ring buffer, i.e. the maximum number
// of bytes it retains.
func (r *RingBuffer) Capacity() int {
	return len(r.buf)
}

// Size returns the current size of the ring buffer, i.e. the number of
// bytes currently stored. Always <= Capacity().
func (r *RingBuffer) Size() int {
	return r.fill
}
// Reset clears the ring buffer. The callback, if given, is invoked for all data chunks that are cleared from the
// buffer.
func (r *RingBuffer) Reset(cb func([]byte)) {
	// Hand the discarded contents to the callback before dropping them.
	if r.fill > 0 && cb != nil {
		for _, chunk := range r.ReadAll() {
			cb(chunk)
		}
	}
	r.startOfs = 0
	r.fill = 0
}
// ReadAll returns all data stored in the ring buffer, possibly in chunks. The returned chunks are only valid until the
// next call to either `Write` or `Reset`.
func (r *RingBuffer) ReadAll() [][]byte {
	return r.readRaw(r.startOfs, r.fill)
}

// readRaw reads and returns up to `num` bytes of data starting at `startIdx`. As this function is not exposed and only
// called externally, no further validation on the input arguments is performed.
// The result is one slice view into the backing buffer, or two when the
// requested range wraps around the end of the buffer.
func (r *RingBuffer) readRaw(startIdx, num int) [][]byte {
	if num <= 0 {
		return nil
	}
	endIdx := startIdx + num
	if endIdx > len(r.buf) {
		// The range wraps: return the tail segment and the wrapped head.
		return [][]byte{r.buf[startIdx:], r.buf[:endIdx-len(r.buf)]}
	}
	return [][]byte{r.buf[startIdx:endIdx]}
}
// ReadFirst returns the first (oldest) num bytes stored in the ring buffer,
// possibly in chunks. The returned chunks are only valid until the next call
// to either `Write` or `Reset`.
func (r *RingBuffer) ReadFirst(num int) [][]byte {
	if num > r.fill {
		num = r.fill
	}
	return r.readRaw(r.startOfs, num)
}

// Read reads up to num bytes from the ring buffer, starting at a position determined by `from`. If `from` is negative,
// it is interpreted to refer to the position `-from` bytes from the end of the buffer (clamped at position 0, i.e., the
// beginning of the buffer). The returned chunks are only valid until the next call to either `Write` or `Reset`.
func (r *RingBuffer) Read(from, num int) [][]byte {
	if from < 0 {
		// Negative offsets count back from the end of the stored data.
		from = r.fill + from
		if from < 0 {
			from = 0
		}
	}
	if from >= r.fill {
		return nil
	}
	// Clamp the length to the available data past `from`.
	if num > r.fill-from {
		num = r.fill - from
	}
	return r.readRaw((r.startOfs+from)%len(r.buf), num)
}

// ReadLast returns the last (newest) num bytes in the ring buffer, possibly
// in chunks. The returned chunks are only valid until the next call to
// either `Write` or `Reset`.
func (r *RingBuffer) ReadLast(num int) [][]byte {
	if num > r.fill {
		num = r.fill
	}
	return r.readRaw((r.startOfs+r.fill-num)%len(r.buf), num)
}
// Write writes data to the ring buffer, evicting old data if the buffer is full. The callback cb is called for every
// chunk of data that is evicted from the ring buffer, or skipped in the input data because it would not fit. The caller
// must not retain a reference to the data after the callback completes; if the data needs to be retained, it must be
// copied by the caller.
func (r *RingBuffer) Write(data []byte, cb func([]byte)) {
	// Case 1: data alone fills (or overfills) the whole buffer. Everything
	// currently stored is evicted, any excess head of `data` is reported as
	// skipped, and the buffer is rewritten from scratch with the tail.
	if len(data) >= len(r.buf) {
		if cb != nil {
			for _, chunk := range r.ReadAll() {
				cb(chunk)
			}
		}
		overflowLen := len(data) - len(r.buf)
		if overflowLen > 0 && cb != nil {
			cb(data[:overflowLen])
		}
		copy(r.buf, data[overflowLen:])
		r.startOfs = 0
		r.fill = len(r.buf)
		return
	}
	// Case 2: data fits, but not alongside everything already stored. Evict
	// just enough of the oldest bytes to make room.
	if overflowLen := len(data) + r.fill - len(r.buf); overflowLen > 0 {
		if cb != nil {
			for _, chunk := range r.ReadFirst(overflowLen) {
				cb(chunk)
			}
		}
		r.startOfs = (r.startOfs + overflowLen) % len(r.buf)
		r.fill -= overflowLen
	}
	// We can now assume that the buffer has enough capacity for data.
	// Copy it in after the existing contents, wrapping around the end of
	// the backing array if necessary.
	startIdx := (r.startOfs + r.fill) % len(r.buf)
	dataLen := len(data)
	endIdx := startIdx + len(data)
	if over := endIdx - len(r.buf); over > 0 {
		// Split copy: fill to the end of the array, then wrap to the front.
		copy(r.buf[startIdx:], data[:len(data)-over])
		startIdx = 0
		endIdx = over
		data = data[len(data)-over:]
	}
	copy(r.buf[startIdx:endIdx], data)
	r.fill += dataLen
}
package twobucket
import (
"errors"
)
// bucket identifies one of the two buckets.
type bucket int

const (
	bOne bucket = iota
	bTwo
)

// step enumerates the six legal actions: emptying, filling, or pouring
// between the buckets.
type step int

const (
	emptyOne step = iota
	emptyTwo
	fillOne
	fillTwo
	pourOneToTwo
	pourTwoToOne
)

// problem captures the fixed parameters of a puzzle instance.
type problem struct {
	capacity [2]int // capacities of buckets one and two
	goal     int    // target amount to measure
	start    bucket // which bucket is filled first
}

// state is a node in the search space: the current fill levels, the step
// that produced them, and how many steps it took to get here.
type state struct {
	level        [2]int
	previousStep step
	numSteps     int
}

// Bucket names accepted by and returned from Solve.
const (
	FirstBucketName  = "one"
	SecondBucketName = "two"
)
// Solve uses given bucket sizes, the goal amount, and starting bucket to
// solve the two-bucket problem to measure exactly the goal amount,
// returning the goal bucket name "one" or "two",
// the required number of steps, and the fill-level of the other bucket.
// Filling the starting bucket counts as the first step.
func Solve(sizeBucketOne,
	sizeBucketTwo,
	goalAmount int,
	startBucket string) (goalBucket string, numSteps, otherBucketLevel int, e error) {
	if e = validateParameters(sizeBucketOne, sizeBucketTwo, goalAmount, startBucket); e != nil {
		return "", 0, 0, e
	}
	p := problem{
		capacity: [2]int{sizeBucketOne, sizeBucketTwo},
		goal:     goalAmount,
	}
	var s state
	// Step one: fill the starting bucket.
	if startBucket == FirstBucketName {
		p.start = bOne
		performStep(p, &s, fillOne)
	} else {
		p.start = bTwo
		performStep(p, &s, fillTwo)
	}
	// Initial step might be a solution.
	if !isSolution(p, s) {
		s = findGoal(p, s)
	}
	switch {
	case s.level[bOne] == p.goal:
		return FirstBucketName, s.numSteps, s.level[bTwo], nil
	case s.level[bTwo] == p.goal:
		return SecondBucketName, s.numSteps, s.level[bOne], nil
	}
	return "", 0, 0, errors.New("no solution")
}
// validateParameters rejects non-positive sizes or goal amounts and start
// bucket names other than "one"/"two".
func validateParameters(sizeBucketOne, sizeBucketTwo, goalAmount int, startBucket string) error {
	switch {
	case sizeBucketOne <= 0:
		return errors.New("sizeBucketOne invalid")
	case sizeBucketTwo <= 0:
		return errors.New("sizeBucketTwo invalid")
	case goalAmount <= 0:
		return errors.New("goalAmount invalid")
	case startBucket != FirstBucketName && startBucket != SecondBucketName:
		return errors.New("startBucket invalid")
	}
	return nil
}
// isSolution reports whether either bucket holds exactly the goal amount.
func isSolution(p problem, s state) bool {
	return s.level[bOne] == p.goal || s.level[bTwo] == p.goal
}

// findGoal searches the state space breadth-first starting from s and
// returns the first goal state found (hence one with a minimal step count),
// or the zero state if the search space is exhausted without a solution.
func findGoal(p problem, s state) (g state) {
	searchList := make([]state, 1)
	searchList[0] = s
	// Use breadth-first search to find the goal, tracking any previously visited states.
	visited := map[[2]int]bool{}
	// Mark as already visited two invalid bucket levels: 0,0 and the reverse starting position.
	visited[[2]int{0, 0}] = true
	if p.start == bOne {
		visited[[2]int{0, p.capacity[bTwo]}] = true
	} else {
		visited[[2]int{p.capacity[bOne], 0}] = true
	}
	for len(searchList) != 0 {
		// Pop one item from the searchList each pass.
		current := searchList[0]
		searchList = searchList[1:]
		for _, x := range getPossibleSteps(p, current) {
			next := current
			performStep(p, &next, x)
			if isSolution(p, next) {
				return next
			}
			if !visited[next.level] {
				searchList = append(searchList, next)
				visited[next.level] = true
			}
		}
	}
	return state{}
}
// performStep applies step x to state s in place, updating the fill levels,
// incrementing the step counter and remembering x as the previous step.
func performStep(p problem, s *state, x step) {
	switch x {
	case emptyOne:
		s.level[bOne] = 0
	case emptyTwo:
		s.level[bTwo] = 0
	case fillOne:
		s.level[bOne] = p.capacity[bOne]
	case fillTwo:
		s.level[bTwo] = p.capacity[bTwo]
	case pourOneToTwo:
		pour(p, s, bOne, bTwo)
	case pourTwoToOne:
		pour(p, s, bTwo, bOne)
	}
	s.numSteps++
	s.previousStep = x
}

// pour from bucket a to b, limited by both what a holds and the free space
// remaining in b.
func pour(p problem, s *state, a, b bucket) {
	amount := p.capacity[b] - s.level[b]
	if amount > s.level[a] {
		amount = s.level[a]
	}
	s.level[b] += amount
	s.level[a] -= amount
}
// getPossibleSteps returns every step that is legal from state s.
func getPossibleSteps(p problem, s state) (list []step) {
	for x := emptyOne; x <= pourTwoToOne; x++ {
		if canPerformStep(p, s, x) {
			list = append(list, x)
		}
	}
	return list
}

// canPerformStep reports whether step x is worth taking from state s: it
// must actually change the state, and it must not trivially undo the
// previous step (e.g. emptying a bucket that was just filled or poured
// into).
func canPerformStep(p problem, s state, x step) bool {
	switch x {
	case emptyOne:
		if s.previousStep == fillOne || s.previousStep == pourOneToTwo {
			return false
		}
		return s.level[bOne] != 0
	case emptyTwo:
		if s.previousStep == fillTwo || s.previousStep == pourTwoToOne {
			return false
		}
		return s.level[bTwo] != 0
	case fillOne:
		if s.previousStep == emptyOne || s.level[bOne] == p.capacity[bOne] {
			return false
		}
		return true
	case fillTwo:
		if s.previousStep == emptyTwo || s.level[bTwo] == p.capacity[bTwo] {
			return false
		}
		return true
	case pourOneToTwo:
		return s.level[bOne] != 0 && s.level[bTwo] < p.capacity[bTwo]
	case pourTwoToOne:
		return s.level[bTwo] != 0 && s.level[bOne] < p.capacity[bOne]
	}
	return false
}
package overlay
import (
"fmt"
"strconv"
"strings"
"go.starlark.net/starlark"
)
// MatchAnnotationExpectsKwarg holds the 'expects' and 'missing_ok' keyword
// arguments of a match annotation, plus the starlark thread needed to invoke
// a callable 'expects' value. At most one of the two kwargs may be set.
type MatchAnnotationExpectsKwarg struct {
	expects   *starlark.Value
	missingOK *starlark.Value
	thread    *starlark.Thread
}

// FillInDefaults copies any kwarg that is still unset from the given child
// defaults annotation.
func (a *MatchAnnotationExpectsKwarg) FillInDefaults(defaults MatchChildDefaultsAnnotation) {
	if a.expects == nil {
		a.expects = defaults.expects.expects
	}
	if a.missingOK == nil {
		a.missingOK = defaults.expects.missingOK
	}
}

// Check validates the number of matched nodes num against the configured
// expectation:
//   - setting both kwargs is an error;
//   - missing_ok=True allows 0 or 1 matches, missing_ok=False requires
//     exactly 1;
//   - expects delegates to that value (int, "N+" string, list, or callable);
//   - with neither kwarg set, exactly 1 match is required.
func (a MatchAnnotationExpectsKwarg) Check(num int) error {
	switch {
	case a.missingOK != nil && a.expects != nil:
		return fmt.Errorf("Expected only one of keyword arguments ('missing_ok', 'expects') specified")
	case a.missingOK != nil:
		if typedResult, ok := (*a.missingOK).(starlark.Bool); ok {
			if typedResult {
				allowedVals := []starlark.Value{starlark.MakeInt(0), starlark.MakeInt(1)}
				return a.checkValue(starlark.NewList(allowedVals), num)
			}
			return a.checkValue(starlark.MakeInt(1), num)
		}
		return fmt.Errorf("Expected keyword argument 'missing_ok' to be a boolean")
	case a.expects != nil:
		return a.checkValue(*a.expects, num)
	default:
		return a.checkValue(starlark.MakeInt(1), num)
	}
}
// checkValue dispatches the expectation check for num on the expectation
// value's type: ints and "N+" strings compare counts, a list passes if any
// of its elements passes, and a callable is invoked with num and must return
// a boolean.
func (a MatchAnnotationExpectsKwarg) checkValue(val interface{}, num int) error {
	switch typedVal := val.(type) {
	case starlark.Int:
		return a.checkInt(typedVal, num)
	case starlark.String:
		return a.checkString(typedVal, num)
	case *starlark.List:
		return a.checkList(typedVal, num)
	case starlark.Callable:
		result, err := starlark.Call(a.thread, typedVal, starlark.Tuple{starlark.MakeInt(num)}, []starlark.Tuple{})
		if err != nil {
			return err
		}
		if typedResult, ok := result.(starlark.Bool); ok {
			if !bool(typedResult) {
				return fmt.Errorf("Expectation of number of matched nodes failed")
			}
			return nil
		}
		return fmt.Errorf("Expected keyword argument 'expects' to have a function that returns a boolean")
	default:
		return fmt.Errorf("Expected '%s' annotation keyword argument 'expects'"+
			" to be either int, string or function, but was %T", AnnotationMatch, typedVal)
	}
}

// checkInt errors unless num equals the expected count.
func (a MatchAnnotationExpectsKwarg) checkInt(typedVal starlark.Int, num int) error {
	i1, ok := typedVal.Int64()
	if ok {
		if i1 != int64(num) {
			return fmt.Errorf("Expected number of matched nodes to be %d, but was %d", i1, num)
		}
		return nil
	}
	i2, ok := typedVal.Uint64()
	if ok {
		if i2 != uint64(num) {
			return fmt.Errorf("Expected number of matched nodes to be %d, but was %d", i2, num)
		}
		return nil
	}
	// Reached only when the starlark.Int fits neither int64 nor uint64
	// (i.e. a big integer), which a node-count expectation should never be.
	panic("Unsure how to convert starlark.Int to int")
}

// checkString accepts only the "i+" form (e.g. "2+"), meaning num must be at
// least i.
func (a MatchAnnotationExpectsKwarg) checkString(typedVal starlark.String, num int) error {
	typedValStr := string(typedVal)
	if strings.HasSuffix(typedValStr, "+") {
		typedInt, err := strconv.Atoi(strings.TrimSuffix(typedValStr, "+"))
		if err != nil {
			return fmt.Errorf("Expected '%s' to be in format 'i+' where i is an integer", typedValStr)
		}
		if num < typedInt {
			return fmt.Errorf("Expected number of matched nodes to be >= %d, but was %d", typedInt, num)
		}
		return nil
	}
	return fmt.Errorf("Expected '%s' to be in format 'i+' where i is an integer", typedValStr)
}

// checkList passes if any element of the list passes checkValue; otherwise
// it returns the error from the last element tried.
// NOTE(review): an empty list leaves lastErr nil and therefore vacuously
// passes — confirm this is intended.
func (a MatchAnnotationExpectsKwarg) checkList(typedVal *starlark.List, num int) error {
	var lastErr error
	var val starlark.Value
	iter := typedVal.Iterate()
	defer iter.Done()
	for iter.Next(&val) {
		lastErr = a.checkValue(val, num)
		if lastErr == nil {
			return nil
		}
	}
	return lastErr
}
package quasigo

//go:generate stringer -type=opcode -trimprefix=op

// opcode identifies a single quasigo VM instruction. Each constant below
// documents its byte encoding (operand layout and total width) and its
// effect on the value stack.
// NOTE(review): the ".gen.go" filename suggests this file is produced by a
// generator — prefer regenerating over hand-editing.
type opcode byte

const (
	opInvalid opcode = 0
	// Encoding: 0x01 (width=1)
	// Stack effect: (value) -> ()
	opPop opcode = 1
	// Encoding: 0x02 (width=1)
	// Stack effect: (x) -> (x x)
	opDup opcode = 2
	// Encoding: 0x03 index:u8 (width=2)
	// Stack effect: () -> (value)
	opPushParam opcode = 3
	// Encoding: 0x04 index:u8 (width=2)
	// Stack effect: () -> (value:int)
	opPushIntParam opcode = 4
	// Encoding: 0x05 index:u8 (width=2)
	// Stack effect: () -> (value)
	opPushLocal opcode = 5
	// Encoding: 0x06 index:u8 (width=2)
	// Stack effect: () -> (value:int)
	opPushIntLocal opcode = 6
	// Encoding: 0x07 (width=1)
	// Stack effect: () -> (false)
	opPushFalse opcode = 7
	// Encoding: 0x08 (width=1)
	// Stack effect: () -> (true)
	opPushTrue opcode = 8
	// Encoding: 0x09 constid:u8 (width=2)
	// Stack effect: () -> (const)
	opPushConst opcode = 9
	// Encoding: 0x0a constid:u8 (width=2)
	// Stack effect: () -> (const:int)
	opPushIntConst opcode = 10
	// Encoding: 0x0b index:u8 (width=2)
	// Stack effect: (value) -> ()
	opSetLocal opcode = 11
	// Encoding: 0x0c index:u8 (width=2)
	// Stack effect: (value:int) -> ()
	opSetIntLocal opcode = 12
	// Encoding: 0x0d index:u8 (width=2)
	// Stack effect: unchanged
	opIncLocal opcode = 13
	// Encoding: 0x0e index:u8 (width=2)
	// Stack effect: unchanged
	opDecLocal opcode = 14
	// Encoding: 0x0f (width=1)
	// Stack effect: (value) -> (value)
	opReturnTop opcode = 15
	// Encoding: 0x10 (width=1)
	// Stack effect: (value) -> (value)
	opReturnIntTop opcode = 16
	// Encoding: 0x11 (width=1)
	// Stack effect: unchanged
	opReturnFalse opcode = 17
	// Encoding: 0x12 (width=1)
	// Stack effect: unchanged
	opReturnTrue opcode = 18
	// Encoding: 0x13 offset:i16 (width=3)
	// Stack effect: unchanged
	opJump opcode = 19
	// Encoding: 0x14 offset:i16 (width=3)
	// Stack effect: (cond:bool) -> ()
	opJumpFalse opcode = 20
	// Encoding: 0x15 offset:i16 (width=3)
	// Stack effect: (cond:bool) -> ()
	opJumpTrue opcode = 21
	// Encoding: 0x16 funcid:u16 (width=3)
	// Stack effect: (args...) -> (results...)
	opCallNative opcode = 22
	// Encoding: 0x17 (width=1)
	// Stack effect: (value) -> (result:bool)
	opIsNil opcode = 23
	// Encoding: 0x18 (width=1)
	// Stack effect: (value) -> (result:bool)
	opIsNotNil opcode = 24
	// Encoding: 0x19 (width=1)
	// Stack effect: (value:bool) -> (result:bool)
	opNot opcode = 25
	// Encoding: 0x1a (width=1)
	// Stack effect: (x:int y:int) -> (result:bool)
	opEqInt opcode = 26
	// Encoding: 0x1b (width=1)
	// Stack effect: (x:int y:int) -> (result:bool)
	opNotEqInt opcode = 27
	// Encoding: 0x1c (width=1)
	// Stack effect: (x:int y:int) -> (result:bool)
	opGtInt opcode = 28
	// Encoding: 0x1d (width=1)
	// Stack effect: (x:int y:int) -> (result:bool)
	opGtEqInt opcode = 29
	// Encoding: 0x1e (width=1)
	// Stack effect: (x:int y:int) -> (result:bool)
	opLtInt opcode = 30
	// Encoding: 0x1f (width=1)
	// Stack effect: (x:int y:int) -> (result:bool)
	opLtEqInt opcode = 31
	// Encoding: 0x20 (width=1)
	// Stack effect: (x:string y:string) -> (result:bool)
	opEqString opcode = 32
	// Encoding: 0x21 (width=1)
	// Stack effect: (x:string y:string) -> (result:bool)
	opNotEqString opcode = 33
	// Encoding: 0x22 (width=1)
	// Stack effect: (x:string y:string) -> (result:string)
	opConcat opcode = 34
	// Encoding: 0x23 (width=1)
	// Stack effect: (x:int y:int) -> (result:int)
	opAdd opcode = 35
	// Encoding: 0x24 (width=1)
	// Stack effect: (x:int y:int) -> (result:int)
	opSub opcode = 36
	// Encoding: 0x25 (width=1)
	// Stack effect: (s:string from:int to:int) -> (result:string)
	opStringSlice opcode = 37
	// Encoding: 0x26 (width=1)
	// Stack effect: (s:string from:int) -> (result:string)
	opStringSliceFrom opcode = 38
	// Encoding: 0x27 (width=1)
	// Stack effect: (s:string to:int) -> (result:string)
	opStringSliceTo opcode = 39
	// Encoding: 0x28 (width=1)
	// Stack effect: (s:string) -> (result:int)
	opStringLen opcode = 40
)

// opcodeInfo records static per-opcode metadata.
type opcodeInfo struct {
	// width is the full encoded size of the instruction in bytes, including
	// the opcode byte itself (matches the "width=" values documented above).
	width int
}
// opcodeInfoTable is indexed by opcode value; each entry's width mirrors the
// "width=" value documented on the corresponding opcode constant above.
// Unlisted entries are the zero value (width 0).
var opcodeInfoTable = [256]opcodeInfo{
	opInvalid: {width: 1},
	opPop: {width: 1},
	opDup: {width: 1},
	opPushParam: {width: 2},
	opPushIntParam: {width: 2},
	opPushLocal: {width: 2},
	opPushIntLocal: {width: 2},
	opPushFalse: {width: 1},
	opPushTrue: {width: 1},
	opPushConst: {width: 2},
	opPushIntConst: {width: 2},
	opSetLocal: {width: 2},
	opSetIntLocal: {width: 2},
	opIncLocal: {width: 2},
	opDecLocal: {width: 2},
	opReturnTop: {width: 1},
	opReturnIntTop: {width: 1},
	opReturnFalse: {width: 1},
	opReturnTrue: {width: 1},
	opJump: {width: 3},
	opJumpFalse: {width: 3},
	opJumpTrue: {width: 3},
	opCallNative: {width: 3},
	opIsNil: {width: 1},
	opIsNotNil: {width: 1},
	opNot: {width: 1},
	opEqInt: {width: 1},
	opNotEqInt: {width: 1},
	opGtInt: {width: 1},
	opGtEqInt: {width: 1},
	opLtInt: {width: 1},
	opLtEqInt: {width: 1},
	opEqString: {width: 1},
	opNotEqString: {width: 1},
	opConcat: {width: 1},
	opAdd: {width: 1},
	opSub: {width: 1},
	opStringSlice: {width: 1},
	opStringSliceFrom: {width: 1},
	opStringSliceTo: {width: 1},
	opStringLen: {width: 1},
}
package erf

import (
	"github.com/dreading/gospecfunc/erf/internal/toms"
	"math"
	"math/cmplx"
)

// Erf computes approximate values for the error function.
// It uses the Faddeyeva-function identity erf(z) = 1 - exp(-z^2)*w(i*z),
// where w is computed by the TOMS routine.
func Erf(z complex128) complex128 {
	return 1 - toms.Faddeyeva(1i*z)*cmplx.Exp(-z*z)
}

// Erfc computes approximate values for the complementary error function
// erfc(z) = 1 - erf(z), evaluated as erfc(z) = exp(-z^2)*w(i*z).
func Erfc(z complex128) complex128 {
	return toms.Faddeyeva(1i*z) * cmplx.Exp(-z*z)
}

// Erfcx computes approximate values for the scaled complementary error
// function erfcx(z) = exp(z^2) * erfc(z), which is exactly the Faddeyeva
// function evaluated at i*z.
func Erfcx(z complex128) complex128 {
	return toms.Faddeyeva(1i * z)
}

// Erfi computes approximate values for the imaginary error function
// erfi(z) = -i*erf(iz).
func Erfi(z complex128) complex128 {
	return -1i * Erf(1i*z)
}

// Dawson computes approximate values for the Dawson function (integral).
// Dawson function is the one-sided Fourier–Laplace sine transform of the
// Gaussian function, evaluated here as D(z) = (i*sqrt(pi)/2)*(exp(-z^2) - w(z)).
func Dawson(z complex128) complex128 {
	return complex(0, 0.5*math.Sqrt(math.Pi)) * (cmplx.Exp(-z*z) - toms.Faddeyeva(z))
}
// Fresnel computes approximate values for the Fresnel integrals
// int_0^x cos(t^2) dt and int_0^x sin(t^2) dt, returned in that order.
// Both are derived from the complex error function evaluated at rotated
// arguments.
func Fresnel(z complex128) (complex128, complex128) {
	h := 0.5 * math.Sqrt(math.Pi)
	ePlus := complex(0.5, 0.5) * Erf(z*complex(h, -h))
	eNeg := complex(0.5, -0.5) * Erf(z*complex(h, h))
	cosIntegral := 0.5 * (eNeg + ePlus)
	sinIntegral := 0.5i * (eNeg - ePlus)
	return cosIntegral, sinIntegral
}
// Voigt computes approximate values for the real and imaginary Voigt functions: https://dlmf.nist.gov/7.19
// Here we use Faddeyeva to provide analytical continuation for all t in R
func Voigt(x float64, t float64) (float64, float64) {
	// Limiting values as t -> 0: U(x,0) = 1/(1+x^2), V(x,0) = x/(1+x^2).
	// Nextafter(0, 1) is the smallest positive double, so this branch covers
	// t <= 0 as well.
	if t <= math.Nextafter(0, 1) {
		var onexx = 1 + x*x
		return 1 / onexx, x / onexx
	}
	// z = (1 - i*x) / (2*sqrt(t)); then U + iV = sqrt(pi/(4t)) * w(i*z).
	var one2SqrtT = 1 / (2 * math.Sqrt(t))
	var z = complex(one2SqrtT, -x*one2SqrtT)
	var c = complex(math.Sqrt(math.Pi/(4*t)), 0) * toms.Faddeyeva(1i*z)
	return real(c), imag(c)
}
// Faddeyeva computes the plasma dispersion Faddeyeva function, w(z) = exp(-z^2) * erfc(-i*z)
// where z=x+iy and erfc(z) is the complex complementary error function of z
func Faddeyeva(z complex128) complex128 {
return toms.Faddeyeva(z)
} | erf/erf.go | 0.824356 | 0.527377 | erf.go | starcoder |
package tree
// Color marks a node as RED or BLACK for red-black tree balancing.
type Color int

const (
	RED   Color = 0
	BLACK Color = 1
)

// TreeNode is a single node of the red-black tree.
type TreeNode struct {
	color  Color
	value  int
	left   *TreeNode
	right  *TreeNode
	parent *TreeNode
}

// NewTreeNode returns a node holding value with the given color and parent.
// Its children start out nil; callers normally point them at the tree's
// shared sentinel leaf.
func NewTreeNode(value int, color Color, parent *TreeNode) *TreeNode {
	return &TreeNode{
		color:  color,
		value:  value,
		left:   nil,
		right:  nil,
		parent: parent,
	}
}

// RedBlackTree is a binary search tree kept approximately balanced by
// red-black recoloring and rotations. All external leaves share the single
// black sentinel node `leaf`; `size` counts inserted values.
type RedBlackTree struct {
	root *TreeNode
	size int
	leaf *TreeNode
}

// NewRedBlackTree returns an empty tree with its sentinel leaf initialized.
func NewRedBlackTree() *RedBlackTree {
	return &RedBlackTree{
		root: nil,
		size: 0,
		leaf: NewTreeNode(0, BLACK, nil),
	}
}

// Insert adds value to the tree and rebalances. Duplicate values are
// permitted; an equal key descends (and is attached) to the left of the
// existing node, matching the descent rule used by Search.
func (t *RedBlackTree) Insert(value int) {
	newNode := NewTreeNode(value, RED, nil)
	newNode.left = t.leaf
	newNode.right = t.leaf
	t.size++
	if t.root == nil {
		// First node becomes the (black) root.
		newNode.color = BLACK
		t.root = newNode
		return
	}
	// Standard BST descent to find the attachment point.
	node := t.root
	pnode := t.root
	for {
		pnode = node
		if node.value < value {
			node = node.right
		} else {
			node = node.left
		}
		if t.leaf == node {
			break
		}
	}
	// Attach on the same side the descent took. (The previous code used
	// `pnode.value > value`, which on an equal key overwrote pnode.right
	// even though the descent had gone left, silently dropping an existing
	// right subtree.)
	if pnode.value < value {
		pnode.right = newNode
	} else {
		pnode.left = newNode
	}
	newNode.parent = pnode
	t.fixTree(newNode)
}

// fixTree restores the red-black invariants after inserting node, walking up
// from the new red node and resolving red-red violations by recoloring
// (red uncle) or rotations (black uncle).
// NOTE(review): unlike the textbook formulation, the loop does not verify
// that n itself is still red after a rotation pass, so redundant passes can
// occur; BST ordering is preserved regardless.
func (t *RedBlackTree) fixTree(node *TreeNode) {
	n := node
	for {
		if n == nil {
			break
		}
		pnode := n.parent
		if n == t.root || pnode.color == BLACK {
			// No red-red violation left to fix.
			break
		}
		ppnode := pnode.parent
		// unode is n's uncle (possibly the black sentinel leaf).
		var unode *TreeNode
		if ppnode.left == pnode {
			unode = ppnode.right
		} else {
			unode = ppnode.left
		}
		if unode.color == RED {
			// Case 1: red uncle — recolor and continue from the grandparent.
			ppnode.color = RED
			unode.color = BLACK
			pnode.color = BLACK
			n = pnode.parent
		} else if pnode == ppnode.left {
			// Black uncle, parent is a left child.
			if pnode.right == n {
				// Zig-zag: rotate into the straight-line case first.
				t.rotateLeft(n.parent)
			}
			t.rotateRight(ppnode)
			ppnode.color = RED
			if ppnode.parent != nil {
				ppnode.parent.color = BLACK
				n = ppnode.parent
			} else {
				n = pnode.parent
			}
		} else if pnode == ppnode.right {
			// Mirror of the previous case.
			if pnode.left == n {
				t.rotateRight(n.parent)
			}
			t.rotateLeft(ppnode)
			ppnode.color = RED
			if ppnode.parent != nil {
				ppnode.parent.color = BLACK
				n = ppnode.parent
			} else {
				n = pnode.parent
			}
		}
		// The root is always black.
		t.root.color = BLACK
	}
}

// rotateLeft rotates xnode's right child up into xnode's place.
func (t *RedBlackTree) rotateLeft(xnode *TreeNode) {
	pnode := xnode.parent
	ynode := xnode.right
	xnode.right = ynode.left
	if xnode.right != nil {
		// Keep the transplanted subtree's parent pointer consistent;
		// previously it was left pointing at ynode, corrupting later fixups.
		xnode.right.parent = xnode
	}
	ynode.left = xnode
	ynode.parent = pnode
	xnode.parent = ynode
	if pnode == nil {
		t.root = ynode
		return
	}
	if pnode.left == xnode {
		pnode.left = ynode
	} else {
		pnode.right = ynode
	}
}

// rotateRight rotates xnode's left child up into xnode's place.
func (t *RedBlackTree) rotateRight(xnode *TreeNode) {
	pnode := xnode.parent
	ynode := xnode.left
	xnode.left = ynode.right
	if xnode.left != nil {
		// See rotateLeft: re-parent the transplanted subtree.
		xnode.left.parent = xnode
	}
	ynode.right = xnode
	ynode.parent = pnode
	xnode.parent = ynode
	if pnode == nil {
		t.root = ynode
		return
	}
	if pnode.right == xnode {
		pnode.right = ynode
	} else {
		pnode.left = ynode
	}
}

// Search returns the node holding value, or the shared sentinel leaf when
// the value is absent (including when the tree is empty).
func (t *RedBlackTree) Search(value int) *TreeNode {
	if t.root == nil {
		// Previously this dereferenced the nil root and panicked.
		return t.leaf
	}
	tnode := t.root
	for {
		if tnode == t.leaf || tnode.value == value {
			return tnode
		}
		if tnode.value < value {
			tnode = tnode.right
		} else {
			tnode = tnode.left
		}
	}
}
package main
import (
"bytes"
"fmt"
"os"
"strconv"
"strings"
fa "github.com/kentwait/gofasta"
)
// ConsistentAlignmentPositions returns, for each column of the given
// ungapped-position matrices, whether all matrices agree on that column's
// pattern of positions. All matrices are assumed to have the same shape;
// gapChar is currently unused (patterns are derived purely from the integer
// matrices) and is kept for interface compatibility.
//
// Each column is encoded as a comma-separated string (e.g. "0,1,-1,") so
// whole columns can be compared with a single string comparison. Columns are
// compared against the first matrix's column directly, per column index.
//
// Fix: the previous implementation counted pattern occurrences in one global
// map across all columns and matrices. Identical patterns occurring in more
// than one column (for example several all-gap "-1,...,-1" columns) then
// shared a single count, so consistent columns could be misreported as
// inconsistent and vice versa.
func ConsistentAlignmentPositions(gapChar string, matrices ...[][]int) []bool {
	// columnPattern encodes column j of matrix m as a comma-separated string.
	columnPattern := func(m [][]int, j int) string {
		var b strings.Builder
		for i := range m {
			b.WriteString(strconv.Itoa(m[i][j]))
			b.WriteByte(',')
		}
		return b.String()
	}

	cols := len(matrices[0][0])
	pos := make([]bool, cols)
	for j := 0; j < cols; j++ {
		template := columnPattern(matrices[0], j)
		pos[j] = true
		for _, m := range matrices[1:] {
			if columnPattern(m, j) != template {
				pos[j] = false
				break
			}
		}
	}
	return pos
}
// ConsistentCodonAlignmentPositions returns the list of positions in the
// codon alignment that are considered consistent given by the alignment
// pattern per site across all given alignments.
func ConsistentCodonAlignmentPositions(gapChar string, matrices ...[][]int) []bool {
	var codonPos []bool
	for _, consistent := range ConsistentAlignmentPositions(gapChar, matrices...) {
		// Each codon column expands to its 3 nucleotide columns.
		codonPos = append(codonPos, consistent, consistent, consistent)
	}
	return codonPos
}
// ConsistentAlnPipeline aligns using global, local, and affine-local alignment strategies to determine positions that have a consistent alignment pattern over the three different strategies.
// It returns the E-INS-i alignment, with each position marked consistent or
// inconsistent, as a FASTA-formatted buffer. Progress dots are written to
// stderr along the way.
func ConsistentAlnPipeline(inputPath, gapChar, markerID, consistentMarker, inconsistentMarker string, iterations int, toUpper, toLower, saveTempAlns bool) bytes.Buffer {
	// TODO: Allow this to be a parameter instead of being hard-coded
	const mafftCmd = "mafft"
	os.Stderr.WriteString(fmt.Sprintf("%s: ", inputPath))
	/* Align using 3 different strategies implemented in MAFFT
	   - global alignment (GinsiAlign)
	   - local alignment (LinsiAlign)
	   - affine-gap local alignment (EinsiAlign)
	   These calls run sequentially with MAFFT saturating all cores.
	   MAFFT outputs results to stdout and these functions capture stdout to return a string.
	*/
	// TODO: Propagate ExecMafft error into *Align
	// TODO: *Align should probably output a buffer instead of a string
	ginsiString := CharAlign(mafftCmd, inputPath, "ginsi", iterations)
	linsiString := CharAlign(mafftCmd, inputPath, "linsi", iterations)
	einsiString := CharAlign(mafftCmd, inputPath, "einsi", iterations)
	// Check if alignments are not empty.
	// If empty, print error message to stderr and exit with code 1
	if len(ginsiString) == 0 {
		EmptyAlnError("G-INSI", inputPath)
	}
	if len(linsiString) == 0 {
		EmptyAlnError("L-INSI", inputPath)
	}
	if len(einsiString) == 0 {
		EmptyAlnError("E-INSI", inputPath)
	}
	// Each result (string) is parsed to create character alignments
	ginsiAln := fa.FastaToAlignment(strings.NewReader(ginsiString), false)
	linsiAln := fa.FastaToAlignment(strings.NewReader(linsiString), false)
	einsiAln := fa.FastaToAlignment(strings.NewReader(einsiString), false)
	os.Stderr.WriteString(".")
	// Writes temp alignments next to the input file if requested.
	// TODO: ToFasta is not necessary, just write the ginsiString, linsiString, einsiString
	if saveTempAlns == true {
		einsiAln.ToFastaFile(inputPath + ".einsi.aln")
		ginsiAln.ToFastaFile(inputPath + ".ginsi.aln")
		linsiAln.ToFastaFile(inputPath + ".linsi.aln")
	}
	// consistentPos is a boolean slice indicating per position whether all
	// three strategies produced the same ungapped-position pattern.
	consistentPos := ConsistentAlignmentPositions(
		gapChar,
		einsiAln.UngappedPositionMatrix(gapChar),
		ginsiAln.UngappedPositionMatrix(gapChar),
		linsiAln.UngappedPositionMatrix(gapChar),
	)
	os.Stderr.WriteString(".")
	// Optional case folding of the output alignment (upper wins over lower).
	if toUpper == true {
		einsiAln.ToUpper()
	} else if toLower == true {
		einsiAln.ToLower()
	}
	os.Stderr.WriteString(" Done.\n")
	// TODO: Add ability to select what alignment is outputted
	return MarkedAlignmentToBuffer(einsiAln, consistentPos, markerID, consistentMarker, inconsistentMarker)
}
// ConsistentCodonAlnPipeline aligns codon sequences using global, local, and affine-local alignment strategies to determine positions that have a consistent alignment pattern over the three different strategies.
func ConsistentCodonAlnPipeline(inputPath, gapChar, markerID, consistentMarker, inconsistentMarker string, iterations int, toUpper, toLower, saveTempAlns bool) bytes.Buffer {
// TODO: Allow this to be a parameter instead of being hard-coded
const mafftCmd = "mafft"
os.Stderr.WriteString(fmt.Sprintf("%s: ", inputPath))
// Create an Alignment of CodonSequence to generate translated protein sequence from nucleotide sequence
c := fa.FastaFileToCodonAlignment(inputPath)
// Read protein sequences from Alignment of CodonSequences and create a Fasta string in buffer
protReader := strings.NewReader(c.ToFasta())
// Pass buff to each of the three alignment strategies.
// These will align protein sequences in MAFFT.
// Based on the protein alignment, the original codon alignment is adjusted using the AlignCodonsUsingProtAlignment function.
ginsiString := CodonAlignStdin(mafftCmd, protReader, "ginsi", iterations, c)
linsiString := CodonAlignStdin(mafftCmd, protReader, "linsi", iterations, c)
einsiString := CodonAlignStdin(mafftCmd, protReader, "einsi", iterations, c)
// Check if string alignment is not empty
if len(ginsiString) == 0 {
EmptyAlnError("G-INSI", inputPath)
}
if len(linsiString) == 0 {
EmptyAlnError("L-INSI", inputPath)
}
if len(einsiString) == 0 {
EmptyAlnError("E-INSI", inputPath)
}
// The FASTA outputs are parsed to create codon alignments.
ginsiAln := fa.FastaToAlignment(strings.NewReader(ginsiString), true)
linsiAln := fa.FastaToAlignment(strings.NewReader(linsiString), true)
einsiAln := fa.FastaToAlignment(strings.NewReader(einsiString), true)
os.Stderr.WriteString(".")
// TODO: ToFasta conversion is unnecessary.
// *insiString is already in FASTA format
if saveTempAlns == true {
einsiAln.ToFastaFile(inputPath + ".einsi.aln")
ginsiAln.ToFastaFile(inputPath + ".ginsi.aln")
linsiAln.ToFastaFile(inputPath + ".linsi.aln")
}
// consistentPos is a boolean slice indicating per position whether it is consistent or not. Length of consistentPos is the length of the codon alignment as single characters.
consistentPos := ConsistentCodonAlignmentPositions(
gapChar,
einsiAln.UngappedPositionMatrix(gapChar),
ginsiAln.UngappedPositionMatrix(gapChar),
linsiAln.UngappedPositionMatrix(gapChar),
)
if toUpper == true {
einsiAln.ToUpper()
} else if toLower == true {
einsiAln.ToLower()
}
os.Stderr.WriteString(" Done.\n")
return MarkedAlignmentToBuffer(einsiAln, consistentPos, markerID, consistentMarker, inconsistentMarker)
} | pipeline.go | 0.659734 | 0.60013 | pipeline.go | starcoder |
package gopy

// Number is a generic interface of all numeric types in Go.
// The members are exact (no ~), so named types based on them do not satisfy
// Number; complex types are not included.
type Number interface {
	int | int64 | int32 | int16 | int8 | uint | uint64 | uint32 | uint16 | uint8 | float64 | float32
}

// NumLike is a generic interface of all numeric types and custom numeric
// types in Go: the ~ approximations admit any type whose underlying type is
// one of the listed numeric types.
type NumLike interface {
	~int | ~int64 | ~int32 | ~int16 | ~int8 | ~uint | ~uint64 | ~uint32 | ~uint16 | ~uint8 | ~float64 | ~float32
}
// Map applies `function` to every element of `slice` and returns the results
// in a new slice of the same length, preserving order.
func Map[T any, U any, C ~[]T](function func(T) U, slice C) []U {
	mapped := make([]U, len(slice))
	for i := range slice {
		mapped[i] = function(slice[i])
	}
	return mapped
}
// Filter returns the elements of `slice` satisfying the predicate `pred`,
// in their original order. The result is always non-nil, even when nothing
// matches.
func Filter[T any, C ~[]T](pred func(T) bool, slice C) []T {
	kept := make([]T, 0, len(slice))
	for _, candidate := range slice {
		if !pred(candidate) {
			continue
		}
		kept = append(kept, candidate)
	}
	return kept
}
// Reduce folds `sequence` left-to-right: the accumulator (seeded with
// `initial`) is passed on the left of `function`, each element on the right,
// and the final accumulator is returned.
func Reduce[T any, U any, Uslice ~[]U](function func(T, U) T, sequence Uslice, initial T) T {
	acc := initial
	for i := range sequence {
		acc = function(acc, sequence[i])
	}
	return acc
}
// Reversed returns a reversed copy of `sequence`; the input is left
// untouched.
func Reversed[T any, C ~[]T](sequence C) []T {
	out := make([]T, len(sequence))
	copy(out, sequence)
	for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
		out[i], out[j] = out[j], out[i]
	}
	return out
}
// Sum returns the sum of the collection of numbers (the zero value for an
// empty collection).
func Sum[T NumLike, NumSlice ~[]T](nums NumSlice) T {
	var total T
	for _, n := range nums {
		total += n
	}
	return total
}

// VarSum is the variadic version of Sum.
func VarSum[T Number](nums ...T) T {
	return Sum(nums)
}
// reduceMonoid folds `function` across `sequence`, seeding the fold with the
// first element and then applying it over every element (so the first
// element is combined with itself once — harmless for idempotent operations
// such as min and max). `val0` is returned only for an empty sequence.
func reduceMonoid[T any, Tslice ~[]T](function func(T, T) T, sequence Tslice, val0 T) T {
	if len(sequence) == 0 {
		return val0
	}
	acc := sequence[0]
	for i := range sequence {
		acc = function(acc, sequence[i])
	}
	return acc
}

// min returns the smaller of x and y.
func min[T NumLike](x, y T) T {
	if y <= x {
		return y
	}
	return x
}

// Min returns the minimum element of `nums`, or the zero value for an empty
// slice.
func Min[T NumLike, NumSlice ~[]T](nums NumSlice) T {
	return reduceMonoid(min[T], nums, T(0))
}

// VarMin is the variadic version of Min.
func VarMin[T Number](nums ...T) T {
	return Min(nums)
}

// max returns the larger of x and y.
func max[T NumLike](x, y T) T {
	if y > x {
		return y
	}
	return x
}

// Max returns the maximum element of `nums`, or the zero value for an empty
// slice.
func Max[T NumLike, NumSlice ~[]T](nums NumSlice) T {
	return reduceMonoid(max[T], nums, T(0))
}

// VarMax is the variadic version of Max.
func VarMax[T Number](nums ...T) T {
	return Max(nums)
}
// All reports whether every element of `bools` is true. An empty slice
// vacuously yields true.
func All[BoolSlice ~[]bool](bools BoolSlice) bool {
	for _, b := range bools {
		if !b {
			return false
		}
	}
	return true
}

// VarAll is the variadic version of All.
func VarAll(bools ...bool) bool {
	return All(bools)
}

// Any reports whether at least one element of `bools` is true. An empty
// slice yields false.
func Any[BoolSlice ~[]bool](bools BoolSlice) bool {
	for _, b := range bools {
		if b {
			return true
		}
	}
	return false
}

// VarAny is the variadic version of Any.
func VarAny(bools ...bool) bool {
	return Any(bools)
}
package query
// ExampleSpec provides a mapping example and some input/output results to
// display.
type ExampleSpec struct {
	Mapping string
	Summary string
	Results [][2]string
}

// NewExampleSpec creates a new example spec. The trailing variadic arguments
// are consumed as (input, output) pairs; a dangling unpaired value is
// ignored.
func NewExampleSpec(summary, mapping string, results ...string) ExampleSpec {
	paired := make([][2]string, 0, len(results)/2)
	for i := 1; i < len(results); i += 2 {
		paired = append(paired, [2]string{results[i-1], results[i]})
	}
	return ExampleSpec{
		Mapping: mapping,
		Summary: summary,
		Results: paired,
	}
}
//------------------------------------------------------------------------------

// Status is the release status of a function or method. Deprecated
// components remain usable in mappings but are hidden from docs and searches
// (see NewDeprecatedFunctionSpec below).
type Status string

// Component statuses.
var (
	StatusStable     Status = "stable"
	StatusBeta       Status = "beta"
	StatusDeprecated Status = "deprecated"
)

//------------------------------------------------------------------------------

// FunctionCategory is an abstract title for functions of a similar purpose.
type FunctionCategory string

// Function categories.
var (
	FunctionCategoryGeneral     FunctionCategory = "General"
	FunctionCategoryMessage     FunctionCategory = "Message Info"
	FunctionCategoryEnvironment FunctionCategory = "Environment"
)
// FunctionSpec describes a Bloblang function.
type FunctionSpec struct {
	// The release status of the function.
	Status Status
	// A category to place the function within.
	Category FunctionCategory
	// Name of the function (as it appears in config).
	Name string
	// Description of the function's purpose (in markdown).
	Description string
	// Examples shows general usage for the function.
	Examples []ExampleSpec
}

// NewFunctionSpec creates a new function spec with StatusStable.
func NewFunctionSpec(category FunctionCategory, name, description string, examples ...ExampleSpec) FunctionSpec {
	return FunctionSpec{
		Status:      StatusStable,
		Category:    category,
		Name:        name,
		Description: description,
		Examples:    examples,
	}
}

// Beta flags the function as a beta component. The value receiver means the
// original spec is left unmodified and a flagged copy is returned.
func (s FunctionSpec) Beta() FunctionSpec {
	s.Status = StatusBeta
	return s
}

// NewDeprecatedFunctionSpec creates a new function spec that is deprecated. The
// function will not appear in docs or searches but will still be usable in
// mappings.
func NewDeprecatedFunctionSpec(name string) FunctionSpec {
	return FunctionSpec{
		Name:   name,
		Status: StatusDeprecated,
	}
}
//------------------------------------------------------------------------------

// MethodCategory is an abstract title for methods of a similar purpose.
type MethodCategory string

// Method categories.
var (
	MethodCategoryStrings        MethodCategory = "String Manipulation"
	MethodCategoryTime           MethodCategory = "Timestamp Manipulation"
	MethodCategoryRegexp         MethodCategory = "Regular Expressions"
	MethodCategoryEncoding       MethodCategory = "Encoding and Encryption"
	MethodCategoryCoercion       MethodCategory = "Type Coercion"
	MethodCategoryParsing        MethodCategory = "Parsing"
	MethodCategoryObjectAndArray MethodCategory = "Object & Array Manipulation"
)

// MethodCatSpec describes how a method behaves in the context of a given
// category. A method may carry several of these (see MethodSpec.Categories
// and MethodSpec.InCategory), each with its own description and examples.
type MethodCatSpec struct {
	Category    MethodCategory
	Description string
	Examples    []ExampleSpec
}
// MethodSpec describes a Bloblang method.
type MethodSpec struct {
	// The release status of the function.
	Status Status
	// Name of the method (as it appears in config).
	Name string
	// Description of the method purpose (in markdown).
	Description string
	// Examples shows general usage for the method.
	Examples []ExampleSpec
	// Categories that this method fits within.
	Categories []MethodCatSpec
}

// NewMethodSpec creates a new method spec with StatusStable.
func NewMethodSpec(name, description string, examples ...ExampleSpec) MethodSpec {
	return MethodSpec{
		Name:        name,
		Status:      StatusStable,
		Description: description,
		Examples:    examples,
	}
}

// NewDeprecatedMethodSpec creates a new method spec that is deprecated. The
// method will not appear in docs or searches but will still be usable in
// mappings.
func NewDeprecatedMethodSpec(name string) MethodSpec {
	return MethodSpec{
		Name:   name,
		Status: StatusDeprecated,
	}
}

// Beta flags the method as a beta component. The value receiver means the
// original spec is left unmodified and a flagged copy is returned.
func (m MethodSpec) Beta() MethodSpec {
	m.Status = StatusBeta
	return m
}
// InCategory describes the methods behaviour in the context of a given
// category, methods can belong to multiple categories. For example, the
// `contains` method behaves differently in the object and array category versus
// the strings one, but belongs in both.
func (m MethodSpec) InCategory(category MethodCategory, description string, examples ...ExampleSpec) MethodSpec {
cats := make([]MethodCatSpec, 0, len(m.Categories)+1)
cats = append(cats, m.Categories...)
cats = append(cats, MethodCatSpec{
Category: category,
Description: description,
Examples: examples,
})
m.Categories = cats
return m
} | internal/bloblang/query/docs.go | 0.812904 | 0.645274 | docs.go | starcoder |
package poly
import (
poly1d "github.com/adamcolton/geom/calc/poly"
"github.com/adamcolton/geom/d2"
"github.com/adamcolton/geom/d2/curve/line"
)
// Poly is a 2D polynomial curve.
type Poly struct {
	Coefficients
}

// New polynomial curve whose coefficients are the given points.
func New(pts ...d2.V) Poly {
	return Poly{Slice(pts)}
}

// Copy the coefficients into an instance of Slice. The provided buffer will
// be used if it has sufficient capacity. This materializes composite
// coefficient views (such as Sum, Product or Derivative) into a flat slice,
// avoiding re-evaluation on repeated use.
func (p Poly) Copy(buf []d2.V) Poly {
	ln := p.Len()
	out := Buf(ln, buf)
	for i := range out {
		out[i] = p.Coefficient(i)
	}
	return Poly{out}
}
// Pt1 returns the point on the curve at t0, evaluating the X and Y
// coordinate polynomials independently.
func (p Poly) Pt1(t0 float64) d2.Pt {
	return d2.Pt{
		X: p.X().F(t0),
		Y: p.Y().F(t0),
	}
}

// X returns the 1D polynomial formed by the X values.
func (p Poly) X() poly1d.Poly {
	return poly1d.Poly{X(p)}
}

// Y returns the 1D polynomial formed by the Y values.
func (p Poly) Y() poly1d.Poly {
	return poly1d.Poly{Y(p)}
}

// Add creates a new polynomial by summing p with p2. The result wraps both
// operands in a Sum view; use Copy to flatten it.
func (p Poly) Add(p2 Poly) Poly {
	return Poly{Sum{p, p2}}
}

// Multiply creates a new polynomial by taking the product of p with p2. The
// result wraps both operands in a Product view; use Copy to flatten it.
func (p Poly) Multiply(p2 Poly) Poly {
	return Poly{Product{p, p2}}
}

// V represents the derivative of a Polynomial and will return d2.V instead of
// d2.Pt.
type V struct {
	Poly
}

// Copy the coefficients into an instance of Slice. The provided buffer will
// be used if it has sufficient capacity.
func (v V) Copy(buf []d2.V) V {
	return V{v.Poly.Copy(buf)}
}

// V returns an instance of V that holds the derivative of p.
func (p Poly) V() V {
	return V{Poly{Derivative{p}}}
}

// V1 returns V at t0.
func (v V) V1(t0 float64) d2.V {
	return v.Pt1(t0).V()
}

// V1c0 returns an instance of V fulfilling d2.V1 and caching the derivative.
// Note that this is still not buffered, so for repeated calls, make a copy to
// reduce duplicated work.
func (p Poly) V1c0() d2.V1 {
	return p.V()
}

// V1 takes the derivative of p at t0.
func (p Poly) V1(t0 float64) d2.V {
	return p.V1c0().V1(t0)
}
// PolyLineIntersections returns the intersection points relative to the
// Polynomial curve, i.e. values of the curve parameter t at which the curve
// crosses the line l.
func (p Poly) PolyLineIntersections(l line.Line, buf []float64) []float64 {
	if l.D.X == 0 {
		// Vertical line x = T0.X: solve p_x(t) - T0.X = 0.
		d0 := poly1d.New(-l.T0.X)
		return p.X().Add(d0).Roots(buf)
	}
	if l.D.Y == 0 {
		// Horizontal line y = T0.Y: solve p_y(t) - T0.Y = 0.
		d0 := poly1d.New(-l.T0.Y)
		return p.Y().Add(d0).Roots(buf)
	}
	// General line with slope m through (T0.X, T0.Y):
	// solve p_y(t) - m*p_x(t) + (m*T0.X - T0.Y) = 0.
	m := l.M()
	p2 := p.Y().Add(p.X().Scale(-m))
	d0 := poly1d.New(m*l.T0.X - l.T0.Y)
	p2 = p2.Add(d0)
	return p2.Roots(buf)
}

// LineIntersections fulfills line.Intersector and returns the intersections
// relative to the line: each curve-parameter root is mapped to a line
// parameter by evaluating the shared coordinate (Y for vertical lines, X
// otherwise).
// NOTE(review): when buf is non-empty and shorter than the number of roots,
// only the first len(buf) entries are converted; any trailing entries keep
// curve parameters — confirm that Roots bounds its output by buf's length.
func (p Poly) LineIntersections(l line.Line, buf []float64) []float64 {
	ln := len(buf)
	ts := p.PolyLineIntersections(l, buf)
	if lnTs := len(ts); lnTs > 0 {
		if ln == 0 || lnTs < ln {
			ln = lnTs
		}
		var toCoord, atCoord func(float64) float64
		if l.D.X == 0 {
			toCoord, atCoord = p.Y().F, l.AtY
		} else {
			toCoord, atCoord = p.X().F, l.AtX
		}
		for i, t := range ts[:ln] {
			ts[i] = atCoord(toCoord(t))
		}
	}
	return ts
}
package cryptypes
import "database/sql/driver"
// EncryptedInt8 supports encrypting Int8 data
type EncryptedInt8 struct {
	Field
	Raw int8
}

// Scan converts the value from the DB into a usable EncryptedInt8 value.
// NOTE(review): the unchecked type assertion panics if the driver supplies
// anything other than []byte (including NULL) — confirm the column is
// NOT NULL, or use NullEncryptedInt8 for nullable columns.
func (s *EncryptedInt8) Scan(value interface{}) error {
	return decrypt(value.([]byte), &s.Raw)
}

// Value converts an initialized EncryptedInt8 value into a value that can safely be stored in the DB
func (s EncryptedInt8) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}
// NullEncryptedInt8 supports encrypting nullable Int8 data; Empty reports a
// NULL column.
type NullEncryptedInt8 struct {
	Field
	Raw int8
	Empty bool
}

// Scan converts the value from the DB into a usable NullEncryptedInt8 value.
// Both Raw and Empty are (re)assigned on every call so a destination struct
// reused across rows (as database/sql does in a rows.Scan loop) never
// reports stale state.
func (s *NullEncryptedInt8) Scan(value interface{}) error {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		return nil
	}
	// Fix: Empty was previously left untouched here, so a non-NULL row
	// scanned after a NULL one still reported Empty == true.
	s.Empty = false
	return decrypt(value.([]byte), &s.Raw)
}

// Value converts an initialized NullEncryptedInt8 value into a value that can safely be stored in the DB
func (s NullEncryptedInt8) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}
// SignedInt8 supports signing Int8 data
type SignedInt8 struct {
	Field
	Raw int8
	Valid bool
}

// Scan converts the value from the DB into a usable SignedInt8 value.
// Valid reports whether the stored signature verified against Raw.
// NOTE(review): the unchecked []byte assertion panics on NULL or non-[]byte
// driver values — confirm the column is NOT NULL, or use NullSignedInt8.
func (s *SignedInt8) Scan(value interface{}) (err error) {
	s.Valid, err = verify(value.([]byte), &s.Raw)
	return
}

// Value converts an initialized SignedInt8 value into a value that can safely be stored in the DB
func (s SignedInt8) Value() (driver.Value, error) {
	return sign(s.Raw)
}
// NullSignedInt8 supports signing nullable Int8 data. Empty reports a NULL
// column; Valid reports whether the signature verified (NULL values are
// treated as valid).
type NullSignedInt8 struct {
	Field
	Raw int8
	Empty bool
	Valid bool
}

// Scan converts the value from the DB into a usable NullSignedInt8 value.
// All fields are (re)assigned on every call so a destination struct reused
// across rows (as database/sql does in a rows.Scan loop) never reports
// stale state.
func (s *NullSignedInt8) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	// Fix: Empty was previously left untouched here, so a non-NULL row
	// scanned after a NULL one still reported Empty == true.
	s.Empty = false
	s.Valid, err = verify(value.([]byte), &s.Raw)
	return
}

// Value converts an initialized NullSignedInt8 value into a value that can safely be stored in the DB
func (s NullSignedInt8) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}
// SignedEncryptedInt8 supports signing and encrypting Int8 data
type SignedEncryptedInt8 struct {
	Field
	Raw int8
	Valid bool
}

// Scan converts the value from the DB into a usable SignedEncryptedInt8
// value. Valid reports whether the decrypted payload's signature verified.
// NOTE(review): the unchecked []byte assertion panics on NULL or non-[]byte
// driver values — confirm the column is NOT NULL, or use
// NullSignedEncryptedInt8.
func (s *SignedEncryptedInt8) Scan(value interface{}) (err error) {
	s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
	return
}

// Value converts an initialized SignedEncryptedInt8 value into a value that can safely be stored in the DB
func (s SignedEncryptedInt8) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}
// NullSignedEncryptedInt8 supports signing and encrypting nullable Int8 data
type NullSignedEncryptedInt8 struct {
Field
Raw int8
Empty bool
Valid bool
}
// Scan converts the value from the DB into a usable NullSignedEncryptedInt8 value
func (s *NullSignedEncryptedInt8) Scan(value interface{}) (err error) {
if value == nil {
s.Raw = 0
s.Empty = true
s.Valid = true
return nil
}
s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
return
}
// Value converts an initialized NullSignedEncryptedInt8 value into a value that can safely be stored in the DB
func (s NullSignedEncryptedInt8) Value() (driver.Value, error) {
if s.Empty {
return nil, nil
}
return encryptSign(s.Raw)
} | cryptypes/type_int8.go | 0.795936 | 0.443721 | type_int8.go | starcoder |
package edge
import (
"errors"
"image"
"image/draw"
"math"
"github.com/robfig/graphics-go/graphics/convolve"
)
var (
	// Each operator below is expressed as a separable kernel: a 1-D
	// derivative filter along one axis combined with a 1-D smoothing filter
	// along the other, which convolve.Convolve applies as two 1-D passes.

	// Sobel: derivative [-1 0 +1] with [1 2 1] smoothing.
	sobelX = &convolve.SeparableKernel{
		X: []float64{-1, 0, +1},
		Y: []float64{1, 2, 1},
	}
	sobelY = &convolve.SeparableKernel{
		X: []float64{1, 2, 1},
		Y: []float64{-1, 0, +1},
	}
	// Scharr: derivative [-1 0 +1] with [3 10 3] smoothing.
	scharrX = &convolve.SeparableKernel{
		X: []float64{-1, 0, +1},
		Y: []float64{3, 10, 3},
	}
	scharrY = &convolve.SeparableKernel{
		X: []float64{3, 10, 3},
		Y: []float64{-1, 0, +1},
	}
	// Prewitt: derivative [-1 0 +1] with uniform [1 1 1] smoothing.
	prewittX = &convolve.SeparableKernel{
		X: []float64{-1, 0, +1},
		Y: []float64{1, 1, 1},
	}
	prewittY = &convolve.SeparableKernel{
		X: []float64{1, 1, 1},
		Y: []float64{-1, 0, +1},
	}
)
// diffOp applies a first-derivative edge operator to src using the separable
// kernels opX (horizontal derivative) and opY (vertical derivative).
// If mag is non-nil it receives the gradient magnitude, clamped to [0, 255].
// If dir is non-nil it receives the gradient direction rounded to 0, 45, 90
// or 135 degrees.
// NOTE(review): assumes mag and dir (when non-nil) cover src's bounds —
// smaller destinations would index out of range; confirm with callers.
func diffOp(mag, dir *image.Gray, src image.Image, opX, opY *convolve.SeparableKernel) error {
	if src == nil {
		return errors.New("graphics: src is nil")
	}
	b := src.Bounds()
	// Convert to grayscale unless src already is an *image.Gray.
	srcg, ok := src.(*image.Gray)
	if !ok {
		srcg = image.NewGray(b)
		draw.Draw(srcg, b, src, b.Min, draw.Src)
	}
	// Horizontal and vertical derivative images.
	mx := image.NewGray(b)
	if err := convolve.Convolve(mx, srcg, opX); err != nil {
		return err
	}
	my := image.NewGray(b)
	if err := convolve.Convolve(my, srcg, opY); err != nil {
		return err
	}
	for y := b.Min.Y; y < b.Max.Y; y++ {
		for x := b.Min.X; x < b.Max.X; x++ {
			// mx and my share b's geometry, so one offset addresses both.
			off := (y-mx.Rect.Min.Y)*mx.Stride + (x - mx.Rect.Min.X)
			cx := float64(mx.Pix[off])
			cy := float64(my.Pix[off])
			if mag != nil {
				off = (y-mag.Rect.Min.Y)*mag.Stride + (x - mag.Rect.Min.X)
				// Clamp: sqrt(cx²+cy²) can reach ~361 for 8-bit inputs, and
				// converting an out-of-range float64 to uint8 is
				// implementation-specific in Go.
				m := math.Sqrt(cx*cx + cy*cy)
				if m > 255 {
					m = 255
				}
				mag.Pix[off] = uint8(m)
			}
			if dir != nil {
				off = (y-dir.Rect.Min.Y)*dir.Stride + (x - dir.Rect.Min.X)
				// Atan2 is well defined at cx==cy==0 (returns 0), unlike the
				// previous Atan(cy/cx) which produced NaN there. Since cx and
				// cy are non-negative uint8 values, angle lies in [0, π/2].
				angle := math.Atan2(cy, cx)
				// Round the angle to 0, 45, 90, or 135 degrees.
				var degree uint8
				switch {
				case angle <= math.Pi/8:
					degree = 0
				case angle <= math.Pi*3/8:
					degree = 45
				case angle <= math.Pi*5/8:
					degree = 90
				case angle <= math.Pi*7/8:
					degree = 135
				}
				dir.Pix[off] = degree
			}
		}
	}
	return nil
}
// Sobel returns the magnitude and direction of the Sobel operator applied to src.
// dir pixels hold the rounded direction value either 0, 45, 90, or 135.
// Either mag or dir may be nil, in which case that output is skipped.
func Sobel(mag, dir *image.Gray, src image.Image) error {
	return diffOp(mag, dir, src, sobelX, sobelY)
}
// Scharr returns the magnitude and direction of the Scharr operator.
// This is very similar to Sobel, with less angular error.
// Either mag or dir may be nil, in which case that output is skipped.
func Scharr(mag, dir *image.Gray, src image.Image) error {
	return diffOp(mag, dir, src, scharrX, scharrY)
}
// Prewitt returns the magnitude and direction of the Prewitt operator.
func Prewitt(mag, dir *image.Gray, src image.Image) error {
return diffOp(mag, dir, src, prewittX, prewittY)
} | graphics/edge/sobel.go | 0.709321 | 0.492798 | sobel.go | starcoder |
package stringo
import (
"strings"
"unicode/utf8"
)
// TransformFlag selects which transformations Transform and TransformSerially
// apply. The flags are bit values and may be OR-combined.
type TransformFlag uint

const (
	// TransformNone applies no transformation besides the maximum-length
	// constraint; it turns all other flags OFF.
	TransformNone TransformFlag = 1 << iota
	// TransformTrim trims the string, removing leading and trailing spaces.
	TransformTrim
	// TransformLowerCase makes the string lowercase.
	// If case flags are combined, the last one remains, in the order
	// TransformTitleCase, TransformLowerCase, TransformUpperCase.
	TransformLowerCase
	// TransformUpperCase makes the string uppercase.
	// If case flags are combined, the last one remains, in the order
	// TransformTitleCase, TransformLowerCase, TransformUpperCase.
	TransformUpperCase
	// TransformOnlyDigits removes all non-numeric characters.
	TransformOnlyDigits
	// TransformOnlyLetters removes all non-letter characters.
	TransformOnlyLetters
	// TransformOnlyLettersAndDigits leaves only letters and numbers.
	TransformOnlyLettersAndDigits
	// TransformHash applies Sha256Hash to the string after processing all
	// other flags.
	TransformHash
	// TransformTitleCase title-cases the string.
	// If case flags are combined, the last one remains, in the order
	// TransformTitleCase, TransformLowerCase, TransformUpperCase.
	TransformTitleCase
	// TransformRemoveDigits removes all digit characters, leaving the rest
	// untouched. Ineffective when combined with TransformOnlyLettersAndDigits,
	// TransformOnlyDigits or TransformOnlyLetters.
	TransformRemoveDigits
)
// Transform reformats s according to the given flags.
// The transformations run in a fixed internal order (filters, trim, case,
// hash); if the order matters to you, use TransformSerially instead.
// If maxLen==0, truncation is skipped.
// The final operations are, in order, truncation and trimming.
func Transform(s string, maxLen int, transformFlags TransformFlag) string {
	if s == "" {
		return s
	}
	// has reports whether a flag was requested.
	has := func(flag TransformFlag) bool {
		return transformFlags&flag == flag
	}
	// cut enforces the maximum rune length when maxLen > 0.
	cut := func(v string) string {
		if maxLen > 0 && utf8.RuneCountInString(v) > maxLen {
			return string([]rune(v)[:maxLen])
		}
		return v
	}
	// TransformNone short-circuits everything except truncation.
	if has(TransformNone) {
		return cut(s)
	}
	if has(TransformOnlyLettersAndDigits) {
		s = OnlyLettersAndNumbers(s)
	}
	if has(TransformOnlyDigits) {
		s = OnlyDigits(s)
	}
	if has(TransformOnlyLetters) {
		s = OnlyLetters(s)
	}
	if has(TransformRemoveDigits) {
		s = RemoveDigits(s)
	}
	if has(TransformTrim) {
		s = strings.TrimSpace(s)
	}
	if has(TransformTitleCase) {
		s = strings.Title(strings.ToLower(s))
	}
	if has(TransformLowerCase) {
		s = strings.ToLower(s)
	}
	if has(TransformUpperCase) {
		s = strings.ToUpper(s)
	}
	if has(TransformHash) {
		s = Sha256Hash(s)
	}
	if s == "" {
		return s
	}
	// Truncate first, then trim once more: truncation may expose new
	// trailing spaces.
	s = cut(s)
	if has(TransformTrim) {
		s = strings.TrimSpace(s)
	}
	return s
}
// TransformSerially reformat given string according parameters, in the order these params were sent
// Example: TransformSerially("uh lalah 123", 4, TransformOnlyDigits,TransformHash,TransformUpperCase)
// First remove non-digits, then hashes string and after make it all uppercase.
// If maxLen==0, truncation is skipped
// Truncation is the last operation
func TransformSerially(s string, maxLen int, transformFlags ...TransformFlag) string {
if s == "" {
return s
}
for _, flag := range transformFlags {
switch flag {
case TransformOnlyLettersAndDigits:
s = OnlyLettersAndNumbers(s)
case TransformOnlyDigits:
s = OnlyDigits(s)
case TransformOnlyLetters:
s = OnlyLetters(s)
case TransformTrim:
s = strings.TrimSpace(s)
case TransformTitleCase:
s = strings.ToTitle(s)
case TransformLowerCase:
s = strings.ToLower(s)
case TransformUpperCase:
s = strings.ToUpper(s)
case TransformHash:
s = Sha256Hash(s)
}
}
if maxLen > 0 && utf8.RuneCountInString(s) > maxLen {
s = string([]rune(s)[:maxLen])
}
return s
} | transform.go | 0.539226 | 0.558869 | transform.go | starcoder |
package codegen
import (
"regexp"
"strings"
"github.com/pulumi/pulumi/pkg/v2/codegen/schema"
)
var (
	// IMPORTANT! The following regexp's contain named capturing groups.
	// It's the `?P<group_name>` where group_name can be any name.
	// When changing the group names, be sure to change the reference to
	// the corresponding group name below where they are used as well.

	// surroundingTextRE matches the whole {{% examples %}}...{{% /examples %}}
	// section, shortcodes included; used to swap the section for a placeholder.
	surroundingTextRE = regexp.MustCompile("({{% examples %}}(.|\n)*?{{% /examples %}})")
	// examplesSectionRE captures the section delimiters and inner content as
	// named groups.
	examplesSectionRE = regexp.MustCompile(
		"(?P<examples_start>{{% examples %}})(?P<examples_content>(.|\n)*?)(?P<examples_end>{{% /examples %}})")
	// individualExampleRE captures a single {{% example %}} block inside the
	// examples section.
	individualExampleRE = regexp.MustCompile(
		"(?P<example_start>{{% example %}})(?P<example_content>(.|\n)*?)(?P<example_end>{{% /example %}})")
	// h3TitleRE matches an example's "### Title" heading line.
	h3TitleRE = regexp.MustCompile("(### .*)")
	// The following regexp's match the code snippet blocks in a single example section.
	tsCodeSnippetRE = regexp.MustCompile("(```(typescript))((.|\n)*?)(```)")
	goCodeSnippetRE = regexp.MustCompile("(```(go))((.|\n)*?)(```)")
	pythonCodeSnippetRE = regexp.MustCompile("(```(python))((.|\n)*?)(```)")
	csharpCodeSnippetRE = regexp.MustCompile("(```(csharp))((.|\n)*?)(```)")
)
// DocLanguageHelper is an interface for extracting language-specific information from a Pulumi schema.
// See the implementation for this interface under each of the language code generators.
type DocLanguageHelper interface {
	// GetPropertyName returns the language-specific name for the given property.
	GetPropertyName(p *schema.Property) (string, error)
	// GetDocLinkForResourceType returns the API-doc link for a resource type.
	GetDocLinkForResourceType(pkg *schema.Package, moduleName, typeName string) string
	// GetDocLinkForPulumiType returns the API-doc link for a core Pulumi type.
	GetDocLinkForPulumiType(pkg *schema.Package, typeName string) string
	// GetDocLinkForResourceInputOrOutputType returns the doc link for a
	// resource's input (input == true) or output type.
	GetDocLinkForResourceInputOrOutputType(pkg *schema.Package, moduleName, typeName string, input bool) string
	// GetDocLinkForFunctionInputOrOutputType returns the doc link for a
	// function's input (input == true) or output type.
	GetDocLinkForFunctionInputOrOutputType(pkg *schema.Package, moduleName, typeName string, input bool) string
	// GetDocLinkForBuiltInType returns the doc link for a built-in type.
	GetDocLinkForBuiltInType(typeName string) string
	// GetLanguageTypeString renders t as a type expression in the target language.
	GetLanguageTypeString(pkg *schema.Package, moduleName string, t schema.Type, input, optional bool) string
	// GetFunctionName returns the language-specific name for the given function.
	GetFunctionName(modName string, f *schema.Function) string
	// GetResourceFunctionResultName returns the name of the result type when a static resource function is used to lookup
	// an existing resource.
	GetResourceFunctionResultName(modName string, f *schema.Function) string
}
// exampleParts holds the pieces extracted from one {{% example %}} block for
// a single language.
type exampleParts struct {
	// Title is the example's "### ..." heading line (may be empty).
	Title string
	// Snippet is the fenced code block for the requested language.
	Snippet string
}
func getFirstMatchedGroupsFromRegex(regex *regexp.Regexp, str string) map[string]string {
groups := map[string]string{}
// Get all matching groups.
matches := regex.FindAllStringSubmatch(str, -1)
if len(matches) == 0 {
return groups
}
firstMatch := matches[0]
// Get the named groups in our regex.
groupNames := regex.SubexpNames()
for i, value := range firstMatch {
groups[groupNames[i]] = value
}
return groups
}
func getAllMatchedGroupsFromRegex(regex *regexp.Regexp, str string) map[string][]string {
// Get all matching groups.
matches := regex.FindAllStringSubmatch(str, -1)
// Get the named groups in our regex.
groupNames := regex.SubexpNames()
groups := map[string][]string{}
for _, match := range matches {
for j, value := range match {
if existing, ok := groups[groupNames[j]]; ok {
existing = append(existing, value)
groups[groupNames[j]] = existing
continue
}
groups[groupNames[j]] = []string{value}
}
}
return groups
}
// isEmpty reports whether s is blank, i.e. contains nothing but newline
// characters. It decides whether captured shortcode content is effectively
// empty.
//
// The previous implementation used strings.Replace(s, "\n", "", 1), which
// removed only the FIRST newline and so reported "\n\n" as non-empty.
func isEmpty(s string) bool {
	return strings.ReplaceAll(s, "\n", "") == ""
}
// extractExamplesSection returns the content enclosed by the
// {{% examples %}} shortcodes, or nil when the section is absent or blank.
func extractExamplesSection(description string) *string {
	groups := getFirstMatchedGroupsFromRegex(examplesSectionRE, description)
	content, ok := groups["examples_content"]
	if !ok || isEmpty(content) {
		return nil
	}
	return &content
}
// identifyExampleParts extracts the H3 title and the lang-specific code
// snippet from one example's content. It returns nil when the example has no
// code fence for lang; an unrecognized lang yields an empty Snippet.
func identifyExampleParts(exampleContent string, lang string) *exampleParts {
	if !strings.Contains(exampleContent, "```"+lang) {
		return nil
	}
	var snippetRE *regexp.Regexp
	switch lang {
	case "csharp":
		snippetRE = csharpCodeSnippetRE
	case "go":
		snippetRE = goCodeSnippetRE
	case "python":
		snippetRE = pythonCodeSnippetRE
	case "typescript":
		snippetRE = tsCodeSnippetRE
	}
	parts := &exampleParts{Title: h3TitleRE.FindString(exampleContent)}
	if snippetRE != nil {
		parts.Snippet = snippetRE.FindString(exampleContent)
	}
	return parts
}
// getExamplesForLang collects the title/snippet pairs of every example inside
// examplesContent that carries a code snippet for lang. Examples without a
// snippet for the language are dropped.
func getExamplesForLang(examplesContent string, lang string) []exampleParts {
	result := make([]exampleParts, 0)
	matches := getAllMatchedGroupsFromRegex(individualExampleRE, examplesContent)
	for _, content := range matches["example_content"] {
		parts := identifyExampleParts(content, lang)
		if parts == nil || parts.Snippet == "" {
			continue
		}
		result = append(result, *parts)
	}
	return result
}
// StripNonRelevantExamples strips the non-relevant language snippets from a resource's description.
func StripNonRelevantExamples(description string, lang string) string {
if description == "" {
return ""
}
// Replace the entire section (including the shortcodes themselves) enclosing the
// examples section, with a placeholder, which itself will be replaced appropriately
// later.
newDescription := surroundingTextRE.ReplaceAllString(description, "{{ .Examples }}")
// Get the content enclosing the outer examples short code.
examplesContent := extractExamplesSection(description)
if examplesContent == nil {
return strings.ReplaceAll(newDescription, "{{ .Examples }}", "")
}
// Within the examples section, identify each example.
builder := strings.Builder{}
examples := getExamplesForLang(*examplesContent, lang)
numExamples := len(examples)
if numExamples > 0 {
builder.WriteString("## Example Usage\n\n")
}
for i, ex := range examples {
builder.WriteString(ex.Title + "\n\n")
builder.WriteString(ex.Snippet + "\n")
// Print an extra new-line character as long as this is not
// the last example.
if i != numExamples-1 {
builder.WriteString("\n")
}
}
return strings.ReplaceAll(newDescription, "{{ .Examples }}", builder.String())
} | pkg/codegen/docs.go | 0.674587 | 0.522263 | docs.go | starcoder |
package core
import (
"fmt"
"math"
"github.com/go-gl/mathgl/mgl32"
"github.com/go-gl/mathgl/mgl64"
)
// Shadower is an interface which wraps logic to implement shadowing of a light.
type Shadower interface {
	// Textures returns the shadow textures used by this shadower.
	Textures() []Texture
	// Render renders the shadow data for the given light as seen from the
	// given scene camera.
	Render(*Light, *Camera)
}
// ShadowMap is a utility implementation of the Shadower interface which renders shadows by using a cascading shadow map.
type ShadowMap struct {
	// size is the width and height, in texels, of each cascade's texture.
	size uint32
	// cameras holds one orthographic light camera per cascade.
	cameras []*Camera
	// textures holds each cascade's framebuffer color attachment.
	textures []Texture
}
const (
	// numCascades is the number of cascades rendered per light.
	numCascades = 3
	// maxCascades is an upper bound on the cascade count.
	// NOTE(review): maxCascades is not referenced anywhere in this file —
	// confirm it is used elsewhere (e.g. shader uniform sizing) before removal.
	maxCascades = 10
)
// NewShadowMap returns a new ShadowMap with, per cascade, one size x size
// RG32F float texture and one orthographic light camera rendering into it.
func NewShadowMap(size uint32) *ShadowMap {
	shadowMap := &ShadowMap{size, make([]*Camera, numCascades), make([]Texture, numCascades)}
	for i := 0; i < numCascades; i++ {
		// create a framebuffer for the cascade
		framebuffer := renderSystem.NewFramebuffer()
		// create a float RG texture to write to (two channels; presumably
		// depth moments for filtering — confirm against the shadow shader)
		texture := renderSystem.NewTexture(TextureDescriptor{
			Width: size,
			Height: size,
			Mipmaps: false,
			Target: TextureTarget2D,
			Format: TextureFormatRG,
			SizedFormat: TextureSizedFormatRG32F,
			ComponentType: TextureComponentTypeFLOAT,
			Filter: TextureFilterLinear,
			WrapMode: TextureWrapModeRepeat,
		}, nil)
		// set it as the framebuffer color attachment
		framebuffer.SetColorAttachment(0, texture)
		// create an orthographic camera targeting that framebuffer; its
		// view/projection matrices are filled in later by renderCascade
		c := NewCamera("ShadowCamera", OrthographicProjection)
		c.SetFramebuffer(framebuffer)
		c.SetViewport(mgl32.Vec4{0.0, 0.0, float32(size), float32(size)})
		c.SetAutoReshape(false)
		c.SetRenderTechnique(nil)
		shadowMap.cameras[i] = c
		shadowMap.textures[i] = framebuffer.ColorAttachment(0)
	}
	return shadowMap
}
// Textures implements the Shadower interface; it returns one shadow texture
// per cascade.
func (s *ShadowMap) Textures() []Texture {
	return s.textures
}
// renderCascade renders one shadow-map cascade for the given light. It fits
// an orthographic frustum around the scene camera's precomputed per-cascade
// bounds (camera.cascadingAABBS), snaps that frustum to texel-sized
// increments, writes the cascade's split depth and biased view-projection
// matrix into the light's uniform block, and draws every non-blended node
// bucket into the cascade's framebuffer.
func (s *ShadowMap) renderCascade(cascade int, light *Light, camera *Camera) {
	/*
		1-find all objects that are inside the current camera frustum
		2-find minimal aa bounding box that encloses them all
		3-transform corners of that bounding box to the light's space (using light's view matrix)
		4-find aa bounding box in light's space of the transformed (now obb) bounding box
		5-this aa bounding box is your directional light's orthographic frustum.
	*/
	var shadowCam = s.cameras[cascade]
	// compute lightcamera viewmatrix: look from the light's position toward
	// the origin (float32 position widened to float64 for the math below)
	lightPos64 := mgl64.Vec3{float64(light.Block.Position.X()), float64(light.Block.Position.Y()), float64(light.Block.Position.Z())}
	shadowCam.viewMatrix = mgl64.LookAtV(lightPos64, mgl64.Vec3{0.0, 0.0, 0.0}, mgl64.Vec3{0.0, 1.0, 0.0})
	// 3-transform corners of that bounding box to the light's space (using light's view matrix)
	// 4-find aa bounding box in light's space of the transformed (now obb) bounding box
	nodesBoundsLight := camera.cascadingAABBS[cascade].Transformed(shadowCam.viewMatrix)
	// 5-this aa bounding box is your directional light's orthographic frustum. except we want integer increments:
	// snapping the window to whole texels keeps shadow texel positions stable
	// between frames (standard CSM practice — confirm intent with authors)
	worldUnitsPerTexel := nodesBoundsLight.Max().Sub(nodesBoundsLight.Min()).Mul(1.0 / float64(s.size))
	projMinX := math.Floor(nodesBoundsLight.Min().X()/worldUnitsPerTexel.X()) * worldUnitsPerTexel.X()
	projMaxX := math.Floor(nodesBoundsLight.Max().X()/worldUnitsPerTexel.X()) * worldUnitsPerTexel.X()
	projMinY := math.Floor(nodesBoundsLight.Min().Y()/worldUnitsPerTexel.Y()) * worldUnitsPerTexel.Y()
	projMaxY := math.Floor(nodesBoundsLight.Max().Y()/worldUnitsPerTexel.Y()) * worldUnitsPerTexel.Y()
	// near/far come from the light-space Z extent (negated: the camera looks
	// down -Z in light space)
	shadowCam.projectionMatrix = mgl64.Ortho(
		projMinX, projMaxX,
		projMinY, projMaxY,
		-nodesBoundsLight.Max().Z(),
		-nodesBoundsLight.Min().Z())
	vpmatrix := shadowCam.projectionMatrix.Mul4(shadowCam.viewMatrix)
	// bias matrix remaps clip-space [-1,1] to texture-space [0,1]
	biasvpmatrix := mgl64.Mat4FromCols(
		mgl64.Vec4{0.5, 0.0, 0.0, 0.0},
		mgl64.Vec4{0.0, 0.5, 0.0, 0.0},
		mgl64.Vec4{0.0, 0.0, 0.5, 0.0},
		mgl64.Vec4{0.5, 0.5, 0.5, 1.0}).Mul4(vpmatrix)
	// set light block: the cascade's split depth and its biased VP matrix
	light.Block.ZCuts[cascade] = mgl32.Vec4{float32(camera.cascadingZCuts[cascade]), 0.0, 0.0, 0.0}
	light.Block.VPMatrix[cascade] = Mat4DoubleToFloat(biasvpmatrix)
	// set camera constants
	shadowCam.constants.SetData(shadowCam.projectionMatrix, shadowCam.viewMatrix, nil)
	// create a single stage now: bind the cascade's framebuffer and clear it
	renderSystem.Dispatch(&SetFramebufferCommand{shadowCam.framebuffer})
	renderSystem.Dispatch(&SetViewportCommand{shadowCam.viewport})
	renderSystem.Dispatch(&ClearCommand{shadowCam.clearMode, shadowCam.clearColor, shadowCam.clearDepth})
	// create pass per bucket, opaque is default; blended nodes cast no shadow
	for state, nodeBucket := range camera.stateBuckets {
		if state.Blending {
			continue
		}
		// point every node's material at this cascade's shadow texture
		for _, n := range nodeBucket {
			n.materialData.SetTexture(fmt.Sprintf("shadowTex%d", cascade), s.textures[cascade])
		}
		renderSystem.Dispatch(&BindStateCommand{resourceManager.State("shadow")})
		renderSystem.Dispatch(&BindUniformBufferCommand{"cameraConstants", shadowCam.constants.buffer})
		RenderBatchedNodes(shadowCam, nodeBucket)
	}
}
// Render implements the Shadower interface
func (s *ShadowMap) Render(light *Light, cam *Camera) {
for c := 0; c < numCascades; c++ {
s.renderCascade(c, light, cam)
}
} | core/shadow.go | 0.816918 | 0.404449 | shadow.go | starcoder |
package common
// The following `enum` definitions are in line with the corresponding
// ones in InChI 1.04 software. A notable difference is that we DO
// NOT provide for specifying bond stereo with respect to the second
// atom in the pair.
// Radical represents possible radical configurations of an atom.
// Values follow the corresponding InChI 1.04 constants (see file header);
// RadicalNone (0) means no radical.
type Radical uint8
const (
	RadicalNone Radical = iota
	RadicalSinglet
	RadicalDoublet
	RadicalTriplet
)
// BondType defines the possible types of bonds between a pair of
// atoms. Values follow the corresponding InChI 1.04 constants (see file
// header); BondTypeNone (0) means no bond.
type BondType uint8
const (
	BondTypeNone BondType = iota
	BondTypeSingle
	BondTypeDouble
	BondTypeTriple
	BondTypeAltern // InChI says 'avoid by all means'!
)
// BondStereo defines the possible stereo orientations of a given
// bond, when 2-D coordinates are given.
// The explicit, non-sequential values mirror the InChI 1.04 constants (see
// file header); note BondStereoDoubleEither (3) is out of numeric order.
type BondStereo uint8
const (
	BondStereoNone BondStereo = 0
	BondStereoUp BondStereo = 1
	BondStereoEither BondStereo = 4
	BondStereoDown BondStereo = 6
	BondStereoDoubleEither BondStereo = 3
)
// StereoType specifies the nature of the origin of the stereo
// behaviour. Values follow the corresponding InChI 1.04 constants (see file
// header); StereoTypeNone (0) means no stereo centre.
type StereoType uint8
const (
	StereoTypeNone StereoType = iota
	StereoTypeDoubleBond
	StereoTypeTetrahedral
	StereoTypeAllene
)
// StereoParity defines the possible stereo configurations, given a
// particular stereo centre (atom or bond). Values follow the corresponding
// InChI 1.04 constants (see file header).
type StereoParity uint8
const (
	StereoParityNone StereoParity = iota
	StereoParityOdd
	StereoParityEven
	StereoParityUnknown
	StereoParityUndefined
)
// The following `enum` definitions are as per RxnWeaver's internal
// requirements and concepts. They do not necessarily map readily to
// any definitions in other software.
// Unsaturation reflects a composite of an atom's oxidation state and
// its neighbours.
// NOTE(review): the meaning of the C/W suffixes is not defined in this file —
// confirm against the package documentation before relying on it.
type Unsaturation uint8

const (
	UnsaturationNone Unsaturation = iota
	UnsaturationAromatic
	UnsaturationDoubleBondC
	UnsaturationDoubleBondW
	UnsaturationDoubleBondCC
	UnsaturationDoubleBondCW
	UnsaturationDoubleBondWW
	UnsaturationTripleBondC
	UnsaturationTripleBondW
	UnsaturationCharged
)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.