code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package prime
import (
"fmt"
"math/big"
)
// Curve is an elliptic curve of the form y^2 = x^3 + a*x + b,
// defined over a prime field.
type Curve struct {
	f *Field // field over which the curve is defined
	a,b *Element // curve coefficients a and b
}
// Point is a point on an associated Curve.
// A point with nil x,y coordinates is considered a point at infinity.
type Point struct {
	c *Curve // associated curve
	x,y *Element // point coordinates; (nil, nil) = point at infinity
}
// IsInf reports whether p is the point at infinity.
func (c *Curve) IsInf(p *Point) bool {
	if p.x != nil {
		return false
	}
	return p.y == nil
}
// Define initializes c over field f with the integer coefficients a and b,
// and returns c for chaining.
func (c *Curve) Define(f *Field, a, b int) *Curve {
	c.f = f
	c.a, c.b = f.Int64(int64(a)), f.Int64(int64(b))
	return c
}
// NewPoint returns a fresh point associated with curve c.
func (c *Curve) NewPoint() *Point {
	p := new(Point)
	return p.SetCurve(c)
}
// SetCurve associates p with curve c and returns p for chaining.
func (p *Point) SetCurve(c *Curve) *Point {
	p.c = c
	return p
}
// Set assigns coordinates (x, y) to p after verifying that they satisfy
// the curve equation y^2 = x^3 + a*x + b. It panics if the point is not
// on the curve, and returns p for chaining.
func (p *Point) Set(x, y *Element) *Point {
	c := p.c
	f := c.f
	// calculate both sides of the curve equation
	l := f.NewElement().Exp(y, f.Int64(2)) // y^2
	r := f.NewElement().Exp(x, f.Int64(3)) // x^3
	r.Add(r, f.NewElement().Mul(c.a, x)) // x^3 + a*x
	r.Add(r, c.b) // x^3 + a*x + b
	if l.Cmp(r) != 0 {
		panic("point not on curve")
	}
	p.x = x
	p.y = y
	return p
}
// GetX returns the x coordinate as a *big.Int.
// NOTE(review): dereferences p.x without a nil check, so this panics for
// the point at infinity — confirm callers check IsInf first.
func (p *Point) GetX() *big.Int {
	return p.x.v
}
// GetY returns the y coordinate as a *big.Int.
// NOTE(review): panics for the point at infinity (p.y is nil) — confirm
// callers check IsInf first.
func (p *Point) GetY() *big.Int {
	return p.y.v
}
// SetInf turns p into the point at infinity and returns p.
func (p *Point) SetInf() *Point {
	p.x, p.y = nil, nil
	return p
}
// Neg sets p to -t (same x, negated y) and returns p.
func (p *Point) Neg(t *Point) *Point {
	c := t.c
	if c.IsInf(t) {
		return p.SetInf() // -inf = inf
	}
	f := c.f
	return p.Set(t.x, f.NewElement().Neg(t.y))
}
// Equal reports whether p and t represent the same point.
// Two points at infinity are equal even when they are distinct *Point
// values. (Previously they compared equal only when they were the same
// pointer, so e.g. t.Equal(c.NewPoint().Neg(t)) was false for an
// infinity t, letting callers fall into nil-coordinate arithmetic.)
func (p *Point) Equal(t *Point) bool {
	if p == t {
		return true
	}
	if p.x == nil || t.x == nil {
		// at least one point at infinity: equal only if both are
		return p.x == nil && p.y == nil && t.x == nil && t.y == nil
	}
	return p.x.Cmp(t.x) == 0 && p.y.Cmp(t.y) == 0
}
// String renders p as "(x,y)", or "(inf,inf)" for the point at infinity.
func (p *Point) String() string {
	if p.c.IsInf(p) {
		return "(inf,inf)"
	}
	return fmt.Sprintf("(%s,%s)", p.x, p.y)
}
// The three algorithms below (point doubling, addition and
// multiplication) follow the definitions given in Guide to Elliptic
// Curve Cryptography by Hankerson, Menezes & Vanstone, first edition.
// Section 3.1.2
// Double sets p = 2*t using the tangent-line doubling formula
// (Section 3.1.2). If t is its own negation (y == 0), the tangent is
// vertical and the result is the point at infinity.
// NOTE(review): a t at infinity is NOT caught by the Equal check
// (distinct infinity points compare unequal) and will hit nil
// coordinates below — confirm callers never pass infinity.
func (p *Point) Double(t *Point) *Point {
	c := t.c
	if t.Equal(c.NewPoint().Neg(t)) {
		return p.SetInf()
	}
	f := c.f
	x := f.NewElement()
	y := f.NewElement()
	// tangent slope: (3*x^2 + a) / (2*y)
	y.Exp(t.x, f.Int64(2))
	y.Mul(y, f.Int64(3))
	y.Add(y, c.a)
	y.Div(y, f.NewElement().Mul(f.Int64(2), t.y))
	// x' = slope^2 - 2*x
	x.Exp(y, f.Int64(2))
	x.Sub(x, f.NewElement().Mul(f.Int64(2), t.x))
	// y' = slope*(x - x') - y
	y.Mul(y, f.NewElement().Sub(t.x, x))
	y.Sub(y, t.y)
	return p.Set(x, y)
}
// Section 3.1.2
// Add sets p = t + u using the chord addition formula. Special cases
// are handled explicitly: if either operand is the point at infinity
// the other operand is the result, t + (-t) is the point at infinity,
// and t + t is delegated to Double. (The previous version wrongly
// returned infinity for t == u — the chord slope is undefined there
// and the tangent formula must be used — and panicked on inf + inf
// because the Equal checks ran before the infinity checks.)
func (p *Point) Add(t, u *Point) *Point {
	c := t.c
	// Identity-element cases first, so nil coordinates never reach the
	// arithmetic below.
	if c.IsInf(t) {
		if c.IsInf(u) {
			return p.SetInf()
		}
		return p.Set(u.x, u.y)
	}
	if c.IsInf(u) {
		return p.Set(t.x, t.y)
	}
	if t.Equal(u) {
		return p.Double(t) // chord slope undefined; use the tangent formula
	}
	if t.Equal(c.NewPoint().Neg(u)) {
		return p.SetInf() // t + (-t) = inf
	}
	f := c.f
	x := f.NewElement()
	y := f.NewElement()
	// chord slope: (u.y - t.y) / (u.x - t.x)
	y.Sub(u.y, t.y)
	y.Div(y, f.NewElement().Sub(u.x, t.x))
	// x' = slope^2 - t.x - u.x
	x.Exp(y, f.Int64(2))
	x.Sub(x, t.x)
	x.Sub(x, u.x)
	// y' = slope*(t.x - x') - t.y
	y.Mul(y, f.NewElement().Sub(t.x, x))
	y.Sub(y, t.y)
	return p.Set(x, y)
}
// Algorithm 3.26
func (p *Point) Mul(t *Point, k *big.Int) *Point {
c := t.c
u := c.NewPoint().Set(t.x, t.y)
p.SetInf()
for i := 0; i < k.BitLen(); i++ {
if k.Bit(i) == 1 {
p.Add(p, u)
}
u.Double(u)
}
return p
} | src/godot/ecdsa/prime/curve.go | 0.805058 | 0.500366 | curve.go | starcoder |
package slippy
import (
"errors"
"fmt"
"github.com/go-spatial/geom"
)
// MaxZoom is the highest supported zoom level (most zoomed-in).
const MaxZoom = 22
// NewTile returns a Tile for the given zoom z, column x and row y.
func NewTile(z, x, y uint) *Tile {
	return &Tile{Z: z, X: x, Y: y}
}
// Tile describes a slippy tile.
type Tile struct {
	// zoom level
	Z uint
	// column (x index at zoom Z)
	X uint
	// row (y index at zoom Z)
	Y uint
}
// NewTileMinMaxer returns the smallest tile which fits the
// geom.MinMaxer, or ok == false if no such tile exists on the grid.
// Note: it assumes the values of ext are EPSG:4326 (lng/lat).
func NewTileMinMaxer(g Grid, ext geom.MinMaxer) (*Tile, bool) {
	// Tile containing the minimum corner at maximum zoom; every
	// candidate ancestor of this tile contains the min corner too.
	tile, ok := g.FromNative(MaxZoom, geom.Point{
		ext.MinX(),
		ext.MinY(),
	})
	if !ok {
		return nil, false
	}
	var ret *Tile
	for z := uint(MaxZoom); int(z) >= 0 && ret == nil; z-- {
		RangeFamilyAt(g, tile, z, func(t *Tile) error {
			// The candidate already contains the minimum corner by
			// construction, so it fits the whole extent iff it also
			// contains the maximum corner. (The previous code shadowed
			// ext here and tested the tile extent against its own
			// Max(), which is trivially true.)
			if te, ok := Extent(g, t); ok && te.Contains(geom.Point{ext.MaxX(), ext.MaxY()}) {
				ret = t
				return errors.New("stop iter") // sentinel to break out of the range
			}
			return nil
		})
	}
	// Previously this returned (nil, true) when nothing matched.
	return ret, ret != nil
}
// FromBounds returns a list of tiles that make up the bound given. The bounds should be defined as the following lng/lat points [4]float64{west,south,east,north}
func FromBounds(g Grid, bounds *geom.Extent, z uint) []Tile {
	if bounds == nil {
		return nil
	}
	// project both corners onto the tile grid at zoom z
	p1, ok := g.FromNative(z, bounds.Min())
	if !ok {
		return nil
	}
	p2, ok := g.FromNative(z, bounds.Max())
	if !ok {
		return nil
	}
	// normalize so min <= max on both axes (tile y grows downward, so
	// the projected corners may arrive swapped)
	minx, maxx := p1.X, p2.X
	if minx > maxx {
		minx, maxx = maxx, minx
	}
	miny, maxy := p1.Y, p2.Y
	if miny > maxy {
		miny, maxy = maxy, miny
	}
	// pre-size for the full inclusive rectangle of tiles
	ret := make([]Tile, 0, (maxx-minx+1)*(maxy-miny+1))
	for x := minx; x <= maxx; x++ {
		for y := miny; y <= maxy; y++ {
			ret = append(ret, Tile{z, x, y})
		}
	}
	return ret
}
// ZXY returns the z, x, y of the tile.
func (t Tile) ZXY() (uint, uint, uint) { return t.Z, t.X, t.Y }
// Iterator is the callback type used by tile-iteration helpers; a
// non-nil return value stops the iteration and is propagated.
type Iterator func(*Tile) error
// RangeFamilyAt calls f on every tile vertically related to t at the specified zoom
// (ancestor for a lower zoom, descendants for a higher one).
// TODO (ear7h): sibling support
func RangeFamilyAt(g Grid, t *Tile, zoom uint, f Iterator) error {
	// top-left corner of t in native (projected) coordinates
	tl, ok := g.ToNative(t)
	if !ok {
		return fmt.Errorf("tile %v not valid for grid", t)
	}
	// bottom-right corner = top-left of the diagonally adjacent tile
	br, ok := g.ToNative(NewTile(t.Z, t.X+1, t.Y+1))
	if !ok {
		return fmt.Errorf("tile %v not valid for grid", t)
	}
	// re-project both corners onto the target zoom's tile grid
	tlt, ok := g.FromNative(zoom, tl)
	if !ok {
		return fmt.Errorf("tile %v not valid for grid", t)
	}
	brt, ok := g.FromNative(zoom, br)
	if !ok {
		return fmt.Errorf("tile %v not valid for grid", t)
	}
	// NOTE(review): the bounds are half-open (x < brt.X); confirm this
	// covers the single-ancestor case when zoom < t.Z.
	for x := tlt.X; x < brt.X; x++ {
		for y := tlt.Y; y < brt.Y; y++ {
			err := f(NewTile(zoom, x, y))
			if err != nil {
				return err
			}
		}
	}
	return nil
}
package main
import (
"image/color"
"math"
"fyne.io/fyne/v2"
"fyne.io/fyne/v2/canvas"
"fyne.io/fyne/v2/theme"
"fyne.io/fyne/v2/widget"
"github.com/Jacalz/linalg/matrix"
)
// LineDrawer draws lines from a matrix of position vectors.
type LineDrawer struct {
	widget.BaseWidget
	lines []fyne.CanvasObject // canvas lines derived from matrix
	matrix matrix.Matrix // current (transformed) position vectors
}
// NewLineDrawer creates a new LineDrawer with the given matrix.
func NewLineDrawer(m matrix.Matrix) *LineDrawer {
	drawer := &LineDrawer{matrix: m}
	drawer.lines = LinesFromMatrix(m)
	return drawer
}
// LinesFromMatrix creates new canvas.Line from the matrix.
// M holds one position vector per column: row 0 carries the x
// coordinates and row 1 the y coordinates. The index pairs below are
// the fixed edge list of the 16-vertex figure being drawn.
func LinesFromMatrix(M matrix.Matrix) []fyne.CanvasObject {
	return []fyne.CanvasObject{
		NewLineBetween(M[0][0], M[0][12], M[1][0], M[1][12]),
		NewLineBetween(M[0][0], M[0][10], M[1][0], M[1][10]),
		NewLineBetween(M[0][1], M[0][10], M[1][1], M[1][10]),
		NewLineBetween(M[0][1], M[0][8], M[1][1], M[1][8]),
		NewLineBetween(M[0][2], M[0][9], M[1][2], M[1][9]),
		NewLineBetween(M[0][2], M[0][8], M[1][2], M[1][8]),
		NewLineBetween(M[0][3], M[0][11], M[1][3], M[1][11]),
		NewLineBetween(M[0][3], M[0][9], M[1][3], M[1][9]),
		NewLineBetween(M[0][4], M[0][13], M[1][4], M[1][13]),
		NewLineBetween(M[0][4], M[0][11], M[1][4], M[1][11]),
		NewLineBetween(M[0][5], M[0][15], M[1][5], M[1][15]),
		NewLineBetween(M[0][5], M[0][13], M[1][5], M[1][13]),
		NewLineBetween(M[0][6], M[0][14], M[1][6], M[1][14]),
		NewLineBetween(M[0][6], M[0][15], M[1][6], M[1][15]),
		NewLineBetween(M[0][7], M[0][12], M[1][7], M[1][12]),
		NewLineBetween(M[0][7], M[0][14], M[1][7], M[1][14]),
	}
}
// Scrolled handles the zooming of the view by applying a uniform
// scaling matrix (a * identity) derived from the vertical scroll delta.
func (l *LineDrawer) Scrolled(s *fyne.ScrollEvent) {
	a := float64(s.Scrolled.DY) / 8 // One scroll step seems to be 10.
	if a < 0 {
		a += 2 // Get it back into the positive range.
	}
	T := matrix.Matrix{
		{a, 0, 0},
		{0, a, 0},
		{0, 0, a},
	}
	l.matrix, _ = matrix.Mult(T, l.matrix)
	l.lines = LinesFromMatrix(l.matrix)
	l.Refresh()
}
// Dragged handles the rotation of the view: vertical drag maps to an
// angle a about the x axis and horizontal drag to an angle b about the
// y axis; R below is the combined rotation Rx(a)*Ry(b).
func (l *LineDrawer) Dragged(d *fyne.DragEvent) {
	a := float64(d.Dragged.DY) * 0.007
	b := float64(d.Dragged.DX) * -0.007
	R := matrix.Matrix{
		{math.Cos(b), 0, math.Sin(b)},
		{math.Sin(a) * math.Sin(b), math.Cos(a), -math.Sin(a) * math.Cos(b)},
		{-math.Cos(a) * math.Sin(b), math.Sin(a), math.Cos(a) * math.Cos(b)},
	}
	l.matrix, _ = matrix.Mult(R, l.matrix)
	l.lines = LinesFromMatrix(l.matrix)
	l.Refresh()
}
// DragEnd is not currently needed other than to satisfy fyne.Draggable.
func (l *LineDrawer) DragEnd() {
}
// CreateRenderer creates the renderer for the widget.
func (l *LineDrawer) CreateRenderer() fyne.WidgetRenderer {
	l.ExtendBaseWidget(l)
	return &lineRenderer{lineDrawer: l}
}
// lineRenderer renders a LineDrawer by exposing its canvas lines.
type lineRenderer struct {
	lineDrawer *LineDrawer
}
// Destroy is a no-op; the renderer holds no resources to release.
func (lr *lineRenderer) Destroy() {
}
// Layout is a no-op; line positions are absolute, set in LinesFromMatrix.
func (lr *lineRenderer) Layout(s fyne.Size) {
}
// MinSize returns a small nominal size based on the theme icon size.
func (lr *lineRenderer) MinSize() fyne.Size {
	return fyne.NewSize(theme.IconInlineSize(), theme.IconInlineSize())
}
// Objects exposes the current set of lines to the canvas.
func (lr *lineRenderer) Objects() []fyne.CanvasObject {
	return lr.lineDrawer.lines
}
// Refresh redraws the owning widget.
func (lr *lineRenderer) Refresh() {
	canvas.Refresh(lr.lineDrawer)
}
// NewLineBetween creates a new line between the given coordinates.
func NewLineBetween(x1, y1, x2, y2 float64) *canvas.Line {
return &canvas.Line{
Position1: fyne.NewPos(float32(x1)+20, float32(x2)+20),
Position2: fyne.NewPos(float32(y1)+20, float32(y2)+20),
StrokeColor: color.NRGBA{R: 0xff, G: 0xeb, B: 0x3b, A: 0xaf},
StrokeWidth: 7,
}
} | lines.go | 0.675229 | 0.439627 | lines.go | starcoder |
package godmt
import (
"fmt"
"go/ast"
"go/token"
"reflect"
)
// BasicTypeLiteralParser will try to extract the type of a basic literal type,
// whether const or var.
func BasicTypeLiteralParser(d *ast.Ident, item *ast.BasicLit) ScannedType {
	// item.Value is a Go string, so %T yields "string" for every kind
	// that is not overridden below (STRING, CHAR, IMAG literals).
	itemType := fmt.Sprintf("%T", item.Value)
	if item.Kind == token.INT {
		itemType = "int64"
	} else if item.Kind == token.FLOAT {
		itemType = "float64"
	}
	// recover the full declaration to pick up its doc comments
	rawDecl := reflect.ValueOf(d.Obj.Decl).Elem().Interface().(ast.ValueSpec)
	return ScannedType{
		Name: d.Name,
		Kind: itemType,
		Value: item.Value,
		Doc: ExtractComments(rawDecl.Doc),
		InternalType: ConstType,
	}
}
// IdentifierParser will try to extract the type of an identifier, like booleans.
// This will return a pointer to a ScannedType, thus the result should be checked for nil.
func IdentifierParser(d, item *ast.Ident) *ScannedType {
	rawDecl := reflect.ValueOf(d.Obj.Decl).Elem().Interface().(ast.ValueSpec)
	isBool := item.Name == "true" || item.Name == "false"
	if !isBool {
		// only boolean identifiers are recognized
		return nil
	}
	return &ScannedType{
		Name:         d.Name,
		Kind:         "bool",
		Value:        item.Name,
		Doc:          ExtractComments(rawDecl.Doc),
		InternalType: ConstType,
	}
}
// CompositeLiteralMapParser will extract a map ScannedType from the valid
// corresponding composite literal declaration.
func CompositeLiteralMapParser(d *ast.Ident, mapElements []ast.Expr, item *ast.CompositeLit) ScannedType {
	cleanMap := make(map[string]string)
	for j := range mapElements {
		rawKey := reflect.ValueOf(mapElements[j]).Elem().FieldByName("Key")
		switch rawKey.Interface().(type) {
		case *ast.BasicLit:
			rawValue := reflect.ValueOf(mapElements[j]).Elem().FieldByName("Value").Interface()
			// values may be literals ("1") or identifiers (true/false)
			switch item := rawValue.(type) {
			case *ast.BasicLit:
				cleanMap[fmt.Sprintf("%v", rawKey.Interface().(*ast.BasicLit).Value)] = item.Value
			case *ast.Ident:
				cleanMap[fmt.Sprintf("%v", rawKey.Interface().(*ast.BasicLit).Value)] = item.Name
			}
		default:
			break // non-literal keys are skipped (break is a no-op here)
		}
	}
	var doc []string
	rawDecl := reflect.ValueOf(d.Obj.Decl).Elem().Interface().(ast.ValueSpec)
	if rawDecl.Doc != nil {
		for i := range rawDecl.Doc.List {
			doc = append(doc, rawDecl.Doc.List[i].Text)
		}
	}
	return ScannedType{
		Name: d.Name,
		Kind: fmt.Sprintf(
			"map[%s]%s",
			GetMapValueType(item.Type.(*ast.MapType).Key),
			GetMapValueType(item.Type.(*ast.MapType).Value),
		),
		Value: cleanMap,
		InternalType: MapType,
		Doc: doc,
	}
}
// CompositeLiteralSliceParser will extract a slice ScannedType from the valid corresponding composite literal declaration.
func CompositeLiteralSliceParser(d *ast.Ident, sliceType string, item *ast.CompositeLit) ScannedType {
	// recover the declaration to pick up its doc comments
	rawDecl := reflect.ValueOf(d.Obj.Decl).Elem().Interface().(ast.ValueSpec)
	return ScannedType{
		Name: d.Name,
		Kind: fmt.Sprintf("[]%s", sliceType),
		Value: ExtractSliceValues(item.Elts),
		Doc: ExtractComments(rawDecl.Doc),
		InternalType: SliceType,
	}
}
// SimpleStructFieldParser will transform a field of a struct that contains
// a basic entity (map, ident, or an embedded struct) into a program
// readable ScannedStructField.
func SimpleStructFieldParser(field *ast.Field) *ScannedStructField {
	if field.Names != nil {
		tag := field.Tag
		var tagValue string
		if tag != nil {
			tagValue = tag.Value
		}
		switch fieldType := field.Type.(type) {
		case *ast.MapType:
			key := GetMapValueType(fieldType.Key)
			value := GetMapValueType(fieldType.Value)
			kind := fmt.Sprintf("map[%s]%s", key, value)
			return &ScannedStructField{
				Doc: ExtractComments(field.Doc),
				Name: field.Names[0].Name,
				Kind: kind,
				Tag: tagValue,
				InternalType: MapType,
			}
		case *ast.Ident:
			return &ScannedStructField{
				Doc: ExtractComments(field.Doc),
				Name: field.Names[0].Name,
				Kind: fieldType.Name,
				Tag: tagValue,
				InternalType: ConstType,
			}
		}
	}
	// Struct inside a struct
	// NOTE(review): named fields whose type matches neither switch case
	// also fall through to here, and FieldByName("Obj") assumes the type
	// expression is an *ast.Ident — confirm other kinds cannot reach this.
	fieldType := reflect.ValueOf(field.Type).Elem().FieldByName("Obj").Interface().(*ast.Object)
	tag := field.Tag
	var tagValue string
	if tag != nil {
		tagValue = tag.Value
	}
	return &ScannedStructField{
		Doc: nil,
		Name: fieldType.Name,
		Kind: "struct",
		Tag: tagValue,
	}
}
// ImportedStructFieldParser will transform a field of a struct that contains an imported entity
// into a program readable ScannedStructField.
func ImportedStructFieldParser(field *ast.Field) *ScannedStructField {
	// imported entities appear as selector expressions: pkg.Type
	fieldType := reflect.ValueOf(field.Type).Interface().(*ast.SelectorExpr)
	tag := field.Tag
	var tagValue string
	if tag != nil {
		tagValue = tag.Value
	}
	// embedded (anonymous) fields have no Names; fall back to the
	// fmt rendering of the empty name list in that case
	name := fmt.Sprintf("%s", field.Names)
	if len(field.Names) > 0 {
		name = field.Names[0].Name
	}
	packageName := fmt.Sprintf("%s", reflect.ValueOf(fieldType.X).Elem().FieldByName("Name"))
	return &ScannedStructField{
		Doc: nil,
		Name: name,
		Kind: fieldType.Sel.Name,
		Tag: tagValue,
		ImportDetails: &ImportedEntityDetails{
			EntityName: fieldType.Sel.Name,
			PackageName: packageName,
		},
	}
}
// GetSliceType returns the Go-syntax type of an array/slice literal,
// e.g. "[]int" or "[]map[string]int".
// NOTE(review): the default branch returns "interface{}" without the
// "[]" prefix — possibly intended to be "[]interface{}"; confirm.
func GetSliceType(objectTypeDetails *ast.ArrayType) string {
	kind := "[]"
	switch arrayElements := objectTypeDetails.Elt.(type) {
	case *ast.MapType:
		key := GetMapValueType(arrayElements.Key)
		value := GetMapValueType(arrayElements.Value)
		kind += fmt.Sprintf("map[%s]%s", key, value)
		return kind
	case *ast.Ident:
		kind += arrayElements.Name
		return kind
	default:
		return "interface{}" //nolint:goconst
	}
}
package bytesurl
import (
"bytes"
"sort"
)
// Values maps a string key to a list of byte-slice values.
// It is typically used for query parameters and form values.
// Unlike in the http.Header map, the keys in a Values map
// are case-sensitive.
type Values map[string][][]byte
// Get gets the first value associated with the given key.
// If there are no values associated with the key, Get returns
// the empty byte slice. To access multiple values, use the map
// directly.
func (v Values) Get(key string) []byte {
	if v == nil {
		return EmptyByte
	}
	vs, ok := v[key]
	if !ok || len(vs) == 0 {
		return EmptyByte
	}
	return vs[0]
}
// Set sets the key to value. It replaces any existing
// values.
func (v Values) Set(key string, value []byte) {
	v[key] = [][]byte{value}
}
// Add adds the value to key. It appends to any existing
// values associated with key.
func (v Values) Add(key string, value []byte) {
	v[key] = append(v[key], value)
}
// Del deletes the values associated with key.
func (v Values) Del(key string) {
	delete(v, key)
}
// ParseQuery parses the URL-encoded query string and returns
// a map listing the values specified for each key.
// ParseQuery always returns a non-nil map containing all the
// valid query parameters found; err describes the first decoding error
// encountered, if any.
func ParseQuery(query []byte) (Values, error) {
	values := make(Values)
	return values, parseQuery(values, query)
}
// parseQuery splits query on '&' and ';' separators, unescapes each
// key and value, and appends them into m. It keeps going after a bad
// pair and reports only the first unescape error encountered.
// Emptiness is now tested with len() instead of comparing against
// EmptyByte, which is both idiomatic and robust (the old checks were
// only correct under the assumption that EmptyByte has length zero).
func parseQuery(m Values, query []byte) (err error) {
	for len(query) > 0 {
		key := query
		if i := bytes.IndexAny(key, "&;"); i >= 0 {
			key, query = key[:i], key[i+1:]
		} else {
			query = EmptyByte
		}
		if len(key) == 0 {
			continue
		}
		value := EmptyByte
		if i := bytes.Index(key, EqualByte); i >= 0 {
			key, value = key[:i], key[i+1:]
		}
		key, err1 := QueryUnescape(key)
		if err1 != nil {
			if err == nil {
				err = err1
			}
			continue
		}
		value, err1 = QueryUnescape(value)
		if err1 != nil {
			if err == nil {
				err = err1
			}
			continue
		}
		indexKey := string(key)
		m[indexKey] = append(m[indexKey], value)
	}
	return err
}
// Encode encodes the values into ``URL encoded'' form
// ("bar=baz&foo=quux") sorted by key.
func (v Values) Encode() string {
	if v == nil {
		return ""
	}
	var buf bytes.Buffer
	// sort keys for deterministic output
	keys := make([]string, 0, len(v))
	for k := range v {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		vs := v[k]
		// "escaped-key=" prefix, reused for every value of this key.
		// NOTE(review): appending to QueryEscape's result assumes it
		// returns a freshly allocated slice — confirm.
		prefix := append(QueryEscape([]byte(k)), EqualByte...)
		for _, v := range vs {
			if buf.Len() > 0 {
				buf.WriteByte('&')
			}
			buf.Write(prefix)
			buf.Write(QueryEscape(v))
		}
	}
	return buf.String()
}
package data
import (
"fmt"
)
// vector represents a Field's collection of Elements.
// Implementations exist per FieldType (nullable and non-nullable
// variants of each primitive); semantics of the pointer/concrete
// accessors are defined by those concrete types (not visible here).
type vector interface {
	Set(idx int, i interface{})
	Append(i interface{})
	Extend(i int)
	At(i int) interface{}
	Len() int
	Type() FieldType
	PointerAt(i int) interface{}
	CopyAt(i int) interface{}
	ConcreteAt(i int) (val interface{}, ok bool)
	SetConcrete(i int, val interface{})
	Insert(i int, val interface{})
	Delete(i int)
}
// vectorFieldType maps a concrete vector implementation to its
// FieldType via a type switch; unrecognized implementations yield
// FieldTypeUnknown.
// NOTE(review): this looks like a duplicate of the interface's Type()
// method — possibly the concrete Type() implementations delegate here,
// so do not replace the switch with v.Type() without checking.
func vectorFieldType(v vector) FieldType {
	switch v.(type) {
	case *int8Vector:
		return FieldTypeInt8
	case *nullableInt8Vector:
		return FieldTypeNullableInt8
	case *int16Vector:
		return FieldTypeInt16
	case *nullableInt16Vector:
		return FieldTypeNullableInt16
	case *int32Vector:
		return FieldTypeInt32
	case *nullableInt32Vector:
		return FieldTypeNullableInt32
	case *int64Vector:
		return FieldTypeInt64
	case *nullableInt64Vector:
		return FieldTypeNullableInt64
	case *uint8Vector:
		return FieldTypeUint8
	case *nullableUint8Vector:
		return FieldTypeNullableUint8
	case *uint16Vector:
		return FieldTypeUint16
	case *nullableUint16Vector:
		return FieldTypeNullableUint16
	case *uint32Vector:
		return FieldTypeUint32
	case *nullableUint32Vector:
		return FieldTypeNullableUint32
	case *uint64Vector:
		return FieldTypeUint64
	case *nullableUint64Vector:
		return FieldTypeNullableUint64
	case *float32Vector:
		return FieldTypeFloat32
	case *nullableFloat32Vector:
		return FieldTypeNullableFloat32
	case *float64Vector:
		return FieldTypeFloat64
	case *nullableFloat64Vector:
		return FieldTypeNullableFloat64
	case *stringVector:
		return FieldTypeString
	case *nullableStringVector:
		return FieldTypeNullableString
	case *boolVector:
		return FieldTypeBool
	case *nullableBoolVector:
		return FieldTypeNullableBool
	case *timeTimeVector:
		return FieldTypeTime
	case *nullableTimeTimeVector:
		return FieldTypeNullableTime
	}
	return FieldTypeUnknown
}
// String returns the slice-style description of the field type, e.g.
// "[]int64". Non-positive values are reported as invalid/unsupported.
func (p FieldType) String() string {
	if p <= 0 {
		return "invalid/unsupported"
	}
	return fmt.Sprintf("[]%v", p.ItemTypeString())
}
// NewFieldFromFieldType creates a new Field of the given FieldType of length n.
// It panics for FieldTypeUnknown or any other unsupported value.
func NewFieldFromFieldType(p FieldType, n int) *Field {
	f := &Field{}
	switch p {
	// ints
	case FieldTypeInt8:
		f.vector = newInt8Vector(n)
	case FieldTypeNullableInt8:
		f.vector = newNullableInt8Vector(n)
	case FieldTypeInt16:
		f.vector = newInt16Vector(n)
	case FieldTypeNullableInt16:
		f.vector = newNullableInt16Vector(n)
	case FieldTypeInt32:
		f.vector = newInt32Vector(n)
	case FieldTypeNullableInt32:
		f.vector = newNullableInt32Vector(n)
	case FieldTypeInt64:
		f.vector = newInt64Vector(n)
	case FieldTypeNullableInt64:
		f.vector = newNullableInt64Vector(n)
	// uints
	case FieldTypeUint8:
		f.vector = newUint8Vector(n)
	case FieldTypeNullableUint8:
		f.vector = newNullableUint8Vector(n)
	case FieldTypeUint16:
		f.vector = newUint16Vector(n)
	case FieldTypeNullableUint16:
		f.vector = newNullableUint16Vector(n)
	case FieldTypeUint32:
		f.vector = newUint32Vector(n)
	case FieldTypeNullableUint32:
		f.vector = newNullableUint32Vector(n)
	case FieldTypeUint64:
		f.vector = newUint64Vector(n)
	case FieldTypeNullableUint64:
		f.vector = newNullableUint64Vector(n)
	// floats
	case FieldTypeFloat32:
		f.vector = newFloat32Vector(n)
	case FieldTypeNullableFloat32:
		f.vector = newNullableFloat32Vector(n)
	case FieldTypeFloat64:
		f.vector = newFloat64Vector(n)
	case FieldTypeNullableFloat64:
		f.vector = newNullableFloat64Vector(n)
	// other
	case FieldTypeString:
		f.vector = newStringVector(n)
	case FieldTypeNullableString:
		f.vector = newNullableStringVector(n)
	case FieldTypeBool:
		f.vector = newBoolVector(n)
	case FieldTypeNullableBool:
		f.vector = newNullableBoolVector(n)
	case FieldTypeTime:
		f.vector = newTimeTimeVector(n)
	case FieldTypeNullableTime:
		f.vector = newNullableTimeTimeVector(n)
	default:
		panic("unsupported FieldType")
	}
	return f
}
package gofsm
import (
"fmt"
"strings"
"time"
)
// TransitionRecord represents info on a single FSM transition.
type TransitionRecord struct {
	When time.Time // UTC timestamp of the transition
	From string // state before the transition
	To string // state after the transition
	Note string // optional free-form description
}
// NewFSM returns an initialized, empty FSM.
func NewFSM() *FSM {
	return &FSM{
		states: map[string]*State{},
		events: map[string]*Event{},
	}
}
// An FSM represents a finite state machine with its initial state,
// states, and events.
type FSM struct {
	initial string // name of the initial state
	states map[string]*State // states by name
	events map[string]*Event // events by name
}
// Initial sets the FSM initial state and returns sm for chaining.
func (sm *FSM) Initial(name string) *FSM {
	sm.initial = name
	return sm
}
// State adds a state name to the FSM and returns the new State.
func (sm *FSM) State(name string) *State {
	s := &State{Name: name}
	sm.states[name] = s
	return s
}
// Event adds an event name to the FSM and returns the new Event.
func (sm *FSM) Event(name string) *Event {
	e := &Event{Name: name}
	sm.events[name] = e
	return e
}
// Trigger performs a transition according to event name on a stated type, and returns the transition info.
// The value's state is lazily initialized to the FSM's initial state.
// Exactly one transition of the event must be applicable from the
// current state; otherwise an error is returned.
func (sm *FSM) Trigger(name string, value Stater, desc ...string) (*TransitionRecord, error) {
	current := value.GetState()
	if current == "" {
		// first trigger: adopt the FSM's initial state
		current = sm.initial
		value.SetState(sm.initial)
	}
	event := sm.events[name]
	if event == nil {
		return nil, fmt.Errorf("failed to perform event %s from state %s, no such event", name, current)
	}
	// collect transitions applicable from the current state; a
	// transition with no "from" states matches any state
	var matches []*Transition
	for _, transition := range event.transitions {
		valid := len(transition.froms) == 0
		for _, from := range transition.froms {
			if from == current {
				valid = true
			}
		}
		if valid {
			matches = append(matches, transition)
		}
	}
	if len(matches) != 1 {
		return nil, fmt.Errorf("failed to perform event %s from state %s, invalid number of transitions (%d) in event", name, current, len(matches))
	}
	transition := matches[0]
	// hook order: state exits, transition befores, state switch,
	// transition afters, state enters; afters/enters roll back on error
	if err := sm.exitFromCurrentState(value, current); err != nil {
		return nil, err
	}
	if err := sm.performActionsBeforeTransition(value, transition); err != nil {
		return nil, err
	}
	value.SetState(transition.to)
	previous := current
	if err := sm.performActionsAfterTransition(value, previous, transition); err != nil {
		return nil, err
	}
	if err := sm.enterNextState(value, previous, transition.to); err != nil {
		return nil, err
	}
	return &TransitionRecord{When: time.Now().UTC(), From: previous, To: transition.to, Note: strings.Join(desc, "")}, nil
}
// exitFromCurrentState runs the exit callbacks of the state being left,
// stopping at the first error. Unknown states are a no-op.
func (sm *FSM) exitFromCurrentState(value Stater, current string) error {
	if state, ok := sm.states[current]; ok {
		for _, exit := range state.exits {
			if err := exit(value); err != nil {
				return err
			}
		}
	}
	return nil
}
// performActionsBeforeTransition runs the transition's "before" hooks,
// stopping at the first error.
func (sm *FSM) performActionsBeforeTransition(value Stater, transition *Transition) error {
	for _, hook := range transition.befores {
		if err := hook(value); err != nil {
			return err
		}
	}
	return nil
}
// enterNextState runs the enter callbacks of the target state. On the
// first error the value's state is rolled back to previous and the
// error is returned. Unknown target states are a no-op.
func (sm *FSM) enterNextState(value Stater, previous, next string) error {
	state, ok := sm.states[next]
	if !ok {
		return nil
	}
	for _, enter := range state.enters {
		if err := enter(value); err != nil {
			value.SetState(previous)
			return err
		}
	}
	return nil
}
// performActionsAfterTransition runs the transition's "after" hooks.
// On the first error the value's state is rolled back to previous and
// the error is returned.
func (sm *FSM) performActionsAfterTransition(value Stater, previous string, transition *Transition) error {
	for _, after := range transition.afters {
		if err := after(value); err != nil {
			value.SetState(previous)
			return err
		}
	}
	return nil
}
// Package collection provides common helpers for slices, arrays and maps,
// including de-duplication, deletion, membership tests and set operations.
// For sorting and appending, use the standard library sort package.
package collection
import (
"errors"
"fmt"
"reflect"
)
// SliceRemoveDuplicate returns a new []interface{} holding the elements
// of slice a with duplicates removed, preserving first-seen order.
// Non-slice input is reported on stdout and yields a nil result.
func SliceRemoveDuplicate(a interface{}) (ret []interface{}) {
	if reflect.TypeOf(a).Kind() != reflect.Slice {
		fmt.Printf("The parameter passed in is not a slice but %T\n", a)
		return ret
	}
	seen := make(map[interface{}]bool)
	va := reflect.ValueOf(a)
	for i := 0; i < va.Len(); i++ {
		elem := va.Index(i).Interface()
		if seen[elem] {
			continue
		}
		seen[elem] = true
		ret = append(ret, elem)
	}
	return ret
}
// InsertSlice insert a element to slice in the specified index
// Note that original slice will not be modified
func InsertSlice(slice interface{}, index int, value interface{}) (interface{}, error) {
// check params
v := reflect.ValueOf(slice)
if v.Kind() != reflect.Slice {
return nil, errors.New("target isn't a slice")
}
if index < 0 || index > v.Len() || reflect.TypeOf(slice).Elem() != reflect.TypeOf(value) {
return nil, errors.New("param is invalid")
}
dst := reflect.MakeSlice(reflect.TypeOf(slice), 0, 0)
// add the element to the end of slice
if index == v.Len() {
dst = reflect.AppendSlice(dst, v.Slice(0, v.Len()))
dst = reflect.Append(dst, reflect.ValueOf(value))
return dst.Interface(), nil
}
dst = reflect.AppendSlice(dst, v.Slice(0, index+1))
dst = reflect.AppendSlice(dst, v.Slice(index, v.Len()))
dst.Index(index).Set(reflect.ValueOf(value))
return dst.Interface(), nil
}
// DeleteSliceE deletes the specified subscript element from the slice
// Note that original slice will not be modified
func DeleteSliceE(slice interface{}, index int) (interface{}, error) {
// check params
v := reflect.ValueOf(slice)
if v.Kind() != reflect.Slice {
return nil, errors.New("target isn't a slice")
}
if v.Len() == 0 || index < 0 || index > v.Len()-1 {
return nil, errors.New("param is invalid")
}
dst := reflect.MakeSlice(reflect.TypeOf(slice), 0, 0)
dst = reflect.AppendSlice(dst, v.Slice(0, index))
dst = reflect.AppendSlice(dst, v.Slice(index+1, v.Len()))
return dst.Interface(), nil
}
// GetEleIndexesSliceE returns every index at which value occurs in
// slice, or an error when slice is not actually a slice.
func GetEleIndexesSliceE(slice interface{}, value interface{}) ([]int, error) {
	src := reflect.ValueOf(slice)
	if src.Kind() != reflect.Slice {
		return nil, errors.New("target isn't a slice")
	}
	var idxs []int
	for i := 0; i < src.Len(); i++ {
		if src.Index(i).Interface() == value {
			idxs = append(idxs, i)
		}
	}
	return idxs, nil
}
// Contains reports whether list (a slice, array or map) contains the
// element e; for maps, e is matched against the keys. The error is nil
// only on a successful match — a miss or an unsupported list type both
// return a descriptive error.
func Contains(list interface{}, e interface{}) (bool, error) {
	v := reflect.ValueOf(list)
	switch reflect.TypeOf(list).Kind() {
	case reflect.Slice, reflect.Array:
		for i := 0; i < v.Len(); i++ {
			if v.Index(i).Interface() == e {
				return true, nil
			}
		}
	case reflect.Map:
		if v.MapIndex(reflect.ValueOf(e)).IsValid() {
			return true, nil
		}
	default:
		return false, errors.New(v.String() + " It is not any type in array, slice or map ")
	}
	return false, errors.New(reflect.ValueOf(e).String() + " not in list")
}
// Union returns the union of slice1 and slice2: all of slice1 followed
// by the values of slice2 not already present. Two defects are fixed:
// a value duplicated in slice2 is now appended only once (the map is
// updated as values are added), and the result is built in a fresh
// slice instead of appending to slice1, which could silently overwrite
// the caller's backing array.
func Union(slice1, slice2 []string) []string {
	m := make(map[string]int, len(slice1))
	for _, v := range slice1 {
		m[v]++
	}
	out := append([]string(nil), slice1...) // copy; never mutate the caller's array
	for _, v := range slice2 {
		if m[v] == 0 {
			out = append(out, v)
			m[v]++ // record it so later duplicates in slice2 are skipped
		}
	}
	return out
}
// Intersect returns the values that appear in both slice1 and slice2,
// in slice2 order, each value at most once. The previous version
// counted occurrences in slice1 and required the count to be exactly
// one, so a value duplicated in slice1 was wrongly dropped, and a
// value duplicated in slice2 was emitted more than once.
func Intersect(slice1, slice2 []string) []string {
	in1 := make(map[string]bool, len(slice1))
	for _, v := range slice1 {
		in1[v] = true
	}
	nn := make([]string, 0)
	for _, v := range slice2 {
		if in1[v] {
			nn = append(nn, v)
			in1[v] = false // emit each common value only once
		}
	}
	return nn
}
// Difference returns the elements of slice1 that do not appear in
// slice2 (slice1 \ slice2), preserving slice1's order and duplicates.
// It is implemented directly with a membership set instead of going
// through Intersect, whose duplicate-handling bugs previously leaked
// into this function (e.g. a common value duplicated in slice1 was
// wrongly kept in the difference).
func Difference(slice1, slice2 []string) []string {
	in2 := make(map[string]bool, len(slice2))
	for _, v := range slice2 {
		in2[v] = true
	}
	nn := make([]string, 0)
	for _, v := range slice1 {
		if !in2[v] {
			nn = append(nn, v)
		}
	}
	return nn
}
// StructToMap converts the scalar fields of the struct pointed to by
// obj into a map keyed by field name. Slice, struct and pointer fields
// are skipped. obj must be a pointer to a struct; reflect panics are
// converted into the returned error.
func StructToMap(obj interface{}) (result map[string]interface{}, err error) {
	t := reflect.TypeOf(obj)
	v := reflect.ValueOf(obj)
	if t.Kind() != reflect.Ptr {
		err = fmt.Errorf("type must be a pointer")
		return
	}
	if t.Elem().Kind() != reflect.Struct {
		err = fmt.Errorf("element type must be a struct")
		return
	}
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("%v", r)
		}
	}()
	result = make(map[string]interface{})
	elemT, elemV := t.Elem(), v.Elem()
	for i := 0; i < elemT.NumField(); i++ {
		field := elemV.Field(i)
		switch field.Kind() {
		case reflect.Slice, reflect.Struct, reflect.Ptr:
			continue // composite kinds are intentionally skipped
		default:
			result[elemT.Field(i).Name] = field.Interface()
		}
	}
	return
}
package main
import (
. "github.com/9d77v/leetcode/pkg/algorithm/binarytree"
)
/*
题目: 二叉树的最近公共祖先
给定一个二叉树, 找到该树中两个指定节点的最近公共祖先。
百度百科中最近公共祖先的定义为:“对于有根树 T 的两个节点 p、q,最近公共祖先表示为一个节点 x,满足 x 是 p、q 的祖先且 x 的深度尽可能大(一个节点也可以是它自己的祖先)。”
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/lowest-common-ancestor-of-a-binary-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
提示:
树中节点数目在范围 [2, 105] 内。
-109 <= Node.val <= 109
所有 Node.val 互不相同 。
p != q
p 和 q 均存在于给定的二叉树中。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/lowest-common-ancestor-of-a-binary-tree
*/
/*
Approach 1: recursion.
If root is nil or matches p or q, it is the answer for this subtree.
Otherwise the LCA is whichever subtree found something, or root itself
when both subtrees did.
Time: O(n), Space: O(n) recursion depth.
Runtime: 12 ms, Memory: 7.6 MB.
*/
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
	if root == nil || root == p || root == q {
		return root
	}
	left := lowestCommonAncestor(root.Left, p, q)
	right := lowestCommonAncestor(root.Right, p, q)
	switch {
	case left == nil:
		return right
	case right == nil:
		return left
	default:
		return root
	}
}
/*
Approach 2: recursive flag propagation.
dfs reports whether the subtree contains p or q; a node is recorded as
the answer when both subtrees report true, or when the node itself is
p or q and one subtree reports true.
Time: O(n), Space: O(n) recursion depth.
Runtime: 16 ms, Memory: 7.7 MB.
*/
func lowestCommonAncestorFunc2(root, p, q *TreeNode) *TreeNode {
	var result *TreeNode
	var dfs func(root, p, q *TreeNode) bool
	dfs = func(root, p, q *TreeNode) bool {
		if root == nil {
			return false
		}
		lson, rson := dfs(root.Left, p, q), dfs(root.Right, p, q)
		if (lson && rson) || ((root == p || root == q) && (lson || rson)) {
			result = root
		}
		return lson || rson || root == p || root == q
	}
	dfs(root, p, q)
	return result
}
/*
Approach 3: parent pointers.
Record each node's parent with a DFS, collect all ancestors of p into a
set, then walk upward from q; the first ancestor of q that is also an
ancestor of p is the LCA.
Time: O(n), Space: O(n).
Runtime: 16 ms, Memory: 7.9 MB.
*/
func lowestCommonAncestorFunc3(root, p, q *TreeNode) *TreeNode {
	parentMap, parentSet := map[*TreeNode]*TreeNode{}, map[*TreeNode]struct{}{}
	var dfs func(root *TreeNode)
	dfs = func(root *TreeNode) {
		if root == nil {
			return
		}
		if root.Left != nil {
			parentMap[root.Left] = root
			dfs(root.Left)
		}
		if root.Right != nil {
			parentMap[root.Right] = root
			dfs(root.Right)
		}
	}
	dfs(root)
	// ancestors of p, including p itself
	for p != nil {
		parentSet[p] = struct{}{}
		p = parentMap[p]
	}
	// first ancestor of q also in p's ancestor set
	for q != nil {
		if _, ok := parentSet[q]; ok {
			return q
		}
		q = parentMap[q]
	}
	return nil
}
// Counter (CTR) mode with support for continuation.
// CTR converts a block cipher into a stream cipher by
// repeatedly encrypting an incrementing counter and
// xoring the resulting stream of data with the input.
// See NIST SP 800-38A, pp 13-15
package cryptoutils
import (
"bytes"
"crypto/cipher"
"encoding/gob"
)
// betterCTRState is the gob-serializable snapshot of a BetterCTR's
// mutable state: the counter, the keystream buffer, and how much of
// the buffer has been consumed. The cipher block is not part of it.
type betterCTRState struct {
	Ctr []byte
	Out []byte
	OutUsed int
}
// BetterCTR is a CTR-mode stream whose position can be exported with
// GetState and resumed later with SetState.
type BetterCTR struct {
	b cipher.Block // underlying block cipher
	ctr []byte // current counter value (big-endian increments)
	out []byte // buffered keystream bytes
	outUsed int // bytes of out already consumed
}
// streamBufferSize is the default keystream buffer size in bytes.
const streamBufferSize = 512
// NewBetterCTR returns a Stream which encrypts/decrypts using the given Block in
// counter mode. The length of iv must be the same as the Block's block size.
func NewBetterCTR(block cipher.Block, iv []byte) *BetterCTR {
	if len(iv) != block.BlockSize() {
		panic("cryptoutils.NewBetterCTR: IV length must equal block size")
	}
	// buffer holds at least one block
	bufSize := streamBufferSize
	if bufSize < block.BlockSize() {
		bufSize = block.BlockSize()
	}
	return &BetterCTR{
		b: block,
		ctr: dup(iv), // private copy so the caller's iv is never mutated
		out: make([]byte, 0, bufSize),
		outUsed: 0,
	}
}
// NewBetterCTRFromState returns a Stream whose counter/buffer state is
// restored from a blob previously produced by GetState.
// NOTE(review): the error returned by SetState is silently discarded, so
// a corrupt state blob yields a stream with zero-valued state. Consider
// propagating the error in a future API revision.
func NewBetterCTRFromState(block cipher.Block, state []byte) *BetterCTR {
	x := &BetterCTR{
		b: block,
	}
	x.SetState(state)
	return x
}
// GetState serializes the stream's resumable state (counter, keystream
// buffer, consumed offset) into a gob blob suitable for SetState or
// NewBetterCTRFromState.
// NOTE(review): the Encode error is ignored; a failure would silently
// produce an empty or truncated blob.
func (x *BetterCTR) GetState() []byte {
	var state bytes.Buffer
	enc := gob.NewEncoder(&state)
	enc.Encode(betterCTRState{
		Ctr: x.ctr,
		Out: x.out,
		OutUsed: x.outUsed,
	})
	return state.Bytes()
}
// SetState restores the cipher's counter, keystream buffer and buffer
// offset from a gob blob previously produced by GetState. Any decoding
// error is returned unchanged and the receiver is left unmodified.
func (x *BetterCTR) SetState(state []byte) error {
	var restored betterCTRState
	if err := gob.NewDecoder(bytes.NewBuffer(state)).Decode(&restored); err != nil {
		return err
	}
	x.ctr = restored.Ctr
	x.out = restored.Out
	x.outUsed = restored.OutUsed
	return nil
}
// refill tops up the buffered keystream: it slides the unconsumed tail of
// x.out to the front, then encrypts successive counter values into the
// freed space, incrementing the counter as a big-endian integer (wrapping
// on overflow) after each block. On return outUsed is 0.
func (x *BetterCTR) refill() {
	remain := len(x.out) - x.outUsed
	// Skip the refill until at least half of the buffer has been consumed,
	// so we don't churn the buffer for every small read.
	if remain > x.outUsed {
		return
	}
	copy(x.out, x.out[x.outUsed:])
	x.out = x.out[:cap(x.out)]
	bs := x.b.BlockSize()
	for remain < len(x.out)-bs {
		x.b.Encrypt(x.out[remain:], x.ctr)
		remain += bs
		// Increment counter: propagate the carry from the last
		// (least-significant) byte toward the front.
		for i := len(x.ctr) - 1; i >= 0; i-- {
			x.ctr[i]++
			if x.ctr[i] != 0 {
				break
			}
		}
	}
	x.out = x.out[:remain]
	x.outUsed = 0
}
// XORKeyStream XORs src with the keystream and writes the result to dst,
// refilling the keystream buffer whenever less than one block of buffered
// bytes remains. xorBytes (defined elsewhere in this package) returns how
// many bytes it processed, which advances both slices and the buffer
// offset in lockstep.
func (x *BetterCTR) XORKeyStream(dst, src []byte) {
	for len(src) > 0 {
		if x.outUsed >= len(x.out)-x.b.BlockSize() {
			x.refill()
		}
		n := xorBytes(dst, src, x.out[x.outUsed:])
		dst = dst[n:]
		src = src[n:]
		x.outUsed += n
	}
}
// dup returns a freshly allocated copy of p, so the caller may mutate the
// result without aliasing the input.
func dup(p []byte) []byte {
	out := make([]byte, len(p))
	copy(out, p)
	return out
}
Lookup Table Based Math Functions
Faster than standard math package functions, but less accurate.
*/
//-----------------------------------------------------------------------------
package core
import "math"
//-----------------------------------------------------------------------------
// init populates the cosine and power lookup tables before first use.
func init() {
	cosLUTInit()
	powLUTInit()
}
//-----------------------------------------------------------------------------
// Cosine Lookup
// A 32-bit phase value is split into a cosLUTBits-wide table index and a
// cosFracBits-wide fractional part used for linear interpolation between
// adjacent table entries.
const cosLUTBits = 10
const cosLUTSize = 1 << cosLUTBits
const cosFracBits = 32 - cosLUTBits
const cosFracMask = (1 << cosFracBits) - 1
// cosLUTy holds cosine samples over one full turn; cosLUTdy holds the
// per-fraction-unit slope toward the next sample.
var cosLUTy [cosLUTSize]float32
var cosLUTdy [cosLUTSize]float32
// cosLUTInit creates y/dy cosine lookup tables for TAU radians.
// Entry i stores cos at phase i*Tau/size plus the slope toward entry i+1,
// pre-divided by the number of fractional steps so the interpolation in
// CosLookup reduces to one multiply-add.
func cosLUTInit() {
	dx := Tau / cosLUTSize
	for i := 0; i < cosLUTSize; i++ {
		y0 := math.Cos(float64(i) * dx)
		y1 := math.Cos(float64(i+1) * dx)
		cosLUTy[i] = float32(y0)
		cosLUTdy[i] = float32((y1 - y0) / (1 << cosFracBits))
	}
}
// CosLookup returns the cosine of x (32 bit unsigned phase value).
// The top cosLUTBits bits select the table entry; the remaining bits
// linearly interpolate toward the next entry.
func CosLookup(x uint32) float32 {
	idx := x >> cosFracBits
	return cosLUTy[idx] + float32(x&cosFracMask)*cosLUTdy[idx]
}
// Cos returns the cosine of x (radians).
// The angle is mapped to a 32-bit phase via PhaseScale (defined elsewhere
// in this package); cosine's evenness lets Abs(x) stand in for x.
func Cos(x float32) float32 {
	xi := uint32(Abs(x) * PhaseScale)
	return CosLookup(xi)
}
// Sin returns the sine of x (radians), via sin(x) = cos(pi/2 - x).
func Sin(x float32) float32 {
	return Cos((Pi / 2) - x)
}
// Tan returns the tangent of x (radians) as sin/cos; accuracy degrades
// near odd multiples of pi/2 where the cosine approaches zero.
func Tan(x float32) float32 {
	return Sin(x) / Cos(x)
}
//-----------------------------------------------------------------------------
// Power Function
// The fractional part of 2^x is evaluated with two powLUTBits-wide tables
// (coarse and fine), covering 2*powLUTBits fractional bits in total.
const powLUTBits = 7
const powLUTSize = 1 << powLUTBits
const powLUTMask = powLUTSize - 1
// powLUT0 holds the coarse factors 2^(i/powLUTSize); powLUT1 holds the
// fine factors 2^(i/powLUTSize^2).
var powLUT0 [powLUTSize]float32
var powLUT1 [powLUTSize]float32
// powLUTInit creates the power lookup tables:
// powLUT0[i] = 2^(i/2^7) and powLUT1[i] = 2^(i/2^14), so a 14-bit
// fraction f satisfies 2^f = powLUT0[high 7 bits] * powLUT1[low 7 bits].
func powLUTInit() {
	for i := 0; i < powLUTSize; i++ {
		x := float64(i) / powLUTSize
		powLUT0[i] = float32(math.Pow(2, x))
		x = float64(i) / (powLUTSize * powLUTSize)
		powLUT1[i] = float32(math.Pow(2, x))
	}
}
// pow2_int returns 2 to the x where x is an integer [-126,127].
// It builds the float32 bit pattern directly: the IEEE-754 exponent field
// is (127 + x) << 23 with a zero mantissa; negative x wraps correctly
// under uint32 modular arithmetic.
func pow2Int(x int) float32 {
	return math.Float32frombits((127 + uint32(x)) << 23)
}
// pow2_frac returns 2 to the x where x is a fraction [0,1).
// The fraction is quantized to 2*powLUTBits bits and split into a coarse
// (high bits) and fine (low bits) table factor, whose product is 2^x.
func pow2Frac(x float32) float32 {
	n := int(x * (1 << (powLUTBits * 2)))
	x0 := powLUT0[(n>>powLUTBits)&powLUTMask]
	x1 := powLUT1[n&powLUTMask]
	return x0 * x1
}
// Pow2 returns 2 to the x.
// x is split into integer and fractional parts; a negative fraction is
// normalized into [0,1) by borrowing one from the integer part, and the
// two partial results are multiplied.
func Pow2(x float32) float32 {
	if x == 0 {
		return 1
	}
	nf := int(math.Trunc(float64(x)))
	ff := x - float32(nf)
	if ff < 0 {
		nf--
		ff++
	}
	return pow2Int(nf) * pow2Frac(ff)
}
// logE2 is 1/ln(2), converting a natural exponent to a base-2 exponent.
const logE2 = 1.4426950408889634 // 1.0 / math.log(2)
// PowE returns e to the x, via e^x = 2^(x * log2(e)).
func PowE(x float32) float32 {
	return Pow2(logE2 * x)
}
//----------------------------------------------------------------------------- | core/lut.go | 0.852322 | 0.520923 | lut.go | starcoder |
package julia
import (
"image"
"image/color"
"sync"
)
// DrawJulia renders a Julia set into rgba, spawning one goroutine per
// pixel column. fillMapper and roworbit are defined elsewhere in this
// package; each roworbit call receives its column's precomputed real
// coordinate and signals wg when done.
func DrawJulia(src image.Image, rgba *image.RGBA, zoom float64, center complex128, iterations float64) {
	w, h := float64(rgba.Bounds().Size().X), float64(rgba.Bounds().Size().Y)
	ratio := w / h
	fillMapper(src)
	wg := new(sync.WaitGroup)
	wg.Add(int(w))
	for x := 0.0; x < w; x++ {
		// (x-w/2.0) is used to make the x-axis (and also the origo) run through the middle of the screen, horizontally.
		// (0.2 * zoom * w) is used to make the zoom proportional to the width of the image.
		// However since we want to zoom on the x-/y-axis equally we need to make the real value proportional to the imaginary value.
		// Because 'w' is 'ratio' times bigger than 'h' we multiply the real value with ratio.
		// Now the values are proportional.
		pr := ratio * (x - w/2.0) / (0.2 * zoom * w)
		go roworbit(src, rgba, w, h, x, zoom, iterations, pr, center, wg)
	}
	wg.Wait()
}
// Julia returns an image of size n x n of the Julia set for f.
// One producer goroutine per pixel column feeds the cols channel;
// drawCols consumes exactly n columns and assembles the image.
func Julia(f ComplexFunc, n int) image.Image {
	cols := make(chan column)
	bounds := image.Rect(-n/2, -n/2, n/2, n/2)
	for x := bounds.Min.X; x < bounds.Max.X; x++ {
		go produceCols(x, n, f, bounds, cols)
	}
	return drawCols(bounds, cols, n)
}
// produceCols renders the pixel column at image coordinate x and sends it
// on cols. The column slice is 0-indexed, so y is offset by n/2.
func produceCols(x, n int, f ComplexFunc, bounds image.Rectangle, cols chan<- column) {
	col := make([]color.RGBA, bounds.Max.Y-bounds.Min.Y)
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		col[y+n/2] = point(x, y, n, f)
	}
	cols <- column{x, col}
}
// point maps image coordinates (x, y) to a point in the complex plane
// (scaled by n/4, so the image spans roughly ±2) and colors it by the
// divergence rate of f: only the blue channel varies, cycling every 32
// iterations.
func point(x, y, n int, f ComplexFunc) color.RGBA {
	s := float64(n / 4)
	// function point in julia set relative to the image resolution.
	rel := complex(float64(x)/s, float64(y)/s)
	// Julia value, i.e divergence rate.
	jv := Iterate(f, rel, 256)
	return color.RGBA{
		0,
		0,
		uint8(jv % 32 * 8),
		255,
	}
}
// column carries one rendered pixel column and its x coordinate from a
// producer goroutine to drawCols.
type column struct {
	x int
	pixels []color.RGBA
}
// drawCols assembles the final image from exactly n rendered columns.
// The receiver closes cols while processing the last expected column (num
// is compared against the height, which equals the column count for the
// square bounds used here); by then every producer's single send has
// completed, so closing from the receiving side is safe and lets the
// range loop terminate.
func drawCols(bounds image.Rectangle, cols chan column, n int) (img *image.RGBA) {
	img = image.NewRGBA(bounds)
	num := 0
	for col := range cols {
		if num == bounds.Max.Y-bounds.Min.Y-1 {
			close(cols)
		}
		for y, pixel := range col.pixels {
			img.Set(col.x, y-n/2, pixel)
		}
		num++
	}
	return
}
// Iterate sets z_0 = z, and repeatedly computes z_n = f(z_{n-1}), n ≥ 1,
// until |z_n| > 2 or n = max and returns this n.
func Iterate(f ComplexFunc, z complex128, max int) (n int) {
for ; n < max; n++ {
if real(z)*real(z)+imag(z)*imag(z) > 4 {
break
}
z = f(z)
}
return
} | fractal/julia/julia.go | 0.789071 | 0.487551 | julia.go | starcoder |
package finnhub
import (
"encoding/json"
)
// RecommendationTrend struct for RecommendationTrend.
// Generated model: every field is an optional pointer so that an absent
// JSON key can be distinguished from a zero value.
type RecommendationTrend struct {
	// Company symbol.
	Symbol *string `json:"symbol,omitempty"`
	// Number of recommendations that fall into the Buy category
	Buy *int64 `json:"buy,omitempty"`
	// Number of recommendations that fall into the Hold category
	Hold *int64 `json:"hold,omitempty"`
	// Updated period
	Period *string `json:"period,omitempty"`
	// Number of recommendations that fall into the Sell category
	Sell *int64 `json:"sell,omitempty"`
	// Number of recommendations that fall into the Strong Buy category
	StrongBuy *int64 `json:"strongBuy,omitempty"`
	// Number of recommendations that fall into the Strong Sell category
	StrongSell *int64 `json:"strongSell,omitempty"`
}
// NewRecommendationTrend instantiates a new RecommendationTrend object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewRecommendationTrend() *RecommendationTrend {
	this := RecommendationTrend{}
	return &this
}
// NewRecommendationTrendWithDefaults instantiates a new RecommendationTrend object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// (currently no field carries a default, so both constructors are
// equivalent).
func NewRecommendationTrendWithDefaults() *RecommendationTrend {
	this := RecommendationTrend{}
	return &this
}
// Symbol accessors. The getters tolerate a nil receiver; SetSymbol does not.
// GetSymbol returns the Symbol field value if set, zero value otherwise.
func (o *RecommendationTrend) GetSymbol() string {
	if o == nil || o.Symbol == nil {
		var ret string
		return ret
	}
	return *o.Symbol
}
// GetSymbolOk returns a tuple with the Symbol field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecommendationTrend) GetSymbolOk() (*string, bool) {
	if o == nil || o.Symbol == nil {
		return nil, false
	}
	return o.Symbol, true
}
// HasSymbol returns a boolean if a field has been set.
func (o *RecommendationTrend) HasSymbol() bool {
	if o != nil && o.Symbol != nil {
		return true
	}
	return false
}
// SetSymbol gets a reference to the given string and assigns it to the Symbol field.
func (o *RecommendationTrend) SetSymbol(v string) {
	o.Symbol = &v
}
// Buy accessors. The getters tolerate a nil receiver; SetBuy does not.
// GetBuy returns the Buy field value if set, zero value otherwise.
func (o *RecommendationTrend) GetBuy() int64 {
	if o == nil || o.Buy == nil {
		var ret int64
		return ret
	}
	return *o.Buy
}
// GetBuyOk returns a tuple with the Buy field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecommendationTrend) GetBuyOk() (*int64, bool) {
	if o == nil || o.Buy == nil {
		return nil, false
	}
	return o.Buy, true
}
// HasBuy returns a boolean if a field has been set.
func (o *RecommendationTrend) HasBuy() bool {
	if o != nil && o.Buy != nil {
		return true
	}
	return false
}
// SetBuy gets a reference to the given int64 and assigns it to the Buy field.
func (o *RecommendationTrend) SetBuy(v int64) {
	o.Buy = &v
}
// Hold accessors. The getters tolerate a nil receiver; SetHold does not.
// GetHold returns the Hold field value if set, zero value otherwise.
func (o *RecommendationTrend) GetHold() int64 {
	if o == nil || o.Hold == nil {
		var ret int64
		return ret
	}
	return *o.Hold
}
// GetHoldOk returns a tuple with the Hold field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecommendationTrend) GetHoldOk() (*int64, bool) {
	if o == nil || o.Hold == nil {
		return nil, false
	}
	return o.Hold, true
}
// HasHold returns a boolean if a field has been set.
func (o *RecommendationTrend) HasHold() bool {
	if o != nil && o.Hold != nil {
		return true
	}
	return false
}
// SetHold gets a reference to the given int64 and assigns it to the Hold field.
func (o *RecommendationTrend) SetHold(v int64) {
	o.Hold = &v
}
// Period accessors. The getters tolerate a nil receiver; SetPeriod does not.
// GetPeriod returns the Period field value if set, zero value otherwise.
func (o *RecommendationTrend) GetPeriod() string {
	if o == nil || o.Period == nil {
		var ret string
		return ret
	}
	return *o.Period
}
// GetPeriodOk returns a tuple with the Period field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecommendationTrend) GetPeriodOk() (*string, bool) {
	if o == nil || o.Period == nil {
		return nil, false
	}
	return o.Period, true
}
// HasPeriod returns a boolean if a field has been set.
func (o *RecommendationTrend) HasPeriod() bool {
	if o != nil && o.Period != nil {
		return true
	}
	return false
}
// SetPeriod gets a reference to the given string and assigns it to the Period field.
func (o *RecommendationTrend) SetPeriod(v string) {
	o.Period = &v
}
// Sell accessors. The getters tolerate a nil receiver; SetSell does not.
// GetSell returns the Sell field value if set, zero value otherwise.
func (o *RecommendationTrend) GetSell() int64 {
	if o == nil || o.Sell == nil {
		var ret int64
		return ret
	}
	return *o.Sell
}
// GetSellOk returns a tuple with the Sell field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecommendationTrend) GetSellOk() (*int64, bool) {
	if o == nil || o.Sell == nil {
		return nil, false
	}
	return o.Sell, true
}
// HasSell returns a boolean if a field has been set.
func (o *RecommendationTrend) HasSell() bool {
	if o != nil && o.Sell != nil {
		return true
	}
	return false
}
// SetSell gets a reference to the given int64 and assigns it to the Sell field.
func (o *RecommendationTrend) SetSell(v int64) {
	o.Sell = &v
}
// StrongBuy accessors. The getters tolerate a nil receiver; SetStrongBuy does not.
// GetStrongBuy returns the StrongBuy field value if set, zero value otherwise.
func (o *RecommendationTrend) GetStrongBuy() int64 {
	if o == nil || o.StrongBuy == nil {
		var ret int64
		return ret
	}
	return *o.StrongBuy
}
// GetStrongBuyOk returns a tuple with the StrongBuy field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecommendationTrend) GetStrongBuyOk() (*int64, bool) {
	if o == nil || o.StrongBuy == nil {
		return nil, false
	}
	return o.StrongBuy, true
}
// HasStrongBuy returns a boolean if a field has been set.
func (o *RecommendationTrend) HasStrongBuy() bool {
	if o != nil && o.StrongBuy != nil {
		return true
	}
	return false
}
// SetStrongBuy gets a reference to the given int64 and assigns it to the StrongBuy field.
func (o *RecommendationTrend) SetStrongBuy(v int64) {
	o.StrongBuy = &v
}
// StrongSell accessors. The getters tolerate a nil receiver; SetStrongSell does not.
// GetStrongSell returns the StrongSell field value if set, zero value otherwise.
func (o *RecommendationTrend) GetStrongSell() int64 {
	if o == nil || o.StrongSell == nil {
		var ret int64
		return ret
	}
	return *o.StrongSell
}
// GetStrongSellOk returns a tuple with the StrongSell field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *RecommendationTrend) GetStrongSellOk() (*int64, bool) {
	if o == nil || o.StrongSell == nil {
		return nil, false
	}
	return o.StrongSell, true
}
// HasStrongSell returns a boolean if a field has been set.
func (o *RecommendationTrend) HasStrongSell() bool {
	if o != nil && o.StrongSell != nil {
		return true
	}
	return false
}
// SetStrongSell gets a reference to the given int64 and assigns it to the StrongSell field.
func (o *RecommendationTrend) SetStrongSell(v int64) {
	o.StrongSell = &v
}
// MarshalJSON serializes only the fields that have been set, matching the
// omitempty semantics of the struct tags.
func (o RecommendationTrend) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Symbol != nil {
		toSerialize["symbol"] = o.Symbol
	}
	if o.Buy != nil {
		toSerialize["buy"] = o.Buy
	}
	if o.Hold != nil {
		toSerialize["hold"] = o.Hold
	}
	if o.Period != nil {
		toSerialize["period"] = o.Period
	}
	if o.Sell != nil {
		toSerialize["sell"] = o.Sell
	}
	if o.StrongBuy != nil {
		toSerialize["strongBuy"] = o.StrongBuy
	}
	if o.StrongSell != nil {
		toSerialize["strongSell"] = o.StrongSell
	}
	return json.Marshal(toSerialize)
}
// NullableRecommendationTrend wraps a RecommendationTrend together with an
// explicit isSet flag, so callers can distinguish "never assigned" from an
// explicit JSON null.
type NullableRecommendationTrend struct {
	value *RecommendationTrend
	isSet bool
}
// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableRecommendationTrend) Get() *RecommendationTrend {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableRecommendationTrend) Set(val *RecommendationTrend) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableRecommendationTrend) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableRecommendationTrend) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableRecommendationTrend returns a wrapper already marked as set.
func NewNullableRecommendationTrend(val *RecommendationTrend) *NullableRecommendationTrend {
	return &NullableRecommendationTrend{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value (null when unset).
func (v NullableRecommendationTrend) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks it set; a JSON
// null leaves value nil but still flips isSet.
func (v *NullableRecommendationTrend) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package arts
import (
"math"
"math/rand"
"github.com/andrewwatson/generativeart"
"github.com/andrewwatson/generativeart/common"
"github.com/fogleman/gg"
)
// colorCircle is a generative-art piece that draws circleNum randomly
// placed, randomly styled circles onto a canvas.
type colorCircle struct {
	circleNum int
}
// NewColorCircle returns a colorCircle generator that will draw circleNum
// circles.
func NewColorCircle(circleNum int) *colorCircle {
	return &colorCircle{
		circleNum: circleNum,
	}
}
// Generative draws the color circle image.
// For each circle a style is chosen at random: a dotted ring
// (drawCircleV1), a plain stroked circle, or a spray of translucent
// points (drawCircleV2); the rnd == 2 re-roll biases the distribution
// away from the spray style. The returned string is always empty.
func (cc *colorCircle) Generative(c *generativeart.Canva) string {
	ctex := gg.NewContextForRGBA(c.Img())
	for i := 0; i < cc.circleNum; i++ {
		rnd := rand.Intn(3)
		// Center may fall slightly outside the canvas (-0.1..1.1) so
		// circles can be clipped by the edges.
		x := common.RandomRangeFloat64(-0.1, 1.1) * float64(c.Width())
		y := common.RandomRangeFloat64(-0.1, 1.1) * float64(c.Height())
		s := common.RandomRangeFloat64(0, common.RandomRangeFloat64(0, float64(c.Width()/2))) + 10
		if rnd == 2 {
			rnd = rand.Intn(3)
		}
		switch rnd {
		case 0:
			cc.drawCircleV1(ctex, c, x, y, s)
		case 1:
			ctex.SetLineWidth(common.RandomRangeFloat64(0, 1))
			ctex.SetColor(c.Opts().ColorSchema()[rand.Intn(len(c.Opts().ColorSchema()))])
			ctex.DrawCircle(x, y, common.RandomRangeFloat64(0, s)/2)
			ctex.Stroke()
		case 2:
			cc.drawCircleV2(ctex, c, x, y, s)
		}
	}
	return ""
}
// drawCircleV1 draws a ring of n (4..30) small filled dots of diameter cs
// (2..8), evenly spaced on a circle of diameter s centered at (x, y), all
// in one randomly chosen schema color.
func (cc *colorCircle) drawCircleV1(ctex *gg.Context, c *generativeart.Canva, x, y, s float64) {
	n := common.RandomRangeInt(4, 30)
	cs := common.RandomRangeFloat64(2, 8)
	ctex.SetColor(c.Opts().ColorSchema()[rand.Intn(len(c.Opts().ColorSchema()))])
	ctex.Push()
	ctex.Translate(x, y)
	for a := 0.0; a < math.Pi*2.0; a += math.Pi * 2.0 / float64(n) {
		ctex.DrawEllipse(s*0.5*math.Cos(a), s*0.5*math.Sin(a), cs/2, cs/2)
		ctex.Fill()
	}
	ctex.Pop()
}
// drawCircleV2 sprays translucent points in concentric rings around
// (x, y), fading outward: alpha is 255*sx/j clamped to [0, 255], so inner
// rings (small j) are opaque and outer rings fade. 200 points are placed
// per ring; the loop starts at j = 0.0001 rather than 0 so the first
// alpha computation does not divide by zero.
func (cc *colorCircle) drawCircleV2(ctex *gg.Context, c *generativeart.Canva, x, y, s float64) {
	cl := c.Opts().ColorSchema()[rand.Intn(len(c.Opts().ColorSchema()))]
	ctex.SetLineWidth(1.0)
	sx := s * common.RandomRangeFloat64(0.1, 0.55)
	for j := 0.0001; j < sx; j++ {
		dd := s + j*2.0
		alpha := int(255 * sx / j)
		if alpha > 255 {
			alpha = 255
		}
		if alpha < 0 {
			alpha = 0
		}
		//alpha := RandomRangeInt(30, 150)
		cl.A = uint8(alpha)
		ctex.SetColor(cl)
		for i := 0; i < 200; i++ {
			theta := common.RandomRangeFloat64(0, math.Pi*2)
			xx := x + dd*0.3*math.Cos(theta)
			yy := y + dd*0.3*math.Sin(theta)
			//ctex.DrawLine(xx, yy, xx, yy)
			ctex.DrawPoint(xx, yy, 0.6)
			ctex.Stroke()
		}
	}
}
package continuous
import (
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
"math"
"math/rand"
)
// Johnson SN Distribution (Normal)
// https://reference.wolfram.com/language/ref/JohnsonDistribution.html
type JohnsonSN struct {
	gamma, delta, location, scale float64 // γ, δ, location μ, and scale σ
	src rand.Source // optional randomness source used by Rand; nil means the global source
}
// NewJohnsonSN constructs a Johnson SN distribution using the default
// randomness source; parameter validation is delegated to
// NewJohnsonSNWithSource.
func NewJohnsonSN(gamma, delta, location, scale float64) (*JohnsonSN, error) {
	return NewJohnsonSNWithSource(gamma, delta, location, scale, nil)
}
// NewJohnsonSNWithSource constructs a Johnson SN distribution with an
// explicit randomness source (src may be nil, in which case the global
// math/rand source is used by Rand).
//
// Both the shape parameter δ and the scale σ must be strictly positive,
// per the parameter ranges declared in Parameters. The previous check
// used &&, which accepted invalid parameter sets in which only one of the
// two was non-positive; it now rejects either violation.
func NewJohnsonSNWithSource(gamma, delta, location, scale float64, src rand.Source) (*JohnsonSN, error) {
	if delta <= 0 || scale <= 0 {
		return nil, err.Invalid()
	}
	return &JohnsonSN{gamma, delta, location, scale, src}, nil
}
// γ ∈ (-∞,∞)
// δ ∈ (0,∞)
// μ ∈ (-∞,∞)
// σ ∈ (0,∞)
// Parameters returns the valid open interval for each parameter.
func (j *JohnsonSN) Parameters() stats.Limits {
	return stats.Limits{
		"γ": stats.Interval{math.Inf(-1), math.Inf(1), true, true},
		"δ": stats.Interval{0, math.Inf(1), true, true},
		"μ": stats.Interval{math.Inf(-1), math.Inf(1), true, true},
		"σ": stats.Interval{0, math.Inf(1), true, true},
	}
}
// x ∈ (-∞,∞)
// Support returns the distribution's support: the whole real line.
func (j *JohnsonSN) Support() stats.Interval {
	return stats.Interval{math.Inf(-1), math.Inf(1), true, true}
}
// Probability returns the PDF at x: the standard normal density of the
// transformed variable z = γ + δ(x-μ)/σ, scaled by δ/σ.
func (j *JohnsonSN) Probability(x float64) float64 {
	z := j.gamma + (j.delta*(x-j.location))/j.scale
	return (math.Exp(-.5*math.Pow(z, 2)) * j.delta) / (math.Sqrt(2*math.Pi) * j.scale)
}
// Distribution returns the CDF at x, i.e. Φ(γ + δ(x-μ)/σ), expressed via
// the complementary error function.
func (j *JohnsonSN) Distribution(x float64) float64 {
	z := j.gamma + (j.delta*(x-j.location))/j.scale
	return .5 * math.Erfc(-(z / math.Sqrt(2)))
}
// Mean returns E[X] = (δμ - γσ)/δ, i.e. μ - γσ/δ.
func (j *JohnsonSN) Mean() float64 {
	return (j.delta*j.location - j.gamma*j.scale) / j.delta
}
// Variance returns Var[X] = (σ/δ)².
func (j *JohnsonSN) Variance() float64 {
	return (j.scale * j.scale) / (j.delta * j.delta)
}
// Median returns the 0.5 quantile, μ - γσ/δ (the same expression as the
// mean: the distribution is symmetric about it).
func (j *JohnsonSN) Median() float64 {
	return j.location - (j.gamma*j.scale)/j.delta
}
// ExKurtosis returns the excess kurtosis, which is 0 for the SN member.
func (j *JohnsonSN) ExKurtosis() float64 {
	return 0
}
// Entropy is not implemented: it reports via stats.NotImplementedError
// and returns NaN.
func (j *JohnsonSN) Entropy() float64 {
	stats.NotImplementedError()
	return math.NaN()
}
// Inverse returns the quantile function at q. Arguments q ≤ 0 and q ≥ 1
// map to -Inf and +Inf respectively.
func (j *JohnsonSN) Inverse(q float64) float64 {
	switch {
	case q <= 0:
		return math.Inf(-1)
	case q >= 1:
		return math.Inf(1)
	}
	// Standard-normal quantile via the inverse complementary error
	// function, then the inverse Johnson transform.
	z := -j.gamma - math.Sqrt(2)*math.Erfcinv(2*q)
	return j.location + (j.scale*z)/j.delta
}
// Rand draws one variate by inverse-transform sampling: a uniform value
// from src (or from the global math/rand source when src is nil) is
// passed through the quantile function.
func (j *JohnsonSN) Rand() float64 {
	var rnd float64
	if j.src != nil {
		rnd = rand.New(j.src).Float64()
	} else {
		rnd = rand.Float64()
	}
	return j.Inverse(rnd)
}
package indexer
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"image/color"
"math"
"net/http"
"sort"
"strings"
"time"
svg "github.com/ajstarks/svgo"
"github.com/mikeydub/go-gallery/service/persist"
)
/**
* The drawing instructions for the nine different symbols are as follows:
*
* . Draw nothing in the cell.
* O Draw a circle bounded by the cell.
* + Draw centered lines vertically and horizontally the length of the cell.
* X Draw diagonal lines connecting opposite corners of the cell.
* | Draw a centered vertical line the length of the cell.
* - Draw a centered horizontal line the length of the cell.
* \ Draw a line connecting the top left corner of the cell to the bottom right corner.
 * / Draw a line connecting the bottom left corner of the cell to the top right corner.
* # Fill in the cell completely.
*
*/
// autoglyphs renders an Autoglyph token into SVG metadata. The token URI
// is the glyph text itself, one symbol per cell in rows of 64; the glyph
// is drawn with an 8-cell margin inside an 80x80 white canvas and
// returned as a base64 data-URI image alongside name/description fields.
// NOTE(review): the loop variable i shadows the *Indexer parameter i,
// which is otherwise unused here; the error return is always nil.
func autoglyphs(i *Indexer, turi persist.TokenURI, addr persist.Address, tid persist.TokenID) (persist.TokenMetadata, error) {
	width := 80
	height := 80
	buf := &bytes.Buffer{}
	canvas := svg.New(buf)
	canvas.Start(width, height)
	// White background square covering the whole canvas.
	canvas.Square(0, 0, width, canvas.RGB(255, 255, 255))
	for i, c := range turi {
		// Map the linear symbol index to a grid cell, offset by the margin.
		y := int(math.Floor(float64(i)/float64(64))) + 8
		x := (i % 64) + 8
		switch c {
		case 'O':
			canvas.Circle(x, y, 1, canvas.RGB(0, 0, 0))
		case '+':
			canvas.Line(x, y, x+1, y, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
			canvas.Line(x, y, x, (y + 1), `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case 'X':
			canvas.Line(x, y, x+1, y+1, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
			canvas.Line(x, y, x+1, y-1, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '|':
			canvas.Line(x, y, x, y+1, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '-':
			canvas.Line(x, y, x+1, y, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '\\':
			canvas.Line(x, y, x+1, y+1, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '/':
			canvas.Line(x, y, x+1, y-1, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '#':
			canvas.Rect(x, y, 1, 1, `stroke="black"`, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		}
	}
	canvas.End()
	return persist.TokenMetadata{
		"name":        fmt.Sprintf("Autoglyph #%s", tid.Base10String()),
		"description": "Autoglyphs are the first “on-chain” generative art on the Ethereum blockchain. A completely self-contained mechanism for the creation and ownership of an artwork.",
		"image":       fmt.Sprintf("data:image/svg+xml;base64,%s", base64.StdEncoding.EncodeToString(buf.Bytes())),
	}, nil
}
/**
* The drawing instructions for the nine different symbols are as follows:
*
* . Draw nothing in the cell.
* O Draw a circle bounded by the cell.
* + Draw centered lines vertically and horizontally the length of the cell.
* X Draw diagonal lines connecting opposite corners of the cell.
* | Draw a centered vertical line the length of the cell.
* - Draw a centered horizontal line the length of the cell.
* \ Draw a line connecting the top left corner of the cell to the bottom right corner.
* / Draw a line connecting the bottom left corner of teh cell to the top right corner.
* # Fill in the cell completely.
*
* The 'tokenURI' function of colorglyphs adds two pieces of information to the response provided by autoglyphs:
* 1) The color scheme to apply to the Colorglyph.
* 2) The address of the Colorglyph's creator, from which colors are derived.
*
* The address of the Colorglyph's creator is split up into 35 6 digit chunks.
* For example, the first three chunks of 0xb189f76323678E094D4996d182A792E52369c005 are: b189f7, 189f76, and 89f763.
* The last chunk is 69c005.
* Each Colorglyph is an Autoglyph with a color scheme applied to it.
* Each Colorglyph takes the same shape as the Autoglyph of the corresponding ID.
* If the Colorglyph's ID is higher than 512, it takes the shape of the Autoglyph with its Colorglyphs ID - 512.
* Each black element in the Autoglyph is assigned a new color.
* The background color of the Autoglyph is changed to either black or one of the address colors.
* Visual implementations of Colorglyphs may exercise a substantial degree of flexibility.
* Color schemes that use multiple colors may apply any permitted color to any element,
* but no color should appear more than 16 times as often as the color with the lowest number of incidences.
* In the event that a color meets two conditions (reddest and orangest, for example),
 * it may be used for both purposes. The previous guideline establishing a threshold ratio of occurrences
* treats the reddest color and the orangest color as two different colors, even if they have the same actual value.
* lightest address color = chunk with the lowest value resulting from red value + green value + blue value
* second lightest address color = second lightest chunk in relevant address
* third lightest address color = third lightest chunk in relevant address
* fourth lightest address color = fourth lightest chunk in relevant address
* fifth lightest address color = fifth lightest chunk in relevant address
* reddest address color = chunk with the lowest value resulting from red value - green value - blue value
* orangest address color = chunk with the highest value resulting from red value - blue value
 * yellowest address color = chunk with highest value resulting from red value + green value - blue value
 * greenest address color = chunk with highest value resulting from green value - red value - blue value
 * bluest address color = chunk with highest value resulting from blue value - green value - red value
* darkest address color = darkest chunk in relevant address
* white = ffffff
* black = 020408
* scheme 1 = lightest address color, third lightest address color, and fifth lightest address color on black
* scheme 2 = lighest 4 address colors on black
* scheme 3 = reddest address color, orangest address color, and yellowest address color on black
* scheme 4 = reddest address color, yellowest address color, greenest address color, and white on black
* scheme 5 = lightest address color, reddest address color, yellowest address color, greenest address color, and bluest address color on black
* scheme 6 = reddest address color and white on black
* scheme 7 = greenest address color on black
* scheme 8 = lightest address color on darkest address color
* scheme 9 = greenest address color on reddest address color
* scheme 10 = reddest address color, yellowest address color, bluest address color, lightest address color, and black on white
*/
// colorglyphs renders a Colorglyph token into SVG metadata. The token URI
// is "<glyph text> <scheme number> <creator address>": the glyph text is
// drawn exactly like an Autoglyph, but each symbol is stroked with a
// color from the scheme derived from the creator's address (35
// overlapping 6-hex-digit chunks, ranked as described in the comment
// above).
//
// Bug fix: the color rankings were previously computed directly on the
// uint8 R/G/B fields; in Go, uint8 sums like R+G+B and differences like
// R-G-B wrap modulo 256, so the "lightest"/"reddest"/... selections were
// frequently wrong. All ranking arithmetic now widens to int first.
func colorglyphs(i *Indexer, turi persist.TokenURI, addr persist.Address, tid persist.TokenID) (persist.TokenMetadata, error) {
	spl := strings.Split(string(turi), " ")
	if len(spl) != 3 {
		panic("invalid colorglyphs tokenURI")
	}
	// 35 overlapping 6-hex-digit chunks of the creator address.
	allColors := make([]color.RGBA, 35)
	for i := 0; i < 35; i++ {
		col, err := parseHexColor(spl[2][i : i+6])
		if err != nil {
			panic(err)
		}
		allColors[i] = col
	}
	// Ranking helpers; the int conversions prevent uint8 wrap-around.
	sum := func(c color.RGBA) int { return int(c.R) + int(c.G) + int(c.B) }
	redness := func(c color.RGBA) int { return int(c.R) - int(c.G) - int(c.B) }
	orangeness := func(c color.RGBA) int { return int(c.R) - int(c.B) }
	yellowness := func(c color.RGBA) int { return int(c.R) + int(c.G) - int(c.B) }
	greenness := func(c color.RGBA) int { return int(c.G) - int(c.R) - int(c.B) }
	blueness := func(c color.RGBA) int { return int(c.B) - int(c.G) - int(c.R) }
	// sort colors by value
	sort.Slice(allColors, func(i, j int) bool {
		return sum(allColors[i]) < sum(allColors[j])
	})
	lightestColor := allColors[0]
	secondLightestColor := allColors[1]
	thirdLightestColor := allColors[2]
	fourthLightestColor := allColors[3]
	fifthLightestColor := allColors[4]
	darkestColor := allColors[34]
	sort.Slice(allColors, func(i, j int) bool {
		return redness(allColors[i]) < redness(allColors[j])
	})
	reddestColor := allColors[0]
	sort.Slice(allColors, func(i, j int) bool {
		return orangeness(allColors[i]) > orangeness(allColors[j])
	})
	orangestColor := allColors[0]
	sort.Slice(allColors, func(i, j int) bool {
		return yellowness(allColors[i]) > yellowness(allColors[j])
	})
	yellowestColor := allColors[0]
	sort.Slice(allColors, func(i, j int) bool {
		return greenness(allColors[i]) > greenness(allColors[j])
	})
	greenestColor := allColors[0]
	sort.Slice(allColors, func(i, j int) bool {
		return blueness(allColors[i]) > blueness(allColors[j])
	})
	bluestColor := allColors[0]
	white := color.RGBA{255, 255, 255, 255}
	black := color.RGBA{2, 4, 8, 0}
	// Pick the palette and background for the token's scheme number.
	var schemeColors []color.RGBA
	var backgroundColor color.RGBA
	switch spl[1] {
	case "1":
		schemeColors = []color.RGBA{lightestColor, thirdLightestColor, fifthLightestColor}
		backgroundColor = black
	case "2":
		schemeColors = []color.RGBA{lightestColor, secondLightestColor, thirdLightestColor, fourthLightestColor}
		backgroundColor = black
	case "3":
		schemeColors = []color.RGBA{reddestColor, orangestColor, yellowestColor}
		backgroundColor = black
	case "4":
		schemeColors = []color.RGBA{reddestColor, yellowestColor, greenestColor, white}
		backgroundColor = black
	case "5":
		schemeColors = []color.RGBA{lightestColor, reddestColor, yellowestColor, greenestColor, bluestColor}
		backgroundColor = black
	case "6":
		schemeColors = []color.RGBA{reddestColor, white}
		backgroundColor = black
	case "7":
		schemeColors = []color.RGBA{greenestColor}
		backgroundColor = black
	case "8":
		schemeColors = []color.RGBA{lightestColor}
		backgroundColor = darkestColor
	case "9":
		schemeColors = []color.RGBA{greenestColor}
		backgroundColor = reddestColor
	case "10":
		schemeColors = []color.RGBA{reddestColor, yellowestColor, bluestColor, lightestColor, black}
		backgroundColor = white
	}
	width := 80
	height := 80
	buf := &bytes.Buffer{}
	canvas := svg.New(buf)
	canvas.Start(width, height)
	canvas.Square(0, 0, width, canvas.RGB(int(backgroundColor.R), int(backgroundColor.G), int(backgroundColor.B)))
	for i, c := range spl[0] {
		y := int(math.Floor(float64(i)/float64(64))) + 8
		x := (i % 64) + 8
		// Deterministically pick a scheme color from the symbol's rune value.
		col := schemeColors[int(math.Floor(float64(int(c))/float64(len(schemeColors))))%len(schemeColors)]
		stroke := fmt.Sprintf(`stroke="rgb(%d,%d,%d)"`, col.R, col.G, col.B)
		switch c {
		case 'O':
			canvas.Circle(x, y, 1, stroke, `stroke-width="0.1"`, `stroke-linecap="butt"`, `fill="none"`)
		case '+':
			canvas.Line(x, y, x+1, y, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
			canvas.Line(x, y, x, (y + 1), stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case 'X':
			canvas.Line(x, y, x+1, y+1, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
			canvas.Line(x, y, x+1, y-1, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '|':
			canvas.Line(x, y, x, y+1, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '-':
			canvas.Line(x, y, x+1, y, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '\\':
			canvas.Line(x, y, x+1, y+1, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '/':
			canvas.Line(x, y, x+1, y-1, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		case '#':
			canvas.Rect(x, y, 1, 1, stroke, `stroke-width="0.2"`, `stroke-linecap="butt"`)
		}
	}
	canvas.End()
	return persist.TokenMetadata{
		"name":        fmt.Sprintf("Colorglyph #%s", tid.Base10String()),
		"description": fmt.Sprintf("A Colorglyph with color scheme %s. Created by %s.", spl[1], spl[2]),
		"image":       fmt.Sprintf("data:image/svg+xml;base64,%s", base64.StdEncoding.EncodeToString(buf.Bytes())),
	}, nil
}
// parseHexColor converts a 6-hex-digit color string (e.g. "ff0080") into
// a color.RGBA; the alpha channel is left at zero. It returns an error
// for non-hex input or for input shorter than three bytes (the original
// version would index past the decoded slice and panic), and the stray
// debug fmt.Printf has been removed.
func parseHexColor(s string) (c color.RGBA, err error) {
	asBytes, err := hex.DecodeString(s)
	if err != nil {
		return
	}
	if len(asBytes) < 3 {
		err = fmt.Errorf("hex color %q: need at least 3 bytes, got %d", s, len(asBytes))
		return
	}
	c.R = asBytes[0]
	c.G = asBytes[1]
	c.B = asBytes[2]
	return
}
// ensGraph is the public ENS subgraph endpoint used to resolve a label
// hash back to its human-readable name.
const ensGraph = "https://api.thegraph.com/subgraphs/name/ensdomains/ens"
// ensDomain mirrors one domain entry in the subgraph response.
type ensDomain struct {
	LabelName string `json:"labelName"`
}
// ensDomains is the "domains" list inside the GraphQL payload.
type ensDomains struct {
	Domains []ensDomain `json:"domains"`
}
// graphResponse is the top-level GraphQL response envelope.
type graphResponse struct {
	Data ensDomains `json:"data"`
}
// ens resolves an ENS token ID (the name's labelhash) to its label via
// the public ENS subgraph, then renders "<label>.eth" as centered text in
// a 240x240 SVG and returns it as data-URI metadata. The lookup is
// bounded by a 10 second timeout; exactly one matching domain is
// required, otherwise an error is returned.
// NOTE(review): the HTTP status code is never checked — a non-200
// response surfaces only as a JSON decode error or an empty domain list.
func ens(i *Indexer, turi persist.TokenURI, addr persist.Address, tid persist.TokenID) (persist.TokenMetadata, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// GraphQL query keyed on the token ID, which is the ENS labelhash.
	gql := fmt.Sprintf(`
	{
		domains(first:1, where:{labelhash:"%s"}){
			labelName
		}
	}`, tid)
	jsonData := map[string]interface{}{
		"query": gql,
	}
	marshaled, err := json.Marshal(jsonData)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, "POST", ensGraph, bytes.NewBuffer(marshaled))
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var gr graphResponse
	err = json.NewDecoder(resp.Body).Decode(&gr)
	if err != nil {
		return nil, err
	}
	if len(gr.Data.Domains) == 0 {
		return nil, fmt.Errorf("no ENS domain found for %s", tid)
	}
	if len(gr.Data.Domains) > 1 {
		return nil, fmt.Errorf("multiple ENS domains found for %s", tid)
	}
	domain := gr.Data.Domains[0]
	width := 240
	height := 240
	buf := &bytes.Buffer{}
	canvas := svg.New(buf)
	canvas.Start(width, height)
	canvas.Square(0, 0, width, canvas.RGB(255, 255, 255))
	canvas.Text(width/2, height/2, domain.LabelName+".eth", `font-size="16px"`, `text-anchor="middle"`, `alignment-baseline="middle"`)
	canvas.End()
	return persist.TokenMetadata{
		"name":        fmt.Sprintf("ENS: %s", domain.LabelName+".eth"),
		"description": "ENS names are used to resolve domain names to Ethereum addresses.",
		"image":       fmt.Sprintf("data:image/svg+xml;base64,%s", base64.StdEncoding.EncodeToString(buf.Bytes())),
	}, nil
}
package numgo
import (
"fmt"
"math"
"runtime"
"sort"
)
// Equals performs boolean '==' element-wise comparison.
// NaN compares equal to NaN, unlike the raw float64 '==' operator.
func (a *Array64) Equals(b *Array64) (r *Arrayb) {
	if r = a.compValid(b, "Equals()"); r != nil {
		return r
	}
	eq := func(x, y float64) bool {
		if math.IsNaN(x) && math.IsNaN(y) {
			return true
		}
		return x == y
	}
	return a.comp(b, eq)
}
// NotEq performs boolean '!=' element-wise comparison.
// Two NaN values are treated as equal, so a pair of NaNs yields false.
func (a *Array64) NotEq(b *Array64) (r *Arrayb) {
	r = a.compValid(b, "NotEq()")
	if r != nil {
		return r
	}
	r = a.comp(b, func(i, j float64) bool {
		return i != j && !(math.IsNaN(i) && math.IsNaN(j))
	})
	return
}
// Less performs boolean '<' element-wise comparison.
func (a *Array64) Less(b *Array64) (r *Arrayb) {
	if r = a.compValid(b, "Less()"); r != nil {
		return r
	}
	lt := func(x, y float64) bool { return x < y }
	return a.comp(b, lt)
}
// LessEq performs boolean '<=' element-wise comparison.
func (a *Array64) LessEq(b *Array64) (r *Arrayb) {
	if r = a.compValid(b, "LessEq()"); r != nil {
		return r
	}
	le := func(x, y float64) bool { return x <= y }
	return a.comp(b, le)
}
// Greater performs boolean '>' element-wise comparison.
func (a *Array64) Greater(b *Array64) (r *Arrayb) {
	r = a.compValid(b, "Greater()")
	if r != nil {
		return r
	}
	r = a.comp(b, func(i, j float64) bool {
		return i > j
	})
	return
}
// GreaterEq performs boolean '>=' element-wise comparison.
func (a *Array64) GreaterEq(b *Array64) (r *Arrayb) {
	r = a.compValid(b, "GreaterEq()")
	if r != nil {
		return r
	}
	r = a.comp(b, func(i, j float64) bool {
		return i >= j
	})
	return
}
// compValid checks that an element-wise comparison between a and b is
// well-defined. It returns a non-nil Arrayb carrying the appropriate
// error when either operand is nil, either already holds an error, or
// b's shape cannot be matched against the trailing dimensions of a's
// shape; it returns nil when the comparison may proceed. mthd names the
// calling method for debug messages.
func (a *Array64) compValid(b *Array64, mthd string) (r *Arrayb) {
	switch {
	case a == nil || a.data == nil && a.err == nil:
		r = &Arrayb{err: NilError}
		if debug {
			r.debug = fmt.Sprintf("Nil pointer received by %s", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case b == nil || b.data == nil && b.err == nil:
		r = &Arrayb{err: NilError}
		if debug {
			r.debug = fmt.Sprintf("Array received by %s is a Nil Pointer.", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case a.err != nil:
		// Propagate a pre-existing error on the receiver.
		r = &Arrayb{err: a.err}
		if debug {
			r.debug = fmt.Sprintf("Error in %s arrays", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case b.err != nil:
		// Propagate a pre-existing error on the argument.
		r = &Arrayb{err: b.err}
		if debug {
			r.debug = fmt.Sprintf("Error in %s arrays", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case len(a.shape) < len(b.shape):
		// Broadcasting only allows b to have fewer or equal dimensions.
		r = &Arrayb{err: ShapeError}
		if debug {
			r.debug = fmt.Sprintf("Array received by %s can not be broadcast. Shape: %v Val shape: %v", mthd, a.shape, b.shape)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	}
	// b's shape must match a's trailing dimensions exactly, back to front.
	for i, j := len(b.shape)-1, len(a.shape)-1; i >= 0; i, j = i-1, j-1 {
		if a.shape[j] != b.shape[i] {
			r = &Arrayb{err: ShapeError}
			if debug {
				r.debug = fmt.Sprintf("Array received by %s can not be broadcast. Shape: %v Val shape: %v", mthd, a.shape, b.shape)
				r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
			}
			return r
		}
	}
	return nil
}
// Validation and error checks must be complete before calling comp.
// comp applies f element-wise and returns a boolean array shaped like b.
// NOTE(review): i ranges over b's element count, so when a has more
// dimensions than b (allowed by compValid) only a's leading elements are
// compared — confirm this matches the intended broadcast semantics.
func (a *Array64) comp(b *Array64, f func(i, j float64) bool) (r *Arrayb) {
	r = newArrayB(b.shape...)
	for i := range r.data {
		r.data[i] = f(a.data[i], b.data[i])
	}
	return
}
// Any will return true if any element is non-zero, false otherwise.
// With no axes, the whole array is reduced to a single-element result.
// With axes, each named axis is collapsed by OR-ing across it, in place.
// NOTE(review): valAxis is called with the literal "All" here — looks
// like a copy/paste of the method name used in error messages; confirm.
func (a *Arrayb) Any(axis ...int) *Arrayb {
	if a.valAxis(&axis, "All") {
		return a
	}
	if len(axis) == 0 {
		// Full reduction: short-circuit on the first true element.
		for _, v := range a.data {
			if v {
				return Fullb(true, 1)
			}
		}
		return Fullb(false, 1)
	}
	sort.IntSlice(axis).Sort()
	// n is the output shape: a's shape with the reduced axes removed.
	n := make([]int, len(a.shape)-len(axis))
axis:
	for i, t := 0, 0; i < len(a.shape); i++ {
		for _, w := range axis {
			if i == w {
				continue axis
			}
		}
		n[t] = a.shape[i]
		t++
	}
	t := a.data
	for i := 0; i < len(axis); i++ {
		// maj spans one block containing the reduced axis; min is the
		// stride of one sub-block within it.
		maj, min := a.strides[axis[i]], a.strides[axis[i]]/a.shape[axis[i]]
		// OR every sub-block of the axis into the first sub-block.
		for j := 0; j+maj <= len(t); j += maj {
			for k := j; k < j+min; k++ {
				for z := k + min; z < j+maj; z += min {
					t[k] = t[k] || t[z]
				}
			}
		}
		// Compact the surviving sub-blocks toward the front of the buffer.
		j := 1
		for ; j < len(t)/maj; j++ {
			copy(t[j*min:(j+1)*min], t[j*maj:j*maj+min])
		}
		t = append(t[:0], t[0:j*min]...)
	}
	a.data = t
	a.shape = n
	// Rebuild row-major strides for the collapsed shape.
	tmp := 1
	for i := len(n); i > 0; i-- {
		a.strides[i] = tmp
		tmp *= n[i-1]
	}
	a.strides[0] = tmp
	a.strides = a.strides[0 : len(n)+1]
	return a
}
// All will return true if all elements are non-zero, false otherwise.
// With no axes, the whole array is reduced to a single-element result.
// With axes, each named axis is collapsed by AND-ing across it, in place.
func (a *Arrayb) All(axis ...int) *Arrayb {
	if a.valAxis(&axis, "All") {
		return a
	}
	if len(axis) == 0 {
		// Full reduction: short-circuit on the first false element.
		for _, v := range a.data {
			if !v {
				return Fullb(false, 1)
			}
		}
		return Fullb(true, 1)
	}
	sort.IntSlice(axis).Sort()
	// n is the output shape: a's shape with the reduced axes removed.
	n := make([]int, len(a.shape)-len(axis))
axis:
	for i, t := 0, 0; i < len(a.shape); i++ {
		for _, w := range axis {
			if i == w {
				continue axis
			}
		}
		n[t] = a.shape[i]
		t++
	}
	t := a.data
	for i := 0; i < len(axis); i++ {
		// maj spans one block containing the reduced axis; min is the
		// stride of one sub-block within it.
		maj, min := a.strides[axis[i]], a.strides[axis[i]]/a.shape[axis[i]]
		for j := 0; j+maj <= len(t); j += maj {
			for k := j; k < j+min; k++ {
				for z := k + min; z < j+maj; z += min {
					t[k] = t[k] && t[z]
				}
			}
		}
		// Compact the surviving sub-blocks toward the front of the buffer.
		j := 1
		for ; j < len(t)/maj; j++ {
			// NOTE(review): these locals shadow the receiver a and
			// argument-free b-like names; harmless here but confusing.
			a := t[j*min : (j+1)*min]
			b := t[j*maj : j*maj+min]
			copy(a, b)
		}
		t = append(t[:0], t[0:j*min]...)
	}
	a.data = t
	a.shape = n
	// Rebuild row-major strides for the collapsed shape.
	tmp := 1
	for i := len(n); i > 0; i-- {
		a.strides[i] = tmp
		tmp *= n[i-1]
	}
	a.strides[0] = tmp
	a.strides = append(a.strides[:0], a.strides[0:len(n)+1]...)
	return a
}
// valAxis validates reduction axes for a. It returns true (recording an
// error on a where possible) when the receiver is nil or errored, when
// more axes than dimensions were given, or when any axis is out of
// range; it returns false when the axes are usable.
// NOTE(review): the result of cleanAxis is assigned only to the local
// parameter variable — if cleanAxis returns a new pointer rather than
// mutating *axis in place, the cleaned axes never reach the caller.
// Confirm cleanAxis's contract.
func (a *Arrayb) valAxis(axis *[]int, mthd string) bool {
	axis = cleanAxis(axis)
	switch {
	case a == nil || a.err != nil:
		return true
	case len(*axis) > len(a.shape):
		a.err = ShapeError
		if debug {
			a.debug = fmt.Sprintf("Too many axes received by %s(). Shape: %v Axes: %v", mthd, a.shape, axis)
			a.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return true
	}
	for _, v := range *axis {
		if v < 0 || v >= len(a.shape) {
			a.err = IndexError
			if debug {
				a.debug = fmt.Sprintf("Axis out of range received by %s(). Shape: %v Axes: %v", mthd, a.shape, axis)
				a.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
			}
			return true
		}
	}
	return false
}
// Equals performs boolean '==' element-wise comparison.
func (a *Arrayb) Equals(b *Arrayb) (r *Arrayb) {
	if r = a.compValid(b, "Equals()"); r != nil {
		return r
	}
	same := func(x, y bool) bool { return x == y }
	return a.comp(b, same)
}
// NotEq performs boolean '!=' element-wise comparison.
func (a *Arrayb) NotEq(b *Arrayb) (r *Arrayb) {
	r = a.compValid(b, "NotEq()")
	if r != nil {
		return r
	}
	r = a.comp(b, func(i, j bool) bool {
		return i != j
	})
	return
}
// compValid checks that an element-wise comparison between a and b is
// well-defined, mirroring Array64.compValid. It returns a non-nil Arrayb
// carrying the appropriate error (nil operands, pre-existing errors, or
// incompatible shapes) or nil when the comparison may proceed. mthd
// names the calling method for debug messages.
func (a *Arrayb) compValid(b *Arrayb, mthd string) (r *Arrayb) {
	switch {
	case a == nil || a.data == nil && a.err == nil:
		r = &Arrayb{err: NilError}
		if debug {
			r.debug = fmt.Sprintf("Nil pointer received by %s", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case b == nil || b.data == nil && b.err == nil:
		r = &Arrayb{err: NilError}
		if debug {
			r.debug = fmt.Sprintf("Array received by %s is a Nil Pointer.", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case a.err != nil:
		r = &Arrayb{err: a.err}
		if debug {
			r.debug = fmt.Sprintf("Error in %s arrays", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case b.err != nil:
		r = &Arrayb{err: b.err}
		if debug {
			r.debug = fmt.Sprintf("Error in %s arrays", mthd)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	case len(a.shape) < len(b.shape):
		// Broadcasting only allows b to have fewer or equal dimensions.
		r = &Arrayb{err: ShapeError}
		if debug {
			r.debug = fmt.Sprintf("Array received by %s can not be broadcast. Shape: %v Val shape: %v", mthd, a.shape, b.shape)
			r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
		}
		return r
	}
	// b's shape must match a's trailing dimensions exactly, back to front.
	for i, j := len(b.shape)-1, len(a.shape)-1; i >= 0; i, j = i-1, j-1 {
		if a.shape[j] != b.shape[i] {
			r = &Arrayb{err: ShapeError}
			if debug {
				r.debug = fmt.Sprintf("Array received by %s can not be broadcast. Shape: %v Val shape: %v", mthd, a.shape, b.shape)
				r.stack = string(stackBuf[:runtime.Stack(stackBuf, false)])
			}
			return r
		}
	}
	return nil
}
// Validation and error checks must be complete before calling comp.
// comp applies f element-wise and returns a boolean array shaped like b.
// NOTE(review): as with Array64.comp, only a's leading elements are used
// when a has more dimensions than b — confirm intended semantics.
func (a *Arrayb) comp(b *Arrayb, f func(i, j bool) bool) (r *Arrayb) {
	r = newArrayB(b.shape...)
	for i := range r.data {
		r.data[i] = f(a.data[i], b.data[i])
	}
	return
}
package mound
import (
colorful "github.com/lucasb-eyer/go-colorful"
log "github.com/sirupsen/logrus"
)
// Direction determines the next location of the turmite.
type Direction int
// [N]orth [E]ast [S]outh [W]est
// Clockwise movement starting at N (noon)
const (
	North Direction = iota
	East
	South
	West
)
// Turn is relative to the direction of the turmite
type Turn int
// [R]ight [L]eft [U]-turn [N]o-turn (in declaration order)
const (
	R Turn = iota
	L
	U
	N
)
// Move defines the color to replace under the turmite and the direction the turmite should move
type Move struct {
	C colorful.Color // color written to the current cell
	T Turn           // turn to take relative to the current heading
}
// Rule is a single direction in the form of a dictionary
// mapping the color found under the turmite to the Move to perform.
type Rule map[colorful.Color]Move
// Turmite is an individual that exists inside a grid
// X and Y are positions of size defined in a the Block of a Grid
type Turmite struct {
	Direction Direction // current heading
	Location  int       // linear index into the grid
	Rule      Rule      // color -> Move lookup table
}
// CreateTurmite constructs a Turmite heading in direction d at linear
// location l, governed by rule set r.
func CreateTurmite(d Direction, l int, r Rule) *Turmite {
	return &Turmite{
		Direction: d,
		Location:  l,
		Rule:      r,
	}
}
// CreateRules allocates an empty Rule map, ready for AddRule calls.
func CreateRules() *Rule {
	r := make(Rule)
	return &r
}
// AddRule registers that a cell colored c1 should be repainted c2 and
// that the turmite should take turn t when standing on it.
func (r Rule) AddRule(c1 colorful.Color, c2 colorful.Color, t Turn) {
	r[c1] = Move{C: c2, T: t}
}
// findMove returns the move associated with color c in the turmite's
// rule set, and whether such a rule exists. When no rule matches, the
// zero-value Move is returned.
func (t Turmite) findMove(c colorful.Color) (Move, bool) {
	m, ok := t.Rule[c]
	// Logs at Info level on every lookup — noisy for production use.
	log.WithFields(log.Fields{
		"color":      c.Hex(),
		"move color": m.C.Hex(),
		"turn ":      m.T,
		"ok":         ok,
	}).Info("move associated with color and turmite")
	return m, ok
}
// move computes the turmite's new heading and grid position after taking
// turn on the given grid. Turns are applied as clockwise quarter-turn
// offsets modulo 4 (R=+1, L=+3, U=+2, N=+0); positions are linear
// indices that wrap at the grid edges.
// NOTE(review): the East wrap (((grid.X-1)*grid.Y)+1) and South wrap
// (position+grid.Y-((grid.X-1)*grid.Y)) do not obviously land on the
// same row/column as a torus wrap would — confirm the intended topology.
func (t Turmite) move(turn Turn, grid Grid) (Direction, int) {
	log.WithFields(log.Fields{
		"turn":      turn,
		"direction": t.Direction,
		"location":  t.Location,
	}).Info("turmite state before move")
	direction := t.Direction
	switch turn {
	case R:
		direction = direction + 1
	case L:
		direction = direction + 3
	case U:
		direction = direction + 2
	}
	direction = direction % 4
	max := grid.X * grid.Y
	position := t.Location
	// Update position - wrap if needed
	switch direction {
	case North:
		if (position - grid.Y) >= 0 {
			position = position - grid.Y
		} else {
			position = ((grid.X - 1) * grid.Y) + position
		}
	case East:
		if position+1 < max {
			position = position + 1
		} else {
			position = ((grid.X - 1) * grid.Y) + 1
		}
	case South:
		if (position + grid.Y) < max {
			position = position + grid.Y
		} else {
			position = position + grid.Y - ((grid.X - 1) * grid.Y)
		}
	case West:
		if position != 0 {
			position = position - 1
		} else {
			position = (grid.X - 1) * grid.Y
		}
	}
	log.WithFields(log.Fields{
		"turn":      turn,
		"direction": direction,
		"location":  position,
	}).Info("turmite state after move")
	return direction, position
}
package ast
/*
* GroupingSet -
* representation of CUBE, ROLLUP and GROUPING SETS clauses
*
* In a Query with grouping sets, the groupClause contains a flat list of
* SortGroupClause nodes for each distinct expression used. The actual
* structure of the GROUP BY clause is given by the groupingSets tree.
*
* In the raw parser output, GroupingSet nodes (of all types except SIMPLE
* which is not used) are potentially mixed in with the expressions in the
* groupClause of the SelectStmt. (An expression can't contain a GroupingSet,
* but a list may mix GroupingSet and expression nodes.) At this stage, the
* content of each node is a list of expressions, some of which may be RowExprs
* which represent sublists rather than actual row constructors, and nested
* GroupingSet nodes where legal in the grammar. The structure directly
* reflects the query syntax.
*
* In parse analysis, the transformed expressions are used to build the tlist
* and groupClause list (of SortGroupClause nodes), and the groupingSets tree
* is eventually reduced to a fixed format:
*
* EMPTY nodes represent (), and obviously have no content
*
* SIMPLE nodes represent a list of one or more expressions to be treated as an
* atom by the enclosing structure; the content is an integer list of
* ressortgroupref values (see SortGroupClause)
*
* CUBE and ROLLUP nodes contain a list of one or more SIMPLE nodes.
*
* SETS nodes contain a list of EMPTY, SIMPLE, CUBE or ROLLUP nodes, but after
* parse analysis they cannot contain more SETS nodes; enough of the syntactic
* transforms of the spec have been applied that we no longer have arbitrarily
* deep nesting (though we still preserve the use of cube/rollup).
*
* Note that if the groupingSets tree contains no SIMPLE nodes (only EMPTY
* nodes at the leaves), then the groupClause will be empty, but this is still
* an aggregation query (similar to using aggs or HAVING without GROUP BY).
*
* As an example, the following clause:
*
* GROUP BY GROUPING SETS ((a,b), CUBE(c,(d,e)))
*
* looks like this after raw parsing:
*
* SETS( RowExpr(a,b) , CUBE( c, RowExpr(d,e) ) )
*
* and parse analysis converts it to:
*
* SETS( SIMPLE(1,2), CUBE( SIMPLE(3), SIMPLE(4,5) ) )
*/
// GroupingSetKind identifies the variant of a GroupingSet node; see the
// format description in the comment above.
type GroupingSetKind uint
const (
	GROUPING_SET_EMPTY GroupingSetKind = iota // represents ()
	GROUPING_SET_SIMPLE                       // flat list treated as an atom
	GROUPING_SET_ROLLUP
	GROUPING_SET_CUBE
	GROUPING_SET_SETS
)
package minecraftColor
import (
"image/color"
)
// Convert hex color to RGBA color.
func toRGBA(i int) color.RGBA {
return color.RGBA{
R: uint8(i / 0x10000),
G: uint8(i / 0x100),
B: uint8(i),
A: 0xFF,
}
}
var (
BlackRGBA = toRGBA(0)
// https://minecraft-el.gamepedia.com/Biome/ID
Biome = [256]color.RGBA{
toRGBA(0x000070), // Ocean
toRGBA(0x8DB360), // Plains
toRGBA(0xFA9418), // Desert
toRGBA(0x606060), // Extreme Hills
toRGBA(0x056621), // Forest
toRGBA(0x0B6659), // Taiga
toRGBA(0x07F9B2), // Swampland
toRGBA(0x0000FF), // River
toRGBA(0xFF0000), // Hell
toRGBA(0x8080FF), // The End (Sky)
toRGBA(0x9090A0), // FrozenOcean
toRGBA(0xA0A0FF), // FrozenRiver
toRGBA(0xFFFFFF), // Ice Plains
toRGBA(0xA0A0A0), // Ice Mountains
toRGBA(0xFF00FF), // MushroomIsland
toRGBA(0xA000FF), // MushroomIslandShore
toRGBA(0xFADE55), // Beach
toRGBA(0xD25F12), // DesertHills
toRGBA(0x22551C), // ForestHills
toRGBA(0x163933), // TaigaHills
toRGBA(0x72789A), // Extreme Hills Edge
toRGBA(0x537B09), // Jungle
toRGBA(0x2C4205), // JungleHills
toRGBA(0x628B17), // JungleEdge
toRGBA(0x000030), // Deep Ocean
toRGBA(0xA2A284), // Stone Beach
toRGBA(0xFAF0C0), // Cold Beach
toRGBA(0x307444), // Birch Forest
toRGBA(0x1F5F32), // Birch Forest Hills
toRGBA(0x40511A), // Roofed Forest
toRGBA(0x31554A), // Cold Taiga
toRGBA(0x243F36), // Cold Taiga Hills
toRGBA(0x596651), // Mega Taiga
toRGBA(0x545F3E), // Mega Taiga Hills
toRGBA(0x507050), // Extreme Hills+
toRGBA(0xBDB25F), // Savanna
toRGBA(0xA79D64), // Savanna Plateau
toRGBA(0xD94515), // Mesa
toRGBA(0xB09765), // Mesa Plateau F
toRGBA(0xCA8C65), // Mesa Plateau
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA, // void
BlackRGBA, // Plains M
toRGBA(0xB5DB88), // Sunflower Plains
toRGBA(0xFFBC40), // Desert M
toRGBA(0x888888), // Extreme Hills M
toRGBA(0x6A7425), // Flower Forest
toRGBA(0x596651), // Taiga M
toRGBA(0x2FFFDA), // Swampland M
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
toRGBA(0xB4DCDC), // Ice Plains Spikes
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
BlackRGBA,
toRGBA(0x7BA331), // Jungle M
BlackRGBA,
toRGBA(0x8AB33F), // JungleEdge M
BlackRGBA,
BlackRGBA,
BlackRGBA,
toRGBA(0x589C6C), // Birch Forest M
toRGBA(0x47875A), // Birch Forest Hills M
toRGBA(0x687942), // Roofed Forest M
toRGBA(0x597D72), // Cold Taiga M
BlackRGBA,
toRGBA(0x6B5F4C), // Mega Spruce Taiga
toRGBA(0x6D7766), // Redwood Taiga Hills M
toRGBA(0x789878), // Extreme Hills+ M
toRGBA(0xE5DA87), // Savanna M
toRGBA(0xCFC58C), // Savanna Plateau M
toRGBA(0xFF6D3D), // Mesa (Bryce)
toRGBA(0xD8BF8D), // Mesa Plateau F M
toRGBA(0xF2B48D), // Mesa Plateau M
}
) | pkg/minecraftColor/biome.go | 0.511229 | 0.685457 | biome.go | starcoder |
package scp03
import (
"fmt"
"github.com/llkennedy/globalplatform/goimpl/nist/sp800108"
)
// KDF is a wrapper around SP800-108's KBKDF with SCP03-specified parameters
type KDF struct{}
// DataDerivationConstant is a data derivation constant for KDF functions
type DataDerivationConstant byte
const (
// DDCCardCryptogram is a Card Cryptogram Data Derivation Constant
DDCCardCryptogram DataDerivationConstant = 0b00000000
// DDCHostCryptogram is a Host Cryptogram Data Derivation Constant
DDCHostCryptogram DataDerivationConstant = 0b00000001
// DDCCardChallengeGeneration is a Card Challenge Generation Data Derivation Constant
DDCCardChallengeGeneration DataDerivationConstant = 0b00000010
// DDCSENC is a S-ENC derivation Data Derivation Constant
DDCSENC DataDerivationConstant = 0b00000100
// DDCSMAC is a S-MAC derivation Data Derivation Constant
DDCSMAC DataDerivationConstant = 0b00000110
// DDCSRMAC is a S-RMAC derivation Data Derivation Constant
DDCSRMAC DataDerivationConstant = 0b00000111
)
// KDFOutputLength is a valid output length for the KDF
type KDFOutputLength uint16
const (
// KDFOutput64 is a 64 bit output from the KDF
KDFOutput64 = 0x0040
// KDFOutput128 is a 128 bit output from the KDF
KDFOutput128 = 0x0080
// KDFOutput192 is a 192 bit output from the KDF
KDFOutput192 = 0x00C0
// KDFOutput256 is a 256 bit output from the KDF
KDFOutput256 = 0x0100
)
// Derive derives data from a base key and input data
func (k *KDF) Derive(key []byte, rawKDF sp800108.KDF, label [11]byte, ddc DataDerivationConstant, length KDFOutputLength, context []byte /*FIXME: "further specified"*/) ([]byte, error) {
if key == nil || rawKDF == nil {
return nil, fmt.Errorf("KDF: nil parameters")
}
switch ddc {
case DDCCardChallengeGeneration, DDCCardCryptogram, DDCHostCryptogram, DDCSENC, DDCSMAC, DDCSRMAC:
// Supported value
default:
return nil, fmt.Errorf("KDF: Invalid data derivation constant: %x", ddc)
}
lengthData := []byte{0, 0}
switch length {
case KDFOutput64:
lengthData[1] = 0x40
case KDFOutput128:
lengthData[1] = 0x80
case KDFOutput192:
lengthData[1] = 0xC0
case KDFOutput256:
lengthData[0] = 0x01
default:
return nil, fmt.Errorf("KDF: Invalid output length: %d", length)
}
prf := &sp800108.PRFCMAC{}
ordering := []sp800108.InputStringOrdering{sp800108.InputOrderLabel, sp800108.InputOrderEmptySeparator, sp800108.InputOrderL, sp800108.InputOrderCounter, sp800108.InputOrderCounter}
return rawKDF.Derive(prf, sp800108.CounterLength8, key, label[:], context, lengthData, ordering)
} | goimpl/scp03/kdf.go | 0.604516 | 0.410047 | kdf.go | starcoder |
package lucene42
import (
"github.com/jtejido/golucene/core/codec/compressing"
)
// lucene42/Lucene42TermVectorsFormat.java
/*
Lucene 4.2 term vectors format.
Very similarly to Lucene41StoredFieldsFormat, this format is based on
compressed chunks of data, with document-level granularity so that a
document can never span across distinct chunks. Moreover, data is
made as compact as possible:
- textual data is compressed using the very light LZ4 compression
algorithm,
- binary data is written using fixed-size blocks of packed ints.
Term vectors are stored using two files
- a data file where terms, frequencies, positions, offsets and
payloads are stored,
- an index file, loaded into memory, used to locate specific
documents in the data file.
Looking up term vectors for any document requires at most 1 disk seek.
File formats
1. vector_data
A vector data file (extension .tvd). This file stores terms,
frequencies, positions, offsets and payloads for every document. Upon
writing a new segment, it accumulates data into memory until the
buffer used to store terms and payloads grows beyond 4KB. Then it
flushes all metadata, terms and positions to disk using LZ4
compression for terms and payloads and blocks of packed ints for
positions
Here is more detailed description of the field data file format:
- VectorData (.tvd) --> <Header>, PackedIntsVersion, ChunkSize, <Chunk>^ChunkCount
- Header --> CodecHeader
- PackedIntsVersion --> PackedInts.CURRENT_VERSION as a VInt
- ChunkSize is the number of bytes of terms to accumulate before
flushing, as a VInt
- ChunkCount is not known in advance and is the number of chunks
necessary to store all document of the segment
- Chunk --> DocBase, ChunkDocs, <NumFields>, <FieldNums>, <FieldNumOffs>, <Flags>,
<NumTerms>, <TermLengths>, <TermFreqs>, <Position>, <StartOffsets>, <Lengths>,
<PayloadLengths>, <TermAndPayloads>
- DocBase is the ID of the first doc of the chunk as a VInt
- ChunkDocs is the number of documents in the chunk
- NumFields --> DocNumFields^ChunkDocs
- DocNUmFields is the number of fields for each doc, written as a
VInt if ChunkDocs==1 and as a PackedInts array otherwise
- FieldNums --> FieldNumDelta^TotalFields, as a PackedInts array
- FieldNumOff is the offset of the field number in FieldNums
- TotalFields is the total number of fields (sum of the values of NumFields)
- Flags --> Bit <FieldFlags>
- Bit is a single bit which when true means that fields have the same
options for every document in the chunk
- FieldFlags --> if Bit==1: Flag^TotalDistinctFields else Flag^TotalFields
- Flag: a 3-bits int where:
- the first bit means that the field has positions
- the second bit means that the field has offsets
- the third bit means that the field has payloads
- NumTerms --> FieldNumTerms^TotalFields
- FieldNumTerms: the number of terms for each field, using blocks of 64 packed ints
- TermLengths --> PrefixLength^TotalTerms SuffixLength^TotalTerms
- TotalTerms: total number of terms (sum of NumTerms)
- SuffixLength: length of the term of a field, the common prefix with
the previous term otherwise using blocks of 64 packed ints
- TermFreqs --> TermFreqMinus1^TotalTerms
- TermFreqMinus1: (frequency - 1) for each term using blocks of 64 packed ints
- Positions --> PositionDelta^TotalPositions
- TotalPositions is the sum of frequencies of terms of all fields that have positions
- PositionDelta: the absolute position for the first position of a
term, and the difference with the previous positions for following
positions using blocks of 64 packed ints
- StartOffsets --> (AvgCharsPerTerm^TotalDistinctFields) StartOffsetDelta^TotalOffsets
- TotalOffsets is the sum of frequencies of terms of all fields that have offsets
- AvgCharsPerTerm: average number of chars per term, encoded as a
float32 on 4 bytes. They are not present if no field has both
positions and offsets enabled.
- StartOffsetDelta: (startOffset - previousStartOffset - AvgCharsPerTerm
* PositionDelta). previousStartOffset is 0 for the first offset and
AvgCharsPerTerm is 0 if the field has no positions using blocks of
64 packed ints
- Lengths --> LengthMinusTermLength^TotalOffsets
- LengthMinusTermLength: (endOffset - startOffset - termLenght) using blocks of 64 packed ints
- PayloadLengths --> PayloadLength^TotalPayloads
- TotalPayloads is the sum of frequencies of terms of all fields that have payloads
- PayloadLength is the payload length encoded using blocks of 64 packed ints
- TermAndPayloads --> LZ4-compressed representation of <FieldTermsAndPayLoads>^TotalFields
- FieldTermsAndPayLoads --> Terms (Payloads)
- Terms: term bytes
- Payloads: payload bytes (if the field has payloads)
2. vector_index
An index file (extension .tvx).
- VectorIndex (.tvx) --> <Header>, <ChunkIndex>
- Header --> CodecHeader
- ChunkIndex: See CompressingStoredFieldsIndexWriter
*/
// Lucene42TermVectorsFormat is the Lucene 4.2 term vectors format,
// implemented as a thin wrapper around the compressing term vectors
// implementation (LZ4 chunks + packed ints) described above.
type Lucene42TermVectorsFormat struct {
	*compressing.CompressingTermVectorsFormat
}
// NewLucene42TermVectorsFormat builds the format with FAST compression
// and a 4 KB (1<<12) chunk size.
// NOTE(review): the format name passed here is "Lucene41StoredFields",
// which matches the stored-fields format rather than a term-vectors
// name — confirm against the upstream codec this port mirrors.
func NewLucene42TermVectorsFormat() *Lucene42TermVectorsFormat {
	return &Lucene42TermVectorsFormat{
		compressing.NewCompressingTermVectorsFormat("Lucene41StoredFields", "", compressing.COMPRESSION_MODE_FAST, 1<<12),
	}
}
package main
import (
"image"
"image/color"
"image/gif"
"log"
"math"
"os"
"github.com/unixpickle/model3d/model2d"
"github.com/unixpickle/essentials"
"github.com/unixpickle/model3d/model3d"
"github.com/unixpickle/model3d/render3d"
)
const (
FrameSkip = 4
ImageSize = 200
)
// main renders an animation of a box being bent by As-Rigid-As-Possible
// (ARAP) deformation. Three Bezier curves (keyed on t in [0, 3)) drive
// the top face's rotation about Z and its X/Z translation; every
// FrameSkip-th deformed mesh is rendered into a frame of output.gif.
func main() {
	mesh := CreateMesh()
	// Keyframe curves: X is animation time, Y is the animated quantity.
	rotation := model2d.BezierCurve{
		model2d.Coord{X: 0, Y: 0},
		model2d.Coord{X: 0.5, Y: math.Pi / 4},
		model2d.Coord{X: 1.0, Y: math.Pi / 4},
		model2d.Coord{X: 2.0, Y: math.Pi / 4},
		model2d.Coord{X: 2.5, Y: 0.1},
		model2d.Coord{X: 3.0, Y: 0},
	}
	translateX := model2d.BezierCurve{
		model2d.Coord{X: 0, Y: 0},
		model2d.Coord{X: 0.5, Y: 0},
		model2d.Coord{X: 1.0, Y: -0.6},
		model2d.Coord{X: 2.0, Y: -0.5},
		model2d.Coord{X: 2.5, Y: 0.6},
		model2d.Coord{X: 3.0, Y: 0.5},
	}
	translateZ := model2d.BezierCurve{
		model2d.Coord{X: 0, Y: 0},
		model2d.Coord{X: 0.5, Y: -0.3},
		model2d.Coord{X: 1.0, Y: 0},
		model2d.Coord{X: 1.5, Y: 0.3},
		model2d.Coord{X: 2.0, Y: 0},
		model2d.Coord{X: 3.0, Y: 0},
	}
	a := model3d.NewARAP(mesh)
	// The sequential deformer carries state between calls, so each frame
	// deforms incrementally from the previous one.
	df := a.SeqDeformer()
	var g gif.GIF
	var frame int
	for t := 0.0; t < 3.0; t += 0.05 {
		log.Println("Frame", frame, "...")
		rotation := rotation.EvalX(t)
		translate := model3d.Coord3D{X: translateX.EvalX(t), Z: translateZ.EvalX(t)}
		transform := model3d.JoinedTransform{
			model3d.Rotation(model3d.Z(1), rotation),
			&model3d.Translate{Offset: translate},
		}
		deformed := df(Constraints(mesh, transform))
		// Only every FrameSkip-th frame is rendered, with a matching delay.
		if frame%FrameSkip == 0 {
			g.Image = append(g.Image, RenderFrame(deformed))
			g.Delay = append(g.Delay, 10*FrameSkip)
		}
		frame++
	}
	w, err := os.Create("output.gif")
	essentials.Must(err)
	defer w.Close()
	essentials.Must(gif.EncodeAll(w, &g))
}
// RenderFrame ray-casts the mesh from a fixed camera lit by a single
// distant point light, then converts the render to a 256-level grayscale
// paletted image suitable for use as a GIF frame.
func RenderFrame(mesh *model3d.Mesh) *image.Paletted {
	renderer := &render3d.RayCaster{
		Camera: render3d.NewCameraAt(model3d.Coord3D{Y: -3}, model3d.Coord3D{}, math.Pi/3.6),
		Lights: []*render3d.PointLight{
			{
				Origin: model3d.Coord3D{Y: -100},
				Color:  render3d.NewColor(1.0),
			},
		},
	}
	img := render3d.NewImage(ImageSize, ImageSize)
	renderer.Render(img, render3d.Objectify(mesh, nil))
	// Build a 256-entry grayscale palette and quantize the render into it.
	var palette []color.Color
	for i := 0; i < 256; i++ {
		palette = append(palette, color.Gray{Y: uint8(i)})
	}
	fullImg := img.Gray()
	outImg := image.NewPaletted(image.Rect(0, 0, img.Width, img.Height), palette)
	for y := 0; y < img.Height; y++ {
		for x := 0; x < img.Width; x++ {
			outImg.Set(x, y, fullImg.At(x, y))
		}
	}
	return outImg
}
// CreateMesh builds a hollow box (half-extents 0.4 in X/Y, 1.0 in Z)
// tessellated into small quads of side squareSize, repairs it, fixes the
// normals, and returns it rotated slightly about Z so the render does
// not face the camera head-on.
func CreateMesh() *model3d.Mesh {
	box := model3d.NewMesh()
	squareSize := 0.05
	width := 0.4
	height := 1.0
	// addQuad emits two triangles forming a square at p, perpendicular to
	// the given axis (0=X, 1=Y, otherwise Z).
	addQuad := func(p model3d.Coord3D, normalAxis int) {
		ax1 := model3d.X(squareSize)
		ax2 := model3d.Y(squareSize)
		if normalAxis == 0 {
			ax1 = model3d.Z(squareSize)
		} else if normalAxis == 1 {
			ax2 = model3d.Z(squareSize)
		}
		box.Add(&model3d.Triangle{p, p.Add(ax1), p.Add(ax2)})
		box.Add(&model3d.Triangle{p.Add(ax1), p.Add(ax2), p.Add(ax1).Add(ax2)})
	}
	// All but top two faces.
	for x := -width; x < width-1e-8; x += squareSize {
		for z := -height; z < height-1e-8; z += squareSize {
			for _, y := range []float64{-width, width} {
				addQuad(model3d.XYZ(x, y, z), 1)
				addQuad(model3d.XYZ(y, x, z), 0)
			}
		}
	}
	// Top two faces.
	for x := -width; x < width-1e-8; x += squareSize {
		for y := -width; y < width-1e-8; y += squareSize {
			addQuad(model3d.XYZ(x, y, -height), 2)
			addQuad(model3d.XYZ(x, y, height), 2)
		}
	}
	// Fix holes due to rounding errors.
	box = box.Repair(1e-8)
	// Fix normals due to arbitrary triangle ordering.
	box, _ = box.RepairNormals(1e-8)
	// Don't let the box face the camera head on,
	// allowing us to see more detail.
	return box.Rotate(model3d.Z(1), 0.4)
}
// Constraints builds the ARAP constraint set for one animation step:
// vertices on the mesh's bottom (minimum Z) are pinned in place, and
// vertices on the top (maximum Z) are moved by transform. All other
// vertices are left free for the solver.
func Constraints(mesh *model3d.Mesh, transform model3d.Transform) model3d.ARAPConstraints {
	min, max := mesh.Min(), mesh.Max()
	control := model3d.ARAPConstraints{}
	for _, v := range mesh.VertexSlice() {
		if v.Z == min.Z {
			control[v] = v
		} else if v.Z == max.Z {
			control[v] = transform.Apply(v)
		}
	}
	return control
}
package state
import (
"context"
"time"
)
// State represents the state of the current state machine.
type State interface {
	// Do performs this state's work and returns the follow-up states,
	// or an error if the step failed.
	Do(ctx context.Context) (States, error)
}
// OnFailure is invoked when a permanent failure occurs during processing.
type OnFailure interface {
	// Fail handles the permanent failure err and returns any
	// compensating states to execute.
	Fail(ctx context.Context, err error) States
}
// States represents a collection of states returned from a particular machine state.
type States struct {
	// States contains the states that will be independently executed by the state machine.
	States []State
	// OnFailure will be invoked if non-nil if any of the states or their following states
	// fail permanently (i.e. after all retry attempts).
	// If additional OnFailure functions are defined by later states then all functions
	// will be invoked in reverse order (i.e. last defined will be invoked first).
	OnFailure OnFailure
}
// NewStates returns a States instance containing the given state
// operations, silently dropping any nil entries.
func NewStates(states ...State) States {
	var kept []State
	for _, s := range states {
		if s == nil {
			continue
		}
		kept = append(kept, s)
	}
	return States{States: kept}
}
// Empty returns true if the States instance is empty.
func (ss States) Empty() bool {
	return len(ss.States) == 0
}
// StateFunc defines a function that implements the State interface.
type StateFunc func(ctx context.Context) (States, error)
// Do implements the State interface.
func (sf StateFunc) Do(ctx context.Context) (States, error) {
	return sf(ctx)
}
// OnFailureFunc defines a function that implements the OnFailure interface.
type OnFailureFunc func(ctx context.Context, err error) States
// Fail implements the OnFailure interface.
func (off OnFailureFunc) Fail(ctx context.Context, err error) States {
	return off(ctx, err)
}
// HasAfter is an interface defining a State that is to be executed after a given time.
type HasAfter interface {
	// After returns the earliest time at which the state may run.
	After() time.Time
}
// AfterState defines a state operation that is invoked after a given duration.
type AfterState struct {
t time.Time
state State
}
// NewAfterState returns a State instance that invokes the given state operation at or after
// the given time. Nested AfterState wrappers are unwrapped so only the
// outermost time t applies.
func NewAfterState(t time.Time, state State) *AfterState {
	for nested, ok := state.(*AfterState); ok; nested, ok = state.(*AfterState) {
		state = nested.state
	}
	return &AfterState{
		t:     t,
		state: state,
	}
}
// Do implements the State interface by delegating to the wrapped state.
// NOTE(review): the scheduled time is not enforced here; presumably the state
// machine consults After before calling Do — confirm.
func (as AfterState) Do(ctx context.Context) (States, error) {
	return as.state.Do(ctx)
}
// After implements the HasAfter interface.
func (as AfterState) After() time.Time {
	return as.t
}
// NewErrorState returns a StateFunc that will return the given error.
func NewErrorState(err error) StateFunc {
	return func(ctx context.Context) (States, error) {
		return Error(err)
	}
}
// After performs the state function after a given duration.
// The deadline is resolved now (in UTC), so the delay is relative to the
// moment of scheduling.
func After(d time.Duration, state State) (States, error) {
	return Single(NewAfterState(time.Now().UTC().Add(d), state))
}
// None returns a state that has no additional operations.
func None() (States, error) {
	return NewStates(), nil
}
// Single returns a state with a single specified operation.
func Single(state State) (States, error) {
	return NewStates(state), nil
}
// Many returns a state with multiple operations that are executed independently.
func Many(states ...State) (States, error) {
	return NewStates(states...), nil
}
// Error returns a state that simply returns the given error.
func Error(err error) (States, error) {
	return NewStates(), err
}
// WithFailure returns a state that handles the failure of the given state by invoking
// the given OnFailure func.
func WithFailure(state State, onFailure OnFailure) StateFunc {
return func(ctx context.Context) (States, error) {
sts := NewStates(state)
sts.OnFailure = onFailure
return sts, nil
}
} | state/state.go | 0.800848 | 0.519704 | state.go | starcoder |
package weather
// K returns the temperature in kelvin.
// Temperature is stored internally in kelvin; int conversion truncates.
func (t Temperature) K() int {
	return int(t)
}
// TemperatureFromK creates a kelvin temperature value.
func TemperatureFromK(k float64) Temperature {
	return Temperature(k)
}
// C returns the temperature in degrees celsius (truncated to int).
func (t Temperature) C() int {
	return int(float64(t) - 273.15)
}
// TemperatureFromC creates a temperature value from degrees celsius.
func TemperatureFromC(c float64) Temperature {
	return Temperature(c + 273.15)
}
// F returns the temperature in degrees fahrenheit (truncated to int).
func (t Temperature) F() int {
	c := float64(t) - 273.15
	return int(c*1.8 + 32)
}
// TemperatureFromF creates a temperature value from degrees fahrenheit.
func TemperatureFromF(f float64) Temperature {
	return TemperatureFromC((f - 32.0) / 1.8)
}
// Millibar returns pressure in millibars (hPa).
// Pressure is stored internally in millibars; the From* constructors convert
// into millibars using (approximate) reciprocal factors.
func (p Pressure) Millibar() float64 {
	return float64(p)
}
// PressureFromMillibar creates a millibar pressure value.
func PressureFromMillibar(mb float64) Pressure {
	return Pressure(mb)
}
// Pascal returns pressure in pascals.
func (p Pressure) Pascal() float64 {
	return p.Millibar() * 100
}
// PressureFromPascal creates a pressure value from pascals.
func PressureFromPascal(pa float64) Pressure {
	return Pressure(pa * 0.01)
}
// Atm returns pressure in atmospheres.
func (p Pressure) Atm() float64 {
	return p.Millibar() * 0.000986923
}
// PressureFromAtm creates a pressure value from atmospheres.
func PressureFromAtm(atm float64) Pressure {
	return Pressure(atm * 1013.25)
}
// Torr returns pressure in torr. ~= mmHg.
func (p Pressure) Torr() float64 {
	return p.Millibar() * 0.750062
}
// PressureFromTorr creates a pressure value from torr.
func PressureFromTorr(t float64) Pressure {
	return Pressure(t * 1.33322)
}
// InHg returns pressure in inches of mercury (inHg).
func (p Pressure) InHg() float64 {
	return p.Millibar() * 0.0295301
}
// PressureFromInHg creates a pressure value from inches of mercury.
func PressureFromInHg(inHg float64) Pressure {
	return Pressure(inHg * 33.8638)
}
// Psi returns pressure in pounds per square inch.
func (p Pressure) Psi() float64 {
	return p.Millibar() * 0.01450377
}
// PressureFromPsi creates a pressure value from pounds per square inch.
func PressureFromPsi(psi float64) Pressure {
	return Pressure(psi * 68.9476)
}
// Ms returns the speed in meters per second.
// Speed is stored internally in meters per second.
func (s Speed) Ms() float64 {
	return float64(s)
}
// SpeedFromMs creates a meters/second speed value.
func SpeedFromMs(ms float64) Speed {
	return Speed(ms)
}
// Kmh returns the speed in kilometers per hour.
func (s Speed) Kmh() float64 {
	return s.Ms() * 3.6
}
// SpeedFromKmh creates a kilometers/hour speed value.
func SpeedFromKmh(kmh float64) Speed {
	return Speed(kmh / 3.6)
}
// Mph returns the speed in miles per hour.
func (s Speed) Mph() float64 {
	return s.Ms() * 2.23694
}
// SpeedFromMph creates a miles/hour speed value.
func SpeedFromMph(mph float64) Speed {
	return Speed(mph / 2.23694)
}
// Knots returns the speed in knots.
func (s Speed) Knots() float64 {
	return s.Ms() * 1.94384
}
// SpeedFromKnots creates a knots speed value.
func SpeedFromKnots(kts float64) Speed {
	return Speed(kts / 1.94384)
}
// Deg returns the direction in meteorological degrees.
func (d Direction) Deg() int {
	return int(d)
}
// Cardinal returns the cardinal direction.
func (d Direction) Cardinal() string {
cardinal := ""
deg := d.Deg()
m := 34 // rounded from (90/4 + 90/8)
// primary cardinal direction first. N, E, S, W.
switch {
case deg < m || deg > 360-m:
cardinal = "N"
case 90-m < deg && deg < 90+m:
cardinal = "E"
case 180-m < deg && deg < 180+m:
cardinal = "S"
case 270-m < deg && deg < 270+m:
cardinal = "W"
}
// Now append the midway points. NE, NW, SE, SW.
switch {
case 45-m < deg && deg < 45+m:
cardinal += "NE"
case 135-m < deg && deg < 135+m:
cardinal += "SE"
case 225-m < deg && deg < 225+m:
cardinal += "SW"
case 315-m < deg && deg < 315+m:
cardinal += "NW"
}
return cardinal
} | modules/weather/units.go | 0.91816 | 0.646739 | units.go | starcoder |
package urlstruct
import (
"database/sql"
"encoding"
"fmt"
"reflect"
"strconv"
"time"
)
// Cached reflect.Type values used by scanner to match supported field types.
var (
	textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
	timeType            = reflect.TypeOf((*time.Time)(nil)).Elem()
	durationType        = reflect.TypeOf((*time.Duration)(nil)).Elem()
	nullBoolType        = reflect.TypeOf((*sql.NullBool)(nil)).Elem()
	nullInt64Type       = reflect.TypeOf((*sql.NullInt64)(nil)).Elem()
	nullFloat64Type     = reflect.TypeOf((*sql.NullFloat64)(nil)).Elem()
	nullStringType      = reflect.TypeOf((*sql.NullString)(nil)).Elem()
	mapStringStringType = reflect.TypeOf((*map[string]string)(nil)).Elem()
)
// scannerFunc decodes one or more raw URL values into the reflect.Value of a
// struct field.
type scannerFunc func(v reflect.Value, values []string) error
// scanner returns the scannerFunc for the given type, or nil when the type is
// unsupported. time.Time is matched before the TextUnmarshaler checks, so
// scanTime (which also accepts Unix seconds) takes precedence over time.Time's
// own UnmarshalText.
func scanner(typ reflect.Type) scannerFunc {
	if typ == timeType {
		return scanTime
	}
	if typ.Implements(textUnmarshalerType) {
		return scanTextUnmarshaler
	}
	if reflect.PtrTo(typ).Implements(textUnmarshalerType) {
		return scanTextUnmarshalerAddr
	}
	// Exact well-known struct/map types.
	switch typ {
	case durationType:
		return scanDuration
	case nullBoolType:
		return scanNullBool
	case nullInt64Type:
		return scanNullInt64
	case nullFloat64Type:
		return scanNullFloat64
	case nullStringType:
		return scanNullString
	case mapStringStringType:
		return scanMapStringString
	}
	// Fall back to the reflect kind for plain scalars.
	switch typ.Kind() {
	case reflect.Bool:
		return scanBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return scanInt64
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return scanUint64
	case reflect.Float32:
		return scanFloat32
	case reflect.Float64:
		return scanFloat64
	case reflect.String:
		return scanString
	}
	return nil
}
// sliceScanner returns a scanner for slice fields with a supported element
// kind, or nil when the element kind is unsupported.
func sliceScanner(typ reflect.Type) scannerFunc {
	elemKind := typ.Elem().Kind()
	if elemKind == reflect.Int {
		return scanIntSlice
	}
	if elemKind == reflect.Int32 {
		return scanInt32Slice
	}
	if elemKind == reflect.Int64 {
		return scanInt64Slice
	}
	if elemKind == reflect.String {
		return scanStringSlice
	}
	return nil
}
// scanTextUnmarshaler unmarshals values[0] into a pointer-typed field that
// implements encoding.TextUnmarshaler, allocating the pointee when the field
// is nil.
func scanTextUnmarshaler(v reflect.Value, values []string) error {
	if v.IsNil() {
		v.Set(reflect.New(v.Type().Elem()))
	}
	u := v.Interface().(encoding.TextUnmarshaler)
	return u.UnmarshalText([]byte(values[0]))
}
func scanTextUnmarshalerAddr(v reflect.Value, values []string) error {
if !v.CanAddr() {
return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type())
}
u := v.Addr().Interface().(encoding.TextUnmarshaler)
return u.UnmarshalText([]byte(values[0]))
}
func scanBool(v reflect.Value, values []string) error {
f, err := strconv.ParseBool(values[0])
if err != nil {
return err
}
v.SetBool(f)
return nil
}
func scanInt64(v reflect.Value, values []string) error {
n, err := strconv.ParseInt(values[0], 10, 64)
if err != nil {
return err
}
v.SetInt(n)
return nil
}
func scanUint64(v reflect.Value, values []string) error {
n, err := strconv.ParseUint(values[0], 10, 64)
if err != nil {
return err
}
v.SetUint(n)
return nil
}
func scanFloat32(v reflect.Value, values []string) error {
return scanFloat(v, values, 32)
}
func scanFloat64(v reflect.Value, values []string) error {
return scanFloat(v, values, 64)
}
func scanFloat(v reflect.Value, values []string, bits int) error {
n, err := strconv.ParseFloat(values[0], bits)
if err != nil {
return err
}
v.SetFloat(n)
return nil
}
// scanString stores values[0] in v unchanged.
func scanString(v reflect.Value, values []string) error {
	v.SetString(values[0])
	return nil
}
// scanTime parses values[0] via parseTime (Unix seconds, RFC 3339, or basic
// ISO 8601) and stores the resulting time.Time in v.
func scanTime(v reflect.Value, values []string) error {
	tm, err := parseTime(values[0])
	if err != nil {
		return err
	}
	v.Set(reflect.ValueOf(tm))
	return nil
}
// parseTime accepts Unix seconds, RFC 3339 timestamps, or ISO 8601 "basic"
// timestamps (with or without a zone offset).
func parseTime(s string) (time.Time, error) {
	// Plain integer: Unix seconds.
	if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
		return time.Unix(sec, 0), nil
	}
	// Extended format with date separators, e.g. "2006-01-02T15:04:05Z".
	if len(s) >= 5 && s[4] == '-' {
		return time.Parse(time.RFC3339Nano, s)
	}
	// Basic format without separators or zone.
	if len(s) == 15 {
		return time.Parse("20060102T150405", s)
	}
	// Basic format with an explicit zone offset.
	return time.Parse("20060102T150405-07:00", s)
}
func scanDuration(v reflect.Value, values []string) error {
dur, err := time.ParseDuration(values[0])
if err != nil {
return err
}
v.SetInt(int64(dur))
return nil
}
func scanNullBool(v reflect.Value, values []string) error {
value := sql.NullBool{
Valid: true,
}
s := values[0]
if s == "" {
v.Set(reflect.ValueOf(value))
return nil
}
f, err := strconv.ParseBool(s)
if err != nil {
return err
}
value.Bool = f
v.Set(reflect.ValueOf(value))
return nil
}
func scanNullInt64(v reflect.Value, values []string) error {
value := sql.NullInt64{
Valid: true,
}
s := values[0]
if s == "" {
v.Set(reflect.ValueOf(value))
return nil
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return err
}
value.Int64 = n
v.Set(reflect.ValueOf(value))
return nil
}
func scanNullFloat64(v reflect.Value, values []string) error {
value := sql.NullFloat64{
Valid: true,
}
s := values[0]
if s == "" {
v.Set(reflect.ValueOf(value))
return nil
}
n, err := strconv.ParseFloat(s, 64)
if err != nil {
return err
}
value.Float64 = n
v.Set(reflect.ValueOf(value))
return nil
}
func scanNullString(v reflect.Value, values []string) error {
value := sql.NullString{
Valid: true,
}
s := values[0]
if s == "" {
v.Set(reflect.ValueOf(value))
return nil
}
value.String = s
v.Set(reflect.ValueOf(value))
return nil
}
// scanMapStringString decodes alternating key/value entries into a
// map[string]string and stores it in v.
// NOTE(review): an odd number of values is silently ignored (no error, v left
// unchanged) — confirm this is the intended behaviour.
func scanMapStringString(v reflect.Value, values []string) error {
	if len(values)%2 != 0 {
		return nil
	}
	m := make(map[string]string)
	for i := 0; i < len(values); i += 2 {
		m[values[i]] = values[i+1]
	}
	v.Set(reflect.ValueOf(m))
	return nil
}
func scanIntSlice(v reflect.Value, values []string) error {
nn := make([]int, 0, len(values))
for _, s := range values {
n, err := strconv.Atoi(s)
if err != nil {
return err
}
nn = append(nn, n)
}
v.Set(reflect.ValueOf(nn))
return nil
}
func scanInt32Slice(v reflect.Value, values []string) error {
nn := make([]int32, 0, len(values))
for _, s := range values {
n, err := strconv.ParseInt(s, 10, 32)
if err != nil {
return err
}
nn = append(nn, int32(n))
}
v.Set(reflect.ValueOf(nn))
return nil
}
func scanInt64Slice(v reflect.Value, values []string) error {
nn := make([]int64, 0, len(values))
for _, s := range values {
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return err
}
nn = append(nn, n)
}
v.Set(reflect.ValueOf(nn))
return nil
}
func scanStringSlice(v reflect.Value, values []string) error {
v.Set(reflect.ValueOf(values))
return nil
} | scan.go | 0.537527 | 0.420421 | scan.go | starcoder |
package day11
import (
"errors"
"fmt"
"io"
)
// IntGrid is a dense width×height grid of ints, stored row-major as data[y][x].
type IntGrid struct {
	data   [][]int
	width  int
	height int
}
// Point is an (X, Y) grid coordinate.
type Point struct {
	X int
	Y int
}
// Plus returns a new Point translated by offset; neither operand is modified.
func (p Point) Plus(offset *Point) *Point {
	return &Point{X: p.X + offset.X, Y: p.Y + offset.Y}
}
// NewPoint returns a Point at (x, y).
func NewPoint(x int, y int) *Point {
	return &Point{x, y}
}
// NewIntGrid returns a zero-filled grid of the given dimensions.
func NewIntGrid(width int, height int) *IntGrid {
	rows := make([][]int, height)
	for i := 0; i < height; i++ {
		rows[i] = make([]int, width)
	}
	return &IntGrid{data: rows, width: width, height: height}
}
// Clone returns a deep copy of the grid; mutating the copy leaves g unchanged.
func (g *IntGrid) Clone() *IntGrid {
	clone := NewIntGrid(g.width, g.height)
	for y, row := range g.data {
		// copy duplicates each row in one call instead of per-cell SetXY,
		// which also removes the need to ignore SetXY's error return.
		copy(clone.data[y], row)
	}
	return clone
}
// SetXY writes v at (x, y), returning an error when the coordinate is out of
// bounds.
func (g *IntGrid) SetXY(x int, y int, v int) error {
	switch {
	case y < 0 || y >= g.height:
		return errors.New("invalid y")
	case x < 0 || x >= g.width:
		return errors.New("invalid x")
	}
	g.data[y][x] = v
	return nil
}
// GetXY reads the value at (x, y), returning an error when the coordinate is
// out of bounds.
func (g *IntGrid) GetXY(x int, y int) (int, error) {
	switch {
	case y < 0 || y >= g.height:
		return 0, errors.New("invalid y")
	case x < 0 || x >= g.width:
		return 0, errors.New("invalid x")
	}
	return g.data[y][x], nil
}
// MustGetXY reads the value at (x, y), panicking when the coordinate is out of
// bounds.
func (g *IntGrid) MustGetXY(x int, y int) int {
	switch {
	case y < 0 || y >= g.height:
		panic("invalid y")
	case x < 0 || x >= g.width:
		panic("invalid x")
	}
	return g.data[y][x]
}
// Each invokes r for every cell, in row-major order (y outer, x inner).
func (g *IntGrid) Each(r func(p *Point, v int)) {
	for y := range g.data {
		for x, v := range g.data[y] {
			r(NewPoint(x, y), v)
		}
	}
}
// Count returns the number of cells for which r returns true.
func (g *IntGrid) Count(r func(p *Point, v int) bool) int {
	result := 0
	for y := range g.data {
		for x, v := range g.data[y] {
			if r(NewPoint(x, y), v) {
				result++
			}
		}
	}
	return result
}
// CountAdjacents counts the up-to-eight neighbours of (x, y) accepted by
// matcher. Neighbours are visited clockwise from north — N, NE, E, SE, S, SW,
// W, NW — matching the original unrolled order, and off-grid neighbours are
// skipped.
func (g *IntGrid) CountAdjacents(x int, y int, matcher func(p *Point, v int) bool) int {
	offsets := [8][2]int{{0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}, {-1, -1}}
	result := 0
	for _, o := range offsets {
		nx, ny := x+o[0], y+o[1]
		if nx < 0 || nx >= g.width || ny < 0 || ny >= g.height {
			continue
		}
		if matcher(NewPoint(nx, ny), g.MustGetXY(nx, ny)) {
			result++
		}
	}
	return result
}
// countHelper walks from p in direction offset, returning 1 if a cell matching
// matcher is found before a cell failing filter (or the grid edge), else 0.
// The walk always stops at the first match, so at most 1 is returned.
// NOTE(review): stopAtFirst is accepted but never used — confirm whether a
// non-stopping scan was ever intended.
func (g *IntGrid) countHelper(p *Point, offset *Point, stopAtFirst bool, filter func(p *Point, v int) bool, matcher func(p *Point, v int) bool) int {
	result := 0
	p = p.Plus(offset)
	for {
		if !(0 <= p.X && p.X < g.width) {
			break
		}
		if !(0 <= p.Y && p.Y < g.height) {
			break
		}
		if matcher(p, g.MustGetXY(p.X, p.Y)) {
			result++
			break
		}
		if !filter(p, g.MustGetXY(p.X, p.Y)) {
			break
		}
		p = p.Plus(offset)
	}
	return result
}
// CountAdjacentVectors scans outward from (x, y) along each of the eight
// compass directions (N, NE, E, SE, S, SW, W, NW — the original unrolled
// order) and sums countHelper's result for each ray. The per-direction edge
// guards of the original are unnecessary: countHelper bounds-checks before
// invoking filter/matcher, so out-of-range rays contribute zero.
func (g *IntGrid) CountAdjacentVectors(x int, y int, stopAtFirst bool, filter func(p *Point, v int) bool, matcher func(p *Point, v int) bool) int {
	origin := NewPoint(x, y)
	result := 0
	for _, o := range [8][2]int{{0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}, {-1, -1}} {
		result += g.countHelper(origin, NewPoint(o[0], o[1]), stopAtFirst, filter, matcher)
	}
	return result
}
func (g *IntGrid) Print(w io.Writer) {
for y := range g.data {
line := ""
for _, c := range g.data[y] {
line += string(c)
}
fmt.Fprintf(w, "%s\n", line)
}
} | day11/grid.go | 0.552298 | 0.417331 | grid.go | starcoder |
package treap
// node represents node of a treap.
type node[T any] struct {
	priority int // heap priority; per merge, the larger priority becomes the root
	value    T   // BST key, ordered by the comparison function
	left     *node[T]
	right    *node[T]
	size     int // number of nodes in the subtree rooted here
}
// contains returns true if given node contains given value,
// false otherwise. comp must return 0 on equality and a negative value when
// its first argument orders before its second.
func (n *node[T]) contains(value T, comp func(T, T) int) bool {
	cur := n
	for cur != nil {
		if comp(cur.value, value) == 0 {
			return true
		}
		if comp(value, cur.value) < 0 {
			cur = cur.left
		} else {
			cur = cur.right
		}
	}
	return false
}
// tryRemoveMin tries to remove minimal element in given node if this element is the same as given one.
// It walks the leftmost path; the first node on that path whose value equals
// expected (per comp) is replaced by the merge of its children. Subtree sizes
// are recalculated on the way back up.
func tryRemoveMin[T any](n *node[T], expected T, comp func(T, T) int) *node[T] {
	if n == nil {
		return nil
	}
	if comp(n.value, expected) == 0 {
		n = merge(n.left, n.right)
		return n
	}
	n.left = tryRemoveMin(n.left, expected, comp)
	n.recalculateSize()
	return n
}
// merge merges two nodes, all elements of left node should be less than any of right node.
// The node with the larger priority becomes the root, preserving the heap
// property on priorities; sizes are recalculated as the recursion unwinds.
func merge[T any](left *node[T], right *node[T]) *node[T] {
	if left == nil {
		return right
	}
	if right == nil {
		return left
	}
	if left.priority < right.priority {
		right.left = merge(left, right.left)
		right.recalculateSize()
		return right
	}
	left.right = merge(left.right, right)
	left.recalculateSize()
	return left
}
// split splits given node by given key into two nodes: the first holds values
// ordered strictly before key, the second holds the rest (values >= key).
func split[T any](n *node[T], key T, comp func(T, T) int) (*node[T], *node[T]) {
	if n == nil {
		return nil, nil
	}
	if comp(key, n.value) > 0 {
		left, right := split(n.right, key, comp)
		n.right = left
		n.recalculateSize()
		return n, right
	}
	left, right := split(n.left, key, comp)
	n.left = right
	n.recalculateSize()
	return left, n
}
// recalculateSize recomputes n.size from its children; no-op on a nil node.
// (The redundant trailing bare return of the original is removed —
// staticcheck S1023.)
func (n *node[T]) recalculateSize() {
	if n == nil {
		return
	}
	size := 1
	if n.left != nil {
		size += n.left.size
	}
	if n.right != nil {
		size += n.right.size
	}
	n.size = size
}
// getAll returns all elements in node.
// Length of elements should be same as size of node.
func (n *node[T]) getAll(elements []T) {
lSize := 0
if n.left != nil {
lSize = n.left.size
n.left.getAll(elements[:lSize])
}
elements[lSize] = n.value
if n.right != nil {
n.right.getAll(elements[lSize+1:])
}
} | treap/node.go | 0.831964 | 0.57344 | node.go | starcoder |
package aws
import (
"context"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/turbot/steampipe-plugin-sdk/v3/grpc/proto"
"github.com/turbot/steampipe-plugin-sdk/v3/plugin"
"github.com/turbot/steampipe-plugin-sdk/v3/plugin/transform"
)
// tableAwsVpcFlowLogEventListKeyColumns declares the quals accepted by the
// list hydrate call: log_group_name is required, everything else is optional.
func tableAwsVpcFlowLogEventListKeyColumns() []*plugin.KeyColumn {
	return []*plugin.KeyColumn{
		{Name: "log_group_name"},
		{Name: "log_stream_name", Require: plugin.Optional},
		{Name: "filter", Require: plugin.Optional, CacheMatch: "exact"},
		{Name: "region", Require: plugin.Optional},
		{Name: "timestamp", Operators: []string{">", ">=", "=", "<", "<="}, Require: plugin.Optional},
		// others
		{Name: "event_id", Require: plugin.Optional},
		{Name: "interface_id", Require: plugin.Optional},
		{Name: "src_addr", Require: plugin.Optional},
		{Name: "dst_addr", Require: plugin.Optional},
		{Name: "src_port", Require: plugin.Optional},
		{Name: "dst_port", Require: plugin.Optional},
		{Name: "action", Require: plugin.Optional},
		{Name: "log_status", Require: plugin.Optional},
	}
}
//// TABLE DEFINITION
// tableAwsVpcFlowLogEvent defines the aws_vpc_flow_log_event table. Most
// columns are extracted positionally from the whitespace-split flow-log
// message via getMessageField/getField; the positions correspond to the
// default flow-log format described in the column descriptions.
func tableAwsVpcFlowLogEvent(_ context.Context) *plugin.Table {
	return &plugin.Table{
		Name:        "aws_vpc_flow_log_event",
		Description: "AWS VPC Flow Log events from CloudWatch Logs",
		List: &plugin.ListConfig{
			Hydrate:    listCloudwatchLogEvents,
			KeyColumns: tableAwsVpcFlowLogEventListKeyColumns(),
		},
		GetMatrixItem: BuildRegionList,
		Columns: awsRegionalColumns([]*plugin.Column{
			// Top columns
			{Name: "log_group_name", Type: proto.ColumnType_STRING, Transform: transform.FromQual("log_group_name"), Description: "The name of the log group to which this event belongs."},
			{Name: "log_stream_name", Type: proto.ColumnType_STRING, Description: "The name of the log stream to which this event belongs."},
			{Name: "timestamp", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Timestamp").Transform(transform.UnixMsToTimestamp), Description: "The time when the event occurred."},
			{Name: "version", Type: proto.ColumnType_INT, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 0), Description: "The VPC Flow Logs version. If you use the default format, the version is 2. If you use a custom format, the version is the highest version among the specified fields. For example, if you specify only fields from version 2, the version is 2. If you specify a mixture of fields from versions 2, 3, and 4, the version is 4."},
			{Name: "interface_account_id", Type: proto.ColumnType_STRING, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 1), Description: "The AWS account ID of the owner of the source network interface for which traffic is recorded. If the network interface is created by an AWS service, for example when creating a VPC endpoint or Network Load Balancer, the record may display unknown for this field."},
			{Name: "interface_id", Type: proto.ColumnType_STRING, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 2), Description: "The ID of the network interface for which the traffic is recorded."},
			{Name: "src_addr", Type: proto.ColumnType_IPADDR, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 3), Description: "The source address for incoming traffic, or the IPv4 or IPv6 address of the network interface for outgoing traffic on the network interface. The IPv4 address of the network interface is always its private IPv4 address. See also pkt-srcaddr."},
			{Name: "dst_addr", Type: proto.ColumnType_IPADDR, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 4), Description: "The destination address for outgoing traffic, or the IPv4 or IPv6 address of the network interface for incoming traffic on the network interface. The IPv4 address of the network interface is always its private IPv4 address. See also pkt-dstaddr."},
			{Name: "src_port", Type: proto.ColumnType_INT, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 5), Description: "The source port of the traffic."},
			{Name: "dst_port", Type: proto.ColumnType_INT, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 6), Description: "The destination port of the traffic."},
			{Name: "protocol", Type: proto.ColumnType_INT, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 7), Description: "The IANA protocol number of the traffic. For more information, see Assigned Internet Protocol Numbers."},
			{Name: "packets", Type: proto.ColumnType_INT, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 8), Description: "The number of packets transferred during the flow."},
			{Name: "bytes", Type: proto.ColumnType_INT, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 9), Description: "The number of bytes transferred during the flow."},
			{Name: "start", Type: proto.ColumnType_TIMESTAMP, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 10).Transform(transform.UnixToTimestamp), Description: "The time when the first packet of the flow was received within the aggregation interval. This might be up to 60 seconds after the packet was transmitted or received on the network interface."},
			{Name: "end", Type: proto.ColumnType_TIMESTAMP, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 11).Transform(transform.UnixToTimestamp), Description: "The time when the last packet of the flow was received within the aggregation interval. This might be up to 60 seconds after the packet was transmitted or received on the network interface."},
			{Name: "action", Type: proto.ColumnType_STRING, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 12), Description: "The action that is associated with the traffic: ACCEPT — The recorded traffic was permitted by the security groups and network ACLs. REJECT — The recorded traffic was not permitted by the security groups or network ACLs."},
			{Name: "log_status", Type: proto.ColumnType_STRING, Hydrate: getMessageField, Transform: transform.FromValue().TransformP(getField, 13), Description: "The logging status of the flow log: OK — Data is logging normally to the chosen destinations. NODATA — There was no network traffic to or from the network interface during the aggregation interval. SKIPDATA — Some flow log records were skipped during the aggregation interval. This may be because of an internal capacity constraint, or an internal error."},
			// Other columns
			{Name: "event_id", Description: "The ID of the event.", Type: proto.ColumnType_STRING, Transform: transform.FromField("EventId")},
			{Name: "filter", Description: "Filter pattern for the search.", Type: proto.ColumnType_STRING, Transform: transform.FromQual("filter")},
			{Name: "ingestion_time", Description: "The time when the event was ingested.", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("IngestionTime").Transform(transform.UnixMsToTimestamp)},
		}),
	}
}
// getMessageField splits the raw flow-log message into its whitespace-separated
// fields for the positional per-column transforms.
func getMessageField(_ context.Context, _ *plugin.QueryData, h *plugin.HydrateData) (interface{}, error) {
	event := h.Item.(*cloudwatchlogs.FilteredLogEvent)
	return strings.Fields(*event.Message), nil
}
// getField returns the idx-th whitespace-separated field of the flow-log
// message, or nil when the field is absent or recorded as "-" (no data).
func getField(_ context.Context, d *transform.TransformData) (interface{}, error) {
	fields := d.Value.([]string)
	idx := d.Param.(int)
	// Guard against short messages (e.g. custom log formats with fewer
	// fields) instead of panicking with an index-out-of-range error.
	if idx < 0 || idx >= len(fields) {
		return nil, nil
	}
	if fields[idx] == "-" {
		return nil, nil
	}
	return fields[idx], nil
}
func buildFilter(equalQuals plugin.KeyColumnEqualsQualMap) []string {
filters := []string{}
filterQuals := []string{"action", "log_status", "interface_id", "event_id", "src_addr", "dst_addr", "src_port", "dst_port"}
for _, qual := range filterQuals {
switch qual {
case "action", "log_status", "interface_id", "event_id":
if equalQuals[qual] != nil {
filters = append(filters, equalQuals[qual].GetStringValue())
}
case "src_addr", "dst_addr":
if equalQuals[qual] != nil {
filters = append(filters, equalQuals[qual].GetInetValue().Addr)
}
case "src_port", "dst_port":
if equalQuals[qual] != nil {
filters = append(filters, strconv.Itoa(int(equalQuals[qual].GetInt64Value())))
}
}
}
return filters
} | aws/table_aws_vpc_flow_log_event.go | 0.660391 | 0.441372 | table_aws_vpc_flow_log_event.go | starcoder |
package conversion
import (
"math"
)
// KphToMps - converts kilometres per hour to meters per second
func KphToMps(kph int) float64 {
	kmPerHour := float64(kph)
	return kmPerHour / 3.6
}
// KphToKts - converts kilometres per hour to knots
func KphToKts(kph int) float64 {
	kmPerHour := float64(kph)
	return kmPerHour / 1.852
}
// KtsToMps - converts knots to meters per second
func KtsToMps(kts float64) float64 {
	const ktsPerMps = 1.94384
	return kts / ktsPerMps
}
// MpsToKts - converts meters per second to knots
func MpsToKts(m float64) float64 {
	const ktsPerMps = 1.94384
	return m * ktsPerMps
}
// SMileToM - converts statute miles to meters
func SMileToM(sm float64) int {
	meters := sm * 1609.344
	return int(math.Round(meters))
}
// FtToM - converts feet to meters (rounded to 10 meters)
func FtToM(ft int) int {
	tens := math.Round(float64(ft) * 0.3048 / 10)
	return int(tens * 10)
}
// MToFt - converts metres to feet (rounded to 10)
func MToFt(m int) int {
	tens := math.Round(float64(m) * 3.28084 / 10)
	return int(tens * 10)
}
// MToSMile - converts metres to statute miles
func MToSMile(m int) float64 {
	const milesPerMetre = 0.00062137119223733
	return float64(m) * milesPerMetre
}
// FtToSMile - converts feet to statute miles
// (parameter renamed from the misleading "m": the input is feet, not metres).
func FtToSMile(ft int) float64 {
	return float64(ft) / 5280
}
// SMileToFt - converts statute miles to feet (truncated to int)
func SMileToFt(m float64) int {
	feet := m * 5280
	return int(feet)
}
// InHgTohPa - converts inch of mercury to hectopascal
func InHgTohPa(inHg float64) int {
	hPa := inHg * 33.86389
	return int(math.Round(hPa))
}
// HPaToMmHg - converts hectopascal to mm of mercury
func HPaToMmHg(hPa int) int {
	mmHg := float64(hPa) * 0.75006375541921
	return int(math.Round(mmHg))
}
// MmHgToHPa - converts mm of mercury to hectopascal
func MmHgToHPa(mm int) int {
	hPa := float64(mm) * 1.333223684
	return int(math.Round(hPa))
}
// DirectionToCardinalDirection - converts direction in degrees to points of the compass
func DirectionToCardinalDirection(dir int) string {
	// Array lookup instead of allocating a fresh map on every call.
	// Out-of-range indexes (e.g. negative directions, where Go's % keeps the
	// sign) return "" exactly like the original map lookup did.
	names := [...]string{"N", "NE", "E", "SE", "S", "SW", "W", "NW", "N"}
	index := int(math.Round(float64(dir%360) / 45))
	if index < 0 || index >= len(names) {
		return ""
	}
	return names[index]
}
// CalcRelativeHumidity - calculates the relative humidity of the dew point and temperature
// see https://www.vaisala.com/sites/default/files/documents/Humidity_Conversion_Formulas_B210973EN-F.pdf
// used constants in temperature range -20...+50°C
func CalcRelativeHumidity(temp, dewpoint int) int {
	const (
		m  = 7.591386
		tn = 240.7263
	)
	exponent := m * (float64(dewpoint)/(float64(dewpoint)+tn) - (float64(temp) / (float64(temp) + tn)))
	rh := 100 * math.Pow(10, exponent)
	return int(math.Round(rh))
}
package heap
// Value is the element type stored in the heap.
type Value interface{}
// Cmp reports whether a should be ordered before b; it defines the heap order.
type Cmp func(a, b Value) bool
// Heap is a slice-backed binary heap with index-based access, ordered by the
// Cmp supplied to InitWithCmp.
type Heap interface {
	InitWithCmp(cmp Cmp)
	Push(x Value)
	Pop() Value
	Peek() Value
	Len() int
	IndexOf(x Value) int
	Fix(i int)
	Remove(i int) Value
	Update(i int, value Value)
}
// New returns an empty Heap; InitWithCmp must be called before use, since the
// comparison function is unset until then.
func New() Heap {
	return NewWithCap(0)
}
// NewWithCap returns an empty Heap with the given initial capacity.
func NewWithCap(cap int) Heap {
	return &heapImp{slice: make([]Value, 0, cap)}
}
// NewWithSlice returns a Heap backed by the given slice; call InitWithCmp to
// establish the heap order over the existing elements.
func NewWithSlice(slice []Value) Heap {
	return &heapImp{slice: slice}
}
// heapImp implements Heap on top of a slice.
type heapImp struct {
	cmp   Cmp
	slice []Value
}
// InitWithCmp will build the heap by the given cmp.
// The complexity is O(n) where n = h.Len().
func (h *heapImp) InitWithCmp(cmp Cmp) {
	h.cmp = cmp
	n := h.Len()
	// Sift down every internal node, starting from the last parent.
	for i := n/2 - 1; i >= 0; i-- {
		h.down(i, n)
	}
}
// Push pushes the element x onto the heap.
// The complexity is O(log n) where n = h.Len().
func (h *heapImp) Push(x Value) {
	h.slice = append(h.slice, x)
	h.up(h.Len() - 1)
}
// Pop removes and returns the peek element from the heap.
// The complexity is O(log n) where n = h.Len().
// Pop is equivalent to Remove(h, 0).
// Pop panics when the heap is empty.
func (h *heapImp) Pop() Value {
	last := h.Len() - 1
	// Move the root to the end, restore the heap over the rest, then shrink.
	h.swap(0, last)
	h.down(0, last)
	res := h.slice[last]
	h.slice = h.slice[:last]
	return res
}
// Peek returns the peek value of the heap
// The complexity is O(1)
// Peek panics when the heap is empty.
func (h *heapImp) Peek() Value {
	return h.slice[0]
}
// Len returns the size of the heap.
// The complexity is O(1)
func (h *heapImp) Len() int {
	return len(h.slice)
}
// IndexOf returns the index of x in the inner slice of the heap
// If x is not in the heap, returns -1
// The complexity is O(n)
func (h *heapImp) IndexOf(x Value) int {
	return h.indexOf(x, 0)
}
// indexOf searches the subtree rooted at index i for x (compared with ==).
// A subtree is pruned when cmp(x, root) is true: by the heap property the root
// precedes all of its descendants, so x cannot appear below such a root.
func (h *heapImp) indexOf(x Value, i int) int {
	if i >= h.Len() || h.cmp(x, h.slice[i]) {
		return -1
	}
	if h.slice[i] == x {
		return i
	}
	// search in left child
	if r := h.indexOf(x, 2*i+1); r != -1 {
		return r
	}
	// search in right child
	return h.indexOf(x, 2*i+2)
}
// Fix re-establishes the heap ordering after the element at index i has changed its value.
// Changing the value of the element at index i and then calling Fix is equivalent to,
// but less expensive than, calling Remove(h, i) followed by a Push of the new value.
// The complexity is O(log n) where n = h.Len().
func (h *heapImp) Fix(i int) {
	if !h.down(i, h.Len()) {
		h.up(i)
	}
}
// Remove removes and returns the element at index i from the heap.
// The complexity is O(log n) where n = h.Len().
func (h *heapImp) Remove(i int) Value {
	n := h.Len() - 1
	// Swap the target with the last element, restore the heap, then shrink.
	if n != i {
		h.swap(i, n)
		if !h.down(i, n) {
			h.up(i)
		}
	}
	res := h.slice[n]
	h.slice = h.slice[:n]
	return res
}
// Update update the value of the element at index i, and then fix the heap
// The complexity is O(log n) where n = h.Len().
func (h *heapImp) Update(i int, value Value) {
	h.slice[i] = value
	h.Fix(i)
}
// swap exchanges the elements at indexes i and j.
func (h *heapImp) swap(i, j int) {
	h.slice[j], h.slice[i] = h.slice[i], h.slice[j]
}
// up sifts the element at index i towards the root until the heap property
// holds again. (At i == 0 there is no parent, so nothing to do.)
func (h *heapImp) up(i int) {
	for i > 0 {
		parent := (i - 1) / 2
		if !h.cmp(h.slice[i], h.slice[parent]) {
			break
		}
		h.swap(parent, i)
		i = parent
	}
}
func (h *heapImp) down(i, n int) bool {
cur := i
for {
child := 2*cur + 1 // left
if child >= n || child < 0 {
break
}
right := child + 1
if right < n && h.cmp(h.slice[right], h.slice[child]) {
child = right
}
if !h.cmp(h.slice[child], h.slice[cur]) {
break
}
h.swap(cur, child)
cur = child
}
return cur > i
} | heap/heap.go | 0.761272 | 0.439026 | heap.go | starcoder |
package git
// BoolCache caches a boolean variable.
// The zero value is an empty cache.
//
// Fixed: the doc comments and panic message were copy-pasted from a
// git "current branch" use-site; this type is a general-purpose cache,
// so they now match the sibling StringCache/StringSliceCache types.
type BoolCache struct {
	initialized bool
	value bool
}
// Set stores the given value and marks the cache as initialized.
func (sc *BoolCache) Set(newValue bool) {
	sc.initialized = true
	sc.value = newValue
}
// Value provides the current value.
// It panics if no value has been cached via Set.
func (sc *BoolCache) Value() bool {
	if !sc.initialized {
		panic("cannot access uninitialized cached value")
	}
	return sc.value
}
// Initialized indicates whether a value has been cached.
func (sc *BoolCache) Initialized() bool {
	return sc.initialized
}
// Invalidate removes the cached value.
func (sc *BoolCache) Invalidate() {
	sc.initialized = false
}
// StringCache caches a string value.
// The zero value is an empty cache.
//
// Fixed: the Set doc comment was copy-pasted from a git "current branch"
// use-site, and the panic message misspelled "uninitialized".
type StringCache struct {
	initialized bool
	value string
}
// Set stores the given value and marks the cache as initialized.
func (sc *StringCache) Set(newValue string) {
	sc.initialized = true
	sc.value = newValue
}
// Value provides the current value.
// It panics if no value has been cached via Set.
func (sc *StringCache) Value() string {
	if !sc.initialized {
		panic("cannot access uninitialized cached value")
	}
	return sc.value
}
// Initialized indicates whether a value has been cached.
func (sc *StringCache) Initialized() bool {
	return sc.initialized
}
// Invalidate removes the cached value.
func (sc *StringCache) Invalidate() {
	sc.initialized = false
}
// StringSliceCache caches a string slice value.
// The zero value is an empty cache.
//
// Fixed: the Set doc comment was copy-pasted from a git "current branch"
// use-site, and the panic message misspelled "uninitialized".
type StringSliceCache struct {
	initialized bool
	value []string
}
// Set stores the given slice and marks the cache as initialized.
// NOTE: the slice is retained as-is (not copied), so later mutations by
// the caller are visible through Value.
func (ssc *StringSliceCache) Set(newValue []string) {
	ssc.initialized = true
	ssc.value = newValue
}
// Value provides the current value.
// It panics if no value has been cached via Set.
func (ssc *StringSliceCache) Value() []string {
	if !ssc.Initialized() {
		panic("cannot access uninitialized cached value")
	}
	return ssc.value
}
// Initialized indicates whether a value has been cached.
func (ssc *StringSliceCache) Initialized() bool {
	return ssc.initialized
}
// Invalidate removes the cached value.
func (ssc *StringSliceCache) Invalidate() {
	ssc.initialized = false
}
package trie
import "strings"
// node is a single trie node holding one rune. wordMarker is true when
// the path from the root to this node spells a complete word.
type node struct {
	content rune
	wordMarker bool
	children []*node
}
// findChild returns the child of n holding rune r, or nil if none exists.
func (n *node) findChild(r rune) *node {
	for _, e := range n.children {
		if e.content == r {
			return e
		}
	}
	return nil
}
// Trie represents a trie also called prefix tree.
// The zero value for Trie is an empty trie ready to use.
type Trie struct {
	root node
	nbNode int
}
// Init initializes or clears trie t.
func (t *Trie) Init() *Trie {
	t.root.content = 0
	t.root.children = nil
	t.root.wordMarker = false
	t.nbNode = 0
	return t
}
// New returns an initialized trie.
func New() *Trie { return new(Trie).Init() }
// AddWord adds word in t, one rune per node. Runes already present along
// the path are reused; only missing runes allocate new nodes. The node
// holding the last rune of word is marked as a word end (the root for
// the empty word).
//
// BUG FIX: the previous implementation detected the final rune by
// comparing the range loop's byte index with len(word)-1, which fails
// whenever the last rune is encoded with more than one UTF-8 byte (the
// word-end marker was never set, so SearchWord could not find the word).
// Marking the current node after the loop handles every word, including
// the empty one, correctly.
func (t *Trie) AddWord(word string) {
	currentNode := &t.root
	for _, c := range word {
		child := currentNode.findChild(c)
		if child == nil {
			newNode := &node{content: c}
			currentNode.children = append(currentNode.children, newNode)
			t.nbNode++
			currentNode = newNode
		} else {
			currentNode = child
		}
	}
	currentNode.wordMarker = true
}
// SearchWord reports whether word was added to t: it follows the path of
// word's runes and checks that the final node is a word end.
func (t *Trie) SearchWord(word string) bool {
	currentNode := &t.root
	for _, c := range word {
		child := currentNode.findChild(c)
		if child == nil {
			return false
		}
		currentNode = child
	}
	return currentNode.wordMarker
}
// findAllWords uses DFS to collect every word reachable from node n,
// prefixing each with prefix.
func findAllWords(n *node, prefix string, words []string) []string {
	if n.wordMarker {
		words = append(words, prefix)
	}
	for _, child := range n.children {
		prefix = prefix + string(child.content)
		words = findAllWords(child, prefix, words)
		// restore the prefix before visiting the next sibling
		prefix = strings.TrimSuffix(prefix, string(child.content))
	}
	return words
}
// FindAllWords returns all words present in the trie.
func (t *Trie) FindAllWords() []string {
	var words []string
	return findAllWords(&t.root, "", words)
}
// FindAllMatchingWords returns all words present in the trie that
// start with prefix.
func (t *Trie) FindAllMatchingWords(prefix string) []string {
	var matchs []string
	currentNode := &t.root
	for _, c := range prefix {
		child := currentNode.findChild(c)
		if child == nil {
			return matchs
		}
		currentNode = child
	}
	matchs = findAllWords(currentNode, prefix, matchs)
	return matchs
}
package rollsum
import (
"encoding/binary"
)
const FULL_BYTES_16 = (1 << 16) - 1

// NewRollsum32Base returns a rolling-checksum state for windows of
// blockSize bytes. Rollsum32Base decouples the rollsum algorithm from the
// implementation of hash.Hash and from the storage of the rolling window,
// so storage can be optimized per use-case.
func NewRollsum32Base(blockSize uint) *Rollsum32Base {
	return &Rollsum32Base{blockSize: blockSize}
}

// Rollsum32Base holds the two running components of the checksum.
// The specification of hash.Hash is such that it cannot be implemented
// without also implementing storage, hence this minimal core type.
type Rollsum32Base struct {
	blockSize uint
	a, b uint32
}

// AddByte folds a single byte into the rolling sum.
func (r *Rollsum32Base) AddByte(b byte) {
	r.a += uint32(b)
	r.b += r.a
}

// AddBytes folds each byte of bs into the rolling sum, in order.
func (r *Rollsum32Base) AddBytes(bs []byte) {
	for _, c := range bs {
		r.AddByte(c)
	}
}

// RemoveByte removes a byte that is leaving the window.
// length is the window length before the removal.
func (r *Rollsum32Base) RemoveByte(b byte, length int) {
	r.a -= uint32(b)
	r.b -= uint32(uint(length) * uint(b))
}

// RemoveBytes removes each byte of bs in order; length is the window
// length before the first removal (it shrinks by one per byte removed).
func (r *Rollsum32Base) RemoveBytes(bs []byte, length int) {
	for _, c := range bs {
		r.RemoveByte(c, length)
		length--
	}
}

// AddAndRemoveBytes rolls the window forward: it appends add while
// evicting remove, keeping the sums consistent. length is the window
// length before the call. len(add) must be >= len(remove) (this
// precondition is inherited from the original implementation).
func (r *Rollsum32Base) AddAndRemoveBytes(add []byte, remove []byte, length int) {
	grow := len(add) - len(remove)
	// absorb the bytes that grow the window before any eviction
	r.AddBytes(add[:grow])
	length += grow
	// then roll: one eviction per remaining added byte keeps the
	// window length constant at `length`
	for i, evicted := range remove {
		r.RemoveByte(evicted, length)
		r.AddByte(add[grow+i])
	}
}

// SetBlock resets the state and absorbs a whole block of blockSize bytes.
func (r *Rollsum32Base) SetBlock(block []byte) {
	r.Reset()
	r.AddBytes(block)
}

// Reset returns the hash to its initial state.
func (r *Rollsum32Base) Reset() {
	r.a, r.b = 0, 0
}

// Size returns the size of the hash in bytes.
func (r *Rollsum32Base) Size() int {
	return 4
}

// GetSum writes the current sum into b, avoiding allocation.
// b must have length >= 4.
func (r *Rollsum32Base) GetSum(b []byte) {
	low := r.a & FULL_BYTES_16
	high := (r.b & FULL_BYTES_16) << 16
	binary.LittleEndian.PutUint32(b, low+high)
}
package graph
import (
"sort"
)
type BasicTypeNodeSlice []*BasicTypeNode
func (nm BasicTypeNodeSlice) GetIds() (ids []NodeId) {
for _, n := range nm {
ids = append(ids, n.Id())
}
return
}
func (ns BasicTypeNodeSlice) Sort() (BasicTypeNodeSlice) {
sort.Slice(ns, func(i, j int) bool {
return ns[i].Namex < ns[j].Namex
})
return ns
}
func (ns BasicTypeNodeSlice) Map() (fnm BasicTypeNodeMap) {
fnm = BasicTypeNodeMap{}
fnm.Add(ns...)
return
}
type EndpointNodeSlice []*EndpointNode
func (nm EndpointNodeSlice) GetIds() (ids []NodeId) {
for _, n := range nm {
ids = append(ids, n.Id())
}
return
}
func (ns EndpointNodeSlice) Sort() (EndpointNodeSlice) {
sort.Slice(ns, func(i, j int) bool {
return ns[i].Namex < ns[j].Namex
})
return ns
}
func (ns EndpointNodeSlice) Map() (fnm EndpointNodeMap) {
fnm = EndpointNodeMap{}
fnm.Add(ns...)
return
}
type EnumNodeSlice []*EnumNode
func (nm EnumNodeSlice) GetIds() (ids []NodeId) {
for _, n := range nm {
ids = append(ids, n.Id())
}
return
}
func (ns EnumNodeSlice) Sort() (EnumNodeSlice) {
sort.Slice(ns, func(i, j int) bool {
return ns[i].Namex < ns[j].Namex
})
return ns
}
func (ns EnumNodeSlice) Map() (fnm EnumNodeMap) {
fnm = EnumNodeMap{}
fnm.Add(ns...)
return
}
type FieldNodeSlice []*FieldNode
func (nm FieldNodeSlice) GetIds() (ids []NodeId) {
for _, n := range nm {
ids = append(ids, n.Id())
}
return
}
func (ns FieldNodeSlice) Sort() (FieldNodeSlice) {
sort.Slice(ns, func(i, j int) bool {
return ns[i].Namex < ns[j].Namex
})
return ns
}
func (ns FieldNodeSlice) Map() (fnm FieldNodeMap) {
fnm = FieldNodeMap{}
fnm.Add(ns...)
return
}
type RelationNodeSlice []*RelationNode
func (nm RelationNodeSlice) GetIds() (ids []NodeId) {
for _, n := range nm {
ids = append(ids, n.Id())
}
return
}
func (ns RelationNodeSlice) Sort() (RelationNodeSlice) {
sort.Slice(ns, func(i, j int) bool {
return ns[i].Namex < ns[j].Namex
})
return ns
}
func (ns RelationNodeSlice) Map() (fnm RelationNodeMap) {
fnm = RelationNodeMap{}
fnm.Add(ns...)
return
}
type TypeNodeSlice []*TypeNode
func (nm TypeNodeSlice) GetIds() (ids []NodeId) {
for _, n := range nm {
ids = append(ids, n.Id())
}
return
}
func (ns TypeNodeSlice) Sort() (TypeNodeSlice) {
sort.Slice(ns, func(i, j int) bool {
return ns[i].Namex < ns[j].Namex
})
return ns
}
func (ns TypeNodeSlice) Map() (fnm TypeNodeMap) {
fnm = TypeNodeMap{}
fnm.Add(ns...)
return
}
type PathNodeSlice []*PathNode
func (nm PathNodeSlice) GetIds() (ids []NodeId) {
for _, n := range nm {
ids = append(ids, n.Id())
}
return
}
func (ns PathNodeSlice) Sort() (PathNodeSlice) {
sort.Slice(ns, func(i, j int) bool {
return ns[i].Namex < ns[j].Namex
})
return ns
}
func (ns PathNodeSlice) Map() (fnm PathNodeMap) {
fnm = PathNodeMap{}
fnm.Add(ns...)
return
} | asg/pkg/v0/asg/graph/NodeSlice_.go | 0.579876 | 0.553083 | NodeSlice_.go | starcoder |
package humanize
import (
"fmt"
"math"
"strings"
"time"
gohumanize "github.com/dustin/go-humanize"
)
// Timestamp renders ts in an absolute format followed by a relative
// description, e.g. "Mon Jan 02 15:04:05 -0700 (3 days ago)".
func Timestamp(ts time.Time) string {
	return fmt.Sprintf("%s (%s)", ts.Format("Mon Jan 02 15:04:05 -0700"), gohumanize.Time(ts))
}
// relativeMagnitudes drives gohumanize.CustomRelTime so the output is
// truncated to a single unit ("3 weeks", "1 year", ...). Each entry
// applies up to duration D and divides by DivBy to fill %d.
var relativeMagnitudes = []gohumanize.RelTimeMagnitude{
	{D: time.Second, Format: "0 seconds", DivBy: time.Second},
	{D: 2 * time.Second, Format: "1 second %s", DivBy: 1},
	{D: time.Minute, Format: "%d seconds %s", DivBy: time.Second},
	{D: 2 * time.Minute, Format: "1 minute %s", DivBy: 1},
	{D: time.Hour, Format: "%d minutes %s", DivBy: time.Minute},
	{D: 2 * time.Hour, Format: "1 hour %s", DivBy: 1},
	{D: gohumanize.Day, Format: "%d hours %s", DivBy: time.Hour},
	{D: 2 * gohumanize.Day, Format: "1 day %s", DivBy: 1},
	{D: gohumanize.Week, Format: "%d days %s", DivBy: gohumanize.Day},
	{D: 2 * gohumanize.Week, Format: "1 week %s", DivBy: 1},
	{D: gohumanize.Month, Format: "%d weeks %s", DivBy: gohumanize.Week},
	{D: 2 * gohumanize.Month, Format: "1 month %s", DivBy: 1},
	{D: gohumanize.Year, Format: "%d months %s", DivBy: gohumanize.Month},
	{D: 18 * gohumanize.Month, Format: "1 year %s", DivBy: 1},
	{D: 2 * gohumanize.Year, Format: "2 years %s", DivBy: 1},
	{D: gohumanize.LongTime, Format: "%d years %s", DivBy: gohumanize.Year},
	{D: math.MaxInt64, Format: "a long while %s", DivBy: 1},
}
// TruncatedDuration returns a duration truncated to a single unit
func TruncatedDuration(d time.Duration) string {
	// render d as the gap between the zero time and zero time + d,
	// then strip the trailing space left by the empty "ago"/"from now" label
	start := time.Time{}
	finish := start.Add(d)
	return strings.TrimSpace(gohumanize.CustomRelTime(start, finish, "", "", relativeMagnitudes))
}
// Duration humanizes time.Duration output to a meaningful value with up
// to two units: the truncated leading unit plus the remainder in the
// next-smaller unit (e.g. "1 hour 12 minutes").
func Duration(d time.Duration) string {
	switch {
	case d.Seconds() < 60.0:
		return TruncatedDuration(d)
	case d.Minutes() < 60.0:
		seconds := int64(math.Mod(d.Seconds(), 60))
		return fmt.Sprintf("%s %d seconds", TruncatedDuration(d), seconds)
	case d.Hours() < 24.0:
		minutes := int64(math.Mod(d.Minutes(), 60))
		return fmt.Sprintf("%s %d minutes", TruncatedDuration(d), minutes)
	default:
		hours := int64(math.Mod(d.Hours(), 24))
		return fmt.Sprintf("%s %d hours", TruncatedDuration(d), hours)
	}
}
// RelativeDuration returns a formatted duration from the relative times
// A zero finish with a non-zero start is treated as "until now" (UTC).
func RelativeDuration(start, finish time.Time) string {
	if finish.IsZero() && !start.IsZero() {
		finish = time.Now().UTC()
	}
	return Duration(finish.Sub(start))
}
// shortTimeMagnitudes renders a single compact unit ("3d", "2h", ...)
// for gohumanize.CustomRelTime.
// NOTE(review): the table stops at one week; output for longer spans
// depends on CustomRelTime's fallback behavior — verify if that matters.
var shortTimeMagnitudes = []gohumanize.RelTimeMagnitude{
	{D: time.Second, Format: "0s", DivBy: time.Second},
	{D: 2 * time.Second, Format: "1s %s", DivBy: 1},
	{D: time.Minute, Format: "%ds %s", DivBy: time.Second},
	{D: 2 * time.Minute, Format: "1m %s", DivBy: 1},
	{D: time.Hour, Format: "%dm %s", DivBy: time.Minute},
	{D: 2 * time.Hour, Format: "1h %s", DivBy: 1},
	{D: gohumanize.Day, Format: "%dh %s", DivBy: time.Hour},
	{D: 2 * gohumanize.Day, Format: "1d %s", DivBy: 1},
	{D: gohumanize.Week, Format: "%dd %s", DivBy: gohumanize.Day},
}
// RelativeDurationShort returns a relative duration in short format
// (single compact unit). A zero finish with a non-zero start is treated
// as "until now" (UTC).
func RelativeDurationShort(start, finish time.Time) string {
	if finish.IsZero() && !start.IsZero() {
		finish = time.Now().UTC()
	}
	return strings.TrimSpace(gohumanize.CustomRelTime(start, finish, "", "", shortTimeMagnitudes))
}
package refconv
import (
"fmt"
"math"
"math/cmplx"
"reflect"
"strconv"
"time"
"github.com/cstockton/go-conv/internal/refutil"
)
// Sentinels and cached reflect.Types used by the time conversions.
var (
	emptyTime = time.Time{}
	typeOfTime = reflect.TypeOf(emptyTime)
	typeOfDuration = reflect.TypeOf(time.Duration(0))
)
// convStrToDuration parses v as a time.Duration, trying in order:
// a Go duration string ("1h30m"), a base-10 integer (interpreted as
// nanoseconds), then a float (interpreted as seconds).
func (c Conv) convStrToDuration(v string) (time.Duration, error) {
	if parsed, err := time.ParseDuration(v); err == nil {
		return parsed, nil
	}
	if parsed, err := strconv.ParseInt(v, 10, 0); err == nil {
		return time.Duration(parsed), nil
	}
	if parsed, err := strconv.ParseFloat(v, 64); err == nil {
		// float is taken as seconds: scale to nanoseconds
		return time.Duration(1e9 * parsed), nil
	}
	return 0, fmt.Errorf("cannot parse %#v (type string) as time.Duration", v)
}
// convNumToDuration converts a numeric reflect value to a Duration.
// Integers are nanoseconds; unsigned values are clamped to MaxInt64;
// floats/complex are seconds (real part only), with NaN/Inf mapping to 0.
// The second result reports whether k was a supported numeric kind.
func (c Conv) convNumToDuration(k reflect.Kind, v reflect.Value) (time.Duration, bool) {
	switch {
	case refutil.IsKindInt(k):
		return time.Duration(v.Int()), true
	case refutil.IsKindUint(k):
		T := v.Uint()
		if T > math.MaxInt64 {
			T = math.MaxInt64
		}
		return time.Duration(T), true
	case refutil.IsKindFloat(k):
		T := v.Float()
		if math.IsNaN(T) || math.IsInf(T, 0) {
			return 0, true
		}
		return time.Duration(1e9 * T), true
	case refutil.IsKindComplex(k):
		T := v.Complex()
		if cmplx.IsNaN(T) || cmplx.IsInf(T) {
			return 0, true
		}
		return time.Duration(1e9 * real(T)), true
	}
	return 0, false
}
// durationConverter lets a value provide its own Duration conversion.
type durationConverter interface {
	Duration() (time.Duration, error)
}
// Duration attempts to convert the given value to time.Duration, returns the
// zero value and an error on failure. Fast paths handle string,
// time.Duration and durationConverter; everything else goes through
// reflection (after pointer indirection).
func (c Conv) Duration(from interface{}) (time.Duration, error) {
	if T, ok := from.(string); ok {
		return c.convStrToDuration(T)
	} else if T, ok := from.(time.Duration); ok {
		return T, nil
	} else if c, ok := from.(durationConverter); ok {
		return c.Duration()
	}
	value := refutil.IndirectVal(reflect.ValueOf(from))
	kind := value.Kind()
	switch {
	case reflect.String == kind:
		return c.convStrToDuration(value.String())
	case refutil.IsKindNumeric(kind):
		if parsed, ok := c.convNumToDuration(kind, value); ok {
			return parsed, nil
		}
	}
	return 0, newConvErr(from, "time.Duration")
}
// timeConverter lets a value provide its own Time conversion.
type timeConverter interface {
	Time() (time.Time, error)
}
// Time attempts to convert the given value to time.Time, returns the zero value
// of time.Time and an error on failure. Fast paths handle time.Time,
// *time.Time and timeConverter; strings are parsed against a table of
// layouts; structs are tried via convertibility to time.Time, then via a
// field literally named "Time" (recursively).
func (c Conv) Time(from interface{}) (time.Time, error) {
	if T, ok := from.(time.Time); ok {
		return T, nil
	} else if T, ok := from.(*time.Time); ok {
		return *T, nil
	} else if c, ok := from.(timeConverter); ok {
		return c.Time()
	}
	value := reflect.ValueOf(refutil.Indirect(from))
	kind := value.Kind()
	switch {
	case reflect.String == kind:
		if T, ok := convStringToTime(value.String()); ok {
			return T, nil
		}
	case reflect.Struct == kind:
		if value.Type().ConvertibleTo(typeOfTime) {
			valueConv := value.Convert(typeOfTime)
			if valueConv.CanInterface() {
				return valueConv.Interface().(time.Time), nil
			}
		}
		// fall back to an embedded/likewise-named "Time" field
		field := value.FieldByName("Time")
		if field.IsValid() && field.CanInterface() {
			return c.Time(field.Interface())
		}
	}
	return emptyTime, newConvErr(from, "time.Time")
}
// formatInfo pairs a time layout with a suffix layout ("needed") that is
// appended at parse time and filled from the current time; every entry
// below currently leaves needed empty.
type formatInfo struct {
	format string
	needed string
}
// formats is the ordered table of layouts tried by convStringToTime;
// the first layout that parses wins.
var formats = []formatInfo{
	{time.RFC3339Nano, ""},
	{time.RFC3339, ""},
	{time.RFC850, ""},
	{time.RFC1123, ""},
	{time.RFC1123Z, ""},
	{"02 Jan 06 15:04:05", ""},
	{"02 Jan 06 15:04:05 +-0700", ""},
	{"02 Jan 06 15:4:5 MST", ""},
	{"02 Jan 2006 15:04:05", ""},
	{"2 Jan 2006 15:04:05", ""},
	{"2 Jan 2006 15:04:05 MST", ""},
	{"2 Jan 2006 15:04:05 -0700", ""},
	{"2 Jan 2006 15:04:05 -0700 (MST)", ""},
	{"02 January 2006 15:04", ""},
	{"02 Jan 2006 15:04 MST", ""},
	{"02 Jan 2006 15:04:05 MST", ""},
	{"02 Jan 2006 15:04:05 -0700", ""},
	{"02 Jan 2006 15:04:05 -0700 (MST)", ""},
	{"Mon, 2 Jan 15:04:05 MST 2006", ""},
	{"Mon, 2 Jan 15:04:05 MST 2006", ""},
	{"Mon, 02 Jan 2006 15:04:05", ""},
	{"Mon, 02 Jan 2006 15:04:05 (MST)", ""},
	{"Mon, 2 Jan 2006 15:04:05", ""},
	{"Mon, 2 Jan 2006 15:04:05 MST", ""},
	{"Mon, 2 Jan 2006 15:04:05 -0700", ""},
	{"Mon, 2 Jan 2006 15:04:05 -0700 (MST)", ""},
	{"Mon, 02 Jan 06 15:04:05 MST", ""},
	{"Mon, 02 Jan 2006 15:04:05 -0700", ""},
	{"Mon, 02 Jan 2006 15:04:05 -0700 MST", ""},
	{"Mon, 02 Jan 2006 15:04:05 -0700 (MST)", ""},
	{"Mon, 02 Jan 2006 15:04:05 -0700 (MST-07:00)", ""},
	{"Mon, 02 Jan 2006 15:04:05 -0700 (MST MST)", ""},
	{"Mon, 02 Jan 2006 15:04 -0700", ""},
	{"Mon, 02 Jan 2006 15:04 -0700 (MST)", ""},
	{"Mon Jan 02 15:05:05 2006 MST", ""},
	{"Monday, 02 Jan 2006 15:04 -0700", ""},
	{"Monday, 02 Jan 2006 15:04:05 -0700", ""},
	{time.UnixDate, ""},
	{time.RubyDate, ""},
	{time.RFC822, ""},
	{time.RFC822Z, ""},
}
// Quick google yields no date parsing libraries, first thing that came to mind
// was trying all the formats in time package. This is reasonable enough until
// I can find a decent lexer or polish up my "timey" Go lib. I am using the
// table of dates politely released into public domain by github.com/tomarus:
// https://github.com/tomarus/parsedate/blob/master/parsedate.go
//
// convStringToTime tries every layout in formats. The first Parse only
// validates the layout; the second re-parses with the "needed" suffix
// appended, filled from the current time (a no-op while needed is "").
func convStringToTime(s string) (time.Time, bool) {
	if len(s) == 0 {
		return time.Time{}, false
	}
	for _, f := range formats {
		_, err := time.Parse(f.format, s)
		if err != nil {
			continue
		}
		if t, err := time.Parse(
			f.format+f.needed, s+time.Now().Format(f.needed)); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}
package primitives
import (
"git.maze.io/go/math32"
"github.com/go-gl/mathgl/mgl32"
)
// Sphere builds a UV-sphere of radius 1: (xSegments+1) x (ySegments+1)
// vertices laid out by latitude row, two triangles per grid cell.
// DRAW WITH: gl.DrawElements(gl.TRIANGLES, xSegments*ySegments*6, gl.UNSIGNED_INT, unsafe.Pointer(nil))
func Sphere(ySegments, xSegments int) (vertices, normals, tCoords []float32, indices []uint32) {
	for y := 0; y <= ySegments; y++ {
		for x := 0; x <= xSegments; x++ {
			// parametric position on the unit sphere (xSegment = longitude
			// fraction, ySegment = latitude fraction from the pole)
			xSegment := float32(x) / float32(xSegments)
			ySegment := float32(y) / float32(ySegments)
			xPos := float32(math32.Cos(xSegment*math32.Pi*2.0) * math32.Sin(ySegment*math32.Pi))
			yPos := float32(math32.Cos(ySegment * math32.Pi))
			zPos := float32(math32.Sin(xSegment*math32.Pi*2.0) * math32.Sin(ySegment*math32.Pi))
			vertices = append(vertices, xPos, yPos, zPos)
			// for a unit sphere the normal is the normalized position
			xPos, yPos, zPos = mgl32.Vec3{xPos, yPos, zPos}.Normalize().Elem()
			normals = append(normals, xPos, yPos, zPos)
			tCoords = append(tCoords, xSegment, ySegment)
		}
	}
	// two triangles (a and b) per grid cell; rows are xSegments+1 wide
	for i := 0; i < ySegments; i++ {
		for j := 0; j < xSegments; j++ {
			a1 := uint32(i*(xSegments+1) + j)
			a2 := uint32((i+1)*(xSegments+1) + j)
			a3 := uint32((i+1)*(xSegments+1) + j + 1)
			b1 := uint32(i*(xSegments+1) + j)
			b2 := uint32((i+1)*(xSegments+1) + j + 1)
			b3 := uint32(i*(xSegments+1) + j + 1)
			indices = append(indices, a1, a2, a3, b1, b2, b3)
		}
	}
	return
}
// Circle builds a unit disc in the XZ plane (normal +Y): a center vertex,
// a triangle fan for the innermost ring, then zSegments concentric rings
// of quads (xSegments slices around, zSegments rings outward).
// DRAW WITH: gl.DrawElements(gl.TRIANGLES, 3*xSegments* (2*zSegments-1), gl.UNSIGNED_INT, unsafe.Pointer(nil))
func Circle(xSegments, zSegments int) (vertices, normals, tCoords []float32, indices []uint32) {
	// center vertex (index 0)
	vertices = append(vertices, 0, 0, 0)
	normals = append(normals, 0, 1, 0)
	tCoords = append(tCoords, 0, 0)
	// fan connecting the center to the first ring
	for x := 1; x <= xSegments; x++ {
		indices = append(indices, 0, uint32(x), uint32(x+1))
	}
	for z := 1; z <= zSegments; z++ {
		for x := 0; x <= xSegments; x++ {
			// xSegment = angle fraction, zSegment = radius fraction
			xSegment := float32(x) / float32(xSegments)
			zSegment := float32(z) / float32(zSegments)
			xPos := float32(math32.Cos(xSegment*math32.Pi*2.0) * zSegment)
			yPos := float32(0)
			zPos := float32(math32.Sin(xSegment*math32.Pi*2.0) * zSegment)
			vertices = append(vertices, xPos, yPos, zPos)
			normals = append(normals, 0, 1, 0)
			tCoords = append(tCoords, xSegment, zSegment)
			// quad (two triangles) between this ring and the next
			if x < xSegments && z < zSegments {
				indices = append(indices, uint32(
					x+1+((z-1)*(xSegments+1))),
					uint32(x+1+((z)*(xSegments+1))),
					uint32(x+2+((z-1)*(xSegments+1))),
					uint32(x+1+((z)*(xSegments+1))+1),
					uint32(x+2+((z-1)*(xSegments+1))),
					uint32(x+1+((z)*(xSegments+1))),
				)
			}
		}
	}
	return
}
// Cylinder builds a unit-radius cylinder from y=0 to y=1: a bottom disc
// (reused from Circle), the side wall, and a top disc whose indices are
// appended in reverse — presumably so its winding faces outward; verify
// against the renderer's front-face convention. Texture V coordinates are
// budgeted so caps and wall share one vertical strip.
// DRAW WITH: gl.DrawElements(gl.TRIANGLES, 6 * xSegments * (ySegments + 2*zSegments - 1), gl.UNSIGNED_INT, unsafe.Pointer(nil))
func Cylinder(ySegments, xSegments, zSegments int) (vertices, normals, tCoords []float32, indices []uint32) {
	// bottom cap straight from Circle; only its V texcoords are rescaled
	verticesB, normalsB, tCoordsB, indicesB := Circle(xSegments, zSegments)
	vertices, normals, indices = verticesB, normalsB, indicesB
	indexOfset := uint32(len(vertices) / 3)
	tCoordsOfset := (float32(zSegments) / float32(ySegments+2*zSegments))
	for i, v := range tCoordsB {
		if i%2 == 1 {
			tCoords = append(tCoords, 1-tCoordsOfset*v)
		} else {
			tCoords = append(tCoords, v)
		}
	}
	// side wall: rings of vertices from y=0 to y=1
	for y := 0; y <= ySegments; y++ {
		for x := 0; x <= xSegments; x++ {
			xSegment := float32(x) / float32(xSegments)
			ySegment := float32(y) / float32(ySegments)
			xPos := float32(math32.Cos(xSegment * math32.Pi * 2.0))
			yPos := ySegment
			zPos := float32(math32.Sin(xSegment * math32.Pi * 2.0))
			vertices = append(vertices, xPos, yPos, zPos)
			// wall normals point radially outward (y component zeroed)
			xPos, yPos, zPos = mgl32.Vec3{xPos, 0, zPos}.Normalize().Elem()
			normals = append(normals, xPos, yPos, zPos)
			ySegment = (float32(y) + float32(zSegments)) / float32(ySegments+2*zSegments)
			tCoords = append(tCoords, xSegment, 1-ySegment)
		}
	}
	// two triangles per wall cell, offset past the bottom-cap vertices
	for i := 0; i < ySegments; i++ {
		for j := 0; j < xSegments; j++ {
			a1 := uint32(i*(xSegments+1)+j) + indexOfset
			a2 := uint32((i+1)*(xSegments+1)+j) + indexOfset
			a3 := uint32((i+1)*(xSegments+1)+j+1) + indexOfset
			b1 := uint32(i*(xSegments+1)+j) + indexOfset
			b2 := uint32((i+1)*(xSegments+1)+j+1) + indexOfset
			b3 := uint32(i*(xSegments+1)+j+1) + indexOfset
			indices = append(indices, a1, a2, a3, b1, b2, b3)
		}
	}
	// top cap: same circle lifted to y=1, indices reversed
	indexOfset = uint32(len(vertices) / 3)
	for i := len(indicesB) - 1; i >= 0; i-- {
		indices = append(indices, indicesB[i]+indexOfset)
	}
	for i, v := range verticesB {
		if i%3 == 1 {
			vertices = append(vertices, 1)
		} else {
			vertices = append(vertices, v)
		}
	}
	for i, v := range tCoordsB {
		if i%2 == 1 {
			tCoords = append(tCoords, (float32(zSegments)/float32(ySegments))*v)
		} else {
			tCoords = append(tCoords, v)
		}
	}
	// NOTE(review): top-cap normals are copied from the bottom cap (0,1,0),
	// so both caps share the same normal direction — confirm this is intended.
	normals = append(normals, normalsB...)
	return
}
// Cone builds a cone with unit base radius (a Circle at y=0) tapering to
// a point at y=1. The ring radius shrinks linearly with height.
// NOTE(review): side normals are appended as (xPos, 1, zPos) without
// normalization — confirm the shading pipeline normalizes them.
func Cone(ySegments, xSegments, zSegments int) (vertices, normals, tCoords []float32, indices []uint32) {
	// base disc from Circle; only its V texcoords are rescaled
	verticesB, normalsB, tCoordsB, indicesB := Circle(xSegments, zSegments)
	vertices, normals, indices = verticesB, normalsB, indicesB
	indexOfset := uint32(len(vertices) / 3)
	tCoordsOfset := (float32(zSegments) / float32(ySegments+zSegments))
	for i, v := range tCoordsB {
		if i%2 == 1 {
			tCoords = append(tCoords, 1-tCoordsOfset*v)
		} else {
			tCoords = append(tCoords, v)
		}
	}
	// side surface: rings shrinking toward the apex at y=1
	for y := 0; y <= ySegments; y++ {
		for x := 0; x <= xSegments; x++ {
			xSegment := float32(x) / float32(xSegments)
			ySegment := float32(y) / float32(ySegments)
			xPos := float32(math32.Cos(xSegment*math32.Pi*2.0) * (1 - ySegment))
			yPos := ySegment
			zPos := float32(math32.Sin(xSegment*math32.Pi*2.0) * (1 - ySegment))
			vertices = append(vertices, xPos, yPos, zPos)
			normals = append(normals, xPos, 1, zPos)
			ySegment = (float32(y) + float32(zSegments)) / float32(ySegments+zSegments)
			tCoords = append(tCoords, xSegment, 1-ySegment)
		}
	}
	// two triangles per side cell, offset past the base-disc vertices
	for i := 0; i < ySegments; i++ {
		for j := 0; j < xSegments; j++ {
			a1 := uint32(i*(xSegments+1)+j) + indexOfset
			a2 := uint32((i+1)*(xSegments+1)+j) + indexOfset
			a3 := uint32((i+1)*(xSegments+1)+j+1) + indexOfset
			b1 := uint32(i*(xSegments+1)+j) + indexOfset
			b2 := uint32((i+1)*(xSegments+1)+j+1) + indexOfset
			b3 := uint32(i*(xSegments+1)+j+1) + indexOfset
			indices = append(indices, a1, a2, a3, b1, b2, b3)
		}
	}
	return
}
// Square builds a flat, upward-facing grid of quads with spacing
// segmentLength, centered on the origin in the XZ plane.
// NOTE(review): the code mixes xSegments and ySegments — the inner loop
// runs over xSegments but the index stride and loop guards use
// ySegments+1, and tCoords divide h by ySegments / v by xSegments. This
// only produces a consistent mesh when xSegments == ySegments; verify
// with a non-square grid before relying on it.
// DRAW WITH: gl.DrawElements(gl.TRIANGLES, xSegments*ySegments*6, gl.UNSIGNED_INT, unsafe.Pointer(nil))
func Square(ySegments, xSegments int, segmentLength float32) (vertices, normals, tCoords []float32, indices []uint32) {
	// half-extents used to center the grid on the origin
	yOfset := (float32(ySegments) * segmentLength) / 2
	xOfset := (float32(xSegments) * segmentLength) / 2
	for v := 0; v <= ySegments; v++ {
		for h := 0; h <= xSegments; h++ {
			vertices = append(vertices, (float32(h)*segmentLength)-xOfset, 0, (float32(v)*segmentLength)-yOfset)
			normals = append(normals, 0, 1, 0)
			tCoords = append(tCoords, (float32(h) * (1 / float32(ySegments))), (float32(v) * (1 / float32(xSegments))))
			if h < ySegments && v < xSegments {
				indices = append(indices, []uint32{
					uint32(h + (ySegments+1)*v),
					uint32(h + (ySegments+1)*v + (ySegments + 1)),
					uint32(h + (ySegments+1)*v + 1),
					uint32(h + (ySegments+1)*v + (ySegments + 1) + 1),
					uint32(h + (ySegments+1)*v + 1),
					uint32(h + (ySegments+1)*v + (ySegments + 1)),
				}...)
			}
		}
	}
	return
}
// Cube builds position, normal, texture-coordinate and index data for an
// axis-aligned box of dimensions X x Y x Z centered at the origin.
// Each of the six faces has four unique vertices so the normals stay flat.
// DRAW WITH: gl.DrawElements(gl.TRIANGLES, 6*6, gl.UNSIGNED_INT, unsafe.Pointer(nil))
func Cube(X, Y, Z float32) (vertices, normals, tCoords []float32, indices []uint32) {
	vertices = []float32{
		//Front
		-X / 2, -Y / 2, Z / 2,
		X / 2, -Y / 2, Z / 2,
		-X / 2, Y / 2, Z / 2,
		X / 2, Y / 2, Z / 2,
		//Back
		-X / 2, -Y / 2, -Z / 2,
		X / 2, -Y / 2, -Z / 2,
		-X / 2, Y / 2, -Z / 2,
		X / 2, Y / 2, -Z / 2,
		//Right
		X / 2, -Y / 2, -Z / 2,
		X / 2, -Y / 2, Z / 2,
		X / 2, Y / 2, -Z / 2,
		X / 2, Y / 2, Z / 2,
		//Left
		-X / 2, -Y / 2, -Z / 2,
		-X / 2, -Y / 2, Z / 2,
		-X / 2, Y / 2, -Z / 2,
		-X / 2, Y / 2, Z / 2,
		//Top
		-X / 2, Y / 2, Z / 2,
		X / 2, Y / 2, Z / 2,
		-X / 2, Y / 2, -Z / 2,
		X / 2, Y / 2, -Z / 2,
		//Bottom
		-X / 2, -Y / 2, Z / 2,
		X / 2, -Y / 2, Z / 2,
		-X / 2, -Y / 2, -Z / 2,
		X / 2, -Y / 2, -Z / 2,
	}
	// one unit normal per face, in the same order as the faces above
	faceNormals := [][3]float32{
		{0, 0, 1},  // front
		{0, 0, -1}, // back
		{1, 0, 0},  // right
		{-1, 0, 0}, // left
		{0, 1, 0},  // top
		{0, -1, 0}, // bottom
	}
	for face, n := range faceNormals {
		// replicate the face normal on each of its four corners
		for corner := 0; corner < 4; corner++ {
			normals = append(normals, n[0], n[1], n[2])
		}
		// identical UV layout on every face
		tCoords = append(tCoords, 0, 1, 1, 1, 0, 0, 1, 0)
		// two triangles per face
		base := uint32(4 * face)
		indices = append(indices, base, base+1, base+2, base+3, base+2, base+1)
	}
	return
}
package main
import (
"fmt"
"github.com/theatlasroom/advent-of-code/go/2019/intcode"
"github.com/theatlasroom/advent-of-code/go/2019/utils"
)
/**
--- Day 2: 1202 Program Alarm ---
On the way to your gravity assist around the Moon, your ship computer beeps angrily about a "1202 program alarm". On the radio, an Elf is already explaining how to handle the situation: "Don't worry, that's perfectly norma--" The ship computer bursts into flames.
You notify the Elves that the computer's magic smoke seems to have escaped. "That computer ran Intcode programs like the gravity assist program it was working on; surely there are enough spare parts up there to build a new Intcode computer!"
An Intcode program is a list of integers separated by commas (like 1,0,0,3,99). To run one, start by looking at the first integer (called position 0). Here, you will find an opcode - either 1, 2, or 99. The opcode indicates what to do; for example, 99 means that the program is finished and should immediately halt. Encountering an unknown opcode means something went wrong.
Opcode 1 adds together numbers read from two positions and stores the result in a third position. The three integers immediately after the opcode tell you these three positions - the first two indicate the positions from which you should read the input values, and the third indicates the position at which the output should be stored.
For example, if your Intcode computer encounters 1,10,20,30, it should read the values at positions 10 and 20, add those values, and then overwrite the value at position 30 with their sum.
Opcode 2 works exactly like opcode 1, except it multiplies the two inputs instead of adding them. Again, the three integers after the opcode indicate where the inputs and outputs are, not their values.
Once you're done processing an opcode, move to the next one by stepping forward 4 positions.
For example, suppose you have the following program:
1,9,10,3,2,3,11,0,99,30,40,50
For the purposes of illustration, here is the same program split into multiple lines:
1,9,10,3,
2,3,11,0,
99,
30,40,50
The first four integers, 1,9,10,3, are at positions 0, 1, 2, and 3. Together, they represent the first opcode (1, addition), the positions of the two inputs (9 and 10), and the position of the output (3). To handle this opcode, you first need to get the values at the input positions: position 9 contains 30, and position 10 contains 40. Add these numbers together to get 70. Then, store this value at the output position; here, the output position (3) is at position 3, so it overwrites itself. Afterward, the program looks like this:
1,9,10,70,
2,3,11,0,
99,
30,40,50
Step forward 4 positions to reach the next opcode, 2. This opcode works just like the previous, but it multiplies instead of adding. The inputs are at positions 3 and 11; these positions contain 70 and 50 respectively. Multiplying these produces 3500; this is stored at position 0:
3500,9,10,70,
2,3,11,0,
99,
30,40,50
Stepping forward 4 more positions arrives at opcode 99, halting the program.
Here are the initial and final states of a few more small programs:
1,0,0,0,99 becomes 2,0,0,0,99 (1 + 1 = 2).
2,3,0,3,99 becomes 2,3,0,6,99 (3 * 2 = 6).
2,4,4,5,99,0 becomes 2,4,4,5,99,9801 (99 * 99 = 9801).
1,1,1,4,99,5,6,0,99 becomes 30,1,1,4,2,5,6,0,99.
Once you have a working computer, the first step is to restore the gravity assist program (your puzzle input) to the "1202 program alarm" state it had just before the last computer caught fire. To do this, before running the program, replace position 1 with the value 12 and replace position 2 with the value 2. What value is left at position 0 after the program halts?
*/
// main solves Advent of Code 2019 day 2 part 1: load the Intcode program
// from 2.txt, run it, and print the resulting program state.
// NOTE(review): the puzzle text says to set position 1 to 12 and position
// 2 to 2 before running; that substitution is not visible here —
// presumably handled inside intcode.ComputeNextProgram or already applied
// to the stored input. Verify.
func main() {
	input := utils.LoadDataAsString("2.txt")
	program := utils.StrToIntArr(input)
	nextProgram := intcode.ComputeNextProgram(program)
	fmt.Println("Result:\n", nextProgram)
}
package pak
import "fmt"
// Bin is a fixed-size W x H container into which Boxes are packed.
// FreeRectangles tracks the remaining free space as a list of maximal
// free rectangles (MAXRECTS style); Heuristic decides placement.
type Bin struct {
	W, H float64
	Boxes []*Box
	FreeRectangles []*FreeSpaceBox
	Heuristic *Base
}
// NewBin returns an empty bin of the given size. A nil heuristic
// defaults to best-area-fit. Initially the whole bin is one free rectangle.
func NewBin(w float64, h float64, s *Base) *Bin {
	if s == nil {
		s = &Base{&BestAreaFit{}}
	}
	return &Bin{w, h, nil, []*FreeSpaceBox{{W: w, H: h}}, s}
}
// Area returns the total area of the bin.
func (b *Bin) Area() float64 {
	return b.W * b.H
}
// Eficiency returns the percentage of the bin's area covered by packed
// boxes. (The misspelling is part of the exported API; renaming it would
// break callers.)
func (b *Bin) Eficiency() float64 {
	boxesArea := 0.0
	for _, box := range b.Boxes {
		boxesArea += box.Area()
	}
	return boxesArea * 100 / b.Area()
}
// Label returns a short human-readable summary: "WxH efficiency".
func (b *Bin) Label() string {
	return fmt.Sprintf("%.2fx%.2f %.2f", b.W, b.H, b.Eficiency())
}
// Insert tries to place box in the bin using the configured heuristic.
// On success the box is marked packed (by the heuristic), every free
// rectangle it overlaps is split around it, the free list is pruned of
// redundant entries, and true is returned. Already-packed boxes and boxes
// that do not fit return false.
func (b *Bin) Insert(box *Box) bool {
	if box.Packed {
		return false
	}
	b.Heuristic.FindPositionForNewNode(box, b.FreeRectangles)
	if !box.Packed {
		return false
	}
	// split every free rectangle the placed box overlaps; splitFreeNode
	// appends new rectangles at the end, which are not reprocessed
	numRectanglesToProcess := len(b.FreeRectangles)
	i := 0
	for i < numRectanglesToProcess {
		if b.splitFreeNode(b.FreeRectangles[i], box) {
			b.FreeRectangles = append(b.FreeRectangles[:i], b.FreeRectangles[i+1:]...)
			numRectanglesToProcess--
		} else {
			i++
		}
	}
	b.pruneFreeList()
	b.Boxes = append(b.Boxes, box)
	return true
}
// scoreFor evaluates how well box would fit without mutating it: a copy
// is scored against the current free rectangles.
func (b *Bin) scoreFor(box *Box) *Score {
	copyBox := NewBox(box.W, box.H)
	score := b.Heuristic.FindPositionForNewNode(copyBox, b.FreeRectangles)
	return score
}
// isLargerThan reports whether box could fit within the bin's outer
// bounds in either orientation (original or rotated 90 degrees).
func (b *Bin) isLargerThan(box *Box) bool {
	return (b.W >= box.W && b.H >= box.H) || (b.H >= box.W && b.W >= box.H)
}
// splitFreeNode splits freeNode around usedNode if they overlap,
// appending up to four new free rectangles (above, below, left, right of
// the used area). It returns true when freeNode was split and should be
// removed from the free list, false when there was no overlap.
func (b *Bin) splitFreeNode(freeNode *FreeSpaceBox, usedNode *Box) bool {
	// no overlap: nothing to split
	if usedNode.X >= freeNode.X+freeNode.W ||
		usedNode.X+usedNode.W <= freeNode.X ||
		usedNode.Y >= freeNode.Y+freeNode.H ||
		usedNode.Y+usedNode.H <= freeNode.Y {
		return false
	}
	b.trySplitFreeNodeVertically(freeNode, usedNode)
	b.trySplitFreeNodeHorizontally(freeNode, usedNode)
	return true
}
// trySplitFreeNodeVertically records the free strips above and below the
// used box when it overlaps the free rectangle along the x axis.
func (b *Bin) trySplitFreeNodeVertically(freeNode *FreeSpaceBox, usedNode *Box) {
	overlapsHorizontally := usedNode.X < freeNode.X+freeNode.W && usedNode.X+usedNode.W > freeNode.X
	if !overlapsHorizontally {
		return
	}
	b.tryLeaveFreeSpaceAtTop(freeNode, usedNode)
	b.tryLeaveFreeSpaceAtBottom(freeNode, usedNode)
}

// tryLeaveFreeSpaceAtTop appends the strip between the free rectangle's top
// edge and the used box's top edge, if that strip is non-empty.
func (b *Bin) tryLeaveFreeSpaceAtTop(freeNode *FreeSpaceBox, usedNode *Box) {
	if usedNode.Y <= freeNode.Y || usedNode.Y >= freeNode.Y+freeNode.H {
		return
	}
	strip := &FreeSpaceBox{W: freeNode.W, H: usedNode.Y - freeNode.Y, X: freeNode.X, Y: freeNode.Y}
	b.FreeRectangles = append(b.FreeRectangles, strip)
}

// tryLeaveFreeSpaceAtBottom appends the strip between the used box's bottom
// edge and the free rectangle's bottom edge, if that strip is non-empty.
func (b *Bin) tryLeaveFreeSpaceAtBottom(freeNode *FreeSpaceBox, usedNode *Box) {
	usedBottom := usedNode.Y + usedNode.H
	freeBottom := freeNode.Y + freeNode.H
	if usedBottom >= freeBottom {
		return
	}
	strip := &FreeSpaceBox{W: freeNode.W, H: freeBottom - usedBottom, X: freeNode.X, Y: usedBottom}
	b.FreeRectangles = append(b.FreeRectangles, strip)
}
// trySplitFreeNodeHorizontally records the free strips to the left and right
// of the used box when it overlaps the free rectangle along the y axis.
func (b *Bin) trySplitFreeNodeHorizontally(freeNode *FreeSpaceBox, usedNode *Box) {
	overlapsVertically := usedNode.Y < freeNode.Y+freeNode.H && usedNode.Y+usedNode.H > freeNode.Y
	if !overlapsVertically {
		return
	}
	b.tryLeaveFreeSpaceOnLeft(freeNode, usedNode)
	b.tryLeaveFreeSpaceOnRight(freeNode, usedNode)
}

// tryLeaveFreeSpaceOnLeft appends the strip between the free rectangle's
// left edge and the used box's left edge, if that strip is non-empty.
func (b *Bin) tryLeaveFreeSpaceOnLeft(freeNode *FreeSpaceBox, usedNode *Box) {
	if usedNode.X <= freeNode.X || usedNode.X >= freeNode.X+freeNode.W {
		return
	}
	strip := &FreeSpaceBox{W: usedNode.X - freeNode.X, H: freeNode.H, X: freeNode.X, Y: freeNode.Y}
	b.FreeRectangles = append(b.FreeRectangles, strip)
}

// tryLeaveFreeSpaceOnRight appends the strip between the used box's right
// edge and the free rectangle's right edge, if that strip is non-empty.
func (b *Bin) tryLeaveFreeSpaceOnRight(freeNode *FreeSpaceBox, usedNode *Box) {
	usedRight := usedNode.X + usedNode.W
	freeRight := freeNode.X + freeNode.W
	if usedRight >= freeRight {
		return
	}
	strip := &FreeSpaceBox{W: freeRight - usedRight, H: freeNode.H, X: usedRight, Y: freeNode.Y}
	b.FreeRectangles = append(b.FreeRectangles, strip)
}
// pruneFreeList goes through the free rectangle list and removes any
// rectangle fully contained in another (redundant entries).
func (b *Bin) pruneFreeList() {
	i := 0
	for i < len(b.FreeRectangles) {
		j := i + 1
		for j < len(b.FreeRectangles) {
			// Rectangle i lies inside j: drop i, step back so the
			// rectangle shifted into slot i is examined next.
			if b.isContainedIn(b.FreeRectangles[i], b.FreeRectangles[j]) {
				b.FreeRectangles = append(b.FreeRectangles[:i], b.FreeRectangles[i+1:]...)
				i--
				break
			}
			// Rectangle j lies inside i: drop j and keep j in place so
			// the shifted-in rectangle is examined next.
			if b.isContainedIn(b.FreeRectangles[j], b.FreeRectangles[i]) {
				b.FreeRectangles = append(b.FreeRectangles[:j], b.FreeRectangles[j+1:]...)
			} else {
				j++
			}
		}
		i++
	}
}
func (b *Bin) isContainedIn(rectA, rectB *FreeSpaceBox) bool {
return rectA != nil && rectB != nil &&
rectA.X >= rectB.X && rectA.Y >= rectB.Y &&
rectA.X+rectA.W <= rectB.X+rectB.W &&
rectA.Y+rectA.H <= rectB.Y+rectB.H
} | bin.go | 0.593727 | 0.459804 | bin.go | starcoder |
package gozxing
type Binarizer interface {
GetLuminanceSource() LuminanceSource
/**
* Converts one row of luminance data to 1 bit data. May actually do the conversion, or return
* cached data. Callers should assume this method is expensive and call it as seldom as possible.
* This method is intended for decoding 1D barcodes and may choose to apply sharpening.
* For callers which only examine one row of pixels at a time, the same BitArray should be reused
* and passed in with each call for performance. However it is legal to keep more than one row
* at a time if needed.
*
* @param y The row to fetch, which must be in [0, bitmap height)
* @param row An optional preallocated array. If null or too small, it will be ignored.
* If used, the Binarizer will call BitArray.clear(). Always use the returned object.
* @return The array of bits for this row (true means black).
* @throws NotFoundException if row can't be binarized
*/
GetBlackRow(y int, row *BitArray) (*BitArray, error)
/**
* Converts a 2D array of luminance data to 1 bit data. As above, assume this method is expensive
* and do not call it repeatedly. This method is intended for decoding 2D barcodes and may or
* may not apply sharpening. Therefore, a row from this matrix may not be identical to one
* fetched using getBlackRow(), so don't mix and match between them.
*
* @return The 2D array of bits for the image (true means black).
* @throws NotFoundException if image can't be binarized to make a matrix
*/
GetBlackMatrix() (*BitMatrix, error)
/**
* Creates a new object with the same type as this Binarizer implementation, but with pristine
* state. This is needed because Binarizer implementations may be stateful, e.g. keeping a cache
* of 1 bit data. See Effective Java for why we can't use Java's clone() method.
*
* @param source The LuminanceSource this Binarizer will operate on.
* @return A new concrete Binarizer implementation object.
*/
CreateBinarizer(source LuminanceSource) Binarizer
GetWidth() int
GetHeight() int
} | binarizr.go | 0.887339 | 0.640003 | binarizr.go | starcoder |
package algorithm
import (
"math"
)
// ALVNode is a node of an AVL (self-balancing binary search) tree.
// (The type name keeps the original "ALV" spelling for API compatibility.)
type ALVNode struct {
	Data   int32
	LNode  *ALVNode // left child, or nil
	RNode  *ALVNode // right child, or nil
	Height int16    // height of the subtree rooted here (leaf == 0)
}

// PreorderTraversal returns the values of the subtree rooted at root.
// NOTE(review): despite its name this performs an IN-order traversal
// (left, root, right), so for a search tree the result is sorted ascending;
// the exported name is kept for API compatibility. A nil root now yields
// nil instead of panicking.
func PreorderTraversal(root *ALVNode) []int32 {
	if root == nil {
		return nil
	}
	var list []int32
	list = append(list, PreorderTraversal(root.LNode)...)
	list = append(list, root.Data)
	list = append(list, PreorderTraversal(root.RNode)...)
	return list
}
// Insert adds data into the AVL tree rooted at root and returns the (possibly
// new) root of the rebalanced subtree. Duplicate values are ignored.
// The rebalancing inspects the balance factors of the root and the child on
// the insertion side to pick a single or double rotation.
func Insert(data int32, root *ALVNode) *ALVNode {
	// Empty subtree: create a fresh leaf.
	if root == nil {
		return &ALVNode{
			Data: data,
			LNode: nil,
			RNode: nil,
			Height: 0,
		}
	}
	if data < root.Data {
		// Insert into the left subtree, then rebalance if needed.
		root.LNode = Insert(data, root.LNode)
		root.setHeight()
		rootBF := root.getNodeBF()
		lNodeBF := root.LNode.getNodeBF()
		if math.Abs(float64(rootBF)) > 1 {
			// Same-sign balance factors -> single rotation;
			// opposite signs -> double rotation (rotate child first).
			sumBF := rootBF + lNodeBF
			if sumBF > 2 || sumBF < -2 {
				if rootBF > 0 {
					root = rightRotate(root)
				} else {
					root = leftRotate(root)
				}
			} else {
				if rootBF > 0 {
					root.LNode = leftRotate(root.LNode)
					root = rightRotate(root)
				} else {
					root.LNode = rightRotate(root.LNode)
					root = leftRotate(root)
				}
			}
		}
	}
	if data > root.Data {
		// Insert into the right subtree; mirror image of the branch above.
		root.RNode = Insert(data, root.RNode)
		rootBF := root.getNodeBF()
		rNodeBF := root.RNode.getNodeBF()
		if math.Abs(float64(rootBF)) > 1 {
			sumBF := rootBF + rNodeBF
			if sumBF > 2 || sumBF < -2 {
				if rootBF > 0 {
					root = rightRotate(root)
				} else {
					root = leftRotate(root)
				}
			} else {
				if rootBF > 0 {
					root.RNode = leftRotate(root.RNode)
					root = rightRotate(root)
				} else {
					root.RNode = rightRotate(root.RNode)
					root = leftRotate(root)
				}
			}
		}
	}
	// Keep this node's cached height in sync after any structural change.
	root.setHeight()
	return root
}
// getNodeBF returns the balance factor of n: the height of the left subtree
// minus the height of the right subtree, where a missing child counts as -1.
func (n *ALVNode) getNodeBF() int16 {
	left, right := int16(-1), int16(-1)
	if n.LNode != nil {
		left = n.LNode.Height
	}
	if n.RNode != nil {
		right = n.RNode.Height
	}
	return left - right
}
// rightRotate rotates the subtree rooted at node to the right and returns the
// new subtree root (the former left child). Heights are refreshed bottom-up.
func rightRotate(node *ALVNode) *ALVNode {
	pivot := node.LNode
	node.LNode = pivot.RNode
	pivot.RNode = node
	node.setHeight()
	pivot.setHeight()
	return pivot
}

// leftRotate rotates the subtree rooted at node to the left and returns the
// new subtree root (the former right child). Heights are refreshed bottom-up.
func leftRotate(node *ALVNode) *ALVNode {
	pivot := node.RNode
	node.RNode = pivot.LNode
	pivot.LNode = node
	node.setHeight()
	pivot.setHeight()
	return pivot
}
func (n *ALVNode) setHeight() {
if n.LNode == nil && n.RNode == nil {
n.Height = 0
} else if n.LNode == nil {
n.Height = n.RNode.Height + 1
} else if n.RNode == nil {
n.Height = n.LNode.Height + 1
} else {
n.Height = int16(math.Max(float64(n.LNode.Height), float64(n.RNode.Height))) + 1
}
} | algorithm/alvtree.go | 0.517571 | 0.463869 | alvtree.go | starcoder |
package iso20022
// PaymentInstrument12Choice is a choice between types of payment instrument,
// i.e. cheque, credit transfer, direct debit, investment account or payment
// card. Exactly one of the fields is expected to be populated.
type PaymentInstrument12Choice struct {

	// Electronic money product that provides the cardholder with a portable and specialised computer device, which typically contains a microprocessor.
	PaymentCardDetails *PaymentCard2 `xml:"PmtCardDtls"`

	// Payment instrument between a debtor and a creditor, which flows through one or more financial institutions or systems.
	CreditTransferDetails *CreditTransfer6 `xml:"CdtTrfDtls"`

	// Instruction, initiated by the creditor, to debit a debtor's account in favour of the creditor. A direct debit can be pre-authorised or not. In most countries, authorisation is in the form of a mandate between the debtor and creditor.
	DirectDebitDetails *DirectDebitMandate4 `xml:"DrctDbtDtls"`

	// Written order on which instructions are given to an account holder (a financial institution) to pay a stated sum to a named recipient (the payee).
	ChequeDetails *Cheque3 `xml:"ChqDtls"`

	// Cheque drawn by a bank on itself or its agent. A person who owes money to another buys the draft from a bank for cash and hands it to the creditor.
	BankersDraftDetails *Cheque3 `xml:"BkrsDrftDtls"`

	// Part of the investment account to or from which cash entries are made.
	CashAccountDetails *InvestmentAccount20 `xml:"CshAcctDtls"`
}
func (p *PaymentInstrument12Choice) AddPaymentCardDetails() *PaymentCard2 {
p.PaymentCardDetails = new(PaymentCard2)
return p.PaymentCardDetails
}
func (p *PaymentInstrument12Choice) AddCreditTransferDetails() *CreditTransfer6 {
p.CreditTransferDetails = new(CreditTransfer6)
return p.CreditTransferDetails
}
func (p *PaymentInstrument12Choice) AddDirectDebitDetails() *DirectDebitMandate4 {
p.DirectDebitDetails = new(DirectDebitMandate4)
return p.DirectDebitDetails
}
func (p *PaymentInstrument12Choice) AddChequeDetails() *Cheque3 {
p.ChequeDetails = new(Cheque3)
return p.ChequeDetails
}
func (p *PaymentInstrument12Choice) AddBankersDraftDetails() *Cheque3 {
p.BankersDraftDetails = new(Cheque3)
return p.BankersDraftDetails
}
func (p *PaymentInstrument12Choice) AddCashAccountDetails() *InvestmentAccount20 {
p.CashAccountDetails = new(InvestmentAccount20)
return p.CashAccountDetails
} | PaymentInstrument12Choice.go | 0.639511 | 0.468851 | PaymentInstrument12Choice.go | starcoder |
package shortestpath
import (
"context"
"math"
"github.com/PacktPublishing/Hands-On-Software-Engineering-with-Golang/Chapter08/bspgraph"
"github.com/PacktPublishing/Hands-On-Software-Engineering-with-Golang/Chapter08/bspgraph/message"
"golang.org/x/xerrors"
)
// Calculator implements a shortest path calculator from a single vertex to
// all other vertices in a connected graph.
type Calculator struct {
	g *bspgraph.Graph // BSP graph the computation runs on
	srcID string // source vertex of the current/last run
	executorFactory bspgraph.ExecutorFactory // allows tests to inject a custom executor
}
// NewCalculator returns a new shortest path calculator instance backed by a
// BSP graph with the given number of compute workers.
func NewCalculator(numWorkers int) (*Calculator, error) {
	calc := &Calculator{executorFactory: bspgraph.NewExecutor}
	g, err := bspgraph.NewGraph(bspgraph.GraphConfig{
		ComputeFn:      calc.findShortestPath,
		ComputeWorkers: numWorkers,
	})
	if err != nil {
		return nil, err
	}
	calc.g = g
	return calc, nil
}
// Close cleans up any allocated graph resources.
func (c *Calculator) Close() error {
	return c.g.Close()
}

// SetExecutorFactory configures the calculator to use a custom executor
// factory when CalculateShortestPaths is invoked.
func (c *Calculator) SetExecutorFactory(factory bspgraph.ExecutorFactory) {
	c.executorFactory = factory
}

// AddVertex inserts a new vertex with the specified ID into the graph.
func (c *Calculator) AddVertex(id string) {
	c.g.AddVertex(id, nil)
}
// AddEdge creates a directed edge from srcID to dstID with the specified
// cost. Negative costs are rejected with an error.
func (c *Calculator) AddEdge(srcID, dstID string, cost int) error {
	if cost >= 0 {
		return c.g.AddEdge(srcID, dstID, cost)
	}
	return xerrors.Errorf("negative edge costs not supported")
}
// CalculateShortestPaths finds the shortest path costs from srcID to all
// other vertices in the graph, running supersteps until no vertex remains
// active.
func (c *Calculator) CalculateShortestPaths(ctx context.Context, srcID string) error {
	c.srcID = srcID
	callbacks := bspgraph.ExecutorCallbacks{
		PostStepKeepRunning: func(_ context.Context, _ *bspgraph.Graph, activeInStep int) (bool, error) {
			return activeInStep != 0, nil
		},
	}
	return c.executorFactory(c.g, callbacks).RunToCompletion(ctx)
}
// ShortestPathTo returns the shortest path from the source vertex to the
// specified destination together with its cost. Must be called after
// CalculateShortestPaths has completed.
// NOTE(review): if dstID was never reached, prevInPath is empty and the
// walk below will hit a missing map entry — confirm callers only query
// reachable vertices.
func (c *Calculator) ShortestPathTo(dstID string) ([]string, int, error) {
	vertMap := c.g.Vertices()
	v, exists := vertMap[dstID]
	if !exists {
		return nil, 0, xerrors.Errorf("unknown vertex with ID %q", dstID)
	}

	var (
		minDist = v.Value().(*pathState).minDist
		path []string
	)
	// Walk the prevInPath chain back from the destination to the source.
	for ; v.ID() != c.srcID; v = vertMap[v.Value().(*pathState).prevInPath] {
		path = append(path, v.ID())
	}
	path = append(path, c.srcID)

	// Reverse in place to get path from src->dst
	for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
		path[i], path[j] = path[j], path[i]
	}
	return path, minDist, nil
}
// PathCostMessage is used to advertise the cost of a path through a vertex.
type PathCostMessage struct {
	// The ID of the vertex this cost announcement originates from.
	FromID string

	// The cost of the path from this vertex to the source vertex via FromID.
	Cost int
}

// Type returns the type of this message.
func (pc PathCostMessage) Type() string { return "cost" }

// pathState is the per-vertex value tracked by the algorithm.
type pathState struct {
	minDist int // best known distance from the source so far
	prevInPath string // ID of the predecessor on that best path
}
func (c *Calculator) findShortestPath(g *bspgraph.Graph, v *bspgraph.Vertex, msgIt message.Iterator) error {
if g.Superstep() == 0 {
v.SetValue(&pathState{
minDist: int(math.MaxInt64),
})
}
minDist := int(math.MaxInt64)
if v.ID() == c.srcID {
minDist = 0
}
// Process cost messages from neighbors and update minDist if
// we receive a better path announcement.
var via string
for msgIt.Next() {
m := msgIt.Message().(*PathCostMessage)
if m.Cost < minDist {
minDist = m.Cost
via = m.FromID
}
}
// If a better path was found through this vertex, announce it
// to all neighbors so they can update their own scores.
st := v.Value().(*pathState)
if minDist < st.minDist {
st.minDist = minDist
st.prevInPath = via
for _, e := range v.Edges() {
costMsg := &PathCostMessage{
FromID: v.ID(),
Cost: minDist + e.Value().(int),
}
if err := g.SendMessage(e.DstID(), costMsg); err != nil {
return err
}
}
}
// We are done unless we receive a better path announcement.
v.Freeze()
return nil
} | Chapter08/shortestpath/path.go | 0.814201 | 0.417628 | path.go | starcoder |
package swea
import (
"strconv"
"time"
)
// Language is the ID of a language supported by the API ("en" or "sv").
type Language string
const (
	// English is the english language ID.
	English = Language("en")
	// Swedish is the swedish language ID.
	Swedish = Language("sv")
)
// SearchGroupSeries identifies a searchable series within a group.
type SearchGroupSeries struct {
	GroupID string
	SeriesID string
}
// CrossPair are the series to compare in a currency exchange.
type CrossPair struct {
	BaseSeriesID string
	CounterSeriesID string
}
// CrossRateInfo is an exchange rate between two currencies.
type CrossRateInfo struct {
	Base string
	Counter string
	Date time.Time
	Period string
	Value string
	Average string
}
// DayInfo represents a date in the context of the central bank.
type DayInfo struct {
	Date time.Time
	Week int
	WeekYear int
	IsBankDay bool // true when the central bank treats this date as a bank day
}
// CrossSeriesInfo represents an interest or currency conversion series information.
type CrossSeriesInfo struct {
	ID string
	Name string
	Description string
}
// SeriesInfo represents an interest or currency conversion series information.
type SeriesInfo struct {
	ID string
	GroupID string
	Name string
	Description string
	LongDescription string
	Source string
	Type string
	From time.Time // first date covered by the series
	To time.Time // last date covered by the series
}
// RateInfo represents information about a rate for a series in a period.
type RateInfo struct {
	GroupID string
	GroupName string
	SeriesID string
	SeriesName string
	Date time.Time
	Period string
	Average string
	Min string
	Max string
	Ultimo string
	Value string
}
// GroupInfo represents a grouping of interest or exchange rates.
type GroupInfo struct {
	ID string
	ParentID string
	Name string
	Description string
}
// GroupsInfo represents several groups' group info; it implements
// sort.Interface ordering by numeric group ID.
type GroupsInfo []GroupInfo
// Len implements sort.Interface.
func (gis GroupsInfo) Len() int {
	return len(gis)
}
// Swap implements sort.Interface.
func (gis GroupsInfo) Swap(i, j int) {
	gis[i], gis[j] = gis[j], gis[i]
}
// Less orders groups by numeric ID. Parse errors are deliberately ignored,
// so a non-numeric ID sorts as 0.
func (gis GroupsInfo) Less(i, j int) bool {
	a, _ := strconv.Atoi(gis[i].ID)
	b, _ := strconv.Atoi(gis[j].ID)
	return a < b
}
// GetCalendarDaysRequest represents the parameters to get all business days between two dates.
type GetCalendarDaysRequest struct {
	From time.Time
	To time.Time
}
// GetCalendarDaysResponse contains the calendar day information for the
// requested date range.
type GetCalendarDaysResponse struct {
	From time.Time
	To time.Time
	Days []DayInfo
}
// GetAllCrossNamesRequest represents the parameters to get all the exchange rate series suitable for cross rate names.
type GetAllCrossNamesRequest struct {
	Language Language
}
// GetAllCrossNamesResponse contains the currency conversion series.
type GetAllCrossNamesResponse struct {
	Language Language
	Series []CrossSeriesInfo
}
// GetCrossRatesRequest represents the parameters to get all exchange rates.
type GetCrossRatesRequest struct {
	CrossPairs []CrossPair
	From time.Time
	To time.Time
	Language Language
	AggregateMethod string
}
// GetCrossRatesResponse contains exchange rates.
type GetCrossRatesResponse struct {
	CrossRates []CrossRateInfo
	CrossPairs []CrossPair
	From time.Time
	To time.Time
	Language Language
	AggregateMethod string
}
// GetInterestAndExchangeRatesRequest represents the parameters to get exchange and interest rates.
type GetInterestAndExchangeRatesRequest struct {
	Series []SearchGroupSeries
	From time.Time
	To time.Time
	Language Language
	AggregateMethod string
	Average bool
	Min bool
	Max bool
	Ultimo bool
}
// GetInterestAndExchangeRatesResponse contains interest and exchange rates.
type GetInterestAndExchangeRatesResponse struct {
	Rates []RateInfo
	Series []SearchGroupSeries
	From time.Time
	To time.Time
	Language Language
	AggregateMethod string
	Average bool
	Min bool
	Max bool
	Ultimo bool
}
// GetInterestAndExchangeGroupNamesRequest represents the parameters to get a list of all groups.
type GetInterestAndExchangeGroupNamesRequest struct {
	Language Language
}
// GetInterestAndExchangeGroupNamesResponse contains all groups.
type GetInterestAndExchangeGroupNamesResponse struct {
	Groups []GroupInfo
	Language Language
}
// GetInterestAndExchangeNamesRequest represents the parameters to get all series for a group.
type GetInterestAndExchangeNamesRequest struct {
	GroupID string
	Language Language
}
// GetInterestAndExchangeNamesResponse contains all series for a group
type GetInterestAndExchangeNamesResponse struct {
Series []SeriesInfo
GroupID string
Language Language
} | swea/types.go | 0.54819 | 0.452717 | types.go | starcoder |
package curves
import (
"github.com/wieku/danser-go/framework/math/vector"
)
// BSpline is a spline through control points, internally represented as a
// chain of cubic Bezier segments parameterised by the input timing values.
type BSpline struct {
	points []vector.Vector2f // reordered control points (see NewBSpline)
	timing []float32 // timestamps rebased so timing[0] == 0
	subPoints []vector.Vector2f // Bezier control points, 3 per segment plus endpoints
	path []*Bezier // one cubic Bezier per segment
	ApproxLength float32 // total timing span; used as the parameter range
}
// NewBSpline builds a spline from control points and their timestamps.
// points1 is expected to carry its boundary tangent points at indices 1 and
// len-2 (they are moved to the end of the working slice below). Interior
// tangents are solved from a tridiagonal system (Thomas algorithm forward
// sweep in A/Bi, back-substitution into d), then each consecutive pair of
// points becomes one cubic Bezier segment. Segments longer than 600ms get
// proportionally scaled tangents.
// NOTE(review): assumes len(points1) >= 4 and len(timing) == number of
// control points — confirm with callers; shorter inputs will panic.
func NewBSpline(points1 []vector.Vector2f, timing []int64) *BSpline {
	pointsLen := len(points1)

	// Reorder: drop the two tangent helpers from the middle and park them
	// at the end of the slice (indices n and n+1 below).
	points := make([]vector.Vector2f, 0)
	points = append(points, points1[0])
	points = append(points, points1[2:pointsLen-2]...)
	points = append(points, points1[pointsLen-1], points1[1], points1[pointsLen-2])

	// Rebase timestamps so the spline parameter starts at 0.
	newTiming := make([]float32, len(timing))
	for i := range newTiming {
		newTiming[i] = float32(timing[i] - timing[0])
	}

	spline := &BSpline{points: points, timing: newTiming}

	n := len(points) - 2

	// d holds the tangent at each interior point; endpoints come from the
	// parked helper points.
	d := make([]vector.Vector2f, n)
	d[0] = points[n].Sub(points[0])
	d[n-1] = points[n+1].Sub(points[n-1]).Scl(-1)

	// Forward elimination of the tridiagonal system (coefficients 1,4,1).
	A := make([]vector.Vector2f, len(points))
	Bi := make([]float32, len(points))

	Bi[1] = -0.25
	A[1] = points[2].Sub(points[0]).Sub(d[0]).Scl(1.0 / 4)
	for i := 2; i < n-1; i++ {
		Bi[i] = -1 / (4 + Bi[i-1])
		A[i] = points[i+1].Sub(points[i-1]).Sub(A[i-1]).Scl(-1 * Bi[i])
	}

	// Back-substitution yields the interior tangents.
	for i := n - 2; i > 0; i-- {
		d[i] = A[i].Add(d[i+1].Scl(Bi[i]))
	}

	// Per-segment durations; long segments (> 600) stretch their tangents.
	converted := make([]float32, len(timing))
	for i, time := range timing {
		if i > 0 {
			converted[i-1] = float32(time - timing[i-1])
		}
	}

	firstMul := float32(1.0)
	if converted[0] > 600 {
		firstMul = converted[0] / 2
	}

	secondMul := float32(1.0)

	// Emit Bezier control points: anchor, out-handle, in-handle, anchor, ...
	spline.subPoints = append(spline.subPoints, points[0], points[0].Add(d[0].SclOrDenorm(firstMul)))

	for i := 1; i < n-1; i++ {
		if converted[i] > 600 {
			secondMul = converted[i] / 2
		} else {
			secondMul = 1.0
		}

		spline.subPoints = append(spline.subPoints, points[i].Sub(d[i].SclOrDenorm(firstMul)), points[i], points[i].Add(d[i].SclOrDenorm(secondMul)))
		firstMul = secondMul
	}

	spline.subPoints = append(spline.subPoints, points[len(points)-3].Sub(d[n-1].SclOrDenorm(firstMul)), points[len(points)-3])

	spline.ApproxLength = spline.timing[len(spline.timing)-1]

	// Each run of 4 control points (sharing anchors) is one cubic Bezier.
	for i := 0; i < len(spline.subPoints)-3; i += 3 {
		c := NewBezierNA(spline.subPoints[i : i+4])
		spline.path = append(spline.path, c)
	}

	return spline
}
// PointAt evaluates the spline at normalized parameter t in [0, 1], mapping
// it onto the timing axis, locating the segment containing that timestamp,
// and evaluating that segment's Bezier with a renormalized local parameter.
func (spline *BSpline) PointAt(t float32) vector.Vector2f {
	target := spline.ApproxLength * t

	// Find the last segment whose start time is <= target; fall back to the
	// final segment when none qualifies.
	idx := len(spline.timing) - 2
	for i := len(spline.timing) - 2; i >= 0; i-- {
		if spline.timing[i] <= target {
			idx = i
			break
		}
	}

	segment := spline.path[idx]
	local := (target - spline.timing[idx]) / (spline.timing[idx+1] - spline.timing[idx])
	return segment.PointAt(local)
}
func (spline *BSpline) GetLength() float32 {
return spline.ApproxLength
}
func (spline *BSpline) GetStartAngle() float32 {
return spline.points[0].AngleRV(spline.PointAt(1.0 / spline.ApproxLength))
}
func (spline *BSpline) GetEndAngle() float32 {
return spline.points[len(spline.points)-1].AngleRV(spline.PointAt((spline.ApproxLength - 1) / spline.ApproxLength))
} | framework/math/curves/bspline.go | 0.675336 | 0.535888 | bspline.go | starcoder |
package math
import (
"errors"
"fmt"
"math"
"math/rand"
"sync"
"github.com/antongulenko/golib"
"github.com/bitflow-stream/go-bitflow/bitflow"
"github.com/bitflow-stream/go-bitflow/script/reg"
log "github.com/sirupsen/logrus"
)
// RegisterSphere registers the "sphere" processing step: every sample is the
// center of a multi-dimensional sphere and a number of random points on its
// hull are emitted. The radius comes either from the 'radius' parameter or
// from the metric at index 'radius_metric' — exactly one must be set.
func RegisterSphere(b reg.ProcessorRegistry) {
	create := func(p *bitflow.SamplePipeline, params map[string]interface{}) error {
		radius := params["radius"].(float64)
		hasRadius := radius > 0
		radiusMetric := params["radius_metric"].(int)
		// A metric index of 0 is valid; only negative values (the -1
		// default) mean "not set". The previous '> 0' check made
		// radius_metric=0 unusable.
		hasRadiusMetric := radiusMetric >= 0
		if hasRadius == hasRadiusMetric {
			return errors.New("Need either 'radius' or 'radius_metric' parameter")
		}
		p.Add(&SpherePoints{
			RandomSeed:   int64(params["seed"].(int)),
			NumPoints:    params["points"].(int),
			RadiusMetric: radiusMetric,
			Radius:       radius,
		})
		return nil
	}
	b.RegisterStep("sphere", create,
		"Treat every sample as the center of a multi-dimensional sphere, and output a number of random points on the hull of the resulting sphere. The radius can either be fixed or given as one of the metrics").
		Required("points", reg.Int()).
		Optional("seed", reg.Int(), 1).
		Optional("radius", reg.Float(), 0.0).
		Optional("radius_metric", reg.Int(), -1)
}
// SpherePoints is a sample processor that emits, for every incoming sample,
// NumPoints random points on the hull of a sphere centered at that sample.
type SpherePoints struct {
	bitflow.NoopProcessor
	RandomSeed int64 // seed for the deterministic random generator
	NumPoints int // number of hull points emitted per input sample

	RadiusMetric int // If >= 0, use to get radius. Otherwise, use Radius field.
	Radius float64 // fixed radius, used when RadiusMetric < 0

	rand *rand.Rand // initialized in Start from RandomSeed
}
// Start seeds the random generator before delegating to the wrapped
// processor.
func (p *SpherePoints) Start(wg *sync.WaitGroup) golib.StopChan {
	source := rand.NewSource(p.RandomSeed)
	p.rand = rand.New(source)
	return p.NoopProcessor.Start(wg)
}
// Sample emits NumPoints random points on the hull of a sphere centered at
// the incoming sample. When RadiusMetric >= 0 the radius is read from that
// metric, which is then removed from the header and values; otherwise the
// fixed Radius field is used.
func (p *SpherePoints) Sample(sample *bitflow.Sample, header *bitflow.Header) error {
	if len(header.Fields) < 1 {
		return errors.New("Cannot calculate sphere points with 0 metrics")
	}
	// Validate the metric index only when it is in use: the previous check
	// also rejected RadiusMetric < 0, which made the fixed-radius mode
	// (RadiusMetric == -1) always fail.
	if p.RadiusMetric >= len(sample.Values) {
		return fmt.Errorf("SpherePoints.RadiusMetrics = %v out of range, sample has %v metrics", p.RadiusMetric, len(sample.Values))
	}

	// If we use a metric as radius, remove it from the header
	values := sample.Values
	radius := p.Radius
	if p.RadiusMetric >= 0 {
		radius = float64(values[p.RadiusMetric])
		fields := header.Fields
		copy(fields[p.RadiusMetric:], fields[p.RadiusMetric+1:])
		fields = fields[:len(fields)-1]
		header = header.Clone(fields)
		copy(values[p.RadiusMetric:], values[p.RadiusMetric+1:])
		values = values[:len(values)-1]
	}

	for i := 0; i < p.NumPoints; i++ {
		out := sample.Clone()
		out.Values = p.randomSpherePoint(radius, values)
		if err := p.NoopProcessor.Sample(out, header); err != nil {
			return err
		}
	}
	return nil
}
// randomSpherePoint returns a uniformly random-angled point on the hull of
// the sphere with the given radius around center, using n-dimensional
// spherical coordinates:
// https://de.wikipedia.org/wiki/Kugelkoordinaten#Verallgemeinerung_auf_n-dimensionale_Kugelkoordinaten
func (p *SpherePoints) randomSpherePoint(radius float64, center []bitflow.Value) []bitflow.Value {
	// Precompute sin/cos of one random angle per dimension.
	sinValues := make([]float64, len(center))
	cosValues := make([]float64, len(center))
	for i := range center {
		angle := p.randomAngle()
		sinValues[i] = math.Sin(angle)
		cosValues[i] = math.Cos(angle)
	}

	// Calculate point for a sphere around the point (0, 0, 0, ...)
	// Coordinate i is radius * sin(a_0)*...*sin(a_{i-1}) * cos(a_i),
	// with the final coordinate omitting the cosine factor.
	result := make([]bitflow.Value, len(center), cap(center))
	for i := range center {
		coordinate := radius
		for j := 0; j < i; j++ {
			coordinate *= sinValues[j]
		}
		if i < len(center)-1 {
			coordinate *= cosValues[i]
		}
		result[i] = bitflow.Value(coordinate)
	}

	// Sanity check: the squared coordinates must sum to radius^2 (within a
	// relative tolerance); log a warning otherwise.
	var sum float64
	for _, v := range result {
		sum += float64(v) * float64(v)
	}
	radSq := radius * radius
	if math.Abs(sum-radSq) > (sum * 0.0000000001) {
		log.Warnf("Illegal sphere point. Radius: %v. Diff: %v. Point: %v", radius, math.Abs(sum-radSq), result)
	}

	// Move the point so it is part of the sphere around the given center
	for i, val := range center {
		result[i] += val
	}
	return result
}
func (p *SpherePoints) randomAngle() float64 {
return p.rand.Float64() * 2 * math.Pi // Random angle in 0..90 degrees
} | steps/math/sphere.go | 0.738763 | 0.460774 | sphere.go | starcoder |
package gft
import (
"image"
"image/draw"
"github.com/infastin/gul/gm32"
"github.com/infastin/gul/tools"
"github.com/srwiley/rasterx"
)
// cropRectangleFilter crops a rectangular region out of an image. All
// coordinates are fractions of the image size in [0, 1].
type cropRectangleFilter struct {
	startX, startY float32 // top-left corner of the crop, as a fraction
	width, height float32 // crop size, as a fraction
	mergeCount uint // number of filters merged into this one (for Undo)
}
// Bounds returns the output size: the source dimensions scaled by the crop
// fractions, rounded down.
func (f *cropRectangleFilter) Bounds(src image.Rectangle) image.Rectangle {
	b := src.Bounds()
	outW := int(gm32.Floor(float32(b.Dx()) * f.width))
	outH := int(gm32.Floor(float32(b.Dy()) * f.height))
	return image.Rect(0, 0, outW, outH)
}
// Apply copies the cropped region of src into dst, processing rows in
// parallel when requested.
func (f *cropRectangleFilter) Apply(dst draw.Image, src image.Image, parallel bool) {
	srcb := src.Bounds()
	srcWidth := srcb.Dx()
	srcHeight := srcb.Dy()
	dstb := dst.Bounds()
	// Top-left corner of the crop window in source coordinates.
	startX := int(gm32.Floor(float32(srcWidth)*f.startX)) + srcb.Min.X
	startY := int(gm32.Floor(float32(srcHeight)*f.startY)) + srcb.Min.Y
	pixGetter := newPixelGetter(src)
	pixSetter := newPixelSetter(dst)
	// procs == 0 presumably lets Parallelize pick the worker count — confirm
	// in tools.Parallelize.
	procs := 1
	if parallel {
		procs = 0
	}
	tools.Parallelize(procs, dstb.Min.Y, dstb.Max.Y, 1, func(start, end int) {
		for yi := start; yi < end; yi++ {
			for xi := dstb.Min.X; xi < dstb.Max.X; xi++ {
				// Map the destination pixel back into the source window.
				x2 := xi - dstb.Min.X + startX
				y2 := yi - dstb.Min.Y + startY
				pix := pixGetter.getPixel(x2, y2)
				pixSetter.setPixel(xi, yi, pix)
			}
		}
	})
}
// CanMerge reports whether filter can be merged into this one; only other
// crop-rectangle filters qualify.
func (f *cropRectangleFilter) CanMerge(filter Filter) bool {
	if _, ok := filter.(*cropRectangleFilter); ok {
		return true
	}
	return false
}

// Merge composes filter's crop on top of this one. The incoming crop is
// relative to this filter's output, so its start is scaled by the current
// size before the sizes are multiplied — the order of updates matters.
func (f *cropRectangleFilter) Merge(filter Filter) {
	filt := filter.(*cropRectangleFilter)
	f.startX += filt.startX * f.width
	f.startY += filt.startY * f.height
	f.width *= filt.width
	f.height *= filt.height
	f.mergeCount++
}

// CanUndo reports whether filter can be undone from this one; only other
// crop-rectangle filters qualify.
func (f *cropRectangleFilter) CanUndo(filter Filter) bool {
	if _, ok := filter.(*cropRectangleFilter); ok {
		return true
	}
	return false
}

// Undo reverses a prior Merge of filter: Merge's updates inverted in the
// opposite order (sizes are restored first, so the offset subtraction uses
// the same scale factor Merge used). Returns true once every merged filter
// has been undone (mergeCount reaches zero).
func (f *cropRectangleFilter) Undo(filter Filter) bool {
	filt := filter.(*cropRectangleFilter)
	f.height /= filt.height
	f.width /= filt.width
	f.startX -= filt.startX * f.width
	f.startY -= filt.startY * f.height
	f.mergeCount--
	return f.mergeCount == 0
}
// Skip reports whether this filter is an identity crop (whole image).
func (f *cropRectangleFilter) Skip() bool {
	identity := f.startX == 0 && f.startY == 0 && f.height == 1 && f.width == 1
	return identity
}

// Copy returns an independent clone of the filter with a fresh merge count.
func (f *cropRectangleFilter) Copy() Filter {
	clone := *f
	clone.mergeCount = 0
	return &clone
}
// CropRectangle crops an image starting at (startX, startY) with a rectangle
// of size (width, height). All parameters are fractions of the image size and
// are clamped to [0, 1]; the size is additionally clipped to the image border.
// Returns nil (no-op) for the identity crop.
// Example: to crop the bottom-right quarter, use position (0.5, 0.5) and
// size (0.5, 0.5).
func CropRectangle(startX, startY, width, height float32) MergingFilter {
	if startX == 0 && startY == 0 && height == 1 && width == 1 {
		return nil
	}

	startX = gm32.Clamp(startX, 0, 1)
	startY = gm32.Clamp(startY, 0, 1)
	width = gm32.Clamp(width, 0, 1)
	height = gm32.Clamp(height, 0, 1)

	// Clip the crop so it never extends past the right/bottom edge.
	width = gm32.Min(width, 1-startX)
	height = gm32.Min(height, 1-startY)

	return &cropRectangleFilter{
		startX:     startX,
		startY:     startY,
		width:      width,
		height:     height,
		mergeCount: 1,
	}
}
// cropEllipseFilter crops an image with an ellipse. All values are fractions
// of the image size in [0, 1].
type cropEllipseFilter struct {
	cx, cy float32 // ellipse center, as a fraction
	rx, ry float32 // ellipse radii, as a fraction
}
// Bounds returns the output size: the ellipse's half-extents clipped to the
// image borders, scaled by the source dimensions and rounded.
func (f *cropEllipseFilter) Bounds(src image.Rectangle) image.Rectangle {
	b := src.Bounds()
	srcW := float32(b.Dx())
	srcH := float32(b.Dy())

	left := gm32.Min(f.rx, f.cx)
	right := gm32.Min(f.rx, 1-f.cx)
	top := gm32.Min(f.ry, f.cy)
	bottom := gm32.Min(f.ry, 1-f.cy)

	outW := int(gm32.Round(srcW * (left + right)))
	outH := int(gm32.Round(srcH * (top + bottom)))
	return image.Rect(0, 0, outW, outH)
}
// Apply rasterizes the ellipse into dst, sampling pixels from src; pixels
// outside the ellipse are left untouched.
// NOTE(review): the parallel flag is ignored by this filter.
func (f *cropEllipseFilter) Apply(dst draw.Image, src image.Image, parallel bool) {
	srcb := src.Bounds()
	srcWidth := float32(srcb.Dx())
	srcHeight := float32(srcb.Dy())
	// Where the cropped output's origin falls inside the source, after
	// clipping the ellipse's half-extents to the image border.
	leftX := gm32.Min(f.rx, f.cx)
	topY := gm32.Min(f.ry, f.cy)
	startX := f.cx - leftX
	startY := f.cy - topY
	offset := image.Point{
		X: int(gm32.Round(startX * srcWidth)),
		Y: int(gm32.Round(startY * srcHeight)),
	}
	dstb := dst.Bounds()
	dstWidth := dstb.Dx()
	dstHeight := dstb.Dy()
	// Ellipse parameters in source pixel coordinates, relative to the
	// cropped origin.
	cx := float64(srcWidth * (f.cx - startX))
	cy := float64(srcHeight * (f.cy - startY))
	rx := float64(srcWidth * f.rx)
	ry := float64(srcHeight * f.ry)
	scanner := rasterx.NewScannerGV(dstWidth, dstHeight, dst, dstb)
	scanner.Source = src
	scanner.Offset = offset
	filler := rasterx.NewFiller(dstWidth, dstHeight, scanner)
	rasterx.AddEllipse(cx, cy, rx, ry, 0, filler)
	filler.Draw()
}
// Crops an image with an ellipse of a radii (rx, ry) with the center at a given position (cx, cy).
// The position and radii parameters must be in the range [0, 1].
func CropEllipse(cx, cy, rx, ry float32) Filter {
maxRadius := gm32.Sqrt(cx*cx + cy*cy)
maxRadius = gm32.RoundN(maxRadius, 2)
if rx >= maxRadius && ry >= maxRadius {
return nil
}
cx = gm32.Clamp(cx, 0, 1)
cy = gm32.Clamp(cy, 0, 1)
rx = gm32.Clamp(rx, 0, 1)
ry = gm32.Clamp(ry, 0, 1)
return &cropEllipseFilter{
cx: cx,
cy: cy,
rx: rx,
ry: ry,
}
} | gft/crop.go | 0.822368 | 0.533641 | crop.go | starcoder |
package machine
import (
"fmt"
"github.com/pseidemann/turingmachine/machine/tape"
)
// A Movement describes in which direction the machine's head will move.
type Movement int
// All possible movements of the machine's head.
const (
MoveNone Movement = iota
MoveLeft
MoveRight
)
// Machine is a Turing machine. The exported fields define the machine; the
// unexported fields hold the run-time configuration (reset via ResetWithTape).
type Machine struct {
	States []string // all machine states
	TapeAlphabet []rune // symbols that may appear on the tape
	BlankSymbol rune // symbol filling unused tape cells
	InputSymbols []rune // subset of the tape alphabet valid as input
	InitialState string // state the machine starts in
	FinalStates []string // accepting states
	TransFunc TransitionFunction // transition rules
	state string // current state
	tape tape.Tape // current tape with head position
}
// TransitionFunction is a map which describes what the machine should do
// next for a given (state, symbol) pair; a missing entry means the machine
// halts.
type TransitionFunction map[TransIn]TransOut
// TransIn is an input for the transition function.
type TransIn struct {
	State string // current machine state
	Symbol rune // symbol currently under the head
}
// TransOut is an output from the transition function.
type TransOut struct {
	State string // next state
	Symbol rune // symbol written to the current cell
	Move Movement // head movement to perform
}
// ResetWithTape restarts the machine: a fresh tape holding input and the
// initial state.
func (m *Machine) ResetWithTape(input string) {
	m.tape = tape.New(m.BlankSymbol, input)
	m.state = m.InitialState
}
// Step performs one transition: it looks up the rule for the current state
// and the symbol under the head, writes the rule's symbol, moves the head
// and switches state. It returns false when no rule exists (the machine
// halts) or when a final state is reached.
func (m *Machine) Step() (notHalted bool) {
	next := m.TransFunc[TransIn{State: m.state, Symbol: m.tape.GetHead()}]
	if next.isUndefined() {
		return false
	}
	m.tape.SetHead(next.Symbol)
	m.move(next.Move)
	m.state = next.State
	return !m.isFinalState(m.state)
}
// GetConfiguration renders the machine's current state, head symbol and
// tape as a human-readable string.
func (m *Machine) GetConfiguration() string {
	head := string(m.tape.GetHead())
	return fmt.Sprintf("state:%s head:%s tape:%s", m.state, head, m.tape.GetContentWithHead())
}
// Accepted returns true if the machine is currently in an accepting state.
func (m *Machine) Accepted() bool {
	return m.isFinalState(m.state)
}
// GetTape returns the tape content with leading and trailing blank symbols removed.
func (m *Machine) GetTape() string {
	return m.tape.GetContent()
}
// isFinalState reports whether state is one of the machine's accepting
// states.
func (m *Machine) isFinalState(state string) bool {
	for _, final := range m.FinalStates {
		if final == state {
			return true
		}
	}
	return false
}
// move shifts the head in the given direction; MoveNone leaves it in place.
func (m *Machine) move(move Movement) {
	if move == MoveLeft {
		m.tape.MoveLeft()
		return
	}
	if move == MoveRight {
		m.tape.MoveRight()
	}
}
func (t *TransOut) isUndefined() bool {
return t.State == ""
} | machine/machine.go | 0.686895 | 0.45847 | machine.go | starcoder |
package assert
import (
"errors"
"fmt"
"strings"
"testing"
"github.com/kode4food/ale/data"
"github.com/stretchr/testify/assert"
)
type (
// Any is the friendly name for a generic interface
Any interface{}
// Wrapper wraps the testify assertions module in order to perform
// checking and conversion that is system-specific
Wrapper struct {
*testing.T
*assert.Assertions
}
)
// Error messages
const (
ErrInvalidTestExpression = "invalid test expression: %v"
ErrProperErrorNotRaised = "proper error not raised"
ErrValueNotFound = "value not found in object: %s"
)
// New instantiates a new Wrapper instance from the specified test
func New(t *testing.T) *Wrapper {
return &Wrapper{
T: t,
Assertions: assert.New(t),
}
}
// String tests a Value for string equality
func (w *Wrapper) String(expect string, expr Any) {
w.Helper()
switch s := expr.(type) {
case string:
w.Assertions.Equal(expect, s)
case data.Value:
w.Assertions.Equal(expect, s.String())
default:
panic(fmt.Errorf(ErrInvalidTestExpression, expr))
}
}
// Number tests a Value for numeric equality
func (w *Wrapper) Number(expect float64, expr Any) {
w.Helper()
switch n := expr.(type) {
case float64:
w.Assertions.Equal(expect, n)
case int:
w.Assertions.Equal(int64(expect), int64(n))
case data.Number:
w.Assertions.Equal(data.EqualTo, data.Float(expect).Cmp(n))
default:
panic(fmt.Errorf(ErrInvalidTestExpression, expr))
}
}
// Equal tests a Value for some kind of equality. Performs checks to do so
func (w *Wrapper) Equal(expect Any, expr Any) {
w.Helper()
switch expect := expect.(type) {
case data.String:
w.String(string(expect), expr)
case data.Number:
num := expr.(data.Number)
w.Assertions.Equal(data.EqualTo, expect.Cmp(num))
case data.Value:
if expr, ok := expr.(data.Value); ok {
w.True(expect.Equal(expr))
} else {
w.String(expect.String(), expr)
}
default:
w.Assertions.Equal(expect, expr)
}
}
// True tests a Value for boolean true
func (w *Wrapper) True(expr Any) {
w.Helper()
if b, ok := expr.(data.Bool); ok {
w.Assertions.True(bool(b))
return
}
w.Assertions.True(expr.(bool))
}
// Truthy tests a Value for system-specific Truthy
func (w *Wrapper) Truthy(expr data.Value) {
w.Helper()
w.Assertions.True(data.Truthy(expr))
}
// False tests a Value for boolean false
func (w *Wrapper) False(expr Any) {
w.Helper()
if b, ok := expr.(data.Bool); ok {
w.Assertions.False(bool(b))
return
}
w.Assertions.False(expr.(bool))
}
// Falsey tests a Value for system-specific Falsey
func (w *Wrapper) Falsey(expr data.Value) {
w.Helper()
w.Assertions.False(data.Truthy(expr))
}
// Contains check if the expected string is in the provided Value
func (w *Wrapper) Contains(expect string, expr data.Value) {
w.Helper()
val := expr.String()
w.Assertions.True(strings.Contains(val, expect))
}
// NotContains checks if the expected string is not in the provided Value
func (w *Wrapper) NotContains(expect string, expr data.Value) {
w.Helper()
val := expr.String()
w.Assertions.False(strings.Contains(val, expect))
}
// Identical tests that two values are referentially identical
func (w *Wrapper) Identical(expect Any, expr Any) {
w.Helper()
w.Assertions.Equal(expect, expr)
}
// NotIdentical tests that two values are not referentially identical
func (w *Wrapper) NotIdentical(expect Any, expr Any) {
w.Helper()
w.Assertions.NotEqual(expect, expr)
}
// Compare tests if the Comparison of two Numbers is correct
func (w *Wrapper) Compare(c data.Comparison, l data.Number, r data.Number) {
w.Helper()
w.Assertions.Equal(c, l.Cmp(r))
}
// ExpectPanic is used with a defer to make sure an error was triggered
func (w *Wrapper) ExpectPanic(errStr string) {
w.Helper()
if rec := recover(); rec != nil {
if re, ok := rec.(error); ok {
for e := errors.Unwrap(re); e != nil; e = errors.Unwrap(e) {
re = e
}
recStr := re.Error()
w.True(strings.HasPrefix(recStr, errStr))
return
}
}
panic(ErrProperErrorNotRaised)
}
// ExpectProgrammerError is used with a defer to make sure a programmer error
// was triggered
func (w *Wrapper) ExpectProgrammerError(errStr string) {
w.Helper()
if rec := recover(); rec != nil {
if recStr, ok := rec.(string); ok {
w.Equal(errStr, recStr)
return
}
}
panic(ErrProperErrorNotRaised)
}
// ExpectNoPanic is used with defer to make sure no error occurs
func (w *Wrapper) ExpectNoPanic() {
w.Helper()
rec := recover()
w.Nil(rec)
}
// MustGet retrieves a Value from a Mapped or explodes
func (w *Wrapper) MustGet(m data.Mapped, k data.Value) data.Value {
if v, ok := m.Get(k); ok {
return v
}
panic(fmt.Errorf(ErrValueNotFound, k))
} | internal/assert/wrapper.go | 0.704973 | 0.606877 | wrapper.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"regexp"
"strconv"
"strings"
)
/**
--- Day 4: Passport Processing ---
You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world.
It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary.
Due to some questionable network security, you realize you might be able to solve both of these problems at the same time.
The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows:
byr (Birth Year)
iyr (Issue Year)
eyr (Expiration Year)
hgt (Height)
hcl (Hair Color)
ecl (Eye Color)
pid (Passport ID)
cid (Country ID)
Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines.
Here is an example batch file containing four passports:
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field).
The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid.
The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid.
According to the above rules, your improved system would report 2 valid passports.
Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid?
*/
// " byr:1960\nhgt:183cm pid:764315947 eyr:2030\nhcl:#ceb3a1 ecl:brn\n-------------\n"
type passport struct {
byr []string // (Birth Year)
iyr []string // (Issue Year)
eyr []string // (Expiration Year)
hgt []string // (Height)
hcl []string // (Hair Color)
ecl []string // (Eye Color)
pid []string // (Passport ID)
// we ignore cid entirely
}
var byrRE = regexp.MustCompile(`byr:(#?\w+)`)
var iyrRE = regexp.MustCompile(`iyr:(#?\w+)`)
var eyrRE = regexp.MustCompile(`eyr:(#?\w+)`)
var hgtRE = regexp.MustCompile(`hgt:(#?\w+)`) // cm|inch
var hclRE = regexp.MustCompile(`hcl:(#?\w+)`)
var eclRE = regexp.MustCompile(`ecl:(#?\w+)`)
var pidRE = regexp.MustCompile(`pid:(#?\w+)`)
func day4_part1() {
contents := getFilesContents("day05.input")
passports := strings.Split(contents, "\n\n")
validPassportsCount := 0
for _, pass := range passports {
fmt.Println("-------------\n", pass)
data := passport{
byr: byrRE.FindStringSubmatch(pass),
iyr: iyrRE.FindStringSubmatch(pass),
eyr: eyrRE.FindStringSubmatch(pass),
hgt: hgtRE.FindStringSubmatch(pass),
hcl: hclRE.FindStringSubmatch(pass),
ecl: eclRE.FindStringSubmatch(pass),
pid: pidRE.FindStringSubmatch(pass),
}
fmt.Print(data)
if data.byr != nil &&
data.iyr != nil &&
data.eyr != nil &&
data.hgt != nil &&
data.hcl != nil &&
data.ecl != nil &&
data.pid != nil {
validPassportsCount++
fmt.Print(" --- passport is valid \n")
} else {
fmt.Print(" --- passport is NOT valid --- \n")
}
}
fmt.Println("Found a total of", validPassportsCount, "valid passports.")
}
/**
--- Part Two ---
The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick!
You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation:
byr (Birth Year) - four digits; at least 1920 and at most 2002.
iyr (Issue Year) - four digits; at least 2010 and at most 2020.
eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
hgt (Height) - a number followed by either cm or in:
If cm, the number must be at least 150 and at most 193.
If in, the number must be at least 59 and at most 76.
hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
pid (Passport ID) - a nine-digit number, including leading zeroes.
cid (Country ID) - ignored, missing or not.
Your job is to count the passports where all required fields are both present and valid according to the above rules.
*/
var byr2RE = regexp.MustCompile(`byr:(\d+)(\W|$)`)
var iyr2RE = regexp.MustCompile(`iyr:(\d+)(\W|$)`)
var eyr2RE = regexp.MustCompile(`eyr:(\d+)(\W|$)`)
var hgt2RE = regexp.MustCompile(`hgt:(\d+)(in|cm)(\W|$)`) // cm|inch
var hcl2RE = regexp.MustCompile(`hcl:#(([a-f]|[0-9]){6})(\W|$)`)
var ecl2RE = regexp.MustCompile(`ecl:(\w+)(\W|$)`)
var pid2RE = regexp.MustCompile(`pid:(\d{9})(\D|$)`)
func day4_part2() {
file, err := os.Open("day04.input")
if err != nil {
log.Fatal(err)
}
defer func() {
if err = file.Close(); err != nil {
log.Fatal(err)
}
}()
filebyt, err := ioutil.ReadAll(file)
filestr := string(filebyt)
passports := strings.Split(filestr, "\n\n")
validPassportsCount := 0
for _, pass := range passports {
fmt.Println("-------------\n", pass)
data := passport{
byr: byr2RE.FindStringSubmatch(pass), // 4 digits 1920-2002
iyr: iyr2RE.FindStringSubmatch(pass), // 4 digits 2010-2020
eyr: eyr2RE.FindStringSubmatch(pass), // 4 digits 2020-2030
hgt: hgt2RE.FindStringSubmatch(pass), // \d cm 150-193, in 59-76
hcl: hcl2RE.FindStringSubmatch(pass), // # + 6 [0-9]|[a-f]
ecl: ecl2RE.FindStringSubmatch(pass), // [amb blu brn gry grn hzl oth] + len(3)
pid: pid2RE.FindStringSubmatch(pass), // 9 \d including leading 0's
}
fmt.Println(data)
if data.byr == nil ||
data.iyr == nil ||
data.eyr == nil ||
data.hgt == nil ||
data.hcl == nil ||
data.ecl == nil ||
data.pid == nil {
fmt.Println("Missing information or invalid match")
continue
}
if byr, err := strconv.Atoi(data.byr[1]); err != nil {
log.Panicln(err)
} else {
if byr < 1920 || byr > 2002 {
fmt.Println("found byr but not in range 1920-2002")
continue
}
}
if iyr, err := strconv.Atoi(data.iyr[1]); err != nil {
log.Panicln(err)
} else {
if iyr < 2010 || iyr > 2020 {
fmt.Println("found iyr but not in range 2010-2020")
continue
}
}
if eyr, err := strconv.Atoi(data.eyr[1]); err != nil {
log.Panicln(err)
} else {
if eyr < 2020 || eyr > 2030 {
fmt.Println("found eyr but not in range 2020-2030")
continue
}
}
if hgt, err := strconv.Atoi(data.hgt[1]); err != nil {
log.Panicln(err)
} else {
if data.hgt[2] == "cm" {
if hgt < 150 || hgt > 193 {
fmt.Println("height in cm but not in range 150-193cm")
continue
}
} else if data.hgt[2] == "in" {
if hgt < 59 || hgt > 76 {
fmt.Println("height in in but not in range 59-76in")
continue
}
} else {
fmt.Println("invalid height")
continue
}
}
ecl := data.ecl[1]
if len(ecl) != 3 {
fmt.Println("found ecl but not 3 chars long")
continue
}
ecls := "amb blu brn gry grn hzl oth"
if !strings.Contains(ecls, ecl) {
fmt.Println("invalid ecl, not in range [amb blu brn gry grn hzl oth]")
continue
}
pid := data.pid[1]
if len(pid) != 9 {
fmt.Println("found pid but not 9 chars long")
continue
}
validPassportsCount++
fmt.Println("VALID")
}
fmt.Println("\nFound a total of", validPassportsCount, "valid passports.\n")
} | day04.go | 0.526099 | 0.41745 | day04.go | starcoder |
package textrank
import (
"math"
"strings"
)
// minWordSentence is the minimum number of words a sentence can have to become
// a node in the graph.
const minWordSentence = 5
// RankSentences ranks the sentences in the given text based on the TextRank
// algorithm and returned a list of the ranked sentences in descending order or
// score.
func RankSentences(text string, iterations int) []string {
graph := &textgraph{}
// Setup graph.
seenNodes := make(map[string]bool) // prevent duplication
for _, token := range tokenizeSentences(text) {
if _, ok := seenNodes[token]; ok {
continue
}
graph.addNode(token, nodeInitialScore)
seenNodes[token] = true
}
linkSentences(graph)
// Score sentence nodes.
for _, node := range *graph {
node.Score = scoreNode(node, iterations)
}
return graph.normalize()
}
// linkSentences links sentence nodes within a graph.
func linkSentences(tg *textgraph) *textgraph {
seenEdges := make(map[[2]string]bool) // prevent duplication
for _, nodeA := range *tg {
for _, nodeB := range *tg {
// Disallow reflexive nodes and duplicate edges.
_, seen := seenEdges[[2]string{nodeA.Text, nodeB.Text}]
if seen || nodeA.Text == nodeB.Text {
continue
}
seenEdges[[2]string{nodeA.Text, nodeB.Text}] = true
seenEdges[[2]string{nodeB.Text, nodeA.Text}] = true
// Connect nodes based on similarity.
if sentenceSimilarity(nodeA.Text, nodeB.Text) > 1 {
nodeA.Edges = append(nodeA.Edges, nodeB)
nodeB.Edges = append(nodeB.Edges, nodeA)
}
}
}
return tg
}
// sentenceSimilarity calculates the similarity between two sentences,
// normalizing for sentence length.
func sentenceSimilarity(a, b string) float64 {
tokensA := tokenizeWords(a)
tokensB := tokenizeWords(b)
if len(tokensA) < minWordSentence || len(tokensB) < minWordSentence {
return 0
}
similarWords := make(map[string]bool)
for _, tokenA := range tokensA {
wordA := strings.ToLower(tokenA)
// Ignore stopwords. Only need to check wordA because if wordA is not a
// stopword and wordB is a stopword, then they are not going to match.
if _, ok := stopwords[wordA]; ok {
continue
}
for _, tokenB := range tokensB {
wordB := strings.ToLower(tokenB)
if strings.Compare(wordA, wordB) == 0 {
similarWords[wordA] = true
}
}
}
numSimilarWords := float64(len(similarWords))
numWordsMult := float64(len(tokensA) * len(tokensB))
if numWordsMult == 1 {
return 0
}
return numSimilarWords / math.Log(numWordsMult)
} | backend/textrank/sentence.go | 0.661923 | 0.445831 | sentence.go | starcoder |
package arrays
// StringIndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func StringIndexOf(slice []string, element string) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// RuneIndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func RuneIndexOf(slice []rune, element rune) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// ByteIndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func ByteIndexOf(slice []byte, element byte) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// UintIndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func UintIndexOf(slice []uint, element uint) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// IntIndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func IntIndexOf(slice []int, element int) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Int8IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Int8IndexOf(slice []int8, element int8) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Uint8IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Uint8IndexOf(slice []uint8, element uint8) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Int16IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Int16IndexOf(slice []int16, element int16) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Uint16IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Uint16IndexOf(slice []uint16, element uint16) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Int32IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Int32IndexOf(slice []int32, element int32) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Uint32IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Uint32IndexOf(slice []uint32, element uint32) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Int64IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Int64IndexOf(slice []int64, element int64) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
}
// Uint64IndexOf returns the index of the given element in the given slice.
// -1 is returned if the element is not in the slice.
func Uint64IndexOf(slice []uint64, element uint64) int {
for i, e := range slice {
if e == element {
return i
}
}
return -1
} | arrays/indexof.go | 0.870046 | 0.590602 | indexof.go | starcoder |
package main
import (
"errors"
"fmt"
"math"
"os"
"time"
)
const (
RSI_PERIODS = 14
)
type StockPrice struct {
open, close, high, low float64
}
type StockInstance struct {
periodTime time.Time
periodType string //This can be one of 1min, 5min, 15min, 30min, 1hr, 2hr, 4hr, 12hr, 1day, 1wk, 1mo, 3mo, 6mo, 1yr
open, close, high, low, percentChange, absoluteChange float64
}
func CalculatePercentLossGain(candlestick *StockPrice) (periodPercentGainedLost float64) {
periodPercentGainedLost = (candlestick.close - candlestick.open) / candlestick.open * 100.0
return periodPercentGainedLost
}
func CalculateTotalLossGain(candlestick *StockPrice) (periodGainedLost float64) {
periodGainedLost = candlestick.close - candlestick.open
return periodGainedLost
}
func CalculateAverageGain(vals []float64) (averageGainLoss float64) {
averageGainLoss = (vals[len(vals)-1] - vals[0]) / float64(len(vals))
return averageGainLoss
}
func RelativeStrengthIndex(prices []float64, period int64, prevAvgGain, prevAvgLoss float64) (relativeStrengthIndex, avgGain, avgLoss float64, err error) {
err = nil
totalGain, totalLoss := 0.0, 0.0
fmt.Printf("length of prices slice: %v\nPrices: %v\n", len(prices), prices)
if int64(len(prices)) < period {
err = errors.New("not enough data points to calculate RSI, previous Avg. Gain, and previous Avg. Loss for given lookback period\n")
return 0.0, 0.0, 0.0, err
} else if int64(len(prices)) == period {
for _, dailyGain := range prices {
if dailyGain <= 0.0 {
totalLoss += math.Abs(dailyGain)
} else {
totalGain += math.Abs(dailyGain)
}
}
avgGain = totalGain / float64(period)
avgLoss = totalLoss / float64(period)
} else {
fmt.Printf("prevAvgGain: %.2f\n", prevAvgGain)
fmt.Printf("prevAvgLoss: %.2f\n", prevAvgLoss)
fmt.Printf("last price in slice: %.2f\n", prices[len(prices)-1])
fmt.Printf("length: %d\n", len(prices))
if prices[len(prices)-1] > 0.0 {
avgGain = (prevAvgGain*(float64(period)-1.0) + math.Abs(prices[len(prices)-1])) / float64(period)
avgLoss = prevAvgLoss * (float64(period) - 1.0) / float64(period)
} else {
avgGain = prevAvgGain * (float64(period) - 1.0) / float64(period)
avgLoss = (prevAvgLoss*(float64(period)-1.0) + math.Abs(prices[len(prices)-1])) / float64(period)
}
}
relativeStrengthIndex = 100.0 - (100 / (1 + avgGain/avgLoss))
return relativeStrengthIndex, avgGain, avgLoss, err
}
func SimpleMovingAverage(vals []float64) (simpleMovingAverage float64, err error) {
var sum = float64(0.0)
err = nil
if len(vals) == 0 {
err = errors.New("Invalid array length of zero")
return 0.00, err
}
for _, price := range vals {
sum += price
}
simpleMovingAverage = sum / float64(len(vals))
if simpleMovingAverage < 0.0 {
err = errors.New("Simple Moving average is below zero. Expected value greater than or equal to zero.")
}
return simpleMovingAverage, err
}
func main() {
defer os.Exit(0)
testPriceArray := []float64{3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0}
testRSIArray := []float64{-2.77, 4.79, 8.6, -0.18, 6.02, 1.23, -16.7, 9.64, 8.68, -0.8, 4.88,
-1.83, -3.96, -9.09, -7.79, 2.3, -1.94, -7.72, 7.67, -9.48, 4.06, -1.22, 1, 4.91, 7.96,
-6.99, 7.09, 4.92, -5.61}
sma, err := SimpleMovingAverage(testPriceArray)
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
}
fmt.Printf("Simple Moving Average: %.2f\n", sma)
//calculate first RSI value using the
rsi, prevAvgGain, prevAvgLoss, err := RelativeStrengthIndex(testRSIArray[0:RSI_PERIODS], RSI_PERIODS, 0.0, 0.0)
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
}
fmt.Printf("Relative Strength Index: %.2f\n", rsi)
fmt.Printf("Previous Average Gain: %.2f\n", prevAvgGain)
fmt.Printf("Previous Average Loss: %.2f\n", prevAvgLoss)
rsi2, prevAvgGain2, prevAvgLoss2, err := RelativeStrengthIndex(testRSIArray[0:RSI_PERIODS+1], RSI_PERIODS, prevAvgGain, prevAvgLoss)
if err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
}
fmt.Printf("Relative Strength Index 2: %.2f\nPrevious Average Gain 2: %.2f\nPrevious Average Loss 2: %.2f\n", rsi2, prevAvgGain2, prevAvgLoss2)
fmt.Printf("==== Blank Lines Denoting Start of Run through RSI Test Array ====\n\n\n\n")
for index, _ := range testRSIArray {
rsi, prevAvgGain, prevAvgLoss, err = RelativeStrengthIndex(testRSIArray[0:index+1], RSI_PERIODS, prevAvgGain, prevAvgLoss)
fmt.Printf("RSI for index %v is %.2f\n", index, rsi)
}
} | stocktools/stockutils.go | 0.575469 | 0.40489 | stockutils.go | starcoder |
package bsegtree
import (
"math"
"sort"
)
type node struct {
from uint64
to uint64
left, right *node
overlap []Interval
}
func (n *node) CompareTo(other Interval) int {
if other.From > n.to || other.To < n.from {
return DISJOINT
}
if other.From <= n.from && other.To >= n.to {
return SUBSET
}
return INTERSECT_OR_SUPERSET
}
func (n *node) Disjoint(from, to uint64) bool {
if from > n.to || to < n.from {
return true
}
return false
}
type Interval struct {
ID int // unique
From uint64
To uint64
}
// Disjoint returns true if Segment does not overlap with interval
func (p Interval) Disjoint(from, to uint64) bool {
if from > p.To || to < p.From {
return true
}
return false
}
// Endpoints returns a slice with all endpoints (sorted, unique)
func Endpoints(base []Interval) (result []uint64, min, max uint64) {
baseLen := len(base)
points := make([]uint64, baseLen*2)
for i, interval := range base {
points[i] = interval.From
points[i+baseLen] = interval.To
}
result = Dedup(points)
min = result[0]
max = result[len(result)-1]
return
}
// Creates a slice of elementary intervals from a slice of (sorted) endpoints
// Input: [p1, p2, ..., pn]
// Output: [{p1 : p1}, {p1 : p2}, {p2 : p2},... , {pn : pn}
func elementaryIntervals(endpoints []uint64) [][2]uint64 {
if len(endpoints) == 1 {
return [][2]uint64{{endpoints[0], endpoints[0]}}
}
intervals := make([][2]uint64, len(endpoints)*2-1)
for i := 0; i < len(endpoints); i++ {
intervals[i*2] = [2]uint64{endpoints[i], endpoints[i]}
if i < len(endpoints)-1 {
intervals[i*2+1] = [2]uint64{endpoints[i], endpoints[i+1]}
}
}
return intervals
}
type endpoints []uint64
func (e endpoints) Len() int {
return len(e)
}
func (e endpoints) Less(i, j int) bool {
return e[i] < e[j]
}
func (e endpoints) Swap(i, j int) {
e[i], e[j] = e[j], e[i]
}
// Dedup removes duplicates from a given slice
func Dedup(e []uint64) []uint64 {
sort.Sort(endpoints(e))
cnt := len(e)
cntDup := 0
for i := 1; i < cnt; i++ {
if e[i] == e[i-1] {
cntDup++
} else {
e[i-cntDup] = e[i]
}
}
return e[:cnt-cntDup]
}
// Inserts interval into given tree structure
func (n *node) insertInterval(i Interval) {
if n.CompareTo(i) == SUBSET {
// interval of node is a subset of the specified interval or equal
if n.overlap == nil {
n.overlap = make([]Interval, 0, 2)
}
n.overlap = append(n.overlap, i)
} else {
if n.left != nil && n.left.CompareTo(i) != DISJOINT {
n.left.insertInterval(i)
}
if n.right != nil && n.right.CompareTo(i) != DISJOINT {
n.right.insertInterval(i)
}
}
}
// round rounds a float64 and cuts it by n.
// n: decimal places.
// e.g.
// f = 1.001, n = 2, return 1.00
func round(f float64, n int) float64 {
pow10n := math.Pow10(n)
return math.Trunc(f*pow10n+0.5) / pow10n
} | helper.go | 0.750553 | 0.423696 | helper.go | starcoder |
package pkg
// Package is a go package. It is the top-level unit of the model, holding
// the type declarations, iterators and API clients that belong together.
type Package struct {
	Qualifier string      // package qualifier used when referring to this package
	Name      string      // name of the package
	BaseURL   string      // base URL of the remote API (NOTE(review): presumably prepended to Method.Path — confirm)
	TypeDecls []TypeDecl  // type declarations contained in the package
	Iters     []Iter      // iterator types contained in the package
	Clients   []Client    // API clients contained in the package
}
// Type is type literal or qualified identifier. It may be used inline, or
// as part of a type declaration.
type Type interface {
Equal(Type) bool
}
// IdentType is a qualified identifier for another type
type IdentType struct {
Qualifier string
Name string
Marshal bool // does this use marshaltext or is it natively represented
}
// Equal implements equality for Types
func (t *IdentType) Equal(o Type) bool {
if ot, ok := o.(*IdentType); ok {
return *t == *ot
}
return false
}
// PointerType is a pointer to another Type.
type PointerType struct {
	Type
}

// Equal reports whether o is a PointerType whose element type equals t's.
func (t *PointerType) Equal(o Type) bool {
	ot, ok := o.(*PointerType)
	return ok && t.Type.Equal(ot.Type)
}
// SliceType is a slice of another Type.
type SliceType struct {
	Type
}

// Equal reports whether o is a SliceType whose element type equals t's.
func (t *SliceType) Equal(o Type) bool {
	ot, ok := o.(*SliceType)
	return ok && t.Type.Equal(ot.Type)
}
// StructType is a struct type, described by its ordered list of fields.
type StructType struct {
	Fields []Field
}

// Equal reports whether o is a StructType with the same fields, in the
// same order, where each pair of fields compares equal.
func (t *StructType) Equal(o Type) bool {
	ot, ok := o.(*StructType)
	if !ok || len(ot.Fields) != len(t.Fields) {
		return false
	}
	for i := range t.Fields {
		if !t.Fields[i].equal(ot.Fields[i]) {
			return false
		}
	}
	return true
}
// IterType is used for return types, marking them as iterators over the
// wrapped element type.
type IterType struct {
	Type
}

// Equal reports whether o is an IterType whose element type equals t's.
func (t *IterType) Equal(o Type) bool {
	ot, ok := o.(*IterType)
	return ok && t.Type.Equal(ot.Type)
}
// MapType is a map from a key Type to a value Type.
type MapType struct {
	Key   Type
	Value Type
}

// Equal reports whether o is a MapType whose key and value types both
// equal t's.
func (t *MapType) Equal(o Type) bool {
	ot, ok := o.(*MapType)
	return ok && t.Key.Equal(ot.Key) && t.Value.Equal(ot.Value)
}
// InterfaceType is the empty interface; all empty interfaces are
// interchangeable, so it carries no data.
type InterfaceType struct{}

// Equal reports whether o is also an InterfaceType.
func (t *InterfaceType) Equal(o Type) bool {
	switch o.(type) {
	case *InterfaceType:
		return true
	default:
		return false
	}
}
// TypeDecl is a type declaration: a named type together with its doc
// comment and underlying Type.
type TypeDecl struct {
	Name    string // declared type name
	Comment string // doc comment for the declaration
	Type    Type   // underlying type being declared
}
// Iter is an iterator over multiple results/pages from a response.
type Iter struct {
	Name   string // name of the iterator type
	Return Type   // type of the elements the iterator yields
}
// Field is a struct field.
type Field struct {
	ID         string     // field identifier
	Type       Type       // field type
	Comment    string     // doc comment for the field
	Orig       string     // optional name of field as it is originally from the spec
	Kind       Kind       // optional. Used for Opts structs
	Collection Collection // optional. Used for Opts structs
}
// equal reports whether two Fields agree on every property, including a
// structural comparison of their Types.
func (f Field) equal(of Field) bool {
	if f.ID != of.ID || f.Comment != of.Comment || f.Orig != of.Orig {
		return false
	}
	if f.Kind != of.Kind || f.Collection != of.Collection {
		return false
	}
	return f.Type.Equal(of.Type)
}
// Client is a struct that holds the methods for communicating with an API
// endpoint.
type Client struct {
	Name        string   // name of the client struct
	Comment     string   // doc comment for the client
	ContextName string   // NOTE(review): presumably the identifier used for context values in generated methods — confirm against the generator
	Methods     []Method // methods exposed by the client
}
// Method is a struct method on a Client for calling a remote API.
type Method struct {
	// Receiver describes the method receiver.
	Receiver struct {
		ID   string // receiver identifier
		Arg  string // NOTE(review): presumably a safe argument name for the receiver (cf. Param.Arg) — confirm
		Type string // receiver type name
	}
	Name       string       // method name
	Params     []Param      // parameters accepted by the method
	Return     []Type       // return types of the method
	Comment    string       // doc comment for the method
	Errors     map[int]Type // Non-success status codes to types. -1 is default.
	HTTPMethod string       // HTTP verb used for the request
	Path       string       // Path to endpoint, in printf format, including base path.
}
// Kind is the kind of parameter; ie where it maps to in the request.
type Kind uint8

// The possible Kinds
const (
	Body   Kind = iota // parameter is carried in the request body
	Query              // parameter is carried in the URL query string
	Path               // parameter is interpolated into the URL path
	Header             // parameter is carried as an HTTP header
	Opts               // Opts struct holding optional values
)
// Collection specifies how to encode slice parameters. The names mirror
// the Swagger/OpenAPI 2.0 collectionFormat values.
type Collection uint8

// The possible Collection formats.
// Note: None is pinned to 0; CSV is declared with iota, which is 1 at that
// position, so the remaining values continue 1, 2, 3, 4, 5 with no overlap.
const (
	None  Collection = 0
	CSV   Collection = iota // comma-separated values
	SSV                     // space-separated values
	TSV                     // tab-separated values
	Pipes                   // pipe-separated values
	Multi                   // parameter repeated once per value
)
// Param is a function parameter.
type Param struct {
	ID         string     // parameter identifier
	Orig       string     // original name, ie for query params or headers
	Arg        string     // argument name, this is to avoid reserved keywords being used
	Type       Type       // parameter type
	Kind       Kind       // where the parameter maps to in the request
	Collection Collection // encoding used when the parameter is a slice
}
package ckks
import (
"fmt"
"math"
"sort"
)
// PrecisionStats is a struct storing statistic about the precision of a
// CKKS plaintext. Deltas are absolute errors between the expected and the
// decoded values; precisions are the corresponding log2(1/delta) figures,
// so MinPrecision derives from MaxDelta and vice versa.
type PrecisionStats struct {
	MaxDelta        Stats // largest absolute error over the compared slots
	MinDelta        Stats // smallest absolute error over the compared slots
	MaxPrecision    Stats // precision corresponding to MinDelta
	MinPrecision    Stats // precision corresponding to MaxDelta
	MeanDelta       Stats // mean absolute error over the compared slots
	MeanPrecision   Stats // precision corresponding to MeanDelta
	MedianDelta     Stats // median absolute error over the compared slots
	MedianPrecision Stats // precision corresponding to MedianDelta
	STDFreq         float64 // standard deviation of the error in the slot (frequency) domain
	STDTime         float64 // standard deviation of the error in the coefficient (time) domain
	// Cumulative distributions of the per-slot real, imaginary and L2
	// precisions, sampled over cdfResol buckets.
	RealDist, ImagDist, L2Dist []struct {
		Prec  float64
		Count int
	}
	cdfResol int // number of buckets used when building the precision CDFs
}
// Stats is a struct storing the real, imaginary and L2 norm (modulus)
// about the precision of a complex value.
type Stats struct {
	Real, Imag, L2 float64 // real-part, imaginary-part and modulus components of a statistic
}
// String implements fmt.Stringer, rendering the precision statistics as a
// human-readable table. Precisions are printed directly (they are already
// log2 figures); the two standard deviations are converted with math.Log2
// at print time.
func (prec PrecisionStats) String() string {
	return fmt.Sprintf(`
┌─────────┬───────┬───────┬───────┐
│ Log2 │ REAL │ IMAG │ L2 │
├─────────┼───────┼───────┼───────┤
│MIN Prec │ %5.2f │ %5.2f │ %5.2f │
│MAX Prec │ %5.2f │ %5.2f │ %5.2f │
│AVG Prec │ %5.2f │ %5.2f │ %5.2f │
│MED Prec │ %5.2f │ %5.2f │ %5.2f │
└─────────┴───────┴───────┴───────┘
Err STD Slots : %5.2f Log2
Err STD Coeffs : %5.2f Log2
`,
		prec.MinPrecision.Real, prec.MinPrecision.Imag, prec.MinPrecision.L2,
		prec.MaxPrecision.Real, prec.MaxPrecision.Imag, prec.MaxPrecision.L2,
		prec.MeanPrecision.Real, prec.MeanPrecision.Imag, prec.MeanPrecision.L2,
		prec.MedianPrecision.Real, prec.MedianPrecision.Imag, prec.MedianPrecision.L2,
		math.Log2(prec.STDFreq),
		math.Log2(prec.STDTime))
}
// GetPrecisionStats generates a PrecisionStats struct from the reference values and the decrypted values.
// vWant.(type) must be either []complex128 or []float64.
// element.(type) must be either *Plaintext, *Ciphertext, []complex128 or []float64. If not *Ciphertext, then decryptor can be nil.
func GetPrecisionStats(params Parameters, encoder Encoder, decryptor Decryptor, vWant, element interface{}, logSlots int, sigma float64) (prec PrecisionStats) {

	// Normalize the tested values to []complex128, decrypting and/or
	// decoding first when an encrypted or encoded element is provided.
	var valuesTest []complex128
	switch element := element.(type) {
	case *Ciphertext:
		valuesTest = encoder.DecodePublic(decryptor.DecryptNew(element), logSlots, sigma)
	case *Plaintext:
		valuesTest = encoder.DecodePublic(element, logSlots, sigma)
	case []complex128:
		valuesTest = element
	case []float64:
		valuesTest = make([]complex128, len(element))
		for i := range element {
			valuesTest[i] = complex(element[i], 0)
		}
	}

	// Normalize the reference values to []complex128.
	var valuesWant []complex128
	switch element := vWant.(type) {
	case []complex128:
		valuesWant = element
	case []float64:
		valuesWant = make([]complex128, len(element))
		for i := range element {
			valuesWant[i] = complex(element[i], 0)
		}
	}

	var deltaReal, deltaImag, deltaL2 float64

	slots := len(valuesWant)

	diff := make([]Stats, slots)

	prec.MaxDelta = Stats{0, 0, 0}
	// NOTE(review): assumes every delta is < 1; larger errors would leave
	// MinDelta stuck at its initial value of 1 — confirm intended.
	prec.MinDelta = Stats{1, 1, 1}
	prec.MeanDelta = Stats{0, 0, 0}

	// Resolution (number of buckets) of the precision CDFs below.
	prec.cdfResol = 500

	prec.RealDist = make([]struct {
		Prec  float64
		Count int
	}, prec.cdfResol)
	prec.ImagDist = make([]struct {
		Prec  float64
		Count int
	}, prec.cdfResol)
	prec.L2Dist = make([]struct {
		Prec  float64
		Count int
	}, prec.cdfResol)

	precReal := make([]float64, len(valuesWant))
	precImag := make([]float64, len(valuesWant))
	precL2 := make([]float64, len(valuesWant))

	// Per-slot absolute errors; precision in bits is log2(1/delta)
	// (a delta of exactly zero yields +Inf).
	for i := range valuesWant {
		deltaReal = math.Abs(real(valuesTest[i]) - real(valuesWant[i]))
		deltaImag = math.Abs(imag(valuesTest[i]) - imag(valuesWant[i]))
		deltaL2 = math.Sqrt(deltaReal*deltaReal + deltaImag*deltaImag)
		precReal[i] = math.Log2(1 / deltaReal)
		precImag[i] = math.Log2(1 / deltaImag)
		precL2[i] = math.Log2(1 / deltaL2)

		diff[i].Real = deltaReal
		diff[i].Imag = deltaImag
		diff[i].L2 = deltaL2

		prec.MeanDelta.Real += deltaReal
		prec.MeanDelta.Imag += deltaImag
		prec.MeanDelta.L2 += deltaL2

		if deltaReal > prec.MaxDelta.Real {
			prec.MaxDelta.Real = deltaReal
		}
		if deltaImag > prec.MaxDelta.Imag {
			prec.MaxDelta.Imag = deltaImag
		}
		if deltaL2 > prec.MaxDelta.L2 {
			prec.MaxDelta.L2 = deltaL2
		}
		if deltaReal < prec.MinDelta.Real {
			prec.MinDelta.Real = deltaReal
		}
		if deltaImag < prec.MinDelta.Imag {
			prec.MinDelta.Imag = deltaImag
		}
		if deltaL2 < prec.MinDelta.L2 {
			prec.MinDelta.L2 = deltaL2
		}
	}

	prec.calcCDF(precReal, prec.RealDist)
	prec.calcCDF(precImag, prec.ImagDist)
	prec.calcCDF(precL2, prec.L2Dist)

	// The largest delta corresponds to the worst (minimum) precision,
	// and the smallest delta to the best (maximum) precision.
	prec.MinPrecision = deltaToPrecision(prec.MaxDelta)
	prec.MaxPrecision = deltaToPrecision(prec.MinDelta)
	prec.MeanDelta.Real /= float64(slots)
	prec.MeanDelta.Imag /= float64(slots)
	prec.MeanDelta.L2 /= float64(slots)
	prec.MeanPrecision = deltaToPrecision(prec.MeanDelta)
	prec.MedianDelta = calcmedian(diff)
	prec.MedianPrecision = deltaToPrecision(prec.MedianDelta)
	prec.STDFreq = encoder.GetErrSTDSlotDomain(valuesWant[:], valuesTest[:], params.DefaultScale())
	prec.STDTime = encoder.GetErrSTDCoeffDomain(valuesWant, valuesTest, params.DefaultScale())
	return prec
}
// deltaToPrecision converts absolute error deltas into bits of precision,
// computing log2(1/delta) component-wise.
func deltaToPrecision(c Stats) Stats {
	bits := func(delta float64) float64 {
		return math.Log2(1 / delta)
	}
	return Stats{Real: bits(c.Real), Imag: bits(c.Imag), L2: bits(c.L2)}
}
// calcCDF fills res with a cumulative distribution of the precision values
// in precs: res[i].Prec is the i-th evenly spaced threshold between the
// minimum and maximum observed precision, and res[i].Count is the number of
// values strictly below that threshold.
// NOTE: panics if precs is empty (sortedPrecs[0] below).
func (prec *PrecisionStats) calcCDF(precs []float64, res []struct {
	Prec  float64
	Count int
}) {
	// Sort a copy so the caller's slice is left untouched.
	sortedPrecs := make([]float64, len(precs))
	copy(sortedPrecs, precs)
	sort.Float64s(sortedPrecs)
	minPrec := sortedPrecs[0]
	maxPrec := sortedPrecs[len(sortedPrecs)-1]
	for i := 0; i < prec.cdfResol; i++ {
		curPrec := minPrec + float64(i)*(maxPrec-minPrec)/float64(prec.cdfResol)
		// countSmaller is the index of the first sorted value >= curPrec,
		// i.e. the count of values strictly smaller than the threshold.
		for countSmaller, p := range sortedPrecs {
			if p >= curPrec {
				res[i].Prec = curPrec
				res[i].Count = countSmaller
				break
			}
		}
	}
}
func calcmedian(values []Stats) (median Stats) {
tmp := make([]float64, len(values))
for i := range values {
tmp[i] = values[i].Real
}
sort.Float64s(tmp)
for i := range values {
values[i].Real = tmp[i]
}
for i := range values {
tmp[i] = values[i].Imag
}
sort.Float64s(tmp)
for i := range values {
values[i].Imag = tmp[i]
}
for i := range values {
tmp[i] = values[i].L2
}
sort.Float64s(tmp)
for i := range values {
values[i].L2 = tmp[i]
}
index := len(values) / 2
if len(values)&1 == 1 || index+1 == len(values) {
return Stats{values[index].Real, values[index].Imag, values[index].L2}
}
return Stats{(values[index].Real + values[index+1].Real) / 2,
(values[index].Imag + values[index+1].Imag) / 2,
(values[index].L2 + values[index+1].L2) / 2}
} | ckks/precision.go | 0.660282 | 0.584271 | precision.go | starcoder |
// This is an example of using type hierarchies with a OOP pattern.
// This is not something we want to do in Go. Go does not have the concept of sub-typing.
// All types are their own and the concepts of base and derived types do not exist in Go.
// This pattern does not provide a good design principle in a Go program.
package main
import "fmt"
// Animal contains all the base fields for animals.
type Animal struct {
	Name     string // the animal's given name
	IsMammal bool   // whether the animal is a mammal
}
// Speak provides generic behavior for all animals and how they speak.
// This is kind of useless because animals themselves cannot speak: the
// behavior cannot meaningfully apply to all animals.
func (a *Animal) Speak() {
	fmt.Printf("UGH! My name is %s , it is %t I am a mammal\n", a.Name, a.IsMammal)
}
// Dog contains everything an Animal is but specific attributes that only a Dog has.
type Dog struct {
	Animal
	PackFactor int // how strongly the dog behaves as part of a pack
}
// Speak knows how to speak like a dog.
func (d *Dog) Speak() {
	fmt.Printf(
		"Woof! My name is %s , it is %t I am a mammal with a pack factor of %d\n",
		d.Name, d.IsMammal, d.PackFactor,
	)
}
// Cat contains everything an Animal is but specific attributes that only a Cat has.
type Cat struct {
	Animal
	ClimbFactor int // how capable the cat is at climbing
}
// Speak knows how to speak like a cat.
func (c *Cat) Speak() {
	fmt.Printf(
		"Meow! My name is %s , it is %t I am a mammal with a climb factor of %d\n",
		c.Name, c.IsMammal, c.ClimbFactor,
	)
}
func main() {
	// It's all fine until this one. This code will not compile, and that is
	// the point of the example: the failure below is intentional.
	// Here, we try to group the Cat and Dog based on the fact that they are
	// Animals — we are trying to leverage sub-typing in Go. However, Go
	// doesn't have it.
	// Go doesn't let us group things by a common DNA (a shared base type).
	// We need to stop designing APIs around the idea that types share a
	// common DNA, because focusing only on what a type *is* limits who it
	// can be grouped with.
	// Sub-typing doesn't promote diversity: it locks a type into a small
	// set it can be grouped with. When we focus on behavior instead, we
	// open up the entire world to us.
	animals := []Animal{

		// Create a Dog by initializing its Animal parts and then its specific Dog attributes.
		Dog{
			Animal: Animal{
				Name:     "Fido",
				IsMammal: true,
			},
			PackFactor: 5,
		},

		// Create a Cat by initializing its Animal parts and then its specific Cat attributes.
		Cat{
			Animal: Animal{
				Name:     "Milo",
				IsMammal: true,
			},
			ClimbFactor: 4,
		},
	}

	// Have the Animals speak.
	for _, animal := range animals {
		animal.Speak()
	}
}
// ----------
// Conclusion
// ----------
// This code smells bad because:
// - The Animal type is providing an abstraction layer of reusable state.
// - The program never needs to create or solely use a value of type Animal.
// - The implementation of the Speak method for the Animal type is a generalization.
// - The Speak method for the Animal type is never going to be called. | go/design/grouping_types_1.go | 0.634656 | 0.419172 | grouping_types_1.go | starcoder |
package openapi
import (
"encoding/json"
)
// QueryCollectionRequest A request to perform a search using a pipeline.
type QueryCollectionRequest struct {
	// Pipeline optionally selects the pipeline to run; omitted from the
	// JSON payload when nil.
	Pipeline *QueryCollectionRequestPipeline `json:"pipeline,omitempty"`
	// The initial values for the variables the pipeline operates on and transforms throughout its steps. The most important variable is `q` which is the query the user entered, for example: ```json { \"q\": \"search terms\" } ``` To paginate through results, set the variables `page` and `resultsPerPage`, for example: ```json { \"q\": \"search terms\", \"page\": 5, \"resultsPerPage\": 20 } ``` To sort results, set the variable `sort` to the name of one of your collection's schema fields, for example: ```json { \"q\": \"search terms\", \"sort\": \"name\" } ``` To sort in reverse, prefix the schema field with a minus sign `-`, for example: ```json { \"q\": \"search terms\", \"sort\": \"-name\" } ```
	Variables map[string]map[string]interface{} `json:"variables"`
	// Tracking optionally carries query tracking options (see
	// QueryCollectionRequestTracking); omitted from the JSON payload when nil.
	Tracking *QueryCollectionRequestTracking `json:"tracking,omitempty"`
}
// NewQueryCollectionRequest instantiates a new QueryCollectionRequest object.
// This constructor makes sure properties required by the API are set; the set
// of arguments will change if the set of required properties changes.
func NewQueryCollectionRequest(variables map[string]map[string]interface{}) *QueryCollectionRequest {
	return &QueryCollectionRequest{Variables: variables}
}

// NewQueryCollectionRequestWithDefaults instantiates a new
// QueryCollectionRequest object with only defaulted properties assigned; it
// does not guarantee that properties required by the API are set.
func NewQueryCollectionRequestWithDefaults() *QueryCollectionRequest {
	return &QueryCollectionRequest{}
}
// GetPipeline returns the Pipeline field value if set, zero value otherwise.
func (o *QueryCollectionRequest) GetPipeline() QueryCollectionRequestPipeline {
	if o == nil || o.Pipeline == nil {
		var ret QueryCollectionRequestPipeline
		return ret
	}
	return *o.Pipeline
}

// GetPipelineOk returns a tuple with the Pipeline field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *QueryCollectionRequest) GetPipelineOk() (*QueryCollectionRequestPipeline, bool) {
	if o == nil || o.Pipeline == nil {
		return nil, false
	}
	return o.Pipeline, true
}

// HasPipeline returns true if the Pipeline field has been set.
func (o *QueryCollectionRequest) HasPipeline() bool {
	// Simplified from an if/return-true/return-false chain; behavior is
	// unchanged.
	return o != nil && o.Pipeline != nil
}

// SetPipeline gets a reference to the given QueryCollectionRequestPipeline and assigns it to the Pipeline field.
func (o *QueryCollectionRequest) SetPipeline(v QueryCollectionRequestPipeline) {
	o.Pipeline = &v
}
// GetVariables returns the Variables field value.
func (o *QueryCollectionRequest) GetVariables() map[string]map[string]interface{} {
	if o != nil {
		return o.Variables
	}
	var zero map[string]map[string]interface{}
	return zero
}

// GetVariablesOk returns a tuple with the Variables field value and a boolean
// to check if the value has been set.
func (o *QueryCollectionRequest) GetVariablesOk() (*map[string]map[string]interface{}, bool) {
	if o != nil {
		return &o.Variables, true
	}
	return nil, false
}

// SetVariables sets the Variables field value.
func (o *QueryCollectionRequest) SetVariables(v map[string]map[string]interface{}) {
	o.Variables = v
}
// GetTracking returns the Tracking field value if set, zero value otherwise.
func (o *QueryCollectionRequest) GetTracking() QueryCollectionRequestTracking {
	if o == nil || o.Tracking == nil {
		var ret QueryCollectionRequestTracking
		return ret
	}
	return *o.Tracking
}

// GetTrackingOk returns a tuple with the Tracking field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *QueryCollectionRequest) GetTrackingOk() (*QueryCollectionRequestTracking, bool) {
	if o == nil || o.Tracking == nil {
		return nil, false
	}
	return o.Tracking, true
}

// HasTracking returns true if the Tracking field has been set.
func (o *QueryCollectionRequest) HasTracking() bool {
	// Simplified from an if/return-true/return-false chain; behavior is
	// unchanged.
	return o != nil && o.Tracking != nil
}

// SetTracking gets a reference to the given QueryCollectionRequestTracking and assigns it to the Tracking field.
func (o *QueryCollectionRequest) SetTracking(v QueryCollectionRequestTracking) {
	o.Tracking = &v
}
// MarshalJSON serializes the request as a JSON object, omitting unset
// optional fields (Pipeline, Tracking).
func (o QueryCollectionRequest) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Pipeline != nil {
		toSerialize["pipeline"] = o.Pipeline
	}
	// Variables is a required field, so it is always serialized. (The code
	// generator emitted a redundant `if true { ... }` guard here; it has
	// been removed with no change in behavior.)
	toSerialize["variables"] = o.Variables
	if o.Tracking != nil {
		toSerialize["tracking"] = o.Tracking
	}
	return json.Marshal(toSerialize)
}
type NullableQueryCollectionRequest struct {
value *QueryCollectionRequest
isSet bool
}
func (v NullableQueryCollectionRequest) Get() *QueryCollectionRequest {
return v.value
}
func (v *NullableQueryCollectionRequest) Set(val *QueryCollectionRequest) {
v.value = val
v.isSet = true
}
func (v NullableQueryCollectionRequest) IsSet() bool {
return v.isSet
}
func (v *NullableQueryCollectionRequest) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableQueryCollectionRequest(val *QueryCollectionRequest) *NullableQueryCollectionRequest {
return &NullableQueryCollectionRequest{value: val, isSet: true}
}
func (v NullableQueryCollectionRequest) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableQueryCollectionRequest) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | internal/openapi/model_query_collection_request.go | 0.870515 | 0.703651 | model_query_collection_request.go | starcoder |
package processor
import (
"fmt"
"sort"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message/tracing"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/gabs/v2"
)
//------------------------------------------------------------------------------
// init registers the workflow processor constructor, its documentation,
// examples, field specs and config sanitisation function with the global
// processor constructor registry.
func init() {
	Constructors[TypeWorkflow] = TypeSpec{
		constructor: NewWorkflow,
		Categories: []Category{
			CategoryComposition,
		},
		Status: docs.StatusBeta,
		Summary: `
Executes a topology of ` + "[`branch` processors][processors.branch]" + `,
performing them in parallel where possible.`,
		Description: `
## Why Use a Workflow

### Performance

Most of the time the best way to compose processors is also the simplest, just configure them in series. This is because processors are often CPU bound, low-latency, and you can gain vertical scaling by increasing the number of processor pipeline threads, allowing Benthos to process [multiple messages in parallel][configuration.pipelines].

However, some processors such as ` + "[`http`][processors.http], [`lambda`][processors.lambda] or [`cache`][processors.cache]" + ` interact with external services and therefore spend most of their time waiting for a response. These processors tend to be high-latency and low CPU activity, which causes messages to process slowly.

When a processing pipeline contains multiple network processors that aren't dependent on each other we can benefit from performing these processors in parallel for each individual message, reducing the overall message processing latency.

### Simplifying Processor Topology

A workflow is often expressed as a [DAG][dag_wiki] of processing stages, where each stage can result in N possible next stages, until finally the flow ends at an exit node.

For example, if we had processing stages A, B, C and D, where stage A could result in either stage B or C being next, always followed by D, it might look something like this:

` + "```text" + `
     /--> B --\
A --|          |--> D
     \--> C --/
` + "```" + `

This flow would be easy to express in a standard Benthos config, we could simply use a ` + "[`switch` processor][processors.switch]" + ` to route to either B or C depending on a condition on the result of A. However, this method of flow control quickly becomes unfeasible as the DAG gets more complicated, imagine expressing this flow using switch processors:

` + "```text" + `
      /--> B -------------|--> D
     /                   /
A --|          /--> E --|
     \--> C --|          \
               \----------|--> F
` + "```" + `

And imagine doing so knowing that the diagram is subject to change over time. Yikes! Instead, with a workflow we can either trust it to automatically resolve the DAG or express it manually as simply as ` + "`order: [ [ A ], [ B, C ], [ E ], [ D, F ] ]`" + `, and the conditional logic for determining if a stage is executed is defined as part of the branch itself.`,
		Footnotes: `
## Structured Metadata

When the field ` + "`meta_path`" + ` is non-empty the workflow processor creates an object describing which workflows were successful, skipped or failed for each message and stores the object within the message at the end.

The object is of the following form:

` + "```json" + `
{
	"succeeded": [ "foo" ],
	"skipped": [ "bar" ],
	"failed": {
		"baz": "the error message from the branch"
	}
}
` + "```" + `

If a message already has a meta object at the given path when it is processed then the object is used in order to determine which branches have already been performed on the message (or skipped) and can therefore be skipped on this run.

This is a useful pattern when replaying messages that have failed some branches previously. For example, given the above example object the branches foo and bar would automatically be skipped, and baz would be reattempted.

The previous meta object will also be preserved in the field ` + "`<meta_path>.previous`" + ` when the new meta object is written, preserving a full record of all workflow executions.

If a field ` + "`<meta_path>.apply`" + ` exists in the meta object for a message and is an array then it will be used as an explicit list of stages to apply, all other stages will be skipped.

## Resources

It's common to configure processors (and other components) [as resources][configuration.resources] in order to keep the pipeline configuration cleaner. With the workflow processor you can include branch processors configured as resources within your workflow either by specifying them by name in the field ` + "`order`" + `, if Benthos doesn't find a branch within the workflow configuration of that name it'll refer to the resources.

Alternatively, if you do not wish to have an explicit ordering, you can add resource names to the field ` + "`branch_resources`" + ` and they will be included in the workflow with automatic DAG resolution along with any branches configured in the ` + "`branches`" + ` field.

### Resource Error Conditions

There are two error conditions that could potentially occur when resources included in your workflow are mutated, and if you are planning to mutate resources in your workflow it is important that you understand them.

The first error case is that a resource in the workflow is removed and not replaced, when this happens the workflow will still be executed but the individual branch will fail. This should only happen if you explicitly delete a branch resource, as any mutation operation will create the new resource before removing the old one.

The second error case is when automatic DAG resolution is being used and a resource in the workflow is changed in a way that breaks the DAG (circular dependencies, etc). When this happens it is impossible to execute the workflow and therefore the processor will fail, which is possible to capture and handle using [standard error handling patterns][configuration.error-handling].

## Error Handling

The recommended approach to handle failures within a workflow is to query against the [structured metadata](#structured-metadata) it provides, as it provides granular information about exactly which branches failed and which ones succeeded and therefore aren't necessary to perform again.

For example, if our meta object is stored at the path ` + "`meta.workflow`" + ` and we wanted to check whether a message has failed for any branch we can do that using a [Bloblang query][guides.bloblang] like ` + "`this.meta.workflow.failed.length() | 0 > 0`" + `, or to check whether a specific branch failed we can use ` + "`this.exists(\"meta.workflow.failed.foo\")`" + `.

However, if structured metadata is disabled by setting the field ` + "`meta_path`" + ` to empty then the workflow processor instead adds a general error flag to messages when any executed branch fails. In this case it's possible to handle failures using [standard error handling patterns][configuration.error-handling].

[dag_wiki]: https://en.wikipedia.org/wiki/Directed_acyclic_graph
[processors.switch]: /docs/components/processors/switch
[processors.http]: /docs/components/processors/http
[processors.lambda]: /docs/components/processors/lambda
[processors.cache]: /docs/components/processors/cache
[processors.branch]: /docs/components/processors/branch
[guides.bloblang]: /docs/guides/bloblang/about
[configuration.pipelines]: /docs/configuration/processing_pipelines
[configuration.error-handling]: /docs/configuration/error_handling
[configuration.resources]: /docs/configuration/resources
`,
		Examples: []docs.AnnotatedExample{
			{
				Title: "Automatic Ordering",
				Summary: `
When the field ` + "`order`" + ` is omitted a best attempt is made to determine a dependency tree between branches based on their request and result mappings. In the following example the branches foo and bar will be executed first in parallel, and afterwards the branch baz will be executed.`,
				Config: `
pipeline:
  processors:
    - workflow:
        meta_path: meta.workflow
        branches:
          foo:
            request_map: 'root = ""'
            processors:
              - http:
                  url: TODO
            result_map: 'root.foo = this'

          bar:
            request_map: 'root = this.body'
            processors:
              - lambda:
                  function: TODO
            result_map: 'root.bar = this'

          baz:
            request_map: |
              root.fooid = this.foo.id
              root.barstuff = this.bar.content
            processors:
              - cache:
                  resource: TODO
                  operator: set
                  key: ${! json("fooid") }
                  value: ${! json("barstuff") }
`,
			},
			{
				Title: "Conditional Branches",
				Summary: `
Branches of a workflow are skipped when the ` + "`request_map`" + ` assigns ` + "`deleted()`" + ` to the root. In this example the branch A is executed when the document type is "foo", and branch B otherwise. Branch C is executed afterwards and is skipped unless either A or B successfully provided a result at ` + "`tmp.result`" + `.`,
				Config: `
pipeline:
  processors:
    - workflow:
        branches:
          A:
            request_map: |
              root = if this.document.type != "foo" {
                deleted()
              }
            processors:
              - http:
                  url: TODO
            result_map: 'root.tmp.result = this'

          B:
            request_map: |
              root = if this.document.type == "foo" {
                deleted()
              }
            processors:
              - lambda:
                  function: TODO
            result_map: 'root.tmp.result = this'

          C:
            request_map: |
              root = if this.tmp.result != null {
                deleted()
              }
            processors:
              - http:
                  url: TODO_SOMEWHERE_ELSE
            result_map: 'root.tmp.result = this'
`,
			},
			{
				Title: "Resources",
				Summary: `
The ` + "`order`" + ` field can be used in order to refer to [branch processor resources](#resources), this can sometimes make your pipeline configuration cleaner, as well as allowing you to reuse branch configurations in order places. It's also possible to mix and match branches configured within the workflow and configured as resources.`,
				Config: `
pipeline:
  processors:
    - workflow:
        order: [ [ foo, bar ], [ baz ] ]
        branches:
          bar:
            request_map: 'root = this.body'
            processors:
              - lambda:
                  function: TODO
            result_map: 'root.bar = this'

resources:
  processors:
    foo:
      branch:
        request_map: 'root = ""'
        processors:
          - http:
              url: TODO
        result_map: 'root.foo = this'

    baz:
      branch:
        request_map: |
          root.fooid = this.foo.id
          root.barstuff = this.bar.content
        processors:
          - cache:
              resource: TODO
              operator: set
              key: ${! json("fooid") }
              value: ${! json("barstuff") }
`,
			},
		},
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("meta_path", "A [dot path](/docs/configuration/field_paths) indicating where to store and reference [structured metadata](#structured-metadata) about the workflow execution."),
			docs.FieldDeprecated("stages"),
			docs.FieldCommon(
				"order",
				"An explicit declaration of branch ordered tiers, which describes the order in which parallel tiers of branches should be executed. Branches should be identified by the name as they are configured in the field `branches`. It's also possible to specify branch processors configured [as a resource](#resources). ",
				[][]string{{"foo", "bar"}, {"baz"}},
				[][]string{{"foo"}, {"bar"}, {"baz"}},
			),
			docs.FieldAdvanced(
				"branch_resources",
				"An optional list of [`branch` processor](/docs/components/processors/branch) names that are configured as [resources](#resources). These resources will be included in the workflow with any branches configured inline within the [`branches`](#branches) field. The order and parallelism in which branches are executed is automatically resolved based on the mappings of each branch. When using resources with an explicit order it is not necessary to list resources in this field.",
			).AtVersion("3.38.0"),
			docs.FieldCommon(
				"branches",
				"An object of named [`branch` processors](/docs/components/processors/branch) that make up the workflow. The order and parallelism in which branches are executed can either be made explicit with the field `order`, or if omitted an attempt is made to automatically resolve an ordering based on the mappings of each branch.",
			),
		},
		// sanitiseConfigFunc renders the workflow config as a generic map
		// for display, sanitising each branch and (deprecated) stage.
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			sanitBranches := map[string]interface{}{}
			for k, v := range conf.Workflow.Branches {
				sanit, err := v.Sanitise()
				if err != nil {
					return nil, err
				}
				sanitBranches[k] = sanit
			}
			m := map[string]interface{}{
				"meta_path":        conf.Workflow.MetaPath,
				"order":            conf.Workflow.Order,
				"branch_resources": conf.Workflow.BranchResources,
				"branches":         sanitBranches,
			}
			// Only include the deprecated stages section when present.
			if len(conf.Workflow.Stages) > 0 {
				sanitChildren := map[string]interface{}{}
				for k, v := range conf.Workflow.Stages {
					sanit, err := v.Sanitise()
					if err != nil {
						return nil, err
					}
					sanit["dependencies"] = v.Dependencies
					sanitChildren[k] = sanit
				}
				m["stages"] = sanitChildren
			}
			return m, nil
		},
	}
}
//------------------------------------------------------------------------------
// WorkflowConfig is a config struct containing fields for the Workflow
// processor.
type WorkflowConfig struct {
	// MetaPath is a dot path at which structured execution metadata is
	// stored within each message; empty disables metadata.
	MetaPath string `json:"meta_path" yaml:"meta_path"`
	// Order is an optional explicit list of parallel execution tiers.
	Order [][]string `json:"order" yaml:"order"`
	// BranchResources lists branch processors configured as resources to
	// include in the workflow.
	BranchResources []string `json:"branch_resources" yaml:"branch_resources"`
	// Branches maps branch names to their inline configurations.
	Branches map[string]BranchConfig `json:"branches" yaml:"branches"`
	// Stages is the deprecated predecessor of Branches.
	Stages map[string]DepProcessMapConfig `json:"stages" yaml:"stages"`
}
// NewWorkflowConfig returns a WorkflowConfig populated with default values.
func NewWorkflowConfig() WorkflowConfig {
	var conf WorkflowConfig
	conf.MetaPath = "meta.workflow"
	conf.Order = [][]string{}
	conf.BranchResources = []string{}
	conf.Branches = map[string]BranchConfig{}
	conf.Stages = map[string]DepProcessMapConfig{}
	return conf
}
//------------------------------------------------------------------------------
// Workflow is a processor that applies a list of child processors to a new
// payload mapped from the original, and after processing attempts to overlay
// the results back onto the original payloads according to more mappings.
type Workflow struct {
	log   log.Modular
	stats metrics.Type

	// children holds the branch processors keyed by name, guarded so that
	// resourced branches are not mutated mid-flow.
	children *workflowBranchMap
	// allStages is the set of all branch names in the workflow.
	allStages map[string]struct{}
	// metaPath is the parsed dot path for structured metadata; nil when
	// metadata is disabled.
	metaPath []string

	// Metric counters.
	mCount           metrics.StatCounter
	mSent            metrics.StatCounter
	mSentParts       metrics.StatCounter
	mSkippedNoStages metrics.StatCounter
	mErr             metrics.StatCounter
	mErrJSON         metrics.StatCounter
	mErrMeta         metrics.StatCounter
	mErrOverlay      metrics.StatCounter
	// Lazily populated per-stage error/success counters.
	mErrStages  map[string]metrics.StatCounter
	mSuccStages map[string]metrics.StatCounter
}
// NewWorkflow returns a new workflow processor. When the deprecated stages
// config is used it delegates to newWorkflowDeprecated; stages cannot be
// combined with branches or a manual order.
func NewWorkflow(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	if len(conf.Workflow.Stages) > 0 {
		if len(conf.Workflow.Branches) > 0 {
			return nil, fmt.Errorf("cannot combine both workflow branches and stages in the same processor")
		}
		if len(conf.Workflow.Order) > 0 {
			return nil, fmt.Errorf("cannot combine both manual ordering and stages in the same processor")
		}
		return newWorkflowDeprecated(conf, mgr, log, stats)
	}

	w := &Workflow{
		log:         log,
		stats:       stats,
		mErrStages:  map[string]metrics.StatCounter{},
		mSuccStages: map[string]metrics.StatCounter{},
		metaPath:    nil,
		allStages:   map[string]struct{}{},
	}
	if len(conf.Workflow.MetaPath) > 0 {
		w.metaPath = gabs.DotPathToSlice(conf.Workflow.MetaPath)
	}

	// Resolve the branch map (inline branches plus resources, with explicit
	// or automatic DAG ordering).
	var err error
	if w.children, err = newWorkflowBranchMap(conf.Workflow, mgr, log, stats); err != nil {
		return nil, err
	}
	for k := range w.children.branches {
		w.allStages[k] = struct{}{}
	}

	w.mCount = stats.GetCounter("count")
	w.mSent = stats.GetCounter("sent")
	w.mSentParts = stats.GetCounter("parts.sent")
	w.mSkippedNoStages = stats.GetCounter("skipped.no_stages")
	w.mErr = stats.GetCounter("error")
	w.mErrJSON = stats.GetCounter("error.json_parse")
	w.mErrMeta = stats.GetCounter("error.meta_set")
	w.mErrOverlay = stats.GetCounter("error.overlay")
	return w, nil
}
//------------------------------------------------------------------------------
// incrStageErr increments the error counter for the given branch id, lazily
// creating and caching the counter on first use.
func (w *Workflow) incrStageErr(id string) {
	ctr, exists := w.mErrStages[id]
	if !exists {
		ctr = w.stats.GetCounter(fmt.Sprintf("%v.error", id))
		w.mErrStages[id] = ctr
	}
	ctr.Incr(1)
}

// incrStageSucc increments the success counter for the given branch id,
// lazily creating and caching the counter on first use.
func (w *Workflow) incrStageSucc(id string) {
	ctr, exists := w.mSuccStages[id]
	if !exists {
		ctr = w.stats.GetCounter(fmt.Sprintf("%v.success", id))
		w.mSuccStages[id] = ctr
	}
	ctr.Incr(1)
}
//------------------------------------------------------------------------------
// resultTracker records, per message part, which workflow branches have
// succeeded, which were skipped, and which failed along with the reason.
type resultTracker struct {
	succeeded map[string]struct{}
	skipped   map[string]struct{}
	failed    map[string]string
	sync.Mutex
}

// trackerFromTree creates a tracker that optimistically marks every branch
// in the DAG as succeeded; branches are demoted to skipped or failed later.
func trackerFromTree(tree [][]string) *resultTracker {
	tracker := &resultTracker{
		succeeded: map[string]struct{}{},
		skipped:   map[string]struct{}{},
		failed:    map[string]string{},
	}
	for _, tier := range tree {
		for _, branch := range tier {
			tracker.succeeded[branch] = struct{}{}
		}
	}
	return tracker
}

// Skipped marks branch k as skipped.
func (r *resultTracker) Skipped(k string) {
	r.Lock()
	defer r.Unlock()
	delete(r.succeeded, k)
	r.skipped[k] = struct{}{}
}

// Failed marks branch k as failed with the reason why.
func (r *resultTracker) Failed(k, why string) {
	r.Lock()
	defer r.Unlock()
	delete(r.succeeded, k)
	delete(r.skipped, k)
	r.failed[k] = why
}

// ToObject renders the tracker as a generic object suitable for storing as
// structured metadata; empty categories are omitted and lists are sorted.
func (r *resultTracker) ToObject() map[string]interface{} {
	sortedList := func(set map[string]struct{}) []interface{} {
		names := make([]string, 0, len(set))
		for name := range set {
			names = append(names, name)
		}
		sort.Strings(names)
		list := make([]interface{}, len(names))
		for i, name := range names {
			list[i] = name
		}
		return list
	}

	obj := map[string]interface{}{}
	if len(r.succeeded) > 0 {
		obj["succeeded"] = sortedList(r.succeeded)
	}
	if len(r.skipped) > 0 {
		obj["skipped"] = sortedList(r.skipped)
	}
	if len(r.failed) > 0 {
		failed := make(map[string]interface{}, len(r.failed))
		for k, v := range r.failed {
			failed[k] = v
		}
		obj["failed"] = failed
	}
	return obj
}
// skipFromMeta returns the set of branch IDs that should be skipped for this
// payload, derived from a previously stored structured-metadata object at
// w.metaPath within root. Always empty when metadata is disabled.
func (w *Workflow) skipFromMeta(root interface{}) map[string]struct{} {
	skipList := map[string]struct{}{}
	if len(w.metaPath) == 0 {
		return skipList
	}

	gObj := gabs.Wrap(root)

	// If a whitelist is provided for this flow then skip stages that aren't
	// within it.
	if apply, ok := gObj.S(append(w.metaPath, "apply")...).Data().([]interface{}); ok {
		if len(apply) > 0 {
			// Start by skipping everything, then remove whitelisted stages.
			for k := range w.allStages {
				skipList[k] = struct{}{}
			}
			for _, id := range apply {
				if idStr, isString := id.(string); isString {
					delete(skipList, idStr)
				}
			}
		}
	}

	// Skip stages that already succeeded in a previous run of this workflow.
	if succeeded, ok := gObj.S(append(w.metaPath, "succeeded")...).Data().([]interface{}); ok {
		for _, id := range succeeded {
			if idStr, isString := id.(string); isString {
				// Ignore names that are no longer part of the workflow.
				if _, exists := w.allStages[idStr]; exists {
					skipList[idStr] = struct{}{}
				}
			}
		}
	}

	// Skip stages that were already skipped in a previous run of this workflow.
	if skipped, ok := gObj.S(append(w.metaPath, "skipped")...).Data().([]interface{}); ok {
		for _, id := range skipped {
			if idStr, isString := id.(string); isString {
				if _, exists := w.allStages[idStr]; exists {
					skipList[idStr] = struct{}{}
				}
			}
		}
	}
	return skipList
}
// ProcessMessage applies workflow stages to each part of a message type. The
// DAG is executed tier by tier; branches within a tier run in parallel
// goroutines, and their results are overlaid back onto the payload serially.
func (w *Workflow) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
	w.mCount.Incr(1)

	payload := msg.DeepCopy()

	// Prevent resourced branches from being updated mid-flow.
	dag, children, unlock, err := w.children.Lock()
	if err != nil {
		// The workflow itself could not be established (e.g. a broken DAG
		// from mutated resources): flag every part and pass the message on.
		w.mErr.Incr(1)
		w.log.Errorf("Failed to establish workflow: %v\n", err)

		payload.Iter(func(i int, p types.Part) error {
			FlagErr(p, err)
			return nil
		})
		w.mSentParts.Incr(int64(payload.Len()))
		w.mSent.Incr(1)
		return []types.Message{payload}, nil
	}
	defer unlock()

	// Determine, per part, which branches a previous run already handled.
	skipOnMeta := make([]map[string]struct{}, msg.Len())
	payload.Iter(func(i int, p types.Part) error {
		// NOTE(review): p.Get() and p.Metadata() appear to force
		// materialisation of the deep-copied part before the parallel
		// section — confirm against the types.Part implementation.
		p.Get()
		p.Metadata()
		if jObj, err := p.JSON(); err == nil {
			skipOnMeta[i] = w.skipFromMeta(jObj)
		} else {
			skipOnMeta[i] = map[string]struct{}{}
		}
		return nil
	})

	propMsg, _ := tracing.WithChildSpans("workflow", payload)

	// One result tracker per message part, initialised optimistically.
	records := make([]*resultTracker, payload.Len())
	for i := range records {
		records[i] = trackerFromTree(dag)
	}

	for _, layer := range dag {
		results := make([][]types.Part, len(layer))
		errors := make([]error, len(layer))

		// Execute all branches of this tier in parallel.
		wg := sync.WaitGroup{}
		wg.Add(len(layer))
		for i, eid := range layer {
			go func(id string, index int) {
				branchMsg, branchSpans := tracing.WithChildSpans(id, propMsg.Copy())

				branchParts := make([]types.Part, branchMsg.Len())
				branchMsg.Iter(func(partIndex int, part types.Part) error {
					// Remove errors so that they aren't propagated into the
					// branch.
					ClearFail(part)
					// A nil entry marks a part this branch must skip.
					if _, exists := skipOnMeta[partIndex][id]; !exists {
						branchParts[partIndex] = part
					}
					return nil
				})

				var mapErrs []branchMapError
				results[index], mapErrs, errors[index] = children[id].createResult(branchParts, branchMsg)
				for _, s := range branchSpans {
					s.Finish()
				}
				// Nil results indicate the branch skipped that part.
				for j, p := range results[index] {
					if p == nil {
						records[j].Skipped(id)
					}
				}
				for _, e := range mapErrs {
					records[e.index].Failed(id, e.err.Error())
				}
				wg.Done()
			}(eid, i)
		}
		wg.Wait()

		// Serially overlay each branch's results back onto the payload.
		for i, id := range layer {
			var failed []branchMapError
			err := errors[i]
			if err == nil {
				if failed, err = children[id].overlayResult(payload, results[i]); err != nil {
					w.mErrOverlay.Incr(1)
				}
			}
			if err != nil {
				// A branch-wide failure marks the branch failed for every part.
				w.incrStageErr(id)
				w.mErr.Incr(1)
				w.log.Errorf("Failed to perform enrichment '%v': %v\n", id, err)
				for j := range records {
					records[j].Failed(id, err.Error())
				}
				continue
			}
			for _, e := range failed {
				records[e.index].Failed(id, e.err.Error())
			}
			w.incrStageSucc(id)
		}
	}

	// Finally, set the meta records of each document.
	if len(w.metaPath) > 0 {
		payload.Iter(func(i int, p types.Part) error {
			pJSON, err := p.JSON()
			if err != nil {
				w.mErr.Incr(1)
				w.mErrMeta.Incr(1)
				w.log.Errorf("Failed to parse message for meta update: %v\n", err)
				FlagErr(p, err)
				return nil
			}

			gObj := gabs.Wrap(pJSON)
			previous := gObj.S(w.metaPath...).Data()
			current := records[i].ToObject()
			// Preserve any earlier workflow record under "previous".
			if previous != nil {
				current["previous"] = previous
			}
			gObj.Set(current, w.metaPath...)

			p.SetJSON(gObj.Data())
			return nil
		})
	} else {
		// Metadata disabled: flag parts with a general error listing the
		// failed branches instead.
		payload.Iter(func(i int, p types.Part) error {
			if lf := len(records[i].failed); lf > 0 {
				failed := make([]string, 0, lf)
				for k := range records[i].failed {
					failed = append(failed, k)
				}
				sort.Strings(failed)
				FlagErr(p, fmt.Errorf("workflow branches failed: %v", failed))
			}
			return nil
		})
	}

	tracing.FinishSpans(propMsg)

	w.mSentParts.Incr(int64(payload.Len()))
	w.mSent.Incr(1)
	msgs := [1]types.Message{payload}
	return msgs[:], nil
}
// CloseAsync shuts down the processor and stops processing requests.
// It delegates to the resourced child branches, which close asynchronously;
// use WaitForClose to block until shutdown completes.
func (w *Workflow) CloseAsync() {
	w.children.CloseAsync()
}
// WaitForClose blocks until the processor has closed down.
// It returns an error if the child branches do not close within timeout.
func (w *Workflow) WaitForClose(timeout time.Duration) error {
	return w.children.WaitForClose(timeout)
}
//------------------------------------------------------------------------------
package rtc
import "log"
// M4 is a 4x4 matrix, stored row-major: each element is one row,
// represented as a Tuple.
type M4 [4]Tuple

// Get returns a value within the matrix.
// row and col are zero-based; bounds are not checked.
func (m M4) Get(row, col int) float64 {
	return m[row][col]
}
// Equal reports whether every row of m equals the corresponding row of other,
// using Tuple's (epsilon-based) equality.
func (m M4) Equal(other M4) bool {
	for i := range m {
		if !m[i].Equal(other[i]) {
			return false
		}
	}
	return true
}
// Mult returns the matrix product m * other. Order is important:
// entry (r,c) of the result is the dot product of row r of m with
// column c of other.
func (m M4) Mult(other M4) M4 {
	cols := [4]Tuple{other.Column(0), other.Column(1), other.Column(2), other.Column(3)}
	var out M4
	for r := 0; r < 4; r++ {
		out[r] = Tuple{m[r].Dot(cols[0]), m[r].Dot(cols[1]), m[r].Dot(cols[2]), m[r].Dot(cols[3])}
	}
	return out
}
// MultTuple multiplies the 4x4 matrix by a tuple, returning a tuple whose
// i-th component is the dot product of row i with the argument.
func (m M4) MultTuple(other Tuple) Tuple {
	return Tuple{m[0].Dot(other), m[1].Dot(other), m[2].Dot(other), m[3].Dot(other)}
}
// M4Identity returns the 4x4 identity matrix (ones on the diagonal,
// zeros elsewhere).
func M4Identity() M4 {
	return M4{
		{1, 0, 0, 0},
		{0, 1, 0, 0},
		{0, 0, 1, 0},
		{0, 0, 0, 1},
	}
}
// Transpose returns the transpose of the 4x4 matrix: the rows of the
// result are the columns of m.
func (m M4) Transpose() M4 {
	return M4{m.Column(0), m.Column(1), m.Column(2), m.Column(3)}
}
// Submatrix returns the 3x3 matrix obtained by deleting the given row and
// column from the 4x4 matrix.
func (m M4) Submatrix(row, col int) M3 {
	// skip maps a submatrix index to the source index, stepping over the
	// removed row/column.
	skip := func(i, removed int) int {
		if i >= removed {
			return i + 1
		}
		return i
	}
	var out M3
	for r := 0; r < 3; r++ {
		out[r] = Tuple{
			m[skip(r, row)][skip(0, col)],
			m[skip(r, row)][skip(1, col)],
			m[skip(r, row)][skip(2, col)],
		}
	}
	return out
}
// Minor returns the determinant of the 3x3 submatrix obtained by removing
// the given row and column.
func (m M4) Minor(row, col int) float64 {
	sub := m.Submatrix(row, col)
	return sub.Determinant()
}
// Cofactor returns the signed minor at (row, col): the minor negated when
// row+col is odd.
func (m M4) Cofactor(row, col int) float64 {
	if (row+col)%2 == 1 {
		return -m.Minor(row, col)
	}
	return m.Minor(row, col)
}
// Determinant returns the determinant of the 4x4 matrix via cofactor
// expansion along the first row.
func (m M4) Determinant() float64 {
	var det float64
	for c := 0; c < 4; c++ {
		det += m[0][c] * m.Cofactor(0, c)
	}
	return det
}
// Invertible reports whether the 4x4 matrix has an inverse, i.e. whether
// its determinant is non-zero.
func (m M4) Invertible() bool {
	d := m.Determinant()
	return d != 0
}
// Inverse calculates the inverse of the 4x4 matrix using the classical
// adjugate method: element (r,c) of the inverse is Cofactor(c,r)/det
// (the swapped indices perform the transpose).
// It terminates the program via log.Fatalf when the matrix is singular.
func (m M4) Inverse() M4 {
	det := m.Determinant()
	if det == 0 {
		log.Fatalf("cannot take inverse of non-invertible matrix: %v", m)
	}
	var out M4
	for r := 0; r < 4; r++ {
		out[r] = Tuple{
			m.Cofactor(0, r) / det,
			m.Cofactor(1, r) / det,
			m.Cofactor(2, r) / det,
			m.Cofactor(3, r) / det,
		}
	}
	return out
}
// Column returns a column of the matrix as a Tuple.
// col is zero-based; bounds are not checked.
func (m M4) Column(col int) Tuple {
	return Tuple{m[0][col], m[1][col], m[2][col], m[3][col]}
}
// M3 is a 3x3 matrix, stored row-major: each element is one row,
// represented as a Tuple (only the first three components are used).
type M3 [3]Tuple

// Get returns a value within the matrix.
// row and col are zero-based; bounds are not checked.
func (m M3) Get(row, col int) float64 {
	return m[row][col]
}
// Equal reports whether every row of m equals the corresponding row of other,
// using Tuple's (epsilon-based) equality.
func (m M3) Equal(other M3) bool {
	for i := range m {
		if !m[i].Equal(other[i]) {
			return false
		}
	}
	return true
}
// Submatrix returns the 2x2 matrix obtained by deleting the given row and
// column from the 3x3 matrix.
func (m M3) Submatrix(row, col int) M2 {
	// skip maps a submatrix index to the source index, stepping over the
	// removed row/column.
	skip := func(i, removed int) int {
		if i >= removed {
			return i + 1
		}
		return i
	}
	var out M2
	for r := 0; r < 2; r++ {
		out[r] = Tuple{
			m[skip(r, row)][skip(0, col)],
			m[skip(r, row)][skip(1, col)],
		}
	}
	return out
}
// Minor returns the determinant of the 2x2 submatrix obtained by removing
// the given row and column.
func (m M3) Minor(row, col int) float64 {
	sub := m.Submatrix(row, col)
	return sub.Determinant()
}
// Cofactor returns the signed minor at (row, col): the minor negated when
// row+col is odd.
func (m M3) Cofactor(row, col int) float64 {
	if (row+col)%2 == 1 {
		return -m.Minor(row, col)
	}
	return m.Minor(row, col)
}
// Determinant returns the determinant of the 3x3 matrix via cofactor
// expansion along the first row.
func (m M3) Determinant() float64 {
	var det float64
	for c := 0; c < 3; c++ {
		det += m[0][c] * m.Cofactor(0, c)
	}
	return det
}
// M2 is a 2x2 matrix, stored row-major: each element is one row,
// represented as a Tuple (only the first two components are used).
type M2 [2]Tuple

// Get returns a value within the matrix.
// row and col are zero-based; bounds are not checked.
func (m M2) Get(row, col int) float64 {
	return m[row][col]
}
// Equal reports whether both rows of m equal the corresponding rows of other,
// using Tuple's (epsilon-based) equality.
func (m M2) Equal(other M2) bool {
	for i := range m {
		if !m[i].Equal(other[i]) {
			return false
		}
	}
	return true
}
// Determinant finds the determinant of a 2x2 matrix.
func (m M2) Determinant() float64 {
return m[0][0]*m[1][1] - m[0][1]*m[1][0]
} | rtc/matrix.go | 0.901057 | 0.75611 | matrix.go | starcoder |
package cluster
import (
"errors"
"fmt"
"math"
)
var (
	// weightVector holds the per-attribute weights consumed by
	// WeightedHammingDistance. It is package-level mutable state set via
	// SetWeights; it must be set before the weighted distance is used and
	// is not safe for concurrent mutation.
	weightVector *DenseVector
)
// HammingDistance is a basic dissimilarity function for the kmodes
// algorithm: it counts the coordinates at which the two vectors differ.
// It returns -1 and an error when the vector lengths do not match.
func HammingDistance(a, b *DenseVector) (float64, error) {
	if a.Len() != b.Len() {
		return -1, errors.New("hamming distance: vectors lengths do not match")
	}
	count := 0.0
	for i, n := 0, a.Len(); i < n; i++ {
		if a.At(i, 0) != b.At(i, 0) {
			count++
		}
	}
	return count, nil
}
// WeightedHammingDistance dissimilarity function is based on hamming
// distance but adds importance to attributes: each differing coordinate
// contributes its weight from the package-level weightVector instead of 1.
//
// It returns -1 and an error when the vector lengths differ or when the
// weight vector length does not match the inputs. SetWeights must have
// been called first; a nil weightVector would panic on Len() — TODO confirm
// callers guarantee this.
func WeightedHammingDistance(a, b *DenseVector) (float64, error) {
	if a.Len() != b.Len() {
		// Fixed copy-paste: this message previously said "hamming distance",
		// making the failing function impossible to identify from the error.
		return -1, errors.New("weighted hamming distance: vectors lengths do not match")
	}
	if a.Len() != weightVector.Len() {
		return -1, fmt.Errorf("weighted hamming distance: wrong weight vector length: %d", weightVector.Len())
	}
	var distance float64
	for i := 0; i < a.Len(); i++ {
		if a.At(i, 0) != b.At(i, 0) {
			distance += weightVector.At(i, 0)
		}
	}
	return distance, nil
}
// EuclideanDistance computes the euclidean distance between two vectors:
// the square root of the sum of squared coordinate differences.
// It returns -1 and an error when the vector lengths do not match.
func EuclideanDistance(a, b *DenseVector) (float64, error) {
	if a.Len() != b.Len() {
		return -1, errors.New("euclidean distance: vectors lengths do not match")
	}
	sum := 0.0
	for i, n := 0, a.Len(); i < n; i++ {
		d := a.At(i, 0) - b.At(i, 0)
		sum += d * d
	}
	return math.Sqrt(sum), nil
}
// SetWeights sets the weight vector used in WeightedHammingDistance function.
// It replaces the package-level weightVector wholesale; the length of
// newWeights must match the vectors later passed to the weighted distance.
// Not safe for concurrent use with WeightedHammingDistance.
func SetWeights(newWeights []float64) {
	weightVector = NewDenseVector(len(newWeights), newWeights)
}
// ComputeWeights derives per-column weights from the frequency of attribute
// values: a column's raw weight is 1/(number of distinct values), so columns
// with many distinct values weigh less. Constant columns (one distinct
// value) get weight 0. The weights are then rescaled so the largest equals
// imp; if every column was constant, uniform weights of 1 are returned.
func ComputeWeights(X *DenseMatrix, imp float64) []float64 {
	nRows, nCols := X.Dims()
	weights := make([]float64, nCols)
	for col := 0; col < nCols; col++ {
		values := X.ColView(col)
		distinct := map[float64]float64{}
		for row := 0; row < nRows; row++ {
			distinct[values.At(row, 0)]++
		}
		// A column with a single distinct value carries no information,
		// so its weight stays at the zero default.
		if w := 1 / float64(len(distinct)); w != 1 {
			weights[col] = w
		}
	}
	m := maxVal(weights)
	if m == 0 {
		// Every column was constant; fall back to uniform weights.
		for i := range weights {
			weights[i] = 1
		}
		return weights
	}
	scale := imp / m
	for i := range weights {
		weights[i] *= scale
	}
	return weights
}
// maxVal returns the largest value in table, or 0 for an empty table
// (weights are never negative in this package, so 0 is a safe floor).
func maxVal(table []float64) float64 {
	best := 0.0
	for _, v := range table {
		if v > best {
			best = v
		}
	}
	return best
}
package cost
import (
"sort"
)
// isPlanned is a readability constant for mergeResourceDiffsFromState's
// planned argument: pass isPlanned for the planned state, !isPlanned for
// the prior state.
var isPlanned = true

// Plan is the cost difference between two State instances. It is not tied to any specific cloud provider or IaC tool.
// Instead, it is a representation of the differences between two snapshots of cloud resources, with their associated
// costs. The Plan instance can be used to calculate the total cost difference of a plan, as well as cost differences
// of each resource (and their components) separately.
type Plan struct {
	// Prior is the state before the change; Planned the state after.
	// Either may be nil when that side is not included in the plan.
	Prior, Planned *State
}
// NewPlan returns a new Plan comparing the given prior and planned State.
// Either argument may be nil when that side is not part of the plan.
func NewPlan(prior, planned *State) *Plan {
	p := Plan{Prior: prior, Planned: planned}
	return &p
}
// PriorCost returns the total cost of the Prior State, or Zero when the
// prior state isn't included in the plan.
func (p Plan) PriorCost() (Cost, error) {
	prior := p.Prior
	if prior != nil {
		return prior.Cost()
	}
	return Zero, nil
}
// PlannedCost returns the total cost of the Planned State, or Zero when the
// planned state isn't included in the plan.
func (p Plan) PlannedCost() (Cost, error) {
	planned := p.Planned
	if planned != nil {
		return planned.Cost()
	}
	return Zero, nil
}
// ResourceDifferences merges the Prior and Planned State and returns a slice
// of differences between resources. The order of the elements in the slice
// is undefined and unstable (it follows map iteration order).
func (p Plan) ResourceDifferences() []ResourceDiff {
	merged := map[string]ResourceDiff{}
	if p.Prior != nil {
		mergeResourceDiffsFromState(merged, p.Prior, !isPlanned)
	}
	if p.Planned != nil {
		mergeResourceDiffsFromState(merged, p.Planned, isPlanned)
	}
	diffs := make([]ResourceDiff, 0, len(merged))
	for _, d := range merged {
		diffs = append(diffs, d)
	}
	return diffs
}
// SkippedAddresses returns the sorted addresses of resources that were
// excluded from the estimation process, deduplicated across the prior and
// planned states.
func (p Plan) SkippedAddresses() []string {
	seen := map[string]struct{}{}
	collect := func(s *State) {
		if s == nil {
			return
		}
		for addr, res := range s.Resources {
			if res.Skipped {
				seen[addr] = struct{}{}
			}
		}
	}
	collect(p.Prior)
	collect(p.Planned)
	out := make([]string, 0, len(seen))
	for addr := range seen {
		out = append(out, addr)
	}
	sort.Strings(out)
	return out
}
// mergeResourceDiffsFromState adds all the resources from the State to the provided ResourceDiff map. Each component
// of every resource is then placed into an appropriate ComponentDiff field based on the value of the `planned` argument.
func mergeResourceDiffsFromState(rdmap map[string]ResourceDiff, state *State, planned bool) {
for address, res := range state.Resources {
if res.Skipped {
continue
}
if _, ok := rdmap[address]; !ok {
rdmap[address] = ResourceDiff{
Address: address,
Provider: res.Provider,
Type: res.Type,
ComponentDiffs: make(map[string]*ComponentDiff),
}
}
for label, comp := range res.Components {
comp := comp
cd, ok := rdmap[address].ComponentDiffs[label]
if !ok {
cd = &ComponentDiff{}
rdmap[address].ComponentDiffs[label] = cd
}
if planned {
cd.Planned = &comp
} else {
cd.Prior = &comp
}
}
}
} | cost/plan.go | 0.747984 | 0.454714 | plan.go | starcoder |
package main
import (
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"time"
lib "github.com/cncf/devstatscode"
yaml "gopkg.in/yaml.v2"
)
// metrics contain list of metrics to evaluate
type metrics struct {
	Metrics []metric `yaml:"metrics"`
}

// metric contain each metric data
// some metrics can be allowed to fail
type metric struct {
	Name             string `yaml:"name"`                // human-readable name, used for logging
	Periods          string `yaml:"periods"`             // comma-separated list of periods to compute
	SeriesNameOrFunc string `yaml:"series_name_or_func"` // output series name (or naming function)
	MetricSQL        string `yaml:"sql"`                 // SQL file name (without extension); mutually exclusive with MetricSQLs
	MetricSQLs       *[]string `yaml:"sqls"`             // multiple SQL file names; expanded into one metric per SQL
	AddPeriodToName  bool   `yaml:"add_period_to_name"`  // append "_<period>" to the series name
	Histogram        bool   `yaml:"histogram"`           // histogram metrics are scheduled and run (possibly in parallel) at the end
	Aggregate        string `yaml:"aggregate"`           // comma-separated integer aggregation factors
	Skip             string `yaml:"skip"`                // comma-separated period+aggregate combinations to skip
	Desc             string `yaml:"desc"`
	MultiValue       bool   `yaml:"multi_value"`
	EscapeValueName  bool   `yaml:"escape_value_name"`
	AnnotationsRanges bool  `yaml:"annotations_ranges"`  // use quick ranges from annotations instead of Periods
	MergeSeries      string `yaml:"merge_series"`
	CustomData       bool   `yaml:"custom_data"`
	StartFrom        *time.Time `yaml:"start_from"`      // earliest computation date; mutually exclusive with LastHours
	LastHours        int    `yaml:"last_hours"`          // compute only the last N hours; mutually exclusive with StartFrom
	SeriesNameMap    map[string]string `yaml:"series_name_map"`
	EnvMap           map[string]string `yaml:"env"`      // extra environment, see processEnvMap for "?"/"??" conditional keys
	Disabled         bool   `yaml:"disabled"`
	Drop             string `yaml:"drop"`                // series to drop before the first computation (once per metric)
	Project          string `yaml:"project"`             // restrict/exclude metric per project
	AllowFail        bool   `yaml:"allow_fail"`          // log a warning instead of aborting when calc_metric fails
}
// randomize shuffles the metrics calculation order in place, making sure
// the metric whose series matches ctx.LastSeries stays last (its timestamp
// is how the next run detects where to resume).
func (m *metrics) randomize(ctx *lib.Ctx) {
	lib.Printf("Randomizing metrics calculation order\n")
	rand.Seed(time.Now().UnixNano())
	rand.Shuffle(len(m.Metrics), func(i, j int) {
		m.Metrics[i], m.Metrics[j] = m.Metrics[j], m.Metrics[i]
	})
	// Move the first metric matching the "last" series to the end.
	last := len(m.Metrics) - 1
	for i := range m.Metrics {
		if m.Metrics[i].SeriesNameOrFunc == ctx.LastSeries {
			if i != last {
				m.Metrics[i], m.Metrics[last] = m.Metrics[last], m.Metrics[i]
			}
			break
		}
	}
}
// addPeriodSuffix returns a copy of seriesArr with "_<period>" appended to
// every element.
func addPeriodSuffix(seriesArr []string, period string) (result []string) {
	suffix := "_" + period
	for _, name := range seriesArr {
		result = append(result, name+suffix)
	}
	return
}
// joinedCartesian returns every combination formed by picking one string
// from each row of mat, concatenated in row order with join between picks
// and wrapped with prefix/suffix. The first row varies fastest
// (odometer order), matching the original permutation walk.
func joinedCartesian(mat [][]string, prefix, join, suffix string) (result []string) {
	rows := len(mat)
	idx := make([]int, rows)
	for {
		// Emit the combination selected by the current index vector.
		var b strings.Builder
		b.WriteString(prefix)
		for i := 0; i < rows; i++ {
			if i > 0 {
				b.WriteString(join)
			}
			b.WriteString(mat[i][idx[i]])
		}
		b.WriteString(suffix)
		result = append(result, b.String())
		// Advance the "odometer": bump the lowest row, carrying into the
		// next row on overflow. Full carry-out means we are done.
		i := 0
		for i < rows {
			idx[i]++
			if idx[i] < len(mat[i]) {
				break
			}
			idx[i] = 0
			i++
		}
		if i == rows {
			return
		}
	}
}
// createSeriesFromFormula expands a series formula of the form
// "=prefix;suffix;join;list1item1,list1item2,...;list2item1,...;..."
// into the cartesian product of all lists, each combination wrapped with
// prefix/suffix and joined with join. Fewer than 4 ';'-separated fields
// after the leading '=' is a fatal configuration error.
func createSeriesFromFormula(def string) (result []string) {
	parts := strings.Split(def[1:], ";")
	if len(parts) < 4 {
		lib.Fatalf(
			"series formula must have at least 4 paramaters: "+
				"prefix, suffix, join, list, %v",
			def,
		)
	}
	prefix, suffix, join := parts[0], parts[1], parts[2]
	lists := make([][]string, 0, len(parts)-3)
	for _, list := range parts[3:] {
		lists = append(lists, strings.Split(list, ","))
	}
	return joinedCartesian(lists, prefix, join, suffix)
}
// processEnvMap resolves conditional keys in an environment map:
//   - "NAME??": set NAME only when the variable is not defined at all
//     (a defined-but-empty variable is left alone);
//   - "NAME?":  set NAME only when the variable is undefined or empty;
//   - plain "NAME": always set.
// When the map contains no conditional keys it is returned unchanged.
// NOTE(review): if both "NAME" and a conditional variant appear in the same
// map, the winner depends on map iteration order — confirm callers never
// mix them.
func processEnvMap(inMap map[string]string) (outMap map[string]string) {
	hasConditional := false
	for k := range inMap {
		if strings.HasSuffix(k, "?") {
			hasConditional = true
			break
		}
	}
	if !hasConditional {
		return inMap
	}
	outMap = make(map[string]string)
	for k, v := range inMap {
		switch {
		case strings.HasSuffix(k, "??"):
			name := strings.TrimSuffix(k, "??")
			if _, defined := os.LookupEnv(name); !defined {
				outMap[name] = v
			}
		case strings.HasSuffix(k, "?"):
			name := strings.TrimSuffix(k, "?")
			if os.Getenv(name) == "" {
				outMap[name] = v
			}
		default:
			outMap[k] = v
		}
	}
	return
}
// sync runs one full synchronization pass for a single project: it fetches
// new GHA archives into Postgres, refreshes git commits and GitHub API
// state, recomputes every configured time-series metric for the date range
// since the last run, and finally updates tags, columns, annotations and
// dashboard vars as needed. args holds "org[,org...]" and optionally
// "repo[,repo...]"; all other behavior is driven by ctx (environment).
func sync(ctx *lib.Ctx, args []string) {
	// Strip function to be used by MapString
	stripFunc := func(x string) string { return strings.TrimSpace(x) }
	// Orgs & Repos
	sOrg := ""
	if len(args) > 0 {
		sOrg = args[0]
	}
	sRepo := ""
	if len(args) > 1 {
		sRepo = args[1]
	}
	org := lib.StringsMapToArray(stripFunc, strings.Split(sOrg, ","))
	repo := lib.StringsMapToArray(stripFunc, strings.Split(sRepo, ","))
	lib.Printf("gha2db_sync.go: Running on: %s/%s\n", strings.Join(org, "+"), strings.Join(repo, "+"))
	// Local or cron mode?
	dataPrefix := ctx.DataDir
	if ctx.Local {
		dataPrefix = "./"
	}
	cmdPrefix := ""
	if ctx.LocalCmd {
		cmdPrefix = "./"
	}
	// Connect to Postgres DB
	con := lib.PgConn(ctx)
	defer func() { lib.FatalOnError(con.Close()) }()
	// Get max event date from Postgres database — this is where raw GHA
	// ingestion resumes from (plus one hour).
	var maxDtPtr *time.Time
	maxDtPg := ctx.DefaultStartDate
	if !ctx.ForceStartDate {
		lib.FatalOnError(lib.QueryRowSQL(con, ctx, "select max(dt) from gha_parsed").Scan(&maxDtPtr))
		if maxDtPtr != nil {
			maxDtPg = maxDtPtr.Add(1 * time.Hour)
		}
	}
	// Get max series date from TS database — this is where metric
	// computation resumes from ("s" + LastSeries is the sentinel table).
	maxDtTSDB := ctx.DefaultStartDate
	if !ctx.ForceStartDate {
		table := "s" + ctx.LastSeries
		if lib.TableExists(con, ctx, table) {
			lib.FatalOnError(lib.QueryRowSQL(con, ctx, "select max(time) from "+table).Scan(&maxDtPtr))
			if maxDtPtr != nil {
				maxDtTSDB = *maxDtPtr
			}
		}
	}
	lib.Printf("Using start dates: pg: %s, tsdb: %s\n", lib.ToYMDHDate(maxDtPg), lib.ToYMDHDate(maxDtTSDB))
	// Create date range
	// Just to get into next GHA hour
	from := maxDtPg
	to := time.Now()
	nowHour := time.Now().Hour()
	// Note: fromDate here is a formatted string for child-process args;
	// it is shadowed by a time.Time of the same name inside the metric loop.
	fromDate := lib.ToYMDDate(from)
	fromHour := strconv.Itoa(from.Hour())
	toDate := lib.ToYMDDate(to)
	toHour := strconv.Itoa(to.Hour())
	// Get new GHAs
	if !ctx.SkipPDB {
		// Clear old DB logs
		lib.ClearDBLogs()
		// gha2db
		lib.Printf("GHA range: %s %s - %s %s\n", fromDate, fromHour, toDate, toHour)
		_, err := lib.ExecCommand(
			ctx,
			[]string{
				cmdPrefix + "gha2db",
				fromDate,
				fromHour,
				toDate,
				toHour,
				strings.Join(org, ","),
				strings.Join(repo, ","),
			},
			nil,
		)
		lib.FatalOnError(err)
		// Only run commits analysis for current DB here
		// We have updated repos to the newest state as 1st step in "devstats" call
		// We have also fetched all data from current GHA hour using "gha2db"
		// Now let's update new commits files (from newest hour)
		if !ctx.SkipGetRepos {
			lib.Printf("Update git commits\n")
			_, err = lib.ExecCommand(
				ctx,
				[]string{
					cmdPrefix + "get_repos",
				},
				map[string]string{
					"GHA2DB_PROCESS_COMMITS": "1",
					"GHA2DB_PROJECTS_COMMITS": ctx.Project,
				},
			)
			lib.FatalOnError(err)
		}
		// GitHub API calls to get open issues state
		// It updates milestone and/or label(s) when different sice last comment state
		if !ctx.SkipGHAPI {
			lib.Printf("Update data from GitHub API\n")
			// ghapi2db failures are tolerated: disable fatal exec, log and
			// continue — API rate limits must not abort the whole sync.
			ctx.ExecFatal = false
			_, err = lib.ExecCommand(
				ctx,
				[]string{
					cmdPrefix + "ghapi2db",
				},
				nil,
			)
			ctx.ExecFatal = true
			if err != nil {
				lib.Printf("Error executing ghapi2db: %+v\n", err)
				fmt.Fprintf(os.Stderr, "Error executing ghapi2db: %+v\n", err)
			}
		}
		// Eventual postprocess SQL's from 'structure' call
		lib.Printf("Update structure\n")
		// Recompute views and DB summaries
		_, err = lib.ExecCommand(
			ctx,
			[]string{
				cmdPrefix + "structure",
			},
			map[string]string{
				"GHA2DB_SKIPTABLE": "1",
				"GHA2DB_MGETC": "y",
			},
		)
		lib.FatalOnError(err)
	}
	// If ElasticSearch output is enabled
	if ctx.UseESRaw {
		// Regenerate points from this date
		esFromDate := fromDate
		esFromHour := fromHour
		if ctx.ResetESRaw {
			esFromDate = lib.ToYMDDate(ctx.DefaultStartDate)
			esFromHour = strconv.Itoa(ctx.DefaultStartDate.Hour())
		}
		lib.Printf("Update ElasticSearch raw index\n")
		lib.Printf("ES range: %s %s - %s %s\n", esFromDate, esFromHour, toDate, toHour)
		// Recompute views and DB summaries
		_, err := lib.ExecCommand(
			ctx,
			[]string{
				cmdPrefix + "gha2es",
				esFromDate,
				esFromHour,
				toDate,
				toHour,
			},
			nil,
		)
		lib.FatalOnError(err)
	}
	// Calc metric
	if !ctx.SkipTSDB || ctx.UseESOnly {
		metricsDir := dataPrefix + "metrics"
		if ctx.Project != "" {
			metricsDir += "/" + ctx.Project
		}
		// Regenerate points from this date
		if ctx.ResetTSDB {
			from = ctx.DefaultStartDate
		} else {
			from = maxDtTSDB
		}
		lib.Printf("TS range: %s - %s\n", lib.ToYMDHDate(from), lib.ToYMDHDate(to))
		// TSDB tags (repo groups template variable currently)
		if !ctx.SkipTags {
			// Tags are expensive: recompute only on full reset or once per
			// day (the midnight run).
			if ctx.ResetTSDB || nowHour == 0 {
				_, err := lib.ExecCommand(ctx, []string{cmdPrefix + "tags"}, nil)
				lib.FatalOnError(err)
			} else {
				lib.Printf("Skipping `tags` recalculation, it is only computed once per day\n")
			}
		}
		// When resetting all TSDB data, adding new TS points will race for update TSDB structure
		// While we can just run "columns" once to ensure thay match tags output
		// Event if there are new columns after that - they will be very few not all of them to add at once
		if ctx.ResetTSDB && !ctx.SkipColumns {
			_, err := lib.ExecCommand(ctx, []string{cmdPrefix + "columns"}, nil)
			lib.FatalOnError(err)
		}
		// Annotations
		if !ctx.SkipAnnotations {
			if ctx.Project != "" && (ctx.ResetTSDB || nowHour == 0) {
				_, err := lib.ExecCommand(
					ctx,
					[]string{
						cmdPrefix + "annotations",
					},
					nil,
				)
				lib.FatalOnError(err)
			} else {
				lib.Printf("Skipping `annotations` recalculation, it is only computed once per day\n")
			}
		}
		// Get Quick Ranges from TSDB (it is filled by annotations command)
		quickRanges := lib.GetTagValues(con, ctx, "quick_ranges", "quick_ranges_suffix")
		lib.Printf("Quick ranges: %+v\n", quickRanges)
		// Read metrics configuration
		data, err := lib.ReadFile(ctx, dataPrefix+ctx.MetricsYaml)
		if err != nil {
			lib.FatalOnError(err)
			return
		}
		var allMetrics metrics
		lib.FatalOnError(yaml.Unmarshal(data, &allMetrics))
		// randomize metrics order
		if !ctx.SkipRand {
			allMetrics.randomize(ctx)
		}
		// Keep all histograms here — they are scheduled and executed at the
		// end (possibly in parallel), each carrying its env and allow-fail
		// flag at the same index.
		var hists [][]string
		var envMaps []map[string]string
		var allowFails []bool
		onlyMetrics := false
		if len(ctx.OnlyMetrics) > 0 {
			onlyMetrics = true
		}
		skipMetrics := false
		if len(ctx.SkipMetrics) > 0 {
			skipMetrics = true
		}
		metricsList := []metric{}
		// Iterate all metrics: filter per-project metrics and expand a
		// multi-SQL metric ("sqls") into one metric per SQL file. Only the
		// first expanded metric keeps the Drop property so the drop happens
		// once.
		for _, metric := range allMetrics.Metrics {
			if lib.ExcludedForProject(ctx.Project, metric.Project) {
				lib.Printf("Metric %s have project setting %s which is skipped for the current %s project\n", metric.Name, metric.Project, ctx.Project)
				continue
			}
			if metric.Histogram && metric.Drop != "" {
				lib.Fatalf("you cannot use drop series property on histogram metrics: %+v", metric)
			}
			if metric.MetricSQLs != nil {
				if metric.MetricSQL != "" {
					lib.Fatalf("you cannot use both 'sql' and 'sqls' fields'")
				}
				dropAdded := false
				for _, sql := range *metric.MetricSQLs {
					newMetric := metric
					newMetric.MetricSQLs = nil
					newMetric.MetricSQL = sql
					if !dropAdded {
						dropAdded = true
					} else {
						newMetric.Drop = ""
					}
					metricsList = append(metricsList, newMetric)
				}
				continue
			}
			metricsList = append(metricsList, metric)
		}
		// Iterate all metrics
		for _, metric := range metricsList {
			if metric.Disabled {
				continue
			}
			if onlyMetrics {
				_, ok := ctx.OnlyMetrics[metric.MetricSQL]
				if !ok {
					continue
				}
			}
			if skipMetrics {
				_, skip := ctx.SkipMetrics[metric.MetricSQL]
				if skip {
					continue
				}
			}
			dropProcessed := false
			// handle start_from (datetime) or last_hours (from now - N hours)
			fromDate := from
			if metric.StartFrom != nil && metric.LastHours > 0 {
				lib.Fatalf("you cannot use both StartFrom %v and LastHours %d", *metric.StartFrom, metric.LastHours)
			}
			if metric.StartFrom != nil && fromDate.Before(*metric.StartFrom) {
				fromDate = *metric.StartFrom
			}
			if metric.LastHours > 0 {
				dt := time.Now().Add(time.Hour * time.Duration(-metric.LastHours))
				if fromDate.Before(dt) {
					fromDate = dt
				}
			}
			if ctx.Debug > 0 && fromDate != from {
				lib.Printf("Using non-standard start date: %v, instead of %v\n", fromDate, from)
			}
			if fromDate != from && fromDate.After(to) {
				if ctx.Debug >= 0 {
					lib.Printf("Non-standard start date: %v (used instead of %v) is after end date %v, skipping\n", fromDate, from, to)
				}
				continue
			}
			// Build the extra parameter list passed to calc_metric from the
			// metric's flags.
			extraParams := []string{}
			if ctx.ProjectScale != 1.0 {
				extraParams = append(extraParams, fmt.Sprintf("project_scale:%f", ctx.ProjectScale))
			}
			if metric.Histogram {
				extraParams = append(extraParams, "hist")
			}
			if metric.MultiValue {
				extraParams = append(extraParams, "multivalue")
			}
			if metric.EscapeValueName {
				extraParams = append(extraParams, "escape_value_name")
			}
			if metric.Desc != "" {
				extraParams = append(extraParams, "desc:"+metric.Desc)
			}
			if metric.MergeSeries != "" {
				extraParams = append(extraParams, "merge_series:"+metric.MergeSeries)
			}
			if metric.CustomData {
				extraParams = append(extraParams, "custom_data")
			}
			if metric.SeriesNameMap != nil {
				extraParams = append(extraParams, "series_name_map:"+fmt.Sprintf("%v", metric.SeriesNameMap))
			}
			periods := strings.Split(metric.Periods, ",")
			aggregate := metric.Aggregate
			if aggregate == "" {
				aggregate = "1"
			}
			if metric.AnnotationsRanges {
				extraParams = append(extraParams, "annotations_ranges")
				periods = quickRanges
				aggregate = "1"
			}
			aggregateArr := strings.Split(aggregate, ",")
			skips := strings.Split(metric.Skip, ",")
			skipMap := make(map[string]struct{})
			for _, skip := range skips {
				skipMap[skip] = struct{}{}
			}
			if !ctx.ResetTSDB && !ctx.ResetRanges {
				extraParams = append(extraParams, "skip_past")
			}
			for _, aggrStr := range aggregateArr {
				_, err := strconv.Atoi(aggrStr)
				lib.FatalOnError(err)
				aggrSuffix := aggrStr
				if aggrSuffix == "1" {
					aggrSuffix = ""
				}
				for _, period := range periods {
					periodAggr := period + aggrSuffix
					_, found := skipMap[periodAggr]
					if found {
						if ctx.Debug > 0 {
							lib.Printf("Skipped period %s\n", periodAggr)
						}
						continue
					}
					// Skip periods that don't need recomputation at this
					// wall-clock time (e.g. monthly series mid-month).
					recalc := lib.ComputePeriodAtThisDate(ctx, period, to, metric.Histogram)
					if ctx.Debug > 0 {
						lib.Printf("Recalculate period \"%s%s\", hist %v for date to %v: %v\n", period, aggrSuffix, metric.Histogram, to, recalc)
					}
					if (!ctx.ResetTSDB || ctx.ComputePeriods != nil) && !recalc {
						lib.Printf("Skipping recalculating period \"%s%s\", hist %v for date to %v\n", period, aggrSuffix, metric.Histogram, to)
						continue
					}
					seriesNameOrFunc := metric.SeriesNameOrFunc
					if metric.AddPeriodToName {
						seriesNameOrFunc += "_" + periodAggr
					}
					// Histogram metrics usualy take long time, but executes single query, so there is no way to
					// Implement multi threading inside "calc_metric" call for them
					// So we're creating array of such metrics to be executed at the end - each in a separate go routine
					eParams := extraParams
					// The drop parameter is added only once per metric
					// (first period/aggregate combination).
					if ctx.EnableMetricsDrop && !dropProcessed {
						if metric.Drop != "" {
							eParams = append(eParams, "drop:"+metric.Drop)
						}
						dropProcessed = true
					}
					envMap := processEnvMap(metric.EnvMap)
					if metric.Histogram {
						// Histograms never carry a drop param (enforced
						// above), so extraParams == eParams here.
						lib.Printf("Scheduled histogram metric %v, period %v, desc: '%v', aggregate: '%v' ...\n", metric.Name, period, metric.Desc, aggrSuffix)
						hists = append(
							hists,
							[]string{
								cmdPrefix + "calc_metric",
								seriesNameOrFunc,
								fmt.Sprintf("%s/%s.sql", metricsDir, metric.MetricSQL),
								lib.ToYMDHDate(fromDate),
								lib.ToYMDHDate(to),
								periodAggr,
								strings.Join(extraParams, ","),
							},
						)
						envMaps = append(envMaps, envMap)
						allowFails = append(allowFails, metric.AllowFail)
					} else {
						lib.Printf("Calculate metric %v, period %v, desc: '%v', aggregate: '%v' ...\n", metric.Name, period, metric.Desc, aggrSuffix)
						// Temporarily disable fatal-on-error for metrics
						// allowed to fail.
						execFatal := ctx.ExecFatal
						if metric.AllowFail {
							ctx.ExecFatal = false
						}
						_, err = lib.ExecCommand(
							ctx,
							[]string{
								cmdPrefix + "calc_metric",
								seriesNameOrFunc,
								fmt.Sprintf("%s/%s.sql", metricsDir, metric.MetricSQL),
								lib.ToYMDHDate(fromDate),
								lib.ToYMDHDate(to),
								periodAggr,
								strings.Join(eParams, ","),
							},
							envMap,
						)
						if metric.AllowFail {
							ctx.ExecFatal = execFatal
						}
						if !metric.AllowFail {
							lib.FatalOnError(err)
						} else if err != nil {
							lib.Printf("WARNING: %+v failed: %+v\n", metric, err)
							err = nil
						}
					}
				}
			}
		}
		// randomize histograms
		if !ctx.SkipRand {
			lib.Printf("Randomizing histogram metrics calculation order\n")
			rand.Seed(time.Now().UnixNano())
			rand.Shuffle(
				len(hists),
				func(i, j int) {
					hists[i], hists[j] = hists[j], hists[i]
					envMaps[i], envMaps[j] = envMaps[j], envMaps[i]
					allowFails[i], allowFails[j] = allowFails[j], allowFails[i]
				},
			)
		}
		// Process histograms (possibly MT)
		// Get number of CPUs available
		thrN := lib.GetThreadsNum(ctx)
		if thrN > 1 {
			// Run up to thrN histogram workers at once; the channel acts as
			// a join point.
			lib.Printf("Now processing %d histograms using MT%d version\n", len(hists), thrN)
			ch := make(chan bool)
			nThreads := 0
			for idx, hist := range hists {
				go calcHistogram(ch, ctx, hist, envMaps[idx], allowFails[idx])
				nThreads++
				if nThreads == thrN {
					<-ch
					nThreads--
				}
			}
			lib.Printf("Final threads join\n")
			for nThreads > 0 {
				<-ch
				nThreads--
			}
		} else {
			lib.Printf("Now processing %d histograms using ST version\n", len(hists))
			for idx, hist := range hists {
				calcHistogram(nil, ctx, hist, envMaps[idx], allowFails[idx])
			}
		}
		// TSDB ensure that calculated metric have all columns from tags
		if !ctx.SkipColumns {
			if ctx.RunColumns || ctx.ResetTSDB || nowHour == 0 {
				_, err := lib.ExecCommand(ctx, []string{cmdPrefix + "columns"}, nil)
				lib.FatalOnError(err)
			} else {
				lib.Printf("Skipping `columns` recalculation, it is only computed once per day\n")
			}
		}
	}
	// Vars (some tables/dashboards require vars calculation)
	if (!ctx.SkipPDB || ctx.UseESOnly) && !ctx.SkipVars {
		varsFN := os.Getenv("GHA2DB_VARS_FN_YAML")
		if varsFN == "" {
			varsFN = "sync_vars.yaml"
		}
		_, err := lib.ExecCommand(
			ctx,
			[]string{cmdPrefix + "vars"},
			map[string]string{
				"GHA2DB_VARS_FN_YAML": varsFN,
			},
		)
		lib.FatalOnError(err)
	}
	lib.Printf("Sync success\n")
}
// calcHistogram calculates a single histogram by invoking "calc_metric"
// with the 7 strings packed into hist (command followed by 6 arguments).
// envMap is passed through to the child process; when allowFail is true a
// failure is logged as a warning instead of aborting the program. When ch
// is non-nil, a value is sent on it when done so callers can join worker
// goroutines.
func calcHistogram(ch chan bool, ctx *lib.Ctx, hist []string, envMap map[string]string, allowFail bool) {
	if len(hist) != 7 {
		lib.Fatalf("calcHistogram, expected 7 strings, got: %d: %v", len(hist), hist)
	}
	lib.Printf(
		"Calculate histogram %s,%s,%s,%s,%s,%s ...\n",
		hist[1], hist[2], hist[3], hist[4], hist[5], hist[6],
	)
	// Temporarily disable fatal-on-error for histograms allowed to fail.
	prevFatal := ctx.ExecFatal
	if allowFail {
		ctx.ExecFatal = false
	}
	// Execute "calc_metric" with a defensive copy of the argument slice.
	args := append([]string(nil), hist...)
	_, err := lib.ExecCommand(ctx, args, envMap)
	if allowFail {
		ctx.ExecFatal = prevFatal
	}
	if !allowFail {
		lib.FatalOnError(err)
	} else if err != nil {
		lib.Printf("WARNING: histogram %+v %+v failed: %+v\n", envMap, hist, err)
		err = nil
	}
	// Synchronize go routine
	if ch != nil {
		ch <- true
	}
}
// getSyncArgs returns the sync arguments: the user's command line when one
// was given, otherwise the per-project command line from the projects YAML
// (project selected via GHA2DB_PROJECT). When neither a command line nor a
// project is available, or the project is unknown, it aborts the program.
// As a side effect it may adjust ctx.DefaultStartDate, ctx.ProjectScale and
// set project environment variables.
func getSyncArgs(ctx *lib.Ctx, osArgs []string) []string {
	// User commandline override
	if len(osArgs) > 1 {
		return osArgs[1:]
	}
	// No user commandline, get args specific to project GHA2DB_PROJECT
	if ctx.Project == "" {
		lib.Fatalf(
			"you have to set project via GHA2DB_PROJECT environment variable if you provide no commandline arguments",
		)
	}
	// Local or cron mode?
	dataPrefix := ctx.DataDir
	if ctx.Local {
		dataPrefix = "./"
	}
	// Are we running from "devstats" which already sets ENV from projects.yaml?
	envSet := os.Getenv("ENV_SET") != ""
	// Read defined projects
	data, err := lib.ReadFile(ctx, dataPrefix+ctx.ProjectsYaml)
	if err != nil {
		lib.FatalOnError(err)
		return []string{}
	}
	var projects lib.AllProjects
	lib.FatalOnError(yaml.Unmarshal(data, &projects))
	proj, ok := projects.Projects[ctx.Project]
	if ok {
		// Project-level start date wins unless forced from the environment.
		if proj.StartDate != nil && !ctx.ForceStartDate {
			ctx.DefaultStartDate = *proj.StartDate
		}
		// Apply the project's env only when "devstats" didn't already do it.
		if !envSet && proj.Env != nil {
			for envK, envV := range proj.Env {
				lib.FatalOnError(os.Setenv(envK, envV))
			}
		}
		if proj.ProjectScale != nil && *proj.ProjectScale >= 0.0 {
			ctx.ProjectScale = *proj.ProjectScale
		}
		return proj.CommandLine
	}
	// No user commandline and project not found
	lib.Fatalf(
		"project '%s' is not defined in '%s'",
		ctx.Project,
		ctx.ProjectsYaml,
	)
	return []string{}
}
func main() {
dtStart := time.Now()
// Environment context parse
var ctx lib.Ctx
ctx.Init()
sync(&ctx, getSyncArgs(&ctx, os.Args))
dtEnd := time.Now()
lib.Printf("Time: %v\n", dtEnd.Sub(dtStart))
} | cmd/gha2db_sync/gha2db_sync.go | 0.57678 | 0.453927 | gha2db_sync.go | starcoder |
package stripe
import "encoding/json"
// CreditNoteReason is the reason why a given credit note was created.
type CreditNoteReason string
// List of values that CreditNoteReason can take.
const (
	CreditNoteReasonDuplicate CreditNoteReason = "duplicate"
	CreditNoteReasonFraudulent CreditNoteReason = "fraudulent"
	CreditNoteReasonOrderChange CreditNoteReason = "order_change"
	CreditNoteReasonProductUnsatisfactory CreditNoteReason = "product_unsatisfactory"
)
// CreditNoteStatus is the list of allowed values for the credit note's status.
type CreditNoteStatus string
// List of values that CreditNoteStatus can take.
const (
	CreditNoteStatusIssued CreditNoteStatus = "issued"
	CreditNoteStatusVoid CreditNoteStatus = "void"
)
// CreditNoteType is the list of allowed values for the credit note's type.
type CreditNoteType string
// List of values that CreditNoteType can take.
const (
	CreditNoteTypePostPayment CreditNoteType = "post_payment"
	CreditNoteTypePrePayment CreditNoteType = "pre_payment"
)
// CreditNoteParams is the set of parameters that can be used when creating or updating a credit note.
// Fields are pointers so that an unset field can be distinguished from a zero value.
// For more details see https://stripe.com/docs/api/credit_notes/create, https://stripe.com/docs/api/credit_notes/update.
type CreditNoteParams struct {
	Params `form:"*"`
	Amount *int64 `form:"amount"`
	CreditAmount *int64 `form:"credit_amount"`
	Invoice *string `form:"invoice"`
	Memo *string `form:"memo"`
	OutOfBandAmount *int64 `form:"out_of_band_amount"`
	Reason *string `form:"reason"`
	Refund *string `form:"refund"`
	RefundAmount *int64 `form:"refund_amount"`
}
// CreditNoteListParams is the set of parameters that can be used when listing credit notes.
// For more details see https://stripe.com/docs/api/credit_notes/list.
type CreditNoteListParams struct {
	ListParams `form:"*"`
	Customer *string `form:"customer"`
	Invoice *string `form:"invoice"`
}
// CreditNotePreviewParams is the set of parameters that can be used when previewing a credit note.
// It mirrors CreditNoteParams but targets the preview endpoint.
// For more details see https://stripe.com/docs/api/credit_notes/preview.
type CreditNotePreviewParams struct {
	Params `form:"*"`
	Amount *int64 `form:"amount"`
	CreditAmount *int64 `form:"credit_amount"`
	Invoice *string `form:"invoice"`
	Memo *string `form:"memo"`
	OutOfBandAmount *int64 `form:"out_of_band_amount"`
	Reason *string `form:"reason"`
	Refund *string `form:"refund"`
	RefundAmount *int64 `form:"refund_amount"`
}
// CreditNoteVoidParams is the set of parameters that can be used when voiding invoices.
// It carries no parameters of its own beyond the common Params.
type CreditNoteVoidParams struct {
	Params `form:"*"`
}
// CreditNote is the resource representing a Stripe credit note.
// Created and VoidedAt are int64 timestamps (presumably Unix epoch seconds,
// per Stripe API conventions — confirm against the API reference).
// For more details see https://stripe.com/docs/api/credit_notes/object.
type CreditNote struct {
	Amount int64 `json:"amount"`
	Created int64 `json:"created"`
	Currency Currency `json:"currency"`
	Customer *Customer `json:"customer"`
	CustomerBalanceTransaction *CustomerBalanceTransaction `json:"customer_balance_transaction"`
	Invoice *Invoice `json:"invoice"`
	ID string `json:"id"`
	Livemode bool `json:"livemode"`
	Memo string `json:"memo"`
	Metadata map[string]string `json:"metadata"`
	Number string `json:"number"`
	Object string `json:"object"`
	PDF string `json:"pdf"`
	Reason CreditNoteReason `json:"reason"`
	Refund *Refund `json:"refund"`
	Status CreditNoteStatus `json:"status"`
	Type CreditNoteType `json:"type"`
	VoidedAt int64 `json:"voided_at"`
}
// CreditNoteList is a list of credit notes as retrieved from a list endpoint.
type CreditNoteList struct {
	ListMeta
	Data []*CreditNote `json:"data"`
}
// UnmarshalJSON handles deserialization of a CreditNote.
// This custom unmarshaling is needed because the resulting
// property may be an id or the full struct if it was expanded.
func (i *CreditNote) UnmarshalJSON(data []byte) error {
if id, ok := ParseID(data); ok {
i.ID = id
return nil
}
type note CreditNote
var v note
if err := json.Unmarshal(data, &v); err != nil {
return err
}
*i = CreditNote(v)
return nil
} | creditnote.go | 0.686685 | 0.443118 | creditnote.go | starcoder |
package peertest
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"go.uber.org/yarpc/api/peer"
)
// TransportDeps are passed through all the TransportActions in order to pass certain
// state in between Actions
type TransportDeps struct {
	PeerIdentifiers map[string]peer.Identifier // shared peer identifiers, keyed by test-chosen string ids
	Subscribers map[string]peer.Subscriber // shared subscribers, keyed by test-chosen string ids
}
// TransportAction defines actions that can be applied to an Transport
type TransportAction interface {
	// Apply runs a function on the Transport and asserts the result
	Apply(*testing.T, peer.Transport, TransportDeps)
}
// RetainAction will execute the RetainPeer method on the Transport
type RetainAction struct {
	InputIdentifierID string // key into TransportDeps.PeerIdentifiers for the identifier to retain
	InputSubscriberID string // key into TransportDeps.Subscribers for the subscriber to use
	ExpectedErr error // exact error RetainPeer is expected to return (nil means success expected)
	ExpectedPeerID string // identifier the returned peer is expected to report on success
}
// Apply will execute the RetainPeer method on the Transport
func (a RetainAction) Apply(t *testing.T, transport peer.Transport, deps TransportDeps) {
	// Resolve the action's inputs from the shared dependency maps.
	peerID := deps.PeerIdentifiers[a.InputIdentifierID]
	sub := deps.Subscribers[a.InputSubscriberID]
	p, err := transport.RetainPeer(peerID, sub)
	// When an error is expected the returned peer must be nil and the
	// error must match exactly.
	if a.ExpectedErr != nil {
		assert.Equal(t, a.ExpectedErr, err)
		assert.Nil(t, p)
		return
	}
	if assert.NoError(t, err) && assert.NotNil(t, p) {
		assert.Equal(t, a.ExpectedPeerID, p.Identifier())
	}
}
// ReleaseAction will execute the ReleasePeer method on the Transport
type ReleaseAction struct {
	InputIdentifierID string // key into TransportDeps.PeerIdentifiers for the identifier to release
	InputSubscriberID string // key into TransportDeps.Subscribers for the subscriber to release
	ExpectedErrType error // error *type* (not value) ReleasePeer is expected to return; nil means no error expected
}
// Apply will execute the ReleasePeer method on the Transport
func (a ReleaseAction) Apply(t *testing.T, transport peer.Transport, deps TransportDeps) {
	peerID := deps.PeerIdentifiers[a.InputIdentifierID]
	sub := deps.Subscribers[a.InputSubscriberID]
	err := transport.ReleasePeer(peerID, sub)
	// Unlike RetainAction, the error is matched by type only (assert.IsType).
	if a.ExpectedErrType != nil && assert.Error(t, err) {
		assert.IsType(t, a.ExpectedErrType, err)
	} else {
		assert.Nil(t, err)
	}
}
// ApplyTransportActions runs all the TransportActions on the peer Transport
func ApplyTransportActions(t *testing.T, transport peer.Transport, actions []TransportAction, d TransportDeps) {
for i, action := range actions {
t.Run(fmt.Sprintf("action #%d: %T", i, action), func(t *testing.T) {
action.Apply(t, transport, d)
})
}
} | api/peer/peertest/transportaction.go | 0.629433 | 0.433142 | transportaction.go | starcoder |
package image
import (
"strconv"
)
// A Point is an X, Y coordinate pair. The axes increase right and down.
type Point struct {
	X, Y int
}

// String returns a string representation of p like "(3,4)".
func (p Point) String() string {
	return "(" + strconv.Itoa(p.X) + "," + strconv.Itoa(p.Y) + ")"
}

// Add returns the vector p+q.
func (p Point) Add(q Point) Point {
	return Point{p.X + q.X, p.Y + q.Y}
}

// Sub returns the vector p-q.
func (p Point) Sub(q Point) Point {
	return Point{p.X - q.X, p.Y - q.Y}
}

// Mul returns the vector p*k.
func (p Point) Mul(k int) Point {
	return Point{p.X * k, p.Y * k}
}

// Div returns the vector p/k. Division truncates toward zero,
// following Go's integer division.
func (p Point) Div(k int) Point {
	return Point{p.X / k, p.Y / k}
}

// Mod returns the point q in r such that p.X-q.X is a multiple of r's width
// and p.Y-q.Y is a multiple of r's height.
func (p Point) Mod(r Rectangle) Point {
	w, h := r.Dx(), r.Dy()
	p = p.Sub(r.Min)
	p.X = p.X % w
	if p.X < 0 {
		p.X += w
	}
	p.Y = p.Y % h
	if p.Y < 0 {
		p.Y += h
	}
	return p.Add(r.Min)
}

// Eq returns whether p and q are equal.
func (p Point) Eq(q Point) bool {
	return p.X == q.X && p.Y == q.Y
}

// ZP is the zero Point.
var ZP Point

// Pt is shorthand for Point{X, Y}.
func Pt(X, Y int) Point {
	return Point{X, Y}
}

// A Rectangle contains the points with Min.X <= X < Max.X, Min.Y <= Y < Max.Y.
// It is well-formed if Min.X <= Max.X and likewise for Y. Points are always
// well-formed. A rectangle's methods always return well-formed outputs for
// well-formed inputs.
type Rectangle struct {
	Min, Max Point
}

// String returns a string representation of r like "(3,4)-(6,5)".
func (r Rectangle) String() string {
	return r.Min.String() + "-" + r.Max.String()
}

// Dx returns r's width.
func (r Rectangle) Dx() int {
	return r.Max.X - r.Min.X
}

// Dy returns r's height.
func (r Rectangle) Dy() int {
	return r.Max.Y - r.Min.Y
}

// Size returns r's width and height.
func (r Rectangle) Size() Point {
	return Point{
		r.Max.X - r.Min.X,
		r.Max.Y - r.Min.Y,
	}
}

// Add returns the rectangle r translated by p.
func (r Rectangle) Add(p Point) Rectangle {
	return Rectangle{
		Point{r.Min.X + p.X, r.Min.Y + p.Y},
		Point{r.Max.X + p.X, r.Max.Y + p.Y},
	}
}

// Sub returns the rectangle r translated by -p.
func (r Rectangle) Sub(p Point) Rectangle {
	return Rectangle{
		Point{r.Min.X - p.X, r.Min.Y - p.Y},
		Point{r.Max.X - p.X, r.Max.Y - p.Y},
	}
}

// Inset returns the rectangle r inset by n, which may be negative. If either
// of r's dimensions is less than 2*n then an empty rectangle near the center
// of r will be returned.
func (r Rectangle) Inset(n int) Rectangle {
	if r.Dx() < 2*n {
		r.Min.X = (r.Min.X + r.Max.X) / 2
		r.Max.X = r.Min.X
	} else {
		r.Min.X += n
		r.Max.X -= n
	}
	if r.Dy() < 2*n {
		r.Min.Y = (r.Min.Y + r.Max.Y) / 2
		r.Max.Y = r.Min.Y
	} else {
		r.Min.Y += n
		r.Max.Y -= n
	}
	return r
}

// Intersect returns the largest rectangle contained by both r and s. If the
// two rectangles do not overlap then the zero rectangle will be returned.
func (r Rectangle) Intersect(s Rectangle) Rectangle {
	if r.Min.X < s.Min.X {
		r.Min.X = s.Min.X
	}
	if r.Min.Y < s.Min.Y {
		r.Min.Y = s.Min.Y
	}
	if r.Max.X > s.Max.X {
		r.Max.X = s.Max.X
	}
	if r.Max.Y > s.Max.Y {
		r.Max.Y = s.Max.Y
	}
	if r.Min.X > r.Max.X || r.Min.Y > r.Max.Y {
		return ZR
	}
	return r
}

// Union returns the smallest rectangle that contains both r and s.
func (r Rectangle) Union(s Rectangle) Rectangle {
	if r.Min.X > s.Min.X {
		r.Min.X = s.Min.X
	}
	if r.Min.Y > s.Min.Y {
		r.Min.Y = s.Min.Y
	}
	if r.Max.X < s.Max.X {
		r.Max.X = s.Max.X
	}
	if r.Max.Y < s.Max.Y {
		r.Max.Y = s.Max.Y
	}
	return r
}

// Empty returns whether the rectangle contains no points.
func (r Rectangle) Empty() bool {
	return r.Min.X >= r.Max.X || r.Min.Y >= r.Max.Y
}

// Eq returns whether r and s are equal.
func (r Rectangle) Eq(s Rectangle) bool {
	return r.Min.X == s.Min.X && r.Min.Y == s.Min.Y &&
		r.Max.X == s.Max.X && r.Max.Y == s.Max.Y
}

// Overlaps returns whether r and s have a non-empty intersection.
func (r Rectangle) Overlaps(s Rectangle) bool {
	return r.Min.X < s.Max.X && s.Min.X < r.Max.X &&
		r.Min.Y < s.Max.Y && s.Min.Y < r.Max.Y
}

// Contains returns whether r contains p. The maximum edge is exclusive.
func (r Rectangle) Contains(p Point) bool {
	return p.X >= r.Min.X && p.X < r.Max.X &&
		p.Y >= r.Min.Y && p.Y < r.Max.Y
}

// Canon returns the canonical version of r. The returned rectangle has minimum
// and maximum coordinates swapped if necessary so that it is well-formed.
func (r Rectangle) Canon() Rectangle {
	if r.Max.X < r.Min.X {
		r.Min.X, r.Max.X = r.Max.X, r.Min.X
	}
	if r.Max.Y < r.Min.Y {
		r.Min.Y, r.Max.Y = r.Max.Y, r.Min.Y
	}
	return r
}

// ZR is the zero Rectangle.
var ZR Rectangle

// Rect is shorthand for Rectangle{Pt(x0, y0), Pt(x1, y1)}, with the
// coordinates swapped if necessary so the result is well-formed.
func Rect(x0, y0, x1, y1 int) Rectangle {
	if x0 > x1 {
		x0, x1 = x1, x0
	}
	if y0 > y1 {
		y0, y1 = y1, y0
	}
	return Rectangle{Point{x0, y0}, Point{x1, y1}}
}
package model
import (
"math/rand"
"time"
"github.com/hculpan/go-sdl-lib/component"
)
// GameBoard is the cell grid, indexed as [x][y].
// A value of 0 is a dead cell; any non-zero value is a live cell.
type GameBoard [][]byte
// GameOfLife is a Conway's Game of Life simulation using two boards:
// the current generation is read from activeBoard and the next
// generation is written into targetBoard, then the two are swapped.
type GameOfLife struct {
	component.BaseGame
	BoardWidth int // board width in cells
	BoardHeight int // board height in cells
	Cycle int // number of generations simulated so far
	activeBoard *GameBoard // board holding the current generation (read side)
	targetBoard *GameBoard // scratch board the next generation is written into
	livingRatio float32 // probability that a cell starts alive on Reset
}
// Game is the package-level singleton, set by NewGameOfLife.
var Game *GameOfLife
// NewGameOfLife builds a simulation whose board is 1/4 of the pixel
// dimensions (one cell per 4x4 pixel block), seeds it randomly using
// livingRatio, stores it in the package-level Game variable and returns it.
func NewGameOfLife(gameWidth, gameHeight int32, livingRatio float32) *GameOfLife {
	rand.Seed(time.Now().UnixNano())
	result := &GameOfLife{
		BoardWidth: int(gameWidth / 4),
		BoardHeight: int(gameHeight / 4),
	}
	// Both buffers are allocated up front; they are reused (and swapped)
	// every generation.
	result.activeBoard = result.initalizeBoard()
	result.targetBoard = result.initalizeBoard()
	result.livingRatio = livingRatio
	// Reset seeds the target board and swaps it in as the active one.
	result.Reset()
	// Initialize comes from the embedded component.BaseGame — presumably
	// sets up windowing/render state; confirm in the go-sdl-lib package.
	result.Initialize(gameWidth, gameHeight)
	Game = result
	return result
}
// initalizeBoard returns a pointer to a freshly allocated
// BoardWidth x BoardHeight board with every cell dead (0).
func (g GameOfLife) initalizeBoard() *GameBoard {
	// make zero-fills each column, so no explicit clearing loop is needed.
	board := make(GameBoard, g.BoardWidth)
	for col := range board {
		board[col] = make([]byte, g.BoardHeight)
	}
	return &board
}
// SwitchBoards swaps the active and target boards; called after a new
// generation has been fully written into the target board.
func (g *GameOfLife) SwitchBoards() {
	g.activeBoard, g.targetBoard = g.targetBoard, g.activeBoard
}
// GetCurrentBoardState returns the state of cell (x, y) on the active
// board; coordinates outside the board are treated as dead cells (0).
func (g GameOfLife) GetCurrentBoardState(x, y int) byte {
	inBounds := x >= 0 && x < g.BoardWidth && y >= 0 && y < g.BoardHeight
	if !inBounds {
		return 0
	}
	return (*g.activeBoard)[x][y]
}
// SetTargetBoardState writes newState into cell (x, y) of the target
// (next-generation) board. Coordinates are not bounds-checked here;
// callers must pass valid positions.
func (g *GameOfLife) SetTargetBoardState(x, y int, newState byte) {
	(*g.targetBoard)[x][y] = newState
}
// countNeighbors returns how many of the 8 cells adjacent to (x, y) are
// alive on the active board. Off-board neighbors count as dead because
// GetCurrentBoardState returns 0 for out-of-range coordinates.
func (g GameOfLife) countNeighbors(x, y int) byte {
	// Iterate the 3x3 neighborhood, skipping the center cell, instead of
	// spelling out eight nearly identical if statements.
	var alive byte
	for dx := -1; dx <= 1; dx++ {
		for dy := -1; dy <= 1; dy++ {
			if dx == 0 && dy == 0 {
				continue // the cell itself is not its own neighbor
			}
			if g.GetCurrentBoardState(x+dx, y+dy) > 0 {
				alive++
			}
		}
	}
	return alive
}
// Update advances the simulation by one generation. For every cell it
// applies Conway's rules — a cell is alive next generation iff it has
// exactly 3 live neighbors, or 2 live neighbors and is currently alive —
// writes the result to the target board and swaps the boards.
// Always returns nil.
func (g *GameOfLife) Update() error {
	g.Cycle++
	for x := 0; x < int(g.BoardWidth); x++ {
		for y := 0; y < int(g.BoardHeight); y++ {
			n := g.countNeighbors(x, y)
			switch {
			case n < 2 || n > 3:
				// Under- or over-population: cell dies (or stays dead).
				g.SetTargetBoardState(x, y, 0)
			case n == 2:
				// Exactly two neighbors: current state is preserved.
				if g.GetCurrentBoardState(x, y) > 0 {
					g.SetTargetBoardState(x, y, 1)
				} else {
					g.SetTargetBoardState(x, y, 0)
				}
			case n == 3:
				// Exactly three neighbors: alive regardless of previous state.
				g.SetTargetBoardState(x, y, 1)
			}
		}
	}
	g.SwitchBoards()
	return nil
}
func (g *GameOfLife) Reset() error {
g.Cycle = 0
for x := 0; x < g.BoardWidth; x++ {
for y := 0; y < g.BoardHeight; y++ {
if g.livingRatio > rand.Float32() {
g.SetTargetBoardState(x, y, 1)
} else {
g.SetTargetBoardState(x, y, 0)
}
}
}
g.SwitchBoards()
return nil
} | app/model/gameoflife.go | 0.571049 | 0.443721 | gameoflife.go | starcoder |
package expression
import (
"time"
)
// ValueType represents the type of a value in an expression
type ValueType int

const (
	// ValueTypeUnspecified is an unspecified type
	ValueTypeUnspecified ValueType = iota
	// ValueTypeString is a string
	ValueTypeString
	// ValueTypeSignedInt8 is a signed 8-bit integer
	ValueTypeSignedInt8
	// ValueTypeSignedInt16 is a signed 16-bit integer
	ValueTypeSignedInt16
	// ValueTypeSignedInt32 is a signed 32-bit integer
	ValueTypeSignedInt32
	// ValueTypeSignedInt64 is a signed 64-bit integer
	ValueTypeSignedInt64
	// ValueTypeUnsignedInt8 is an unsigned 8-bit integer
	ValueTypeUnsignedInt8
	// ValueTypeUnsignedInt16 is an unsigned 16-bit integer
	ValueTypeUnsignedInt16
	// ValueTypeUnsignedInt32 is an unsigned 32-bit integer
	ValueTypeUnsignedInt32
	// ValueTypeUnsignedInt64 is an unsigned 64-bit integer
	ValueTypeUnsignedInt64
	// ValueTypeBool is a bool
	ValueTypeBool
	// ValueTypeDouble is a 64-bit floating point
	ValueTypeDouble
	// ValueTypeTimestamp is a timestamp with nanosecond granularity
	ValueTypeTimestamp
)

// ValueTypeStrings is a mapping of value type constants to human-readable
// string representations.
// NOTE(review): ValueTypeTimestamp maps to "uint64" rather than a
// time-specific name — possibly intentional (timestamps carried as uint64
// nanoseconds) but it duplicates the ValueTypeUnsignedInt64 entry; confirm.
var ValueTypeStrings = map[ValueType]string{
	ValueTypeUnspecified: "<<invalid>>",
	ValueTypeString: "string",
	ValueTypeSignedInt8: "int8",
	ValueTypeSignedInt16: "int16",
	ValueTypeSignedInt32: "int32",
	ValueTypeSignedInt64: "int64",
	ValueTypeUnsignedInt8: "uint8",
	ValueTypeUnsignedInt16: "uint16",
	ValueTypeUnsignedInt32: "uint32",
	ValueTypeUnsignedInt64: "uint64",
	ValueTypeBool: "bool",
	ValueTypeDouble: "float64",
	ValueTypeTimestamp: "uint64",
}

// ValueTypeOf returns the ValueType matching the dynamic type of i, or
// ValueTypeUnspecified when the type is not supported.
func ValueTypeOf(i interface{}) ValueType {
	switch i.(type) {
	case bool:
		return ValueTypeBool
	case string:
		return ValueTypeString
	case float64:
		return ValueTypeDouble
	case time.Time:
		return ValueTypeTimestamp
	case int8:
		return ValueTypeSignedInt8
	case int16:
		return ValueTypeSignedInt16
	case int32:
		return ValueTypeSignedInt32
	case int64:
		return ValueTypeSignedInt64
	case uint8:
		return ValueTypeUnsignedInt8
	case uint16:
		return ValueTypeUnsignedInt16
	case uint32:
		return ValueTypeUnsignedInt32
	case uint64:
		return ValueTypeUnsignedInt64
	}
	return ValueTypeUnspecified
}

// IsInteger returns true if the specified ValueType represents an integer
// type.
func (t ValueType) IsInteger() bool {
	// The integer constants form one contiguous run in the iota sequence,
	// from ValueTypeSignedInt8 through ValueTypeUnsignedInt64.
	return t >= ValueTypeSignedInt8 && t <= ValueTypeUnsignedInt64
}

// IsNumeric returns true if the specified ValueType represents a numeric
// type (integer or double).
func (t ValueType) IsNumeric() bool {
	return t.IsInteger() || t == ValueTypeDouble
}

// IsString returns true if the specified ValueType represents a string type.
func (t ValueType) IsString() bool {
	return t == ValueTypeString
}
// binaryOp identifies a binary operator in an expression tree.
type binaryOp int
// Supported binary operators; binaryOpInvalid is the zero value.
const (
	binaryOpInvalid binaryOp = iota
	binaryOpLogicalAnd
	binaryOpLogicalOr
	binaryOpEQ
	binaryOpNE
	binaryOpLT
	binaryOpLE
	binaryOpGT
	binaryOpGE
	binaryOpLike
	binaryOpBitwiseAnd
)
// binaryOpStrings maps each operator to its SQL-like textual form.
// binaryOpInvalid intentionally has no entry.
var binaryOpStrings = map[binaryOp]string{
	binaryOpLogicalAnd: "AND",
	binaryOpLogicalOr: "OR",
	binaryOpEQ: "=",
	binaryOpNE: "!=",
	binaryOpLT: "<",
	binaryOpLE: "<=",
	binaryOpGT: ">",
	binaryOpGE: ">=",
	binaryOpLike: "LIKE",
	binaryOpBitwiseAnd: "&",
}
// binaryOpKernelStrings maps each operator to its kernel-filter textual
// form (C-like syntax: "&&"/"||" for the logical operators, "==" for
// equality and "~" for LIKE).
var binaryOpKernelStrings = map[binaryOp]string{
	binaryOpLogicalAnd: "&&",
	binaryOpLogicalOr: "||",
	binaryOpEQ: "==",
	binaryOpNE: "!=",
	binaryOpLT: "<",
	binaryOpLE: "<=",
	binaryOpGT: ">",
	binaryOpGE: ">=",
	binaryOpLike: "~",
	binaryOpBitwiseAnd: "&",
}
// unaryOp identifies a unary operator in an expression tree.
type unaryOp int

// Supported unary operators. unaryOpInvalid is the zero value and has no
// textual rendering.
const (
	unaryOpInvalid unaryOp = iota
	unaryOpIsNull
	unaryOpIsNotNull
)

// unaryOpStrings maps each valid unary operator to its SQL textual form.
var unaryOpStrings = map[unaryOp]string{
	unaryOpIsNotNull: "IS NOT NULL",
	unaryOpIsNull:    "IS NULL",
}
package ent
import (
"fmt"
"strings"
"time"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
)
// Bouncer is the model entity for the Bouncer schema.
// Fields are populated by assignValues from scanned SQL rows.
// Note: APIKey carries a json tag and is therefore included when the
// entity is marshaled — treat serialized output as sensitive.
type Bouncer struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// APIKey holds the value of the "api_key" field.
	APIKey string `json:"api_key,omitempty"`
	// Revoked holds the value of the "revoked" field.
	Revoked bool `json:"revoked,omitempty"`
	// IPAddress holds the value of the "ip_address" field.
	IPAddress string `json:"ip_address,omitempty"`
	// Type holds the value of the "type" field.
	Type string `json:"type,omitempty"`
	// Version holds the value of the "version" field.
	Version string `json:"version,omitempty"`
	// Until holds the value of the "until" field.
	Until time.Time `json:"until,omitempty"`
	// LastPull holds the value of the "last_pull" field.
	LastPull time.Time `json:"last_pull,omitempty"`
}
// scanValues returns the types for scanning values from sql.Rows.
// For each requested column it allocates the matching sql.Null* wrapper so
// that NULL database values can be scanned without errors; unknown column
// names produce an error.
func (*Bouncer) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i := range columns {
		switch columns[i] {
		case bouncer.FieldRevoked:
			// boolean column
			values[i] = new(sql.NullBool)
		case bouncer.FieldID:
			// integer primary key
			values[i] = new(sql.NullInt64)
		case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion:
			// string columns
			values[i] = new(sql.NullString)
		case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull:
			// timestamp columns
			values[i] = new(sql.NullTime)
		default:
			return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i])
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Bouncer fields. values must line up with columns (as produced by
// scanValues); unknown columns are silently skipped by the switch.
func (b *Bouncer) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case bouncer.FieldID:
			// Primary key: unlike the fields below, the value is converted
			// without checking Valid, so a NULL id becomes 0.
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			b.ID = int(value.Int64)
		// For every other field the value is assigned only when the scanned
		// column is non-NULL (value.Valid); otherwise the zero value stays.
		case bouncer.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				b.CreatedAt = value.Time
			}
		case bouncer.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				b.UpdatedAt = value.Time
			}
		case bouncer.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				b.Name = value.String
			}
		case bouncer.FieldAPIKey:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field api_key", values[i])
			} else if value.Valid {
				b.APIKey = value.String
			}
		case bouncer.FieldRevoked:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field revoked", values[i])
			} else if value.Valid {
				b.Revoked = value.Bool
			}
		case bouncer.FieldIPAddress:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field ip_address", values[i])
			} else if value.Valid {
				b.IPAddress = value.String
			}
		case bouncer.FieldType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field type", values[i])
			} else if value.Valid {
				b.Type = value.String
			}
		case bouncer.FieldVersion:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field version", values[i])
			} else if value.Valid {
				b.Version = value.String
			}
		case bouncer.FieldUntil:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field until", values[i])
			} else if value.Valid {
				b.Until = value.Time
			}
		case bouncer.FieldLastPull:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field last_pull", values[i])
			} else if value.Valid {
				b.LastPull = value.Time
			}
		}
	}
	return nil
}
// Update returns a builder for updating this Bouncer.
// Note that you need to call Bouncer.Unwrap() before calling this method if this Bouncer
// was returned from a transaction, and the transaction was committed or rolled back.
func (b *Bouncer) Update() *BouncerUpdateOne {
	return (&BouncerClient{config: b.config}).UpdateOne(b)
}
// Unwrap unwraps the Bouncer entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity was not loaded through a transaction.
func (b *Bouncer) Unwrap() *Bouncer {
	tx, ok := b.config.driver.(*txDriver)
	if !ok {
		panic("ent: Bouncer is not a transactional entity")
	}
	// Replace the transactional driver with the underlying one.
	b.config.driver = tx.drv
	return b
}
// String implements the fmt.Stringer.
// Timestamps are rendered in time.ANSIC format. Note that the APIKey is
// included verbatim in the output.
func (b *Bouncer) String() string {
	var builder strings.Builder
	builder.WriteString("Bouncer(")
	builder.WriteString(fmt.Sprintf("id=%v", b.ID))
	builder.WriteString(", created_at=")
	builder.WriteString(b.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", updated_at=")
	builder.WriteString(b.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", name=")
	builder.WriteString(b.Name)
	builder.WriteString(", api_key=")
	builder.WriteString(b.APIKey)
	builder.WriteString(", revoked=")
	builder.WriteString(fmt.Sprintf("%v", b.Revoked))
	builder.WriteString(", ip_address=")
	builder.WriteString(b.IPAddress)
	builder.WriteString(", type=")
	builder.WriteString(b.Type)
	builder.WriteString(", version=")
	builder.WriteString(b.Version)
	builder.WriteString(", until=")
	builder.WriteString(b.Until.Format(time.ANSIC))
	builder.WriteString(", last_pull=")
	builder.WriteString(b.LastPull.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}
// Bouncers is a parsable slice of Bouncer.
type Bouncers []*Bouncer
func (b Bouncers) config(cfg config) {
for _i := range b {
b[_i].config = cfg
}
} | pkg/database/ent/bouncer.go | 0.629661 | 0.403214 | bouncer.go | starcoder |
package main
import (
"fmt"
"math"
"sync"
)
// Frame is one snapshot of the simulation, serializable to JSON.
// Positions packs 3 consecutive float64 values (x, y, z) per body,
// in the same order as Ids.
type Frame struct {
	Ids []uint16 `json:"ids"`
	Positions []float64 `json:"positions"`
}
// Vector3 is a simple 3-component vector.
type Vector3 struct {
	X float64
	Y float64
	Z float64
}
// Point is a single body in the N-body simulation.
type Point struct {
	Acceleration *Vector3 // current acceleration
	Id uint16 // 1-based identifier assigned by AddPoint
	Mass float64 // body mass (unit system defined by the caller)
	Position *Vector3 // current position
	Velocity *Vector3 // current velocity
}
// NBodySimulation holds the set of bodies being simulated.
type NBodySimulation struct {
	Points []*Point
}
// AddPoint appends p to the simulation, assigns it the next 1-based id,
// and returns the updated slice of bodies.
func (n *NBodySimulation) AddPoint(p *Point) []*Point {
	p.Id = uint16(len(n.Points) + 1)
	n.Points = append(n.Points, p)
	return n.Points
}
// STEP_SIZE is the nominal integration step for a 60 Hz update.
// It is not referenced in the visible code — confirm whether it is used elsewhere.
const STEP_SIZE = 1.0 / 60.0
// G is the gravitational constant, 6.67e-11 (SI value; the simulation's
// unit system is defined by the chosen masses/distances).
const G = 0.0000000000667
func Simulator(wg sync.WaitGroup, positionChan chan<- *Frame, endSimulationChan chan<- bool) {
defer wg.Done()
fmt.Println("Simulator called")
n := NBodySimulation{
Points: make([]*Point, 0),
}
n.AddPoint(&Point{
Acceleration: &Vector3{0, 0, 0},
Mass: 500000000,
Position: &Vector3{0, 0, 0},
Velocity: &Vector3{0, 0, 0},
})
n.AddPoint(&Point{
Acceleration: &Vector3{0, 0, 0},
Mass: 2,
Position: &Vector3{5, 0, 0},
Velocity: &Vector3{0, -0.03, 0},
})
n.AddPoint(&Point{
Acceleration: &Vector3{0, 0, 0},
Mass: 10000,
Position: &Vector3{-12, 0, 0},
Velocity: &Vector3{0, 0.015, 0},
})
n.AddPoint(&Point{
Acceleration: &Vector3{0, 0, 0},
Mass: 1000000,
Position: &Vector3{10, 0, -3},
Velocity: &Vector3{0, -0.01, -0.01},
})
n.AddPoint(&Point{
Acceleration: &Vector3{0, 0, 0},
Mass: 10000000,
Position: &Vector3{20, 20, -12},
Velocity: &Vector3{0, 0, 0.01},
})
fmt.Println("Created new simulator struct")
dT := 1.0
for i := 0; i < 1500000; i++ {
numPoints := len(n.Points)
idBuffer := make([]uint16, numPoints)
positionsBuffer := make([]float64, numPoints*3)
for i1 := 0; i1 < numPoints; i1++ {
for i2 := 0; i2 < numPoints; i2++ {
// Don't need to calculate effect of force on itself
if i1 == i2 {
continue
}
n.Points[i1].Velocity.X += n.Points[i1].Acceleration.X * dT * 0.5
n.Points[i1].Velocity.Y += n.Points[i1].Acceleration.Y * dT * 0.5
n.Points[i1].Velocity.Z += n.Points[i1].Acceleration.Z * dT * 0.5
n.Points[i1].Position.X += n.Points[i1].Velocity.X * dT
n.Points[i1].Position.Y += n.Points[i1].Velocity.Y * dT
n.Points[i1].Position.Z += n.Points[i1].Velocity.Z * dT
dx := n.Points[i2].Position.X - n.Points[i1].Position.X
dy := n.Points[i2].Position.Y - n.Points[i1].Position.Y
dz := n.Points[i2].Position.Z - n.Points[i1].Position.Z
invR3 := math.Pow(
math.Pow(dx, 2)+math.Pow(dy, 2)+math.Pow(dz, 2),
-1.5,
)
coeff := invR3 * n.Points[i2].Mass * G
n.Points[i1].Acceleration.X = dx * coeff
n.Points[i1].Acceleration.Y = dy * coeff
n.Points[i1].Acceleration.Z = dz * coeff
n.Points[i1].Velocity.X += n.Points[i1].Acceleration.X * dT * 0.5
n.Points[i1].Velocity.Y += n.Points[i1].Acceleration.Y * dT * 0.5
n.Points[i1].Velocity.Z += n.Points[i1].Acceleration.Z * dT * 0.5
}
positionsBuffer[i1*3] = n.Points[i1].Position.X
positionsBuffer[i1*3+1] = n.Points[i1].Position.Y
positionsBuffer[i1*3+2] = n.Points[i1].Position.Z
idBuffer[i1] = n.Points[i1].Id
}
positionChan <- &Frame{
Ids: idBuffer,
Positions: positionsBuffer,
}
}
endSimulationChan <- true
} | simulator.go | 0.573678 | 0.608856 | simulator.go | starcoder |
// Package event provides support for event based telemetry.
package event
import (
"fmt"
"time"
)
type eventType uint8
const (
invalidType = eventType(iota)
LogType // an event that should be recorded in a log
StartSpanType // the start of a span of time
EndSpanType // the end of a span of time
LabelType // some values that should be noted for later events
DetachType // an event that causes a context to detach
RecordType // a value that should be tracked
)
// sTags is used to hold a small number of tags inside an event whichout
// requiring a separate allocation.
// As tags are often on the stack, this avoids an allocation at all for
// the very common cases of simple events.
// The length needs to be large enough to cope with the majority of events
// but no so large as to cause undue stack pressure.
// A log message with two values will use 3 tags (one for each value and
// one for the message itself).
type sTags [3]Tag
// Event holds the information about an event of note that ocurred.
type Event struct {
At time.Time
typ eventType
static sTags // inline storage for the first few tags
dynamic []Tag // dynamically sized storage for remaining tags
}
// eventTagMap implements TagMap for a the tags of an Event.
type eventTagMap struct {
event Event
}
func (ev Event) IsLog() bool { return ev.typ == LogType }
func (ev Event) IsEndSpan() bool { return ev.typ == EndSpanType }
func (ev Event) IsStartSpan() bool { return ev.typ == StartSpanType }
func (ev Event) IsLabel() bool { return ev.typ == LabelType }
func (ev Event) IsDetach() bool { return ev.typ == DetachType }
func (ev Event) IsRecord() bool { return ev.typ == RecordType }
func (ev Event) Format(f fmt.State, r rune) {
tagMap := TagMap(ev)
if !ev.At.IsZero() {
fmt.Fprint(f, ev.At.Format("2006/01/02 15:04:05 "))
}
msg := Msg.Get(tagMap)
err := Err.Get(tagMap)
fmt.Fprint(f, msg)
if err != nil {
if f.Flag('+') {
fmt.Fprintf(f, ": %+v", err)
} else {
fmt.Fprintf(f, ": %v", err)
}
}
for index := 0; ev.Valid(index); index++ {
tag := ev.Tag(index)
// msg and err were both already printed above, so we skip them to avoid
// double printing
if !tag.Valid() || tag.Key == Msg || tag.Key == Err {
continue
}
fmt.Fprintf(f, "\n\t%v", tag)
}
}
func (ev Event) Valid(index int) bool {
return index >= 0 && index < len(ev.static)+len(ev.dynamic)
}
func (ev Event) Tag(index int) Tag {
if index < len(ev.static) {
return ev.static[index]
}
return ev.dynamic[index-len(ev.static)]
}
func (ev Event) Find(key interface{}) Tag {
for _, tag := range ev.static {
if tag.Key == key {
return tag
}
}
for _, tag := range ev.dynamic {
if tag.Key == key {
return tag
}
}
return Tag{}
}
func makeEvent(typ eventType, static sTags, tags []Tag) Event {
return Event{
typ: typ,
static: static,
dynamic: tags,
}
} | vendor/golang.org/x/tools/internal/telemetry/event/event.go | 0.601828 | 0.456349 | event.go | starcoder |
package plotter
import (
"errors"
"image/color"
"math"
"github.com/gonum/plot"
"github.com/gonum/plot/vg"
"github.com/gonum/plot/vg/draw"
)
// Bubbles implements the Plotter interface, drawing
// a bubble plot of x, y, z triples where the z value
// determines the radius of the bubble.
type Bubbles struct {
XYZs
// Color is the color of the bubbles.
color.Color
// MinRadius and MaxRadius give the minimum
// and maximum bubble radius respectively.
// The radii of each bubble is interpolated linearly
// between these two values.
MinRadius, MaxRadius vg.Length
// MinZ and MaxZ are the minimum and
// maximum Z values from the data.
MinZ, MaxZ float64
}
// NewBubbles creates a new bubble plot plotter for the given data, with a
// minimum and maximum bubble radius. It returns an error if the data
// cannot be copied or if min is greater than max.
//
// Fixes: no longer panics (index out of range) on empty data, and the
// error string is lowercase per Go convention.
func NewBubbles(xyz XYZer, min, max vg.Length) (*Bubbles, error) {
	cpy, err := CopyXYZs(xyz)
	if err != nil {
		return nil, err
	}
	if min > max {
		return nil, errors.New("min bubble radius is greater than the max radius")
	}
	// Guard against empty data: leave the Z range at zero rather than
	// indexing cpy[0].
	minz, maxz := 0.0, 0.0
	if len(cpy) > 0 {
		minz, maxz = cpy[0].Z, cpy[0].Z
		for _, d := range cpy[1:] {
			minz = math.Min(minz, d.Z)
			maxz = math.Max(maxz, d.Z)
		}
	}
	return &Bubbles{
		XYZs:      cpy,
		MinRadius: min,
		MaxRadius: max,
		MinZ:      minz,
		MaxZ:      maxz,
	}, nil
}
// Plot implements the Plot method of the plot.Plotter interface, drawing
// one filled circle per data point; points outside the canvas are skipped.
func (bs *Bubbles) Plot(c draw.Canvas, plt *plot.Plot) {
	trX, trY := plt.Transforms(&c)
	c.SetColor(bs.Color)
	for _, d := range bs.XYZs {
		// Transform the data point to canvas coordinates.
		x := trX(d.X)
		y := trY(d.Y)
		if !c.Contains(draw.Point{x, y}) {
			continue
		}
		rad := bs.radius(d.Z)
		// draw a circle centered at x, y
		var p vg.Path
		p.Move(x+rad, y)
		p.Arc(x, y, rad, 0, 2*math.Pi)
		p.Close()
		c.Fill(p)
	}
}
// radius maps z linearly onto the [MinRadius, MaxRadius] interval.
// A degenerate data range (MinZ == MaxZ) yields the midpoint radius.
func (bs *Bubbles) radius(z float64) vg.Length {
	span := bs.MaxRadius - bs.MinRadius
	if bs.MinZ == bs.MaxZ {
		return bs.MinRadius + span/2
	}
	t := (z - bs.MinZ) / (bs.MaxZ - bs.MinZ)
	return bs.MinRadius + vg.Length(t)*span
}
// DataRange implements the DataRange method
// of the plot.DataRanger interface, delegating to the X/Y view of the
// stored triples (Z does not affect the axis range).
func (bs *Bubbles) DataRange() (xmin, xmax, ymin, ymax float64) {
	return XYRange(XYValues{bs.XYZs})
}
// GlyphBoxes implements the GlyphBoxes method
// of the plot.GlyphBoxer interface.
func (bs *Bubbles) GlyphBoxes(plt *plot.Plot) []plot.GlyphBox {
boxes := make([]plot.GlyphBox, len(bs.XYZs))
for i, d := range bs.XYZs {
boxes[i].X = plt.X.Norm(d.X)
boxes[i].Y = plt.Y.Norm(d.Y)
r := bs.radius(d.Z)
boxes[i].Rectangle = draw.Rectangle{
Min: draw.Point{-r, -r},
Max: draw.Point{+r, +r},
}
}
return boxes
} | Godeps/_workspace/src/github.com/gonum/plot/plotter/bubbles.go | 0.894675 | 0.580798 | bubbles.go | starcoder |
package trace
import (
"github.com/df-mc/dragonfly/server/block/cube"
"github.com/go-gl/mathgl/mgl64"
"math"
)
// TraverseBlocks performs a ray trace between the start and end coordinates.
// A function 'f' is passed which is called for each voxel, if f returns false, the function will return.
// TraverseBlocks panics if the start and end positions are the same.
//
// The walk visits every block the segment passes through by repeatedly
// stepping one block along whichever axis reaches its next block boundary
// first (a grid traversal in the style of Amanatides & Woo).
func TraverseBlocks(start, end mgl64.Vec3, f func(pos cube.Pos) (con bool)) {
	dir := end.Sub(start)
	if mgl64.FloatEqual(dir.LenSqr(), 0) {
		panic("start and end points are the same, giving a zero direction vector")
	}
	dir = dir.Normalize()

	b := cube.PosFromVec3(start)
	step := signVec3(dir)
	stepX, stepY, stepZ := int(step[0]), int(step[1]), int(step[2])
	// max: per axis, the ray distance to the next block boundary.
	max := boundaryVec3(start, dir)
	// delta: per axis, the ray distance between successive boundaries.
	delta := safeDivideVec3(step, dir)

	// r is the total length of the segment; the walk stops once the next
	// boundary crossing would exceed it.
	r := start.Sub(end).Len()
	for {
		if !f(b) {
			return
		}

		// Advance along the axis whose boundary is nearest.
		if max[0] < max[1] && max[0] < max[2] {
			if max[0] > r {
				return
			}
			b[0] += stepX
			max[0] += delta[0]
		} else if max[1] < max[2] {
			if max[1] > r {
				return
			}
			b[1] += stepY
			max[1] += delta[1]
		} else {
			if max[2] > r {
				return
			}
			b[2] += stepZ
			max[2] += delta[2]
		}
	}
}
// safeDivideVec3 divides dividend by divisor component-wise, mapping any
// division by zero to zero.
func safeDivideVec3(dividend, divisor mgl64.Vec3) mgl64.Vec3 {
	var out mgl64.Vec3
	for i := range out {
		out[i] = safeDivide(dividend[i], divisor[i])
	}
	return out
}
// safeDivide returns dividend/divisor, treating division by zero as 0.
func safeDivide(dividend, divisor float64) float64 {
	if divisor != 0.0 {
		return dividend / divisor
	}
	return 0.0
}
// boundaryVec3 applies boundary component-wise to the two vectors.
func boundaryVec3(v1, v2 mgl64.Vec3) mgl64.Vec3 {
	var out mgl64.Vec3
	for i := range out {
		out[i] = boundary(v1[i], v2[i])
	}
	return out
}
// boundary returns the distance that must be travelled on an axis from the
// start point with the direction vector component to cross a block
// boundary. A zero direction never crosses a boundary, giving +Inf.
func boundary(start, dir float64) float64 {
	if dir == 0.0 {
		return math.Inf(1)
	}
	if dir < 0.0 {
		// Mirror the problem so we always walk in the positive direction.
		start, dir = -start, -dir
		if start == math.Floor(start) {
			// Already exactly on a boundary.
			return 0.0
		}
	}
	frac := start - math.Floor(start)
	return (1 - frac) / dir
}
// signVec3 applies sign component-wise to the vector.
func signVec3(v1 mgl64.Vec3) mgl64.Vec3 {
	var out mgl64.Vec3
	for i := range out {
		out[i] = sign(v1[i])
	}
	return out
}
// sign returns -1, 0 or 1 according to the sign of f.
// (Fix: stray dataset metadata fused after the closing brace removed.)
func sign(f float64) float64 {
	switch {
	case f > 0.0:
		return 1.0
	case f < 0.0:
		return -1.0
	}
	return 0.0
}
package rusnum
import (
"math"
)
var (
	// Each table holds the three Russian grammatical-number forms of a
	// fraction noun, indexed by getNumeralNumberCase: presumably
	// [one, few, many] — TODO confirm against getNumeralNumberCase.
	tenthNumberCase             = [3]string{"десятая", "десятых", "десятых"}
	hundredthNumberCase         = [3]string{"сотая", "сотых", "сотых"}
	thousandthNumberCase        = [3]string{"тысячная", "тысячных", "тысячных"}
	tenthousandthNumberCase     = [3]string{"десятитысячная", "десятитысячных", "десятитысячных"}
	hundredthousandthNumberCase = [3]string{"стотысячная", "стотысячных", "стотысячных"}
	millionthNumberCase         = [3]string{"миллионная", "миллионных", "миллионных"}
	tenmillionthNumberCase      = [3]string{"десятимиллионная", "десятимиллионных", "десятимиллионных"}
	hundredmillionthNumberCase  = [3]string{"стомиллионная", "стомиллионных", "стомиллионных"}
	milliardthNumberCase        = [3]string{"миллиардная", "миллиардных", "миллиардных"}
	tenmilliardthNumberCase     = [3]string{"десятимиллиардная", "десятимиллиардных", "десятимиллиардных"}
	hundredmilliardthNumberCase = [3]string{"стомиллиардная", "стомиллиардных", "стомиллиардных"}
)
// FracInWords returns 'frac' expressed in 'fraction's in Russian words.
// If result is 0 and 'showZero' is false, empty string is returned.
func FracInWords(frac float64, fraction Fraction, showZero bool) string {
	if fraction == NoFraction {
		// No fractional unit requested: spell the integer part only.
		return IntInWords(int64(frac), showZero, Masculine)
	}
	absFrac := math.Abs(frac)
	// Scale the value so the requested fractional unit becomes the integer
	// part, and pick the matching noun forms.
	var numberCase [3]string
	switch fraction {
	case Tenth:
		absFrac *= 10
		numberCase = tenthNumberCase
	case Hundredth:
		absFrac *= 100
		numberCase = hundredthNumberCase
	case Thousandth:
		absFrac *= 1000
		numberCase = thousandthNumberCase
	case Tenthousandth:
		absFrac *= 10000
		numberCase = tenthousandthNumberCase
	case Hundredthousandth:
		absFrac *= 100000
		numberCase = hundredthousandthNumberCase
	case Millionth:
		absFrac *= 1000000
		numberCase = millionthNumberCase
	case Tenmillionth:
		absFrac *= 10000000
		numberCase = tenmillionthNumberCase
	case Hundredmillionth:
		absFrac *= 100000000
		numberCase = hundredmillionthNumberCase
	case Milliardth:
		absFrac *= 1000000000
		numberCase = milliardthNumberCase
	case Tenmilliardth:
		absFrac *= 10000000000
		numberCase = tenmilliardthNumberCase
	// all other fractions are treated as Hundredmilliardth
	default:
		absFrac *= 100000000000
		numberCase = hundredmilliardthNumberCase
	}
	// Keep only the integer part of the scaled value.
	fint, _ := math.Modf(absFrac)
	ifint := int64(fint)
	if ifint == 0 && !showZero {
		return ""
	}
	// Fraction nouns take the feminine gender in Russian.
	r := IntInWords(ifint, false, Feminine) + " " + numberCase[getNumeralNumberCase(ifint)]
	if frac < 0 {
		return "минус " + r
	}
	return r
}
// FracInWordsAuto is like FracInWords but determines the Fraction automatically.
func FracInWordsAuto(frac float64, showZero bool) string {
return FracInWords(frac, fractionFromFloat(frac), showZero)
} | fracInWords.go | 0.500977 | 0.529203 | fracInWords.go | starcoder |
package foundation
// #include "index_set.h"
import "C"
import (
"unsafe"
"github.com/hsiafan/cocoa/objc"
)
// IndexSet is the Go-side interface over Cocoa's NSIndexSet: query
// operations on an immutable collection of unsigned integer indexes.
type IndexSet interface {
	objc.Object
	ContainsIndex(value uint) bool
	ContainsIndexes(indexSet IndexSet) bool
	ContainsIndexesInRange(_range Range) bool
	IntersectsIndexesInRange(_range Range) bool
	CountOfIndexesInRange(_range Range) uint
	IsEqualToIndexSet(indexSet IndexSet) bool
	IndexLessThanIndex(value uint) uint
	IndexLessThanOrEqualToIndex(value uint) uint
	IndexGreaterThanOrEqualToIndex(value uint) uint
	IndexGreaterThanIndex(value uint) uint
	Count() uint
	FirstIndex() uint
	LastIndex() uint
}
// NSIndexSet wraps a native NSIndexSet object pointer and implements
// IndexSet by delegating every call through the C bridge.
type NSIndexSet struct {
	objc.NSObject
}
// MakeIndexSet wraps an existing native object pointer as an NSIndexSet.
func MakeIndexSet(ptr unsafe.Pointer) NSIndexSet {
	return NSIndexSet{
		NSObject: objc.MakeObject(ptr),
	}
}
// IndexSet_ creates an empty index set via the C bridge.
func IndexSet_() NSIndexSet {
	result_ := C.C_NSIndexSet_IndexSet_()
	return MakeIndexSet(result_)
}

// IndexSetWithIndex creates an index set containing the single index value.
func IndexSetWithIndex(value uint) NSIndexSet {
	result_ := C.C_NSIndexSet_IndexSetWithIndex(C.uint(value))
	return MakeIndexSet(result_)
}

// IndexSetWithIndexesInRange creates an index set containing every index
// in the given range.
func IndexSetWithIndexesInRange(_range Range) NSIndexSet {
	result_ := C.C_NSIndexSet_IndexSetWithIndexesInRange(*(*C.NSRange)(unsafe.Pointer(&_range)))
	return MakeIndexSet(result_)
}
// InitWithIndex initializes the receiver with a single index.
func (n NSIndexSet) InitWithIndex(value uint) NSIndexSet {
	result_ := C.C_NSIndexSet_InitWithIndex(n.Ptr(), C.uint(value))
	return MakeIndexSet(result_)
}

// InitWithIndexesInRange initializes the receiver with every index in the range.
func (n NSIndexSet) InitWithIndexesInRange(_range Range) NSIndexSet {
	result_ := C.C_NSIndexSet_InitWithIndexesInRange(n.Ptr(), *(*C.NSRange)(unsafe.Pointer(&_range)))
	return MakeIndexSet(result_)
}

// InitWithIndexSet initializes the receiver with the contents of another index set.
func (n NSIndexSet) InitWithIndexSet(indexSet IndexSet) NSIndexSet {
	result_ := C.C_NSIndexSet_InitWithIndexSet(n.Ptr(), objc.ExtractPtr(indexSet))
	return MakeIndexSet(result_)
}
// AllocIndexSet allocates an uninitialized NSIndexSet (pair with an Init*).
func AllocIndexSet() NSIndexSet {
	result_ := C.C_NSIndexSet_AllocIndexSet()
	return MakeIndexSet(result_)
}

// Init performs the default initialization of the receiver.
func (n NSIndexSet) Init() NSIndexSet {
	result_ := C.C_NSIndexSet_Init(n.Ptr())
	return MakeIndexSet(result_)
}

// NewIndexSet allocates and initializes an empty NSIndexSet.
func NewIndexSet() NSIndexSet {
	result_ := C.C_NSIndexSet_NewIndexSet()
	return MakeIndexSet(result_)
}

// Autorelease adds the receiver to the current autorelease pool.
func (n NSIndexSet) Autorelease() NSIndexSet {
	result_ := C.C_NSIndexSet_Autorelease(n.Ptr())
	return MakeIndexSet(result_)
}

// Retain increments the receiver's reference count.
func (n NSIndexSet) Retain() NSIndexSet {
	result_ := C.C_NSIndexSet_Retain(n.Ptr())
	return MakeIndexSet(result_)
}
// ContainsIndex reports whether the set contains the given index.
func (n NSIndexSet) ContainsIndex(value uint) bool {
	result_ := C.C_NSIndexSet_ContainsIndex(n.Ptr(), C.uint(value))
	return bool(result_)
}

// ContainsIndexes reports whether the set contains every index of indexSet.
func (n NSIndexSet) ContainsIndexes(indexSet IndexSet) bool {
	result_ := C.C_NSIndexSet_ContainsIndexes(n.Ptr(), objc.ExtractPtr(indexSet))
	return bool(result_)
}

// ContainsIndexesInRange reports whether the set contains every index in the range.
func (n NSIndexSet) ContainsIndexesInRange(_range Range) bool {
	result_ := C.C_NSIndexSet_ContainsIndexesInRange(n.Ptr(), *(*C.NSRange)(unsafe.Pointer(&_range)))
	return bool(result_)
}

// IntersectsIndexesInRange reports whether the set contains any index in the range.
func (n NSIndexSet) IntersectsIndexesInRange(_range Range) bool {
	result_ := C.C_NSIndexSet_IntersectsIndexesInRange(n.Ptr(), *(*C.NSRange)(unsafe.Pointer(&_range)))
	return bool(result_)
}

// CountOfIndexesInRange returns how many of the set's indexes fall in the range.
func (n NSIndexSet) CountOfIndexesInRange(_range Range) uint {
	result_ := C.C_NSIndexSet_CountOfIndexesInRange(n.Ptr(), *(*C.NSRange)(unsafe.Pointer(&_range)))
	return uint(result_)
}

// IsEqualToIndexSet reports whether both sets contain exactly the same indexes.
func (n NSIndexSet) IsEqualToIndexSet(indexSet IndexSet) bool {
	result_ := C.C_NSIndexSet_IsEqualToIndexSet(n.Ptr(), objc.ExtractPtr(indexSet))
	return bool(result_)
}
// IndexLessThanIndex returns the closest set member below value.
func (n NSIndexSet) IndexLessThanIndex(value uint) uint {
	result_ := C.C_NSIndexSet_IndexLessThanIndex(n.Ptr(), C.uint(value))
	return uint(result_)
}

// IndexLessThanOrEqualToIndex returns the closest set member at or below value.
func (n NSIndexSet) IndexLessThanOrEqualToIndex(value uint) uint {
	result_ := C.C_NSIndexSet_IndexLessThanOrEqualToIndex(n.Ptr(), C.uint(value))
	return uint(result_)
}

// IndexGreaterThanOrEqualToIndex returns the closest set member at or above value.
func (n NSIndexSet) IndexGreaterThanOrEqualToIndex(value uint) uint {
	result_ := C.C_NSIndexSet_IndexGreaterThanOrEqualToIndex(n.Ptr(), C.uint(value))
	return uint(result_)
}

// IndexGreaterThanIndex returns the closest set member above value.
func (n NSIndexSet) IndexGreaterThanIndex(value uint) uint {
	result_ := C.C_NSIndexSet_IndexGreaterThanIndex(n.Ptr(), C.uint(value))
	return uint(result_)
}
// Count returns the number of indexes in the set.
func (n NSIndexSet) Count() uint {
	result_ := C.C_NSIndexSet_Count(n.Ptr())
	return uint(result_)
}

// FirstIndex returns the smallest index in the set.
func (n NSIndexSet) FirstIndex() uint {
	result_ := C.C_NSIndexSet_FirstIndex(n.Ptr())
	return uint(result_)
}
func (n NSIndexSet) LastIndex() uint {
result_ := C.C_NSIndexSet_LastIndex(n.Ptr())
return uint(result_)
} | foundation/index_set.go | 0.608943 | 0.490907 | index_set.go | starcoder |
package ring
import "github.com/gholt/holdme"
// BuilderNode is a node within a builder; a node represents a single
// assignment target of replicas of partitions, such as a single disk in a
// distributed storage system.
type BuilderNode struct {
	builder *Builder // owning builder; all state lives in builder.ring
	index   int      // this node's index into the ring's per-node slices
	info    string   // free-form, user-defined metadata
}
// Info returns the user-defined info string; this info is not used directly by
// the builder.
func (n *BuilderNode) Info() string {
	return n.info
}

// SetInfo sets the user-defined info string; this info is not used directly by
// the builder.
func (n *BuilderNode) SetInfo(v string) {
	n.info = v
}

// Capacity specifies, relative to other nodes, how many assignments the node
// should have.
func (n *BuilderNode) Capacity() int {
	return n.builder.ring.NodeToCapacity[n.index]
}

// SetCapacity specifies, relative to other nodes, how many assignments the
// node should have.
func (n *BuilderNode) SetCapacity(v int) {
	n.builder.ring.NodeToCapacity[n.index] = v
}

// Group returns the parent group of the node; it may return nil if there is no
// parent group.
func (n *BuilderNode) Group() *BuilderGroup {
	return n.builder.groups[n.builder.ring.NodeToGroup[n.index]]
}

// SetGroup sets the parent group of the node; it may be set to nil to have no
// parent group.
//
// NOTE(review): passing nil currently panics (group.index dereference)
// despite the doc above — confirm the sentinel group index for "no group"
// before relying on nil here.
func (n *BuilderNode) SetGroup(group *BuilderGroup) {
	n.builder.ring.NodeToGroup[n.index] = group.index
}
// Partitions returns the list of partitions assigned to this node; the list
// will be in ascending order with no duplicates.
func (n *BuilderNode) Partitions() []int {
	n.builder.ring.FillReplicaToNodeToPartitions()
	// A partition may be assigned to this node under several replicas;
	// the ordered no-dup set collapses those to a single entry.
	var partitions holdme.OrderedIntsNoDups
	for _, nodeToPartitions := range n.builder.ring.ReplicaToNodeToPartitions {
		for _, partition := range nodeToPartitions[n.index] {
			partitions.Add(int(partition))
		}
	}
	return partitions
}
// ReplicaPartitions returns the list of partitions assigned to this node for
// the given replica; the list will be in ascending order with no duplicates.
func (n *BuilderNode) ReplicaPartitions(replica int) []int {
	n.builder.ring.FillReplicaToNodeToPartitions()
	// Copy out so callers cannot mutate the ring's internal slices.
	partitions := make([]int, len(n.builder.ring.ReplicaToNodeToPartitions[replica][n.index]))
	for i, partition := range n.builder.ring.ReplicaToNodeToPartitions[replica][n.index] {
		partitions[i] = int(partition)
	}
	return partitions
}
// Responsible returns the replica number this node is responsible for with
// respect to the key given; will return -1 if this node is not responsible for
// any replica for the key.
func (n *BuilderNode) Responsible(key int) int {
	// Keys map to partitions by simple modulo over the partition count.
	partition := key % len(n.builder.ring.ReplicaToPartitionToNode[0])
	for replica, partitionToNode := range n.builder.ring.ReplicaToPartitionToNode {
		if int(partitionToNode[partition]) == n.index {
			return replica
		}
	}
	return -1
}
// ResponsibleForReplicaPartition returns true if this node is responsible for
// the specific replica and partition given.
//
// Fix: the previous implementation ignored the replica argument and
// reported true if the node held the partition under ANY replica,
// contradicting this documented contract (and the per-replica lookup used
// by Responsible above).
func (n *BuilderNode) ResponsibleForReplicaPartition(replica, partition int) bool {
	return int(n.builder.ring.ReplicaToPartitionToNode[replica][partition]) == n.index
}
// Assign will override the current builder's assignment and set a specific
// replica of a partition to this specific node. This is mostly just useful for
// testing, as future calls to Rebalance may move this assignment.
func (n *BuilderNode) Assign(replica, partition int) {
n.builder.Assign(replica, partition, n)
} | buildernode.go | 0.847416 | 0.414247 | buildernode.go | starcoder |
package mat
import (
"math"
)
// Convenience colors used by patterns.
var black = NewColor(0, 0, 0)
var white = NewColor(1, 1, 1)
// Pattern is a surface pattern: it maps a point in pattern space to a
// color and carries a transform (with cached inverse) from object space
// into pattern space.
type Pattern interface {
	PatternAt(point Tuple4) Tuple4
	SetPatternTransform(transform Mat4x4)
	GetTransform() Mat4x4
	GetInverse() Mat4x4
}
// NewStripePattern returns a stripe pattern alternating between colorA and
// colorB along the x axis, with fresh (New4x4) transform and inverse.
// (Cleanup: dead commented-out code removed.)
func NewStripePattern(colorA Tuple4, colorB Tuple4) *StripePattern {
	return &StripePattern{A: colorA, B: colorB, Transform: New4x4(), Inverse: New4x4()}
}
// TestPattern is a pattern used in tests: PatternAt echoes the pattern-space
// point back as a color, making transform handling observable.
type TestPattern struct {
	Transform Mat4x4
	Inverse   Mat4x4 // cached inverse of Transform
}
// NewTestPattern returns a TestPattern with fresh (New4x4) transform and
// inverse. (Cleanup: dead commented-out code removed.)
func NewTestPattern() *TestPattern {
	return &TestPattern{Transform: New4x4(), Inverse: New4x4()}
}
// PatternAt returns the point itself as a color, exposing which
// pattern-space point was sampled.
func (t *TestPattern) PatternAt(point Tuple4) Tuple4 {
	return NewColor(point.Get(0), point.Get(1), point.Get(2))
}

// SetPatternTransform sets the pattern transform and caches its inverse.
func (t *TestPattern) SetPatternTransform(transform Mat4x4) {
	t.Transform = transform
	t.Inverse = Inverse(t.Transform)
}

// GetTransform returns the pattern transform.
func (t *TestPattern) GetTransform() Mat4x4 {
	return t.Transform
}

// GetInverse returns the cached inverse of the pattern transform.
func (t *TestPattern) GetInverse() Mat4x4 {
	return t.Inverse
}
// StripePattern alternates between colors A and B in unit-wide stripes
// along the x axis.
type StripePattern struct {
	A         Tuple4
	B         Tuple4
	Transform Mat4x4
	Inverse   Mat4x4 // cached inverse of Transform
}
// GetTransform returns the pattern transform.
func (p *StripePattern) GetTransform() Mat4x4 {
	return p.Transform
}

// GetInverse returns the cached inverse of the pattern transform.
func (p *StripePattern) GetInverse() Mat4x4 {
	return p.Inverse
}

// SetPatternTransform sets the pattern transform and caches its inverse.
func (p *StripePattern) SetPatternTransform(transform Mat4x4) {
	p.Transform = transform
	p.Inverse = Inverse(p.Transform)
}

// PatternAt returns A on even floored x, B otherwise. Because floor is
// applied first, the stripes alternate correctly for negative x too
// (Go's % yields -1 there, which is != 0).
func (p *StripePattern) PatternAt(point Tuple4) Tuple4 {
	if int(math.Floor(point.Get(0)))%2 == 0 {
		return p.A
	}
	return p.B
}
// GradientPattern blends linearly from FromColor to ToColor over each unit
// interval of x.
type GradientPattern struct {
	FromColor Tuple4
	ToColor   Tuple4
	Transform Mat4x4
	Inverse   Mat4x4 // cached inverse of Transform
}
// NewGradientPattern returns a gradient from 'from' to 'to' with fresh
// (New4x4) transform and inverse. (Cleanup: dead commented-out code removed.)
func NewGradientPattern(from, to Tuple4) *GradientPattern {
	return &GradientPattern{FromColor: from, ToColor: to, Transform: New4x4(), Inverse: New4x4()}
}
// PatternAt linearly interpolates between FromColor and ToColor using the
// fractional part of the x coordinate.
func (g *GradientPattern) PatternAt(point Tuple4) Tuple4 {
	distance := Sub(g.ToColor, g.FromColor)
	fraction := point.Get(0) - math.Floor(point.Get(0))
	return Add(g.FromColor, MultiplyByScalar(distance, fraction))
}

// SetPatternTransform sets the pattern transform and caches its inverse.
func (g *GradientPattern) SetPatternTransform(transform Mat4x4) {
	g.Transform = transform
	g.Inverse = Inverse(g.Transform)
}

// GetTransform returns the pattern transform.
func (g *GradientPattern) GetTransform() Mat4x4 {
	return g.Transform
}

// GetInverse returns the cached inverse of the pattern transform.
func (g *GradientPattern) GetInverse() Mat4x4 {
	return g.Inverse
}
// RingPattern alternates colors A and B in concentric unit-wide rings
// around the y axis (distance measured in the xz plane).
type RingPattern struct {
	A         Tuple4
	B         Tuple4
	Transform Mat4x4
	Inverse   Mat4x4 // cached inverse of Transform
}
// NewRingPattern returns a ring pattern alternating a and b with fresh
// (New4x4) transform and inverse. (Cleanup: dead commented-out code removed.)
func NewRingPattern(a Tuple4, b Tuple4) *RingPattern {
	return &RingPattern{A: a, B: b, Transform: New4x4(), Inverse: New4x4()}
}
// PatternAt returns A when the floored xz-distance from the y axis is
// even, B otherwise.
func (r *RingPattern) PatternAt(point Tuple4) Tuple4 {
	flooredDistance := math.Floor(math.Sqrt(point.Get(0)*point.Get(0) + point.Get(2)*point.Get(2)))
	if int(flooredDistance)%2 == 0 {
		return r.A
	}
	return r.B
}

// SetPatternTransform sets the pattern transform and caches its inverse.
func (r *RingPattern) SetPatternTransform(transform Mat4x4) {
	r.Transform = transform
	r.Inverse = Inverse(r.Transform)
}

// GetTransform returns the pattern transform.
func (r *RingPattern) GetTransform() Mat4x4 {
	return r.Transform
}

// GetInverse returns the cached inverse of the pattern transform.
func (r *RingPattern) GetInverse() Mat4x4 {
	return r.Inverse
}
// PatternAtShape returns the pattern color at the given world-space point
// on shape s: the point is first converted to object space via
// WorldToObject, then to pattern space using the pattern's cached inverse
// transform. (Cleanup: dead commented-out code removed.)
func PatternAtShape(pattern Pattern, s Shape, worldPoint Tuple4) Tuple4 {
	objectPoint := WorldToObject(s, worldPoint)
	patternPoint := MultiplyByTuple(pattern.GetInverse(), objectPoint)
	return pattern.PatternAt(patternPoint)
}
// Note: use the WorldToObject function (as above) when converting points
// from world space to object space.
// NewCheckerPattern returns a 3D checker pattern alternating colorA and
// colorB, with fresh (New4x4) transform and inverse.
// (Cleanup: dead commented-out code removed.)
func NewCheckerPattern(colorA Tuple4, colorB Tuple4) *CheckerPattern {
	return &CheckerPattern{ColorA: colorA, ColorB: colorB, Transform: New4x4(), Inverse: New4x4()}
}
// CheckerPattern is a 3D checkerboard: the color alternates whenever the
// sum of the floored coordinates changes parity.
type CheckerPattern struct {
	ColorA    Tuple4
	ColorB    Tuple4
	Transform Mat4x4
	Inverse   Mat4x4 // cached inverse of Transform
}
// PatternAt returns ColorA when the sum of the floored coordinates is
// even, ColorB otherwise.
func (c *CheckerPattern) PatternAt(point Tuple4) Tuple4 {
	all := math.Floor(point.Get(0)) + math.Floor(point.Get(1)) + math.Floor(point.Get(2))
	if int(math.Round(all))%2 == 0 {
		return c.ColorA
	}
	return c.ColorB
}

// SetPatternTransform sets the pattern transform and caches its inverse.
func (c *CheckerPattern) SetPatternTransform(transform Mat4x4) {
	c.Transform = transform
	c.Inverse = Inverse(c.Transform)
}

// GetTransform returns the pattern transform.
func (c *CheckerPattern) GetTransform() Mat4x4 {
	return c.Transform
}
func (c *CheckerPattern) GetInverse() Mat4x4 {
return c.Inverse
} | internal/pkg/mat/pattern.go | 0.740737 | 0.540136 | pattern.go | starcoder |
package komblobulate
import (
"errors"
"fmt"
"io"
)
const (
	// Resist (error-resistance) codec identifiers stored in the config.
	ResistType_None = byte(0)
	ResistType_Rs   = byte(1) // Reed-Solomon

	// Cipher codec identifiers stored in the config.
	CipherType_None = byte(0)
	CipherType_Aead = byte(1)
)
// findAgreement returns the first of the three candidate values that
// equals at least one of the others according to the supplied equality
// function, or nil when no two candidates agree. Nil candidates never
// count as agreeing on their own behalf.
func findAgreement(things [3]interface{}, equals func(interface{}, interface{}) bool) interface{} {
	first, second, third := things[0], things[1], things[2]
	if first != nil && (equals(first, second) || equals(first, third)) {
		return first
	}
	if second != nil && equals(second, third) {
		return second
	}
	return nil
}
// NewReader, given a reader of a kblobbed output, creates a reader of the
// unblobbed contents. The configuration is recovered from the blob itself:
// it is stored three times (twice at the start, once at the end) and the
// majority value wins, so one corrupted copy is tolerated.
//
// Fix: the cipher agreement check previously re-tested `resist == nil`
// instead of `cipher == nil`, so a missing cipher agreement could slip
// through. Also uses fmt.Errorf in place of errors.New(fmt.Sprintf(...)).
func NewReader(kblob io.ReadSeeker, params KCodecParams) (unblob io.Reader, err error) {
	// Work out how big this blob is:
	var bloblen int64
	bloblen, err = kblob.Seek(0, 2)
	if err != nil {
		return nil, err
	}

	// The config is stored in three places -- twice at the beginning and
	// once at the end. Read out all three, ignoring individual read errors
	// so long as we manage to get agreement:
	var configBlocks [3]interface{}
	var resistBlocks [3]interface{}
	var cipherBlocks [3]interface{}

	configBlocks[0], resistBlocks[0], cipherBlocks[0], err = ReadConfig(kblob, 0)
	configBlocks[1], resistBlocks[1], cipherBlocks[1], err = ReadConfig(kblob, 3*ConfigSize)
	configBlocks[2], resistBlocks[2], cipherBlocks[2], err = ReadConfig(kblob, bloblen-3*ConfigSize)

	config, ok := findAgreement(configBlocks, func(a interface{}, b interface{}) bool {
		return a.(*Config).ConfigEquals(b.(*Config))
	}).(*Config)
	if !ok || config == nil {
		if err == nil {
			err = errors.New("No config agreement")
		}
		return
	}

	if config.Version > CurrentVersion {
		err = fmt.Errorf("Version %d not supported", config.Version)
		return
	}

	kcodecequal := func(a interface{}, b interface{}) bool {
		return a.(KCodec).ConfigEquals(b)
	}

	resist, ok := findAgreement(resistBlocks, kcodecequal).(KCodec)
	if !ok || resist == nil {
		err = errors.New("No resist agreement")
		return
	}

	cipher, ok := findAgreement(cipherBlocks, kcodecequal).(KCodec)
	if !ok || cipher == nil {
		err = errors.New("No cipher agreement")
		return
	}

	// Stack the readers: raw blob -> resist decoding -> cipher decoding.
	unConfig, err := NewKblobReader(kblob, bloblen)
	if err != nil {
		return
	}

	unResist, unResistLength, err := resist.NewReader(unConfig, bloblen-int64(9*ConfigSize), params)
	if err != nil {
		return
	}

	unblob, _, err = cipher.NewReader(unResist, unResistLength, params)
	return
}
// Given a writer of where the user wants the kblobbed output to
// go and a configuration, creates a writer for unblobbed contents.
func NewWriter(kblob io.WriteSeeker, resistType byte, cipherType byte, params KCodecParams) (unblob io.WriteCloser, err error) {
var resist, cipher KCodec
switch resistType {
case ResistType_None:
resist = &NullConfig{}
case ResistType_Rs:
dataPieceSize, dataPieceCount, parityPieceCount := params.GetRsParams()
resist = &RsConfig{int32(dataPieceSize), int8(dataPieceCount), int8(parityPieceCount), 0}
default:
panic("Bad resist type")
}
switch cipherType {
case CipherType_None:
cipher = &NullConfig{}
case CipherType_Aead:
cipher, err = NewAeadConfig(int64(params.GetAeadChunkSize()))
if err != nil {
panic(err.Error())
}
default:
panic("Bad cipher type")
}
config := &Config{CurrentVersion, resistType, cipherType}
// Write the whole config twice at the start:
err = config.WriteConfig(kblob, resist, cipher)
if err != nil {
return
}
err = config.WriteConfig(kblob, resist, cipher)
if err != nil {
return
}
// Create the inner writers:
resistWriter, err := resist.NewWriter(kblob, params)
if err != nil {
return
}
cipherWriter, err := cipher.NewWriter(resistWriter, params)
if err != nil {
return
}
unblob = &KblobWriter{config, resist, cipher, kblob, resistWriter, cipherWriter}
return
} | komblobulate.go | 0.627951 | 0.413063 | komblobulate.go | starcoder |
package value
import (
"strings"
)
// valueType enumerates the concrete kinds a Value can take, ordered from
// narrowest scalar to widest composite; the order is significant for type
// promotion and for indexing the operator dispatch tables.
type valueType int

const (
	intType valueType = iota
	charType
	bigIntType
	bigRatType
	bigFloatType
	vectorType
	matrixType
	numType // count of types; sizes the fn dispatch arrays
)
// typeName gives the printable name for each valueType, indexed by the
// constants above.
var typeName = [...]string{"int", "char", "big int", "rational", "float", "vector", "matrix"}

// String implements fmt.Stringer for valueType.
func (t valueType) String() string {
	return typeName[t]
}
// unaryFn is an implementation of a unary operator for one value type.
type unaryFn func(Context, Value) Value

// unaryOp describes a unary operator: its name, whether it distributes
// elementwise over vectors/matrices, and one implementation per type.
type unaryOp struct {
	name        string
	elementwise bool // whether the operation applies elementwise to vectors and matrices
	fn          [numType]unaryFn
}
// EvalUnary applies the operator to v, dispatching on v's type. When no
// direct implementation exists and the operator is elementwise, it is
// distributed over vector/matrix elements; otherwise evaluation errors out.
func (op *unaryOp) EvalUnary(c Context, v Value) Value {
	which := whichType(v)
	fn := op.fn[which]
	if fn == nil {
		if op.elementwise {
			switch which {
			case vectorType:
				return unaryVectorOp(c, op.name, v)
			case matrixType:
				return unaryMatrixOp(c, op.name, v)
			}
		}
		Errorf("unary %s not implemented on type %s", op.name, which)
	}
	return fn(c, v)
}
// binaryFn is an implementation of a binary operator for one value type.
type binaryFn func(Context, Value, Value) Value

// binaryOp describes a binary operator: its name, elementwise behavior,
// the promotion rule mapping the two operand types to a common type, and
// one implementation per type.
type binaryOp struct {
	name        string
	elementwise bool // whether the operation applies elementwise to vectors and matrices
	whichType   func(a, b valueType) valueType
	fn          [numType]binaryFn
}
// whichType returns the valueType tag for v's concrete (inner) type,
// erroring out on an unknown type.
func whichType(v Value) valueType {
	switch v.Inner().(type) {
	case Int:
		return intType
	case Char:
		return charType
	case BigInt:
		return bigIntType
	case BigRat:
		return bigRatType
	case BigFloat:
		return bigFloatType
	case Vector:
		return vectorType
	case *Matrix:
		return matrixType
	}
	Errorf("unknown type %T in whichType", v)
	panic("which type")
}
// EvalBinary promotes u and v to the operator's common type and applies
// the type-specific implementation; elementwise operators without a
// direct vector/matrix implementation are distributed over elements.
func (op *binaryOp) EvalBinary(c Context, u, v Value) Value {
	if op.whichType == nil {
		// At the moment, "text" is the only operator that leaves
		// both arg types alone. Perhaps more will arrive.
		if op.name != "text" {
			Errorf("internal error: nil whichType")
		}
		return op.fn[0](c, u, v)
	}
	which := op.whichType(whichType(u), whichType(v))
	conf := c.Config()
	u = u.toType(conf, which)
	v = v.toType(conf, which)
	fn := op.fn[which]
	if fn == nil {
		if op.elementwise {
			switch which {
			case vectorType:
				return binaryVectorOp(c, u, op.name, v)
			case matrixType:
				return binaryMatrixOp(c, u, op.name, v)
			}
		}
		Errorf("binary %s not implemented on type %s", op.name, which)
	}
	return fn(c, u, v)
}
// Product computes a compound product, such as an inner product
// "+.*" or outer product "o.*". The op is known to contain a
// period. The operands are all at least vectors, and for inner product
// they must both be vectors.
func Product(c Context, u Value, op string, v Value) Value {
	// Split "left.right" at the period; "o" on the left selects outer product.
	dot := strings.IndexByte(op, '.')
	left := op[:dot]
	right := op[dot+1:]
	which := atLeastVectorType(whichType(u), whichType(v))
	u = u.toType(c.Config(), which)
	v = v.toType(c.Config(), which)
	if left == "o" {
		return outerProduct(c, u, right, v)
	}
	return innerProduct(c, u, left, right, v)
}
// inner product computes an inner product such as "+.*".
// u and v are known to be the same type and at least Vectors.
//
// NOTE(review): the accumulation folds left-to-right, while Reduce below
// is right-associative — for non-associative operators the two disagree;
// confirm this is intended.
func innerProduct(c Context, u Value, left, right string, v Value) Value {
	switch u := u.(type) {
	case Vector:
		v := v.(Vector)
		u.sameLength(v)
		var x Value
		for k, e := range u {
			tmp := c.EvalBinary(e, right, v[k])
			if k == 0 {
				x = tmp
			} else {
				x = c.EvalBinary(x, left, tmp)
			}
		}
		return x
	case *Matrix:
		// Say we're doing +.*
		// result[i,j] = +/(u[row i] * v[column j])
		// Number of columns of u must be the number of rows of v.
		// The result has shape (urows, vcols).
		v := v.(*Matrix)
		if u.Rank() != 2 || v.Rank() != 2 {
			Errorf("can't do inner product on shape %s times %s", NewIntVector(u.shape), NewIntVector(v.shape))
		}
		urows := u.shape[0]
		ucols := u.shape[1]
		vrows := v.shape[0]
		vcols := v.shape[1]
		if vrows != ucols {
			Errorf("inner product; column count of left (%d) not equal to row count on right (%d)", ucols, vrows)
		}
		data := make(Vector, urows*vcols)
		shape := []int{urows, vcols}
		i := 0
		for urow := 0; urow < urows; urow++ {
			for vcol := 0; vcol < vcols; vcol++ {
				acc := c.EvalBinary(u.data[urow*ucols], right, v.data[vcol])
				for vrow := 1; vrow < vrows; vrow++ {
					acc = c.EvalBinary(acc, left, c.EvalBinary(u.data[urow*ucols+vrow], right, v.data[vrow*vcols+vcol]))
				}
				data[i] = acc
				i++
			}
		}
		return NewMatrix(shape, data)
	}
	Errorf("can't do inner product on %s", whichType(u))
	panic("not reached")
}
// outer product computes an outer product such as "o.*".
// u and v are known to be at least Vectors. The result's shape is the
// concatenation of the operands' shapes, with op applied to every pair.
func outerProduct(c Context, u Value, op string, v Value) Value {
	switch u := u.(type) {
	case Vector:
		v := v.(Vector)
		m := Matrix{
			shape: []int{len(u), len(v)},
			data:  NewVector(make(Vector, len(u)*len(v))),
		}
		index := 0
		for _, vu := range u {
			for _, vv := range v {
				m.data[index] = c.EvalBinary(vu, op, vv)
				index++
			}
		}
		return &m // TODO: Shrink?
	case *Matrix:
		v := v.(*Matrix)
		m := Matrix{
			shape: append(u.Shape(), v.Shape()...),
			data:  NewVector(make(Vector, len(u.Data())*len(v.Data()))),
		}
		index := 0
		for _, vu := range u.Data() {
			for _, vv := range v.Data() {
				m.data[index] = c.EvalBinary(vu, op, vv)
				index++
			}
		}
		return &m // TODO: Shrink?
	}
	Errorf("can't do outer product on %s", whichType(u))
	panic("not reached")
}
// Reduce computes a reduction such as +/. The slash has been removed.
// Scalars reduce to themselves; a matrix is reduced along its last axis,
// producing a value of one lower rank.
func Reduce(c Context, op string, v Value) Value {
	// We must be right associative; that is the grammar.
	// -/1 2 3 == 1-2-3 is 1-(2-3) not (1-2)-3. Answer: 2.
	switch v := v.(type) {
	case Int, BigInt, BigRat:
		return v
	case Vector:
		if len(v) == 0 {
			return v
		}
		// Fold from the right to honor right associativity.
		acc := v[len(v)-1]
		for i := len(v) - 2; i >= 0; i-- {
			acc = c.EvalBinary(v[i], op, acc)
		}
		return acc
	case *Matrix:
		if v.Rank() < 2 {
			Errorf("shape for matrix is degenerate: %s", NewIntVector(v.shape))
		}
		// stride is the length of the last axis; each contiguous run of
		// stride elements reduces to one output element.
		stride := v.shape[v.Rank()-1]
		if stride == 0 {
			Errorf("shape for matrix is degenerate: %s", NewIntVector(v.shape))
		}
		shape := v.shape[:v.Rank()-1]
		data := make(Vector, size(shape))
		index := 0
		for i := range data {
			pos := index + stride - 1
			acc := v.data[pos]
			pos--
			for i := 1; i < stride; i++ {
				acc = c.EvalBinary(v.data[pos], op, acc)
				pos--
			}
			data[i] = acc
			index += stride
		}
		if len(shape) == 1 { // TODO: Matrix.shrink()?
			return NewVector(data)
		}
		return NewMatrix(shape, data)
	}
	Errorf("can't do reduce on %s", whichType(v))
	panic("not reached")
}
// Scan computes a scan of the op; the \ has been removed.
// It gives the successive values of reducing op through v.
// We must be right associative; that is the grammar.
// For matrices the scan runs along the last axis of each row.
func Scan(c Context, op string, v Value) Value {
	switch v := v.(type) {
	case Int, BigInt, BigRat:
		return v
	case Vector:
		if len(v) == 0 {
			return v
		}
		values := make(Vector, len(v))
		acc := v[0]
		values[0] = acc
		// TODO: This is n^2 (each prefix is re-reduced from scratch).
		for i := 1; i < len(v); i++ {
			values[i] = Reduce(c, op, v[:i+1])
		}
		return NewVector(values)
	case *Matrix:
		if v.Rank() < 2 {
			Errorf("shape for matrix is degenerate: %s", NewIntVector(v.shape))
		}
		stride := v.shape[v.Rank()-1]
		if stride == 0 {
			Errorf("shape for matrix is degenerate: %s", NewIntVector(v.shape))
		}
		data := make(Vector, len(v.data))
		index := 0
		// nrows is the product of all axes except the last.
		nrows := 1
		for i := 0; i < v.Rank()-1; i++ {
			// Guaranteed by NewMatrix not to overflow.
			nrows *= v.shape[i]
		}
		for i := 0; i < nrows; i++ {
			acc := v.data[index]
			data[index] = acc
			// TODO: This is n^2.
			for j := 1; j < stride; j++ {
				data[index+j] = Reduce(c, op, v.data[index:index+j+1])
			}
			index += stride
		}
		return NewMatrix(v.shape, data)
	}
	Errorf("can't do scan on %s", whichType(v))
	panic("not reached")
}
// unaryVectorOp applies op elementwise to the vector i.
func unaryVectorOp(c Context, op string, i Value) Value {
	v := i.(Vector)
	out := make([]Value, len(v))
	for idx, elem := range v {
		out[idx] = c.EvalUnary(op, elem)
	}
	return NewVector(out)
}
// unaryMatrixOp applies op elementwise to the matrix i, preserving shape.
func unaryMatrixOp(c Context, op string, i Value) Value {
	m := i.(*Matrix)
	out := make([]Value, len(m.data))
	for idx, elem := range m.data {
		out[idx] = c.EvalUnary(op, elem)
	}
	return NewMatrix(m.shape, NewVector(out))
}
// binaryVectorOp applies op elementwise to i and j. A one-element operand
// is broadcast across the other; otherwise the lengths must match.
func binaryVectorOp(c Context, i Value, op string, j Value) Value {
	u, v := i.(Vector), j.(Vector)
	if len(u) == 1 {
		n := make([]Value, len(v))
		for k := range v {
			n[k] = c.EvalBinary(u[0], op, v[k])
		}
		return NewVector(n)
	}
	if len(v) == 1 {
		n := make([]Value, len(u))
		for k := range u {
			n[k] = c.EvalBinary(u[k], op, v[0])
		}
		return NewVector(n)
	}
	u.sameLength(v)
	n := make([]Value, len(u))
	for k := range u {
		n[k] = c.EvalBinary(u[k], op, v[k])
	}
	return NewVector(n)
}
// binaryMatrixOp applies op elementwise to i and j, broadcasting a
// promoted scalar or promoted vector operand over the other matrix;
// otherwise the shapes must match exactly.
func binaryMatrixOp(c Context, i Value, op string, j Value) Value {
	u, v := i.(*Matrix), j.(*Matrix)
	shape := u.shape
	var n []Value
	// One or the other may be a scalar in disguise.
	switch {
	case isScalar(u):
		// Scalar op Matrix.
		shape = v.shape
		n = make([]Value, len(v.data))
		for k := range v.data {
			n[k] = c.EvalBinary(u.data[0], op, v.data[k])
		}
	case isScalar(v):
		// Matrix op Scalar.
		n = make([]Value, len(u.data))
		for k := range u.data {
			n[k] = c.EvalBinary(u.data[k], op, v.data[0])
		}
	case isVector(u, v.shape):
		// Vector op Matrix: the vector cycles along v's last axis.
		shape = v.shape
		n = make([]Value, len(v.data))
		dim := u.shape[0]
		index := 0
		for k := range v.data {
			n[k] = c.EvalBinary(u.data[index], op, v.data[k])
			index++
			if index >= dim {
				index = 0
			}
		}
	case isVector(v, u.shape):
		// Matrix op Vector: the vector cycles along u's last axis.
		n = make([]Value, len(u.data))
		dim := v.shape[0]
		index := 0
		for k := range u.data {
			n[k] = c.EvalBinary(v.data[index], op, u.data[k])
			index++
			if index >= dim {
				index = 0
			}
		}
	default:
		// Matrix op Matrix.
		u.sameShape(v)
		n = make([]Value, len(u.data))
		for k := range u.data {
			n[k] = c.EvalBinary(u.data[k], op, v.data[k])
		}
	}
	return NewMatrix(shape, NewVector(n))
}
// isScalar reports whether u is a 1x1x1x... item, that is, a scalar
// promoted to matrix form (every dimension of its shape is 1).
func isScalar(u *Matrix) bool {
	for _, extent := range u.shape {
		if extent != 1 {
			return false
		}
	}
	return true
}
// isVector reports whether u is an 1x1x...xn item where n is the last dimension
// of the shape, that is, an n-vector promoted to matrix.
// Concretely: u's first dimension must equal the last dimension of
// shape, and every remaining dimension of u must be 1.
func isVector(u *Matrix, shape []int) bool {
	if u.Rank() == 0 || len(shape) == 0 || u.shape[0] != shape[len(shape)-1] {
		return false
	}
	for _, dim := range u.shape[1:] {
		if dim != 1 {
			return false
		}
	}
	return true
}
package imgdiff
import (
"image"
"image/color"
"math"
"sync"
)
var (
	// Reference white point of the XYZ colorspace; filled in by init
	// from RGB white (0xff,0xff,0xff) at gamma 1.
	whiteX, whiteY, whiteZ float64
)

const (
	// CIE LAB colorspace conversion constants.
	epsilon = 216.0 / 24389.0
	kappa = 24389.0 / 27.0
)
// init derives the XYZ reference white from pure RGB white at gamma 1
// so that lab() can normalize against it.
func init() {
	whiteX, whiteY, whiteZ = xyz(color.RGBA{0xff, 0xff, 0xff, 0xff}, 1)
}
// perceptual implements Differ using a perceptual difference metric:
// pixel differences are weighted by a model of the human visual
// system's contrast sensitivity before being thresholded.
type perceptual struct {
	// display gamma used to linearize pixel values
	gamma float64
	// luminance scale applied to the Y channel (presumably cd/m^2 —
	// see the 100.0 default; confirm against callers)
	lum float64
	// test for luminance only, skipping the color (a/b) test
	nocolor bool
	// field of view in degrees
	fov float64
	// color factor: weight applied to the color-difference test
	cf float64
	// num one degree pixels: pixels subtending one degree of view
	odp float64
	// adaptation level index into the Laplacian pyramid, starting from 0
	ai int
}
// NewPerceptual creates a new Differ based on perceptual diff algorithm.
// gamma linearizes pixel values, luminance scales the Y channel, fov is
// the observer's field of view in degrees, cf weights the color test
// and nocolor disables the color test entirely.
func NewPerceptual(gamma, luminance, fov, cf float64, nocolor bool) Differ {
	d := &perceptual{
		gamma: gamma,
		lum: luminance,
		fov: fov,
		cf: cf,
		nocolor: nocolor,
		// pixels per degree of visual angle, assuming the image spans fov
		odp: 2 * math.Tan(fov*0.5*math.Pi/180) * 180 / math.Pi,
	}
	// Pick the pyramid level whose scale (powers of two) first exceeds
	// one degree of visual angle; used as the adaptation level in Compare.
	// Capped at lapLevels-1.
	for n := 1.0; !(n > d.odp); n *= 2 {
		d.ai++
		if d.ai == lapLevels-1 {
			break
		}
	}
	return d
}
// NewDefaultPerceptual returns a perceptual Differ configured with the
// conventional defaults: gamma 2.2, luminance 100.0, a 45 degree field
// of view, color factor 1.0 and the color test enabled.
func NewDefaultPerceptual() Differ {
	const (
		defaultGamma     = 2.2
		defaultLuminance = 100.0
		defaultFOV       = 45.0
		defaultColorFact = 1.0
	)
	return NewPerceptual(defaultGamma, defaultLuminance, defaultFOV, defaultColorFact, false)
}
// Compare compares a and b using pdiff algorithm.
// It returns a diff image (red where the images differ perceptibly,
// black elsewhere), the number of differing pixels, and ErrSize when
// the two images have different dimensions.
func (d *perceptual) Compare(a, b image.Image) (image.Image, int, error) {
	ab, bb := a.Bounds(), b.Bounds()
	w, h := ab.Dx(), ab.Dy()
	if w != bb.Dx() || h != bb.Dy() {
		return nil, -1, ErrSize
	}
	diff := image.NewNRGBA(image.Rect(0, 0, w, h))
	var (
	wg sync.WaitGroup
	aLAB, bLAB [][]*labColor
	aLap, bLap [][][]float64
	)
	// Convert both images to LAB and build their luminance Laplacian
	// pyramids concurrently.
	wg.Add(2)
	go func() {
		aLAB, aLap = labLap(a, d.gamma, d.lum)
		wg.Done()
	}()
	go func() {
		bLAB, bLap = labLap(b, d.gamma, d.lum)
		wg.Done()
	}()
	// Spatial frequency represented by each pyramid level; each coarser
	// level halves the frequency.
	cpd := make([]float64, lapLevels) // cycles per degree
	cpd[0] = 0.5 * float64(w) / d.odp // 0.5 * pixels per degree
	for i := 1; i < lapLevels; i++ {
		cpd[i] = 0.5 * cpd[i-1]
	}
	// Normalize contrast sensitivity by its value at 3.248 cpd.
	csfMax := csf(3.248, 100.0)
	freq := make([]float64, lapLevels-2)
	for i := 0; i < lapLevels-2; i++ {
		freq[i] = csfMax / csf(cpd[i], 100.0)
	}
	wg.Wait()
	var npix int // num of diff pixels
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			// Adaptation luminance at this pixel, taken from the
			// chosen pyramid level (floored to avoid div-by-zero-ish values).
			adapt := math.Max(0.5*(aLap[d.ai][y][x]+bLap[d.ai][y][x]), 1e-5)
			mask := make([]float64, lapLevels-2)
			contrast := make([]float64, lapLevels-2)
			var contrastSum float64
			// Band-limited contrast and visual masking per frequency band.
			for i := 0; i < lapLevels-2; i++ {
				n1 := math.Abs(aLap[i][y][x] - aLap[i+1][y][x])
				n2 := math.Abs(bLap[i][y][x] - bLap[i+1][y][x])
				d1 := math.Abs(aLap[i+2][y][x])
				d2 := math.Abs(bLap[i+2][y][x])
				d := math.Max(d1, d2)
				contrast[i] = math.Max(n1, n2) / math.Max(d, 1e-5)
				mask[i] = vmask(contrast[i] * csf(cpd[i], adapt))
				contrastSum += contrast[i]
			}
			if contrastSum < 1e-5 {
				contrastSum = 1e-5
			}
			// Threshold elevation factor, clamped to [1, 10]; it scales
			// the base visibility threshold tvi(adapt) below.
			var factor float64
			for i := 0; i < lapLevels-2; i++ {
				factor += contrast[i] * freq[i] * mask[i] / contrastSum
			}
			if factor < 1 {
				factor = 1
			} else if factor > 10 {
				factor = 10
			}
			delta := math.Abs(aLap[0][y][x] - bLap[0][y][x])
			pass := true
			// pure luminance test
			if delta > factor*tvi(adapt) {
				pass = false
			} else if !d.nocolor {
				// CIE delta E test with modifications
				cf := d.cf
				// ramp down the color test in scotopic regions
				if adapt < 10.0 {
					// don't do color test at all
					cf = 0.0
				}
				da := aLAB[y][x].a - bLAB[y][x].a
				db := aLAB[y][x].b - bLAB[y][x].b
				if (da*da+db*db)*cf > factor {
					pass = false
				}
			}
			// Differing pixels are painted pure red; matching pixels black.
			c := color.NRGBA{0, 0, 0, 0xff}
			if !pass {
				npix++
				c.R = 0xff
				//ar, ag, ab, _ := a.At(x, y).RGBA()
				//br, bg, bb, _ := b.At(x, y).RGBA()
				//c.R = uint8((math.Abs(float64(ar)-float64(br)) / 0xffff) * 0xff)
				//c.G = uint8((math.Abs(float64(ag)-float64(bg)) / 0xffff) * 0xff)
				//c.B = uint8((math.Abs(float64(ab)-float64(bb)) / 0xffff) * 0xff)
			}
			diff.Set(x, y, c)
		}
	}
	return diff, npix, nil
}
// labColor holds a color in the CIE L*a*b* colorspace.
type labColor struct {
	l, a, b float64
}

// lab converts XYZ coordinates to CIE L*a*b*, normalized against the
// package reference white (whiteX/Y/Z).
func lab(x, y, z float64) *labColor {
	r := [3]float64{x / whiteX, y / whiteY, z / whiteZ}
	var f [3]float64
	for i := 0; i < 3; i++ {
		// Cube root above the epsilon knee, linear approximation below
		// (standard CIE piecewise definition).
		if r[i] > epsilon {
			f[i] = math.Pow(r[i], 1.0/3.0)
			continue
		}
		f[i] = (kappa*r[i] + 16.0) / 116.0
	}
	return &labColor{
		l: 116.0*f[1] - 16.0,
		a: 500.0 * (f[0] - f[1]),
		b: 200.0 * (f[1] - f[2]),
	}
}
func xyz(c color.Color, gamma float64) (float64, float64, float64) {
r, g, b, _ := c.RGBA()
rg := math.Pow(float64(r)/0xffff, gamma)
gg := math.Pow(float64(g)/0xffff, gamma)
bg := math.Pow(float64(b)/0xffff, gamma)
x := rg*0.576700 + gg*0.185556 + bg*0.188212
y := rg*0.297361 + gg*0.627355 + bg*0.0752847
z := rg*0.0270328 + gg*0.0706879 + bg*0.991248
return x, y, z
}
// labLap converts every pixel of m to LAB and returns the per-pixel LAB
// colors together with the Laplacian pyramid of the luminance channel
// (the Y component scaled by lum).
func labLap(m image.Image, gamma, lum float64) ([][]*labColor, [][][]float64) {
	w, h := m.Bounds().Dx(), m.Bounds().Dy()
	aLum, aLAB := make([][]float64, h), make([][]*labColor, h)
	for y := 0; y < h; y++ {
		aLum[y], aLAB[y] = make([]float64, w), make([]*labColor, w)
		for x := 0; x < w; x++ {
			cx, cy, cz := xyz(m.At(x, y), gamma)
			aLAB[y][x] = lab(cx, cy, cz)
			aLum[y][x] = cy * lum
		}
	}
	return aLAB, pyramid(aLum)
}
var (
	// max number of Laplacian pyramid levels
	lapLevels = 8
	// separable 5-tap low-pass filter kernel used by pyramid()
	lapKernel = [5]float64{0.05, 0.25, 0.4, 0.25, 0.05}
)
// pyramid creates a Laplacian Pyramid out of the image m.
// The result is [level][y][x] where level ranges from 0 to lapLevels.
// Level 0 is a copy of m; each subsequent level is the previous one
// convolved with the separable 5x5 kernel built from lapKernel. All
// levels keep the full w x h resolution (no downsampling).
func pyramid(m [][]float64) [][][]float64 {
	h, w := len(m), len(m[0])
	p := make([][][]float64, lapLevels)
	for l := 0; l < lapLevels; l++ {
		p[l] = make([][]float64, h)
		// first level is a copy
		if l == 0 {
			for y := 0; y < h; y++ {
				p[l][y] = make([]float64, w)
				copy(p[l][y], m[y])
			}
			continue
		}
		// next levels are convolution of the previous one
		for y := 0; y < h; y++ {
			p[l][y] = make([]float64, w)
			for x := 0; x < w; x++ {
				for i := -2; i <= 2; i++ {
					for j := -2; j <= 2; j++ {
						// Out-of-bounds taps are mirrored back into
						// the image (reflect boundary handling).
						ny := y + j
						if ny < 0 {
							ny = -ny
						}
						if ny >= h {
							ny = 2*h - ny - 1
						}
						nx := x + i
						if nx < 0 {
							nx = -nx
						}
						if nx >= w {
							nx = 2*w - nx - 1
						}
						p[l][y][x] += lapKernel[i+2] * lapKernel[j+2] * p[l-1][ny][nx]
					}
				}
			}
		}
	}
	return p
}
// csf computes the contrast sensitivity function (<NAME> 1989)
// given the cycles per degree cpd and luminance lum.
func csf(cpd, lum float64) float64 {
a := 440.0 * math.Pow((1.0+0.7/lum), -0.2)
b := 0.3 * math.Pow((1.0+100.0/lum), 0.15)
return a * cpd * math.Exp(-b*cpd) * math.Sqrt(1.0+0.06*math.Exp(b*cpd))
}
// vmask is Visual Masking from Daly 1993, computed from contrast c.
func vmask(c float64) float64 {
a := math.Pow(392.498*c, 0.7)
b := math.Pow(0.0153*a, 4.0)
return math.Pow(1.0+b, 0.25)
}
// tvi, Threshold vs Intensity, computes the threshold of visibility
// given the adaptation luminance al in candelas per square meter.
// It is based on <NAME> Siggraph 1997.
func tvi(al float64) float64 {
var r float64
al = math.Log10(al)
switch {
case al < -3.94:
r = -2.86
case al < -1.44:
r = math.Pow(0.405*al+1.6, 2.18) - 2.86
case al < -0.0184:
r = al - 0.395
case al < 1.9:
r = math.Pow(0.249*al+0.65, 2.7) - 0.72
default:
r = al - 1.255
}
return math.Pow(10.0, r)
} | perceptual.go | 0.635109 | 0.414366 | perceptual.go | starcoder |
package ungraph
import "fmt"
type EdgeInfo int // EdgeInfo, e.g. weight

type VertexType interface{} // VertexType, e.g. 1, 2, 3 or A, B, C

// Edge is one node of the adjacency multi-list: a single undirected
// edge threaded onto the edge lists of both of its endpoints.
type Edge struct {
	isVisited bool // mark that whether the current edge is visited
	uIdx, vIdx int // the index of two vertices of the current edge in adj_multi_list
	uNextEdge, vNextEdge *Edge // Respectively point to the next edge attached to these two vertices
	info EdgeInfo
}

// Vertex is one slot of the adjacency multi-list: the vertex payload
// plus the head of its edge list.
type Vertex struct {
	data VertexType
	firstEdge *Edge
}

// Graph Undirected graph implemented with Adjacency Multi-list
type Graph struct {
	vertexNum, edgeNum int
	adjMultiList []Vertex
}

// UnGraph a undirected graph "class"; it bundles the multi-list with
// the deduplicated vertex and edge sets it was built from.
type UnGraph struct {
	g *Graph
	vertexSet []VertexType
	edgeSet [][]VertexType
	degree int
}
// Create builds an undirected graph (adjacency multi-list) from an edge
// set and returns the populated UnGraph.
// input edge set:
// e.g.
// [][]VertexType{{1, 3}, {3, 1}}
// [][]VertexType{{1, 3, 2}, {3, 1, 5}}
// Duplicate and malformed edges are filtered out.
func (udg UnGraph) Create(edges [][]VertexType) UnGraph {
	// BUG FIX: the return value of setVertexSet was previously
	// discarded; with a value receiver that left vertexSet and edgeSet
	// empty on the UnGraph being built.
	udg = udg.setVertexSet(edges)
	// init a undirected graph
	graph := Graph{
		vertexNum: len(udg.vertexSet),
		edgeNum: len(udg.edgeSet),
		adjMultiList: nil,
	}
	// init all vertices in adjacency multi-list
	graph.adjMultiList = make([]Vertex, graph.vertexNum)
	for i, vertex := range udg.vertexSet {
		graph.adjMultiList[i].data = vertex
		graph.adjMultiList[i].firstEdge = nil
	}
	// BUG FIX: udg.g must be assigned before the edge loop below, which
	// dereferences it via locateVertex and the adjMultiList accesses
	// (previously udg.g was still nil at this point).
	udg.g = &graph
	// init all edges; each new edge is pushed onto the head of both of
	// its endpoints' edge lists.
	for _, edge := range udg.edgeSet {
		var e Edge
		e.uIdx = udg.locateVertex(edge[0])
		e.vIdx = udg.locateVertex(edge[1])
		e.isVisited = false
		e.uNextEdge = udg.g.adjMultiList[e.uIdx].firstEdge
		e.vNextEdge = udg.g.adjMultiList[e.vIdx].firstEdge
		udg.g.adjMultiList[e.uIdx].firstEdge = &e
		udg.g.adjMultiList[e.vIdx].firstEdge = &e
	}
	return udg
}
// GetNeighborVertices returns the data of every vertex adjacent to v by
// walking v's edge list in the adjacency multi-list. It returns nil if
// v is not in the graph or has no neighbors.
func (udg UnGraph) GetNeighborVertices(v VertexType) []VertexType {
	idx := udg.locateVertex(v)
	if idx < 0 {
		return nil
	}
	var neighbors []VertexType
	// Each edge carries two "next" pointers; follow the one belonging
	// to idx's list. BUG FIX: the previous loop body was empty and
	// never advanced the cursor, looping forever on any vertex that had
	// at least one edge.
	for e := udg.g.adjMultiList[idx].firstEdge; e != nil; {
		if e.uIdx == idx {
			neighbors = append(neighbors, udg.g.adjMultiList[e.vIdx].data)
			e = e.uNextEdge
		} else {
			neighbors = append(neighbors, udg.g.adjMultiList[e.uIdx].data)
			e = e.vNextEdge
		}
	}
	return neighbors
}
// GetNeighborEdges is not yet implemented; it always returns nil.
// TODO: walk v's edge list analogously to GetNeighborVertices.
func (udg UnGraph) GetNeighborEdges(v VertexType) [][]VertexType {
	return nil
}
// BFS Breadth-First-Search
// Queue: enqueue, dequeue
// Not yet implemented; it always returns nil.
func (udg UnGraph) BFS() []VertexType {
	return nil
}
// DFS performs a recursive Depth-First-Search traversal starting at v.
// A single visited set is shared across the whole traversal; the
// previous version rebuilt the set on every call and only followed the
// first edge, so it could recurse without bound on cyclic graphs.
// (The old comment also mislabeled this as Breadth-First-Search.)
func (udg UnGraph) DFS(v VertexType) {
	visited := make(map[VertexType]bool)
	udg.dfs(v, visited)
}

// dfs visits v, then recursively visits every not-yet-visited neighbor.
func (udg UnGraph) dfs(v VertexType, visited map[VertexType]bool) {
	if visited[v] {
		return
	}
	visited[v] = true
	idx := udg.locateVertex(v)
	if idx < 0 {
		return
	}
	for e := udg.g.adjMultiList[idx].firstEdge; e != nil; {
		var next int
		if e.uIdx == idx {
			next, e = e.vIdx, e.uNextEdge
		} else {
			next, e = e.uIdx, e.vNextEdge
		}
		udg.dfs(udg.g.adjMultiList[next].data, visited)
	}
}
// GetDegree is not yet implemented; it always returns 0 regardless of
// node. TODO: count the edges on node's adjacency list.
func (udg UnGraph) GetDegree(node VertexType) int {
	return 0
}
// GetVertexSet returns the deduplicated vertex set built by Create.
func (udg UnGraph) GetVertexSet() []VertexType {
	return udg.vertexSet
}

// GetEdgeSet returns the filtered edge set built by Create.
func (udg UnGraph) GetEdgeSet() [][]VertexType {
	return udg.edgeSet
}
// locateVertex returns the index of v in the adjacency multi-list, or
// -1 if v is not present. Linear scan; requires udg.g to be non-nil.
func (udg UnGraph) locateVertex(v VertexType) int {
	for i := 0; i < udg.g.vertexNum; i++ {
		if udg.g.adjMultiList[i].data == v {
			return i
		}
	}
	return -1
}
// setVertexSet derives the deduplicated vertex set from the filtered
// edge set and returns the updated UnGraph copy (value receiver).
func (udg UnGraph) setVertexSet(edges [][]VertexType) UnGraph {
	// Firstly, set edges.
	// BUG FIX: the return value of setEdgeSet was previously discarded,
	// so with a value receiver edgeSet was never actually stored.
	udg = udg.setEdgeSet(edges)
	var vs []VertexType
	for _, edge := range udg.edgeSet {
		vs = append(vs, edge[0], edge[1])
	}
	udg.vertexSet = filterVertices(vs)
	return udg
}
// setEdgeSet stores the filtered edge set on the value receiver and
// returns the updated copy; callers must use the return value.
func (udg UnGraph) setEdgeSet(edges [][]VertexType) UnGraph {
	filteredEdges := filterEdges(edges)
	udg.edgeSet = filteredEdges
	return udg
}
// filterEdges drops malformed edges (fewer than 2 or more than 3
// elements) and removes duplicates, treating {u, v} and {v, u} as the
// same undirected edge; the first occurrence wins.
//
// BUG FIX: the previous implementation deleted elements from the slice
// while ranging over it, which both skipped the element shifted into
// the deleted slot and kept reading stale tail values through the
// range's snapshot of the original slice header.
func filterEdges(edges [][]VertexType) [][]VertexType {
	var filtered [][]VertexType
	for _, edge := range edges {
		// filter insignificant info
		if len(edge) < 2 || len(edge) > 3 {
			continue
		}
		duplicate := false
		for _, kept := range filtered {
			if (kept[0] == edge[0] && kept[1] == edge[1]) ||
				(kept[0] == edge[1] && kept[1] == edge[0]) {
				duplicate = true
				break
			}
		}
		if !duplicate {
			filtered = append(filtered, edge)
		}
	}
	return filtered
}
// filterVertices returns vertices with duplicates removed, preserving
// first-seen order.
func filterVertices(vertices []VertexType) []VertexType {
	seen := make(map[VertexType]bool)
	var unique []VertexType
	for _, v := range vertices {
		if seen[v] {
			continue
		}
		seen[v] = true
		unique = append(unique, v)
	}
	return unique
}
// PrintEdges writes each edge to stdout as "(u, v)", ignoring any
// optional third (info) element.
func PrintEdges(edges [][]VertexType) {
	for _, edge := range edges {
		fmt.Printf("(%v, %v)\n", edge[0], edge[1])
	}
}
// TestUndirectedGraph is an ad-hoc demo driver (not a go test): it
// builds an edge set containing duplicate and malformed entries, runs
// filterEdges over it and prints input and output for inspection.
func TestUndirectedGraph() {
	/* multiDimensionArr := [][]VertexType{
		{1, 2, 3},
		{2, 3, 10},
		{1, 3, 5},
		{4, 3, 10},
		{3, 4, 15},
		{4, 5},
		{1, 2},
		{1},
	}*/
	multiDimensionArr := [][]VertexType{
		{"A", "B", 3},
		{"B", "C", 10},
		{"A", "C", 5},
		{"D", "C", 10},
		{"C", "D", 15},
		{"D", "E"},
		{"A", "B"},
		{"A"},
	}
	fmt.Printf("1: %v\n", multiDimensionArr)
	filtered := filterEdges(multiDimensionArr)
	PrintEdges(filtered)
	fmt.Printf("4: %v\n", multiDimensionArr)
	fmt.Printf("5: %v\n", filtered)
}
package raft
import (
"errors"
"github.com/relab/raft/commonpb"
)
// Keys for indexing term and who was voted for, plus log bookkeeping
// and snapshot storage.
const (
	KeyTerm uint64 = iota
	KeyVotedFor
	KeyFirstIndex
	KeyNextIndex
	KeySnapshot
)

// Storage provides an interface for storing and retrieving Raft state.
type Storage interface {
	Set(key uint64, value uint64) error
	Get(key uint64) (uint64, error)
	// Entries must be stored such that Entry.Index can be used to retrieve
	// that entry in the future.
	StoreEntries([]*commonpb.Entry) error
	// Retrieves entry with Entry.Index == index.
	GetEntry(index uint64) (*commonpb.Entry, error)
	// Get the inclusive range of entries from first to last.
	GetEntries(first, last uint64) ([]*commonpb.Entry, error)
	// Remove the inclusive range of entries from first to last.
	RemoveEntries(first, last uint64) error
	// Should return 1 if not set.
	FirstIndex() (uint64, error)
	// Should return 1 if not set.
	NextIndex() (uint64, error)
	SetSnapshot(*commonpb.Snapshot) error
	GetSnapshot() (*commonpb.Snapshot, error)
}
// TODO Create LogStore wrapper.

// Memory implements the Storage interface as an in-memory storage.
// kvstore holds the small metadata values (term, voted-for, indexes)
// and log maps entry index to entry. There is no internal locking.
type Memory struct {
	kvstore map[uint64]uint64
	log map[uint64]*commonpb.Entry
}
// NewMemory returns a memory backed storage seeded with the given
// kvstore and log maps (taken over, not copied). Missing first/next
// index keys are initialized to 1 per the Storage contract.
func NewMemory(kvstore map[uint64]uint64, log map[uint64]*commonpb.Entry) *Memory {
	if _, ok := kvstore[KeyFirstIndex]; !ok {
		kvstore[KeyFirstIndex] = 1
	}
	if _, ok := kvstore[KeyNextIndex]; !ok {
		kvstore[KeyNextIndex] = 1
	}
	return &Memory{
		kvstore: kvstore,
		log: log,
	}
}
// Set implements the Storage interface. It stores value under key and
// never fails.
func (m *Memory) Set(key, value uint64) error {
	m.kvstore[key] = value
	return nil
}

// Get implements the Storage interface. Missing keys yield the zero
// value with a nil error.
func (m *Memory) Get(key uint64) (uint64, error) {
	return m.kvstore[key], nil
}

// StoreEntries implements the Storage interface. Entries are written at
// consecutive indexes starting at the current next index, which is then
// advanced past the last stored entry.
func (m *Memory) StoreEntries(entries []*commonpb.Entry) error {
	i := m.kvstore[KeyNextIndex]
	for _, entry := range entries {
		m.log[i] = entry
		i++
	}
	return m.Set(KeyNextIndex, i)
}

// GetEntry implements the Storage interface. It returns ErrKeyNotFound
// when no entry is stored at index.
func (m *Memory) GetEntry(index uint64) (*commonpb.Entry, error) {
	entry, ok := m.log[index]
	if !ok {
		return nil, ErrKeyNotFound
	}
	return entry, nil
}

// GetEntries implements the Storage interface. It returns the inclusive
// range [first, last]; indexes absent from the log yield nil slots.
func (m *Memory) GetEntries(first, last uint64) ([]*commonpb.Entry, error) {
	entries := make([]*commonpb.Entry, last-first+1)
	i := first
	for j := range entries {
		entries[j] = m.log[i]
		i++
	}
	return entries, nil
}

// RemoveEntries implements the Storage interface. It deletes the
// inclusive range [first, last] and rewinds the next index to first.
func (m *Memory) RemoveEntries(first, last uint64) error {
	for i := first; i <= last; i++ {
		delete(m.log, i)
	}
	return m.Set(KeyNextIndex, first)
}

// FirstIndex implements the Storage interface.
func (m *Memory) FirstIndex() (uint64, error) {
	first := m.kvstore[KeyFirstIndex]
	return first, nil
}

// NextIndex implements the Storage interface.
func (m *Memory) NextIndex() (uint64, error) {
	next := m.kvstore[KeyNextIndex]
	return next, nil
}

// SetSnapshot implements the Storage interface. For the in-memory store
// it is a no-op: the snapshot is silently discarded.
func (m *Memory) SetSnapshot(*commonpb.Snapshot) error {
	return nil
}

// GetSnapshot implements the Storage interface. Snapshots are not
// supported by the in-memory store.
func (m *Memory) GetSnapshot() (*commonpb.Snapshot, error) {
	return nil, errors.New("not implemented")
}
package geo
import (
"math"
"strings"
)
// decodePolylinePoints decodes encoded Polyline according to the polyline
// algorithm: https://developers.google.com/maps/documentation/utilities/polylinealgorithm
// The returned slice holds coordinate pairs in longitude, latitude order.
func decodePolylinePoints(encoded string, precision int) []float64 {
	scale := math.Pow10(precision)
	data := []byte(encoded)
	coords := []float64{}
	var (
		pos      int
		lat, lng float64
	)
	for pos < len(data) {
		var delta float64
		// latitude delta comes first in the encoding, then longitude
		pos, delta = decodePointValue(pos, data)
		lat += delta
		pos, delta = decodePointValue(pos, data)
		lng += delta
		coords = append(coords, lng/scale, lat/scale)
	}
	return coords
}
// decodePointValue reads one chunk sequence of a polyline-encoded value
// from data starting at idx and returns the new index together with the
// decoded (zigzag-encoded) value. Running past the end of data yields
// the current index and 0.
func decodePointValue(idx int, data []byte) (int, float64) {
	var res int32
	for shift := 0; ; shift += 5 {
		if idx > len(data)-1 {
			return idx, 0
		}
		chunk := data[idx] - 63
		idx++
		res |= int32(chunk&0x1F) << shift
		// chunks below 0x20 have no continuation bit set
		if chunk < 0x20 {
			break
		}
	}
	// undo the zigzag sign encoding
	if res&1 == 1 {
		return idx, float64(^(res >> 1))
	}
	return idx, float64(res >> 1)
}
// encodePolylinePoints encodes provided points using the algorithm: https://developers.google.com/maps/documentation/utilities/polylinealgorithm
// Assumes there are no malformed points - length of the input slice should be even.
// Points are consumed as (latitude, longitude) pairs; each coordinate
// is delta-encoded against the previous point after scaling to ints.
func encodePolylinePoints(points []float64, precision int) string {
	lastLat := 0
	lastLng := 0
	var res strings.Builder
	for i := 1; i < len(points); i += 2 {
		lat := int(math.Round(points[i-1] * math.Pow10(precision)))
		lng := int(math.Round(points[i] * math.Pow10(precision)))
		// NOTE(review): the longitude delta is written before the
		// latitude delta here, the reverse of decodePolylinePoints'
		// order — confirm the two are intentionally paired this way.
		res = encodePointValue(lng-lastLng, res)
		res = encodePointValue(lat-lastLat, res)
		lastLat = lat
		lastLng = lng
	}
	return res.String()
}
func encodePointValue(diff int, b strings.Builder) strings.Builder {
var shifted int
shifted = diff << 1
if diff < 0 {
shifted = ^shifted
}
rem := shifted
for rem >= 0x20 {
b.WriteRune(rune(0x20 | (rem & 0x1f) + 63))
rem = rem >> 5
}
b.WriteRune(rune(rem + 63))
return b
} | pkg/geo/polyline.go | 0.840848 | 0.468669 | polyline.go | starcoder |
package model
import (
"fmt"
local "github.com/networkservicemesh/networkservicemesh/controlplane/api/local/connection"
"github.com/networkservicemesh/networkservicemesh/controlplane/api/nsm/connection"
remote "github.com/networkservicemesh/networkservicemesh/controlplane/api/remote/connection"
)
// DataplaneState describes state of dataplane
type DataplaneState int8

const (
	// DataplaneStateNone means there is no active connection in dataplane
	DataplaneStateNone DataplaneState = 0 // In case dataplane is not yet configured for connection

	// DataplaneStateReady means there is an active connection in dataplane
	DataplaneStateReady DataplaneState = 1 // In case dataplane is configured for connection.
)

// Dataplane structure in Model that describes dataplane: its registry
// name, control socket and the mechanisms it supports.
type Dataplane struct {
	RegisteredName string
	SocketLocation string
	LocalMechanisms []connection.Mechanism
	RemoteMechanisms []connection.Mechanism
	MechanismsConfigured bool
}
// clone returns a deep copy of the Dataplane as a cloneable: scalar
// fields are copied and both mechanism slices are rebuilt with cloned
// elements. A nil receiver clones to nil.
func (d *Dataplane) clone() cloneable {
	if d == nil {
		return nil
	}
	lm := make([]connection.Mechanism, 0, len(d.LocalMechanisms))
	for _, m := range d.LocalMechanisms {
		lm = append(lm, m.Clone())
	}
	rm := make([]connection.Mechanism, 0, len(d.RemoteMechanisms))
	for _, m := range d.RemoteMechanisms {
		rm = append(rm, m.Clone())
	}
	return &Dataplane{
		RegisteredName: d.RegisteredName,
		SocketLocation: d.SocketLocation,
		LocalMechanisms: lm,
		RemoteMechanisms: rm,
		MechanismsConfigured: d.MechanismsConfigured,
	}
}
// SetLocalMechanisms replaces the dataplane's local mechanism list with
// the given mechanisms, widened to the generic connection.Mechanism type.
func (d *Dataplane) SetLocalMechanisms(mechanisms []*local.Mechanism) {
	converted := make([]connection.Mechanism, len(mechanisms))
	for i, m := range mechanisms {
		converted[i] = m
	}
	d.LocalMechanisms = converted
}
// SetRemoteMechanisms replaces the dataplane's remote mechanism list with
// the given mechanisms, widened to the generic connection.Mechanism type.
func (d *Dataplane) SetRemoteMechanisms(mechanisms []*remote.Mechanism) {
	converted := make([]connection.Mechanism, len(mechanisms))
	for i, m := range mechanisms {
		converted[i] = m
	}
	d.RemoteMechanisms = converted
}
// dataplaneDomain is the model storage domain for Dataplane objects,
// keyed by RegisteredName.
type dataplaneDomain struct {
	baseDomain
}

// newDataplaneDomain returns an empty, ready-to-use dataplane domain.
func newDataplaneDomain() dataplaneDomain {
	return dataplaneDomain{
		baseDomain: newBase(),
	}
}
// AddDataplane stores dp under its RegisteredName.
func (d *dataplaneDomain) AddDataplane(dp *Dataplane) {
	d.store(dp.RegisteredName, dp)
}

// GetDataplane returns the dataplane stored under name, or nil.
func (d *dataplaneDomain) GetDataplane(name string) *Dataplane {
	v, _ := d.load(name)
	if v != nil {
		return v.(*Dataplane)
	}
	return nil
}

// DeleteDataplane removes the dataplane stored under name.
func (d *dataplaneDomain) DeleteDataplane(name string) {
	d.delete(name)
}

// UpdateDataplane overwrites the entry for dp.RegisteredName with dp.
func (d *dataplaneDomain) UpdateDataplane(dp *Dataplane) {
	d.store(dp.RegisteredName, dp)
}
// SelectDataplane returns the first stored dataplane for which
// dataplaneSelector is true; a nil selector matches any dataplane.
// It returns an error when nothing matches.
func (d *dataplaneDomain) SelectDataplane(dataplaneSelector func(dp *Dataplane) bool) (*Dataplane, error) {
	var rv *Dataplane
	d.kvRange(func(key string, value interface{}) bool {
		dp := value.(*Dataplane)
		if dataplaneSelector == nil {
			rv = dp
			return false
		}
		if dataplaneSelector(dp) {
			rv = dp
			return false
		}
		// keep iterating
		return true
	})
	if rv == nil {
		return nil, fmt.Errorf("no appropriate dataplanes found")
	}
	return rv, nil
}
// SetDataplaneModificationHandler registers h for modification events
// on this domain and returns the function produced by addHandler
// (presumably removing the handler again — confirm in baseDomain).
func (d *dataplaneDomain) SetDataplaneModificationHandler(h *ModificationHandler) func() {
	return d.addHandler(h)
}
package iplib
import (
"net"
"strings"
)
// Net describes an iplib.Net object, the enumerated functions are those that
// are required for comparison, sorting, generic initialization and for
// ancillary functions such as those found in the iid and iana submodules
type Net interface {
	Contains(ip net.IP) bool
	ContainsNet(network Net) bool
	FirstAddress() net.IP
	IP() net.IP
	LastAddress() net.IP
	Mask() net.IPMask
	String() string
	// Version reports the IP version of the netblock (4 or 6).
	Version() int
}
// NewNet returns a new Net object containing ip at the specified masklen. In
// the Net6 case the hostbits value will be set to 0. If the masklen is set
// to an insane value (greater than 32 for IPv4 or 128 for IPv6) an empty Net
// will be returned
func NewNet(ip net.IP, masklen int) Net {
	// Dispatch on the effective version so v4-mapped v6 addresses are
	// treated as v4.
	if EffectiveVersion(ip) == 6 {
		return NewNet6(ip, masklen, 0)
	}
	return NewNet4(ip, masklen)
}
// NewNetBetween takes two net.IP's as input and will return the largest
// netblock that can fit between them (exclusive of the IP's themselves).
// If there is an exact fit it will set a boolean to true, otherwise the bool
// will be false. If no fit can be found (probably because a >= b) an
// ErrNoValidRange will be returned.
func NewNetBetween(a, b net.IP) (Net, bool, error) {
	// a must be strictly less than b.
	if CompareIPs(a, b) != -1 {
		return nil, false, ErrNoValidRange
	}
	// Mixed v4/v6 ranges make no sense.
	if EffectiveVersion(a) != EffectiveVersion(b) {
		return nil, false, ErrNoValidRange
	}
	// Search the inclusive interior (a, b), starting from masklen 1.
	return fitNetworkBetween(NextIP(a), PreviousIP(b), 1)
}
// ByNet implements sort.Interface for iplib.Net based on the
// starting address of the netblock, with the netmask as a tie breaker. So if
// two Networks are submitted and one is a subset of the other, the enclosing
// network will be returned first.
type ByNet []Net

// Len implements sort.interface Len(), returning the length of the
// ByNetwork array
func (bn ByNet) Len() int {
	return len(bn)
}

// Swap implements sort.interface Swap(), swapping two elements in our array
func (bn ByNet) Swap(a, b int) {
	bn[a], bn[b] = bn[b], bn[a]
}
// Less implements sort.Interface Less(), given two elements in the array it
// returns true if the LHS should sort before the RHS. For details on the
// implementation, see CompareNets()
func (bn ByNet) Less(a, b int) bool {
	// Simplified from an `if x { return true }; return false` chain
	// (staticcheck S1008); behavior is unchanged.
	return CompareNets(bn[a], bn[b]) == -1
}
// ParseCIDR returns a new Net object. It is a passthrough to net.ParseCIDR
// and will return any error it generates to the caller. There is one major
// difference between how net.IPNet manages addresses and how ipnet.Net does,
// and this function exposes it: net.ParseCIDR *always* returns an IPv6
// address; if given a v4 address it returns the RFC4291 IPv4-mapped IPv6
// address internally, but treats it like v4 in practice. In contrast
// iplib.ParseCIDR will re-encode it as a v4
func ParseCIDR(s string) (net.IP, Net, error) {
	ip, ipnet, err := net.ParseCIDR(s)
	if err != nil {
		return ip, nil, err
	}
	masklen, _ := ipnet.Mask.Size()
	// Dotted-quad notation in the input forces v4 handling.
	if strings.Contains(s, ".") {
		return ForceIP4(ip), NewNet4(ForceIP4(ip), masklen), err
	}
	// A v4-mapped address with a v4-sized mask is still treated as v4.
	if EffectiveVersion(ip) == 4 && masklen <= 32 {
		return ip, NewNet4(ip, masklen), err
	}
	return ip, NewNet6(ip, masklen, 0), err
}
// fitNetworkBetween recursively narrows the netmask until the network
// anchored at a fits inside the inclusive range [a, b]: it returns the
// first Net whose first address is >= a and last address is < b, or an
// exact fit (bool true) when the network spans precisely [a, b].
//
// NOTE(review): there is no explicit termination guard on mask; the
// recursion relies on a small enough network eventually fitting —
// confirm behavior for pathological inputs (mask beyond 32/128).
func fitNetworkBetween(a, b net.IP, mask int) (Net, bool, error) {
	xnet := NewNet(a, mask)
	va := CompareIPs(xnet.FirstAddress(), a)
	vb := CompareIPs(xnet.LastAddress(), b)
	if va >= 0 && vb < 0 {
		return xnet, false, nil
	}
	if va == 0 && vb == 0 {
		return xnet, true, nil
	}
	return fitNetworkBetween(a, b, mask + 1)
}
package flow
import (
"github.com/Shnifer/magellan/graph"
. "github.com/Shnifer/magellan/v2"
)
// updDrawPointer is the per-point behavior hook: update advances the
// drawer's animation state and drawPoint renders a point into Q.
type updDrawPointer interface {
	update(dt float64)
	drawPoint(p Point, Q *graph.DrawQueue)
}

// Point is a single particle of a Flow.
type Point struct {
	// lifeTime is the particle age in seconds; the point is removed by
	// Flow.Update once it exceeds maxTime.
	lifeTime float64
	maxTime float64
	// pos is the current position, advanced along the velocity field.
	pos V2
	// updDraw animates and renders this point.
	updDraw updDrawPointer
	// attr holds named scalar attributes recomputed every Update.
	attr map[string]float64
}

// Req submits the point to the draw queue, delegating to its drawer.
func (p Point) Req(Q *graph.DrawQueue) {
	p.updDraw.drawPoint(p, Q)
}
// AttrF computes a named attribute value for a point.
type AttrF = func(p Point) float64

// NewAttrFs returns an empty attribute-function map ready to be filled.
func NewAttrFs() map[string]AttrF {
	return make(map[string]AttrF)
}

// VelocityF gives the flow velocity at a position.
type VelocityF func(pos V2) V2

// SpawnPosF produces the position for a newly spawned point.
type SpawnPosF func() (pos V2)

// Params configures a Flow; nil callbacks get defaults in New.
type Params struct {
	// SpawnPeriod is the time between spawns in seconds; values <= 0
	// disable spawning (see Flow.Update).
	SpawnPeriod float64
	SpawnPos SpawnPosF
	SpawnLife func() float64
	SpawnUpdDrawer func() updDrawPointer
	VelocityF VelocityF
	AttrFs map[string]AttrF
}
// Flow is a particle system: points are spawned periodically, advected
// by a velocity field and dropped when their lifetime expires.
type Flow struct {
	params Params
	points []Point
	// spawnT accumulates elapsed time toward the next spawn.
	spawnT float64
	isActiveSpawn bool
	isEmpty bool
}
// New builds a Flow from fp, substituting harmless defaults for nil
// callbacks (zero velocity, spawn at origin, one-second lifetime, empty
// attribute map). Spawning starts active.
func (fp Params) New() *Flow {
	if fp.VelocityF == nil {
		fp.VelocityF = func(V2) V2 { return ZV }
	}
	if fp.SpawnPos == nil {
		fp.SpawnPos = func() V2 { return ZV }
	}
	if fp.SpawnLife == nil {
		fp.SpawnLife = func() float64 { return 1 }
	}
	if fp.AttrFs == nil {
		fp.AttrFs = make(map[string]func(p Point) float64)
	}
	return &Flow{
		params: fp,
		points: []Point{},
		isActiveSpawn: true,
	}
}
// Update advances the particle system by dt seconds: it ages points and
// drops the expired ones, spawns new points while spawning is active,
// moves every point along the velocity field, recomputes attributes and
// ticks each point's drawer.
func (f *Flow) Update(dt float64) {
	// Age points; expired ones are removed by swap-delete with the tail.
	l := len(f.points)
	for i := 0; i < l; i++ {
		f.points[i].lifeTime += dt
		if f.points[i].lifeTime > f.points[i].maxTime {
			f.points[i] = f.points[l-1]
			f.points = f.points[:l-1]
			l--
			// BUG FIX: re-examine index i, which now holds the element
			// swapped in from the tail; previously it was skipped, so
			// it missed both its dt increment and its expiry check.
			i--
		}
	}
	if f.isActiveSpawn && f.params.SpawnPeriod > 0 {
		f.isEmpty = false
		// spawn new points, one per elapsed SpawnPeriod
		f.spawnT += dt
		for f.spawnT >= f.params.SpawnPeriod {
			f.spawnT -= f.params.SpawnPeriod
			f.newPoint()
		}
	} else if l == 0 {
		f.isEmpty = true
	}
	// move each point along the velocity field
	for i, p := range f.points {
		vel := f.params.VelocityF(p.pos)
		p.pos.DoAddMul(vel, dt)
		f.points[i] = p
	}
	// recompute derived attributes
	for i, p := range f.points {
		for name, F := range f.params.AttrFs {
			if F != nil {
				f.points[i].attr[name] = F(p)
			}
		}
	}
	// advance the drawers' animations
	for _, p := range f.points {
		p.updDraw.update(dt)
	}
}
// Req submits every live point to the draw queue.
func (f *Flow) Req(Q *graph.DrawQueue) {
	for _, p := range f.points {
		Q.Append(p)
	}
}

// newPoint spawns a single point using the configured callbacks.
func (f *Flow) newPoint() {
	p := Point{
		maxTime: f.params.SpawnLife(),
		pos: f.params.SpawnPos(),
		updDraw: f.params.SpawnUpdDrawer(),
		attr: make(map[string]float64),
	}
	f.points = append(f.points, p)
}

// SetActive enables or disables spawning of new points; existing points
// keep living until they expire.
func (f *Flow) SetActive(activeSpawn bool) {
	f.isActiveSpawn = activeSpawn
}

// IsEmpty reports whether spawning is off and every point has expired.
func (f *Flow) IsEmpty() bool {
	return f.isEmpty
}
package kubernetes
import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
)
// statefulSetSpecFields returns the Terraform schema for a Kubernetes
// StatefulSet spec. isUpdatable is forwarded to the pod template schema
// to control which template fields may change in place.
func statefulSetSpecFields(isUpdatable bool) map[string]*schema.Schema {
	s := map[string]*schema.Schema{
		"pod_management_policy": {
			Type: schema.TypeString,
			Description: "Controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.",
			Optional: true,
			ForceNew: true,
			Computed: true,
			ValidateFunc: validation.StringInSlice([]string{
				"OrderedReady",
				"Parallel",
			}, false),
		},
		"replicas": {
			Type: schema.TypeInt,
			Optional: true,
			Default: 1,
			Description: "The desired number of replicas of the given Template, in the sense that they are instantiations of the same Template. Value must be a positive integer.",
			ValidateFunc: validatePositiveInteger,
		},
		"revision_history_limit": {
			Type: schema.TypeInt,
			Optional: true,
			ForceNew: true,
			Computed: true,
			ValidateFunc: validatePositiveInteger,
			Description: "The maximum number of revisions that will be maintained in the StatefulSet's revision history. The default value is 10.",
		},
		"selector": {
			Type: schema.TypeList,
			Description: "A label query over pods that should match the replica count. It must match the pod template's labels.",
			Required: true,
			ForceNew: true,
			MaxItems: 1,
			Elem: &schema.Resource{
				Schema: labelSelectorFields(),
			},
		},
		"service_name": {
			Type: schema.TypeString,
			Description: "The name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set.",
			Required: true,
			ForceNew: true,
		},
		"template": {
			Type: schema.TypeList,
			Description: "The object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template.",
			Required: true,
			MaxItems: 1,
			Elem: &schema.Resource{
				Schema: podTemplateFields(isUpdatable),
			},
		},
		// update_strategy mirrors apps/v1 StatefulSetUpdateStrategy:
		// either OnDelete or RollingUpdate with an optional partition.
		"update_strategy": {
			Type: schema.TypeList,
			Description: "The strategy that the StatefulSet controller will use to perform updates.",
			Optional: true,
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"type": {
						Type: schema.TypeString,
						Description: "Indicates the type of the StatefulSet update strategy. Default is RollingUpdate",
						Optional: true,
						Default: "RollingUpdate",
						ValidateFunc: validation.StringInSlice([]string{
							"RollingUpdate",
							"OnDelete",
						}, false),
					},
					"rolling_update": {
						Type: schema.TypeList,
						Description: "RollingUpdate strategy type for StatefulSet",
						Optional: true,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"partition": {
									Type: schema.TypeInt,
									Optional: true,
									Description: "Indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.",
									Default: 0,
								},
							},
						},
					},
				},
			},
		},
		"volume_claim_template": {
			Type: schema.TypeList,
			Optional: true,
			ForceNew: true,
			Description: "A list of claims that pods are allowed to reference. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.",
			Elem: &schema.Resource{
				Schema: persistentVolumeClaimFields(),
			},
		},
	}

	return s
}
package unit
// Mass represents an SI unit of mass. Internally a Mass value is stored
// in kilograms (Kilogram == 1e0), not grams as the constants below make
// clear; use the conversion methods to read other units.
type Mass Unit

// Mass constants: SI prefixes of the gram, plus tonne multiples and
// US/UK customary units defined via the troy grain.
const (
	// SI
	Yoctogram = Gram * 1e-24
	Zeptogram = Gram * 1e-21
	Attogram = Gram * 1e-18
	Femtogram = Gram * 1e-15
	Picogram = Gram * 1e-12
	Nanogram = Gram * 1e-9
	Microgram = Gram * 1e-6
	Milligram = Gram * 1e-3
	Centigram = Gram * 1e-2
	Decigram = Gram * 1e-1
	Gram = Kilogram * 1e-3
	Decagram = Gram * 1e1
	Hectogram = Gram * 1e2
	Kilogram Mass = 1e0
	Megagram = Gram * 1e6
	Gigagram = Gram * 1e9
	Teragram = Gram * 1e12
	Petagram = Gram * 1e15
	Exagram = Gram * 1e18
	Zettagram = Gram * 1e21
	Yottagram = Gram * 1e24

	// non-SI (metric tonne = 1 megagram)
	Tonne = Megagram
	Kilotonne = Gigagram
	Megatonne = Teragram
	Gigatonne = Petagram
	Teratonne = Exagram
	Petatonne = Zettagram
	Exatonne = Yottagram

	// US, avoirdupois (anchored on the troy grain = 64.79891 mg)
	TroyGrain = Milligram * 64.79891
	AvoirdupoisDram = AvoirdupoisOunce / 16
	AvoirdupoisOunce = TroyGrain * 437.5
	AvoirdupoisPound = TroyGrain * 7000
	UsStone = AvoirdupoisPound * 14
	UsQuarter = ShortHundredweight / 4
	ShortHundredweight = AvoirdupoisPound * 100

	// UK
	UkStone = Gram * 6350.29318
	UkQuarter = LongHundredweight / 4
	LongHundredweight = UkStone * 8
	TroyOunce = TroyGrain * 480
	TroyPound = TroyGrain * 5760

	// aliases
	CentalHundredweight = ShortHundredweight // british
	ImperialHundredweight = LongHundredweight // british
)
// SI getters: each method expresses the stored mass (internally held in
// kilograms) in the named SI unit by dividing by that unit's factor.

// Yoctograms returns the mass in yg
func (m Mass) Yoctograms() float64 {
	return float64(m / Yoctogram)
}

// Zeptograms returns the mass in zg
func (m Mass) Zeptograms() float64 {
	return float64(m / Zeptogram)
}

// Attograms returns the mass in ag
func (m Mass) Attograms() float64 {
	return float64(m / Attogram)
}

// Femtograms returns the mass in fg
func (m Mass) Femtograms() float64 {
	return float64(m / Femtogram)
}

// Picograms returns the mass in pg
func (m Mass) Picograms() float64 {
	return float64(m / Picogram)
}

// Nanograms returns the mass in ng
func (m Mass) Nanograms() float64 {
	return float64(m / Nanogram)
}

// Micrograms returns the mass in µg
func (m Mass) Micrograms() float64 {
	return float64(m / Microgram)
}

// Milligrams returns the mass in mg
func (m Mass) Milligrams() float64 {
	return float64(m / Milligram)
}

// Centigrams returns the mass in cg
func (m Mass) Centigrams() float64 {
	return float64(m / Centigram)
}

// Decigrams returns the mass in dg
func (m Mass) Decigrams() float64 {
	return float64(m / Decigram)
}

// Grams returns the mass in g
func (m Mass) Grams() float64 {
	return float64(m / Gram)
}

// Decagrams returns the mass in dag
func (m Mass) Decagrams() float64 {
	return float64(m / Decagram)
}

// Hectograms returns the mass in hg
func (m Mass) Hectograms() float64 {
	return float64(m / Hectogram)
}

// Kilograms returns the mass in kg
func (m Mass) Kilograms() float64 {
	// Kilogram is the base unit (Kilogram == 1), so no division is needed.
	return float64(m)
}

// Megagrams returns the mass in Mg
func (m Mass) Megagrams() float64 {
	return float64(m / Megagram)
}

// Gigagrams returns the mass in Gg
func (m Mass) Gigagrams() float64 {
	return float64(m / Gigagram)
}

// Teragrams returns the mass in Tg
func (m Mass) Teragrams() float64 {
	return float64(m / Teragram)
}

// Petagrams returns the mass in Pg
func (m Mass) Petagrams() float64 {
	return float64(m / Petagram)
}

// Exagrams returns the mass in Eg
func (m Mass) Exagrams() float64 {
	return float64(m / Exagram)
}

// Zettagrams returns the mass in Zg
func (m Mass) Zettagrams() float64 {
	return float64(m / Zettagram)
}

// Yottagrams returns the mass in Yg
func (m Mass) Yottagrams() float64 {
	return float64(m / Yottagram)
}
// Tonnes returns the mass in t (metric tonnes)
func (m Mass) Tonnes() float64 {
	return float64(m / Tonne)
}

// Kilotonnes returns the mass in kt
func (m Mass) Kilotonnes() float64 {
	return float64(m / Kilotonne)
}

// Megatonnes returns the mass in Mt
func (m Mass) Megatonnes() float64 {
	return float64(m / Megatonne)
}

// Gigatonnes returns the mass in Gt
func (m Mass) Gigatonnes() float64 {
	return float64(m / Gigatonne)
}

// Teratonnes returns the mass in Tt
func (m Mass) Teratonnes() float64 {
	return float64(m / Teratonne)
}

// Petatonnes returns the mass in Pt
func (m Mass) Petatonnes() float64 {
	return float64(m / Petatonne)
}

// Exatonnes returns the mass in Et
func (m Mass) Exatonnes() float64 {
	return float64(m / Exatonne)
}
// TroyGrains returns the mass in gr
func (m Mass) TroyGrains() float64 {
	return float64(m / TroyGrain)
}

// AvoirdupoisOunces returns the mass in oz (avoirdupois)
func (m Mass) AvoirdupoisOunces() float64 {
	return float64(m / AvoirdupoisOunce)
}

// AvoirdupoisDrams returns the mass in dr (avoirdupois drams, 1/16 oz)
func (m Mass) AvoirdupoisDrams() float64 {
	return float64(m / AvoirdupoisDram)
}

// AvoirdupoisPounds returns the mass in lb (avoirdupois)
func (m Mass) AvoirdupoisPounds() float64 {
	return float64(m / AvoirdupoisPound)
}

// TroyOunces returns the mass in oz t (troy ounces)
func (m Mass) TroyOunces() float64 {
	return float64(m / TroyOunce)
}

// TroyPounds returns the mass in lb t (troy pounds)
func (m Mass) TroyPounds() float64 {
	return float64(m / TroyPound)
}

// UsStones returns the mass in st (US stone, 14 avoirdupois pounds)
func (m Mass) UsStones() float64 {
	return float64(m / UsStone)
}

// UkStones returns the mass in st (UK stone)
func (m Mass) UkStones() float64 {
	return float64(m / UkStone)
}

// UsQuarters returns the mass in qr av (quarter of a short hundredweight)
func (m Mass) UsQuarters() float64 {
	return float64(m / UsQuarter)
}

// UkQuarters returns the mass in qr (quarter of a long hundredweight)
func (m Mass) UkQuarters() float64 {
	return float64(m / UkQuarter)
}
// LongHundredweights returns the mass in cwt (long/UK hundredweight)
func (m Mass) LongHundredweights() float64 {
	return float64(m / LongHundredweight)
}
// ShortHundredweights returns the mass in cwt
func (m Mass) ShortHundredweights() float64 {
return float64(m / ShortHundredweight)
} | mass.go | 0.849971 | 0.555676 | mass.go | starcoder |
package filter
import (
"fmt"
"strconv"
"sync/atomic"
"time"
"github.com/AdRoll/baker"
log "github.com/sirupsen/logrus"
)
const (
	// formatTimeHelp is the user-facing documentation string registered
	// with the filter description below.
	formatTimeHelp = `
This filter formats and converts date/time strings from one format to another.
It requires the source and destination field names along with 2 format strings, the
first one indicates how to parse the input field while the second how to format it.
The source time parsing can fail if the time value does not match the provided format.
In this situation the filter clears the destination field, thus the user can filter out
those results with a __NotNull__ filter.
Most standard formats are supported out of the box and you can provide your own format
string, see [Go time layout](https://pkg.go.dev/time#pkg-constants).
Supported time format are:
- ` + "`ANSIC`" + ` format: "Mon Jan _2 15:04:05 2006"
- ` + "`UnixDate`" + ` format: "Mon Jan _2 15:04:05 MST 2006"
- ` + "`RubyDate`" + ` format: "Mon Jan 02 15:04:05 -0700 2006"
- ` + "`RFC822`" + ` format: "02 Jan 06 15:04 MST"
- ` + "`RFC822Z`" + ` that is RFC822 with numeric zone, format: "02 Jan 06 15:04 -0700"
- ` + "`RFC850`" + ` format: "Monday, 02-Jan-06 15:04:05 MST"
- ` + "`RFC1123`" + ` format: "Mon, 02 Jan 2006 15:04:05 MST"
- ` + "`RFC1123Z`" + ` that is RFC1123 with numeric zone, format: "Mon, 02 Jan 2006 15:04:05 -0700"
- ` + "`RFC3339`" + ` format: "2006-01-02T15:04:05Z07:00"
- ` + "`RFC3339Nano`" + ` format: "2006-01-02T15:04:05.999999999Z07:00"
- ` + "`unix`" + ` unix epoch in seconds
- ` + "`unixms`" + ` unix epoch in milliseconds
- ` + "`unixns`" + ` unix epoch in nanoseconds
`

	// Names accepted in SrcFormat/DstFormat for the well-known layouts;
	// any other value is treated as a literal Go time layout string.
	ansic = "ANSIC"
	unixdate = "UnixDate"
	rubydate = "RubyDate"
	rfc822 = "RFC822"
	rfc822z = "RFC822Z"
	rfc850 = "RFC850"
	rfc1123 = "RFC1123"
	rfc1123z = "RFC1123Z"
	rfc3339 = "RFC3339"
	rfc3339nano = "RFC3339Nano"
	unix = "unix"
	unixms = "unixms"
	unixns = "unixns"
)
// FormatTimeDesc describes the FormatTime filter (constructor, config
// type and help text) for registration with Baker.
var FormatTimeDesc = baker.FilterDesc{
	Name: "FormatTime",
	New: NewFormatTime,
	Config: &FormatTimeConfig{},
	Help: formatTimeHelp,
}
// FormatTimeConfig holds the user configuration of the FormatTime filter;
// field semantics and defaults are described by the struct tags.
type FormatTimeConfig struct {
	SrcField string `help:"Field name of the input time" required:"true"`
	DstField string `help:"Field name of the output time" required:"true"`
	SrcFormat string `help:"Format of the input time" required:"false" default:"UnixDate"`
	DstFormat string `help:"Format of the output time" required:"false" default:"unixms"`
}
// fillDefaults replaces unset (empty) format fields with their documented
// defaults: UnixDate for the source and unixms for the destination.
func (cfg *FormatTimeConfig) fillDefaults() {
	switch {
	case cfg.SrcFormat == "" && cfg.DstFormat == "":
		cfg.SrcFormat, cfg.DstFormat = unixdate, unixms
	case cfg.SrcFormat == "":
		cfg.SrcFormat = unixdate
	case cfg.DstFormat == "":
		cfg.DstFormat = unixms
	}
}
// FormatTime is a filter that parses a time value from a source field and
// writes it, re-formatted, to a destination field.
type FormatTime struct {
	src baker.FieldIndex // index of the field holding the input time
	dst baker.FieldIndex // index of the field receiving the output time
	parse func(t []byte) (time.Time, error) // parses the raw source value
	format func(t time.Time) []byte // renders the parsed time
	// Shared state
	numProcessedLines int64 // total records seen; updated atomically
}
// NewFormatTime builds a FormatTime filter from its decoded configuration,
// resolving the source and destination field names to indexes and
// pre-building the parse/format functions for the configured layouts.
func NewFormatTime(cfg baker.FilterParams) (baker.Filter, error) {
	dcfg := cfg.DecodedConfig.(*FormatTimeConfig)
	dcfg.fillDefaults()

	src, ok := cfg.FieldByName(dcfg.SrcField)
	if !ok {
		return nil, fmt.Errorf("unknown field %q", dcfg.SrcField)
	}
	dst, ok := cfg.FieldByName(dcfg.DstField)
	if !ok {
		return nil, fmt.Errorf("unknown field %q", dcfg.DstField)
	}

	return &FormatTime{
		src:    src,
		dst:    dst,
		parse:  genParseFun(dcfg.SrcFormat),
		format: genFormatFun(dcfg.DstFormat),
	}, nil
}
// Stats returns filter metrics, currently only the total number of
// processed records; the counter is read atomically because Process
// updates it with atomic operations.
func (f *FormatTime) Stats() baker.FilterStats {
	return baker.FilterStats{
		NumProcessedLines: atomic.LoadInt64(&f.numProcessedLines),
	}
}
// Process parses the source field with the configured input format and
// writes the re-formatted value to the destination field. On parse failure
// the destination is cleared (set to nil) so downstream filters such as
// NotNull can drop the record; the record is always forwarded to next.
func (f *FormatTime) Process(l baker.Record, next func(baker.Record)) {
	atomic.AddInt64(&f.numProcessedLines, 1)

	t, err := f.parse(l.Get(f.src))
	if err != nil {
		log.Errorf("can't parse time: %v", err)
		l.Set(f.dst, nil)
	} else {
		l.Set(f.dst, f.format(t))
	}
	next(l)
}
// formatToLayout maps a well-known format name (e.g. "RFC3339") to the
// corresponding Go reference-time layout. Any unrecognized value is
// returned unchanged, assuming it already is a Go time layout string.
func formatToLayout(format string) string {
	switch format {
	case ansic:
		return time.ANSIC
	case unixdate:
		return time.UnixDate
	case rubydate:
		return time.RubyDate
	case rfc822:
		return time.RFC822
	case rfc822z:
		return time.RFC822Z
	case rfc850:
		return time.RFC850
	case rfc1123:
		return time.RFC1123
	case rfc1123z:
		return time.RFC1123Z
	case rfc3339:
		return time.RFC3339
	case rfc3339nano:
		return time.RFC3339Nano
	default:
		return format
	}
}
// genParseFun returns a function that parses a raw byte slice into a
// time.Time according to format: the three unix* variants are decoded as
// integer epochs (seconds, milliseconds or nanoseconds), anything else is
// parsed with time.Parse using the layout from formatToLayout.
func genParseFun(format string) func(b []byte) (time.Time, error) {
	// The unix* variants share the integer parsing step and differ only
	// in how the parsed number is converted into a time.Time.
	epoch := func(b []byte, conv func(int64) time.Time) (time.Time, error) {
		n, err := strconv.ParseInt(string(b), 10, 64)
		if err != nil {
			return time.Time{}, err
		}
		return conv(n), nil
	}

	switch format {
	case unix:
		return func(b []byte) (time.Time, error) {
			return epoch(b, func(sec int64) time.Time { return time.Unix(sec, 0) })
		}
	case unixms:
		return func(b []byte) (time.Time, error) {
			return epoch(b, func(msec int64) time.Time { return time.Unix(0, msec*int64(time.Millisecond)) })
		}
	case unixns:
		return func(b []byte) (time.Time, error) {
			return epoch(b, func(nsec int64) time.Time { return time.Unix(0, nsec) })
		}
	}

	layout := formatToLayout(format)
	return func(b []byte) (time.Time, error) {
		t, err := time.Parse(layout, string(b))
		if err != nil {
			return time.Time{}, err
		}
		return t, nil
	}
}
func genFormatFun(format string) func(t time.Time) []byte {
switch format {
case unix:
return func(t time.Time) []byte {
return []byte(strconv.FormatInt(t.Unix(), 10))
}
case unixms:
return func(t time.Time) []byte {
return []byte(strconv.FormatInt(t.UnixNano()/int64(time.Millisecond), 10))
}
case unixns:
return func(t time.Time) []byte {
return []byte(strconv.FormatInt(t.UnixNano(), 10))
}
default:
layout := formatToLayout(format)
return func(t time.Time) []byte {
return []byte(t.Format(layout))
}
}
} | filter/format_time.go | 0.676513 | 0.472014 | format_time.go | starcoder |
package fields
import (
"errors"
"fmt"
"github.com/robertkrimen/otto"
)
// Functions decodes, converts and validates payload using JavaScript functions
// evaluated with the otto interpreter. Any of the three functions may be
// left empty; see the individual methods for the resulting behavior.
type Functions struct {
	// Decoder is a JavaScript function that accepts the payload as byte array and
	// returns an object containing the decoded values
	Decoder string
	// Converter is a JavaScript function that accepts the data as decoded by
	// Decoder and returns an object containing the converted values
	Converter string
	// Validator is a JavaScript function that validates the data is converted by
	// Converter and returns a boolean value indicating the validity of the data
	Validator string
}
// Decode runs the Decoder JavaScript function against the raw payload and
// returns the resulting object as a map.
// It returns an error if Decoder is unset, if the script fails to run, or
// if the script does not evaluate to a JavaScript object whose exported Go
// value is a map.
func (f *Functions) Decode(payload []byte) (map[string]interface{}, error) {
	if f.Decoder == "" {
		return nil, errors.New("Decoder function not set")
	}
	vm := otto.New()
	vm.Set("payload", payload)
	value, err := vm.Run(fmt.Sprintf("(%s)(payload)", f.Decoder))
	if err != nil {
		return nil, err
	}
	if !value.IsObject() {
		return nil, errors.New("Decoder does not return an object")
	}
	v, _ := value.Export()
	// IsObject is also true for arrays and other non-plain objects, whose
	// exported Go value is not a map; a checked assertion avoids a panic.
	m, ok := v.(map[string]interface{})
	if !ok {
		return nil, errors.New("Decoder does not return an object")
	}
	return m, nil
}
// Convert converts the values in the specified map to another map using the
// Converter function. If the Converter function is not set, this function
// returns the data as-is.
// It returns an error if the script fails to run or does not evaluate to a
// JavaScript object whose exported Go value is a map.
func (f *Functions) Convert(data map[string]interface{}) (map[string]interface{}, error) {
	if f.Converter == "" {
		return data, nil
	}
	vm := otto.New()
	vm.Set("data", data)
	value, err := vm.Run(fmt.Sprintf("(%s)(data)", f.Converter))
	if err != nil {
		return nil, err
	}
	if !value.IsObject() {
		return nil, errors.New("Converter does not return an object")
	}
	v, _ := value.Export()
	// IsObject is also true for arrays and other non-plain objects, whose
	// exported Go value is not a map; a checked assertion avoids a panic.
	m, ok := v.(map[string]interface{})
	if !ok {
		return nil, errors.New("Converter does not return an object")
	}
	return m, nil
}
// Validate validates the values in the specified map using the Validator
// function. If the Validator function is not set, this function returns true.
// It returns an error if the script fails to run or does not evaluate to a
// boolean.
func (f *Functions) Validate(data map[string]interface{}) (bool, error) {
	if f.Validator == "" {
		return true, nil
	}
	vm := otto.New()
	// Expose the data to the script and invoke the validator as
	// (validator)(data).
	vm.Set("data", data)
	value, err := vm.Run(fmt.Sprintf("(%s)(data)", f.Validator))
	if err != nil {
		return false, err
	}
	if !value.IsBoolean() {
		return false, errors.New("Validator does not return a boolean")
	}
	return value.ToBoolean()
}
// Process decodes the specified payload, converts it and test the validity
func (f *Functions) Process(payload []byte) (map[string]interface{}, bool, error) {
decoded, err := f.Decode(payload)
if err != nil {
return nil, false, err
}
converted, err := f.Convert(decoded)
if err != nil {
return nil, false, err
}
valid, err := f.Validate(converted)
return converted, valid, err
} | core/adapters/fields/functions.go | 0.845273 | 0.534552 | functions.go | starcoder |
package debug

// LogBuildDetails is the no-op production alternative for the function of
// the same name present in the debug build.
func LogBuildDetails() {}

// StartInjectionServer is the no-op production alternative for the
// function of the same name present in the debug build.
func StartInjectionServer() {}
// The methods below are no-op production alternatives for the functions of
// the same names present in the debug build: in production, error
// injection is compiled out, so every check reports false.

// IsCSPCDeleteCollectionErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPCDeleteCollectionErrorInjected() bool {
	return false
}

// IsCSPCDeleteErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPCDeleteErrorInjected() bool {
	return false
}

// IsCSPCListErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPCListErrorInjected() bool {
	return false
}

// IsCSPCGetErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPCGetErrorInjected() bool {
	return false
}

// IsCSPCCreateErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPCCreateErrorInjected() bool {
	return false
}

// IsCSPCUpdateErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPCUpdateErrorInjected() bool {
	return false
}

// IsCSPCPatchErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPCPatchErrorInjected() bool {
	return false
}

// IsCSPIDeleteCollectionErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPIDeleteCollectionErrorInjected() bool {
	return false
}

// IsCSPIDeleteErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPIDeleteErrorInjected() bool {
	return false
}

// IsCSPIListErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPIListErrorInjected() bool {
	return false
}

// IsCSPIGetErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPIGetErrorInjected() bool {
	return false
}

// IsCSPICreateErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPICreateErrorInjected() bool {
	return false
}

// IsCSPIUpdateErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPIUpdateErrorInjected() bool {
	return false
}

// IsCSPIPatchErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsCSPIPatchErrorInjected() bool {
	return false
}
// Deployment checks: no-op production alternatives for the debug-build
// functions of the same names; error injection is always disabled here.

// IsDeploymentDeleteCollectionErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsDeploymentDeleteCollectionErrorInjected() bool {
	return false
}

// IsDeploymentDeleteErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsDeploymentDeleteErrorInjected() bool {
	return false
}

// IsDeploymentListErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsDeploymentListErrorInjected() bool {
	return false
}

// IsDeploymentGetErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsDeploymentGetErrorInjected() bool {
	return false
}

// IsDeploymentCreateErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsDeploymentCreateErrorInjected() bool {
	return false
}

// IsDeploymentUpdateErrorInjected always reports false in production builds.
func (ei *ErrorInjection) IsDeploymentUpdateErrorInjected() bool {
	return false
}
// IsDeploymentPatchErrorInjected is production alternative for the same function in present in debug build.
func (ei *ErrorInjection) IsDeploymentPatchErrorInjected() bool {
return false
} | pkg/debug/release.go | 0.62395 | 0.494507 | release.go | starcoder |
package internal
import (
"encoding/binary"
)
// Buffer is a variable-sized buffer of bytes with Read and Write methods,
// maintained as a byte slice with independent reader and writer indexes.
// (Note: Buffer is an interface, so its zero value is nil; use NewBuffer
// or NewBufferWrapper to obtain a usable instance.)
type Buffer interface {
	// ReadableBytes returns how many bytes are available for reading.
	ReadableBytes() uint32

	// WritableBytes returns how many bytes can still be written before
	// the capacity is exhausted.
	WritableBytes() uint32

	// Capacity returns the capacity of the buffer's underlying byte slice,
	// that is, the total space allocated for the buffer's data.
	Capacity() uint32

	// IsWritable reports whether at least one byte can be written.
	IsWritable() bool

	// Read returns the next size bytes and advances the reader index.
	// The returned slice aliases the buffer's internal storage.
	Read(size uint32) []byte

	// Get returns size bytes starting at the absolute index readerIndex
	// without moving the reader index.
	Get(readerIndex uint32, size uint32) []byte

	// ReadableSlice returns the not-yet-read portion of the buffer.
	ReadableSlice() []byte

	// WritableSlice returns the free portion of the buffer, to be filled
	// by the caller and committed with WrittenBytes.
	WritableSlice() []byte

	// WrittenBytes advance the writer index when data was written in a slice
	WrittenBytes(size uint32)

	// MoveToFront copy the available portion of data at the beginning of the buffer
	MoveToFront()

	// ReadUint16 reads a big-endian uint16 and advances the reader index.
	ReadUint16() uint16
	// ReadUint32 reads a big-endian uint32 and advances the reader index.
	ReadUint32() uint32

	// WriteUint16 appends n in big-endian order, growing if needed.
	WriteUint16(n uint16)
	// WriteUint32 appends n in big-endian order, growing if needed.
	WriteUint32(n uint32)

	// WriterIndex returns the current writer index.
	WriterIndex() uint32
	// ReaderIndex returns the current reader index.
	ReaderIndex() uint32

	// Write appends s, growing the buffer if needed.
	Write(s []byte)

	// Put copies s at the absolute index writerIdx without moving the
	// writer index.
	Put(writerIdx uint32, s []byte)
	// PutUint32 writes n in big-endian order at the absolute index
	// writerIdx without moving the writer index.
	PutUint32(n uint32, writerIdx uint32)

	// Resize reallocates the buffer to newSize, preserving unread data.
	Resize(newSize uint32)

	// Clear will clear the current buffer data.
	Clear()
}

// buffer is the slice-backed implementation of Buffer. Bytes between
// readerIdx and writerIdx are readable; bytes from writerIdx to the
// slice capacity are writable.
type buffer struct {
	data []byte
	readerIdx uint32
	writerIdx uint32
}
// NewBuffer creates an empty Buffer with capacity for size bytes
// (the previous comment wrongly described a content-wrapping constructor;
// that is NewBufferWrapper below).
func NewBuffer(size int) Buffer {
	return &buffer{
		data: make([]byte, size),
		readerIdx: 0,
		writerIdx: 0,
	}
}

// NewBufferWrapper wraps buf in a Buffer whose entire contents are
// immediately readable; buf is aliased, not copied.
func NewBufferWrapper(buf []byte) Buffer {
	return &buffer{
		data: buf,
		readerIdx: 0,
		writerIdx: uint32(len(buf)),
	}
}
// ReadableBytes returns the number of unread bytes (writer minus reader index).
func (b *buffer) ReadableBytes() uint32 {
	return b.writerIdx - b.readerIdx
}

// WritableBytes returns the remaining capacity past the writer index.
func (b *buffer) WritableBytes() uint32 {
	return uint32(cap(b.data)) - b.writerIdx
}

// Capacity returns the total allocated capacity of the underlying slice.
func (b *buffer) Capacity() uint32 {
	return uint32(cap(b.data))
}

// IsWritable reports whether at least one byte can be written.
func (b *buffer) IsWritable() bool {
	return b.WritableBytes() > 0
}

// Read returns the next size bytes and advances the reader index.
// The returned slice aliases b.data; it is not a copy.
func (b *buffer) Read(size uint32) []byte {
	res := b.data[b.readerIdx : b.readerIdx+size]
	b.readerIdx += size
	return res
}

// Get returns size bytes at the absolute index readerIdx without moving
// the reader index. The returned slice aliases b.data.
func (b *buffer) Get(readerIdx uint32, size uint32) []byte {
	return b.data[readerIdx : readerIdx+size]
}

// ReadableSlice returns the unread region [readerIdx, writerIdx).
func (b *buffer) ReadableSlice() []byte {
	return b.data[b.readerIdx:b.writerIdx]
}

// WritableSlice returns the free region starting at writerIdx; callers
// fill it and then commit with WrittenBytes.
func (b *buffer) WritableSlice() []byte {
	return b.data[b.writerIdx:]
}

// WrittenBytes advances the writer index after data was written directly
// into WritableSlice.
func (b *buffer) WrittenBytes(size uint32) {
	b.writerIdx += size
}

// WriterIndex returns the current writer index.
func (b *buffer) WriterIndex() uint32 {
	return b.writerIdx
}

// ReaderIndex returns the current reader index.
func (b *buffer) ReaderIndex() uint32 {
	return b.readerIdx
}

// MoveToFront shifts the unread bytes to the start of the buffer, making
// the maximum space writable again. copy handles the overlapping ranges.
func (b *buffer) MoveToFront() {
	size := b.ReadableBytes()
	copy(b.data, b.Read(size))
	b.readerIdx = 0
	b.writerIdx = size
}
// Resize reallocates the buffer to newSize bytes, moving the unread bytes
// to the front and resetting the reader index to 0.
// NOTE(review): newSize is assumed to be >= ReadableBytes(); a smaller
// value would silently truncate data and leave writerIdx past the new
// capacity — confirm all call sites only ever grow the buffer.
func (b *buffer) Resize(newSize uint32) {
	newData := make([]byte, newSize)
	size := b.ReadableBytes()
	copy(newData, b.Read(size))
	b.data = newData
	b.readerIdx = 0
	b.writerIdx = size
}

// resizeIfNeeded grows the buffer so that at least spaceNeeded bytes are
// writable. Growth is at least 1.5x the current capacity, amortizing the
// cost of repeated small writes.
func (b *buffer) resizeIfNeeded(spaceNeeded int) {
	if b.WritableBytes() < uint32(spaceNeeded) {
		capacityNeeded := uint32(cap(b.data) + spaceNeeded)
		minCapacityIncrease := uint32(cap(b.data) * 3 / 2)
		if capacityNeeded < minCapacityIncrease {
			capacityNeeded = minCapacityIncrease
		}
		b.Resize(capacityNeeded)
	}
}
// ReadUint32 reads a big-endian uint32 and advances the reader index by 4.
func (b *buffer) ReadUint32() uint32 {
	return binary.BigEndian.Uint32(b.Read(4))
}

// ReadUint16 reads a big-endian uint16 and advances the reader index by 2.
func (b *buffer) ReadUint16() uint16 {
	return binary.BigEndian.Uint16(b.Read(2))
}

// WriteUint32 appends n in big-endian order, growing the buffer if needed.
func (b *buffer) WriteUint32(n uint32) {
	b.resizeIfNeeded(4)
	binary.BigEndian.PutUint32(b.WritableSlice(), n)
	b.writerIdx += 4
}

// PutUint32 overwrites 4 bytes at the absolute index idx in big-endian
// order without moving the writer index.
func (b *buffer) PutUint32(n uint32, idx uint32) {
	binary.BigEndian.PutUint32(b.data[idx:], n)
}

// WriteUint16 appends n in big-endian order, growing the buffer if needed.
func (b *buffer) WriteUint16(n uint16) {
	b.resizeIfNeeded(2)
	binary.BigEndian.PutUint16(b.WritableSlice(), n)
	b.writerIdx += 2
}
// Write appends s to the buffer, growing it if necessary.
func (b *buffer) Write(s []byte) {
	b.resizeIfNeeded(len(s))
	copy(b.WritableSlice(), s)
	b.writerIdx += uint32(len(s))
}

// Put copies s into the buffer at the absolute index writerIdx without
// moving the writer index.
// NOTE(review): copy silently truncates if s extends past the slice end —
// callers are assumed to have reserved the space beforehand.
func (b *buffer) Put(writerIdx uint32, s []byte) {
	copy(b.data[writerIdx:], s)
}
func (b *buffer) Clear() {
b.readerIdx = 0
b.writerIdx = 0
} | pulsar/internal/buffer.go | 0.73782 | 0.535888 | buffer.go | starcoder |
package linemath
import (
"errors"
"fmt"
"math"
)
// Vector3 is a three-component float32 vector; it is also used to carry
// Euler angles (see ToRotationMatrix).
type Vector3 struct {
	X float32
	Y float32
	Z float32
}

// CreateVector3 creates a new vector with the given components.
func CreateVector3(x, y, z float32) Vector3 {
	return Vector3{
		x,
		y,
		z,
	}
}

// Vector3_Zero returns the zero vector.
func Vector3_Zero() Vector3 {
	return Vector3{
		0,
		0,
		0,
	}
}

// Vector3_Invalid returns the "invalid" sentinel value (all components set
// to math.MaxFloat32), marking a vector that has not been assigned yet.
func Vector3_Invalid() Vector3 {
	return Vector3{
		math.MaxFloat32,
		math.MaxFloat32,
		math.MaxFloat32,
	}
}

// String implements fmt.Stringer, rendering the components as
// "{X:… Y:… Z:…}".
func (v Vector3) String() string {
	return fmt.Sprintf("{X:%f Y:%f Z:%f}", v.X, v.Y, v.Z)
}

// ToV2 projects the vector onto the XY plane, dropping the Z component.
func (v Vector3) ToV2() Vector2 {
	return Vector2{X: v.X, Y: v.Y}
}
// IsInValid reports whether the vector still holds the Vector3_Invalid
// sentinel, i.e. it has never been assigned.
func (v Vector3) IsInValid() bool {
	return v.IsEqual(Vector3_Invalid())
}

// IsZero reports whether the vector equals the zero vector.
func (v Vector3) IsZero() bool {
	return v.IsEqual(Vector3_Zero())
}

// IsEqual reports whether v and r are component-wise equal.
// NOTE(review): the tolerance is math.SmallestNonzeroFloat32 (the smallest
// positive denormal, ~1.4e-45), making this effectively an exact
// comparison; if an epsilon comparison was intended, the threshold should
// be much larger — confirm with the original author.
func (v Vector3) IsEqual(r Vector3) bool {
	if v.X-r.X > math.SmallestNonzeroFloat32 ||
		v.X-r.X < -math.SmallestNonzeroFloat32 ||
		v.Y-r.Y > math.SmallestNonzeroFloat32 ||
		v.Y-r.Y < -math.SmallestNonzeroFloat32 ||
		v.Z-r.Z > math.SmallestNonzeroFloat32 ||
		v.Z-r.Z < -math.SmallestNonzeroFloat32 {
		return false
	}
	return true
}
// Add returns the component-wise sum v + o.
func (v Vector3) Add(o Vector3) Vector3 {
	return Vector3{v.X + o.X, v.Y + o.Y, v.Z + o.Z}
}

// AddS adds o to v in place.
func (v *Vector3) AddS(o Vector3) {
	v.X += o.X
	v.Y += o.Y
	v.Z += o.Z
}

// Sub returns the component-wise difference v - o.
func (v Vector3) Sub(o Vector3) Vector3 {
	return Vector3{v.X - o.X, v.Y - o.Y, v.Z - o.Z}
}

// Distance returns the 3D Euclidean distance between v and o.
func (v Vector3) Distance(o Vector3) float32 {
	return v.Sub(o).Len()
}

// DistanceV2 returns the 2D Euclidean distance between v and o projected
// onto the XY plane (Z is ignored).
func (v Vector3) DistanceV2(o Vector3) float32 {
	return v.ToV2().Sub(o.ToV2()).Len()
}

// SubS subtracts o from v in place.
func (v *Vector3) SubS(o Vector3) {
	v.X -= o.X
	v.Y -= o.Y
	v.Z -= o.Z
}

// Mul returns v scaled by o.
func (v Vector3) Mul(o float32) Vector3 {
	return Vector3{v.X * o, v.Y * o, v.Z * o}
}

// MulS scales v by o in place.
func (v *Vector3) MulS(o float32) {
	v.X *= o
	v.Y *= o
	v.Z *= o
}

// Cross returns the cross product v × o.
func (v Vector3) Cross(o Vector3) Vector3 {
	return Vector3{v.Y*o.Z - v.Z*o.Y, v.Z*o.X - v.X*o.Z, v.X*o.Y - v.Y*o.X}
}

// Dot returns the dot product v · o.
func (v Vector3) Dot(o Vector3) float32 {
	return v.X*o.X + v.Y*o.Y + v.Z*o.Z
}
// Len returns the Euclidean length (magnitude) of the vector.
func (v Vector3) Len() float32 {
	return float32(math.Sqrt(float64(v.Dot(v))))
}

// AngleFromXYFloor returns the elevation angle of the vector above the XY
// plane, in degrees. It returns an error for a zero-length vector, and
// clamps the asin argument to [-1, 1] (via Clamp64) to guard against
// floating-point drift.
func (v Vector3) AngleFromXYFloor() (float32, error) {
	if v.Len() == 0 {
		return 0, errors.New("zero len")
	}
	a := math.Asin(Clamp64(float64(v.Z/v.Len()), -1, 1))
	return float32(a) * 180 / math.Pi, nil
}

// AngleFromXYFloorEx is like AngleFromXYFloor but returns the angle in
// radians instead of degrees.
func (v Vector3) AngleFromXYFloorEx() (float32, error) {
	if v.Len() == 0 {
		return 0, errors.New("zero len")
	}
	a := math.Asin(Clamp64(float64(v.Z/v.Len()), -1, 1))
	return float32(a), nil
}
// Normalize scales v in place to unit length. A (near-)zero vector is left
// unchanged to avoid division by zero.
func (v *Vector3) Normalize() {
	len := v.Len()
	if len < math.SmallestNonzeroFloat32 {
		return
	}
	v.X = v.X / len
	v.Y = v.Y / len
	v.Z = v.Z / len
}

// Normalized returns a unit-length copy of v; a (near-)zero vector yields
// the zero vector.
func (v Vector3) Normalized() Vector3 {
	len := v.Len()
	if len < math.SmallestNonzeroFloat32 {
		return Vector3_Zero()
	}
	newv := Vector3{
		X: v.X / len,
		Y: v.Y / len,
		Z: v.Z / len,
	}
	return newv
}
func (v Vector3) ToRotationMatrix() Matrix3f {
var (
SP float64 = math.Sin(float64(v.Y))
SY float64 = math.Sin(float64(v.Z))
SR float64 = math.Sin(float64(v.X))
CP float64 = math.Cos(float64(v.Y))
CY float64 = math.Cos(float64(v.Z))
CR float64 = math.Cos(float64(v.X))
)
result := Matrix3f{
[3][3]float64{
{CP * CY, CP * SY, SP},
{SR*SP*CY - CR*SY, SR*SP*SY + CR*CY, -SR * CP},
{-(CR*SP*CY + SR*SY), CY*SR - CR*SP*SY, CR * CP},
},
}
// P:Y Y:Z R:X
return result
} | base/linemath/vector3.go | 0.652352 | 0.612252 | vector3.go | starcoder |
// Package padding provides functions for padding blocks of plain text in the
// context of block cipher mode of encryption like ECB or CBC.
package padding
import (
"bytes"
"errors"
)
// Padding is implemented by types that can add and remove block-cipher
// padding (PKCS #5 / PKCS #7 style) from byte slices, for use with block
// cipher modes such as ECB or CBC.
type Padding interface {
	Pad(p []byte) ([]byte, error)
	Unpad(p []byte) ([]byte, error)
}

// Padder implements Padding for a fixed cipher block size.
type Padder struct {
	blockSize int
}

// NewPkcs5Padding returns a Padding for PKCS #5, whose block size is fixed
// at 8 bytes (64 bits).
// See https://tools.ietf.org/html/rfc2898 PKCS #5: Password-Based
// Cryptography Specification Version 2.0.
func NewPkcs5Padding() Padding {
	return NewPkcs7Padding(8)
}

// NewPkcs7Padding returns a Padding for PKCS #7 with the given block size
// (e.g. 16 bytes / 128 bits for AES).
// See https://tools.ietf.org/html/rfc2315 PKCS #7: Cryptographic Message
// Syntax Version 1.5.
func NewPkcs7Padding(blockSize int) Padding {
	return &Padder{blockSize: blockSize}
}

// Pad appends padding to buf so that its length becomes an exact multiple
// of the block size. Every padding octet holds the pad length; input that
// is already block-aligned receives one full extra block of padding, so
// Unpad can always recover the original data unambiguously.
func (p *Padder) Pad(buf []byte) ([]byte, error) {
	padLen := p.blockSize - len(buf)%p.blockSize
	return append(buf, bytes.Repeat([]byte{byte(padLen)}, padLen)...), nil
}

// Unpad strips padding previously added by Pad, validating it along the
// way. It returns an error on empty input, an out-of-range pad length, or
// inconsistent padding octets.
func (p *Padder) Unpad(buf []byte) ([]byte, error) {
	if len(buf) == 0 {
		return nil, errors.New("cryptgo/padding: invalid padding size")
	}
	padLen := int(buf[len(buf)-1])
	if padLen == 0 || padLen > len(buf) || padLen > p.blockSize {
		return nil, errors.New("cryptgo/padding: invalid padding size")
	}
	data, pad := buf[:len(buf)-padLen], buf[len(buf)-padLen:]
	for _, octet := range pad {
		if int(octet) != padLen {
			return nil, errors.New("cryptgo/padding: invalid padding")
		}
	}
	return data, nil
}
package app
import (
"fmt"
"regexp"
"sort"
"strings"
"github.com/spf13/cobra"
)
// e2eModeOptions bundles the settings defining a named E2E run mode.
type e2eModeOptions struct {
	name string // mode identifier, e.g. "quick"
	desc string // human-readable description of the mode
	// focus and skip are regular expressions selecting/excluding tests
	// (presumably passed to the E2E runner as focus/skip filters — the
	// consumers are outside this file chunk, confirm before relying on it).
	focus, skip string
	parallel bool // whether the selected tests may run in parallel
}
const (
	// E2eModeQuick runs a single E2E test and the systemd log tests.
	E2eModeQuick string = "quick"
	// E2eModeNonDisruptiveConformance runs all of the `Conformance` E2E tests which are not marked as disruptive and the systemd log tests.
	E2eModeNonDisruptiveConformance string = "non-disruptive-conformance"
	// E2eModeCertifiedConformance runs all of the `Conformance` E2E tests and the systemd log tests.
	E2eModeCertifiedConformance string = "certified-conformance"

	// nonDisruptiveSkipList should generally just need to skip disruptive tests since upstream
	// will disallow the other types of tests from being tagged as Conformance. However, in v1.16
	// two disruptive tests were not marked as such, meaning we needed to specify them here to ensure
	// user workload safety. See https://github.com/kubernetes/kubernetes/issues/82663
	// and https://github.com/kubernetes/kubernetes/issues/82787
	nonDisruptiveSkipList = `\[Disruptive\]|NoExecuteTaintManager`

	// conformanceFocus selects all tests tagged as Conformance.
	conformanceFocus = `\[Conformance\]`
	// quickFocus selects the single test used by the quick mode.
	quickFocus = "Pods should be submitted and removed"

	// E2eModeConformanceLite runs the conformance tests minus the serial,
	// slow and disruptive ones enumerated in liteSkips below.
	E2eModeConformanceLite = "conformance-lite"
)
var (
	// liteSkips lists tags and full test names excluded by the
	// conformance-lite mode. genLiteSkips escapes these literally and joins
	// them into a single E2E_SKIP regexp, so entries here are plain strings,
	// not patterns.
	liteSkips = []string{
		"Serial", "Slow", "Disruptive",
		"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance]",
		"[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance]",
		"[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]",
		"[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance]",
		"[sig-network] DNS should provide DNS for services [Conformance]",
		"[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]",
		"[sig-apps] Job should delete a job [Conformance]",
		"[sig-network] DNS should provide DNS for ExternalName services [Conformance]",
		"[sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance]",
		"[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]",
		"[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]",
		"[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]",
		"[sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance]",
		"[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]",
		"[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]",
		"[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]",
		"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]",
		`[sig-node] Probing container should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
		"[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]",
		"[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]",
		"[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]",
		"[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]",
		"[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]",
		"[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]",
		"[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]",
		"[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]",
		"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]",
		"[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance]",
		"[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]",
		"[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]",
		"[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance]",
		"[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance]",
		"[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance]",
		"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]",
		"[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]",
		"[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]",
		"[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance]",
		`[sig-node] Probing container should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
		"[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]",
		"[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]",
		"[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance]",
		"[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]",
		"[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]",
		`[k8s.io] Probing container should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
		`[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]`,
		`[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]`,
		`[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]`,
		`[k8s.io] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]`,
		`[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]`,
		`[k8s.io] Probing container should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]`,
		`[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance]`,
		`[sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance]`,
		`[k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]`,
		`[k8s.io] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]`,
		`[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]`,
		`[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]`,
	}
)
// validModes is a map of the various valid modes. Name is duplicated as the
// key and in the e2eModeOptions itself. Callers look modes up by name and
// list them via validE2eModes.
var validModes = map[string]e2eModeOptions{
	E2eModeQuick: {
		name: E2eModeQuick, focus: quickFocus,
		desc: "Quick mode runs a single test to create and destroy a pod. Fastest way to check basic cluster operation.",
	},
	E2eModeNonDisruptiveConformance: {
		name: E2eModeNonDisruptiveConformance, focus: conformanceFocus, skip: nonDisruptiveSkipList,
		desc: "Non-destructive conformance mode runs all of the conformance tests except those that would disrupt other cluster operations (e.g. tests that may cause nodes to be restarted or impact cluster permissions).",
	},
	E2eModeCertifiedConformance: {
		name: E2eModeCertifiedConformance, focus: conformanceFocus,
		desc: "Certified conformance mode runs the entire conformance suite, even disruptive tests. This is typically run in a dev environment to earn the CNCF Certified Kubernetes status.",
	},
	E2eModeConformanceLite: {
		name: E2eModeConformanceLite, focus: conformanceFocus, skip: genLiteSkips(), parallel: true,
		desc: "An unofficial mode of running the e2e tests which removes some of the longest running tests so that your tests can complete in the fastest time possible while maximizing coverage.",
	},
}
// genLiteSkips builds the E2E_SKIP regexp for the conformance-lite mode by
// OR-ing together the literal entries of liteSkips, escaping each so that
// regexp metacharacters in test names match verbatim.
func genLiteSkips() string {
	parts := make([]string, 0, len(liteSkips))
	for _, name := range liteSkips {
		escaped := regexp.QuoteMeta(name)
		// Literal double quotes would upset the downstream regexp handling,
		// so widen them to single-character wildcards instead.
		parts = append(parts, strings.ReplaceAll(escaped, `"`, "."))
	}
	return strings.Join(parts, "|")
}
// validE2eModes returns the names of all registered e2e modes in sorted
// order so output is deterministic despite map iteration being random.
func validE2eModes() []string {
	// Pre-size the slice: the number of modes is known up front.
	keys := make([]string, 0, len(validModes))
	for key := range validModes {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
// modesOptions holds the flags of the `modes` command.
type modesOptions struct {
	// verbose disables truncation of the printed mode details.
	verbose bool
}
// NewCmdModes returns the cobra command that prints every available e2e
// run mode together with its focus/skip settings.
func NewCmdModes() *cobra.Command {
	opts := modesOptions{}
	cmd := &cobra.Command{
		Use:   "modes",
		Short: "Display the various modes in which to run the e2e plugin",
		Args:  cobra.ExactArgs(0),
		Run: func(cmd *cobra.Command, args []string) {
			showModes(opts)
		},
	}
	cmd.Flags().BoolVar(&opts.verbose, "verbose", false, "Do not truncate output for each mode.")
	return cmd
}
// showModes prints every registered e2e mode to stdout. Unless opt.verbose
// is set, each printed line is truncated to 200 characters.
func showModes(opt modesOptions) {
	count := 0
	if !opt.verbose {
		count = 200
	}
	for i, key := range validE2eModes() {
		// Named `mode` rather than `opt`: the previous code shadowed the
		// modesOptions parameter here, which was confusing (though harmless
		// since count is computed before the loop).
		mode := validModes[key]
		if i != 0 {
			fmt.Println("")
		}
		fmt.Println(truncate(fmt.Sprintf("Mode: %v", mode.name), count))
		fmt.Println(truncate(fmt.Sprintf("Description: %v", mode.desc), count))
		fmt.Println(truncate(fmt.Sprintf("E2E_FOCUS: %v", mode.focus), count))
		fmt.Println(truncate(fmt.Sprintf("E2E_SKIP: %v", mode.skip), count))
		fmt.Println(truncate(fmt.Sprintf("E2E_PARALLEL: %v", mode.parallel), count))
	}
}
// truncate shortens s to at most count bytes, appending a marker when
// anything was cut. A count of zero or less disables truncation entirely.
func truncate(s string, count int) string {
	if count <= 0 || len(s) <= count {
		return s
	}
	return s[:count] + "... (truncated) ..."
}
package exprxfunc
import (
"strings"
)
var iterator int
func Compose(expression string) string {
prefixExpression := infixToPrefix(expression)
expressionTree := prepareExpressionTree(prefixExpression)
var expressionSlice []string
expressionSlice = prepareExpression(expressionTree, expressionSlice)
return prepareExpressionFunction(expressionSlice)
}
// prepareExpressionTree tokenizes a whitespace-separated prefix expression
// and builds the corresponding binary expression tree. The package-level
// iterator cursor is reset before the recursive build.
func prepareExpressionTree(expression string) *Tree {
	tokens := strings.Fields(expression)
	tree := NewTree()
	iterator = 0
	buildTree(tokens, tree)
	return tree
}
// buildTree recursively consumes tokens from op, starting at the shared
// iterator cursor, and fills in t. Operator tokens get left and right
// subtrees built from the following tokens; operands become leaves.
//
// Rewritten with guard clauses; the previous version nested the whole body
// in an if and ended with a redundant bare return.
func buildTree(op []string, t *Tree) {
	if iterator >= len(op) {
		return
	}
	token := op[iterator]
	iterator++
	t.AddValue(token)
	if !isOperator(token) {
		return
	}
	if t.Left == nil {
		t.AddLeft(NewTree())
	}
	buildTree(op, t.Left)
	if t.Right == nil {
		t.AddRight(NewTree())
	}
	buildTree(op, t.Right)
}
// prepareExpression flattens t in pre-order, mapping each node value
// through operatorFunction and appending the results to expression.
func prepareExpression(t *Tree, expression []string) []string {
	if t == nil {
		return expression
	}
	out := append(expression, operatorFunction(t.Value))
	out = prepareExpression(t.Left, out)
	return prepareExpression(t.Right, out)
}
// prepareExpressionFunction renders the pre-order token slice as nested
// function calls: each operator-function token opens a call (token + LeftB)
// and subsequent operand tokens become its arguments, separated by Comma
// and closed with RightB. Remaining open brackets are closed at the end.
// LeftB/RightB/Comma are package constants — presumably "(", ")" and "," —
// TODO confirm.
func prepareExpressionFunction(expressionSlice []string) string {
	var expressionFunction string
	// bracketsToClose counts calls opened but not yet closed.
	bracketsToClose := 0
	// firstParamExists tracks whether the current call already has its
	// first argument.
	firstParamExists := false
	for i := 0; i < len(expressionSlice); i++ {
		if isOperatorFunction(expressionSlice[i]) {
			// Operator: open a new call.
			expressionFunction = expressionFunction + expressionSlice[i] + LeftB
			bracketsToClose++
			firstParamExists = false
		} else {
			// Operand: first argument gets a trailing separator; the second
			// closes the innermost call.
			expressionFunction = expressionFunction + expressionSlice[i]
			if !firstParamExists {
				expressionFunction = expressionFunction + Comma
				firstParamExists = true
			} else {
				expressionFunction = expressionFunction + RightB
				bracketsToClose--
				if bracketsToClose > 0 {
					expressionFunction = expressionFunction + Comma
				}
			}
		}
	}
	// Drop a dangling separator, if any.
	// NOTE(review): this checks the hard-coded byte ',' rather than the
	// Comma constant; fine only if Comma is exactly "," — verify.
	if last := len(expressionFunction) - 1; last >= 0 && expressionFunction[last] == ',' {
		expressionFunction = expressionFunction[:last]
	}
	// Close any calls still open.
	for i := 0; i < bracketsToClose; i++ {
		expressionFunction = expressionFunction + RightB
	}
	return expressionFunction
} | compose.go | 0.524882 | 0.455622 | compose.go | starcoder |
package nnops
import (
G "gorgonia.org/gorgonia"
"gorgonia.org/tensor"
)
// Conv2d applies a 2D convolution of filter over im with the given kernel
// shape, padding, stride and dilation, returning the resulting graph node.
func Conv2d(im, filter *G.Node, kernelShape tensor.Shape, pad, stride, dilation []int) (retVal *G.Node, err error) {
	op, err := makeConvolutionOp(im, filter, kernelShape, pad, stride, dilation)
	if err != nil {
		return nil, err
	}
	return G.ApplyOp(op, im, filter)
}
// Conv1d is a convenience wrapper that expresses a 1D convolution as a 2D
// convolution with a height of one.
func Conv1d(in, filter *G.Node, kernel, pad, stride, dilation int) (*G.Node, error) {
	kernelShape := tensor.Shape{1, kernel}
	return Conv2d(in, filter, kernelShape, []int{0, pad}, []int{1, stride}, []int{1, dilation})
}
// MaxPool2D applies a 2D max-pooling op with the given kernel shape,
// padding and stride to x.
func MaxPool2D(x *G.Node, kernel tensor.Shape, pad, stride []int) (retVal *G.Node, err error) {
	op, err := newMaxPoolOp(x, kernel, pad, stride)
	if err != nil {
		return nil, err
	}
	return G.ApplyOp(op, x)
}
// Dropout attaches a dropout op with drop probability prob to x and
// returns the resulting node.
func Dropout(x *G.Node, prob float64) (retVal *G.Node, err error) {
	op, err := newDropout(x, prob)
	if err != nil {
		return nil, err
	}
	// states := &scratchOp{x.Shape().Clone(), x.Dtype(), ""}
	// m := G.NewUniqueNode(G.WithType(x.Type()), G.WithOp(states), G.In(x.Graph()), G.WithShape(states.shape...))
	return G.ApplyOp(op, x)
}
// Rectify applies the ReLU activation to x.
func Rectify(x *G.Node) (retVal *G.Node, err error) {
	op, err := newRelu()
	if err != nil {
		return nil, err
	}
	return G.ApplyOp(op, x)
}
// BatchNorm wires a batch-normalization op over x into x's graph. It
// allocates GPU scratch tensors for the running mean/variance and their
// caches, creates Glorot-initialized scale (γ) and bias (β) tensors when
// the caller passes nil, and returns the output node, the (possibly newly
// created) scale and bias, the op itself, and any error.
//
// Assumes x is rank-4 in NCHW layout — indices 1..3 are read as channels,
// H and W below — TODO confirm against callers.
func BatchNorm(x, scale, bias *G.Node, momentum, epsilon float64) (retVal, γ, β *G.Node, op *BatchNormOp, err error) {
	dt, err := dtypeOf(x.Type())
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// batches := x.Shape()[0]
	channels := x.Shape()[1]
	H, W := x.Shape()[2], x.Shape()[3]
	// spatialDim := x.Shape().TotalSize() / (channels * batches)
	// Scratch tensors are sized per-channel with a batch dimension of 1.
	scratchShape := tensor.Shape{1, channels, H, W}
	// scaleScratch := &scratchOp{x.Shape().Clone(), dt, "scale"}
	// biasScratch := &scratchOp{x.Shape().Clone(), dt, "bias"}
	meanScratch := &gpuScratchOp{scratchOp{x.Shape().Clone(), dt, "mean"}}
	varianceScratch := &gpuScratchOp{scratchOp{x.Shape().Clone(), dt, "variance"}}
	cacheMeanScratch := &gpuScratchOp{scratchOp{scratchShape, dt, "cacheMean"}}
	cacheVarianceScratch := &gpuScratchOp{scratchOp{scratchShape, dt, "cacheVariance"}}
	g := x.Graph()
	dims := len(x.Shape())
	// Running statistics live as named nodes in the same graph as x.
	mean := G.NewTensor(g, dt, dims, G.WithShape(scratchShape.Clone()...), G.WithName(x.Name()+"_mean"), G.WithOp(meanScratch))
	variance := G.NewTensor(g, dt, dims, G.WithShape(scratchShape.Clone()...), G.WithName(x.Name()+"_variance"), G.WithOp(varianceScratch))
	cacheMean := G.NewTensor(g, dt, dims, G.WithShape(scratchShape.Clone()...), G.WithOp(cacheMeanScratch))
	cacheVariance := G.NewTensor(g, dt, dims, G.WithShape(scratchShape.Clone()...), G.WithOp(cacheVarianceScratch))
	// Learnable parameters are created on demand with Glorot-normal init.
	if scale == nil {
		scale = G.NewTensor(g, dt, dims, G.WithShape(scratchShape.Clone()...), G.WithName(x.Name()+"_γ"), G.WithInit(G.GlorotN(1.0)))
	}
	if bias == nil {
		bias = G.NewTensor(g, dt, dims, G.WithShape(scratchShape.Clone()...), G.WithName(x.Name()+"_β"), G.WithInit(G.GlorotN(1.0)))
	}
	op = newBatchNormOp(momentum, epsilon)
	retVal, err = G.ApplyOp(op, x, scale, bias, mean, variance, cacheMean, cacheVariance)
	return retVal, scale, bias, op, err
} | ops/nn/api_cuda.go | 0.777933 | 0.520618 | api_cuda.go | starcoder |
package physics
import (
"github.com/doxxan/glitch/math/vector"
"math"
)
// An AABB is an axis-aligned bounding box described by its minimum and
// maximum corners.
type AABB struct {
	Min, Max vector.Vec2
}
// AABBvsAABB tests the two bodies of m for overlap of their bounding boxes
// (a separating-axis test on x and y). On overlap it fills in m's contact
// normal (pointing from body b1 towards b2 along the axis of least
// penetration) and penetration depth, and returns true.
func AABBvsAABB(m *Manifold) bool {
	a := m.b1
	b := m.b2
	// Vector from a's position to b's position.
	normal := vector.Sub2(b.Position, a.Position)
	abox := a.Box
	bbox := b.Box
	// Half-widths and half-heights of both boxes.
	aXExtent := (abox.Max.X - abox.Min.X) / 2.0
	bXExtent := (bbox.Max.X - bbox.Min.X) / 2.0
	aYExtent := (abox.Max.Y - abox.Min.Y) / 2.0
	bYExtent := (bbox.Max.Y - bbox.Min.Y) / 2.0
	// Calculate overlap on each axis; a non-positive value means the boxes
	// are separated along that axis.
	// NOTE(review): this assumes each Box is centered on its body's
	// Position (only extents are used, not the absolute Min/Max) — confirm
	// against how Box is maintained elsewhere.
	xOverlap := aXExtent + bXExtent - math.Abs(normal.X)
	yOverlap := aYExtent + bYExtent - math.Abs(normal.Y)
	if xOverlap <= 0.0 || yOverlap <= 0.0 {
		return false
	}
	// Find axis of least penetration
	if xOverlap < yOverlap {
		// Point towards b knowing that normal points from a to b
		if normal.X < 0.0 {
			m.normal = vector.Vec2{X: -1.0, Y: 0.0}
		} else {
			m.normal = vector.Vec2{X: 1.0, Y: 0.0}
		}
		m.penetration = xOverlap
	} else {
		// Point towards b knowing that normal points from a to b
		if normal.Y < 0.0 {
			m.normal = vector.Vec2{X: 0.0, Y: -1.0}
		} else {
			m.normal = vector.Vec2{X: 0.0, Y: 1.0}
		}
		m.penetration = yOverlap
	}
	return true
}
// A Body is a rigid body participating in the simulation.
type Body struct {
	Position vector.Vec2
	Velocity vector.Vec2
	// restitution is the bounciness used during impulse resolution.
	restitution float64
	// staticFriction and dynamicFriction feed the Coulomb friction model.
	staticFriction  float64
	dynamicFriction float64
	// Force accumulates external forces; Step integrates and clears it.
	Force vector.Vec2
	// mass and its cached inverse; invMass of 0 marks a static body.
	mass    float64
	invMass float64
	// Box is the body's axis-aligned bounding box used by the broadphase.
	Box AABB
}
// NewBody allocates a Body with the given mass and default material
// properties. A mass of zero marks the body as static: its inverse mass
// stays zero so impulses and integration leave it unmoved.
func NewBody(mass float64) *Body {
	b := &Body{
		mass:            mass,
		restitution:     0.2,
		staticFriction:  0.5,
		dynamicFriction: 0.3,
	}
	if mass != 0.0 {
		b.invMass = 1.0 / mass
	}
	return b
}
// A Manifold records one contact between two bodies: the pair itself, how
// deeply they interpenetrate, and the contact normal (pointing from b1
// towards b2, as set by AABBvsAABB).
type Manifold struct {
	b1, b2      *Body
	penetration float64
	normal      vector.Vec2
}
// ResolveCollision applies an impulse along the contact normal to separate
// the two bodies' velocities, followed by a Coulomb friction impulse along
// the contact tangent. Bodies already separating are left untouched.
func (m *Manifold) ResolveCollision() {
	a := m.b1
	b := m.b2
	// Relative velocity of b with respect to a.
	rv := vector.Sub2(b.Velocity, a.Velocity)
	velAlongNormal := vector.Dot2(rv, m.normal)
	// Already moving apart along the normal: nothing to resolve.
	if velAlongNormal > 0.0 {
		return
	}
	// Restitution: use the softer of the two bodies.
	e := math.Min(a.restitution, b.restitution)
	// Resting-contact heuristic: kill restitution when the relative
	// velocity is below roughly one gravity step.
	// NOTE(review): gravity (0,-9.8) and dt (1/60) are hard-coded here
	// rather than taken from the owning World — confirm intentional.
	gravityDt := vector.MulScalar2(vector.Vec2{X: 0.0, Y: -9.8}, 1.0/60.0)
	if (rv.X*rv.X + rv.Y*rv.Y) < (gravityDt.X*gravityDt.X+gravityDt.Y*gravityDt.Y)+0.0001 {
		e = 0.0
	}
	// Impulse scalar
	j := -(1.0 + e) * velAlongNormal
	j /= a.invMass + b.invMass
	// Apply impulse
	// NOTE(review): impulse.Neg() here looks like a double negation given
	// the .Sub() — this is only correct if Neg/Sub have the in-place
	// semantics the vector package defines; verify against that package.
	impulse := vector.MulScalar2(m.normal, j)
	a.Velocity.Sub(vector.MulScalar2(impulse.Neg(), a.invMass))
	b.Velocity.Add(vector.MulScalar2(impulse, b.invMass))
	// Friction impulse: recompute relative velocity after the normal
	// impulse, then project onto the contact tangent.
	rv = vector.Sub2(b.Velocity, a.Velocity)
	t := vector.Sub2(rv, vector.MulScalar2(m.normal, vector.Dot2(rv, m.normal)))
	t = vector.Normalize2(t)
	// Tangent magnitude
	jt := -vector.Dot2(rv, t)
	jt /= a.invMass + b.invMass
	// Skip tiny friction impulses
	if math.Abs(jt) < 0.0001 {
		return
	}
	// Coulomb's law: clamp the friction impulse to mu * normal impulse,
	// mixing the two bodies' coefficients geometrically.
	sf := math.Sqrt(a.staticFriction * b.staticFriction)
	df := math.Sqrt(a.dynamicFriction * b.dynamicFriction)
	var tImpulse vector.Vec2
	if math.Abs(jt) < j*sf {
		tImpulse = vector.MulScalar2(t, jt)
	} else {
		tImpulse = vector.MulScalar2(vector.MulScalar2(t, -j), df)
	}
	// NOTE(review): same Neg()/Sub() pattern as above — verify semantics.
	a.Velocity.Sub(vector.MulScalar2(tImpulse.Neg(), a.invMass))
	b.Velocity.Add(vector.MulScalar2(tImpulse, b.invMass))
}
// PositionalCorrection nudges the two bodies apart along the contact
// normal to counteract sinking from accumulated penetration. Only a
// fraction (percent) of the penetration beyond a small allowance (slop) is
// corrected each step, split between the bodies by inverse mass.
func (m *Manifold) PositionalCorrection() {
	percent := 0.2
	slop := 0.01
	a := m.b1
	b := m.b2
	correction := m.normal
	correction.MulScalar(math.Max(m.penetration-slop, 0.0) / (a.invMass + b.invMass))
	correction.MulScalar(percent)
	a.Position.Sub(vector.MulScalar2(correction, a.invMass))
	b.Position.Add(vector.MulScalar2(correction, b.invMass))
}
// A World owns the simulated bodies and the fixed-timestep solver settings.
type World struct {
	bodies []*Body
	// contacts is scratch storage for the manifolds found each Step.
	contacts []*Manifold
	gravity vector.Vec2
	// iterations is how many impulse-resolution passes Step runs.
	iterations int
	// dt is the fixed timestep in seconds.
	dt float64
}
// NewWorld constructs a simulation world with the given gravity vector,
// impulse-solver iteration count and fixed timestep dt.
func NewWorld(gravity vector.Vec2, iterations int, dt float64) *World {
	w := World{
		gravity:    gravity,
		iterations: iterations,
		dt:         dt,
	}
	return &w
}
// Step advances the simulation by one fixed timestep: broad-phase AABB
// collision detection, the first half of semi-implicit force integration,
// iterative impulse resolution, velocity/position integration, and finally
// positional correction.
//
// The receiver is now a pointer: the previous value receiver copied the
// whole World each call (and was inconsistent with AddBody), and the
// contacts scratch list it built was discarded with the copy. With a
// pointer receiver the list's backing array is also reused across steps.
func (w *World) Step() {
	// Broadphase: collect a manifold for every overlapping pair. Pairs in
	// which both bodies are static (infinite mass) are skipped.
	w.contacts = w.contacts[:0]
	for i, b1 := range w.bodies {
		for _, b2 := range w.bodies[i+1:] {
			if b1.invMass == 0.0 && b2.invMass == 0.0 {
				continue
			}
			m := &Manifold{b1: b1, b2: b2}
			if AABBvsAABB(m) {
				w.contacts = append(w.contacts, m)
			}
		}
	}
	// Integrate forces (first half of the semi-implicit Euler step).
	for _, body := range w.bodies {
		if body.invMass == 0.0 {
			continue
		}
		acceleration := vector.Add2(vector.MulScalar2(body.Force, body.invMass), w.gravity)
		body.Velocity.Add(vector.MulScalar2(acceleration, w.dt/2.0))
		// Angular Velocity
	}
	// Resolve collisions iteratively so stacked contacts converge.
	for it := 0; it < w.iterations; it++ {
		for _, manifold := range w.contacts {
			manifold.ResolveCollision()
		}
	}
	// Integrate velocity into position, apply the second half of the force
	// step, then clear the accumulated forces.
	for _, body := range w.bodies {
		if body.invMass == 0.0 {
			continue
		}
		body.Position.Add(vector.MulScalar2(body.Velocity, w.dt))
		// Orientation
		acceleration := vector.Add2(vector.MulScalar2(body.Force, body.invMass), w.gravity)
		body.Velocity.Add(vector.MulScalar2(acceleration, w.dt/2.0))
		body.Force = vector.Vec2{X: 0.0, Y: 0.0}
	}
	// Correct positions to stop penetrating bodies from sinking.
	for _, manifold := range w.contacts {
		manifold.PositionalCorrection()
	}
}
// AddBody registers body with the world so it participates in subsequent
// Step calls. The world keeps the pointer, so callers may keep mutating
// the body directly.
func (w *World) AddBody(body *Body) {
	w.bodies = append(w.bodies, body)
} | physics/physics.go | 0.869424 | 0.583025 | physics.go | starcoder |
package algo
import (
"sort"
)
// Sortable is an int slice supporting in-place quicksort and quickselect.
type Sortable []int

// Partition rearranges s[left..right] around the pivot s[right] using the
// Lomuto scheme: afterwards every element before the returned index is
// smaller than the pivot, the pivot sits at the returned index, and
// everything after it is greater than or equal to it.
//
// The previous two-cursor implementation could walk its right cursor past
// the start of the slice (index out of range) whenever the pivot was
// smaller than all elements to its left — e.g. Partition(0, 2) on
// Sortable{2, 3, 1} panicked, so Quicksort panicked on such inputs too.
func (s Sortable) Partition(left, right int) int {
	pivot := s[right]
	store := left
	for i := left; i < right; i++ {
		if s[i] < pivot {
			s[store], s[i] = s[i], s[store]
			store++
		}
	}
	// Move the pivot into its final position.
	s[store], s[right] = s[right], s[store]
	return store
}
// Quicksort sorts s[left..right] in place by recursively partitioning
// around a pivot.
func (s *Sortable) Quicksort(left, right int) {
	if left >= right {
		return
	}
	p := s.Partition(left, right)
	s.Quicksort(left, p-1)
	s.Quicksort(p+1, right)
}
// Quickselect returns the kth_lowest (zero-based) element of s[left..right],
// partially reordering the slice as a side effect.
func (s *Sortable) Quickselect(kth_lowest, left, right int) int {
	if left == right {
		return (*s)[left]
	}
	p := s.Partition(left, right)
	switch {
	case kth_lowest < p:
		return s.Quickselect(kth_lowest, left, p-1)
	case kth_lowest > p:
		return s.Quickselect(kth_lowest, p+1, right)
	default:
		return (*s)[p]
	}
}
// SortFirstFindDuplicate reports whether s contains any duplicate value.
// It quicksorts the slice in place first so duplicates become adjacent.
//
// Fixes an out-of-range read: the loop previously ran i up to len(*s)-1
// while indexing (*s)[i+1], so duplicate-free input always panicked.
func (s *Sortable) SortFirstFindDuplicate() bool {
	s.Quicksort(0, len(*s)-1)
	for i := 0; i+1 < len(*s); i++ {
		if (*s)[i] == (*s)[i+1] {
			return true
		}
	}
	return false
}
// SortIntFindDuplicate reports whether s contains any duplicate value,
// using the standard library sort. The slice is sorted in place.
//
// Fixes an out-of-range read: the loop previously ran i up to len(*s)-1
// while indexing (*s)[i+1], so duplicate-free input always panicked.
func (s *Sortable) SortIntFindDuplicate() bool {
	sort.Ints(*s)
	for i := 0; i+1 < len(*s); i++ {
		if (*s)[i] == (*s)[i+1] {
			return true
		}
	}
	return false
}
// GreatestProductAny3 returns the largest product obtainable from any
// three elements of s (which must have at least three elements). The slice
// is sorted in place as a side effect.
//
// The answer is either the product of the three largest values or, when
// the two smallest values are both negative, the two smallest times the
// largest; the previous version ignored that negative case.
func (s *Sortable) GreatestProductAny3() int {
	sort.Ints(*s)
	n := len(*s)
	top := (*s)[n-1] * (*s)[n-2] * (*s)[n-3]
	bottom := (*s)[0] * (*s)[1] * (*s)[n-1]
	if bottom > top {
		return bottom
	}
	return top
}
// MissingNumber sorts s in place, then returns the first index whose value
// differs from the index itself, with true. When every position i holds
// the value i, it returns (0, false).
func (s Sortable) MissingNumber() (int, bool) {
	sort.Ints(s)
	for i, v := range s {
		if i != v {
			return i, true
		}
	}
	return 0, false
}
// LargestON returns the largest element of s with a single O(n) scan. An
// empty slice yields 0.
//
// The maximum is now seeded from s[0] rather than from the constant 0,
// which made the previous version return 0 for all-negative slices.
func (s Sortable) LargestON() int {
	if len(s) == 0 {
		return 0
	}
	largest := s[0]
	for _, v := range s[1:] {
		if v > largest {
			largest = v
		}
	}
	return largest
}
func (s Sortable) LargestNlogON() int {
sort.Ints(s)
return s[len(s)-1]
} | quicksort.go | 0.735831 | 0.480052 | quicksort.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.