code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package gomaddness
import "math"
// Hash is the data structure for the MADDNESS hash function.
// It holds the learned balanced binary regression tree (one entry in
// TreeLevels per tree depth) and the prototype vectors, one per leaf bucket.
type Hash[F Float] struct {
	// TreeLevels holds the split parameters for each level of the tree,
	// ordered from root (index 0) to the deepest level.
	TreeLevels []*HashingTreeLevel[F]
	// Prototypes holds one representative vector per leaf bucket.
	Prototypes Vectors[F]
}
// HashingTreeLevel is one level of the binary tree from a Hash.
type HashingTreeLevel[F Float] struct {
	// SplitIndex is the vector dimension compared at this level.
	SplitIndex int
	// SplitThresholds holds one threshold per node of this level,
	// indexed by the node's position within the level.
	SplitThresholds Vector[F]
}
// TrainHash runs the learning process for MADDNESS hash function parameters,
// and return a new trained Hash.
func TrainHash[F Float](examples Vectors[F]) *Hash[F] {
	// The tree has a fixed depth of 4, yielding 2^4 = 16 leaf buckets.
	const treeDepth = 4
	root := &Bucket[F]{
		Level:     -1,
		NodeIndex: 0,
		Vectors:   examples,
	}
	buckets := Buckets[F]{root}
	levels := make([]*HashingTreeLevel[F], treeDepth)
	for depth := range levels {
		buckets, levels[depth] = nextHashingTreeLevel(buckets)
	}
	return &Hash[F]{
		TreeLevels: levels,
		Prototypes: buckets.Prototypes(),
	}
}
// Hash maps the given vector to an index, applying MADDNESS hash function.
func (h *Hash[F]) Hash(v Vector[F]) uint8 {
	// Walk the tree keeping a 1-based node index i: the children of node i
	// on the next level are 2i-1 (left) and 2i (right).
	var i uint8 = 1
	for _, level := range h.TreeLevels {
		threshold := level.SplitThresholds[i-1]
		if v[level.SplitIndex] < threshold {
			i = 2*i - 1 // below threshold: descend into the left child
		} else {
			i = 2 * i // at or above threshold: descend into the right child
		}
	}
	return i - 1 // convert the 1-based leaf index to a 0-based bucket index
}
func nextHashingTreeLevel[F Float](buckets Buckets[F]) (Buckets[F], *HashingTreeLevel[F]) {
indices := buckets.HeuristicSelectIndices()
bestLoss := F(math.Inf(+1))
bestSplitIndex := -1
var bestSplitThresholds Vector[F]
for _, splitIndex := range indices {
var loss F
splitThresholds := make(Vector[F], len(buckets))
for j, bucket := range buckets {
t, l := bucket.Vectors.OptimalSplitThreshold(splitIndex)
splitThresholds[j] = t
loss += l
}
if loss < bestLoss {
bestLoss = loss
bestSplitIndex = splitIndex
bestSplitThresholds = splitThresholds
}
}
newBuckets := make(Buckets[F], 0, len(buckets)*2)
for j, bucket := range buckets {
lt, gte := bucket.Vectors.SplitByThreshold(bestSplitIndex, bestSplitThresholds[j])
// TODO: check corner cases when lt or gte are empty
if len(lt) == 0 {
v := gte.Copy().SortByColumn(bestSplitIndex)[0].Copy()
v[bestSplitIndex] = F(math.Nextafter32(float32(v[bestSplitIndex]), float32(math.Inf(-1))))
lt = Vectors[F]{v}
}
if len(gte) == 0 {
v := lt.Copy().SortByColumn(bestSplitIndex)[len(lt)-1].Copy()
v[bestSplitIndex] = F(math.Nextafter32(float32(v[bestSplitIndex]), float32(math.Inf(+1))))
gte = Vectors[F]{v}
}
newBuckets = append(newBuckets, &Bucket[F]{
Level: bucket.Level + 1,
NodeIndex: j * 2,
Vectors: lt,
})
newBuckets = append(newBuckets, &Bucket[F]{
Level: bucket.Level + 1,
NodeIndex: j*2 + 1,
Vectors: gte,
})
}
nextLevel := &HashingTreeLevel[F]{
SplitIndex: bestSplitIndex,
SplitThresholds: bestSplitThresholds,
}
return newBuckets, nextLevel
} | hash.go | 0.620852 | 0.533094 | hash.go | starcoder |
package regionagogo
import (
"github.com/akhenakh/regionagogo/geostore"
"github.com/golang/geo/s2"
"github.com/kpawlik/geojson"
)
// Fences is a slice of *Fence (type used mainly to return one GeoJSON of the regions).
type Fences []*Fence
// Fence is an s2-represented FenceStorage:
// it contains an S2 loop and the associated metadata.
type Fence struct {
	// Data holds arbitrary key/value metadata attached to the fence.
	Data map[string]string `json:"data"`
	// Loop is the s2 polygon boundary; excluded from JSON serialization.
	Loop *s2.Loop `json:"-"`
}
// NewFenceFromStorage returns a Fence from a FenceStorage.
// Fence can be extended, FenceStorage is a protocol buffer instance.
// It returns nil when rs is nil.
func NewFenceFromStorage(rs *geostore.FenceStorage) *Fence {
	if rs == nil {
		return nil
	}
	// Points in storage are raw lat/lng degrees; convert each to an s2 point.
	points := make([]s2.Point, 0, len(rs.Points))
	for _, c := range rs.Points {
		ll := s2.LatLngFromDegrees(float64(c.Lat), float64(c.Lng))
		points = append(points, s2.PointFromLatLng(ll))
	}
	return &Fence{Data: rs.Data, Loop: s2.LoopFromPoints(points)}
}
// ToGeoJSON transforms a Fence to a valid GeoJSON FeatureCollection.
//
// Fix: the previous implementation silently dropped the loop's last vertex
// and never closed the ring; RFC 7946 requires a polygon ring's first and
// last coordinates to be identical, so all vertices are now emitted and the
// first one is repeated at the end.
func (f *Fence) ToGeoJSON() *geojson.FeatureCollection {
	var geo geojson.FeatureCollection
	points := f.Loop.Vertices()
	cs := make([]geojson.Coordinate, 0, len(points)+1)
	for _, p := range points {
		ll := s2.LatLngFromPoint(p)
		cs = append(cs, geojson.Coordinate{
			geojson.CoordType(ll.Lng.Degrees()),
			geojson.CoordType(ll.Lat.Degrees()),
		})
	}
	// Close the ring by repeating the first vertex (RFC 7946 §3.1.6).
	if len(cs) > 0 {
		cs = append(cs, cs[0])
	}
	poly := &geojson.Polygon{
		Type:        "Polygon",
		Coordinates: []geojson.Coordinates{cs},
	}
	properties := make(map[string]interface{}, len(f.Data))
	for k, v := range f.Data {
		properties[k] = v
	}
	geo.Features = []*geojson.Feature{
		{
			Type:       "Feature",
			Geometry:   poly,
			Properties: properties,
		},
	}
	geo.Type = "FeatureCollection"
	return &geo
}
// ToGeoJSON transforms a set of Fences to a valid GeoJSON FeatureCollection.
// Each fence becomes one Polygon feature; per RFC 7946 each ring is closed
// by repeating its first vertex as the last coordinate.
func (f *Fences) ToGeoJSON() *geojson.FeatureCollection {
	var geo geojson.FeatureCollection
	features := make([]*geojson.Feature, 0, len(*f))
	for _, fence := range *f {
		points := fence.Loop.Vertices()
		cs := make([]geojson.Coordinate, 0, len(points)+1)
		for _, p := range points {
			ll := s2.LatLngFromPoint(p)
			cs = append(cs, geojson.Coordinate{
				geojson.CoordType(ll.Lng.Degrees()),
				geojson.CoordType(ll.Lat.Degrees()),
			})
		}
		// Close the ring (RFC 7946 §3.1.6).
		if len(cs) > 0 {
			cs = append(cs, cs[0])
		}
		poly := &geojson.Polygon{
			Type:        "Polygon",
			Coordinates: []geojson.Coordinates{cs},
		}
		properties := make(map[string]interface{}, len(fence.Data))
		for k, v := range fence.Data {
			properties[k] = v
		}
		// Note: previously this local was named f, shadowing the receiver.
		feature := &geojson.Feature{
			Type:       "Feature",
			Geometry:   poly,
			Properties: properties,
		}
		features = append(features, feature)
	}
	geo.Features = features
	geo.Type = "FeatureCollection"
	return &geo
}
type BySize []*Fence
func (d BySize) Len() int { return len(d) }
func (d BySize) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d BySize) Less(i, j int) bool {
// use approximated area to decide ordering
return d[i].Loop.RectBound().Area() < d[j].Loop.RectBound().Area()
} | fence.go | 0.814459 | 0.400632 | fence.go | starcoder |
package rt90
import "math"
// ToWGS84 transforms RT90 coordinates (x northing, y easting, meters)
// to WGS84 latitude and longitude in degrees.
func ToWGS84(x, y float64) (lat float64, long float64) {
	return gaussKrüger(x, y)
}
// gaussKrüger converts RT90 2.5 gon V grid coordinates (x northing,
// y easting, in meters) to WGS84 latitude and longitude in degrees,
// applying the inverse Gauss–Krüger (transverse Mercator) series with
// GRS80-compatible ellipsoid parameters.
func gaussKrüger(x, y float64) (float64, float64) {
	// Projection parameters for RT90 2.5 gon V 0:-15 towards WGS84.
	axis := 6378137.0
	flattening := 1.0 / 298.257222101
	centralMeridian := 15.0 + 48.0/60.0 + 22.624306/3600.0
	scale := 1.00000561024
	falseNorthing := -667.711
	falseEasting := 1500064.274

	// Derived ellipsoid quantities and series coefficients.
	e2 := flattening * (2.0 - flattening)
	n := flattening / (2.0 - flattening)
	aRoof := axis / (1.0 + n) * (1.0 + n*n/4.0 + n*n*n*n/64.0)
	delta1 := n/2.0 - 2.0*n*n/3.0 + 37.0*n*n*n/96.0 - n*n*n*n/360.0
	delta2 := n*n/48.0 + n*n*n/15.0 - 437.0*n*n*n*n/1440.0
	delta3 := 17.0*n*n*n/480.0 - 37*n*n*n*n/840.0
	delta4 := 4397.0 * n * n * n * n / 161280.0
	Astar := e2 + e2*e2 + e2*e2*e2 + e2*e2*e2*e2
	Bstar := -(7.0*e2*e2 + 17.0*e2*e2*e2 + 30.0*e2*e2*e2*e2) / 6.0
	Cstar := (224.0*e2*e2*e2 + 889.0*e2*e2*e2*e2) / 120.0
	Dstar := -(4279.0 * e2 * e2 * e2 * e2) / 1260.0

	degToRad := math.Pi / 180
	lambdaZero := centralMeridian * degToRad
	// Normalized grid coordinates.
	xi := (x - falseNorthing) / (scale * aRoof)
	eta := (y - falseEasting) / (scale * aRoof)
	xiPrim := xi -
		delta1*math.Sin(2.0*xi)*math.Cosh(2.0*eta) -
		delta2*math.Sin(4.0*xi)*math.Cosh(4.0*eta) -
		delta3*math.Sin(6.0*xi)*math.Cosh(6.0*eta) -
		delta4*math.Sin(8.0*xi)*math.Cosh(8.0*eta)
	etaPrim := eta -
		delta1*math.Cos(2.0*xi)*math.Sinh(2.0*eta) -
		delta2*math.Cos(4.0*xi)*math.Sinh(4.0*eta) -
		delta3*math.Cos(6.0*xi)*math.Sinh(6.0*eta) -
		delta4*math.Cos(8.0*xi)*math.Sinh(8.0*eta)
	phiStar := math.Asin(math.Sin(xiPrim) / math.Cosh(etaPrim))
	deltaLambda := math.Atan(math.Sinh(etaPrim) / math.Cos(xiPrim))
	lonRadian := lambdaZero + deltaLambda
	latRadian := phiStar + math.Sin(phiStar)*math.Cos(phiStar)*
		(Astar+
			Bstar*math.Pow(math.Sin(phiStar), 2)+
			Cstar*math.Pow(math.Sin(phiStar), 4)+
			Dstar*math.Pow(math.Sin(phiStar), 6))
	return latRadian * 180.0 / math.Pi, lonRadian * 180.0 / math.Pi
}
package postgres
// args builds a set of accepted argument counts from the given arities.
func args(a ...int) map[int]struct{} {
	set := make(map[int]struct{}, len(a))
	for _, arity := range a {
		set[arity] = struct{}{}
	}
	return set
}
var Functions = map[string]map[int]struct{}{
// https://www.postgresql.org/docs/current/functions-math.html
// Table 9.5. Mathematical Functions
"abs": args(1),
"cbrt": args(1),
"ceil": args(1),
"ceiling": args(1),
"degrees": args(1),
"div": args(2),
"exp": args(1),
"floor": args(1),
"ln": args(1),
"log": args(1, 2),
"mod": args(2),
"pi": args(0),
"power": args(2),
"radians": args(1),
"round": args(1, 2),
"scale": args(1),
"sign": args(1),
"sqrt": args(1),
"trunc": args(1, 2),
"width_bucket": args(2, 4),
// Table 9.6. Random Functions
"random": args(0),
"setseed": args(1),
// Table 9.7. Trigonometric Functions
"acos": args(1),
"acosd": args(1),
"asin": args(1),
"asind": args(1),
"atan": args(1),
"atan2": args(2),
"atan2d": args(2),
"atand": args(1),
"cos": args(1),
"cosd": args(1),
"cot": args(1),
"cotd": args(1),
"sin": args(1),
"sind": args(1),
"tan": args(1),
"tand": args(1),
// https://www.postgresql.org/docs/current/functions-string.html
// Table 9.8. SQL String Functions and Operators
"bit_length": args(1),
"char_length": args(1),
"character_length": args(1),
"lower": args(1),
"octet_length": args(1),
"overlay": args(3, 4),
"pg_catalog.position": args(2),
"substring": args(1, 2, 3),
"trim": args(2, 3),
"upper": args(1),
// Table 9.9. Other String Functions
"ascii": args(1),
"btrim": args(1, 2),
"chr": args(1),
"convert": args(3),
"convert_from": args(2),
"convert_to": args(2),
"decode": args(2),
"encode": args(2),
"initcap": args(1),
"left": args(2),
"length": args(1, 2),
"lpad": args(2, 3),
"ltrim": args(1, 2),
"md5": args(1),
"parse_ident": args(1, 2),
"pg_client_encoding": args(0),
"quote_ident": args(1),
"quote_literal": args(1),
"quote_nullable": args(1),
"regexp_match": args(2, 3),
"regexp_matches": args(2, 3),
"regexp_replace": args(3, 4),
"regexp_split_to_array": args(2, 3),
"regexp_split_to_table": args(2, 3),
"repeat": args(2),
"replace": args(3),
"reverse": args(1),
"right": args(2),
"rpad": args(2, 3),
"rtrim": args(1, 2),
"split_part": args(3),
"strpos": args(2),
"substr": args(2, 3),
"starts_with": args(2),
"to_ascii": args(1, 2),
"to_hex": args(1),
"translate": args(3),
// https://www.postgresql.org/docs/current/functions-binarystring.html
// Table 9.12. Other Binary String Functions
"get_bit": args(2),
"get_byte": args(2),
"set_bit": args(3),
"set_byte": args(3),
"sha224": args(1),
"sha256": args(1),
"sha384": args(1),
"sha512": args(1),
// https://www.postgresql.org/docs/current/functions-formatting.html
// Table 9.23. Formatting Functions
"to_char": args(2),
"to_date": args(2),
"to_number": args(2),
"to_timestamp": args(1, 2),
// https://www.postgresql.org/docs/current/functions-datetime.html
"age": args(1, 2),
"clock_timestamp": args(0),
"date_part": args(2),
"date_trunc": args(2),
"extract": args(2),
"isfinite": args(1),
"justify_days": args(1),
"justify_hours": args(1),
"justify_interval": args(1),
"make_date": args(3),
"make_time": args(3),
"make_timestamp": args(6),
"make_timestampz": args(6),
"now": args(0),
"statement_timestamp": args(0),
"timeofday": args(0),
"transaction_timestamp": args(0),
// https://www.postgresql.org/docs/current/functions-enum.html
// Table 9.32. Enum Support Functions
"enum_first": args(1),
"enum_last": args(1),
"enum_range": args(1, 2),
// https://www.postgresql.org/docs/current/functions-geometry.html
// Table 9.34. Geometric Functions
"area": args(1),
"center": args(1),
"diameter": args(1),
"height": args(1),
"isclosed": args(1),
"isopen": args(1),
"npoints": args(1),
"pclose": args(1),
"popen": args(1),
"radius": args(1),
"width": args(1),
// Table 9.35. Geometric Type Conversion Functions
"box": args(1, 2),
"bound_box": args(2),
"circle": args(1, 2),
"line": args(2),
"lseg": args(1, 2),
"path": args(1),
"point": args(1, 2),
"polygon": args(1, 2),
// https://www.postgresql.org/docs/current/functions-net.html
// Table 9.37. cidr and inet Functions
"abbrev": args(1),
"broadcast": args(1),
"family": args(1),
"host": args(1),
"hostmask": args(1),
"masklen": args(1),
"netmask": args(1),
"network": args(1),
"set_masklen": args(1),
"text": args(1),
"inet_same_family": args(1),
"inet_merge": args(1),
// https://www.postgresql.org/docs/current/functions-aggregate.html
"count": args(0, 1),
} | internal/postgres/funcs.go | 0.590897 | 0.507202 | funcs.go | starcoder |
package collector
import (
"encoding/json"
"fmt"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// phasesSubsystem names the metric subsystem and the PDU API endpoint.
	phasesSubsystem = "phases"
	// phasesLabels are the labels common to every phase metric.
	phasesLabels = []string{"id", "name"}
	// phasesStatusLabels extends phasesLabels with the kind of status reported.
	phasesStatusLabels = append(phasesLabels, "status_type")
	// phasesDesc maps a short metric key to its Prometheus descriptor.
	phasesDesc = map[string]*prometheus.Desc{
		"watts": colPromDesc(phasesSubsystem, "watts", "Integer phase power in Watts. Available only if phase power sensing is present and value is known (AC or DC).", phasesLabels),
		"voltamps": colPromDesc(phasesSubsystem, "voltamps", "Integer phase apparent power in Volt-Amps. Available only if phase apparent power sensing is present and value is known.", phasesLabels),
		"amps": colPromDesc(phasesSubsystem, "amps", "Floating point phase current in hundredth Amps. Available only if phase current sensing is present and value is known.", phasesLabels),
		"crest_factor": colPromDesc(phasesSubsystem, "crest_factor", "Floating point phase crest factor in tenths. Available only if phase crest factor sensing is present and value is known.", phasesLabels),
		"kilowatthours": colPromDesc(phasesSubsystem, "kilowatthours", "Floating point phase energy in tenth kilowatt-hours (kWh). Available only if energy sensing is present and value is known.", phasesLabels),
		"nominal_volts": colPromDesc(phasesSubsystem, "nominal_volts", "Integer phase nominal voltage in Volts. Available only if phase voltage sensing present.", phasesLabels),
		"power_factor": colPromDesc(phasesSubsystem, "power_factor", "Floating point phase power factor in hundredths. Available only if AC cord power factor sensing is present and value is known.", phasesLabels),
		"reactance": colPromDesc(phasesSubsystem, "reactance", "Status of the measured phase reactance. Available only if phasepower factor sensing present and value is known (0 = Unknown, 1 = Capacitive, 2 = Inductive, 3 = Resistive.", phasesLabels),
		"volts": colPromDesc(phasesSubsystem, "volts", "Floating point phase voltage in tenth Volts. Available only if voltage sensing is present and value is known. ", phasesLabels),
		"volts_deviation": colPromDesc(phasesSubsystem, "volts_deviation", "Floating point phase deviation percentage from nominal voltage in tenths. Available only if phase voltage sensing present.", phasesLabels),
		"state": colPromDesc(phasesSubsystem, "state", "State (1 = On, 0 = Off)).", phasesLabels),
		"status": colPromDesc(phasesSubsystem, "status", "Status (1 = Normal, 0 = Not Normal).", phasesStatusLabels),
	}
	// totalPhasesErrors is the running count of collection failures.
	// NOTE(review): mutated without synchronization — assumes Get is never
	// called concurrently; confirm against the scrape handler.
	totalPhasesErrors = 0.0
)
// init registers the phases collector with the package registry,
// enabled by default.
func init() {
	registerCollector(phasesSubsystem, enabledByDefault, NewPhasesCollector)
}
// PhasesCollector collects phases metrics, implemented as per the Collector
// interface. It is stateless; error accounting lives in the package-level
// totalPhasesErrors counter.
type PhasesCollector struct{}

// NewPhasesCollector returns a new PhasesCollector.
func NewPhasesCollector() Collector {
	return &PhasesCollector{}
}
// Get fetches the "phases" endpoint from the target PDU, parses it and sends
// the resulting metrics to the Prometheus metric channel. It returns the
// cumulative number of collection errors along with any error encountered.
func (c *PhasesCollector) Get(ch chan<- prometheus.Metric, target, user, pass string) (float64, error) {
	jsonPhases, err := getServerTechJSON(target, user, pass, "phases")
	if err != nil {
		totalPhasesErrors++
		// Fixed typo ("phasess") and wrap with %w so callers can unwrap.
		return totalPhasesErrors, fmt.Errorf("cannot get phases: %w", err)
	}
	if err := processPhasesStats(ch, jsonPhases); err != nil {
		totalPhasesErrors++
		return totalPhasesErrors, err
	}
	return totalPhasesErrors, nil
}
// processPhasesStats decodes the JSON payload of the "phases" endpoint and
// emits one metric per gauge/state/status field for every phase entry.
func processPhasesStats(ch chan<- prometheus.Metric, jsonPhasesSum []byte) error {
	var jsonPhases phasesData
	if err := json.Unmarshal(jsonPhasesSum, &jsonPhases); err != nil {
		return fmt.Errorf("cannot unmarshal phases json: %s", err)
	}
	for _, data := range jsonPhases {
		// Every metric for this phase shares the same id/name label pair.
		labels := []string{data.ID, data.Name}
		newGauge(ch, phasesDesc["watts"], data.ActivePower, labels...)
		newGauge(ch, phasesDesc["voltamps"], data.ApparentPower, labels...)
		newGauge(ch, phasesDesc["amps"], data.Current, labels...)
		newGauge(ch, phasesDesc["crest_factor"], data.CrestFactor, labels...)
		newGauge(ch, phasesDesc["kilowatthours"], data.Energy, labels...)
		newGauge(ch, phasesDesc["nominal_volts"], data.NominalVoltage, labels...)
		newGauge(ch, phasesDesc["power_factor"], data.PowerFactor, labels...)
		newGauge(ch, phasesDesc["volts"], data.Voltage, labels...)
		newGauge(ch, phasesDesc["volts_deviation"], data.VoltageDeviation, labels...)
		reactanceMetric(ch, phasesDesc["reactance"], data.Reactance, labels)
		// The "status" descriptor is reused with a status_type label
		// distinguishing power factor, voltage, and overall phase status.
		statusMetric(ch, phasesDesc["status"], data.PowerFactorStatus, "power factor", labels)
		statusMetric(ch, phasesDesc["status"], data.VoltageStatus, "voltage", labels)
		statusMetric(ch, phasesDesc["status"], data.Status, "phase", labels)
		stateMetric(ch, phasesDesc["state"], data.State, labels)
	}
	return nil
}
// phasesData mirrors the JSON array returned by the Server Technology
// "phases" endpoint: one element per electrical phase.
type phasesData []struct {
	ID                string  `json:"id"`
	Name              string  `json:"name"`
	ActivePower       float64 `json:"active_power"`
	ApparentPower     float64 `json:"apparent_power"`
	CrestFactor       float64 `json:"crest_factor"`
	Current           float64 `json:"current"`
	Energy            float64 `json:"energy"`
	NominalVoltage    float64 `json:"nominal_voltage"`
	PowerFactor       float64 `json:"power_factor"`
	PowerFactorStatus string  `json:"power_factor_status"`
	Reactance         string  `json:"reactance"`
	State             string  `json:"state"`
	Status            string  `json:"status"`
	Voltage           float64 `json:"voltage"`
	VoltageStatus     string  `json:"voltage_status"`
	VoltageDeviation  float64 `json:"voltage_deviation"`
}
package main
import (
//"fmt"
"github.com/golang/geo/r2"
"github.com/golang/geo/s2"
"github.com/paulmach/go.geojson"
"math"
)
// computeBounds returns the s2 bounding rectangle of a GeoJSON geometry.
// Polygon bounds are expanded with s2.ExpandForSubregions so the rect is
// guaranteed to contain the bounds of any subregion of the polygon.
//
// BUG fix: s2.ExpandForSubregions returns the expanded rect; the previous
// code discarded the return value, making both calls no-ops.
func computeBounds(g *geojson.Geometry) s2.Rect {
	r := s2.EmptyRect()
	if g == nil {
		return r
	}
	switch g.Type {
	case geojson.GeometryPoint:
		if len(g.Point) >= 2 {
			r = r.AddPoint(s2.LatLngFromDegrees(g.Point[1], g.Point[0]))
		}
		return r
	case geojson.GeometryMultiPoint:
		for _, p := range g.MultiPoint {
			if len(p) >= 2 {
				r = r.AddPoint(s2.LatLngFromDegrees(p[1], p[0]))
			}
		}
		return r
	case geojson.GeometryLineString:
		return computeLineBounds(g.LineString)
	case geojson.GeometryMultiLineString:
		for _, line := range g.MultiLineString {
			r = r.Union(computeLineBounds(line))
		}
		return r
	case geojson.GeometryPolygon:
		for _, ring := range g.Polygon {
			r = r.Union(computeLineBounds(ring))
		}
		r = s2.ExpandForSubregions(r)
		return r
	case geojson.GeometryMultiPolygon:
		for _, poly := range g.MultiPolygon {
			for _, ring := range poly {
				r = r.Union(computeLineBounds(ring))
			}
			r = s2.ExpandForSubregions(r)
		}
		return r
	case geojson.GeometryCollection:
		for _, geometry := range g.Geometries {
			r = r.Union(computeBounds(geometry))
		}
		return r
	default:
		return r
	}
}
// computeLineBounds returns the bounding rect of a GeoJSON coordinate list
// ([lng, lat] pairs); entries with fewer than two values are skipped.
func computeLineBounds(line [][]float64) s2.Rect {
	rect := s2.EmptyRect()
	for _, coord := range line {
		if len(coord) < 2 {
			continue
		}
		rect = rect.AddPoint(s2.LatLngFromDegrees(coord[1], coord[0]))
	}
	return rect
}
// EncodeBbox serializes a rect as a GeoJSON-style bounding box
// [west, south, east, north], or nil for an empty rect.
func EncodeBbox(r s2.Rect) []float64 {
	if r.IsEmpty() {
		return nil
	}
	return []float64{
		r.Lo().Lng.Degrees(),
		r.Lo().Lat.Degrees(),
		r.Hi().Lng.Degrees(),
		r.Hi().Lat.Degrees(),
	}
}
// getTileBounds returns the lat/lng rectangle covered by the web-mercator
// tile (zoom, x, y), from its top-left to its bottom-right corner.
func getTileBounds(zoom int, x int, y int) s2.Rect {
	nw := unprojectWebMercator(zoom, float64(x), float64(y))
	se := unprojectWebMercator(zoom, float64(x+1), float64(y+1))
	return s2.RectFromLatLng(nw).AddPoint(se)
}
// projectWebMercator projects a lat/lng to web-mercator pixel coordinates
// on the 256x256 base tile (zoom 0).
func projectWebMercator(p s2.LatLng) r2.Point {
	siny := math.Sin(p.Lat.Radians())
	// Clamp to avoid the projection's singularities at the poles.
	siny = math.Min(math.Max(siny, -0.9999), 0.9999)
	x := 256 * (0.5 + p.Lng.Degrees()/360)
	y := 256 * (0.5 - math.Log((1+siny)/(1-siny))/(4*math.Pi))
	return r2.Point{X: x, Y: y}
}
func unprojectWebMercator(zoom int, x float64, y float64) s2.LatLng {
// EPSG:3857 - https://epsg.io/3857
n := math.Pi - 2.0*math.Pi*y/math.Exp2(float64(zoom))
lat := 180.0 / math.Pi * math.Atan(0.5*(math.Exp(n)-math.Exp(-n)))
lng := x/math.Exp2(float64(zoom))*360.0 - 180.0
return s2.LatLngFromDegrees(lat, lng)
} | geometry.go | 0.676406 | 0.572245 | geometry.go | starcoder |
package encoding
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"github.com/lindb/lindb/pkg/stream"
)
// FixedOffsetEncoder represents the offset encoder with fixed length.
// Make sure that added offset is increasing when ensureIncreasing is set.
type FixedOffsetEncoder struct {
	// values are the offsets in insertion order.
	values []int
	// max is the largest offset seen; it determines the encoded byte width.
	max int
	// ensureIncreasing makes Add panic on non-monotonic input.
	ensureIncreasing bool
}
// NewFixedOffsetEncoder creates the fixed length offset encoder.
// ensureIncreasing=true ensures that added offsets are increasing and
// panics when a value is smaller than the previous one;
// ensureIncreasing=false suppresses the increasing check.
// Offsets must be >= 0.
func NewFixedOffsetEncoder(ensureIncreasing bool) *FixedOffsetEncoder {
	return &FixedOffsetEncoder{ensureIncreasing: ensureIncreasing}
}
// IsEmpty returns whether no offsets have been added.
func (e *FixedOffsetEncoder) IsEmpty() bool {
	return len(e.values) == 0
}
// Size returns the number of added offsets.
func (e *FixedOffsetEncoder) Size() int {
	return len(e.values)
}
// Reset resets the encoder context for reuse, keeping the values slice's
// backing storage to avoid reallocation.
func (e *FixedOffsetEncoder) Reset() {
	e.max = 0
	e.values = e.values[:0]
}
// Add appends the start offset value v.
// v must be >= 0, and—when the encoder was created with
// ensureIncreasing=true—must not be smaller than the previously added
// offset; violations panic since they indicate a caller bug.
func (e *FixedOffsetEncoder) Add(v int) {
	if e.ensureIncreasing && len(e.values) > 0 && e.values[len(e.values)-1] > v {
		panic("value added to FixedOffsetEncoder must be increasing")
	}
	if v < 0 {
		// Message fixed: the check accepts 0, so the bound is >= 0
		// (previously read "must > 0").
		panic("value added to FixedOffsetEncoder must be >= 0")
	}
	e.values = append(e.values, v)
	if e.max < v {
		e.max = v
	}
}
// FromValues resets the encoder, then initializes it with the given offsets,
// taking ownership of the slice. Unlike Add, no increasing/non-negative
// validation is performed here.
func (e *FixedOffsetEncoder) FromValues(values []int) {
	e.Reset()
	e.values = values
	for _, v := range values {
		if v > e.max {
			e.max = v
		}
	}
}
// MarshalBinary marshals the values to binary by delegating to Write,
// pre-growing the buffer to the exact marshaled size.
func (e *FixedOffsetEncoder) MarshalBinary() []byte {
	var buf bytes.Buffer
	buf.Grow(e.MarshalSize())
	_ = e.Write(&buf) // bytes.Buffer writes cannot fail
	return buf.Bytes()
}
// MarshalSize returns the exact number of bytes Write will produce:
// width flag + uvarint count + fixed-width values.
func (e *FixedOffsetEncoder) MarshalSize() int {
	return 1 + // width flag
		stream.UvariantSize(uint64(len(e.values))) + // size
		len(e.values)*e.width() // values
}
// width returns the minimal number of bytes (1-4) needed to store the
// largest offset added so far.
func (e *FixedOffsetEncoder) width() int {
	return Uint32MinWidth(uint32(e.max))
}
// Write writes the encoded data to the writer.
// Layout: [1 byte value width][uvarint count][count values, little-endian,
// truncated to the fixed width]. Writes nothing when the encoder is empty.
func (e *FixedOffsetEncoder) Write(writer io.Writer) error {
	if len(e.values) == 0 {
		return nil
	}
	width := e.width()
	// fixed value width
	if _, err := writer.Write([]byte{uint8(width)}); err != nil {
		return err
	}
	// put all values with fixed length
	var buf [binary.MaxVarintLen64]byte
	// write size
	sizeFlagWidth := binary.PutUvarint(buf[:], uint64(len(e.values)))
	if _, err := writer.Write(buf[:sizeFlagWidth]); err != nil {
		return err
	}
	// write values: encode as full uint32, then emit only the low `width`
	// bytes (sufficient because width covers the maximum value).
	for _, value := range e.values {
		binary.LittleEndian.PutUint32(buf[:], uint32(value))
		if _, err := writer.Write(buf[:width]); err != nil {
			return err
		}
	}
	return nil
}
// FixedOffsetDecoder represents the fixed offset decoder,
// supports random reads of offsets by index.
type FixedOffsetDecoder struct {
	// offsetsBlock is the raw fixed-width value region of the encoded data.
	offsetsBlock []byte
	// width is the number of bytes per stored value (1-4).
	width int
	// size is the declared number of stored values.
	size int
}
// NewFixedOffsetDecoder creates an empty fixed offset decoder;
// call Unmarshal before use.
func NewFixedOffsetDecoder() *FixedOffsetDecoder {
	return &FixedOffsetDecoder{}
}
// ValueWidth returns the width in bytes of all stored values.
func (d *FixedOffsetDecoder) ValueWidth() int {
	return d.width
}
// Size returns the number of offset values, or 0 if nothing has been
// successfully unmarshaled yet (width == 0).
func (d *FixedOffsetDecoder) Size() int {
	if d.width == 0 {
		return 0
	}
	return d.size
}
// Unmarshal parses a block produced by FixedOffsetEncoder.Write
// ([width byte][uvarint count][count fixed-width values]) and returns the
// remaining, unconsumed buffer. The decoder state is cleared before parsing
// so a failed call leaves it empty.
func (d *FixedOffsetDecoder) Unmarshal(data []byte) (left []byte, err error) {
	d.offsetsBlock = d.offsetsBlock[:0]
	d.width = 0
	d.size = 0
	if len(data) < 2 {
		return nil, fmt.Errorf("length too short of FixedOffsetDecoder: %d", len(data))
	}
	d.width = int(data[0])
	if d.width < 0 || d.width > 4 {
		// Message typo fixed ("ivalid" -> "invalid").
		return nil, fmt.Errorf("invalid width of FixedOffsetDecoder: %d", d.width)
	}
	size, readBytes := binary.Uvarint(data[1:])
	if readBytes <= 0 {
		return nil, fmt.Errorf("invalid uvariant of FixedOffsetDecoder")
	}
	d.size = int(size)
	// Guard against overflow and truncated buffers before slicing.
	wantLen := 1 + readBytes + d.width*d.size
	if wantLen > len(data) || wantLen < 0 || 1+readBytes > wantLen {
		// Grammar fixed ("a invalid" -> "an invalid").
		return nil, fmt.Errorf("cannot unmarshal FixedOffsetDecoder with an invalid buffer: %d, want: %d",
			len(data), wantLen)
	}
	d.offsetsBlock = data[1+readBytes : wantLen]
	return data[wantLen:], nil
}
// Get returns the offset stored at the given index and whether the index
// (and the stored value) is valid.
func (d *FixedOffsetDecoder) Get(index int) (int, bool) {
	start := index * d.width
	if start < 0 || len(d.offsetsBlock) == 0 || start >= len(d.offsetsBlock) || d.width > 4 {
		return 0, false
	}
	end := start + d.width
	if end > len(d.offsetsBlock) {
		return 0, false
	}
	// Widen the width-byte value to 4 bytes before decoding little-endian.
	var scratch [4]byte
	copy(scratch[:], d.offsetsBlock[start:end])
	offset := int(binary.LittleEndian.Uint32(scratch[:]))
	// on x32, data may overflow
	if offset < 0 {
		return 0, false
	}
	return offset, true
}
// GetBlock returns the sub-slice of dataBlock delimited by the offsets at
// index and index+1 (the last entry runs to the end of dataBlock).
// GetBlock is only supported when offsets were encoded in increasing order.
func (d *FixedOffsetDecoder) GetBlock(index int, dataBlock []byte) (block []byte, err error) {
	startOffset, ok := d.Get(index)
	if !ok {
		return nil, fmt.Errorf("corrupted FixedOffsetDecoder block, length: %d, startOffset: %d",
			len(d.offsetsBlock), startOffset)
	}
	// Missing next offset means this is the last entry.
	endOffset, ok := d.Get(index + 1)
	if !ok {
		endOffset = len(dataBlock)
	}
	// Validate the range before slicing to avoid panics on corrupt input.
	if startOffset < 0 || endOffset < 0 || endOffset < startOffset || endOffset > len(dataBlock) {
		return nil, fmt.Errorf("corrupted FixedOffsetDecoder block, "+
			"data block length: %d, data range: [%d, %d]", len(dataBlock), startOffset, endOffset,
		)
	}
	return dataBlock[startOffset:endOffset], nil
}
// ByteSlice2Uint32 decodes up to the first 4 bytes of slice as a
// little-endian uint32; shorter slices are zero-padded.
// A stack array is used instead of make([]byte, 4) to avoid a per-call
// heap allocation.
func ByteSlice2Uint32(slice []byte) uint32 {
	var buf [4]byte
	copy(buf[:], slice)
	return binary.LittleEndian.Uint32(buf[:])
}
package plaid
import (
"encoding/json"
"time"
)
// TransferSweep Describes a sweep of funds to / from the sweep account. A sweep is associated with many sweep events (events of type `swept` or `reverse_swept`) which can be retrieved by invoking the `/transfer/event/list` endpoint with the corresponding `sweep_id`. `swept` events occur when the transfer amount is credited or debited from your sweep account, depending on the `type` of the transfer. `reverse_swept` events occur when a transfer is reversed and Plaid undoes the credit or debit. The total sum of the `swept` and `reverse_swept` events is equal to the `amount` of the sweep Plaid creates and matches the amount of the entry on your sweep account ledger.
type TransferSweep struct {
	// Identifier of the sweep.
	Id string `json:"id"`
	// The datetime when the sweep occurred, in RFC 3339 format.
	CreatedAt time.Time `json:"created_at"`
	// Signed decimal amount of the sweep as it appears on your sweep account ledger (e.g. \"-10.00\") If amount is not present, the sweep was net-settled to zero and outstanding debits and credits between the sweep account and Plaid are balanced.
	Amount *string `json:"amount,omitempty"`
	// AdditionalProperties captures any JSON fields not declared above.
	AdditionalProperties map[string]interface{}
}
// _TransferSweep is an alias without methods, used by UnmarshalJSON to
// avoid infinite recursion when decoding into the real type.
type _TransferSweep TransferSweep
// NewTransferSweep instantiates a new TransferSweep object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed.
func NewTransferSweep(id string, createdAt time.Time) *TransferSweep {
	this := TransferSweep{}
	this.Id = id
	this.CreatedAt = createdAt
	return &this
}
// NewTransferSweepWithDefaults instantiates a new TransferSweep object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set.
func NewTransferSweepWithDefaults() *TransferSweep {
	this := TransferSweep{}
	return &this
}
// GetId returns the Id field value.
// It is safe to call on a nil receiver, returning the zero value.
func (o *TransferSweep) GetId() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Id
}
// GetIdOk returns a tuple with the Id field value
// and a boolean to check if the value has been set.
// It returns (nil, false) on a nil receiver.
func (o *TransferSweep) GetIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Id, true
}
// SetId sets the Id field value.
func (o *TransferSweep) SetId(v string) {
	o.Id = v
}
// GetCreatedAt returns the CreatedAt field value.
// It is safe to call on a nil receiver, returning the zero time.
func (o *TransferSweep) GetCreatedAt() time.Time {
	if o == nil {
		var ret time.Time
		return ret
	}
	return o.CreatedAt
}
// GetCreatedAtOk returns a tuple with the CreatedAt field value
// and a boolean to check if the value has been set.
// It returns (nil, false) on a nil receiver.
func (o *TransferSweep) GetCreatedAtOk() (*time.Time, bool) {
	if o == nil {
		return nil, false
	}
	return &o.CreatedAt, true
}
// SetCreatedAt sets the CreatedAt field value.
func (o *TransferSweep) SetCreatedAt(v time.Time) {
	o.CreatedAt = v
}
// GetAmount returns the Amount field value if set, zero value otherwise.
// It is safe to call on a nil receiver.
func (o *TransferSweep) GetAmount() string {
	if o == nil || o.Amount == nil {
		var ret string
		return ret
	}
	return *o.Amount
}
// GetAmountOk returns a tuple with the Amount field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TransferSweep) GetAmountOk() (*string, bool) {
	if o == nil || o.Amount == nil {
		return nil, false
	}
	return o.Amount, true
}
// HasAmount reports whether the Amount field has been set.
func (o *TransferSweep) HasAmount() bool {
	return o != nil && o.Amount != nil
}
// SetAmount gets a reference to the given string and assigns it to the Amount field.
func (o *TransferSweep) SetAmount(v string) {
	o.Amount = &v
}
// MarshalJSON serializes the struct, flattening AdditionalProperties into
// the top-level object and omitting Amount when unset.
func (o TransferSweep) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"id":         o.Id,
		"created_at": o.CreatedAt,
	}
	if o.Amount != nil {
		toSerialize["amount"] = o.Amount
	}
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON decodes the declared fields via the method-less
// _TransferSweep alias (avoiding recursion), then captures all remaining
// keys into AdditionalProperties.
func (o *TransferSweep) UnmarshalJSON(bytes []byte) (err error) {
	varTransferSweep := _TransferSweep{}
	if err = json.Unmarshal(bytes, &varTransferSweep); err == nil {
		*o = TransferSweep(varTransferSweep)
	}
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		// Remove the declared keys so only unknown ones are retained.
		delete(additionalProperties, "id")
		delete(additionalProperties, "created_at")
		delete(additionalProperties, "amount")
		o.AdditionalProperties = additionalProperties
	}
	return err
}
// NullableTransferSweep wraps a TransferSweep pointer together with an
// explicit "set" flag, distinguishing unset from JSON null.
type NullableTransferSweep struct {
	value *TransferSweep
	isSet bool
}
// Get returns the wrapped value (possibly nil).
func (v NullableTransferSweep) Get() *TransferSweep {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableTransferSweep) Set(val *TransferSweep) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (including explicit null) has been assigned.
func (v NullableTransferSweep) IsSet() bool {
	return v.isSet
}
// Unset clears the value and marks the wrapper as unset.
func (v *NullableTransferSweep) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableTransferSweep returns a wrapper already marked as set.
func NewNullableTransferSweep(val *TransferSweep) *NullableTransferSweep {
	return &NullableTransferSweep{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value; a nil value encodes as null.
func (v NullableTransferSweep) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableTransferSweep) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_transfer_sweep.go | 0.757705 | 0.443359 | model_transfer_sweep.go | starcoder |
package iso20022
// InterestAmount1 provides the elements related to the interest amount calculation.
type InterestAmount1 struct {
	// Indicates whether the interest request is new or updated.
	InterestRequestSequence *InterestRequestSequence1Code `xml:"IntrstReqSeq"`
	// Period for which the calculation has been performed.
	InterestPeriod *DatePeriodDetails `xml:"IntrstPrd"`
	// Amount of money representing an interest payment.
	AccruedInterestAmount *ActiveCurrencyAndAmount `xml:"AcrdIntrstAmt"`
	// Agreed date for the interest payment.
	ValueDate *ISODate `xml:"ValDt"`
	// Indicates whether the interest will be settled in cash or rolled in the existing collateral balance.
	InterestMethod *InterestMethod1Code `xml:"IntrstMtd"`
	// Percentage charged for the use of an amount of money, usually expressed at an annual rate. The interest rate is the ratio of the amount of interest paid during a certain period of time compared to the principal amount of the interest bearing financial instrument.
	InterestRate *InterestRate1Choice `xml:"IntrstRate,omitempty"`
	// Specifies the computation method of (accrued) interest of the security.
	DayCountBasis *InterestComputationMethod2Code `xml:"DayCntBsis,omitempty"`
	// Amount or percentage of a cash distribution that will be withheld by a tax authority.
	AppliedWithholdingTax *YesNoIndicator `xml:"ApldWhldgTax,omitempty"`
	// Specifies whether the interest is simple or compounded.
	CalculationMethod *CalculationMethod1Code `xml:"ClctnMtd,omitempty"`
	// Specifies the periodicity of the calculation of the interest.
	CalculationFrequency *Frequency1Code `xml:"ClctnFrqcy,omitempty"`
	// Specifies whether the collateral has been posted against the variation margin, the segregated independent amount or to cover any other risk defined with a proprietary code.
	CollateralPurpose *CollateralPurpose1Choice `xml:"CollPurp"`
	// Provides details about the opening collateral balance.
	OpeningCollateralBalance *CollateralBalance1 `xml:"OpngCollBal,omitempty"`
	// Provides details about the closing collateral balance.
	ClosingCollateralBalance *CollateralBalance1 `xml:"ClsgCollBal"`
	// Identifies the standard settlement instructions.
	StandardSettlementInstructions *Max140Text `xml:"StdSttlmInstrs,omitempty"`
	// Additional information related to the interest request.
	AdditionalInformation *Max210Text `xml:"AddtlInf,omitempty"`
	// Additional references linked to the updated interest payment request.
	ReferenceDetails *Reference20 `xml:"RefDtls,omitempty"`
}
// SetInterestRequestSequence sets InterestRequestSequence from its string value.
func (i *InterestAmount1) SetInterestRequestSequence(value string) {
	i.InterestRequestSequence = (*InterestRequestSequence1Code)(&value)
}
// AddInterestPeriod allocates, assigns and returns the InterestPeriod element.
func (i *InterestAmount1) AddInterestPeriod() *DatePeriodDetails {
	i.InterestPeriod = new(DatePeriodDetails)
	return i.InterestPeriod
}
// SetAccruedInterestAmount sets AccruedInterestAmount from an amount string and a currency code.
func (i *InterestAmount1) SetAccruedInterestAmount(value, currency string) {
	i.AccruedInterestAmount = NewActiveCurrencyAndAmount(value, currency)
}
// SetValueDate sets ValueDate from its string value.
func (i *InterestAmount1) SetValueDate(value string) {
	i.ValueDate = (*ISODate)(&value)
}
// SetInterestMethod sets InterestMethod from its string value.
func (i *InterestAmount1) SetInterestMethod(value string) {
	i.InterestMethod = (*InterestMethod1Code)(&value)
}
// AddInterestRate allocates, assigns and returns the InterestRate element.
func (i *InterestAmount1) AddInterestRate() *InterestRate1Choice {
	i.InterestRate = new(InterestRate1Choice)
	return i.InterestRate
}
// SetDayCountBasis sets DayCountBasis from its string value.
func (i *InterestAmount1) SetDayCountBasis(value string) {
	i.DayCountBasis = (*InterestComputationMethod2Code)(&value)
}
// SetAppliedWithholdingTax sets AppliedWithholdingTax from its string value.
func (i *InterestAmount1) SetAppliedWithholdingTax(value string) {
	i.AppliedWithholdingTax = (*YesNoIndicator)(&value)
}
// SetCalculationMethod sets CalculationMethod from its string value.
func (i *InterestAmount1) SetCalculationMethod(value string) {
	i.CalculationMethod = (*CalculationMethod1Code)(&value)
}
// SetCalculationFrequency sets CalculationFrequency from its string value.
func (i *InterestAmount1) SetCalculationFrequency(value string) {
	i.CalculationFrequency = (*Frequency1Code)(&value)
}
// AddCollateralPurpose allocates, assigns and returns the CollateralPurpose element.
func (i *InterestAmount1) AddCollateralPurpose() *CollateralPurpose1Choice {
	i.CollateralPurpose = new(CollateralPurpose1Choice)
	return i.CollateralPurpose
}
// AddOpeningCollateralBalance allocates, assigns and returns the OpeningCollateralBalance element.
func (i *InterestAmount1) AddOpeningCollateralBalance() *CollateralBalance1 {
	i.OpeningCollateralBalance = new(CollateralBalance1)
	return i.OpeningCollateralBalance
}
// AddClosingCollateralBalance allocates, assigns and returns the ClosingCollateralBalance element.
func (i *InterestAmount1) AddClosingCollateralBalance() *CollateralBalance1 {
	i.ClosingCollateralBalance = new(CollateralBalance1)
	return i.ClosingCollateralBalance
}
// SetStandardSettlementInstructions sets StandardSettlementInstructions from its string value.
func (i *InterestAmount1) SetStandardSettlementInstructions(value string) {
	i.StandardSettlementInstructions = (*Max140Text)(&value)
}
// SetAdditionalInformation sets AdditionalInformation from its string value.
func (i *InterestAmount1) SetAdditionalInformation(value string) {
	i.AdditionalInformation = (*Max210Text)(&value)
}
// AddReferenceDetails allocates, assigns and returns the ReferenceDetails element.
func (i *InterestAmount1) AddReferenceDetails() *Reference20 {
	i.ReferenceDetails = new(Reference20)
	return i.ReferenceDetails
}
package parser
import "github.com/google/gapid/gapis/gfxapi/gles/glsl/ast"
// This variable contains stub declarations of symbols normally present in a shader, but which
// are not yet fully supported. This allows us to parse programs referencing these symbols, even
// though the later stages (semantic analysis) will fail. The symbols commented with 1.0 are
// present only in the 1.0 version of the specification.
var builtinSymbols = [...]ast.Symbol{
	&ast.VariableSym{SymName: "gl_VertexID"},
	&ast.VariableSym{SymName: "gl_InstanceID"},
	&ast.VariableSym{SymName: "gl_Position"},
	&ast.VariableSym{SymName: "gl_PointSize"},
	&ast.VariableSym{SymName: "gl_FragCoord"},
	&ast.VariableSym{SymName: "gl_FrontFacing"},
	&ast.VariableSym{SymName: "gl_FragColor"}, // 1.0
	&ast.VariableSym{SymName: "gl_FragData"}, // 1.0
	&ast.VariableSym{SymName: "gl_FragDepth"},
	&ast.VariableSym{SymName: "gl_PointCoord"},
	&ast.VariableSym{SymName: "gl_MaxVertexAttribs"},
	&ast.VariableSym{SymName: "gl_MaxVertexUniformVectors"},
	&ast.VariableSym{SymName: "gl_MaxVaryingVectors"}, // 1.0
	&ast.VariableSym{SymName: "gl_MaxVertexTextureImageUnits"}, // 1.0
	&ast.VariableSym{SymName: "gl_MaxVertexOutputVectors"},
	&ast.VariableSym{SymName: "gl_MaxFragmentInputVectors"},
	&ast.VariableSym{SymName: "gl_MaxCombinedTextureImageUnits"},
	&ast.VariableSym{SymName: "gl_MaxTextureImageUnits"},
	&ast.VariableSym{SymName: "gl_MaxFragmentUniformVectors"},
	&ast.VariableSym{SymName: "gl_MaxDrawBuffers"},
	&ast.VariableSym{SymName: "gl_MinProgramTexelOffset"},
	&ast.VariableSym{SymName: "gl_MaxProgramTexelOffset"},
	&ast.StructSym{SymName: "gl_DepthRangeParameters"},
	&ast.VariableSym{SymName: "gl_DepthRange"},
	&ast.VariableSym{SymName: "gl_ViewID_OVR"}, // GL_OVR_multiview2
	&ast.FunctionDecl{SymName: "textureSize"},
	&ast.FunctionDecl{SymName: "texture"},
	&ast.FunctionDecl{SymName: "textureProj"},
	&ast.FunctionDecl{SymName: "textureLod"},
	&ast.FunctionDecl{SymName: "textureOffset"},
	&ast.FunctionDecl{SymName: "texelFetch"},
	&ast.FunctionDecl{SymName: "texelFetchOffset"},
	&ast.FunctionDecl{SymName: "textureProjOffset"},
	&ast.FunctionDecl{SymName: "textureLodOffset"},
	&ast.FunctionDecl{SymName: "textureProjLod"},
	&ast.FunctionDecl{SymName: "textureProjLodOffset"},
	&ast.FunctionDecl{SymName: "textureGrad"},
	&ast.FunctionDecl{SymName: "textureGradOffset"},
	&ast.FunctionDecl{SymName: "textureProjGrad"},
	&ast.FunctionDecl{SymName: "textureProjGradOffset"},
	&ast.FunctionDecl{SymName: "texture2D"}, // 1.0
	&ast.FunctionDecl{SymName: "texture2DProj"}, // 1.0
	&ast.FunctionDecl{SymName: "texture2DLod"}, // 1.0
	&ast.FunctionDecl{SymName: "texture2DProjLod"}, // 1.0
	&ast.FunctionDecl{SymName: "textureCube"}, // 1.0
	&ast.FunctionDecl{SymName: "textureCubeLod"}, // 1.0
	&ast.FunctionDecl{SymName: "shadow2DEXT"}, // GL_SAMPLER_2D_SHADOW_EXT
	&ast.FunctionDecl{SymName: "shadow2DEXTProj"}, // GL_SAMPLER_2D_SHADOW_EXT
	&ast.FunctionDecl{SymName: "dFdx"},
	&ast.FunctionDecl{SymName: "dFdy"},
	&ast.FunctionDecl{SymName: "fwidth"},
}
// FindBuiltin searches and returns the builtins for the symbol with the
// specified name. If no builtin has the specified name then nil is returned.
func FindBuiltin(name string) ast.Symbol {
for _, b := range builtinSymbols {
if b.Name() == name {
return b
}
}
return nil
} | gapis/gfxapi/gles/glsl/parser/shader_symbols.go | 0.519278 | 0.685027 | shader_symbols.go | starcoder |
package design
import "github.com/shogo82148/goa-v1/dslengine"
// Dup creates a copy of the given data type.
func Dup(d DataType) DataType {
	return newDupper().DupType(d)
}
// DupAtt creates a copy of the given attribute.
func DupAtt(att *AttributeDefinition) *AttributeDefinition {
	return newDupper().DupAttribute(att)
}
// dupper implements recursive and cycle safe copy of data types.
// The two maps memoize user and media types already copied so recursive
// (self-referencing) definitions terminate and shared types are copied once.
type dupper struct {
	dts map[string]*UserTypeDefinition
	dmts map[string]*MediaTypeDefinition
}
// newDupper creates a dupper whose memoization caches are initialized and
// empty.
func newDupper() *dupper {
	d := new(dupper)
	d.dts = make(map[string]*UserTypeDefinition)
	d.dmts = make(map[string]*MediaTypeDefinition)
	return d
}
// DupUserType creates a copy of the given user type: the type name is copied
// verbatim and the attribute definition is duplicated.
func (d *dupper) DupUserType(ut *UserTypeDefinition) *UserTypeDefinition {
	dup := new(UserTypeDefinition)
	dup.TypeName = ut.TypeName
	dup.AttributeDefinition = d.DupAttribute(ut.AttributeDefinition)
	return dup
}
// DupAttribute creates a copy of the given attribute. The validation
// definition is cloned; every other field is copied by assignment (so maps
// and interface-valued fields remain shared with the original).
func (d *dupper) DupAttribute(att *AttributeDefinition) *AttributeDefinition {
	dup := &AttributeDefinition{
		Type: att.Type,
		Description: att.Description,
		Metadata: att.Metadata,
		DefaultValue: att.DefaultValue,
		NonZeroAttributes: att.NonZeroAttributes,
		View: att.View,
		DSLFunc: att.DSLFunc,
		Example: att.Example,
	}
	if att.Validation != nil {
		dup.Validation = att.Validation.Dup()
	}
	return dup
}
// DupType creates a copy of the given data type.
//
// Primitives are immutable and returned as-is. User and media types are
// registered in the dupper caches *before* their attributes are copied so
// that recursive (self-referencing) type definitions terminate.
func (d *dupper) DupType(t DataType) DataType {
	switch actual := t.(type) {
	case Primitive:
		return t
	case *Array:
		return &Array{ElemType: d.DupAttribute(actual.ElemType)}
	case Object:
		res := make(Object, len(actual))
		for n, att := range actual {
			res[n] = d.DupAttribute(att)
		}
		return res
	case *Hash:
		return &Hash{
			KeyType: d.DupAttribute(actual.KeyType),
			ElemType: d.DupAttribute(actual.ElemType),
		}
	case *UserTypeDefinition:
		// Return the memoized copy if this user type was already duplicated
		// (this is also what breaks cycles).
		if u, ok := d.dts[actual.TypeName]; ok {
			return u
		}
		u := &UserTypeDefinition{
			TypeName: actual.TypeName,
		}
		// Cache before copying the attribute: the attribute may (indirectly)
		// reference this very type.
		d.dts[u.TypeName] = u
		u.AttributeDefinition = d.DupAttribute(actual.AttributeDefinition)
		return u
	case *MediaTypeDefinition:
		if m, ok := d.dmts[actual.Identifier]; ok {
			return m
		}
		// Links, Views and Resource are shared with the original, not copied.
		m := &MediaTypeDefinition{
			Identifier: actual.Identifier,
			Links: actual.Links,
			Views: actual.Views,
			Resource: actual.Resource,
		}
		// Cache before duplicating the embedded user type, for the same
		// cycle-breaking reason as above.
		d.dmts[actual.Identifier] = m
		m.UserTypeDefinition = d.DupUserType(actual.UserTypeDefinition)
		return m
	}
	panic("unknown type " + t.Name())
}
package models
import (
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"xorm.io/builder"
)
// consistencyCheckable is implemented by model types that can verify their
// denormalized fields against the actual database contents.
type consistencyCheckable interface {
	checkForConsistency(t *testing.T)
}
// CheckConsistencyForAll test that the entire database is consistent
func CheckConsistencyForAll(t *testing.T) {
CheckConsistencyFor(t,
&User{},
&Repository{},
&Issue{},
&PullRequest{},
&Milestone{},
&Label{},
&Team{},
&Action{})
}
// CheckConsistencyFor tests that all matching database entries are consistent.
// Each bean is a zero-value model pointer identifying the table to load.
func CheckConsistencyFor(t *testing.T, beansToCheck ...interface{}) {
	for _, bean := range beansToCheck {
		// Build a *[]T for the bean's type T via reflection so x.Find can
		// load every row of the corresponding table into it.
		sliceType := reflect.SliceOf(reflect.TypeOf(bean))
		sliceValue := reflect.MakeSlice(sliceType, 0, 10)
		ptrToSliceValue := reflect.New(sliceType)
		ptrToSliceValue.Elem().Set(sliceValue)
		assert.NoError(t, x.Table(bean).Find(ptrToSliceValue.Interface()))
		sliceValue = ptrToSliceValue.Elem()
		// Check every loaded row; each model must implement
		// consistencyCheckable.
		for i := 0; i < sliceValue.Len(); i++ {
			entity := sliceValue.Index(i).Interface()
			checkable, ok := entity.(consistencyCheckable)
			if !ok {
				t.Errorf("Expected %+v (of type %T) to be checkable for consistency",
					entity, entity)
			} else {
				checkable.checkForConsistency(t)
			}
		}
	}
}
// getCount returns the count of database entries matching bean, failing the
// test on query error.
func getCount(t *testing.T, e Engine, bean interface{}) int64 {
	count, err := e.Count(bean)
	assert.NoError(t, err)
	return count
}
// assertCount tests that the count of database entries matching bean equals
// expected.
func assertCount(t *testing.T, bean interface{}, expected int) {
	assert.EqualValues(t, expected, getCount(t, x, bean),
		"Failed consistency test, the counted bean (of type %T) was %+v", bean, bean)
}
// checkForConsistency verifies the user's denormalized counters against the
// rows actually present in the related tables.
func (user *User) checkForConsistency(t *testing.T) {
	assertCount(t, &Repository{OwnerID: user.ID}, user.NumRepos)
	assertCount(t, &Star{UID: user.ID}, user.NumStars)
	assertCount(t, &OrgUser{OrgID: user.ID}, user.NumMembers)
	assertCount(t, &Team{OrgID: user.ID}, user.NumTeams)
	assertCount(t, &Follow{UserID: user.ID}, user.NumFollowing)
	assertCount(t, &Follow{FollowID: user.ID}, user.NumFollowers)
	// Only organizations may have members and teams.
	if user.Type != UserTypeOrganization {
		assert.EqualValues(t, 0, user.NumMembers)
		assert.EqualValues(t, 0, user.NumTeams)
	}
}
// checkForConsistency verifies the repository's denormalized counters
// (stars, watches, issues, pulls, milestones, forks) against the database.
func (repo *Repository) checkForConsistency(t *testing.T) {
	assert.Equal(t, repo.LowerName, strings.ToLower(repo.Name), "repo: %+v", repo)
	assertCount(t, &Star{RepoID: repo.ID}, repo.NumStars)
	assertCount(t, &Milestone{RepoID: repo.ID}, repo.NumMilestones)
	assertCount(t, &Repository{ForkID: repo.ID}, repo.NumForks)
	if repo.IsFork {
		AssertExistsAndLoadBean(t, &Repository{ID: repo.ForkID})
	}
	// RepoWatchModeDont rows exist but must not be counted as watches.
	actual := getCount(t, x.Where("Mode<>?", RepoWatchModeDont), &Watch{RepoID: repo.ID})
	assert.EqualValues(t, repo.NumWatches, actual,
		"Unexpected number of watches for repo %+v", repo)
	actual = getCount(t, x.Where("is_pull=?", false), &Issue{RepoID: repo.ID})
	assert.EqualValues(t, repo.NumIssues, actual,
		"Unexpected number of issues for repo %+v", repo)
	actual = getCount(t, x.Where("is_pull=? AND is_closed=?", false, true), &Issue{RepoID: repo.ID})
	assert.EqualValues(t, repo.NumClosedIssues, actual,
		"Unexpected number of closed issues for repo %+v", repo)
	actual = getCount(t, x.Where("is_pull=?", true), &Issue{RepoID: repo.ID})
	assert.EqualValues(t, repo.NumPulls, actual,
		"Unexpected number of pulls for repo %+v", repo)
	actual = getCount(t, x.Where("is_pull=? AND is_closed=?", true, true), &Issue{RepoID: repo.ID})
	assert.EqualValues(t, repo.NumClosedPulls, actual,
		"Unexpected number of closed pulls for repo %+v", repo)
	actual = getCount(t, x.Where("is_closed=?", true), &Milestone{RepoID: repo.ID})
	assert.EqualValues(t, repo.NumClosedMilestones, actual,
		"Unexpected number of closed milestones for repo %+v", repo)
}
// checkForConsistency verifies the issue's comment counter and, for pulls,
// that the associated PullRequest shares the issue's index.
func (issue *Issue) checkForConsistency(t *testing.T) {
	actual := getCount(t, x.Where("type=?", CommentTypeComment), &Comment{IssueID: issue.ID})
	assert.EqualValues(t, issue.NumComments, actual,
		"Unexpected number of comments for issue %+v", issue)
	if issue.IsPull {
		pr := AssertExistsAndLoadBean(t, &PullRequest{IssueID: issue.ID}).(*PullRequest)
		assert.EqualValues(t, pr.Index, issue.Index)
	}
}
// checkForConsistency verifies the pull request's backing issue exists, is a
// pull, and shares the same index.
func (pr *PullRequest) checkForConsistency(t *testing.T) {
	issue := AssertExistsAndLoadBean(t, &Issue{ID: pr.IssueID}).(*Issue)
	assert.True(t, issue.IsPull)
	assert.EqualValues(t, issue.Index, pr.Index)
}
// checkForConsistency verifies the milestone's issue counters.
func (milestone *Milestone) checkForConsistency(t *testing.T) {
	assertCount(t, &Issue{MilestoneID: milestone.ID}, milestone.NumIssues)
	actual := getCount(t, x.Where("is_closed=?", true), &Issue{MilestoneID: milestone.ID})
	assert.EqualValues(t, milestone.NumClosedIssues, actual,
		"Unexpected number of closed issues for milestone %+v", milestone)
}
// checkForConsistency verifies the label's issue counters against the
// issue_label join table.
func (label *Label) checkForConsistency(t *testing.T) {
	issueLabels := make([]*IssueLabel, 0, 10)
	assert.NoError(t, x.Find(&issueLabels, &IssueLabel{LabelID: label.ID}))
	assert.EqualValues(t, label.NumIssues, len(issueLabels),
		"Unexpected number of issue for label %+v", label)
	issueIDs := make([]int64, len(issueLabels))
	for i, issueLabel := range issueLabels {
		issueIDs[i] = issueLabel.IssueID
	}
	// An empty IN() clause would be invalid SQL, so guard the query.
	expected := int64(0)
	if len(issueIDs) > 0 {
		expected = getCount(t, x.In("id", issueIDs).Where("is_closed=?", true), &Issue{})
	}
	assert.EqualValues(t, expected, label.NumClosedIssues,
		"Unexpected number of closed issues for label %+v", label)
}
// checkForConsistency verifies the team's member and repository counters.
func (team *Team) checkForConsistency(t *testing.T) {
	assertCount(t, &TeamUser{TeamID: team.ID}, team.NumMembers)
	assertCount(t, &TeamRepo{TeamID: team.ID}, team.NumRepos)
}
// checkForConsistency verifies the action's privacy flag matches its repo's.
func (action *Action) checkForConsistency(t *testing.T) {
	repo := AssertExistsAndLoadBean(t, &Repository{ID: action.RepoID}).(*Repository)
	assert.Equal(t, repo.IsPrivate, action.IsPrivate, "action: %+v", action)
}
// CountOrphanedLabels returns the count of labels which are broken and not
// accessible via the UI anymore: labels with no repo/org reference, labels
// whose repo no longer exists, and labels whose org no longer exists.
func CountOrphanedLabels() (int64, error) {
	noref, err := x.Table("label").Where("repo_id=? AND org_id=?", 0, 0).Count("label.id")
	if err != nil {
		return 0, err
	}
	norepo, err := x.Table("label").
		Join("LEFT", "repository", "label.repo_id=repository.id").
		Where(builder.IsNull{"repository.id"}).And(builder.Gt{"label.repo_id": 0}).
		Count("id")
	if err != nil {
		return 0, err
	}
	noorg, err := x.Table("label").
		Join("LEFT", "`user`", "label.org_id=`user`.id").
		Where(builder.IsNull{"`user`.id"}).And(builder.Gt{"label.org_id": 0}).
		Count("id")
	if err != nil {
		return 0, err
	}
	return noref + norepo + noorg, nil
}
// DeleteOrphanedLabels deletes labels which are broken and not accessible via
// the UI anymore (same three categories counted by CountOrphanedLabels).
func DeleteOrphanedLabels() error {
	// delete labels with no reference
	if _, err := x.Table("label").Where("repo_id=? AND org_id=?", 0, 0).Delete(new(Label)); err != nil {
		return err
	}
	// delete labels with none existing repos
	if _, err := x.In("id", builder.Select("label.id").From("label").
		Join("LEFT", "repository", "label.repo_id=repository.id").
		Where(builder.IsNull{"repository.id"}).And(builder.Gt{"label.repo_id": 0})).
		Delete(Label{}); err != nil {
		return err
	}
	// delete labels with none existing orgs
	if _, err := x.In("id", builder.Select("label.id").From("label").
		Join("LEFT", "`user`", "label.org_id=`user`.id").
		Where(builder.IsNull{"`user`.id"}).And(builder.Gt{"label.org_id": 0})).
		Delete(Label{}); err != nil {
		return err
	}
	return nil
}
// CountOrphanedIssues counts issues without a repo.
func CountOrphanedIssues() (int64, error) {
	return x.Table("issue").
		Join("LEFT", "repository", "issue.repo_id=repository.id").
		Where(builder.IsNull{"repository.id"}).
		Count("id")
}
// DeleteOrphanedIssues deletes issues without a repo. Database rows are
// removed inside a transaction; attachment files are removed from disk only
// after the transaction commits, so a rollback never loses files.
func DeleteOrphanedIssues() error {
	sess := x.NewSession()
	defer sess.Close()
	if err := sess.Begin(); err != nil {
		return err
	}
	// Collect the distinct (dangling) repo ids referenced by orphaned issues.
	var ids []int64
	if err := sess.Table("issue").Distinct("issue.repo_id").
		Join("LEFT", "repository", "issue.repo_id=repository.id").
		Where(builder.IsNull{"repository.id"}).GroupBy("issue.repo_id").
		Find(&ids); err != nil {
		return err
	}
	// Delete the issues repo-by-repo, accumulating attachment paths to clean
	// up once the transaction is committed.
	var attachmentPaths []string
	for i := range ids {
		paths, err := deleteIssuesByRepoID(sess, ids[i])
		if err != nil {
			return err
		}
		attachmentPaths = append(attachmentPaths, paths...)
	}
	if err := sess.Commit(); err != nil {
		return err
	}
	// Remove issue attachment files.
	for i := range attachmentPaths {
		removeAllWithNotice(x, "Delete issue attachment", attachmentPaths[i])
	}
	return nil
}
// CountOrphanedObjects counts rows of `subject` which have no existing
// `refobject` anymore, joined via joinCond.
func CountOrphanedObjects(subject, refobject, joinCond string) (int64, error) {
	return x.Table("`"+subject+"`").
		Join("LEFT", refobject, joinCond).
		Where(builder.IsNull{"`" + refobject + "`.id"}).
		Count("id")
}
// DeleteOrphanedObjects deletes rows of `subject` which have no existing
// `refobject` anymore, joined via joinCond.
func DeleteOrphanedObjects(subject, refobject, joinCond string) error {
	_, err := x.In("id", builder.Select("`"+subject+"`.id").
		From("`"+subject+"`").
		Join("LEFT", "`"+refobject+"`", joinCond).
		Where(builder.IsNull{"`" + refobject + "`.id"})).
		Delete("`" + subject + "`")
	return err
}
// CountNullArchivedRepository counts the number of repositories with is_archived is null
func CountNullArchivedRepository() (int64, error) {
	return x.Where(builder.IsNull{"is_archived"}).Count(new(Repository))
}
// FixNullArchivedRepository sets is_archived to false where it is null
func FixNullArchivedRepository() (int64, error) {
	return x.Where(builder.IsNull{"is_archived"}).Cols("is_archived").Update(&Repository{
		IsArchived: false,
	})
}
package writer
import (
"time"
as "github.com/whisperverse/activitystream"
)
// Object is a mutable JSON-style map used to build ActivityStreams objects
// property by property. All builder methods return the receiver so calls can
// be chained.
type Object map[string]interface{}
// NewObject returns a new, empty Object.
func NewObject() Object {
	return Object{}
}
// ID provides the globally unique identifier for an Object or Link
func (object Object) ID(value string) Object {
	// Assigned directly (not via Property) because an object has exactly one id.
	object[as.PropertyID] = value
	return object
}
// Type identifies the Object or Link type. If multiple values are present, then only the first value is returned.
func (object Object) Type(value interface{}) Object {
	return object.Property(as.PropertyType, value)
}
// Actor describes one or more entities that either performed or are expected to perform the activity. Any single activity can have multiple actors. The actor MAY be specified using an indirect Link.
func (object Object) Actor(value interface{}) Object {
	return object.Property(as.PropertyActor, value)
}
// Attachment identifies a resource attached or related to an object that potentially requires special handling. The intent is to provide a model that is at least semantically similar to attachments in email.
func (object Object) Attachment(value interface{}) Object {
	return object.Property(as.PropertyAttachment, value)
}
// AttributedTo identifies one or more entities to which this object is attributed. The attributed entities might not be Actors. For instance, an object might be attributed to the completion of another activity.
func (object Object) AttributedTo(value interface{}) Object {
	return object.Property(as.PropertyAttributedTo, value)
}
// Audience identifies one or more entities that represent the total population of entities for which the object can considered to be relevant.
func (object Object) Audience(value interface{}) Object {
	return object.Property(as.PropertyAudience, value)
}
// Bcc identifies one or more Objects that are part of the private secondary audience of this Object.
func (object Object) Bcc(value interface{}) Object {
	return object.Property(as.PropertyBcc, value)
}
// BTo identifies an Object that is part of the private primary audience of this Object.
func (object Object) BTo(value interface{}) Object {
	return object.Property(as.PropertyBTo, value)
}
// Cc identifies an Object that is part of the public secondary audience of this Object.
func (object Object) Cc(value interface{}) Object {
	return object.Property(as.PropertyCc, value)
}
// Closed indicates that a question has been closed, and answers are no longer accepted.
func (object Object) Closed(value time.Time) Object {
	return object.Property(as.PropertyClosed, value)
}
// Content is the content or textual representation of the Object encoded as a JSON string. By default, the value of content is HTML. The mediaType property can be used in the object to indicate a different content type. The content MAY be expressed using multiple language-tagged values.
func (object Object) Content(value string, language string) Object {
	return object.Map(as.PropertyContent, value, language)
}
// Context Identifies the context within which the object exists or an activity was performed. The notion of "context" used is intentionally vague. The intended function is to serve as a means of grouping objects and activities that share a common originating context or purpose. An example could be all activities relating to a common project or event.
func (object Object) Context(value interface{}) Object {
	// TODO: incomplete
	return object
}
// Duration - when the object describes a time-bound resource, such as an audio or video, a meeting, etc, the duration property indicates the object's approximate duration. The value MUST be expressed as an xsd:duration as defined by [ xmlschema11-2], section 3.3.6 (e.g. a period of 5 seconds is represented as "PT5S").
func (object Object) Duration(value time.Duration) Object {
	return object.SimpleValue(as.PropertyDuration, value)
}
// EndTime represents the date and time describing the actual or expected ending time of the object. When used with an Activity object, for instance, the endTime property specifies the moment the activity concluded or is expected to conclude.
func (object Object) EndTime(value time.Time) Object {
	return object.Property(as.PropertyEndTime, value)
}
// Generator identifies the entity (e.g. an application) that generated the object.
func (object Object) Generator(value interface{}) Object {
	return object.Property(as.PropertyGenerator, value)
}
// Icon indicates an entity that describes an icon for this object. The image should have an aspect ratio of one (horizontal) to one (vertical) and should be suitable for presentation at a small size.
func (object Object) Icon(value interface{}) Object {
	return object.Property(as.PropertyIcon, value)
}
// Image indicates an entity that describes an image for this object. Unlike the icon property, there are no aspect ratio or display size limitations assumed.
func (object Object) Image(value interface{}) Object {
	return object.Property(as.PropertyImage, value)
}
// InReplyTo indicates one or more entities for which this object is considered a response.
func (object Object) InReplyTo(value interface{}) Object {
	return object.Property(as.PropertyInReplyTo, value)
}
// Instrument identifies one or more objects used (or to be used) in the completion of an Activity.
func (object Object) Instrument(value interface{}) Object {
	return object.Property(as.PropertyInstrument, value)
}
// Location indicates one or more physical or logical locations associated with the object.
func (object Object) Location(value interface{}) Object {
	return object.Property(as.PropertyLocation, value)
}
// Name is a simple, human-readable, plain-text name for the object. HTML markup MUST NOT be included. The name MAY be expressed using multiple language-tagged values.
func (object Object) Name(value string, language string) Object {
	return object.Map(as.PropertyName, value, language)
}
// Object sets the object property of an activity.
func (object Object) Object(value interface{}) Object {
	return object.Property(as.PropertyObject, value)
}
// Origin describes an indirect object of the activity from which the activity is directed. The precise meaning of the origin is the object of the English preposition "from". For instance, in the activity "John moved an item to List B from List A", the origin of the activity is "List A".
func (object Object) Origin(value interface{}) Object {
	return object.Property(as.PropertyOrigin, value)
}
// Preview identifies an entity that provides a preview of this object.
func (object Object) Preview(value interface{}) Object {
	return object.Property(as.PropertyPreview, value)
}
// Published represents the date and time at which the object was published
func (object Object) Published(value time.Time) Object {
	return object.Property(as.PropertyPublished, value)
}
// Replies identifies a Collection containing objects considered to be responses to this object.
/*func (object Object) Replies(value interface{}) Collection {
	return Collection{}
}*/
// StartTime represents the date and time describing the actual or expected starting time of the object. When used with an Activity object, for instance, the startTime property specifies the moment the activity began or is scheduled to begin
func (object Object) StartTime(value time.Time) Object {
	return object.Property(as.PropertyStartTime, value)
}
// Summary is a natural language summarization of the object encoded as HTML. Multiple language tagged summaries MAY be provided.
func (object Object) Summary(value string, language string) Object {
	return object.Map(as.PropertySummary, value, language)
}
// Tag represents one or more "tags" that have been associated with an objects. A tag can be any kind of Object. The key difference between attachment and tag is that the former implies association by inclusion, while the latter implies associated by reference.
func (object Object) Tag(value interface{}) Object {
	return object.Property(as.PropertyTag, value)
}
// Target sets the target property of an activity.
func (object Object) Target(value interface{}) Object {
	return object.Property(as.PropertyTarget, value)
}
// To identifies an entity considered to be part of the public primary audience of an Object
func (object Object) To(value interface{}) Object {
	return object.Property(as.PropertyTo, value)
}
// Updated represents the date and time at which the object was updated
func (object Object) Updated(value time.Time) Object {
	return object.Property(as.PropertyUpdated, value)
}
// URL identifies one or more links to representations of the object
func (object Object) URL(value interface{}) Object {
	return object.Property(as.PropertyURL, value)
}
// MediaType identifies the MIME media type of the referenced resource. When used on an Object, identifies the MIME media type of the value of the content property. If not specified, the content property is assumed to contain text/html content.
func (object Object) MediaType(value interface{}) Object {
	return object.Property(as.PropertyMediaType, value)
}
// HrefLang hints as to the language used by the target resource. Value MUST be a [BCP47] Language-Tag.
func (object Object) HrefLang(value interface{}) Object {
	return object.Property(as.PropertyHrefLang, value)
}
// Rel represents a link relation associated with a Link. The value MUST conform to both the [HTML5] and [RFC5988] "link relation" definitions.
func (object Object) Rel(value interface{}) Object {
	return object.Property(as.PropertyRel, value)
}
// Height specifies a hint as to the rendering height in device-independent pixels of the linked resource.
func (object Object) Height(value int64) Object {
	return object.SimpleValue(as.PropertyHeight, value)
}
// Width specifies a hint as to the rendering width in device-independent pixels of the linked resource.
func (object Object) Width(value int64) Object {
	return object.SimpleValue(as.PropertyWidth, value)
}
// Property appends value to the named property. An unset property receives
// the value directly; a property holding a single value is promoted to a
// slice; a property already holding a slice grows by one element.
func (object Object) Property(property string, value interface{}) Object {
	existing, found := object[property]
	if !found {
		// First value for this property: store it as-is.
		object[property] = value
		return object
	}
	if list, ok := existing.([]interface{}); ok {
		// Already a list: append.
		object[property] = append(list, value)
	} else {
		// Single value: promote to a list containing old and new.
		object[property] = []interface{}{existing, value}
	}
	return object
}
// Map updates values that may/may-not be multi-language maps. It stores the
// first value seen as the plain property, and additionally records the value
// under propertyMap[language] when a language tag is given.
func (object Object) Map(property string, value string, language string) Object {
	// If property is empty, do not change
	if property == "" {
		return object
	}
	// If value is empty, then do not change
	if value == "" {
		return object
	}
	// If we don't already have a default property, then add it now.
	// NOTE: an existing plain value is deliberately left untouched; only the
	// language map below gains the new entry.
	if _, ok := object[property]; !ok {
		object[property] = value
	}
	// If language is empty, then do not put an entry in the map
	if language == "" {
		return object
	}
	// Otherwise, add an entry into the language map, too.
	// If we don't already have a propertyMap, then add it now
	propertyMap := property + "Map"
	if _, ok := object[propertyMap]; !ok {
		object[propertyMap] = map[string]string{}
	}
	// Safely set the value of the propertyMap
	if propertyMap, ok := object[propertyMap].(map[string]string); ok {
		propertyMap[language] = value
	}
	// Success!
	return object
}
// SimpleValue assigns a value to a property with no other shenanigans: unlike
// Property, any existing value is overwritten rather than appended to.
func (object Object) SimpleValue(property string, value interface{}) Object {
	object[property] = value
	return object
}
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
* HighlightValues
When true, values in dumps are highlighted using colours/colors
suitable for ANSI-compatible displays.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
package keeper
import (
"fmt"
"math"
"time"
sdk "github.com/cosmos/cosmos-sdk/types"
hardtypes "github.com/kava-labs/kava/x/hard/types"
"github.com/kava-labs/kava/x/incentive/types"
)
// AccumulateHardSupplyRewards updates the rewards accumulated for the input reward period
// by advancing the global supply reward indexes for the period's collateral type.
// It exits early (only bumping the stored accrual time where noted) when no time has
// elapsed, when the period emits no rewards, or when nothing is supplied.
func (k Keeper) AccumulateHardSupplyRewards(ctx sdk.Context, rewardPeriod types.MultiRewardPeriod) error {
	previousAccrualTime, found := k.GetPreviousHardSupplyRewardAccrualTime(ctx, rewardPeriod.CollateralType)
	if !found {
		// First accrual for this collateral type: record the block time and start
		// accumulating from the next block.
		k.SetPreviousHardSupplyRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	timeElapsed := CalculateTimeElapsed(rewardPeriod.Start, rewardPeriod.End, ctx.BlockTime(), previousAccrualTime)
	if timeElapsed.IsZero() {
		return nil
	}
	if rewardPeriod.RewardsPerSecond.IsZero() {
		// Nothing to distribute; just move the accrual clock forward.
		k.SetPreviousHardSupplyRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	totalSuppliedCoins, foundTotalSuppliedCoins := k.hardKeeper.GetSuppliedCoins(ctx)
	if !foundTotalSuppliedCoins {
		k.SetPreviousHardSupplyRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	totalSupplied := totalSuppliedCoins.AmountOf(rewardPeriod.CollateralType).ToDec()
	if totalSupplied.IsZero() {
		// No supply to distribute over; avoids a division by zero below.
		k.SetPreviousHardSupplyRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	previousRewardIndexes, found := k.GetHardSupplyRewardIndexes(ctx, rewardPeriod.CollateralType)
	if !found {
		// No indexes stored yet: seed a zero index for every reward denom in the period.
		for _, rewardCoin := range rewardPeriod.RewardsPerSecond {
			rewardIndex := types.NewRewardIndex(rewardCoin.Denom, sdk.ZeroDec())
			previousRewardIndexes = append(previousRewardIndexes, rewardIndex)
		}
		k.SetHardSupplyRewardIndexes(ctx, rewardPeriod.CollateralType, previousRewardIndexes)
	}
	hardFactor, found := k.hardKeeper.GetSupplyInterestFactor(ctx, rewardPeriod.CollateralType)
	if !found {
		k.SetPreviousHardSupplyRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
		return nil
	}
	newRewardIndexes := previousRewardIndexes
	for _, rewardCoin := range rewardPeriod.RewardsPerSecond {
		// Total rewards emitted for this denom over the elapsed time.
		newRewards := rewardCoin.Amount.ToDec().Mul(timeElapsed.ToDec())
		previousRewardIndex, found := previousRewardIndexes.GetRewardIndex(rewardCoin.Denom)
		if !found {
			previousRewardIndex = types.NewRewardIndex(rewardCoin.Denom, sdk.ZeroDec())
		}
		// Calculate new reward factor and update reward index
		rewardFactor := newRewards.Mul(hardFactor).Quo(totalSupplied)
		newRewardFactorValue := previousRewardIndex.RewardFactor.Add(rewardFactor)
		newRewardIndex := types.NewRewardIndex(rewardCoin.Denom, newRewardFactorValue)
		i, found := newRewardIndexes.GetFactorIndex(rewardCoin.Denom)
		if found {
			newRewardIndexes[i] = newRewardIndex
		} else {
			newRewardIndexes = append(newRewardIndexes, newRewardIndex)
		}
	}
	k.SetHardSupplyRewardIndexes(ctx, rewardPeriod.CollateralType, newRewardIndexes)
	k.SetPreviousHardSupplyRewardAccrualTime(ctx, rewardPeriod.CollateralType, ctx.BlockTime())
	return nil
}
// InitializeHardSupplyReward initializes the supply-side of a hard liquidity provider claim
// by creating the claim and setting the supply reward factor index
func (k Keeper) InitializeHardSupplyReward(ctx sdk.Context, deposit hardtypes.Deposit) {
	claim, ok := k.GetHardLiquidityProviderClaim(ctx, deposit.Depositor)
	if !ok {
		// No claim for this depositor yet: start from an empty one.
		claim = types.NewHardLiquidityProviderClaim(deposit.Depositor, sdk.Coins{}, nil, nil)
	}
	// Snapshot the current global supply reward indexes for every deposited denom.
	var indexes types.MultiRewardIndexes
	for _, depositCoin := range deposit.Amount {
		globalIndexes, ok := k.GetHardSupplyRewardIndexes(ctx, depositCoin.Denom)
		if !ok {
			globalIndexes = types.RewardIndexes{}
		}
		indexes = indexes.With(depositCoin.Denom, globalIndexes)
	}
	claim.SupplyRewardIndexes = indexes
	k.SetHardLiquidityProviderClaim(ctx, claim)
}
// SynchronizeHardSupplyReward updates the claim object by adding any accumulated rewards
// and updating the reward index value
func (k Keeper) SynchronizeHardSupplyReward(ctx sdk.Context, deposit hardtypes.Deposit) {
	claim, ok := k.GetHardLiquidityProviderClaim(ctx, deposit.Depositor)
	if !ok {
		return
	}
	for _, depositCoin := range deposit.Amount {
		globalIndexes, ok := k.GetHardSupplyRewardIndexes(ctx, depositCoin.Denom)
		if !ok {
			// Missing global indexes means either the denom has not started accruing
			// rewards yet (no reward period in params, or its start time has not been
			// reached), or the indexes were wrongly deleted from state (factors should
			// never be removed while unsynced claims exist). Skipping the sync avoids
			// storing unnecessary factors for non-rewarded denoms, and avoids
			// panicking below if a global factor was wrongly removed.
			continue
		}
		userIndexes, ok := claim.SupplyRewardIndexes.Get(depositCoin.Denom)
		if !ok {
			// User indexes are normally present. They are only missing when a denom
			// became rewarded after the claim was created (a reward period was added
			// to params), so assume the starting value for any global reward index:
			// an empty slice.
			userIndexes = types.RewardIndexes{}
		}
		newRewards, err := k.CalculateRewards(userIndexes, globalIndexes, depositCoin.Amount.ToDec())
		if err != nil {
			// Global reward factors must never decrease or disappear between the old
			// and new indexes; doing so would produce a negative rewards update.
			panic(fmt.Sprintf("corrupted global reward indexes found: %v", err))
		}
		claim.Reward = claim.Reward.Add(newRewards...)
		claim.SupplyRewardIndexes = claim.SupplyRewardIndexes.With(depositCoin.Denom, globalIndexes)
	}
	k.SetHardLiquidityProviderClaim(ctx, claim)
}
// UpdateHardSupplyIndexDenoms adds any new deposit denoms to the claim's supply reward index
func (k Keeper) UpdateHardSupplyIndexDenoms(ctx sdk.Context, deposit hardtypes.Deposit) {
	claim, ok := k.GetHardLiquidityProviderClaim(ctx, deposit.Depositor)
	if !ok {
		claim = types.NewHardLiquidityProviderClaim(deposit.Depositor, sdk.Coins{}, nil, nil)
	}
	depositDenoms := getDenoms(deposit.Amount)
	claimDenoms := claim.SupplyRewardIndexes.GetCollateralTypes()
	indexes := claim.SupplyRewardIndexes
	// Add a multi-reward index for every denom that is deposited but not yet tracked.
	for _, denom := range setDifference(depositDenoms, claimDenoms) {
		globalIndexes, ok := k.GetHardSupplyRewardIndexes(ctx, denom)
		if !ok {
			globalIndexes = types.RewardIndexes{}
		}
		indexes = indexes.With(denom, globalIndexes)
	}
	// Drop the multi-reward index of every denom that is tracked but no longer deposited.
	for _, denom := range setDifference(claimDenoms, depositDenoms) {
		indexes = indexes.RemoveRewardIndex(denom)
	}
	claim.SupplyRewardIndexes = indexes
	k.SetHardLiquidityProviderClaim(ctx, claim)
}
// SynchronizeHardLiquidityProviderClaim adds any accumulated rewards
// from both the supply side and the borrow side to the owner's claim.
func (k Keeper) SynchronizeHardLiquidityProviderClaim(ctx sdk.Context, owner sdk.AccAddress) {
	// Supply-side rewards, if the owner has a deposit.
	if deposit, ok := k.hardKeeper.GetDeposit(ctx, owner); ok {
		k.SynchronizeHardSupplyReward(ctx, deposit)
	}
	// Borrow-side rewards, if the owner has a borrow.
	if borrow, ok := k.hardKeeper.GetBorrow(ctx, owner); ok {
		k.SynchronizeHardBorrowReward(ctx, borrow)
	}
}
// ZeroHardLiquidityProviderClaim zeroes out the claim object's rewards and returns the updated claim object
// after persisting it to state.
func (k Keeper) ZeroHardLiquidityProviderClaim(ctx sdk.Context, claim types.HardLiquidityProviderClaim) types.HardLiquidityProviderClaim {
	// Reset accumulated rewards to an empty coin set, then store the claim.
	claim.Reward = sdk.NewCoins()
	k.SetHardLiquidityProviderClaim(ctx, claim)
	return claim
}
// SimulateHardSynchronization calculates a user's outstanding hard rewards by simulating reward synchronization.
// Nothing is persisted: the updated claim is returned to the caller.
// NOTE(review): the supply-side and borrow-side loops below are parallel copies of the same
// algorithm, differing only in which indexes and amounts they read — keep them in sync.
func (k Keeper) SimulateHardSynchronization(ctx sdk.Context, claim types.HardLiquidityProviderClaim) types.HardLiquidityProviderClaim {
	// 1. Simulate Hard supply-side rewards
	for _, ri := range claim.SupplyRewardIndexes {
		globalRewardIndexes, foundGlobalRewardIndexes := k.GetHardSupplyRewardIndexes(ctx, ri.CollateralType)
		if !foundGlobalRewardIndexes {
			continue
		}
		userRewardIndexes, foundUserRewardIndexes := claim.SupplyRewardIndexes.GetRewardIndex(ri.CollateralType)
		if !foundUserRewardIndexes {
			continue
		}
		userRewardIndexIndex, foundUserRewardIndexIndex := claim.SupplyRewardIndexes.GetRewardIndexIndex(ri.CollateralType)
		if !foundUserRewardIndexIndex {
			continue
		}
		for _, globalRewardIndex := range globalRewardIndexes {
			// A missing user index means this reward denom is new; start it at zero.
			userRewardIndex, foundUserRewardIndex := userRewardIndexes.RewardIndexes.GetRewardIndex(globalRewardIndex.CollateralType)
			if !foundUserRewardIndex {
				userRewardIndex = types.NewRewardIndex(globalRewardIndex.CollateralType, sdk.ZeroDec())
				userRewardIndexes.RewardIndexes = append(userRewardIndexes.RewardIndexes, userRewardIndex)
				claim.SupplyRewardIndexes[userRewardIndexIndex].RewardIndexes = append(claim.SupplyRewardIndexes[userRewardIndexIndex].RewardIndexes, userRewardIndex)
			}
			// Rewards owed are proportional to how far the global factor has moved
			// past the user's last-synced factor.
			globalRewardFactor := globalRewardIndex.RewardFactor
			userRewardFactor := userRewardIndex.RewardFactor
			rewardsAccumulatedFactor := globalRewardFactor.Sub(userRewardFactor)
			if rewardsAccumulatedFactor.IsZero() {
				continue
			}
			deposit, found := k.hardKeeper.GetDeposit(ctx, claim.GetOwner())
			if !found {
				continue
			}
			newRewardsAmount := rewardsAccumulatedFactor.Mul(deposit.Amount.AmountOf(ri.CollateralType).ToDec()).RoundInt()
			if newRewardsAmount.IsZero() || newRewardsAmount.IsNegative() {
				continue
			}
			factorIndex, foundFactorIndex := userRewardIndexes.RewardIndexes.GetFactorIndex(globalRewardIndex.CollateralType)
			if !foundFactorIndex {
				continue
			}
			// Mark the user as synced up to the current global factor.
			claim.SupplyRewardIndexes[userRewardIndexIndex].RewardIndexes[factorIndex].RewardFactor = globalRewardIndex.RewardFactor
			newRewardsCoin := sdk.NewCoin(userRewardIndex.CollateralType, newRewardsAmount)
			claim.Reward = claim.Reward.Add(newRewardsCoin)
		}
	}
	// 2. Simulate Hard borrow-side rewards
	for _, ri := range claim.BorrowRewardIndexes {
		globalRewardIndexes, foundGlobalRewardIndexes := k.GetHardBorrowRewardIndexes(ctx, ri.CollateralType)
		if !foundGlobalRewardIndexes {
			continue
		}
		userRewardIndexes, foundUserRewardIndexes := claim.BorrowRewardIndexes.GetRewardIndex(ri.CollateralType)
		if !foundUserRewardIndexes {
			continue
		}
		userRewardIndexIndex, foundUserRewardIndexIndex := claim.BorrowRewardIndexes.GetRewardIndexIndex(ri.CollateralType)
		if !foundUserRewardIndexIndex {
			continue
		}
		for _, globalRewardIndex := range globalRewardIndexes {
			userRewardIndex, foundUserRewardIndex := userRewardIndexes.RewardIndexes.GetRewardIndex(globalRewardIndex.CollateralType)
			if !foundUserRewardIndex {
				userRewardIndex = types.NewRewardIndex(globalRewardIndex.CollateralType, sdk.ZeroDec())
				userRewardIndexes.RewardIndexes = append(userRewardIndexes.RewardIndexes, userRewardIndex)
				claim.BorrowRewardIndexes[userRewardIndexIndex].RewardIndexes = append(claim.BorrowRewardIndexes[userRewardIndexIndex].RewardIndexes, userRewardIndex)
			}
			globalRewardFactor := globalRewardIndex.RewardFactor
			userRewardFactor := userRewardIndex.RewardFactor
			rewardsAccumulatedFactor := globalRewardFactor.Sub(userRewardFactor)
			if rewardsAccumulatedFactor.IsZero() {
				continue
			}
			borrow, found := k.hardKeeper.GetBorrow(ctx, claim.GetOwner())
			if !found {
				continue
			}
			newRewardsAmount := rewardsAccumulatedFactor.Mul(borrow.Amount.AmountOf(ri.CollateralType).ToDec()).RoundInt()
			if newRewardsAmount.IsZero() || newRewardsAmount.IsNegative() {
				continue
			}
			factorIndex, foundFactorIndex := userRewardIndexes.RewardIndexes.GetFactorIndex(globalRewardIndex.CollateralType)
			if !foundFactorIndex {
				continue
			}
			claim.BorrowRewardIndexes[userRewardIndexIndex].RewardIndexes[factorIndex].RewardFactor = globalRewardIndex.RewardFactor
			newRewardsCoin := sdk.NewCoin(userRewardIndex.CollateralType, newRewardsAmount)
			claim.Reward = claim.Reward.Add(newRewardsCoin)
		}
	}
	return claim
}
// CalculateTimeElapsed calculates the number of reward-eligible seconds that have passed since the previous
// time rewards were accrued, taking into account the start and end times of the reward period.
func CalculateTimeElapsed(start, end, blockTime time.Time, previousAccrualTime time.Time) sdk.Int {
	// Nothing accrues if the period ended at or before the previous accrual,
	// or if the period has not started yet.
	periodOver := end.Before(blockTime) && (end.Before(previousAccrualTime) || end.Equal(previousAccrualTime))
	notStarted := start.After(blockTime) || start.Equal(blockTime)
	if periodOver || notStarted {
		return sdk.ZeroInt()
	}
	// Accrual can begin no earlier than the period's start time.
	from := previousAccrualTime
	if start.After(from) && start.Before(blockTime) {
		from = start
	}
	// Accrual stops at the period's end time or the current block, whichever
	// comes first.
	to := blockTime
	if end.Before(blockTime) {
		to = end
	}
	seconds := int64(math.RoundToEven(to.Sub(from).Seconds()))
	return sdk.MaxInt(sdk.ZeroInt(), sdk.NewInt(seconds))
}
// setDifference returns the elements of a that do not appear in b (A - B),
// preserving a's order. It returns nil when every element of a is in b.
func setDifference(a, b []string) (diff []string) {
	// Pre-sized struct{}-valued map: a pure membership set with no per-entry value.
	exclude := make(map[string]struct{}, len(b))
	for _, item := range b {
		exclude[item] = struct{}{}
	}
	for _, item := range a {
		if _, ok := exclude[item]; !ok {
			diff = append(diff, item)
		}
	}
	return
}
func getDenoms(coins sdk.Coins) []string {
denoms := []string{}
for _, coin := range coins {
denoms = append(denoms, coin.Denom)
}
return denoms
} | x/incentive/keeper/rewards_supply.go | 0.73659 | 0.46217 | rewards_supply.go | starcoder |
package main
import (
"fmt"
"os"
"specify"
"strings"
t "../src/_test/specify"
)
// HavePassing matches when the reporter's passing-example count equals expected.
func HavePassing(expected interface{}) reporterMatcher {
	count := func(r t.ReporterSummary) interface{} { return r.PassingCount() }
	return reporterMatcher{expected: expected, actualFunc: count}
}
// HavePending matches when the reporter's pending-example count equals expected.
func HavePending(expected interface{}) reporterMatcher {
	count := func(r t.ReporterSummary) interface{} { return r.PendingCount() }
	return reporterMatcher{expected: expected, actualFunc: count}
}
// HaveFailing matches when the reporter's failing-example count equals expected.
func HaveFailing(expected interface{}) reporterMatcher {
	count := func(r t.ReporterSummary) interface{} { return r.FailingCount() }
	return reporterMatcher{expected: expected, actualFunc: count}
}
// HaveErrors matches when the reporter's error count equals expected.
func HaveErrors(expected interface{}) reporterMatcher {
	count := func(r t.ReporterSummary) interface{} { return r.ErrorCount() }
	return reporterMatcher{expected: expected, actualFunc: count}
}
// HaveFailureIncluding matches when some failing example's title contains s.
func HaveFailureIncluding(s string) eachMatcher {
	return eachMatcher{s: s, message: "failing example", each: eachFailure, f: matchTitle}
}
// HavePendingIncluding matches when some pending example's title contains s.
func HavePendingIncluding(s string) eachMatcher {
	return eachMatcher{s: s, message: "pending example", each: eachPending, f: matchTitle}
}
// HaveErrorIncluding matches when some error report's title contains s.
func HaveErrorIncluding(s string) eachMatcher {
	return eachMatcher{s: s, message: "error", each: eachError, f: matchTitle}
}
// HaveFailureAt matches when some failure's location ends with loc.
func HaveFailureAt(loc string) eachMatcher {
	return eachMatcher{s: loc, message: "failure", each: eachFailure, f: matchLocation}
}
// HavePendingAt matches when some pending example's location ends with loc.
func HavePendingAt(loc string) eachMatcher {
	return eachMatcher{s: loc, message: "pending example", each: eachPending, f: matchLocation}
}
// HaveErrorAt matches when some error report's location ends with loc.
func HaveErrorAt(loc string) eachMatcher {
	return eachMatcher{s: loc, message: "error", each: eachError, f: matchLocation}
}
// toReporterSummary converts value to a t.ReporterSummary, or returns an
// error if value does not implement that interface.
func toReporterSummary(value interface{}) (t.ReporterSummary, os.Error) {
	reporter, ok := value.(t.ReporterSummary)
	if !ok {
		return nil, os.NewError("Not a t.ReporterSummary")
	}
	return reporter, nil
}
// reporterMatcher compares an expected value against a summary statistic
// extracted from a t.ReporterSummary by actualFunc.
type reporterMatcher struct {
	expected   interface{}
	actualFunc func(t.ReporterSummary) interface{}
}
// Should checks that the extracted summary statistic equals the expected value.
func (self reporterMatcher) Should(actual interface{}) os.Error {
	reporter, err := toReporterSummary(actual)
	if err != nil {
		return err
	}
	return specify.Be(self.expected).Should(self.actualFunc(reporter))
}
// ShouldNot checks that the extracted summary statistic does not equal the expected value.
func (self reporterMatcher) ShouldNot(actual interface{}) os.Error {
	reporter, err := toReporterSummary(actual)
	if err != nil {
		return err
	}
	return specify.Be(self.expected).ShouldNot(self.actualFunc(reporter))
}
// eachMatcher scans the reports produced by each, looking for one where
// predicate f matches the string s. message names the kind of report and is
// used in failure output.
type eachMatcher struct {
	s, message string
	each       func(t.ReporterSummary) <-chan t.Report
	f          func(t.Report, string) bool
}
// match reports whether any report yielded by the channel satisfies the
// predicate. It returns as soon as a match is found.
func (m eachMatcher) match(r t.ReporterSummary) bool {
	for report := range m.each(r) {
		if m.f(report, m.s) {
			return true
		}
	}
	return false
}
// Should checks that at least one report matches; otherwise it returns an
// error naming the kind of report that was expected.
func (m eachMatcher) Should(val interface{}) os.Error {
	reporter, err := toReporterSummary(val)
	if err != nil {
		return err
	}
	if !m.match(reporter) {
		return os.NewError(fmt.Sprintf("expected %v including `%v`", m.message, m.s))
	}
	return nil
}
// ShouldNot is not supported for eachMatcher; it always returns an error.
func (eachMatcher) ShouldNot(val interface{}) os.Error {
	return os.NewError("matcher not implemented")
}
// matchTitle reports whether the report's title contains the substring s.
func matchTitle(r t.Report, s string) bool { return strings.Contains(r.Title(), s) }
// matchLocation reports whether the report's location string ends with s.
func matchLocation(r t.Report, s string) bool {
	return strings.HasSuffix(r.Location().String(), s)
}
// eachFailure returns a channel yielding every failure report in the summary.
func eachFailure(r t.ReporterSummary) <-chan t.Report {
	return r.EachFailure()
}
// eachPending returns a channel yielding every pending report in the summary.
func eachPending(r t.ReporterSummary) <-chan t.Report {
	return r.EachPending()
}
func eachError(r t.ReporterSummary) <-chan t.Report {
return r.EachError()
} | src/spec_matchers.go | 0.742235 | 0.412589 | spec_matchers.go | starcoder |
package v1
// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
type ImageConfig struct {
	// User defines the username or UID which the process in the container should run as.
	User string `json:"User"`
	// Memory defines the memory limit, in bytes.
	Memory int64 `json:"Memory"`
	// MemorySwap defines the total memory usage limit (memory + swap).
	MemorySwap int64 `json:"MemorySwap"`
	// CPUShares is the CPU shares (relative weight vs. other containers).
	CPUShares int64 `json:"CpuShares"`
	// ExposedPorts a set of ports to expose from a container running this image.
	// The map is used as a set: only the keys carry information.
	ExposedPorts map[string]struct{} `json:"ExposedPorts"`
	// Env is a list of environment variables to be used in a container.
	Env []string `json:"Env"`
	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
	Entrypoint []string `json:"Entrypoint"`
	// Cmd defines the default arguments to the entrypoint of the container.
	Cmd []string `json:"Cmd"`
	// Volumes is a set of directories which should be created as data volumes in a container running this image.
	// Like ExposedPorts, the map is used as a set.
	Volumes map[string]struct{} `json:"Volumes"`
	// WorkingDir sets the current working directory of the entrypoint process in the container.
	WorkingDir string `json:"WorkingDir"`
}
// RootFS describes a layer content addresses
type RootFS struct {
	// Type is the type of the rootfs.
	Type string `json:"type"`
	// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
	DiffIDs []string `json:"diff_ids"`
}
// History describes the history of a layer.
type History struct {
	// Created is the creation time.
	Created string `json:"created"`
	// CreatedBy is the command which created the layer.
	CreatedBy string `json:"created_by"`
	// Author is the author of the build point.
	Author string `json:"author"`
	// Comment is a custom message set when creating the layer.
	Comment string `json:"comment"`
	// EmptyLayer is used to mark if the history item created a filesystem diff.
	EmptyLayer bool `json:"empty_layer"`
}
// Image is the JSON structure which describes some basic information about the image.
type Image struct {
// Created defines an ISO-8601 formatted combined date and time at which the image was created.
Created string `json:"created"`
// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
Author string `json:"author"`
// Architecture is the CPU architecture which the binaries in this image are built to run on.
Architecture string `json:"architecture"`
// OS is the name of the operating system which the image is built to run on.
OS string `json:"os"`
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config"`
// RootFS references the layer content addresses used by the image.
RootFS RootFS `json:"rootfs"`
// History describes the history of each layer.
History []History `json:"history"`
} | vendor/github.com/coreos/rkt/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go | 0.798619 | 0.411939 | config.go | starcoder |
package nune
import (
"errors"
"github.com/vorduin/slices"
)
// Cast casts a Tensor's underlying type to the given numeric type.
func Cast[T Number, V Number](t Tensor[V]) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		}
		// Propagate the error on a tensor of the target type.
		return Tensor[T]{
			Err: t.Err,
		}
	}
	src := t.Ravel()
	converted := slices.WithLen[T](t.Numel())
	for i := range converted {
		converted[i] = T(src[i])
	}
	// The view (shape/stride/offset) is shared; only the data buffer is new.
	return Tensor[T]{
		data:   converted,
		shape:  t.shape,
		stride: t.stride,
		offset: t.offset,
	}
}
// Clone clones the Tensor and its underlying view into its data buffer.
func (t Tensor[T]) Clone() Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		}
		return t
	}
	// Copy the flattened data and the view metadata so the clone is fully
	// independent of the original.
	return Tensor[T]{
		data:   slices.Clone(t.Ravel()),
		shape:  slices.Clone(t.shape),
		stride: slices.Clone(t.stride),
	}
}
// Reshape modifies the Tensor's indexing scheme.
// The data buffer is shared; only the shape/stride view changes.
func (t Tensor[T]) Reshape(shape ...int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	// An empty shape produces a rank-0 (scalar) view, which is only allowed
	// when the tensor holds at most one element.
	if len(shape) == 0 && t.Numel() <= 1 {
		return Tensor[T]{
			data:   t.data,
			offset: t.offset,
		}
	} else {
		err := verifyGoodShape(shape...)
		if err != nil {
			if EnvConfig.Interactive {
				panic(err)
			} else {
				t.Err = err
				return t
			}
		}
		newstride := slices.WithLen[int](len(shape))
		if len(shape) <= len(t.shape) {
			// Rank shrinks or stays the same: keep the innermost strides.
			copy(newstride, t.stride[len(t.stride)-len(shape):])
		} else {
			// Rank grows: keep existing strides innermost and derive the new
			// outer strides from the requested shape.
			copy(newstride[len(shape)-len(t.stride):], t.stride)
			for i := len(shape)-len(t.stride)-1; i >= 0; i-- {
				newstride[i] = shape[i+1] * newstride[i+1]
			}
		}
		return Tensor[T]{
			data:   t.data,
			shape:  slices.Clone(shape),
			stride: newstride,
			offset: t.offset,
		}
	}
}
// Index returns a view over an index of the Tensor.
// Multiple indices can be provided at the same time.
func (t Tensor[T]) Index(indices ...int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	// At most one index per axis may be given.
	err := verifyArgsBounds(len(indices), t.Rank())
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			t.Err = err
			return t
		}
	}
	// Every index must fall within its axis' size.
	for i, idx := range indices {
		err = verifyAxisBounds(idx, t.Size(i))
		if err != nil {
			if EnvConfig.Interactive {
				panic(err)
			} else {
				t.Err = err
				return t
			}
		}
	}
	// The view shares the data buffer: indexing only advances the offset
	// and strips the indexed leading axes from the shape and stride.
	offset := t.offset
	for i, idx := range indices {
		offset += idx * t.stride[i]
	}
	return Tensor[T]{
		data:   t.data,
		shape:  slices.Clone(t.shape[len(indices):]),
		stride: slices.Clone(t.stride[len(indices):]),
		offset: offset,
	}
}
// Slice returns a view over a slice of the Tensor.
// The half-open interval [start, end) is taken along the first axis.
func (t Tensor[T]) Slice(start, end int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	err := verifyGoodShape(t.shape...) // make sure Tensor rank is not 0
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			t.Err = err
			return t
		}
	}
	// start/end must form a valid interval within the first axis.
	err = verifyGoodInterval(start, end, [2]int{0, t.Size(0)})
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			t.Err = err
			return t
		}
	}
	// Only the first axis' size changes; the remaining shape is kept.
	shape := slices.WithLen[int](len(t.shape))
	shape[0] = end - start
	copy(shape[1:], t.shape[1:])
	// The view shares the data buffer, with the offset shifted by start rows.
	return Tensor[T]{
		data:   t.data,
		shape:  shape,
		stride: slices.Clone(t.stride),
		offset: t.offset + start*t.stride[0],
	}
}
// Broadcast broadcasts the Tensor to the given shape,
// materializing a new data buffer of the broadcast size.
func (t Tensor[T]) Broadcast(shape ...int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	if !t.Broadable(shape...) {
		if EnvConfig.Interactive {
			panic(ErrNotBroadable)
		} else {
			t.Err = ErrNotBroadable
			return t
		}
	}
	// Left-pad the current shape with 1s until it matches the target rank.
	var expandedShape []int
	if len(t.shape) < len(shape) {
		expandedShape = slices.WithLen[int](len(shape))
		for i := 0; i < len(shape)-len(t.shape); i++ {
			expandedShape[i] = 1
		}
		copy(expandedShape[len(shape)-len(t.shape):], t.shape)
	} else {
		expandedShape = t.shape
	}
	expandedStride := configStride(expandedShape)
	newStride := configStride(shape)
	data := slices.WithLen[T](int(slices.Prod(shape)))
	// Materialize the broadcast by block-copying along every axis whose size
	// actually changes, outermost first.
	var expansion, stride int = 1, newStride[0]
	// This is around 20% slower on average than the shortened version
	// I also came up with, but this one generalizes correctly so...
	for axis := 0; axis < len(shape); axis++ {
		if expandedShape[axis] != shape[axis] {
			for i := 0; i < expansion; i++ {
				for j := 0; j < t.Numel()/expandedStride[axis]; j++ {
					for k := 0; k < shape[axis]; k++ {
						dstIdx := i*stride + j*shape[axis] + k*newStride[axis]
						srcIdx := j * expandedStride[axis]
						copy(data[dstIdx:dstIdx+newStride[axis]], t.Ravel()[srcIdx:srcIdx+expandedStride[axis]])
					}
				}
			}
			expansion *= shape[axis]
			stride = newStride[axis]
		}
	}
	return Tensor[T]{
		data:   data,
		shape:  slices.Clone(shape),
		stride: newStride,
	}
}
// Reverse reverses the order of the elements of the Tensor, in place
// over its flattened data, and returns the tensor.
func (t Tensor[T]) Reverse() Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		}
		return t
	}
	n := t.Numel()
	for i := 0; i < n/2; i++ {
		j := n - 1 - i
		t.data[t.offset+i], t.data[t.offset+j] = t.data[t.offset+j], t.data[t.offset+i]
	}
	return t
}
// Flip reverses the order of the elements of the Tensor
// along the given axis, in place, and returns the tensor.
func (t Tensor[T]) Flip(axis int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	// Validate the requested axis (previously this checked axis 0, which
	// always passed and let an out-of-range axis panic below instead).
	err := verifyAxisBounds(axis, len(t.shape))
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			t.Err = err
			return t
		}
	}
	// Swap mirror-image blocks of size stride within each run along the axis.
	stride := t.stride[axis]
	for i := 0; i < t.Numel(); i += t.shape[axis] * stride {
		for j, k := 0, t.shape[axis]-1; j < k; j, k = j+1, k-1 {
			for l := 0; l < t.stride[axis]; l++ {
				t.data[t.offset+i+j*stride+l], t.data[t.offset+i+k*stride+l] = t.data[t.offset+i+k*stride+l], t.data[t.offset+i+j*stride+l]
			}
		}
	}
	return t
}
// Repeat repeats the elements of the array n times, returning a new
// Tensor whose shape gains a leading axis of length n.
func (t Tensor[T]) Repeat(n int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	numel := t.Numel()
	dataBuf := t.Ravel()
	// NOTE(review): n <= 0 is not rejected here — confirm slices.WithLen
	// handles a non-positive length the way callers expect.
	data := slices.WithLen[T](n * numel)
	// Copy the raveled data n times back to back.
	for i := 0; i < n; i++ {
		copy(data[i*numel:i*numel+numel], dataBuf)
	}
	// Build the new shape/stride by prepending the repetition axis.
	shape := slices.WithLen[int](len(t.shape) + 1)
	stride := slices.WithLen[int](len(t.stride) + 1)
	shape[0] = n
	copy(shape[1:], t.shape)
	// NOTE(review): assumes t.shape[0]*t.stride[0] equals numel, which
	// holds for contiguous tensors — confirm for strided views.
	stride[0] = t.shape[0] * t.stride[0]
	copy(stride[1:], t.stride)
	return Tensor[T]{
		data:   data,
		shape:  shape,
		stride: stride,
	}
}
// Permute permutes the Tensor's axes without changing the data.
// axes[i] names the source axis that becomes axis i of the result;
// the returned Tensor shares the receiver's data buffer.
func (t Tensor[T]) Permute(axes ...int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		}
		return t
	}
	if err := verifyArgsBounds(len(axes), len(t.shape)); err != nil {
		if EnvConfig.Interactive {
			panic(err)
		}
		t.Err = err
		return t
	}
	oldShape := slices.Clone(t.shape)
	oldStride := slices.Clone(t.stride)
	permShape := slices.WithLen[int](len(t.shape))
	permStride := slices.WithLen[int](len(t.stride))
	for i, axis := range axes {
		if err := verifyAxisBounds(axis, len(t.shape)); err != nil {
			if EnvConfig.Interactive {
				panic(err)
			}
			t.Err = err
			return t
		}
		permShape[i] = oldShape[axis]
		permStride[i] = oldStride[axis]
	}
	return Tensor[T]{
		data:   t.data,
		shape:  permShape,
		stride: permStride,
		offset: t.offset,
	}
}
// Cat concatenates the other Tensor to this Tensor along the given axis.
// Both tensors must have identical shapes on every axis except the
// concatenation axis. A new Tensor is returned; neither input is modified.
func (t Tensor[T]) Cat(other Tensor[T], axis int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	if other.Err != nil {
		if EnvConfig.Interactive {
			panic("nune: could not concatenate the two tensors")
		} else {
			t.Err = errors.New("nune: could not concatenate the two tensors")
			return t
		}
	}
	err := verifyAxisBounds(axis, len(t.shape))
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			t.Err = err
			return t
		}
	}
	// Every axis other than the concatenation axis must match exactly.
	if !slices.Equal(t.shape[:axis], other.shape[:axis]) || !slices.Equal(t.shape[axis+1:], other.shape[axis+1:]) {
		if EnvConfig.Interactive {
			panic("nune: tensors' shapes do not allow concatenating them")
		} else {
			t.Err = errors.New(("nune: tensors' shapes do not allow concatenating them"))
			return t
		}
	}
	newshape := slices.Clone(t.shape)
	newshape[axis] += other.shape[axis]
	newstride := configStride(newshape)
	// Strides along the concatenation axis of each operand and the result.
	ts := t.stride[axis]
	os := other.stride[axis]
	ns := newstride[axis]
	data := slices.WithLen[T](t.Numel() + other.Numel())
	// Each iteration copies one contiguous slab of stride*shape[axis]
	// elements of t into its slot of the interleaved output.
	// NOTE(review): the indexing assumes Ravel order matches the stride
	// layout (contiguous operands) — confirm for views.
	for i := 0; i < t.Numel()/(ts*t.shape[axis]); i++ {
		copy(data[i*ns*newshape[axis]:i*ns*newshape[axis]+ts*t.shape[axis]], t.Ravel()[i*ts*t.shape[axis]:(i+1)*ts*t.shape[axis]])
	}
	// Then interleave other's slabs immediately after t's in each slot.
	for i := 0; i < other.Numel()/(os*other.shape[axis]); i++ {
		copy(data[i*ns*newshape[axis]+ts*t.shape[axis]:(i+1)*ns*newshape[axis]], other.Ravel()[i*os*other.shape[axis]:(i+1)*os*other.shape[axis]])
	}
	return Tensor[T]{
		data:   data,
		shape:  newshape,
		stride: newstride,
	}
}
// Stack stacks this and the other Tensor together along a new axis,
// producing a result with one more dimension than the inputs.
func (t Tensor[T]) Stack(other Tensor[T], axis int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	if other.Err != nil {
		// The message previously said "concatenate" (copy-pasted from
		// Cat); this operation is a stack.
		if EnvConfig.Interactive {
			panic("nune: could not stack the two tensors")
		} else {
			t.Err = errors.New("nune: could not stack the two tensors")
			return t
		}
	}
	err := verifyAxisBounds(axis, len(t.shape))
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			t.Err = err
			return t
		}
	}
	// Insert a size-1 axis into both tensors, then concatenate along it.
	t = t.Unsqueeze(axis)
	other = other.Unsqueeze(axis)
	return t.Cat(other, axis)
}
// Squeeze removes an axis of dimensions 1 from the Tensor's shape.
// The returned Tensor shares the receiver's data buffer.
func (t Tensor[T]) Squeeze(axis int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		}
		return t
	}
	if err := verifyAxisBounds(axis, len(t.shape)); err != nil {
		if EnvConfig.Interactive {
			panic(err)
		}
		t.Err = err
		return t
	}
	if t.shape[axis] > 1 {
		if EnvConfig.Interactive {
			panic("nune: tensor axis dimensions greater than 1")
		}
		t.Err = errors.New("nune: tensor axis dimensions greater than 1")
		return t
	}
	// Drop the axis from both the shape and the stride.
	squeezedShape := append(slices.Clone(t.shape[:axis]), t.shape[axis+1:]...)
	squeezedStride := append(slices.Clone(t.stride[:axis]), t.stride[axis+1:]...)
	return Tensor[T]{
		data:   t.data,
		shape:  squeezedShape,
		stride: squeezedStride,
		offset: t.offset,
	}
}
// Unsqueeze adds an axis of dimensions 1 to the Tensor's shape.
// Only shape and stride change; the data buffer is shared.
func (t Tensor[T]) Unsqueeze(axis int) Tensor[T] {
	if t.Err != nil {
		if EnvConfig.Interactive {
			panic(t.Err)
		} else {
			return t
		}
	}
	err := verifyAxisBounds(axis, len(t.shape))
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			t.Err = err
			return t
		}
	}
	newshape := slices.WithLen[int](len(t.shape) + 1)
	newstride := slices.WithLen[int](len(t.stride) + 1)
	// Leading axes are unchanged.
	copy(newshape[:axis], t.shape[:axis])
	copy(newstride[:axis], t.stride[:axis])
	newshape[axis] = 1
	if axis < len(t.shape) {
		// Shift the remaining axes one slot to the right.
		copy(newshape[axis+1:], t.shape[axis:])
		copy(newstride[axis+1:], t.stride[axis:])
		// A size-1 axis's stride never affects indexing; this uses the
		// size of the block the new axis precedes.
		newstride[axis] = t.shape[axis] * t.stride[axis]
	} else {
		// Appending a trailing axis: innermost stride is 1.
		newstride[axis] = 1
	}
	return Tensor[T]{
		data:   t.data,
		shape:  newshape,
		stride: newstride,
		offset: t.offset,
	}
}
package core
import (
"fmt"
"math"
)
// Vector3d is a 3D vector.
// X, Y, and Z are its Cartesian coordinates.
type Vector3d struct {
	X, Y, Z Float
}
// NewVector3d creates a new vector with specified coordinates.
func NewVector3d(x, y, z Float) *Vector3d {
	return &Vector3d{X: x, Y: y, Z: z}
}
// NewVector3dWithString parses a string of the form "(x, y, z)" and
// returns the parsed vector. It panics when parsing fails.
func NewVector3dWithString(s string) *Vector3d {
	var x, y, z Float
	if n, _ := fmt.Sscanf(s, "(%f, %f, %f)", &x, &y, &z); n != 3 {
		panic(fmt.Sprintf("Failed to parse Vector3d: %s", s))
	}
	return NewVector3d(x, y, z)
}
// Add returns the component-wise sum v1 + v2 as a new vector.
func (v1 *Vector3d) Add(v2 *Vector3d) *Vector3d {
	return &Vector3d{X: v1.X + v2.X, Y: v1.Y + v2.Y, Z: v1.Z + v2.Z}
}

// Negate returns a new vector pointing in the opposite direction.
func (v *Vector3d) Negate() *Vector3d {
	return &Vector3d{X: -v.X, Y: -v.Y, Z: -v.Z}
}

// Subtract returns the component-wise difference v1 - v2 as a new vector.
func (v1 *Vector3d) Subtract(v2 *Vector3d) *Vector3d {
	return v1.Add(v2.Negate())
}

// Scale returns a copy of v1 with every component multiplied by s.
func (v1 *Vector3d) Scale(s Float) *Vector3d {
	return &Vector3d{X: v1.X * s, Y: v1.Y * s, Z: v1.Z * s}
}

// Divide returns a copy of v with every component divided by s.
// It panics when s is zero.
func (v *Vector3d) Divide(s Float) *Vector3d {
	if s == 0.0 {
		panic("Zero division!")
	}
	return v.Scale(1.0 / s)
}

// Abs returns a new vector whose components are the absolute values
// of v's components.
func (v *Vector3d) Abs() *Vector3d {
	return &Vector3d{
		X: math.Abs(v.X),
		Y: math.Abs(v.Y),
		Z: math.Abs(v.Z),
	}
}
// Dot returns the dot product of v1 and v2.
func (v1 *Vector3d) Dot(v2 *Vector3d) Float {
	return v1.X*v2.X + v1.Y*v2.Y + v1.Z*v2.Z
}

// Cross returns the cross product v1 × v2 as a new vector.
func (v1 *Vector3d) Cross(v2 *Vector3d) *Vector3d {
	return &Vector3d{
		X: v1.Y*v2.Z - v2.Y*v1.Z,
		Y: v1.Z*v2.X - v2.Z*v1.X,
		Z: v1.X*v2.Y - v2.X*v1.Y,
	}
}

// Length returns the Euclidean length of the vector.
func (v1 *Vector3d) Length() Float {
	return Float(math.Sqrt(float64(v1.LengthSquared())))
}

// LengthSquared returns the squared Euclidean length of the vector.
func (v *Vector3d) LengthSquared() Float {
	return v.Dot(v)
}
// Normalized returns a unit-length copy of v.
// It panics (via Divide) when v has zero length.
func (v *Vector3d) Normalized() *Vector3d {
	// The previous version allocated a throwaway Vector3d that was
	// immediately overwritten; Divide already returns a fresh vector.
	return v.Divide(v.Length())
}
// Minimum returns a new vector holding the component-wise minimum
// of v1 and v2.
func (v1 *Vector3d) Minimum(v2 *Vector3d) *Vector3d {
	return &Vector3d{
		X: math.Min(v1.X, v2.X),
		Y: math.Min(v1.Y, v2.Y),
		Z: math.Min(v1.Z, v2.Z),
	}
}

// Maximum returns a new vector holding the component-wise maximum
// of v1 and v2.
func (v1 *Vector3d) Maximum(v2 *Vector3d) *Vector3d {
	return &Vector3d{
		X: math.Max(v1.X, v2.X),
		Y: math.Max(v1.Y, v2.Y),
		Z: math.Max(v1.Z, v2.Z),
	}
}
func (v *Vector3d) NthElement(i int) Float {
switch i {
case 0:
return v.X
case 1:
return v.Y
case 2:
return v.Z
}
panic("Element index out of range!")
}
func (v1 *Vector3d) Equals(v2 *Vector3d) bool {
return v1.X == v2.X && v1.Y == v2.Y && v1.Z == v2.Z
}
func (v Vector3d) String() string {
return fmt.Sprintf("(%.5f, %.5f, %.5f)", v.X, v.Y, v.Z)
} | src/core/vector3d.go | 0.822225 | 0.673077 | vector3d.go | starcoder |
package bst
import "errors"
// Node is one node of an interval BST. It holds the closed integer
// range [Begin, End] and links to its children.
type Node struct {
	Begin int
	End   int
	Left  *Node
	Right *Node
}

// IsOverlap reports whether the closed range [begin, end] overlaps
// the node's range [Begin, End]. Assumes begin <= end.
func (n *Node) IsOverlap(begin, end int) bool {
	// Two closed intervals overlap exactly when each starts no later
	// than the other ends. The previous expression
	// (begin >= n.Begin || end < n.End || ...) returned true for
	// ranges entirely outside the node, e.g. begin > n.End.
	return begin <= n.End && end >= n.Begin
}
// BST is a binary search tree of non-overlapping integer ranges,
// ordered by range position.
type BST struct {
	// tree is the root node; nil when the tree is empty.
	tree *Node
}

// New returns an empty BST.
func New() *BST {
	return &BST{}
}
// Insert adds the range [begin, end] to the tree. It returns an error
// when begin > end or when the range overlaps an existing node.
func (bst *BST) Insert(begin, end int) error {
	if begin > end {
		return errors.New("Begin must be smaller than end")
	}
	node := &Node{Begin: begin, End: end}
	if bst.tree == nil {
		bst.tree = node
		return nil
	}
	cur := bst.tree
	for cur != nil {
		switch {
		case end < cur.Begin:
			// Entirely to the left of the current node.
			if cur.Left == nil {
				cur.Left = node
				return nil
			}
			cur = cur.Left
		case begin > cur.End:
			// Entirely to the right of the current node.
			if cur.Right == nil {
				cur.Right = node
				return nil
			}
			cur = cur.Right
		default:
			// Overlaps the current node; cannot insert.
			return errors.New("Cannot Insert")
		}
	}
	return nil
}
// findMin returns the node holding the smallest range in the subtree,
// i.e. the leftmost node.
func findMin(subtree *Node) *Node {
	// The minimum lives down the left spine; the previous version
	// recursed into the right subtree and could return a wrong node.
	if subtree.Left == nil {
		return subtree
	}
	return findMin(subtree.Left)
}
// del removes the node whose range is exactly [begin, end] from the
// subtree and returns the (possibly new) subtree root.
func del(subtree *Node, begin, end int) *Node {
	if subtree == nil {
		return nil
	}
	if end < subtree.Begin {
		subtree.Left = del(subtree.Left, begin, end)
		return subtree
	}
	if begin > subtree.End {
		subtree.Right = del(subtree.Right, begin, end)
		return subtree
	}
	// Will only delete the node if begin and end exactly match the node.
	if begin == subtree.Begin && end == subtree.End {
		// Leaf node: just drop it.
		if subtree.Left == nil && subtree.Right == nil {
			return nil
		}
		// Exactly one child: promote it.
		if subtree.Left == nil {
			return subtree.Right
		}
		if subtree.Right == nil {
			return subtree.Left
		}
		// Two children: replace the node with its in-order successor,
		// the minimum of the *right* subtree. The previous code searched
		// from the node itself (and via a faulty findMin), which could
		// pick a node from the left subtree and corrupt the tree.
		minNode := subtree.Right
		for minNode.Left != nil {
			minNode = minNode.Left
		}
		minNode.Right = del(subtree.Right, minNode.Begin, minNode.End)
		minNode.Left = subtree.Left
		return minNode
	}
	return subtree
}
// Remove deletes the range from the tree. Only a node whose begin and
// end both match exactly is removed.
func (bst *BST) Remove(begin, end int) {
	if bst.tree != nil {
		bst.tree = del(bst.tree, begin, end)
	}
}
// Find returns a node whose range overlaps [begin, end], or an error
// when the arguments are invalid or no such node exists.
func (bst *BST) Find(begin, end int) (*Node, error) {
	if begin > end {
		return nil, errors.New("Begin must be smaller than end")
	}
	cur := bst.tree
	for cur != nil {
		switch {
		case end < cur.Begin:
			// Search range lies entirely to the left.
			cur = cur.Left
		case begin > cur.End:
			// Search range lies entirely to the right.
			cur = cur.Right
		default:
			// Ranges overlap.
			return cur, nil
		}
	}
	return nil, errors.New("Not Found")
}
func size(subtree *Node) int {
if subtree == nil {
return 0
}
return size(subtree.Left) + size(subtree.Right) + 1
}
// Size calculate the size of the tree.
func (bst *BST) Size() int {
return size(bst.tree)
} | bst/bst.go | 0.72526 | 0.494812 | bst.go | starcoder |
The flow package implements a dataflow mechanism in Go. It was greatly inspired
by <NAME>'s Flow-based Programming (FBP) and <NAME>'s "goflow"
implementation - see also https://en.wikipedia.org/wiki/Flow-based_programming.
The flow library is available as import, along with some supporting packages:
import "github.com/jcw/flow"
import _ "github.com/jcw/flow/gadgets"
The "gadgets" package is loaded only for its side-effects here: defining some
basic gadgets in the registry.
To use it, start by creating a "circuit", then add "gadgets" and "wires":
g := flow.NewCircuit()
g.Add("r", "Repeater")
g.Add("c", "Counter")
g.Connect("r.Out", "c.In", 0)
Then set a few initial values to send and start the whole thing up:
g.Feed("r.Num", 3)
g.Feed("r.In", "abc")
g.Run()
Run returns once all gadgets have finished. Output shows up as "lost" since the
output hasn't been connected:
Lost int: 3
A circuit can also be used as gadget, collectively called "circuitry". For this,
internal pins must be labeled with external names to expose them:
g.Label("MyOut", "c.out")
Once pins have been labeled, the circuit can be used inside another circuit:
g2 := flow.NewCircuit()
g2.AddCircuitry("g", g)
g2.Add("p", "Printer")
g2.Connect("g.MyOut", "p.In", 0)
g2.Run()
Since the output pin has been wired up this time, the output will now be:
3
Definitions of gadgets, wires, and initial set requests can be loaded
from a JSON description:
data, _ := ioutil.ReadFile("config.json")
g := flow.NewCircuit()
g.LoadJSON(data)
g.Run()
To define your own gadget, create a type which embeds Gadget and defines Run():
type LineLengths struct {
flow.Gadget
In flow.Input
Out flow.Output
}
func (w *LineLengths) Run() {
for m := range w.In {
s := m.(string) // needs a type assertion
w.Out.Send(len(s))
}
}
g := flow.NewCircuit()
g.AddCircuitry("ll", new(LineLengths))
g.Feed("ll.In", "abc")
g.Feed("ll.In", "defgh")
g.Run()
Inputs and outputs become available to the circuit in which this gadget is used.
For this simple case, a Transformer could also have been used:
	ll := flow.Transformer(func(m Message) Message {
		return len(m.(string))
	})
...
g.AddCircuitry("ll", ll)
This wraps a function into a gadget with In and Out pins. It can be used when
there is a one-to-one processing task from incoming to outgoing messages.
To make a gadget available by name in the registry, set up a factory method:
flow.registry["LineLen"] = func() GadgetType {
return new(LineLengths)
}
...
g.Add("ll", "LineLen")
Message is a synonym for Go's generic "interface{}" type.
*/
package flow | doc.go | 0.817902 | 0.653611 | doc.go | starcoder |
package tda
import (
"math"
"sort"
)
// Landscape supports construction of landscape diagrams for
// describing the persistence homology of an image.
type Landscape struct {
	// Birth times of the observed intervals
	birth []float64
	// Death times of the observed intervals
	death []float64
	// Average of birth and death times (midpoint of each interval)
	bda []float64
	// Distinct birth or death times, sorted and deduplicated
	distinct []float64
	// The observed intervals that are in each elementary interval
	// (index[i] covers the span between consecutive distinct times)
	index [][]int
	// The minimum and maximum of the distinct birth and death times
	min, max float64
}
// NewLandscape returns a Landscape value for the given object birth
// and death times. Call the Eval method to evaluate the landscape
// function at prescribed depths.
func NewLandscape(birth, death []float64) *Landscape {
	if len(birth) != len(death) {
		panic("birth and death slices must have the same length")
	}
	ls := &Landscape{birth: birth, death: death}
	ls.init()
	return ls
}
// init precomputes the lookup structures used by Eval: the sorted,
// deduplicated birth/death times, the set of intervals covering each
// elementary interval, the interval midpoints, and the overall range.
// NOTE(review): panics via di[0] when there are no intervals — confirm
// callers always supply non-empty input.
func (ls *Landscape) init() {
	// All birth and death times.
	n := len(ls.birth)
	di := make([]float64, 2*n)
	copy(di[0:n], ls.birth)
	copy(di[n:], ls.death)
	sort.Float64Slice(di).Sort()
	// Deduplicate in place; di[0:j] holds the unique values afterwards.
	j := 1
	for i := 1; i < len(di); i++ {
		if di[i] != di[i-1] {
			di[j] = di[i]
			j++
		}
	}
	di = di[0:j]
	ls.distinct = di
	mn := di[0]
	mx := di[0]
	for i := range di {
		if di[i] < mn {
			mn = di[i]
		}
		if di[i] > mx {
			mx = di[i]
		}
	}
	ls.min = mn
	ls.max = mx
	// Determine which observed intervals cover each elementary
	// interval (the span from distinct[j] up to the next distinct time).
	ls.index = make([][]int, len(di))
	for i := range ls.birth {
		j0 := sort.SearchFloat64s(di, ls.birth[i])
		j1 := sort.SearchFloat64s(di, ls.death[i])
		for j := j0; j < j1; j++ {
			ls.index[j] = append(ls.index[j], i)
		}
	}
	// Birth/death mid-points, where each interval's tent function peaks.
	ls.bda = make([]float64, len(ls.birth))
	for i := range ls.birth {
		ls.bda[i] = (ls.birth[i] + ls.death[i]) / 2
	}
}
// maxi returns the largest value in x. It panics when x is empty.
func maxi(x []int) int {
	best := x[0]
	for _, v := range x[1:] {
		if v > best {
			best = v
		}
	}
	return best
}
// Eval evaluates the landscape function at a given point t, at a
// given series of depths. Depth=0 corresponds to the maximum
// landscape pofile, depth=1 corresponds to the second highest
// landscape profile etc. The requested values are written into the
// leading positions of the returned slice (r[p] for p := range depth).
func (ls *Landscape) Eval(t float64, depth []int) []float64 {
	ii := sort.SearchFloat64s(ls.distinct, t)
	// The evaluation point does not fall under any tents.
	if ii == 0 || ii == len(ls.distinct) {
		return make([]float64, len(depth))
	}
	if ls.distinct[ii] != t {
		ii--
	}
	// x collects the height at t of every tent that is non-zero there.
	x := make([]float64, len(ls.birth))
	j := 0
	for _, i := range ls.index[ii] {
		if t <= ls.bda[i] {
			// Rising side of the tent: height grows from birth.
			x[j] = t - ls.birth[i]
			j++
		} else if t < ls.death[i] {
			// Falling side of the tent: height shrinks toward death.
			x[j] = ls.death[i] - t
			j++
		}
	}
	x = x[0:j]
	// Zeros are not included above, append them here if needed
	mx := maxi(depth)
	for len(x) <= mx {
		x = append(x, 0)
	}
	sort.Sort(sort.Reverse(sort.Float64Slice(x)))
	// Get the requested positions.
	// NOTE(review): this writes in place while reading; it is safe when
	// depth is ascending (so depth[p] >= p), otherwise earlier writes
	// can clobber values still to be read — confirm callers sort depth.
	for p, q := range depth {
		x[p] = x[q]
	}
	return x
}
// Stat contains summary statistics about a landscape or convex peel
// profile at a given depth.
type Stat struct {
	// Depth is the profile depth these statistics describe.
	Depth float64
	// Area is the area under the sampled profile.
	Area float64
	// Perimeter is the arc length of the sampled profile.
	Perimeter float64
	// Centroid holds the mean (t, value) coordinate of the samples.
	Centroid [2]float64
}
// Stats obtains the area, perimeter, and centroid for a series of
// landscape profiles. The landscape function is evaluated on a grid
// of npoints points over the range of the landscape function.
func (ls *Landscape) Stats(depth []int, npoints int) []Stat {
	// Grid spacing over [min, max].
	d := (ls.max - ls.min) / float64(npoints-1)
	r := make([]Stat, len(depth))
	lastx := ls.Eval(ls.min, depth)
	for i := 1; i < npoints; i++ {
		t := ls.min + float64(i)*d
		x := ls.Eval(t, depth)
		for j := range depth {
			// Area via the trapezoid rule on consecutive samples.
			r[j].Area += d * (x[j] + lastx[j]) / 2
			// Perimeter: length of the segment between grid points.
			u := lastx[j] - x[j]
			r[j].Perimeter += math.Sqrt(d*d + u*u)
			// Centroid: accumulate sample coordinates; normalized below.
			r[j].Centroid[0] += t
			r[j].Centroid[1] += x[j]
			if i == 1 {
				// Fold in the very first grid point exactly once.
				r[j].Centroid[0] += ls.min
				r[j].Centroid[1] += lastx[j]
			}
		}
		lastx = x
	}
	for j := range depth {
		r[j].Centroid[0] /= float64(npoints)
		r[j].Centroid[1] /= float64(npoints)
		r[j].Depth = float64(depth[j])
	}
	return r
}
package cp
// HashSetEqualArbiter reports whether the shape pair ptr matches the
// stored arbiter elt.
type HashSetEqualArbiter func(ptr []*Shape, elt *Arbiter) bool

// HashSetTransArbiter produces the arbiter to store for the shape pair ptr.
type HashSetTransArbiter func(ptr []*Shape, space *Space) *Arbiter

// HashSetIteratorArbiter is a callback applied to each stored arbiter.
type HashSetIteratorArbiter func(elt *Arbiter)

// HashSetFilterArbiter decides whether an arbiter survives a filter pass.
type HashSetFilterArbiter func(arb *Arbiter, space *Space) bool

// HashSetBinArbiter is one entry in a hash slot's singly-linked chain.
type HashSetBinArbiter struct {
	elt  *Arbiter
	hash HashValue
	next *HashSetBinArbiter
}

// HashSetArbiter is a chained hash table of arbiters keyed by shape pairs.
type HashSetArbiter struct {
	// number of bins in the table, not just table size
	entries uint
	eql     HashSetEqualArbiter
	// defaultValue is what Find returns when no entry matches.
	defaultValue Arbiter
	// size is the number of slots in table.
	size  uint
	table []*HashSetBinArbiter
	// pooledBins is a free list of recycled bins.
	pooledBins *HashSetBinArbiter
}
// NewHashSetArbiter creates an empty arbiter hash set that uses eql to
// compare a shape pair against a stored arbiter.
func NewHashSetArbiter(eql HashSetEqualArbiter) *HashSetArbiter {
	tableSize := nextPrime(0)
	return &HashSetArbiter{
		eql:   eql,
		size:  tableSize,
		table: make([]*HashSetBinArbiter, tableSize),
	}
}
// Resize grows the hash table to the next prime size and rehashes
// every existing bin into the new table.
func (set *HashSetArbiter) Resize() {
	newSize := nextPrime(set.size + 1)
	newTable := make([]*HashSetBinArbiter, newSize)
	for i := uint(0); i < set.size; i++ {
		// Re-link each bin of this chain into its new slot.
		bin := set.table[i]
		for bin != nil {
			next := bin.next
			slot := uint(bin.hash) % newSize
			bin.next = newTable[slot]
			newTable[slot] = bin
			bin = next
		}
	}
	set.table = newTable
	set.size = newSize
}
// Free releases the table storage held by the set. Safe on a nil receiver.
func (set *HashSetArbiter) Free() {
	if set != nil {
		set.table = []*HashSetBinArbiter{}
	}
}

// Count returns the number of entries stored in the set.
func (set *HashSetArbiter) Count() uint {
	return set.entries
}
// Insert returns the arbiter stored for the shape pair ptr, creating
// one via trans and growing the table when necessary.
func (set *HashSetArbiter) Insert(hash HashValue, ptr []*Shape, trans HashSetTransArbiter, space *Space) *Arbiter {
	idx := uint(hash) % set.size
	// Find the bin with the matching element.
	bin := set.table[idx]
	for bin != nil && !set.eql(ptr, bin.elt) {
		bin = bin.next
	}
	// Create it if necessary.
	if bin == nil {
		bin = set.GetUnusedBin()
		bin.hash = hash
		bin.elt = trans(ptr, space)
		// Push the new bin onto the front of the chain.
		bin.next = set.table[idx]
		set.table[idx] = bin
		set.entries++
		// Keep the load factor below 1 by growing to the next prime size.
		if set.entries >= set.size {
			set.Resize()
		}
	}
	return bin.elt
}
// InsertArb stores arb for the shape pair ptr unless a matching entry
// already exists, and returns the stored arbiter.
// NOTE(review): unlike Insert this returns interface{} — confirm
// whether callers rely on that or whether *Arbiter was intended.
func (set *HashSetArbiter) InsertArb(hash HashValue, ptr []*Shape, arb *Arbiter) interface{} {
	idx := uint(hash) % set.size
	// Find the bin with the matching element.
	bin := set.table[idx]
	for bin != nil && !set.eql(ptr, bin.elt) {
		bin = bin.next
	}
	// Create it if necessary.
	if bin == nil {
		bin = set.GetUnusedBin()
		bin.hash = hash
		bin.elt = arb
		// Push the new bin onto the front of the chain.
		bin.next = set.table[idx]
		set.table[idx] = bin
		set.entries++
		// Grow once the entry count reaches the slot count.
		if set.entries >= set.size {
			set.Resize()
		}
	}
	return bin.elt
}
// Recycle clears the bin's element and pushes it onto the free list.
func (set *HashSetArbiter) Recycle(bin *HashSetBinArbiter) {
	bin.next = set.pooledBins
	set.pooledBins = bin
	bin.elt = nil
}

// GetUnusedBin pops a bin from the free list, seeding the list with a
// batch of fresh bins when it is empty.
func (set *HashSetArbiter) GetUnusedBin() *HashSetBinArbiter {
	bin := set.pooledBins
	if bin != nil {
		set.pooledBins = bin.next
		return bin
	}
	// Pool is empty: pre-fill it so later calls hit the fast path,
	// then hand out a separate fresh bin for this call.
	for i := 0; i < POOLED_BUFFER_SIZE; i++ {
		set.Recycle(&HashSetBinArbiter{})
	}
	return &HashSetBinArbiter{}
}
// Remove deletes the entry matching the shape pair ptr and returns the
// removed arbiter, or nil when nothing matched.
func (set *HashSetArbiter) Remove(hash HashValue, ptr []*Shape) *Arbiter {
	idx := uint(hash) % set.size
	// prevPtr always points at the link that targets bin, so unlinking
	// is a single pointer store.
	prevPtr := &set.table[idx]
	bin := set.table[idx]
	// Find the bin
	for bin != nil && !set.eql(ptr, bin.elt) {
		prevPtr = &bin.next
		bin = bin.next
	}
	// Remove the bin if it exists
	if bin != nil {
		// Update the previous linked list pointer
		*prevPtr = bin.next
		set.entries--
		elt := bin.elt
		set.Recycle(bin)
		return elt
	}
	return nil
}
// Find returns the arbiter stored for the shape pair ptr, or
// set.defaultValue when no entry matches.
// NOTE(review): a hit returns a *Arbiter while a miss returns an
// Arbiter value — callers type-asserting the result should confirm
// which dynamic type they expect.
func (set *HashSetArbiter) Find(hash HashValue, ptr []*Shape) interface{} {
	idx := uint(hash) % set.size
	bin := set.table[idx]
	for bin != nil && !set.eql(ptr, bin.elt) {
		bin = bin.next
	}
	if bin != nil {
		return bin.elt
	} else {
		return set.defaultValue
	}
}
// Each calls f for every element in the set. The next pointer is
// captured before the callback runs, so iteration survives f
// unlinking the current bin.
func (set *HashSetArbiter) Each(f HashSetIteratorArbiter) {
	for _, head := range set.table {
		for cur := head; cur != nil; {
			next := cur.next
			f(cur.elt)
			cur = next
		}
	}
}
// Filter removes every entry for which filter returns false,
// recycling the removed bins.
func (set *HashSetArbiter) Filter(filter func(arb *Arbiter) bool) {
	var i uint
	for i = 0; i < set.size; i++ {
		// prevPtr tracks the link pointing at bin so removal is O(1).
		prevPtr := &set.table[i]
		bin := set.table[i]
		for bin != nil {
			next := bin.next
			if filter(bin.elt) {
				prevPtr = &bin.next
			} else {
				*prevPtr = next
				set.entries--
				set.Recycle(bin)
			}
			bin = next
		}
	}
}
// SpaceArbiterSetFilter is the hashset filter func used to throw away
// old arbiters. It reports whether arb should be kept.
func SpaceArbiterSetFilter(arb *Arbiter, space *Space) bool {
	// TODO: should make an arbiter state for this so it doesn't require filtering arbiters for dangling body pointers on body removal.
	// Preserve arbiters on sensors and rejected arbiters for sleeping objects.
	// This prevents errant separate callbacks from happening.
	a := arb.body_a
	b := arb.body_b
	if (a.GetType() == BODY_STATIC || a.IsSleeping()) && (b.GetType() == BODY_STATIC || b.IsSleeping()) {
		return true
	}
	// ticks counts steps since the arbiter was last touched.
	ticks := space.stamp - arb.stamp
	if ticks >= 1 && arb.state != CP_ARBITER_STATE_CACHED {
		arb.state = CP_ARBITER_STATE_CACHED
		// Fire the separate callback once when contact first lapses.
		handler := arb.handler
		handler.SeparateFunc(arb, space, handler.UserData)
	}
	if ticks >= space.collisionPersistence {
		arb.contacts = nil
		arb.count = 0
		// Return the arbiter to the pool if there is room
		// (non-blocking channel send; dropped when the pool is full).
		select {
		case space.pooledArbiters <- arb:
		default:
		}
		return false
	}
	return true
}
// CachedArbitersFilter reports whether arb should be kept in the
// cache. Arbiters referencing the given shape (or, when shape is nil,
// any shape on body) are separated, unthreaded, and dropped.
func CachedArbitersFilter(arb *Arbiter, space *Space, shape *Shape, body *Body) bool {
	// Match on the filter shape, or if it's NULL the filter body
	if (body == arb.body_a && (shape == arb.a || shape == nil)) ||
		(body == arb.body_b && (shape == arb.b || shape == nil)) {
		// Call separate when removing shapes.
		if shape != nil && arb.state != CP_ARBITER_STATE_CACHED {
			// Invalidate the arbiter since one of the shapes was removed
			arb.state = CP_ARBITER_STATE_INVALIDATED
			handler := arb.handler
			handler.SeparateFunc(arb, space, handler.UserData)
		}
		arb.Unthread()
		// Drop the arbiter from the space's active list.
		for i, arbiter := range space.arbiters {
			if arb == arbiter {
				space.arbiters = append(space.arbiters[:i], space.arbiters[i+1:]...)
				break
			}
		}
		// Return it to the pool if there is room (non-blocking send).
		select {
		case space.pooledArbiters <- arb:
		default:
		}
		return false
	}
	return true
}
package internal
import (
"reflect"
"github.com/lyraproj/dgo/dgo"
)
type (
	// sensitive wraps a dgo.Value; its String method redacts the value.
	sensitive struct {
		value dgo.Value
	}
	// sensitiveType is the dgo.Type describing sensitive values whose
	// wrapped value matches the wrapped type.
	sensitiveType struct {
		wrapped dgo.Type
	}
)

// DefaultSensitiveType is the unconstrained Sensitive type
var DefaultSensitiveType = &sensitiveType{wrapped: DefaultAnyType}
// SensitiveType returns a Sensitive dgo.Type that wraps the given dgo.Type
func SensitiveType(args []interface{}) dgo.Type {
	// No argument: the unconstrained Sensitive type.
	if len(args) == 0 {
		return DefaultSensitiveType
	}
	if len(args) > 1 {
		panic(illegalArgumentCount(`SensitiveType`, 0, 1, len(args)))
	}
	st, ok := Value(args[0]).(dgo.Type)
	if !ok {
		panic(illegalArgument(`SensitiveType`, `Type`, args, 0))
	}
	return &sensitiveType{wrapped: st}
}
// Assignable reports whether values of type other are assignable to t.
func (t *sensitiveType) Assignable(other dgo.Type) bool {
	return Assignable(nil, t, other)
}

// DeepAssignable is the recursion-guarded assignability check: a
// Sensitive type is assignable when its wrapped type is assignable.
func (t *sensitiveType) DeepAssignable(guard dgo.RecursionGuard, other dgo.Type) bool {
	if ot, ok := other.(*sensitiveType); ok {
		return Assignable(guard, t.wrapped, ot.wrapped)
	}
	return CheckAssignableTo(guard, other, t)
}

// Equals reports whether other is a Sensitive type equal to t.
func (t *sensitiveType) Equals(other interface{}) bool {
	return equals(nil, t, other)
}

// deepEqual is the recursion-safe equality used by Equals.
func (t *sensitiveType) deepEqual(seen []dgo.Value, other deepEqual) bool {
	if ot, ok := other.(*sensitiveType); ok {
		return equals(seen, t.wrapped, ot.wrapped)
	}
	return false
}

// HashCode returns a hash derived from the wrapped type.
func (t *sensitiveType) HashCode() int {
	return deepHashCode(nil, t)
}

// deepHashCode is the recursion-safe hash used by HashCode.
func (t *sensitiveType) deepHashCode(seen []dgo.Value) int {
	return int(dgo.TiSensitive)*31 + deepHashCode(seen, t.wrapped)
}

// Instance reports whether value is a sensitive whose wrapped value
// is an instance of the wrapped type.
func (t *sensitiveType) Instance(value interface{}) bool {
	if ov, ok := value.(*sensitive); ok {
		return t.wrapped.Instance(ov.value)
	}
	return false
}

// reflectSensitiveType caches the reflect.Type of dgo.Sensitive.
var reflectSensitiveType = reflect.TypeOf((*dgo.Sensitive)(nil)).Elem()

// ReflectType returns the reflect.Type corresponding to this type.
func (t *sensitiveType) ReflectType() reflect.Type {
	return reflectSensitiveType
}
// Operand returns the wrapped type.
func (t *sensitiveType) Operand() dgo.Type {
	return t.wrapped
}

// Operator returns the Sensitive type operator.
func (t *sensitiveType) Operator() dgo.TypeOp {
	return dgo.OpSensitive
}

// New creates a Sensitive from arg. A dgo.Arguments of exactly one
// element is unwrapped first; an arg that is already Sensitive is
// returned as is.
func (t *sensitiveType) New(arg dgo.Value) dgo.Value {
	if args, ok := arg.(dgo.Arguments); ok {
		args.AssertSize(`sensitive`, 1, 1)
		arg = args.Get(0)
	}
	if s, ok := arg.(dgo.Sensitive); ok {
		return s
	}
	return Sensitive(arg)
}

// String returns the string representation of this type.
func (t *sensitiveType) String() string {
	return TypeString(t)
}

// Type returns the meta type of this type.
func (t *sensitiveType) Type() dgo.Type {
	return &metaType{t}
}

// TypeIdentifier returns the identifier for the Sensitive type.
func (t *sensitiveType) TypeIdentifier() dgo.TypeIdentifier {
	return dgo.TiSensitive
}
// Sensitive creates a new Sensitive that wraps the given value
func Sensitive(v interface{}) dgo.Sensitive {
	return &sensitive{value: Value(v)}
}
// Equals reports whether other is a sensitive wrapping an equal value.
func (v *sensitive) Equals(other interface{}) bool {
	return equals(nil, v, other)
}

// deepEqual is the recursion-safe equality used by Equals.
func (v *sensitive) deepEqual(seen []dgo.Value, other deepEqual) bool {
	if ov, ok := other.(*sensitive); ok {
		return equals(seen, v.value, ov.value)
	}
	return false
}

// Freeze freezes the wrapped value when it is freezable.
func (v *sensitive) Freeze() {
	if f, ok := v.value.(dgo.Freezable); ok {
		f.Freeze()
	}
}

// Frozen reports whether the wrapped value is frozen. A value that is
// not freezable counts as frozen.
func (v *sensitive) Frozen() bool {
	if f, ok := v.value.(dgo.Freezable); ok {
		return f.Frozen()
	}
	return true
}

// FrozenCopy returns a sensitive wrapping a frozen copy of the value,
// or the receiver when the value is already frozen or not freezable.
func (v *sensitive) FrozenCopy() dgo.Value {
	if f, ok := v.value.(dgo.Freezable); ok && !f.Frozen() {
		return &sensitive{f.FrozenCopy()}
	}
	return v
}

// ThawedCopy returns a sensitive wrapping a thawed copy of the value,
// or the receiver when the value is not freezable.
func (v *sensitive) ThawedCopy() dgo.Value {
	if f, ok := v.value.(dgo.Freezable); ok {
		return &sensitive{f.ThawedCopy()}
	}
	return v
}

// HashCode returns a hash derived from the wrapped value.
func (v *sensitive) HashCode() int {
	return deepHashCode(nil, v)
}

// deepHashCode is the recursion-safe hash used by HashCode.
func (v *sensitive) deepHashCode(seen []dgo.Value) int {
	return deepHashCode(seen, v.value) * 7
}

// String returns a redacted representation; the value is never printed.
func (v *sensitive) String() string {
	return `sensitive [value redacted]`
}

// Type returns the Sensitive type wrapping the generic type of the value.
func (v *sensitive) Type() dgo.Type {
	return &sensitiveType{wrapped: Generic(v.value.Type())}
}

// Unwrap returns the wrapped value.
func (v *sensitive) Unwrap() dgo.Value {
	return v.value
}
package gomfa
// Fw2xy computes the CIP X,Y unit-vector coordinates from the four
// Fukushima-Williams bias-precession-nutation angles (all in radians).
func Fw2xy(gamb float64, phib float64, psi float64, eps float64,
	x *float64, y *float64) {
	/*
	**  - - - - - -
	**   F w 2 x y
	**  - - - - - -
	**
	**  CIP X,Y given Fukushima-Williams bias-precession-nutation angles.
	**
	**  Given:
	**     gamb     float64    F-W angle gamma_bar (radians)
	**     phib     float64    F-W angle phi_bar (radians)
	**     psi      float64    F-W angle psi (radians)
	**     eps      float64    F-W angle epsilon (radians)
	**
	**  Returned:
	**     x,y      float64    CIP unit vector X,Y
	**
	**  Notes:
	**
	**  1) Naming the following points:
	**
	**           e = J2000.0 ecliptic pole,
	**           p = GCRS pole
	**           E = ecliptic pole of date,
	**     and   P = CIP,
	**
	**     the four Fukushima-Williams angles are as follows:
	**
	**        gamb = gamma = epE
	**        phib = phi = pE
	**        psi = psi = pEP
	**        eps = epsilon = EP
	**
	**  2) The matrix representing the combined effects of frame bias,
	**     precession and nutation is:
	**
	**        NxPxB = R_1(-epsA).R_3(-psi).R_1(phib).R_3(gamb)
	**
	**     The returned values x,y are elements [2][0] and [2][1] of the
	**     matrix.  Near J2000.0, they are essentially angles in radians.
	**
	**  Called:
	**     Fw2m      F-W angles to r-matrix
	**     Bpn2xy    extract CIP X,Y coordinates from NPB matrix
	**
	**  Reference:
	**
	**     <NAME>., 2006, Celest.Mech.Dyn.Astron. 94, 351
	**
	**  This revision:  2021 May 11
	**
	**  Copyright (C) 2013-2021, NumFOCUS Foundation.
	**  Derived, with permission, from the SOFA library.  See notes at end of file.
	*/
	var r [3][3]float64

	/* Form NxPxB matrix. */
	Fw2m(gamb, phib, psi, eps, &r)

	/* Extract CIP X,Y. */
	Bpn2xy(&r, x, y)

	/* Finished. */
}
/*----------------------------------------------------------------------
**
**
** Copyright (C) 2021, <NAME>
** All rights reserved.
**
** This library is derived, with permission, from the International
** Astronomical Union's "Standards of Fundamental Astronomy" library,
** available from http://www.iausofa.org.
**
** The GOMFA version is intended to retain identical functionality to
** the SOFA library, but made distinct through different namespaces and
** file names, as set out in the SOFA license conditions. The SOFA
** original has a role as a reference standard for the IAU and IERS,
** and consequently redistribution is permitted only in its unaltered
** state. The GOMFA version is not subject to this restriction and
** therefore can be included in distributions which do not support the
** concept of "read only" software.
**
** Although the intent is to replicate the SOFA API (other than
** replacement of prefix names) and results (with the exception of
** bugs; any that are discovered will be fixed), SOFA is not
** responsible for any errors found in this version of the library.
**
** If you wish to acknowledge the SOFA heritage, please acknowledge
** that you are using a library derived from SOFA, rather than SOFA
** itself.
**
**
** TERMS AND CONDITIONS
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
**
** 1 Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
**
** 2 Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
**
** 3 Neither the name of the Standards Of Fundamental Astronomy Board,
** the International Astronomical Union nor the names of its
** contributors may be used to endorse or promote products derived
** from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
** FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
** COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
** BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
** CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
** ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
** POSSIBILITY OF SUCH DAMAGE.
**
*/ | fw2xy.go | 0.770206 | 0.666877 | fw2xy.go | starcoder |
package orm
import (
"bytes"
"context"
"github.com/goradd/goradd/web/examples/gen/goradd/model"
"github.com/goradd/goradd/web/examples/gen/goradd/model/node"
)
func (ctrl *RefPanel) DrawTemplate(ctx context.Context, buf *bytes.Buffer) (err error) {
buf.WriteString(`
<h1>References</h1>
<h2>Foreign Keys</h2>
<p>
Relational databases let you link records together using record ids, called foreign keys. At its basic level, a foreign key is
just a field that
contains a key that identifies a record in another table (or even in the same table). Many databases have a mechanism that lets you further describe
a foreign key and how it behaves. For example, MySQL calls these CONSTRAINTs. These descriptions help to maintain the
integrity of the database while modifying inter-related records.
</p>
<p>
Goradd will detect these relationships in your database and create links to these related objects so that you can get to them easily.
If you are not using a SQL database, or you are using a SQL database that does not have a CONSTRAINT mechanism,
you can still get the same behavior by creating a data description file to tell Goradd about these relationships, and Goradd will
then manage these links.
</p>
<p>
One important thing to do is decide what should happen if the referenced record
is deleted. Usually, you will want one of two behaviors:
<ol>
<li>Set the reference to NULL, or</li>
<li>Delete this record</li>
</ol>
Goradd will look at what direction you have given in the constraint for the foreign key to determine what to do.
If the constraint is specified to Set Null on Delete, then it will set the foreign key to NULL when the record
on the other side of the relationship is deleted.
If it is directed to Cascade on Delete, it will delete any records pointing to it with a foreign key.
You can override this behavior, but that is what happens by default.
</p>
<h2>Loading Referenced Records</h2>
<p>
In the example below, we get the first address record, and then we follow the link to the person that has that address
by using the LoadPerson function from that Address. That will query the database again for the related address.
</p>
`)
address := model.LoadAddress(ctx, "1")
person := address.LoadPerson(ctx)
buf.WriteString(`<p>
Address: `)
buf.WriteString(address.Street())
buf.WriteString(`, `)
buf.WriteString(address.City())
buf.WriteString(`<br>
Person: `)
buf.WriteString(person.FirstName())
buf.WriteString(` `)
buf.WriteString(person.LastName())
buf.WriteString(`
</p>
<h2>Pre-loading Referenced Records</h2>
<p>
In the example above, we made two queries to the database. All SQL databases, and some NoSQL databases, have the ability
to combine queries like this into one query. In SQL, you use a JOIN statement, and Goradd adopts this terminology
to indicate that you want to use a foreign key to pre-load related records.
</p>
<p>
To preload a connection using a Load* function, simply pass in nodes for the tables that you want to preload as an extra
parameter to the Load* function.
</p>
`)
address = model.LoadAddress(ctx, "2", node.Address().Person())
buf.WriteString(`<p>
Address: `)
buf.WriteString(address.Street())
buf.WriteString(`, `)
buf.WriteString(address.City())
buf.WriteString(`<br>
Person: `)
buf.WriteString(person.FirstName())
buf.WriteString(` `)
buf.WriteString(person.LastName())
buf.WriteString(`
</p>
<p>
You can pre-load slice queries too.
</p>
<p>
`)
for _, project := range model.QueryProjects(ctx).
Join(node.Project().Manager()).
Load() {
buf.WriteString(` <div>Project: `)
buf.WriteString(project.Name())
buf.WriteString(`, Manager: `)
buf.WriteString(project.Manager().FirstName())
buf.WriteString(` `)
buf.WriteString(project.Manager().LastName())
buf.WriteString(`</div>
`)
}
buf.WriteString(`</p>
`)
buf.WriteString(`
`)
return
} | web/examples/tutorial/orm/5-ref.tpl.go | 0.657978 | 0.409693 | 5-ref.tpl.go | starcoder |
package sprite
import (
"math"
"github.com/losinggeneration/hge"
"github.com/losinggeneration/hge/gfx"
"github.com/losinggeneration/hge/helpers/rect"
)
// Sprite wraps a textured quad together with the texture sub-rectangle
// it samples and its rendering state (hot spot and flip flags).
type Sprite struct {
	gfx.Quad
	// TX, TY, W, H describe the sampled texture region in pixels.
	TX, TY, W, H float64
	// TexW, TexH are the full texture dimensions (set to 1 when the
	// sprite has no texture, keeping the UV math valid).
	TexW, TexH float64
	// HotX, HotY is the anchor point, in sprite-local pixels, that is
	// placed at the position passed to Render.
	HotX, HotY float64
	// XFlip/YFlip mirror the texture coordinates; HSFlip controls
	// whether the hot spot is mirrored along with them.
	XFlip, YFlip, HSFlip bool
}
// New builds a Sprite that renders the w×h region of texture located at
// (texx, texy). A nil texture yields 1×1 texture dimensions so the UV
// normalization below stays well-defined.
func New(texture *gfx.Texture, texx, texy, w, h float64) Sprite {
	var s Sprite

	s.TX, s.TY = texx, texy
	s.W, s.H = w, h
	if texture == nil {
		s.TexW, s.TexH = 1.0, 1.0
	} else {
		s.TexW = float64(texture.Width())
		s.TexH = float64(texture.Height())
	}
	s.Quad.Texture = texture

	// Normalize the pixel rectangle into UV space.
	u1 := texx / s.TexW
	v1 := texy / s.TexH
	u2 := (texx + w) / s.TexW
	v2 := (texy + h) / s.TexH

	s.Quad.V[0].TX, s.Quad.V[0].TY = float32(u1), float32(v1)
	s.Quad.V[1].TX, s.Quad.V[1].TY = float32(u2), float32(v1)
	s.Quad.V[2].TX, s.Quad.V[2].TY = float32(u2), float32(v2)
	s.Quad.V[3].TX, s.Quad.V[3].TY = float32(u1), float32(v2)

	for i := range s.Quad.V {
		s.Quad.V[i].Z = 0.5
		s.Quad.V[i].Color = 0xffffffff
	}
	s.Quad.Blend = gfx.BLEND_DEFAULT
	return s
}
// Render draws the sprite with its hot spot placed at (x, y).
func (s *Sprite) Render(x, y float64) {
	left := float32(x - s.HotX)
	top := float32(y - s.HotY)
	right := float32(x + s.W - s.HotX)
	bottom := float32(y + s.H - s.HotY)

	s.Quad.V[0].X, s.Quad.V[0].Y = left, top
	s.Quad.V[1].X, s.Quad.V[1].Y = right, top
	s.Quad.V[2].X, s.Quad.V[2].Y = right, bottom
	s.Quad.V[3].X, s.Quad.V[3].Y = left, bottom
	s.Quad.Render()
}
// RenderEx draws the sprite at (x, y) rotated by rot radians. Optional
// trailing arguments are hscale and vscale (both float64); a missing or
// zero vscale falls back to hscale.
func (s *Sprite) RenderEx(x, y float64, rot float64, arg ...interface{}) {
	hscale, vscale := 1.0, 0.0
	if len(arg) > 0 {
		if h, ok := arg[0].(float64); ok {
			hscale = h
		}
	}
	if len(arg) > 1 {
		if v, ok := arg[1].(float64); ok {
			vscale = v
		}
	}
	if vscale == 0 {
		vscale = hscale
	}

	// Corner offsets relative to the hot spot, after scaling.
	x1 := -s.HotX * hscale
	y1 := -s.HotY * vscale
	x2 := (s.W - s.HotX) * hscale
	y2 := (s.H - s.HotY) * vscale

	if rot != 0.0 {
		cost, sint := math.Cos(rot), math.Sin(rot)
		s.Quad.V[0].X = float32(x1*cost - y1*sint + x)
		s.Quad.V[0].Y = float32(x1*sint + y1*cost + y)
		s.Quad.V[1].X = float32(x2*cost - y1*sint + x)
		s.Quad.V[1].Y = float32(x2*sint + y1*cost + y)
		s.Quad.V[2].X = float32(x2*cost - y2*sint + x)
		s.Quad.V[2].Y = float32(x2*sint + y2*cost + y)
		s.Quad.V[3].X = float32(x1*cost - y2*sint + x)
		s.Quad.V[3].Y = float32(x1*sint + y2*cost + y)
	} else {
		s.Quad.V[0].X, s.Quad.V[0].Y = float32(x1+x), float32(y1+y)
		s.Quad.V[1].X, s.Quad.V[1].Y = float32(x2+x), float32(y1+y)
		s.Quad.V[2].X, s.Quad.V[2].Y = float32(x2+x), float32(y2+y)
		s.Quad.V[3].X, s.Quad.V[3].Y = float32(x1+x), float32(y2+y)
	}
	s.Quad.Render()
}
// RenderStretch draws the sprite stretched over the axis-aligned
// rectangle spanning (x1, y1) to (x2, y2).
func (s *Sprite) RenderStretch(x1, y1, x2, y2 float64) {
	l, t := float32(x1), float32(y1)
	r, b := float32(x2), float32(y2)
	s.Quad.V[0].X, s.Quad.V[0].Y = l, t
	s.Quad.V[1].X, s.Quad.V[1].Y = r, t
	s.Quad.V[2].X, s.Quad.V[2].Y = r, b
	s.Quad.V[3].X, s.Quad.V[3].Y = l, b
	s.Quad.Render()
}
// Render4V draws the sprite as an arbitrary quad with the four given
// corner positions, in vertex order 0..3.
func (s *Sprite) Render4V(x0, y0, x1, y1, x2, y2, x3, y3 float64) {
	xs := [4]float64{x0, x1, x2, x3}
	ys := [4]float64{y0, y1, y2, y3}
	for i := 0; i < 4; i++ {
		s.Quad.V[i].X = float32(xs[i])
		s.Quad.V[i].Y = float32(ys[i])
	}
	s.Quad.Render()
}
// SetTexture swaps the sprite's texture. When the new texture's
// dimensions differ from the old, the stored UV coordinates are
// rescaled so the same pixel rectangle keeps being sampled.
func (s *Sprite) SetTexture(tex *gfx.Texture) {
	s.Quad.Texture = tex

	tw, th := 1.0, 1.0
	if tex != nil {
		tw = float64(tex.Width())
		th = float64(tex.Height())
	}
	if tw == s.TexW && th == s.TexH {
		return
	}

	// Convert the current UVs back to pixels (old size), then
	// renormalize against the new texture size.
	u1 := float64(s.Quad.V[0].TX) * s.TexW / tw
	v1 := float64(s.Quad.V[0].TY) * s.TexH / th
	u2 := float64(s.Quad.V[2].TX) * s.TexW / tw
	v2 := float64(s.Quad.V[2].TY) * s.TexH / th
	s.TexW, s.TexH = tw, th

	s.Quad.V[0].TX, s.Quad.V[0].TY = float32(u1), float32(v1)
	s.Quad.V[1].TX, s.Quad.V[1].TY = float32(u2), float32(v1)
	s.Quad.V[2].TX, s.Quad.V[2].TY = float32(u2), float32(v2)
	s.Quad.V[3].TX, s.Quad.V[3].TY = float32(u1), float32(v2)
}
// SetTextureRect changes the texture sub-rectangle the sprite samples.
// An optional trailing bool (default true) controls whether the
// sprite's on-screen width/height are adjusted to match w and h.
func (sprite *Sprite) SetTextureRect(x, y, w, h float64, a ...interface{}) {
	adjSize := true
	if len(a) == 1 {
		if b, ok := a[0].(bool); ok {
			adjSize = b
		}
	}
	sprite.TX, sprite.TY = x, y
	if adjSize {
		sprite.W, sprite.H = w, h
	}
	// Normalize the new pixel rectangle into UV space.
	tx1 := sprite.TX / sprite.TexW
	ty1 := sprite.TY / sprite.TexH
	tx2 := (sprite.TX + w) / sprite.TexW
	ty2 := (sprite.TY + h) / sprite.TexH
	sprite.Quad.V[0].TX, sprite.Quad.V[0].TY = float32(tx1), float32(ty1)
	sprite.Quad.V[1].TX, sprite.Quad.V[1].TY = float32(tx2), float32(ty1)
	sprite.Quad.V[2].TX, sprite.Quad.V[2].TY = float32(tx2), float32(ty2)
	sprite.Quad.V[3].TX, sprite.Quad.V[3].TY = float32(tx1), float32(ty2)
	// The UVs above are written unflipped, so clear the flip flags and
	// re-apply the saved flip state to honor it for the new rectangle.
	bX, bY, bHS := sprite.XFlip, sprite.YFlip, sprite.HSFlip
	sprite.XFlip, sprite.YFlip = false, false
	sprite.SetFlip(bX, bY, bHS)
}
// SetColor sets the vertex color. With an optional int argument the
// color is applied only to that vertex; otherwise to all four.
func (s *Sprite) SetColor(col hge.Dword, arg ...interface{}) {
	vertex := -1
	if len(arg) == 1 {
		if n, ok := arg[0].(int); ok {
			vertex = n
		}
	}
	if vertex != -1 {
		s.Quad.V[vertex].Color = col
		return
	}
	for i := range s.Quad.V {
		s.Quad.V[i].Color = col
	}
}
// SetZ sets the depth value of one vertex when an int index is
// supplied, or of all four vertices otherwise.
func (s *Sprite) SetZ(z float64, arg ...interface{}) {
	vertex := -1
	if len(arg) == 1 {
		if n, ok := arg[0].(int); ok {
			vertex = n
		}
	}
	zf := float32(z)
	if vertex != -1 {
		s.Quad.V[vertex].Z = zf
		return
	}
	for i := range s.Quad.V {
		s.Quad.V[i].Z = zf
	}
}
// SetBlendMode sets the quad's blending mode.
func (s *Sprite) SetBlendMode(blend int) {
	s.Quad.Blend = blend
}
// SetHotSpot sets the sprite's anchor point in sprite-local pixels.
func (s *Sprite) SetHotSpot(x, y float64) {
	s.HotX, s.HotY = x, y
}
// SetFlip sets horizontal (x) and vertical (y) mirroring of the
// sprite's texture coordinates. hotSpot selects whether the hot spot is
// mirrored along with them.
func (sprite *Sprite) SetFlip(x, y, hotSpot bool) {
	var tx, ty float64
	// Undo any hot-spot mirroring that was applied under the previous
	// HSFlip setting, so it can be re-applied consistently below.
	if sprite.HSFlip && sprite.XFlip {
		sprite.HotX = sprite.W - sprite.HotX
	}
	if sprite.HSFlip && sprite.YFlip {
		sprite.HotY = sprite.H - sprite.HotY
	}
	sprite.HSFlip = hotSpot
	// Re-apply hot-spot mirroring under the new HSFlip setting for the
	// currently active flips.
	if sprite.HSFlip && sprite.XFlip {
		sprite.HotX = sprite.W - sprite.HotX
	}
	if sprite.HSFlip && sprite.YFlip {
		sprite.HotY = sprite.H - sprite.HotY
	}
	// Toggle horizontal flip by swapping left/right texture coordinates
	// (vertex pairs 0<->1 and 3<->2).
	if x != sprite.XFlip {
		tx = float64(sprite.Quad.V[0].TX)
		sprite.Quad.V[0].TX = sprite.Quad.V[1].TX
		sprite.Quad.V[1].TX = float32(tx)
		ty = float64(sprite.Quad.V[0].TY)
		sprite.Quad.V[0].TY = sprite.Quad.V[1].TY
		sprite.Quad.V[1].TY = float32(ty)
		tx = float64(sprite.Quad.V[3].TX)
		sprite.Quad.V[3].TX = sprite.Quad.V[2].TX
		sprite.Quad.V[2].TX = float32(tx)
		ty = float64(sprite.Quad.V[3].TY)
		sprite.Quad.V[3].TY = sprite.Quad.V[2].TY
		sprite.Quad.V[2].TY = float32(ty)
		sprite.XFlip = !sprite.XFlip
	}
	// Toggle vertical flip by swapping top/bottom texture coordinates
	// (vertex pairs 0<->3 and 1<->2).
	if y != sprite.YFlip {
		tx = float64(sprite.Quad.V[0].TX)
		sprite.Quad.V[0].TX = sprite.Quad.V[3].TX
		sprite.Quad.V[3].TX = float32(tx)
		ty = float64(sprite.Quad.V[0].TY)
		sprite.Quad.V[0].TY = sprite.Quad.V[3].TY
		sprite.Quad.V[3].TY = float32(ty)
		tx = float64(sprite.Quad.V[1].TX)
		sprite.Quad.V[1].TX = sprite.Quad.V[2].TX
		sprite.Quad.V[2].TX = float32(tx)
		ty = float64(sprite.Quad.V[1].TY)
		sprite.Quad.V[1].TY = sprite.Quad.V[2].TY
		sprite.Quad.V[2].TY = float32(ty)
		sprite.YFlip = !sprite.YFlip
	}
}
// Texture returns the texture the sprite renders with (may be nil).
func (s *Sprite) Texture() *gfx.Texture {
	return s.Quad.Texture
}
// TextureRect reports the texture sub-rectangle the sprite samples.
func (s *Sprite) TextureRect() (x, y, w, h float64) {
	return s.TX, s.TY, s.W, s.H
}
// Color reports the color of a vertex (optional int argument,
// defaulting to vertex 0).
func (s *Sprite) Color(arg ...interface{}) hge.Dword {
	vertex := 0
	if len(arg) == 1 {
		if n, ok := arg[0].(int); ok {
			vertex = n
		}
	}
	return s.Quad.V[vertex].Color
}
// Z reports the depth value of a vertex (optional int argument,
// defaulting to vertex 0).
func (s *Sprite) Z(arg ...interface{}) float64 {
	vertex := 0
	if len(arg) == 1 {
		if n, ok := arg[0].(int); ok {
			vertex = n
		}
	}
	return float64(s.Quad.V[vertex].Z)
}
// BlendMode reports the quad's current blending mode.
func (s *Sprite) BlendMode() int {
	return s.Quad.Blend
}
// HotSpot reports the sprite's anchor point.
func (s *Sprite) HotSpot() (x, y float64) {
	return s.HotX, s.HotY
}
// Flip reports the horizontal and vertical flip flags.
func (s *Sprite) Flip() (x, y bool) {
	return s.XFlip, s.YFlip
}
// Width reports the sprite's width in pixels.
func (s *Sprite) Width() float64 {
	return s.W
}
// Height reports the sprite's height in pixels.
func (s *Sprite) Height() float64 {
	return s.H
}
// BoundingBox returns the sprite's axis-aligned bounds when rendered
// (unrotated, unscaled) at (x, y).
func (s *Sprite) BoundingBox(x, y float64) *rect.Rect {
	left := x - s.HotX
	top := y - s.HotY
	return rect.New(left, top, left+s.W, top+s.H)
}
func (sprite *Sprite) BoundingBoxEx(x, y, rot, hscale, vscale float64) *rect.Rect {
var tx1, ty1, tx2, ty2 float64
var sint, cost float64
rect := new(rect.Rect)
tx1 = -sprite.HotX * hscale
ty1 = -sprite.HotY * vscale
tx2 = (sprite.W - sprite.HotX) * hscale
ty2 = (sprite.H - sprite.HotY) * vscale
if rot != 0.0 {
cost = math.Cos(rot)
sint = math.Sin(rot)
rect.Encapsulate(tx1*cost-ty1*sint+x, tx1*sint+ty1*cost+y)
rect.Encapsulate(tx2*cost-ty1*sint+x, tx2*sint+ty1*cost+y)
rect.Encapsulate(tx2*cost-ty2*sint+x, tx2*sint+ty2*cost+y)
rect.Encapsulate(tx1*cost-ty2*sint+x, tx1*sint+ty2*cost+y)
} else {
rect.Encapsulate(tx1+x, ty1+y)
rect.Encapsulate(tx2+x, ty1+y)
rect.Encapsulate(tx2+x, ty2+y)
rect.Encapsulate(tx1+x, ty2+y)
}
return rect
} | helpers/sprite/sprite.go | 0.59561 | 0.551996 | sprite.go | starcoder |
package slice
// SlicePush appends one or more elements to the end of the slice and
// returns the resulting length.
func SlicePush(slice *[]interface{}, elementsToAdd ...interface{}) int {
	grown := append(*slice, elementsToAdd...)
	*slice = grown
	return len(grown)
}
// SlicePop removes and returns the last element of the slice, or nil
// when the slice is empty.
func SlicePop(slice *[]interface{}) interface{} {
	s := *slice
	if len(s) == 0 {
		return nil
	}
	last := s[len(s)-1]
	*slice = s[:len(s)-1]
	return last
}
// SliceShift removes and returns the first element of the slice, or nil
// when the slice is empty.
func SliceShift(slice *[]interface{}) interface{} {
	s := *slice
	if len(s) == 0 {
		return nil
	}
	first := s[0]
	*slice = s[1:]
	return first
}
// RemoveItemByIndex removes the element at the given index position and
// returns the resulting length. An out-of-range position (negative or
// >= len) leaves the slice unchanged instead of panicking.
func RemoveItemByIndex(slice *[]interface{}, position int) int {
	// Guard: the original expression panicked on out-of-range indices.
	if position < 0 || position >= len(*slice) {
		return len(*slice)
	}
	*slice = append((*slice)[:position], (*slice)[position+1:]...)
	return len(*slice)
}
// SliceUnique removes repeated values from the slice in place, keeping
// the first occurrence of each, and returns the deduplicated slice.
// Values must be comparable; an uncomparable value (e.g. a slice)
// panics at runtime, exactly as the previous ==-based scan did.
func SliceUnique(slice *[]interface{}) (uniqueslice []interface{}) {
	// A set of seen values turns the previous O(n²) scan into O(n).
	seen := make(map[interface{}]struct{}, len(*slice))
	for _, v := range *slice {
		if _, ok := seen[v]; !ok {
			seen[v] = struct{}{}
			uniqueslice = append(uniqueslice, v)
		}
	}
	*slice = uniqueslice
	return
}
// SliceDiff returns a slice containing all the entries from slice1 that
// are not present in slice2. Duplicate entries in slice1 are preserved.
// Values must be comparable (uncomparable values panic, as before).
func SliceDiff(slice1 *[]interface{}, slice2 *[]interface{}) (diffSlice []interface{}) {
	// Index slice2 once so membership tests are O(1) instead of a
	// linear scan per element (previously O(len1*len2)).
	exclude := make(map[interface{}]struct{}, len(*slice2))
	for _, v := range *slice2 {
		exclude[v] = struct{}{}
	}
	for _, v := range *slice1 {
		if _, ok := exclude[v]; !ok {
			diffSlice = append(diffSlice, v)
		}
	}
	return
}
// SliceIntersect returns a slice containing all the entries from slice1
// that are present in slice2. Duplicate entries in slice1 are
// preserved. Values must be comparable (uncomparable values panic, as
// before).
func SliceIntersect(slice1 *[]interface{}, slice2 *[]interface{}) (intersectSlice []interface{}) {
	// Index slice2 once so membership tests are O(1) instead of a
	// linear scan per element (previously O(len1*len2)).
	include := make(map[interface{}]struct{}, len(*slice2))
	for _, v := range *slice2 {
		include[v] = struct{}{}
	}
	for _, v := range *slice1 {
		if _, ok := include[v]; ok {
			intersectSlice = append(intersectSlice, v)
		}
	}
	return
}
// isInterfaceInSlice reports whether inputInterface occurs in uniqSlice,
// compared with == (panics for uncomparable dynamic types).
func isInterfaceInSlice(inputInterface interface{}, uniqSlice []interface{}) bool {
	for _, candidate := range uniqSlice {
		if candidate == inputInterface {
			return true
		}
	}
	return false
}
package tensor
import (
"github.com/pkg/errors"
"gorgonia.org/tensor/internal/storage"
)
// Add performs a + b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Add(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate that both operands are number-typed and compatible.
	if err = binaryCheck(a, b, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Add failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	typ := a.Dtype().Type
	// prepDataVV extracts the raw headers and reports whether iterators
	// are required (useIter) and whether the operands were swapped.
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.Add")
	}
	if useIter {
		// Iterator path, taken when prepDataVV reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.AddIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse:
			// WithReuse: copy a into reuse, then operate on reuse in place.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.AddIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite a with the result.
			err = e.E.AddIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone an operand and write the result there.
			if swap {
				retVal = b.Clone().(Tensor)
			} else {
				retVal = a.Clone().(Tensor)
			}
			err = e.E.AddIter(typ, retVal.hdr(), dataB, ait, bit)
		}
		return
	}
	// Contiguous path: the same four cases without iterators.
	switch {
	case incr:
		err = e.E.AddIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Add(typ, dataReuse, dataB)
		retVal = reuse
	case !safe:
		err = e.E.Add(typ, dataA, dataB)
		retVal = a
	default:
		if swap {
			retVal = b.Clone().(Tensor)
		} else {
			retVal = a.Clone().(Tensor)
		}
		err = e.E.Add(typ, retVal.hdr(), dataB)
	}
	return
}
// Sub performs a - b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Sub(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate that both operands are number-typed and compatible.
	if err = binaryCheck(a, b, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Sub failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	typ := a.Dtype().Type
	// prepDataVV extracts the raw headers and reports whether iterators
	// are required (useIter) and whether the operands were swapped.
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.Sub")
	}
	if useIter {
		// Iterator path, taken when prepDataVV reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.SubIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse:
			// WithReuse: copy a into reuse, then operate on reuse in place.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.SubIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite a with the result.
			err = e.E.SubIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone an operand and write the result there.
			if swap {
				retVal = b.Clone().(Tensor)
			} else {
				retVal = a.Clone().(Tensor)
			}
			err = e.E.SubIter(typ, retVal.hdr(), dataB, ait, bit)
		}
		return
	}
	// Contiguous path: the same four cases without iterators.
	switch {
	case incr:
		err = e.E.SubIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Sub(typ, dataReuse, dataB)
		retVal = reuse
	case !safe:
		err = e.E.Sub(typ, dataA, dataB)
		retVal = a
	default:
		if swap {
			retVal = b.Clone().(Tensor)
		} else {
			retVal = a.Clone().(Tensor)
		}
		err = e.E.Sub(typ, retVal.hdr(), dataB)
	}
	return
}
// Mul performs a × b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Mul(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate that both operands are number-typed and compatible.
	if err = binaryCheck(a, b, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Mul failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	typ := a.Dtype().Type
	// prepDataVV extracts the raw headers and reports whether iterators
	// are required (useIter) and whether the operands were swapped.
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.Mul")
	}
	if useIter {
		// Iterator path, taken when prepDataVV reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.MulIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse:
			// WithReuse: copy a into reuse, then operate on reuse in place.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.MulIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite a with the result.
			err = e.E.MulIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone an operand and write the result there.
			if swap {
				retVal = b.Clone().(Tensor)
			} else {
				retVal = a.Clone().(Tensor)
			}
			err = e.E.MulIter(typ, retVal.hdr(), dataB, ait, bit)
		}
		return
	}
	// Contiguous path: the same four cases without iterators.
	switch {
	case incr:
		err = e.E.MulIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Mul(typ, dataReuse, dataB)
		retVal = reuse
	case !safe:
		err = e.E.Mul(typ, dataA, dataB)
		retVal = a
	default:
		if swap {
			retVal = b.Clone().(Tensor)
		} else {
			retVal = a.Clone().(Tensor)
		}
		err = e.E.Mul(typ, retVal.hdr(), dataB)
	}
	return
}
// Div performs a ÷ b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Div(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate that both operands are number-typed and compatible.
	if err = binaryCheck(a, b, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Div failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	typ := a.Dtype().Type
	// prepDataVV extracts the raw headers and reports whether iterators
	// are required (useIter) and whether the operands were swapped.
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.Div")
	}
	if useIter {
		// Iterator path, taken when prepDataVV reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.DivIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse:
			// WithReuse: copy a into reuse, then operate on reuse in place.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.DivIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite a with the result.
			err = e.E.DivIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone an operand and write the result there.
			if swap {
				retVal = b.Clone().(Tensor)
			} else {
				retVal = a.Clone().(Tensor)
			}
			err = e.E.DivIter(typ, retVal.hdr(), dataB, ait, bit)
		}
		return
	}
	// Contiguous path: the same four cases without iterators.
	switch {
	case incr:
		err = e.E.DivIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Div(typ, dataReuse, dataB)
		retVal = reuse
	case !safe:
		err = e.E.Div(typ, dataA, dataB)
		retVal = a
	default:
		if swap {
			retVal = b.Clone().(Tensor)
		} else {
			retVal = a.Clone().(Tensor)
		}
		err = e.E.Div(typ, retVal.hdr(), dataB)
	}
	return
}
// Pow performs a ^ b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Pow(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate that both operands are number-typed and compatible.
	if err = binaryCheck(a, b, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Pow failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	typ := a.Dtype().Type
	// prepDataVV extracts the raw headers and reports whether iterators
	// are required (useIter) and whether the operands were swapped.
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.Pow")
	}
	if useIter {
		// Iterator path, taken when prepDataVV reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.PowIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse:
			// WithReuse: copy a into reuse, then operate on reuse in place.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.PowIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite a with the result.
			err = e.E.PowIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone an operand and write the result there.
			if swap {
				retVal = b.Clone().(Tensor)
			} else {
				retVal = a.Clone().(Tensor)
			}
			err = e.E.PowIter(typ, retVal.hdr(), dataB, ait, bit)
		}
		return
	}
	// Contiguous path: the same four cases without iterators.
	switch {
	case incr:
		err = e.E.PowIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Pow(typ, dataReuse, dataB)
		retVal = reuse
	case !safe:
		err = e.E.Pow(typ, dataA, dataB)
		retVal = a
	default:
		if swap {
			retVal = b.Clone().(Tensor)
		} else {
			retVal = a.Clone().(Tensor)
		}
		err = e.E.Pow(typ, retVal.hdr(), dataB)
	}
	return
}
// Mod performs a % b elementwise. Both a and b must have the same shape.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) Mod(a Tensor, b Tensor, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate that both operands are number-typed and compatible.
	if err = binaryCheck(a, b, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Mod failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(a.Shape(), a.Dtype(), a.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	typ := a.Dtype().Type
	// prepDataVV extracts the raw headers and reports whether iterators
	// are required (useIter) and whether the operands were swapped.
	var dataA, dataB, dataReuse *storage.Header
	var ait, bit, iit Iterator
	var useIter, swap bool
	if dataA, dataB, dataReuse, ait, bit, iit, useIter, swap, err = prepDataVV(a, b, reuse); err != nil {
		return nil, errors.Wrapf(err, "StdEng.Mod")
	}
	if useIter {
		// Iterator path, taken when prepDataVV reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.ModIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse:
			// WithReuse: copy a into reuse, then operate on reuse in place.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.ModIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite a with the result.
			err = e.E.ModIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone an operand and write the result there.
			if swap {
				retVal = b.Clone().(Tensor)
			} else {
				retVal = a.Clone().(Tensor)
			}
			err = e.E.ModIter(typ, retVal.hdr(), dataB, ait, bit)
		}
		return
	}
	// Contiguous path: the same four cases without iterators.
	switch {
	case incr:
		err = e.E.ModIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Mod(typ, dataReuse, dataB)
		retVal = reuse
	case !safe:
		err = e.E.Mod(typ, dataA, dataB)
		retVal = a
	default:
		if swap {
			retVal = b.Clone().(Tensor)
		} else {
			retVal = a.Clone().(Tensor)
		}
		err = e.E.Mod(typ, retVal.hdr(), dataB)
	}
	return
}
// AddScalar performs t + s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) AddScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate the tensor and that the scalar's dtype matches it.
	if err = unaryCheck(t, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Add failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "Add failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool
	// The scalar is materialized into a header on whichever side it
	// occupies; scalarHeader tracks it so it can be released at the end.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Add")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Add")
		}
		scalarHeader = dataA
	}
	if useIter {
		// Iterator path, taken when prepData{VS,SV} reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.AddIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse && leftTensor:
			// WithReuse with the tensor on the left: copy t into reuse first.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.AddIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case toReuse && !leftTensor:
			// WithReuse with the tensor on the right: copy t into reuse first.
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			iit.Reset()
			bit.Reset()
			err = e.E.AddIter(typ, dataA, dataReuse, ait, iit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite t with the result.
			err = e.E.AddIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone t and write the result into the clone.
			retVal = a.Clone().(Tensor)
			if leftTensor {
				err = e.E.AddIter(typ, retVal.hdr(), dataB, ait, bit)
			} else {
				err = e.E.AddIter(typ, dataA, retVal.hdr(), ait, bit)
			}
		}
		// Release the materialized scalar header before returning.
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}
	// Contiguous path: the same cases without iterators.
	switch {
	case incr:
		err = e.E.AddIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Add(typ, dataReuse, dataB)
		retVal = reuse
	case toReuse && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.Add(typ, dataA, dataReuse)
		// Scalar-equivalent shapes leave the result in dataA; mirror it
		// back into reuse.
		if t.Shape().IsScalarEquiv() {
			storage.Copy(typ, dataReuse, dataA)
		}
		retVal = reuse
	case !safe:
		err = e.E.Add(typ, dataA, dataB)
		// Scalar-equivalent, tensor-on-the-right: the result landed in
		// dataA (the scalar side), so copy it back into t's header.
		if t.Shape().IsScalarEquiv() && !leftTensor {
			storage.Copy(typ, dataB, dataA)
		}
		retVal = a
	default:
		retVal = a.Clone().(Tensor)
		if !leftTensor {
			storage.Fill(typ, retVal.hdr(), dataA)
		}
		err = e.E.Add(typ, retVal.hdr(), dataB)
	}
	// Release the materialized scalar header before returning.
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
// SubScalar performs t - s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) SubScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate the tensor and that the scalar's dtype matches it.
	if err = unaryCheck(t, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Sub failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "Sub failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool
	// The scalar is materialized into a header on whichever side it
	// occupies; scalarHeader tracks it so it can be released at the end.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Sub")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Sub")
		}
		scalarHeader = dataA
	}
	if useIter {
		// Iterator path, taken when prepData{VS,SV} reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.SubIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse && leftTensor:
			// WithReuse with the tensor on the left: copy t into reuse first.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.SubIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case toReuse && !leftTensor:
			// WithReuse with the tensor on the right: copy t into reuse first.
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			iit.Reset()
			bit.Reset()
			err = e.E.SubIter(typ, dataA, dataReuse, ait, iit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite t with the result.
			err = e.E.SubIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone t and write the result into the clone.
			retVal = a.Clone().(Tensor)
			if leftTensor {
				err = e.E.SubIter(typ, retVal.hdr(), dataB, ait, bit)
			} else {
				err = e.E.SubIter(typ, dataA, retVal.hdr(), ait, bit)
			}
		}
		// Release the materialized scalar header before returning.
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}
	// Contiguous path: the same cases without iterators.
	switch {
	case incr:
		err = e.E.SubIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Sub(typ, dataReuse, dataB)
		retVal = reuse
	case toReuse && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.Sub(typ, dataA, dataReuse)
		// Scalar-equivalent shapes leave the result in dataA; mirror it
		// back into reuse.
		if t.Shape().IsScalarEquiv() {
			storage.Copy(typ, dataReuse, dataA)
		}
		retVal = reuse
	case !safe:
		err = e.E.Sub(typ, dataA, dataB)
		// Scalar-equivalent, tensor-on-the-right: the result landed in
		// dataA (the scalar side), so copy it back into t's header.
		if t.Shape().IsScalarEquiv() && !leftTensor {
			storage.Copy(typ, dataB, dataA)
		}
		retVal = a
	default:
		retVal = a.Clone().(Tensor)
		if !leftTensor {
			storage.Fill(typ, retVal.hdr(), dataA)
		}
		err = e.E.Sub(typ, retVal.hdr(), dataB)
	}
	// Release the materialized scalar header before returning.
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
// MulScalar performs t × s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
func (e StdEng) MulScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	// Validate the tensor and that the scalar's dtype matches it.
	if err = unaryCheck(t, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Mul failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "Mul failed")
	}
	// Decode the FuncOpts into the reuse tensor and the safe/reuse/incr flags.
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool
	// The scalar is materialized into a header on whichever side it
	// occupies; scalarHeader tracks it so it can be released at the end.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Mul")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Mul")
		}
		scalarHeader = dataA
	}
	if useIter {
		// Iterator path, taken when prepData{VS,SV} reports iteration is needed.
		switch {
		case incr:
			// WithIncr: accumulate the result into reuse.
			err = e.E.MulIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse && leftTensor:
			// WithReuse with the tensor on the left: copy t into reuse first.
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset()
			iit.Reset()
			err = e.E.MulIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case toReuse && !leftTensor:
			// WithReuse with the tensor on the right: copy t into reuse first.
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			iit.Reset()
			bit.Reset()
			err = e.E.MulIter(typ, dataA, dataReuse, ait, iit)
			retVal = reuse
		case !safe:
			// UseUnsafe: overwrite t with the result.
			err = e.E.MulIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			// Safe (default): clone t and write the result into the clone.
			retVal = a.Clone().(Tensor)
			if leftTensor {
				err = e.E.MulIter(typ, retVal.hdr(), dataB, ait, bit)
			} else {
				err = e.E.MulIter(typ, dataA, retVal.hdr(), ait, bit)
			}
		}
		// Release the materialized scalar header before returning.
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}
	// Contiguous path: the same cases without iterators.
	switch {
	case incr:
		err = e.E.MulIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Mul(typ, dataReuse, dataB)
		retVal = reuse
	case toReuse && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.Mul(typ, dataA, dataReuse)
		// Scalar-equivalent shapes leave the result in dataA; mirror it
		// back into reuse.
		if t.Shape().IsScalarEquiv() {
			storage.Copy(typ, dataReuse, dataA)
		}
		retVal = reuse
	case !safe:
		err = e.E.Mul(typ, dataA, dataB)
		// Scalar-equivalent, tensor-on-the-right: the result landed in
		// dataA (the scalar side), so copy it back into t's header.
		if t.Shape().IsScalarEquiv() && !leftTensor {
			storage.Copy(typ, dataB, dataA)
		}
		retVal = a
	default:
		retVal = a.Clone().(Tensor)
		if !leftTensor {
			storage.Fill(typ, retVal.hdr(), dataA)
		}
		err = e.E.Mul(typ, retVal.hdr(), dataB)
	}
	// Release the materialized scalar header before returning.
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
// DivScalar performs t ÷ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
//
// The structure mirrors MulScalar in this file; see its comments for the
// case-by-case rationale.
func (e StdEng) DivScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	if err = unaryCheck(t, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Div failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "Div failed")
	}
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool
	// Materialize the scalar on the correct side; scalarHeader tracks the
	// temporary header so it can be released on every exit path.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Div")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Div")
		}
		scalarHeader = dataA
	}
	// Non-contiguous tensors go through the iterator path.
	if useIter {
		switch {
		case incr:
			err = e.E.DivIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse && leftTensor:
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset() // rewind: CopyIter consumed the iterators
			iit.Reset()
			err = e.E.DivIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case toReuse && !leftTensor:
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			iit.Reset()
			bit.Reset()
			err = e.E.DivIter(typ, dataA, dataReuse, ait, iit)
			retVal = reuse
		case !safe:
			err = e.E.DivIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			retVal = a.Clone().(Tensor)
			if leftTensor {
				err = e.E.DivIter(typ, retVal.hdr(), dataB, ait, bit)
			} else {
				err = e.E.DivIter(typ, dataA, retVal.hdr(), ait, bit)
			}
		}
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}
	// Contiguous path.
	switch {
	case incr:
		err = e.E.DivIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Div(typ, dataReuse, dataB)
		retVal = reuse
	case toReuse && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.Div(typ, dataA, dataReuse)
		if t.Shape().IsScalarEquiv() {
			// Scalar-equivalent compensation; see MulScalar.
			storage.Copy(typ, dataReuse, dataA)
		}
		retVal = reuse
	case !safe:
		err = e.E.Div(typ, dataA, dataB)
		if t.Shape().IsScalarEquiv() && !leftTensor {
			storage.Copy(typ, dataB, dataA)
		}
		retVal = a
	default:
		retVal = a.Clone().(Tensor)
		if !leftTensor {
			// Scalar is the left operand: seed the clone with it first.
			storage.Fill(typ, retVal.hdr(), dataA)
		}
		err = e.E.Div(typ, retVal.hdr(), dataB)
	}
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
// PowScalar performs t ^ s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
//
// The structure mirrors MulScalar in this file; see its comments for the
// case-by-case rationale.
func (e StdEng) PowScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	if err = unaryCheck(t, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Pow failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "Pow failed")
	}
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool
	// Materialize the scalar on the correct side; scalarHeader tracks the
	// temporary header so it can be released on every exit path.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Pow")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Pow")
		}
		scalarHeader = dataA
	}
	// Non-contiguous tensors go through the iterator path.
	if useIter {
		switch {
		case incr:
			err = e.E.PowIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse && leftTensor:
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset() // rewind: CopyIter consumed the iterators
			iit.Reset()
			err = e.E.PowIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case toReuse && !leftTensor:
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			iit.Reset()
			bit.Reset()
			err = e.E.PowIter(typ, dataA, dataReuse, ait, iit)
			retVal = reuse
		case !safe:
			err = e.E.PowIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			retVal = a.Clone().(Tensor)
			if leftTensor {
				err = e.E.PowIter(typ, retVal.hdr(), dataB, ait, bit)
			} else {
				err = e.E.PowIter(typ, dataA, retVal.hdr(), ait, bit)
			}
		}
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}
	// Contiguous path.
	switch {
	case incr:
		err = e.E.PowIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Pow(typ, dataReuse, dataB)
		retVal = reuse
	case toReuse && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.Pow(typ, dataA, dataReuse)
		if t.Shape().IsScalarEquiv() {
			// Scalar-equivalent compensation; see MulScalar.
			storage.Copy(typ, dataReuse, dataA)
		}
		retVal = reuse
	case !safe:
		err = e.E.Pow(typ, dataA, dataB)
		if t.Shape().IsScalarEquiv() && !leftTensor {
			storage.Copy(typ, dataB, dataA)
		}
		retVal = a
	default:
		retVal = a.Clone().(Tensor)
		if !leftTensor {
			// Scalar is the left operand: seed the clone with it first.
			storage.Fill(typ, retVal.hdr(), dataA)
		}
		err = e.E.Pow(typ, retVal.hdr(), dataB)
	}
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
// ModScalar performs t % s elementwise. The leftTensor parameter indicates if the tensor is the left operand. Only scalar types are accepted in s.
// Acceptable FuncOpts are: UseUnsafe(), WithReuse(T), WithIncr(T)
//
// The structure mirrors MulScalar in this file; see its comments for the
// case-by-case rationale.
func (e StdEng) ModScalar(t Tensor, s interface{}, leftTensor bool, opts ...FuncOpt) (retVal Tensor, err error) {
	if err = unaryCheck(t, numberTypes); err != nil {
		return nil, errors.Wrapf(err, "Mod failed")
	}
	if err = scalarDtypeCheck(t, s); err != nil {
		return nil, errors.Wrap(err, "Mod failed")
	}
	var reuse DenseTensor
	var safe, toReuse, incr bool
	if reuse, safe, toReuse, incr, _, err = handleFuncOpts(t.Shape(), t.Dtype(), t.DataOrder(), true, opts...); err != nil {
		return nil, errors.Wrap(err, "Unable to handle funcOpts")
	}
	a := t
	typ := t.Dtype().Type
	var ait, bit, iit Iterator
	var dataA, dataB, dataReuse, scalarHeader *storage.Header
	var useIter, newAlloc bool
	// Materialize the scalar on the correct side; scalarHeader tracks the
	// temporary header so it can be released on every exit path.
	if leftTensor {
		if dataA, dataB, dataReuse, ait, iit, useIter, newAlloc, err = prepDataVS(t, s, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Mod")
		}
		scalarHeader = dataB
	} else {
		if dataA, dataB, dataReuse, bit, iit, useIter, newAlloc, err = prepDataSV(s, t, reuse); err != nil {
			return nil, errors.Wrapf(err, opFail, "StdEng.Mod")
		}
		scalarHeader = dataA
	}
	// Non-contiguous tensors go through the iterator path.
	if useIter {
		switch {
		case incr:
			err = e.E.ModIterIncr(typ, dataA, dataB, dataReuse, ait, bit, iit)
			retVal = reuse
		case toReuse && leftTensor:
			storage.CopyIter(typ, dataReuse, dataA, iit, ait)
			ait.Reset() // rewind: CopyIter consumed the iterators
			iit.Reset()
			err = e.E.ModIter(typ, dataReuse, dataB, iit, bit)
			retVal = reuse
		case toReuse && !leftTensor:
			storage.CopyIter(typ, dataReuse, dataB, iit, bit)
			iit.Reset()
			bit.Reset()
			err = e.E.ModIter(typ, dataA, dataReuse, ait, iit)
			retVal = reuse
		case !safe:
			err = e.E.ModIter(typ, dataA, dataB, ait, bit)
			retVal = a
		default:
			retVal = a.Clone().(Tensor)
			if leftTensor {
				err = e.E.ModIter(typ, retVal.hdr(), dataB, ait, bit)
			} else {
				err = e.E.ModIter(typ, dataA, retVal.hdr(), ait, bit)
			}
		}
		if newAlloc {
			freeScalar(scalarHeader.Raw)
		}
		returnHeader(scalarHeader)
		return
	}
	// Contiguous path.
	switch {
	case incr:
		err = e.E.ModIncr(typ, dataA, dataB, dataReuse)
		retVal = reuse
	case toReuse && leftTensor:
		storage.Copy(typ, dataReuse, dataA)
		err = e.E.Mod(typ, dataReuse, dataB)
		retVal = reuse
	case toReuse && !leftTensor:
		storage.Copy(typ, dataReuse, dataB)
		err = e.E.Mod(typ, dataA, dataReuse)
		if t.Shape().IsScalarEquiv() {
			// Scalar-equivalent compensation; see MulScalar.
			storage.Copy(typ, dataReuse, dataA)
		}
		retVal = reuse
	case !safe:
		err = e.E.Mod(typ, dataA, dataB)
		if t.Shape().IsScalarEquiv() && !leftTensor {
			storage.Copy(typ, dataB, dataA)
		}
		retVal = a
	default:
		retVal = a.Clone().(Tensor)
		if !leftTensor {
			// Scalar is the left operand: seed the clone with it first.
			storage.Fill(typ, retVal.hdr(), dataA)
		}
		err = e.E.Mod(typ, retVal.hdr(), dataB)
	}
	if newAlloc {
		freeScalar(scalarHeader.Raw)
	}
	returnHeader(scalarHeader)
	return
}
}
package tree
// Node is a single node of a binary search tree, holding an int key and
// links to its left (smaller) and right (larger) children.
type Node struct {
	key   int
	left  *Node
	right *Node
}

// NewNode returns a leaf node holding the given value.
func NewNode(value int) *Node {
	return &Node{key: value}
}

// BinarySearch is a (non-balancing) binary search tree of int keys.
// Duplicate keys are ignored on insert.
type BinarySearch struct {
	root *Node
}

// NewBinary returns an empty binary search tree.
func NewBinary() *BinarySearch {
	return &BinarySearch{}
}

// Insert adds key to the tree. Inserting a key that is already present is a
// no-op.
func (b *BinarySearch) Insert(key int) {
	newNode := NewNode(key)
	if b.root == nil {
		b.root = newNode
		return
	}
	b.insertNode(b.root, newNode)
}

// Search returns the node holding key, or nil if the key is absent.
func (b *BinarySearch) Search(key int) *Node {
	return b.searchNode(b.root, key)
}

// Remove deletes key from the tree if present.
func (b *BinarySearch) Remove(key int) {
	// Bug fix: removeNode returns the (possibly new) subtree root. The old
	// code discarded it, so removing the root node of the tree left b.root
	// pointing at the deleted node.
	b.root = b.removeNode(b.root, key)
}

// insertNode recursively places newNode under node. Keys equal to an
// existing key are silently dropped.
func (b *BinarySearch) insertNode(node, newNode *Node) {
	if node == nil || newNode == nil {
		return
	}
	switch {
	case newNode.key < node.key:
		if node.left == nil {
			node.left = newNode
		} else {
			b.insertNode(node.left, newNode)
		}
	case newNode.key > node.key:
		if node.right == nil {
			node.right = newNode
		} else {
			b.insertNode(node.right, newNode)
		}
	}
}

// searchNode recursively looks up key under node, returning the matching
// node or nil.
func (b *BinarySearch) searchNode(node *Node, key int) *Node {
	if node == nil {
		return nil
	}
	switch {
	case key < node.key:
		return b.searchNode(node.left, key)
	case key > node.key:
		return b.searchNode(node.right, key)
	default:
		return node
	}
}

// removeNode deletes key from the subtree rooted at node and returns the new
// subtree root.
func (b *BinarySearch) removeNode(node *Node, key int) *Node {
	if node == nil {
		return nil
	}
	if key < node.key {
		node.left = b.removeNode(node.left, key)
		return node
	}
	if key > node.key {
		node.right = b.removeNode(node.right, key)
		return node
	}
	// Found the node to remove.
	switch {
	case node.left == nil && node.right == nil:
		// Leaf: drop it.
		return nil
	case node.left == nil:
		// Only a right child: splice it in.
		return node.right
	case node.right == nil:
		// Only a left child: splice it in.
		return node.left
	}
	// Two children: replace the key with its in-order successor (the
	// smallest key in the right subtree), then delete that successor.
	successor := node.right
	for successor.left != nil {
		successor = successor.left
	}
	node.key = successor.key
	node.right = b.removeNode(node.right, successor.key)
	return node
}
package index
import "math"
// normPoint takes the latitude and longitude of one point and returns the
// x,y position on a world map. The map bounds are minimum -180,-90 and
// maximum 180,90. These values are x,y; not lat,lon. normd reports whether
// the point had to be wrapped back into bounds.
func normPoint(lat, lon float64) (x, y float64, normd bool) {
	// Fast path: the point is already inside the map bounds, which covers
	// the vast majority of inputs.
	inLon := lon >= -180 && lon <= 180
	inLat := lat >= -90 && lat <= 90
	if inLon && inLat {
		return lon, lat, false
	}
	// Fold the latitude into (-360,360), then reflect it across the poles
	// until it lands in [-90,90]. Each reflection crosses to the opposite
	// side of the globe, so the longitude shifts by 180.
	lat = math.Mod(lat, 360)
	for lat < -90 || lat > 90 {
		if lat < -90 {
			lat = -90 - (90 + lat)
			lon = 180 + lon
		}
		if lat > 90 {
			lat = 90 + (90 - lat)
			lon = 180 + lon
		}
	}
	// Wrap the longitude into [-180,180].
	for lon = math.Mod(lon, 360); lon < -180; lon += 360 {
	}
	for ; lon > 180; lon -= 360 {
	}
	return lon, lat, true
}
// normRect takes the latitude and longitude of two points which define a
// rectangle and returns arrays of x,y rectangles on a world map. The map
// bounds are minimum -180,-90 and maximum 180,90. These values are x,y; not
// lat,lon.
func normRect(swLat, swLon, neLat, neLon float64) (mins, maxs [][]float64, normd bool) {
	return normRectStep(swLat, swLon, neLat, neLon, nil, nil, false)
}
// normRectStep is the recursive worker behind normRect. It normalizes the
// rect (swLat,swLon)-(neLat,neLon), appending the resulting in-bounds
// rectangles to mins/maxs as [x,y] (i.e. [lon,lat]) pairs. normd reports
// whether any normalization was performed.
func normRectStep(swLat, swLon, neLat, neLon float64, mins, maxs [][]float64, normd bool) (minsOut, maxsOut [][]float64, normdOut bool) {
	// Make sure that the northeast point is greater than the southwest point.
	if neLat < swLat {
		swLat, neLat, normd = neLat, swLat, true
	}
	if neLon < swLon {
		swLon, neLon, normd = neLon, swLon, true
	}
	if swLon < -180 || neLon > 180 {
		// The rect is horizontally out of bounds.
		if neLon-swLon > 360 {
			// The rect goes around the world. Just normalize to -180 to 180.
			swLon = -180
			neLon = 180
		} else if swLon < -180 && neLon < -180 {
			// The rect is way left. Move it into range.
			// TODO: replace loops with math/mod.
			for {
				swLon += 360
				neLon += 360
				if swLon >= -180 || neLon >= -180 {
					break
				}
			}
		} else if swLon > 180 && neLon > 180 {
			// The rect is way right. Move it into range.
			// TODO: replace loops with math/mod.
			for {
				swLon -= 360
				neLon -= 360
				if swLon <= 180 || neLon <= 180 {
					break
				}
			}
		} else {
			// The rect straddles the antimeridian: split it into two rects,
			// one ending at +180 and one starting at -180.
			if swLon < -180 {
				mins, maxs, normd = normRectStep(swLat, 180+(180+swLon), neLat, 180, mins, maxs, normd)
				mins, maxs, normd = normRectStep(swLat, -180, neLat, neLon, mins, maxs, normd)
			} else if neLon > 180 {
				mins, maxs, normd = normRectStep(swLat, swLon, neLat, 180, mins, maxs, normd)
				mins, maxs, normd = normRectStep(swLat, -180, neLat, -180+(neLon-180), mins, maxs, normd)
			} else {
				panic("should not be reached")
			}
			return mins, maxs, true
		}
		// Longitudes adjusted; recurse in case latitude is also out of bounds.
		return normRectStep(swLat, swLon, neLat, neLon, mins, maxs, true)
	} else if swLat < -90 || neLat > 90 {
		// The rect is vertically out of bounds.
		if neLat-swLat > 360 {
			// The rect goes around the world. Just normalize to -180 to 180.
			// NOTE(review): this mirrors the longitude branch above, but
			// latitude spans -90..90, so clamping to -180..180 here looks
			// suspicious — confirm intent.
			swLat = -180
			neLat = 180
		} else if swLat < -90 && neLat < -90 {
			// Entirely beyond the south pole: mirror the latitudes and shift
			// the longitudes to the opposite side of the globe.
			swLat = -90 + (-90 - swLat)
			neLat = -90 + (-90 - neLat)
			swLon = swLon - 180
			neLon = neLon - 180
		} else if swLat > 90 && neLat > 90 {
			// Entirely beyond the north pole.
			swLat = 90 - (swLat - 90)
			neLat = 90 - (neLat - 90)
			swLon = swLon - 180
			neLon = neLon - 180
		} else {
			// The rect crosses a pole: split it into the in-bounds part and
			// the mirrored part on the far side of the globe.
			if neLat > 90 {
				mins, maxs, normd = normRectStep(swLat, swLon, 90, neLon, mins, maxs, normd)
				mins, maxs, normd = normRectStep(90-(neLat-90), swLon-180, 90, neLon-180, mins, maxs, normd)
			} else if swLat < -90 {
				mins, maxs, normd = normRectStep(-90, swLon, neLat, neLon, mins, maxs, normd)
				mins, maxs, normd = normRectStep(-90, swLon-180, -90-(90+swLat), neLon-180, mins, maxs, normd)
			} else {
				panic("should not be reached")
			}
			return mins, maxs, true
		}
		// Latitudes adjusted; recurse to fix up the shifted longitudes.
		return normRectStep(swLat, swLon, neLat, neLon, mins, maxs, true)
	} else {
		// rect is completely in bounds.
		mins = append(mins, []float64{swLon, swLat})
		maxs = append(maxs, []float64{neLon, neLat})
		return mins, maxs, normd
	}
}
package rtc
import "math"
// WorldT represents the world to be rendered: the objects that make up the
// scene plus the light sources that illuminate them.
type WorldT struct {
	Objects []Object
	Lights  []*PointLightT // TODO: Replace with light interfaces.
}
// World creates an empty world with no objects and no lights.
func World() *WorldT {
	w := &WorldT{}
	return w
}
// DefaultWorld returns a default test world: two concentric spheres lit by
// a single white point light.
func DefaultWorld() *WorldT {
	outer := Sphere()
	m := outer.GetMaterial()
	m.Color = Color(0.8, 1.0, 0.6)
	m.Diffuse = 0.7
	m.Specular = 0.2

	inner := Sphere()
	inner.SetTransform(Scaling(0.5, 0.5, 0.5))

	light := PointLight(Point(-10, 10, -10), Color(1, 1, 1))
	return &WorldT{
		Objects: []Object{outer, inner},
		Lights:  []*PointLightT{light},
	}
}
// IntersectWorld intersects a world with a ray, returning all hits across
// every object, sorted by t.
func (w *WorldT) IntersectWorld(ray RayT) []IntersectionT {
	var all []IntersectionT
	for _, obj := range w.Objects {
		all = append(all, Intersect(obj, ray)...)
	}
	return Intersections(all...) // Sorts them.
}
// ShadeHit returns the color (as a Tuple) for the precomputed intersection,
// summing the surface, reflected, and refracted contributions over all lights.
func (w *WorldT) ShadeHit(comps *Comps, remaining int) Tuple {
	var total Tuple
	for _, light := range w.Lights {
		inShadow := w.IsShadowed(comps.OverPoint, light)
		surface := Lighting(comps.Object.GetMaterial(),
			comps.Object,
			light,
			comps.Point,
			comps.EyeVector,
			comps.NormalVector,
			inShadow,
		)
		reflected := w.ReflectedColor(comps, remaining)
		refracted := w.RefractedColor(comps, remaining)

		m := comps.Object.GetMaterial()
		if m.Reflective > 0 && m.Transparency > 0 {
			// Both reflective and transparent: blend the two contributions
			// using the Schlick approximation to the Fresnel effect.
			fresnel := comps.Schlick()
			total = total.Add(surface).Add(reflected.MultScalar(fresnel)).Add(refracted.MultScalar(1 - fresnel))
		} else {
			total = total.Add(surface).Add(reflected).Add(refracted)
		}
	}
	return total
}
// ColorAt returns the color (as a Tuple) seen along the given ray; black
// when the ray hits nothing.
func (w *WorldT) ColorAt(ray RayT, remaining int) Tuple {
	xs := w.IntersectWorld(ray)
	h := Hit(xs)
	if h == nil {
		return Color(0, 0, 0)
	}
	return w.ShadeHit(h.PrepareComputations(ray, xs), remaining)
}
// IsShadowed reports whether point lies in shadow with respect to light,
// i.e. whether some object intersects the segment between point and light.
func (w *WorldT) IsShadowed(point Tuple, light *PointLightT) bool {
	toLight := light.position.Sub(point)
	distance := toLight.Magnitude()
	ray := Ray(point, toLight.Normalize())
	hit := Hit(w.IntersectWorld(ray))
	return hit != nil && hit.T < distance
}
// ReflectedColor returns the color contributed by reflection at the
// precomputed intersection; black for non-reflective surfaces or when the
// recursion budget is spent.
func (w *WorldT) ReflectedColor(comps *Comps, remaining int) Tuple {
	reflective := comps.Object.GetMaterial().Reflective
	if remaining < 1 || reflective == 0 {
		return Color(0, 0, 0)
	}
	bounce := Ray(comps.OverPoint, comps.ReflectVector)
	return w.ColorAt(bounce, remaining-1).MultScalar(reflective)
}
// RefractedColor returns the color contributed by refraction at the
// precomputed intersection; black for opaque surfaces, when the recursion
// budget is spent, or under total internal reflection.
func (w *WorldT) RefractedColor(comps *Comps, remaining int) Tuple {
	transparency := comps.Object.GetMaterial().Transparency
	if remaining < 1 || transparency == 0 {
		return Color(0, 0, 0)
	}
	// Snell's law: sin²(θt) determines whether refraction is possible.
	nRatio := comps.N1 / comps.N2
	cosI := comps.EyeVector.Dot(comps.NormalVector)
	sin2t := nRatio * nRatio * (1 - (cosI * cosI))
	if sin2t > 1 {
		return Color(0, 0, 0) // total internal reflection
	}
	cosT := math.Sqrt(1 - sin2t)
	direction := comps.NormalVector.MultScalar(nRatio*cosI - cosT).Sub(comps.EyeVector.MultScalar(nRatio))
	refracted := Ray(comps.UnderPoint, direction)
	return w.ColorAt(refracted, remaining-1).MultScalar(transparency)
}
// WorldToObject converts a world-space point to object space, recursing
// through the object's parents from the outermost in.
func WorldToObject(object Object, point Tuple) Tuple {
	if parent := object.GetParent(); parent != nil {
		point = WorldToObject(parent, point)
	}
	return object.GetTransform().Inverse().MultTuple(point)
}
// NormalToWorld converts an object-space normal to world space, taking into
// account all the parents of the object.
func NormalToWorld(object Object, normal Tuple) Tuple {
inv := object.GetTransform().Inverse()
worldNormal := inv.Transpose().MultTuple(normal)
worldNormal[3] = 0 // W
normal = worldNormal.Normalize()
if p := object.GetParent(); p != nil {
normal = NormalToWorld(p, normal)
}
return normal
} | rtc/world.go | 0.708717 | 0.598107 | world.go | starcoder |
package parser
import (
"fmt"
"time"
"github.com/ebay/akutan/api"
"github.com/ebay/akutan/rpc"
"github.com/ebay/akutan/util/unicode"
"github.com/vektah/goparsify"
)
// unit converts the parsed unit expression in child 1 into a *Unit result.
// Only QName and Entity children are legal; anything else is a parser bug.
func unit(n *goparsify.Result) {
	child := n.Child[1].Result
	switch v := child.(type) {
	case *QName:
		n.Result = &Unit{Value: v.Value}
	case *Entity:
		n.Result = &Unit{Value: v.Value}
	default:
		panic(fmt.Sprintf("unsupported unit type: %T", v))
	}
}
// lang converts the parsed language tag in child 1 into the node's result.
// An empty token means no language tag was given.
func lang(n *goparsify.Result) {
	tag := n.Child[1].Token
	if tag == "" {
		n.Result = NoLanguage
		return
	}
	n.Result = &Language{Value: tag}
}
// literalBool converts a parsed "true"/"false" token (child 0) into a
// *LiteralBool, attaching the optional unit parsed into child 1.
func literalBool(n *goparsify.Result) {
	var v bool
	switch n.Child[0].Token {
	case "true":
		v = true
	case "false":
		v = false
	default:
		panic(fmt.Sprintf("unsupported bool literal: %s", n.Token))
	}
	n.Result = &LiteralBool{getUnitType(n), v}
}
// getUnitType extracts the unit parsed into child 1, returning NoUnit when
// the optional unit clause was absent (nil Result).
func getUnitType(n *goparsify.Result) Unit {
	res := n.Child[1].Result
	if res == nil {
		return NoUnit
	}
	return *res.(*Unit)
}
// literalNumber converts a parsed numeric value (child 0) into *LiteralInt
// or *LiteralFloat depending on how the number parser classified it,
// attaching the optional unit parsed into child 1.
func literalNumber(n *goparsify.Result) {
	u := getUnitType(n)
	switch v := n.Child[0].Result.(type) {
	case int64:
		n.Result = &LiteralInt{u, v}
	case float64:
		n.Result = &LiteralFloat{u, v}
	default:
		panic(fmt.Sprintf("unsupported number literal: '%s' %v", n.Token, v))
	}
}
// literalString converts a parsed quoted string (child 0) into a
// *LiteralString, normalizing its unicode and attaching the optional
// language tag parsed into child 1.
func literalString(n *goparsify.Result) {
	lng := NoLanguage
	if r := n.Child[1].Result; r != nil {
		lng = *r.(*Language)
	}
	n.Result = &LiteralString{
		Value:    unicode.Normalize(n.Child[0].Token),
		Language: lng,
	}
}
// literalTimeY builds a *LiteralTime with year precision from a parsed year.
// NOTE(review): `zero` appears to be a package-level reference time that
// supplies all unspecified fields — it is declared elsewhere in this file.
func literalTimeY(n *goparsify.Result) {
	year := n.Result.(int)
	n.Result = &LiteralTime{
		NoUnit,
		time.Date(year, zero.Month(), zero.Day(), zero.Hour(), zero.Minute(), zero.Second(), zero.Nanosecond(), zero.Location()),
		api.Year,
	}
}

// literalTimeYM refines a year-precision time (child 0) with a month
// (child 2), producing month precision.
func literalTimeYM(n *goparsify.Result) {
	lt := n.Child[0].Result.(*LiteralTime).Value
	month := time.Month(n.Child[2].Result.(int))
	n.Result = &LiteralTime{
		NoUnit,
		time.Date(lt.Year(), month, zero.Day(), zero.Hour(), zero.Minute(), zero.Second(), zero.Nanosecond(), zero.Location()),
		api.Month,
	}
}

// literalTimeYMD refines a month-precision time (child 0) with a day
// (child 2), producing day precision.
func literalTimeYMD(n *goparsify.Result) {
	lt := n.Child[0].Result.(*LiteralTime).Value
	day := n.Child[2].Result.(int)
	n.Result = &LiteralTime{
		NoUnit,
		time.Date(lt.Year(), lt.Month(), day, zero.Hour(), zero.Minute(), zero.Second(), zero.Nanosecond(), zero.Location()),
		api.Day,
	}
}

// literalTimeYMDH refines a day-precision time (child 0) with an hour
// (child 2), producing hour precision.
func literalTimeYMDH(n *goparsify.Result) {
	lt := n.Child[0].Result.(*LiteralTime).Value
	hour := n.Child[2].Result.(int)
	n.Result = &LiteralTime{
		NoUnit,
		time.Date(lt.Year(), lt.Month(), lt.Day(), hour, zero.Minute(), zero.Second(), zero.Nanosecond(), zero.Location()),
		api.Hour,
	}
}

// literalTimeYMDHM refines an hour-precision time (child 0) with a minute
// (child 2), producing minute precision.
func literalTimeYMDHM(n *goparsify.Result) {
	lt := n.Child[0].Result.(*LiteralTime).Value
	minute := n.Child[2].Result.(int)
	n.Result = &LiteralTime{
		NoUnit,
		time.Date(lt.Year(), lt.Month(), lt.Day(), lt.Hour(), minute, zero.Second(), zero.Nanosecond(), zero.Location()),
		api.Minute,
	}
}

// literalTimeYMDHMS refines a minute-precision time (child 0) with a second
// (child 2), producing second precision.
func literalTimeYMDHMS(n *goparsify.Result) {
	lt := n.Child[0].Result.(*LiteralTime).Value
	second := n.Child[2].Result.(int)
	n.Result = &LiteralTime{
		NoUnit,
		time.Date(lt.Year(), lt.Month(), lt.Day(), lt.Hour(), lt.Minute(), second, zero.Nanosecond(), zero.Location()),
		api.Second,
	}
}

// literalTimeYMDHMSN refines a second-precision time (child 0) with a
// nanosecond count (child 2), producing nanosecond precision.
func literalTimeYMDHMSN(n *goparsify.Result) {
	lt := n.Child[0].Result.(*LiteralTime).Value
	nanosecond := n.Child[2].Result.(int64)
	n.Result = &LiteralTime{
		NoUnit,
		time.Date(lt.Year(), lt.Month(), lt.Day(), lt.Hour(), lt.Minute(), lt.Second(), int(nanosecond), zero.Location()),
		api.Nanosecond,
	}
}

// literalTime finishes a date-time literal: it unwraps the *LiteralTime
// built by the functions above and attaches the optional unit parsed into
// child 1 of this node.
func literalTime(n *goparsify.Result) {
	lt := n.Child[0].Child[1].Result.(*LiteralTime)
	lt.Unit = getUnitType(n)
	n.Result = lt
}
// bindOpResult returns a goparsify Map callback that sets a freshly
// allocated Operator holding opResult as the node's result. A new instance
// is created per invocation — rather than sharing one — so that consumers
// mutating the Operator they were handed cannot affect other parses.
func bindOpResult(opResult rpc.Operator) func(n *goparsify.Result) {
	return func(n *goparsify.Result) {
		op := Operator{Value: opResult}
		n.Result = &op
	}
}
// literalSet builds a LiteralSet from the parsed elements, de-duplicating
// Terms by their String() representation while preserving first-seen order.
func literalSet(n *goparsify.Result) {
	// Child layout of the parsed set:
	// 0: '{'
	// 1: Cut()
	// 2: optionalWS
	// 3: ${terms}
	// 4: optionalWS
	// 5: '}'
	set := &LiteralSet{
		Values: make([]Term, 0, len(n.Child[3].Child)),
	}
	seen := make(map[string]bool, len(n.Child[3].Child))
	for _, c := range n.Child[3].Child {
		term := c.Result.(Term)
		str := term.String()
		if seen[str] {
			// Duplicate term: keep only the first occurrence.
			continue
		}
		seen[str] = true
		set.Values = append(set.Values, term)
	}
	n.Result = set
}
// where collects the parsed quads of a WHERE clause into a WhereClause,
// skipping comment lines (which parse to a nil Result).
func where(n *goparsify.Result) {
	quads := make(WhereClause, 0, len(n.Child))
	for _, child := range n.Child {
		if child.Result == nil {
			// Comment lines leave a nil Result; drop them.
			continue
		}
		quads = append(quads, child.Result.(*Quad))
	}
	n.Result = quads
}
// limitOffset builds the paging modifier for a limit-first clause: child 0
// is the mandatory limit, child 1 the optional offset (nil when absent).
func limitOffset(n *goparsify.Result) {
	limit := n.Child[0].Result.(uint64)
	res := LimitOffset{
		Limit: &limit,
	}
	if n.Child[1].Result != nil {
		offset := n.Child[1].Result.(uint64)
		res.Offset = &offset
	}
	n.Result = res
}

// offsetLimit builds the paging modifier for an offset-first clause: child 0
// is the mandatory offset, child 1 the optional limit (nil when absent).
func offsetLimit(n *goparsify.Result) {
	offset := n.Child[0].Result.(uint64)
	res := LimitOffset{
		Offset: &offset,
	}
	if n.Child[1].Result != nil {
		limit := n.Child[1].Result.(uint64)
		res.Limit = &limit
	}
	n.Result = res
}
// orderBy returns a Map callback that wraps the parsed variable result in an
// OrderCondition with the given sort direction.
func orderBy(direction SortDirection) func(*goparsify.Result) {
	return func(n *goparsify.Result) {
		n.Result = OrderCondition{
			On:        n.Result.(*Variable),
			Direction: direction,
		}
	}
}

// orderBys collects the individual OrderConditions of an ORDER BY clause
// (parsed into child 4) into a slice.
func orderBys(n *goparsify.Result) {
	conditions := n.Child[4]
	res := make([]OrderCondition, 0, len(conditions.Child))
	for _, child := range conditions.Child {
		res = append(res, child.Result.(OrderCondition))
	}
	n.Result = res
}
// aggExpr builds an AggregateExpr from a parsed aggregate call: child 0 is
// the aggregate function, child 2 its argument — either the "*" wildcard or
// a variable.
func aggExpr(n *goparsify.Result) {
	res := AggregateExpr{
		Function: n.Child[0].Result.(AggregateFunction),
	}
	if n.Child[2].Token == "*" {
		res.Of = Wildcard{}
	} else {
		res.Of = n.Child[2].Result.(*Variable)
	}
	n.Result = &res
}

// boundExpr builds a BoundExpression: child 1 is the expression, child 3 the
// variable it is bound to.
func boundExpr(n *goparsify.Result) {
	res := BoundExpression{
		Expr: n.Child[1].Result.(Expression),
		As:   n.Child[3].Result.(*Variable),
	}
	n.Result = &res
}
// selectExprs gathers the items of a SELECT projection. A bare "*" token
// becomes a single Wildcard item; otherwise each child contributes one item.
func selectExprs(n *goparsify.Result) {
	if n.Token == "*" {
		n.Result = []selectClauseItem{Wildcard{}}
		return
	}
	items := make([]selectClauseItem, 0, len(n.Child))
	for _, c := range n.Child {
		items = append(items, c.Result.(selectClauseItem))
	}
	n.Result = items
}
// child returns a Map callback that promotes the result of the idx'th child
// to be this node's result. This is useful for picking out the interesting
// part of a Seq().
func child(idx int) func(*goparsify.Result) {
	return func(n *goparsify.Result) {
		r := n.Child[idx].Result
		n.Result = r
	}
}
// selectQuery assembles a complete SELECT Query from its parsed children:
// child 1 is an optional select keyword, child 2 the projection items,
// child 3 the WHERE clause, child 4 optional ORDER BY conditions, and
// child 5 optional paging (limit/offset).
func selectQuery(n *goparsify.Result) {
	// selectClauseKeyword is optional, so might be nil
	var kw selectClauseKeyword
	if n.Child[1].Result != nil {
		kw = n.Child[1].Result.(selectClauseKeyword)
	}
	q := Query{
		Type: SelectQuery,
		Select: SelectClause{
			Keyword: kw,
			Items:   n.Child[2].Result.([]selectClauseItem),
		},
		Where: n.Child[3].Result.(WhereClause),
	}
	// these 2 are optional, so might be nil
	if n.Child[4].Result != nil {
		q.Modifiers.OrderBy = n.Child[4].Result.([]OrderCondition)
	}
	if n.Child[5].Result != nil {
		q.Modifiers.Paging = n.Child[5].Result.(LimitOffset)
	}
	n.Result = &q
}

// askQuery assembles an ASK Query: child 1 is the WHERE clause.
func askQuery(n *goparsify.Result) {
	q := Query{
		Type:  AskQuery,
		Where: n.Child[1].Result.(WhereClause),
	}
	n.Result = &q
}

// wrapWhere wraps a bare WHERE clause result in a legacy pattern Query so
// the old query format flows through the same pipeline as SELECT/ASK.
func wrapWhere(n *goparsify.Result) {
	q := Query{
		Type:  LegacyPatternQuery,
		Where: n.Result.(WhereClause),
	}
	n.Result = &q
}
}
package date
import (
"fmt"
"math"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
// SpecialFns holds this package's built-in date functions, keyed by name.
// It is populated by the init function below.
var SpecialFns map[string]values.Function
func init() {
SpecialFns = map[string]values.Function{
"second": values.NewFunction(
"second",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Second())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"minute": values.NewFunction(
"minute",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Minute())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"hour": values.NewFunction(
"hour",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Hour())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"weekDay": values.NewFunction(
"weekDay",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Weekday())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"monthDay": values.NewFunction(
"monthDay",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Day())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"yearDay": values.NewFunction(
"yearDay",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().YearDay())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"month": values.NewFunction(
"month",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Month())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"year": values.NewFunction(
"year",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Year())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"week": values.NewFunction(
"week",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
_, week := v1.Time().Time().ISOWeek()
return values.NewInt(int64(week)), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"quarter": values.NewFunction(
"quarter",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
month := v1.Time().Time().Month()
return values.NewInt(int64(math.Ceil(float64(month) / 3.0))), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"millisecond": values.NewFunction(
"millisecond",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
millisecond := int64(time.Nanosecond) * int64(v1.Time().Time().Nanosecond()) / int64(time.Millisecond)
return values.NewInt(millisecond), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"microsecond": values.NewFunction(
"microsecond",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
microsecond := int64(time.Nanosecond) * int64(v1.Time().Time().Nanosecond()) / int64(time.Microsecond)
return values.NewInt(microsecond), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"nanosecond": values.NewFunction(
"nanosecond",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time},
Required: semantic.LabelSet{"t"},
Return: semantic.Int,
}),
func(args values.Object) (values.Value, error) {
v1, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
if v1.Type().Nature() == semantic.Time {
return values.NewInt(int64(v1.Time().Time().Nanosecond())), nil
}
return nil, fmt.Errorf("cannot convert argument t of type %v to time", v1.Type().Nature())
}, false,
),
"truncate": values.NewFunction(
"truncate",
semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{"t": semantic.Time, "unit": semantic.Duration},
Required: semantic.LabelSet{"t", "unit"},
Return: semantic.Time,
}),
func(args values.Object) (values.Value, error) {
v, ok := args.Get("t")
if !ok {
return nil, errors.New(codes.Invalid, "missing argument t")
}
u, unitOk := args.Get("unit")
if !unitOk {
return nil, errors.New(codes.Invalid, "missing argument unit")
}
if v.Type().Nature() == semantic.Time && u.Type().Nature() == semantic.Duration {
return values.NewTime(v.Time().Truncate(u.Duration())), nil
}
return nil, fmt.Errorf("cannot truncate argument t of type %v to unit %v", v.Type().Nature(), u)
}, false,
),
}
flux.RegisterPackageValue("date", "second", SpecialFns["second"])
flux.RegisterPackageValue("date", "minute", SpecialFns["minute"])
flux.RegisterPackageValue("date", "hour", SpecialFns["hour"])
flux.RegisterPackageValue("date", "weekDay", SpecialFns["weekDay"])
flux.RegisterPackageValue("date", "monthDay", SpecialFns["monthDay"])
flux.RegisterPackageValue("date", "yearDay", SpecialFns["yearDay"])
flux.RegisterPackageValue("date", "month", SpecialFns["month"])
flux.RegisterPackageValue("date", "year", SpecialFns["year"])
flux.RegisterPackageValue("date", "week", SpecialFns["week"])
flux.RegisterPackageValue("date", "quarter", SpecialFns["quarter"])
flux.RegisterPackageValue("date", "millisecond", SpecialFns["millisecond"])
flux.RegisterPackageValue("date", "microsecond", SpecialFns["microsecond"])
flux.RegisterPackageValue("date", "nanosecond", SpecialFns["nanosecond"])
flux.RegisterPackageValue("date", "truncate", SpecialFns["truncate"])
} | stdlib/date/date.go | 0.57344 | 0.48438 | date.go | starcoder |
package turfgo
const (
	// infinity is the IEEE-754 bit pattern of +Inf written as an integer
	// constant. Assigned to a float64 field it becomes roughly 9.22e18 —
	// a "really far away" sentinel value, not an actual floating-point Inf.
	// NOTE(review): presumably intentional; confirm math.Inf(1) was not meant.
	infinity = 0x7FF0000000000000
)

// Unit for distance
type Unit int

// Unit constants
const (
	Kilometers Unit = iota
	Miles
	Meters
	Centimeters
	Degrees
	Radians
	NauticalMiles
	Inches
	Yards
	Feet
)

// radius maps each unit to the Earth's radius expressed in that unit
// (e.g. 6373 km); used to convert central angles to surface distances.
var radius = map[Unit]float64{
	Kilometers:    6373,
	Miles:         3960,
	Meters:        6373000,
	Centimeters:   6.373e+8,
	Degrees:       57.2957795,
	Radians:       1,
	NauticalMiles: 3441.145,
	Inches:        250905600,
	Yards:         6969600,
	Feet:          20908792.65,
}

// areaFactors maps each unit to a conversion factor for areas; Meters is 1,
// so presumably these convert square meters into the squared unit.
var areaFactors = map[Unit]float64{
	Kilometers:  0.000001,
	Meters:      1,
	Centimeters: 10000,
	Miles:       3.86e-7,
	Yards:       1.195990046,
	Feet:        10.763910417,
	Inches:      1550.003100006,
}
//Geometry is geoJson geometry; every geometry can flatten itself to the
//points it is made of.
type Geometry interface {
	getPoints() []*Point
}

//PolygonI is geoJson polygon; implemented by Polygon and MultiPolygon.
type PolygonI interface {
	getPolygons() []*Polygon
}
//A Point on earth, given as a latitude/longitude pair.
type Point struct {
	Lat float64
	Lng float64
}

// getPoints satisfies Geometry by exposing the point as a one-element slice.
func (pt *Point) getPoints() []*Point {
	return []*Point{pt}
}

//NewPoint creates a new point for given lat, lng
func NewPoint(lat float64, lon float64) *Point {
	return &Point{Lat: lat, Lng: lon}
}
//MultiPoint geojson type: an unordered collection of points.
type MultiPoint struct {
	Points []*Point
}

// getPoints exposes the underlying point slice.
func (mp *MultiPoint) getPoints() []*Point {
	return mp.Points
}

//NewMultiPoint creates a new multiPoint for given points
func NewMultiPoint(points []*Point) *MultiPoint {
	return &MultiPoint{points}
}
//LineString geojson type: an ordered sequence of points forming a line.
type LineString struct {
	Points []*Point
}

// getPoints exposes the line's vertices.
func (ls *LineString) getPoints() []*Point {
	return ls.Points
}

//NewLineString creates a new lineString for given points
func NewLineString(points []*Point) *LineString {
	return &LineString{points}
}
//MultiLineString geojson type: a collection of lineStrings.
type MultiLineString struct {
	LineStrings []*LineString
}

// getPoints concatenates the vertices of every contained lineString.
// An empty (non-nil) slice is returned when there are no lineStrings.
func (mls *MultiLineString) getPoints() []*Point {
	pts := make([]*Point, 0)
	for _, ls := range mls.LineStrings {
		pts = append(pts, ls.getPoints()...)
	}
	return pts
}

//NewMultiLineString creates a new multiLineString for given lineStrings
func NewMultiLineString(lineStrings []*LineString) *MultiLineString {
	return &MultiLineString{lineStrings}
}
//Polygon geojson type: its lineStrings are the polygon's rings.
type Polygon struct {
	LineStrings []*LineString
}

// getPoints concatenates the vertices of every ring; an empty (non-nil)
// slice is returned when there are no rings.
func (pg *Polygon) getPoints() []*Point {
	pts := make([]*Point, 0)
	for _, ring := range pg.LineStrings {
		pts = append(pts, ring.getPoints()...)
	}
	return pts
}

// getPolygons satisfies PolygonI by exposing the polygon as a
// one-element slice.
func (pg *Polygon) getPolygons() []*Polygon {
	return []*Polygon{pg}
}

//NewPolygon creates a new polygon for given lineStrings
func NewPolygon(lineStrings []*LineString) *Polygon {
	return &Polygon{lineStrings}
}
//MultiPolygon geojson type: a collection of polygons.
type MultiPolygon struct {
	Polygons []*Polygon
}

// getPoints concatenates the vertices of every contained polygon;
// an empty (non-nil) slice is returned when there are no polygons.
func (mpg *MultiPolygon) getPoints() []*Point {
	pts := make([]*Point, 0)
	for _, poly := range mpg.Polygons {
		pts = append(pts, poly.getPoints()...)
	}
	return pts
}

// getPolygons exposes the underlying polygon slice.
func (mpg *MultiPolygon) getPolygons() []*Polygon {
	return mpg.Polygons
}

// NewMultiPolygon creates a new multiPolygon for given polygons
func NewMultiPolygon(polygons []*Polygon) *MultiPolygon {
	return &MultiPolygon{polygons}
}
// BoundingBox represent a bbox; West/South are the minimum longitude and
// latitude, East/North the maximum.
type BoundingBox struct {
	West  float64
	South float64
	East  float64
	North float64
}

// NewInfiniteBBox creates a bounding box with corners really far away.
// Minima start at +infinity and maxima at -infinity so the box can be
// shrunk/grown by accumulating points.
func NewInfiniteBBox() *BoundingBox {
	return &BoundingBox{infinity, infinity, -infinity, -infinity}
}

// NewBBox creates bounding box with given corners
func NewBBox(w float64, s float64, e float64, n float64) *BoundingBox {
	return &BoundingBox{w, s, e, n}
} | types.go | 0.835249 | 0.40869 | types.go | starcoder
package fbast
// Operator enumerates the flatbuffers operator codes.
// NOTE(review): this file appears to be machine-generated flatbuffers code;
// prefer regenerating over hand-editing.
type Operator = int8

// Operator codes; values must stay in sync with the flatbuffers schema.
const (
	OperatorMultiplicationOperator   Operator = 0
	OperatorDivisionOperator         Operator = 1
	OperatorModuloOperator           Operator = 2
	OperatorPowerOperator            Operator = 3
	OperatorAdditionOperator         Operator = 4
	OperatorSubtractionOperator      Operator = 5
	OperatorLessThanEqualOperator    Operator = 6
	OperatorLessThanOperator         Operator = 7
	OperatorGreaterThanEqualOperator Operator = 8
	OperatorGreaterThanOperator      Operator = 9
	OperatorStartsWithOperator       Operator = 10
	OperatorInOperator               Operator = 11
	OperatorNotOperator              Operator = 12
	OperatorExistsOperator           Operator = 13
	OperatorNotEmptyOperator         Operator = 14
	OperatorEmptyOperator            Operator = 15
	OperatorEqualOperator            Operator = 16
	OperatorNotEqualOperator         Operator = 17
	OperatorRegexpMatchOperator      Operator = 18
	OperatorNotRegexpMatchOperator   Operator = 19
)

// EnumNamesOperator maps each operator code to its schema name.
var EnumNamesOperator = map[Operator]string{
	OperatorMultiplicationOperator:   "MultiplicationOperator",
	OperatorDivisionOperator:         "DivisionOperator",
	OperatorModuloOperator:           "ModuloOperator",
	OperatorPowerOperator:            "PowerOperator",
	OperatorAdditionOperator:         "AdditionOperator",
	OperatorSubtractionOperator:      "SubtractionOperator",
	OperatorLessThanEqualOperator:    "LessThanEqualOperator",
	OperatorLessThanOperator:         "LessThanOperator",
	OperatorGreaterThanEqualOperator: "GreaterThanEqualOperator",
	OperatorGreaterThanOperator:      "GreaterThanOperator",
	OperatorStartsWithOperator:       "StartsWithOperator",
	OperatorInOperator:               "InOperator",
	OperatorNotOperator:              "NotOperator",
	OperatorExistsOperator:           "ExistsOperator",
	OperatorNotEmptyOperator:         "NotEmptyOperator",
	OperatorEmptyOperator:            "EmptyOperator",
	OperatorEqualOperator:            "EqualOperator",
	OperatorNotEqualOperator:         "NotEqualOperator",
	OperatorRegexpMatchOperator:      "RegexpMatchOperator",
	OperatorNotRegexpMatchOperator:   "NotRegexpMatchOperator",
} | ast/internal/fbast/Operator.go | 0.602646 | 0.525856 | Operator.go | starcoder
package prime
import (
"bytes"
"crypto/sha256"
"github.com/gogo/protobuf/proto"
"github.com/ibalajiarun/go-consensus/peer/peerpb"
pb "github.com/ibalajiarun/go-consensus/protocols/prime/primepb"
)
// oinstance tracks the local state of one ordering (global-order) instance:
// the instance's protocol state plus quorum certificates for the prepare
// and commit phases.
type oinstance struct {
	s     *prime
	is    pb.OInstanceState
	pCert *oquorum // prepare-phase votes (PrePrepare/Prepare)
	cCert *oquorum // commit-phase votes
}
// makeOInstance creates a fresh ordering instance for the view/index of the
// received OrderMessage, with empty prepare and commit quorums and status None.
func makeOInstance(m *pb.OrderMessage, p *prime) *oinstance {
	return &oinstance{
		is: pb.OInstanceState{
			View:            m.View,
			Index:           m.Index,
			Status:          pb.OInstanceState_None,
			POSummaryMatrix: m.POSummaryMatrix,
		},
		pCert: newOQuorum(p),
		cCert: newOQuorum(p),
	}
}
// updateOCommand reconciles the summary matrix in a received OrderMessage
// with the one already stored for the instance. If the instance has no
// matrix yet, the message's matrix is adopted; otherwise each row must be
// equal, and a mismatch is logged and rejected (false).
// NOTE(review): assumes both matrices have the same number of rows — confirm.
func (p *prime) updateOCommand(inst *oinstance, m *pb.OrderMessage) bool {
	if inst.is.POSummaryMatrix != nil {
		for i := range inst.is.POSummaryMatrix.POSummaryMatrix {
			if !inst.is.POSummaryMatrix.POSummaryMatrix[i].Equals(m.POSummaryMatrix.POSummaryMatrix[i]) {
				p.logger.Debugf("Different summaries for same instance %v: Has: %v, Received %v.", inst.is, inst.is.POSummaryMatrix, m.POSummaryMatrix)
				return false
			}
		}
	} else {
		inst.is.POSummaryMatrix = m.POSummaryMatrix
	}
	return true
}
// updateOCommandHash reconciles the summary-matrix hash in a received
// OrderMessage with the stored one: adopt it when absent, otherwise require
// byte equality; a mismatch is logged and rejected (false).
func (p *prime) updateOCommandHash(inst *oinstance, m *pb.OrderMessage) bool {
	if inst.is.POSummaryMatrixHash != nil {
		if !bytes.Equal(inst.is.POSummaryMatrixHash, m.POSummaryMatrixHash) {
			p.logger.Debugf("Different summary hashes for same instance %v: Has: %v, Received %v.", inst.is, inst.is.POSummaryMatrixHash, m.POSummaryMatrixHash)
			return false
		}
	} else {
		inst.is.POSummaryMatrixHash = m.POSummaryMatrixHash
	}
	return true
}
// sendPrePrepare starts a new ordering instance as the primary of the
// current view: it snapshots the latest pre-order summary matrix, assigns
// it the next order index, records the instance, and broadcasts a
// PrePrepare (which also counts as the primary's own prepare vote).
func (p *prime) sendPrePrepare() {
	if !p.isPrimaryAtView(p.id, p.oview) {
		p.logger.Errorf("Not the leader of view %d: %d", p.oview, p.id)
		return
	}
	// Nothing new to order since the last PrePrepare we sent.
	if p.lastPrepareHash != nil {
		if bytes.Equal(p.lastPreorderSummariesHash, p.lastPrepareHash) {
			return
		}
	}
	p.prePrepareCounter++
	// Deep-copy the summary matrix so later in-place updates to
	// p.lastPreorderSummaries cannot mutate this proposal.
	// BUG FIX: the previous code did copy(mat, src) and then
	// copy(mat[i].POSummary, src[i].POSummary); after the struct copy,
	// mat[i].POSummary aliased src[i].POSummary, so the inner copy was a
	// self-copy and no deep copy actually happened.
	src := p.lastPreorderSummaries.POSummaryMatrix
	mat := make([]pb.POSummary, len(src))
	for i, s := range src {
		mat[i] = s
		// s.POSummary[:0:0] is an empty slice with zero capacity, so the
		// append below allocates a fresh, independent backing array.
		mat[i].POSummary = append(s.POSummary[:0:0], s.POSummary...)
	}
	poSumMat := pb.POSummaryMatrix{POSummaryMatrix: mat}
	summaryBytes, err := proto.Marshal(&poSumMat)
	if err != nil {
		panic(err)
	}
	summaryHash := sha256.Sum256(summaryBytes)
	p.lastPrepareHash = p.lastPreorderSummariesHash
	index := p.oindex
	p.oindex++
	inst := &oinstance{
		s: p,
		is: pb.OInstanceState{
			View:                p.oview,
			Index:               index,
			Status:              pb.OInstanceState_PrePrepared,
			POSummaryMatrix:     &poSumMat,
			POSummaryMatrixHash: summaryHash[:],
		},
		pCert: newOQuorum(p),
		cCert: newOQuorum(p),
	}
	p.olog[index] = inst
	p.logger.Debugf("Sending PrePrepare for %v", inst.is.Index)
	pm := &pb.OrderMessage{
		View:                inst.is.View,
		Index:               inst.is.Index,
		Type:                pb.OrderMessage_PrePrepare,
		POSummaryMatrix:     inst.is.POSummaryMatrix,
		POSummaryMatrixHash: inst.is.POSummaryMatrixHash,
	}
	inst.pCert.log(p.id, pm)
	p.broadcast(pm, false)
}
// checkOCommandHash reports whether hash equals the SHA-256 digest of the
// protobuf encoding of cmd. Marshalling failures are treated as fatal.
func (p *prime) checkOCommandHash(cmd *pb.POSummaryMatrix, hash []byte) bool {
	encoded, err := proto.Marshal(cmd)
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(encoded)
	return bytes.Equal(sum[:], hash)
}
// onPrePrepare handles a PrePrepare from the current primary: it validates
// the sender and the command hash, records/updates the instance, treats the
// PrePrepare as the primary's prepare vote, broadcasts this replica's own
// Prepare, and advances the instance through Prepared/Committed if quorums
// are already complete.
func (p *prime) onPrePrepare(m *pb.OrderMessage, from peerpb.PeerID) {
	// Check if the message sender is the current primary
	if !p.isCurrentPrimary(from) {
		p.logger.Errorf("PrePrepare vcSender Non-Primary %v: %v", from, m)
		return
	}
	if !p.checkOCommandHash(m.POSummaryMatrix, m.POSummaryMatrixHash) {
		p.logger.Panicf("The hash does not correspond to the command: %v", m)
	}
	inst, exists := p.olog[m.Index]
	// Update the log with the message if it not known already. Otherwise,
	// ensure that the log is consistent with the message.
	if !exists {
		inst = makeOInstance(m, p)
		p.olog[m.Index] = inst
	} else if !p.updateOCommand(inst, m) || !p.updateOCommandHash(inst, m) {
		return
	}
	// The PrePrepare message of the primary serves as the prepare message.
	inst.pCert.log(from, m)
	inst.is.Status = pb.OInstanceState_PrePrepared
	pm := &pb.OrderMessage{
		View:                m.View,
		Index:               m.Index,
		Type:                pb.OrderMessage_Prepare,
		POSummaryMatrixHash: m.POSummaryMatrixHash,
	}
	// Record our own prepare vote before broadcasting it.
	inst.pCert.log(p.id, pm)
	p.broadcast(pm, false)
	if inst.pCert.Majority(pm) && inst.is.IsPrePrepared() {
		inst.is.Status = pb.OInstanceState_Prepared
		cm := &pb.OrderMessage{
			View:                pm.View,
			Index:               pm.Index,
			Type:                pb.OrderMessage_Commit,
			POSummaryMatrixHash: pm.POSummaryMatrixHash,
		}
		p.broadcast(cm, false)
		// Record our own commit vote as well.
		inst.cCert.log(p.id, cm)
		if inst.cCert.Majority(cm) && inst.is.IsPrepared() {
			inst.is.Status = pb.OInstanceState_Committed
			p.exec()
		}
	}
}
// onPrepare handles a Prepare vote from a non-primary replica: it
// records/updates the instance, logs the vote, and once a majority of
// prepares exists for a PrePrepared instance it moves to Prepared,
// broadcasts a Commit, and may advance to Committed and execute.
func (p *prime) onPrepare(m *pb.OrderMessage, from peerpb.PeerID) {
	// The primary does not send Prepare; its PrePrepare already counted.
	if p.isCurrentPrimary(from) {
		p.logger.Errorf("Prepare received vcSender Primary %v: %v", from, m)
		return
	}
	inst, exists := p.olog[m.Index]
	// Update the log with the message if it not known already. Otherwise,
	// ensure that the log is consistent with the message.
	if !exists {
		inst = makeOInstance(m, p)
		p.olog[m.Index] = inst
	} else if !p.updateOCommandHash(inst, m) {
		return
	}
	inst.pCert.log(from, m)
	if inst.pCert.Majority(m) && inst.is.IsPrePrepared() {
		inst.is.Status = pb.OInstanceState_Prepared
		cm := &pb.OrderMessage{
			View:                m.View,
			Index:               m.Index,
			Type:                pb.OrderMessage_Commit,
			POSummaryMatrixHash: m.POSummaryMatrixHash,
		}
		p.broadcast(cm, false)
		// Record our own commit vote before checking for a commit quorum.
		inst.cCert.log(p.id, cm)
		if inst.cCert.Majority(cm) && inst.is.IsPrepared() {
			inst.is.Status = pb.OInstanceState_Committed
			p.exec()
		}
	}
}
// onCommit handles a Commit vote: it records/updates the instance, logs the
// vote, and once a majority of commits exists for a Prepared instance it
// moves to Committed and triggers execution. (The previous comment,
// "HandleP2b handles P2b message", was a copy-paste leftover.)
func (p *prime) onCommit(m *pb.OrderMessage, from peerpb.PeerID) {
	inst, exists := p.olog[m.Index]
	// Update the log with the message if it not known already. Otherwise,
	// ensure that the log is consistent with the message.
	if !exists {
		inst = makeOInstance(m, p)
		p.olog[m.Index] = inst
	} else if !p.updateOCommandHash(inst, m) {
		return
	}
	inst.cCert.log(from, m)
	if inst.cCert.Majority(m) && inst.is.IsPrepared() {
		inst.is.Status = pb.OInstanceState_Committed
		p.exec()
	} else if inst.is.IsCommitted() {
		// Already committed: a late vote may still unblock execution.
		p.exec()
	}
	//p.logger.Debugf("commit acks for %v: %v", m.Command.ID, len(p.log[m.Index].cCert.msgs))
}
// isCurrentPrimary reports whether id is the primary of this replica's
// current ordering view.
func (p *prime) isCurrentPrimary(id peerpb.PeerID) bool {
	return p.isPrimaryAtView(id, p.oview)
}

// isPrimaryAtView reports whether id is the primary at the given view;
// the primary rotates round-robin over the peer set (view mod N).
func (p *prime) isPrimaryAtView(id peerpb.PeerID, view pb.View) bool {
	return id == peerpb.PeerID(int(view)%len(p.nodes))
} | protocols/prime/oinstance.go | 0.571527 | 0.403567 | oinstance.go | starcoder
package matrix
import (
crand "crypto/rand"
"encoding/binary"
"errors"
"math"
"math/rand"
"time"
)
// newByFloatArray initializes the matrix storage from vector: an empty
// vector yields a zero matrix, otherwise a private copy of the data is
// stored and validated against the matrix dimensions; any validation
// error is recorded in m.err.
func (m *Matrix) newByFloatArray(vector []float64) {
	if len(vector) == 0 {
		m.matrix = make([]float64, m.row*m.column)
		return
	}
	m.matrix = append([]float64(nil), vector...)
	if err := m.checkNormal(); err != nil {
		m.err = err
	}
}
// newByFLoat64 fills the matrix storage with value in every cell.
func (m *Matrix) newByFLoat64(value float64) {
	cells := make([]float64, m.row*m.column)
	for i := range cells {
		cells[i] = value
	}
	m.matrix = cells
}
// New will return *Matrix of the given dimensions. value selects the fill:
// a []float64 is used as the matrix data, an int or float64 fills every
// cell with that number, and nil yields a zero matrix; any other type
// records an error on the matrix. Invalid dimensions also record an error.
func New(row, column int, value interface{}) (matrix *Matrix) {
	matrix = new(Matrix)
	matrix.row = row
	matrix.column = column
	if row <= 0 || column <= 0 {
		matrix.err = errors.New("Length is not greater 0")
		return
	}
	// A type switch replaces the original chain of type assertions; the
	// accepted types and resulting behavior are unchanged.
	switch v := value.(type) {
	case []float64:
		matrix.newByFloatArray(v)
	case int:
		matrix.newByFLoat64(float64(v))
	case float64:
		matrix.newByFLoat64(v)
	case nil:
		matrix.matrix = make([]float64, row*column)
	default:
		matrix.err = errors.New("The argument type is not allowed")
	}
	return
}
// NewVector will create vector by array: a column matrix (len(row) x 1)
// holding a private copy of row. An empty input records an error.
func NewVector(row []float64) (matrix *Matrix) {
	matrix = new(Matrix)
	if len(row) == 0 {
		matrix.err = errors.New("The vector is broken")
		return
	}
	matrix.row = len(row)
	matrix.column = 1
	matrix.matrix = append([]float64(nil), row...)
	return
}
// NewRandom will return matrix which values are 0~1, each rounded to the
// given number of decimal places. The math/rand generator is seeded from
// crypto/rand, falling back to the wall clock if that read fails.
func NewRandom(row, column int, decimal uint8) (matrix *Matrix) {
	matrix = new(Matrix)
	if row <= 0 || column <= 0 {
		matrix.err = errors.New("Length is not greater 0")
		return
	}
	matrix.row = row
	matrix.column = column
	matrix.matrix = make([]float64, row*column)
	// d is 10^decimal; drawing an int in [0, d) and dividing by d yields a
	// value in [0, 1) with `decimal` decimal places.
	d := math.Pow10(int(decimal))
	var s int64
	if err := binary.Read(crand.Reader, binary.LittleEndian, &s); err != nil {
		s = time.Now().UnixNano()
	}
	rand.Seed(s)
	for i := 0; i < row*column; i++ {
		matrix.matrix[i] = float64(rand.Intn(int(d))) / d
	}
	return
}
// NewHotVector will return hot vector: a size x 1 column matrix that is
// zero everywhere except for a 1 at the (1-based) index place. Invalid
// arguments record an error instead.
func NewHotVector(size, place int) (matrix *Matrix) {
	matrix = new(Matrix)
	switch {
	case size <= 0 || place <= 0:
		matrix.err = errors.New("The size and place must be > 0")
		return
	case place > size:
		matrix.err = errors.New("place must be less or equal than size")
		return
	}
	matrix.row = size
	matrix.column = 1
	matrix.matrix = make([]float64, size)
	matrix.matrix[place-1] = 1
	return
}
// NewEye will return Unit matrix (identity) of the given side length;
// a non-positive length records an error via the package's newError helper.
func NewEye(length int) (matrix *Matrix) {
	matrix = new(Matrix)
	if length <= 0 {
		matrix.err = newError("lenght should greater than 0", "NewEye", matrix, nil)
		return
	}
	matrix.row = length
	matrix.column = length
	matrix.matrix = make([]float64, length*length)
	// Set the main diagonal to 1.
	for i := 0; i < length; i++ {
		matrix.matrix[matrix.column*i+i] = 1
	}
	return
}
// Copy will copy matrix: a deep copy of the data slice, preserving
// dimensions and any recorded error.
func Copy(mat *Matrix) *Matrix {
	vector := make([]float64, len(mat.matrix))
	copy(vector, mat.matrix)
	matrix := &Matrix{mat.row, mat.column, vector, mat.err}
	return matrix
} | create.go | 0.544317 | 0.527925 | create.go | starcoder
package electreIII
import (
"fmt"
"github.com/Azbesciak/RealDecisionMaker/lib/utils"
)
// DefaultDistillationFunc is the standard ELECTRE III discrimination
// threshold function s(λ) = -0.15λ + 0.3.
var DefaultDistillationFunc = utils.LinearFunctionParameters{A: -.15, B: .3}

// CompareFunction decides whether new should replace old when searching
// for the best quality value.
type CompareFunction = func(old int, new int) bool
// RankAscending runs the ascending distillation: at each step the
// alternatives with the greatest quality are selected first.
func RankAscending(matrix *AlternativesMatrix, distillationFun *utils.LinearFunctionParameters) *[]int {
	return rank(matrix, distillationFun, greater)
}
// RankDescending runs the descending distillation (selecting the lowest
// quality first) and then flips the positions so that 1 is still the best:
// each position p becomes maxPosition + 1 - p.
func RankDescending(matrix *AlternativesMatrix, distillationFun *utils.LinearFunctionParameters) *[]int {
	ranking := rank(matrix, distillationFun, lower)
	maxPosition := Max(ranking)
	minusValuesFrom(ranking, maxPosition+1)
	return ranking
}
// Max returns the largest element of values; it panics when the slice
// is empty.
func Max(values *[]int) int {
	if len(*values) == 0 {
		panic(fmt.Errorf("slice is empty"))
	}
	top := (*values)[0]
	for _, v := range (*values)[1:] {
		if v > top {
			top = v
		}
	}
	return top
}
// minusValuesFrom replaces every element v of values with value - v,
// in place.
func minusValuesFrom(values *[]int, value int) {
	vs := *values
	for i := range vs {
		vs[i] = value - vs[i]
	}
}
// rank runs a complete distillation over the credibility matrix: the
// diagonal is removed (an alternative never outranks itself) and the
// recursive distillation starts at position 1 from the matrix's maximum
// credibility. evaluateFunction selects which quality is "best" at each
// step (ascending vs. descending distillation).
// The previous version also built a `positions` slice and an `indices`
// slice that were never read — dead code, removed.
func rank(matrix *AlternativesMatrix, distillationFun *utils.LinearFunctionParameters, evaluateFunction CompareFunction) *[]int {
	withoutD := removeDiagonal(matrix)
	return distillate(withoutD.Max(), 1, withoutD, distillationFun, evaluateFunction, false)
}
// samePositions returns a slice of length size with every entry set
// to value.
func samePositions(size, value int) *[]int {
	pos := make([]int, size)
	for i := 0; i < size; i++ {
		pos[i] = value
	}
	return &pos
}
// distillate recursively assigns ranking positions. When maxCred is
// exhausted (0) every remaining alternative shares the current position.
// Otherwise it derives a credibility cutoff from distillationFun, computes
// each alternative's quality (strength minus weakness), picks the best
// per evaluateFunction, resolves ties via an inner distillation, and then
// recurses on the remaining alternatives at the next position. isInner
// marks tie-breaking sub-distillations, which do not recurse further.
func distillate(
	maxCred float64, position int,
	matrix *Matrix,
	distillationFun *utils.LinearFunctionParameters,
	evaluateFunction CompareFunction,
	isInner bool,
) *[]int {
	// No credibility left to discriminate on: all share this position.
	if maxCred == 0 {
		return samePositions(matrix.Size, position)
	}
	minCred, valuesToConsider := getDistillateMatrix(distillationFun, maxCred, matrix)
	quality := computeQuality(valuesToConsider)
	_, bestIndices := findBestMatch(quality, evaluateFunction)
	// positions entries stay 0 until assigned; 0 marks "not yet ranked".
	positions := samePositions(matrix.Size, 0)
	updatePositions(position, minCred, matrix, bestIndices, positions, distillationFun, evaluateFunction)
	indicesLeftToUpdate := updatedPositions(bestIndices, positions)
	// Stop when everything is ranked, or when inside a tie-breaking run.
	if len(*indicesLeftToUpdate) == matrix.Size || isInner {
		return positions
	}
	position++
	// Recurse on the alternatives that were not ranked in this pass.
	nextIterationMatrix := matrix.Without(indicesLeftToUpdate)
	furtherPositions := distillate(nextIterationMatrix.Max(), position, nextIterationMatrix, distillationFun, evaluateFunction, false)
	writePositionsSequentially(furtherPositions, positions)
	return positions
}
// writePositionsSequentially fills the zero (unranked) slots of positions,
// in order, with the values of positionsToWrite. It panics when there are
// more zero slots than values to write.
func writePositionsSequentially(positionsToWrite, positions *[]int) {
	next := 0
	src := *positionsToWrite
	for i := range *positions {
		if (*positions)[i] != 0 {
			continue
		}
		if next >= len(src) {
			panic(fmt.Errorf(
				"position %d is out of scope for possible possitions %v and all positions %v",
				i, src, *positions,
			))
		}
		(*positions)[i] = src[next]
		next++
	}
}
// updatedPositions returns, out of the given indices, those whose entry in
// positions is non-zero (i.e. those that have already received a rank).
func updatedPositions(indices, positions *[]int) *[]int {
	assigned := []int{}
	for _, idx := range *indices {
		if (*positions)[idx] == 0 {
			continue
		}
		assigned = append(assigned, idx)
	}
	return &assigned
}
// updatePositions assigns the given position to the best-quality indices.
// When several indices are tied and credibility remains (minCred > 0), a
// tie-breaking inner distillation is run on the tied sub-matrix and its
// results are written back; otherwise every tied index simply receives
// the same position.
func updatePositions(
	position int, minCred float64,
	valuesToConsider *Matrix, bestIndices, positions *[]int,
	distillationFun *utils.LinearFunctionParameters, evaluateFunction CompareFunction,
) {
	bestIndicesNum := len(*bestIndices)
	if bestIndicesNum > 1 && minCred > 0 {
		nextToFilter := valuesToConsider.Slice(bestIndices)
		subPositions := distillate(minCred, position, nextToFilter, distillationFun, evaluateFunction, true)
		updateValues(bestIndices, positions, subPositions)
	} else if bestIndicesNum > 0 {
		updateValues(bestIndices, positions, samePositions(bestIndicesNum, position))
	}
}
// getDistillateMatrix computes the next credibility cutoff and the filtered
// matrix for one distillation step. minCred is the largest credibility
// strictly below maxCred - s(maxCred); the returned matrix keeps only the
// entries v > minCred that additionally beat the symmetric entry by more
// than s(v) (the discrimination test).
func getDistillateMatrix(distillationFun *utils.LinearFunctionParameters, maxCred float64, matrix *Matrix) (float64, *Matrix) {
	v, _ := distillationFun.Evaluate(maxCred)
	minCredThreshold := maxCred - v
	minCred := matrix.FindBest(func(old, new float64) bool {
		// ok because the lowest value is 0, on diagonal for sure.
		return new < minCredThreshold && new > old
	})
	valuesToConsider := matrix.Filter(func(row, col int, v float64) bool {
		if v <= minCred {
			return false
		}
		funcValueForThisField, _ := distillationFun.Evaluate(v)
		// Keep (row, col) only if it exceeds the opposite direction's
		// credibility by more than the discrimination threshold.
		value := matrix.At(col, row) + funcValueForThisField
		return v > value
	})
	return minCred, valuesToConsider
}
// updateValues writes new[i] into original at position indicesToUpdate[i],
// for every i.
func updateValues(indicesToUpdate, original, new *[]int) {
	for i, target := range *indicesToUpdate {
		(*original)[target] = (*new)[i]
	}
}
// removeDiagonal zeroes out the self-comparison entries of the credibility
// matrix (an alternative never outranks itself).
func removeDiagonal(matrix *AlternativesMatrix) *Matrix {
	return matrix.Values.Filter(func(row, col int, v float64) bool {
		return row != col
	})
}
// computeQuality derives each alternative's quality as its strength (count
// of positive entries in its row, i.e. alternatives it outranks) minus its
// weakness (positive entries in its column, i.e. alternatives outranking it).
func computeQuality(matrix *Matrix) *[]int {
	strength := matrix.MatchesInRow(utils.IsPositive)
	weakness := matrix.MatchesInColumn(utils.IsPositive)
	return calcQuality(&strength, &weakness)
}
// calcQuality returns, for each index i, strength[i] - weakness[i].
func calcQuality(strength, weakness *[]int) *[]int {
	quality := make([]int, len(*strength))
	for i := range quality {
		quality[i] = (*strength)[i] - (*weakness)[i]
	}
	return &quality
}
// greater reports whether new is strictly larger than old (used by the
// ascending distillation).
func greater(old, new int) bool {
	return new > old
}

// lower reports whether new is strictly smaller than old (used by the
// descending distillation).
func lower(old, new int) bool {
	return new < old
}
// findBestMatch scans values with isBetter and returns the best value
// together with every index at which it occurs (ties are collected in
// encounter order). Assumes values is non-empty: element 0 seeds the best.
func findBestMatch(values *[]int, isBetter CompareFunction) (value int, indices *[]int) {
	bestValue := (*values)[0]
	bestIndices := make([]int, 0)
	for i, v := range *values {
		if isBetter(bestValue, v) {
			// Strictly better: restart the tie list at this index.
			bestValue = v
			bestIndices = []int{i}
		} else if v == bestValue {
			bestIndices = append(bestIndices, i)
		}
	}
	return bestValue, &bestIndices
} | lib/logic/preference-func/electreIII/distilation.go | 0.671471 | 0.442396 | distilation.go | starcoder
package primitives
import (
"github.com/zimmski/tavor/token"
)
// Scope implements a general scope token which references a token
type Scope struct {
	// token is the single wrapped child this scope delegates to.
	token token.Token
}

// NewScope returns a new instance of a Scope token
func NewScope(tok token.Token) *Scope {
	return &Scope{
		token: tok,
	}
}
// Token interface methods

// Clone returns a copy of the token and all its children
func (p *Scope) Clone() token.Token {
	return &Scope{
		token: p.token.Clone(),
	}
}

// Parse tries to parse the token beginning from the current position in the parser data.
// If the parsing is successful the error argument is nil and the next current position after the token is returned.
// Parsing is fully delegated to the wrapped token.
func (p *Scope) Parse(pars *token.InternalParser, cur int) (int, []error) {
	return p.token.Parse(pars, cur)
}

// Permutation sets a specific permutation for this token
// The scope itself has exactly one permutation, so any i >= 1 is out of
// bounds; a valid i requires no state change.
func (p *Scope) Permutation(i uint) error {
	permutations := p.Permutations()

	if i >= permutations {
		return &token.PermutationError{
			Type: token.PermutationErrorIndexOutOfBound,
		}
	}

	// do nothing

	return nil
}

// Permutations returns the number of permutations for this token
func (p *Scope) Permutations() uint {
	return 1
}

// PermutationsAll returns the number of all possible permutations for this token including its children
func (p *Scope) PermutationsAll() uint {
	return p.token.PermutationsAll()
}

// String delegates to the wrapped token's string representation.
func (p *Scope) String() string {
	return p.token.String()
}
// ForwardToken interface methods

// Get returns the current referenced token
func (p *Scope) Get() token.Token {
	return p.token
}

// InternalGet returns the current referenced internal token
func (p *Scope) InternalGet() token.Token {
	return p.token
}

// InternalLogicalRemove removes the referenced internal token and returns the replacement for the current token or nil if the current token should be removed.
// Removing the wrapped token leaves the scope empty, so the scope itself is removed.
func (p *Scope) InternalLogicalRemove(tok token.Token) token.Token {
	if p.token == tok {
		return nil
	}

	return p
}

// InternalReplace replaces an old with a new internal token if it is referenced by this token. The error return argument is not nil, if the replacement is not suitable.
// Any token is a suitable replacement here, so the error is always nil.
func (p *Scope) InternalReplace(oldToken, newToken token.Token) error {
	if p.token == oldToken {
		p.token = newToken
	}

	return nil
}
// Minimize interface methods

// Minimize tries to minimize itself and returns a token if it was successful, or nil if there was nothing to minimize
// A scope directly wrapping another scope collapses to the inner scope.
func (p *Scope) Minimize() token.Token {
	if _, ok := p.token.(*Scope); ok {
		return p.token
	}

	return nil
}

// Resolve interface methods

// Resolve returns the token which is referenced by the token, or a path of tokens
// It follows a chain of nested Scope wrappers until the first non-Scope
// token and returns that token.
func (p *Scope) Resolve() token.Token {
	var ok bool

	po := p

	for {
		c := po.InternalGet()

		po, ok = c.(*Scope)
		if !ok {
			return c
		}
	}
}

// Scoping interface methods

// Scoping returns if the token holds a new scope
func (p *Scope) Scoping() bool {
	return true
} | token/primitives/scope.go | 0.852245 | 0.458046 | scope.go | starcoder
package graph
import "errors"
// Node is a graph vertex identified by Key, carrying an arbitrary payload.
type Node struct {
	Key string
	Data interface{}
}
// edge is a directed, weighted connection to a destination node.
type edge struct {
	Dest *Node
	Cost int
}
// value is the adjacency-list entry for one node: the node itself plus
// its outgoing edges.
type value struct {
	start *Node
	edges []edge
}
// DirectedGraph represents a directed graph as an adjacency list.
type DirectedGraph struct {
	adjList map[string]value
}
// NewDirectedGraph returns an empty directed graph.
func NewDirectedGraph() *DirectedGraph {
	return &DirectedGraph{make(map[string]value)}
}
// AddNode inserts n into the graph. It fails if a node with the same
// key is already present.
func (d *DirectedGraph) AddNode(n *Node) error {
	if _, exists := d.adjList[n.Key]; exists {
		return errors.New("node with key already in graph")
	}
	d.adjList[n.Key] = value{start: n, edges: []edge{}}
	return nil
}
// RemoveNode deletes a node (by key) and any associated edges from the graph.
func (d *DirectedGraph) RemoveNode(key string) error {
	if _, ok := d.adjList[key]; !ok {
		return errors.New("node with key not in graph")
	}
	// delete the node
	delete(d.adjList, key)
	// Drop every edge that pointed at the removed node. Filtering into a
	// reused backing slice avoids the original's splice-while-ranging bug,
	// which skipped the element following each removal and could leave
	// dangling edges when a node had several edges to the removed key.
	for k, v := range d.adjList {
		kept := v.edges[:0]
		for _, e := range v.edges {
			if e.Dest.Key != key {
				kept = append(kept, e)
			}
		}
		if len(kept) != len(v.edges) {
			v.edges = kept
			d.adjList[k] = v
		}
	}
	return nil
}
// GetNode returns the node stored under key, or an error if the key is
// not in the graph. (The original doc comment was a copy-paste of
// AddNode's.)
func (d *DirectedGraph) GetNode(key string) (*Node, error) {
	v, ok := d.adjList[key]
	if !ok {
		return nil, errors.New("node with key not in graph")
	}
	return v.start, nil
}
// AddEdge adds a new edge to the graph with a cost. Both endpoints must
// already exist as nodes.
func (d *DirectedGraph) AddEdge(start, end string, cost int) error {
	if _, ok := d.adjList[start]; !ok {
		return errors.New("start node with key not in graph")
	}
	if _, ok := d.adjList[end]; !ok {
		return errors.New("end node with key not in graph")
	}
	entry := d.adjList[start]
	endNode, err := d.GetNode(end)
	if err != nil {
		return err
	}
	entry.edges = append(entry.edges, edge{Dest: endNode, Cost: cost})
	d.adjList[start] = entry
	return nil
}
// RemoveEdge deletes an edge from the graph. Every edge from start to
// end is removed; it is not an error if no such edge exists.
func (d *DirectedGraph) RemoveEdge(start, end string) error {
	if _, ok := d.adjList[start]; !ok {
		return errors.New("start node with key not in graph")
	}
	if _, ok := d.adjList[end]; !ok {
		return errors.New("end node with key not in graph")
	}
	// Filter instead of splicing while ranging: the original spliced the
	// slice inside the range loop, which skips the element after a match
	// and can miss duplicate edges to the same destination.
	v := d.adjList[start]
	kept := v.edges[:0]
	for _, e := range v.edges {
		if e.Dest.Key != end {
			kept = append(kept, e)
		}
	}
	v.edges = kept
	d.adjList[start] = v
	return nil
}
// GetEdgeCost returns the cost of the edge between start and end nodes.
// It returns -1 with an error when either node or the edge is missing.
func (d *DirectedGraph) GetEdgeCost(start, end string) (int, error) {
	if _, ok := d.adjList[start]; !ok {
		return -1, errors.New("start node with key not in graph")
	}
	if _, ok := d.adjList[end]; !ok {
		return -1, errors.New("end node with key not in graph")
	}
	// Renamed the loop variable from "edge" so it no longer shadows the
	// edge type.
	for _, e := range d.adjList[start].edges {
		if e.Dest.Key == end {
			return e.Cost, nil
		}
	}
	return -1, errors.New("no edge exists between nodes")
}
// Chain is a method chaining struct for a directed graph.
// It records the first error encountered so build steps can be chained
// without checking an error after every call.
type Chain struct {
	g *DirectedGraph
	err error
}
// NewDirectedGraphChain returns a Chain wrapping a fresh empty graph.
func NewDirectedGraphChain() *Chain {
	g := NewDirectedGraph()
	return &Chain{g, nil}
}
// Err returns the error from the chain, if any exist
func (c *Chain) Err() error {
	return c.err
}
// DirectedGraph returns the directed graph built by the chain, or the error if any occurred in building.
func (c *Chain) DirectedGraph() (*DirectedGraph, error) {
	if c.err != nil {
		return nil, c.err
	}
	return c.g, nil
}
// AddNode calls AddNode on the graph.
// It is a no-op once a previous step has failed.
func (c *Chain) AddNode(n *Node) *Chain {
	if c.err != nil {
		return c
	}
	err := c.g.AddNode(n)
	if err != nil {
		c.err = err
	}
	return c
}
// AddEdge calls AddEdge on the graph.
// It is a no-op once a previous step has failed.
func (c *Chain) AddEdge(start, end string, cost int) *Chain {
	if c.err != nil {
		return c
	}
	err := c.g.AddEdge(start, end, cost)
	if err != nil {
		c.err = err
	}
	return c
} | internal/graph/graph.go | 0.753829 | 0.466238 | graph.go | starcoder |
package game
import "math/rand"
// Point point of a shape
type Point struct {
	row int
	col int
}
// Shape shape of a piece consisting of 4 points
type Shape [4]Point
// PieceType type of a Tetris piece
type PieceType int
// Different piece types
const (
	IType PieceType = iota
	JType
	LType
	OType
	SType
	TType
	ZType
)
// Piece tetris piece which can be moved
type Piece struct {
	Shape *Shape
	Type PieceType
}
// copy returns a deep copy of the piece: the Shape array is duplicated
// so moving the copy does not mutate the original.
func (p *Piece) copy() *Piece {
	return &Piece{
		Shape: &Shape{
			p.Shape[0],
			p.Shape[1],
			p.Shape[2],
			p.Shape[3],
		},
		Type: p.Type,
	}
}
// down moves the piece one row downward.
func (p *Piece) down() {
	for i := 0; i < 4; i++ {
		p.Shape[i].row++
	}
}
// left moves the piece one column to the left.
func (p *Piece) left() {
	p.moveHorizontal(-1)
}
// right moves the piece one column to the right.
func (p *Piece) right() {
	p.moveHorizontal(1)
}
// moveHorizontal shifts every block of the piece by offset columns.
func (p *Piece) moveHorizontal(offset int) {
	for i := 0; i < 4; i++ {
		p.Shape[i].col += offset
	}
}
// rotate turns the piece a quarter turn around its pivot block
// (Shape[1]): each other block's offset from the pivot is swapped and
// negated, a 90-degree rotation in row/column coordinates.
func (p *Piece) rotate() {
	// pivot piece at 1
	pivot := p.Shape[1]
	for i := 0; i < 4; i++ {
		if i == 1 {
			continue
		}
		dRow := pivot.row - p.Shape[i].row
		dCol := pivot.col - p.Shape[i].col
		p.Shape[i].row = pivot.row + (dCol * -1)
		p.Shape[i].col = pivot.col + (dRow)
	}
}
// newPieceFromType returns a freshly allocated Piece holding the
// canonical spawn shape for the given type. It panics on an unknown
// piece type.
func newPieceFromType(pieceType PieceType) *Piece {
	switch pieceType {
	case LType:
		return &Piece{
			Type: LType,
			Shape: &Shape{
				Point{row: 1, col: 0},
				Point{row: 1, col: 1},
				Point{row: 1, col: 2},
				Point{row: 0, col: 0},
			},
		}
	case IType:
		return &Piece{
			Type: IType,
			Shape: &Shape{
				Point{row: 1, col: 0},
				Point{row: 1, col: 1},
				Point{row: 1, col: 2},
				Point{row: 1, col: 3},
			},
		}
	case OType:
		return &Piece{
			Type: OType,
			Shape: &Shape{
				Point{row: 1, col: 0},
				Point{row: 1, col: 1},
				Point{row: 0, col: 0},
				Point{row: 0, col: 1},
			},
		}
	case TType:
		return &Piece{
			Type: TType,
			Shape: &Shape{
				Point{row: 1, col: 0},
				Point{row: 1, col: 1},
				Point{row: 1, col: 2},
				Point{row: 0, col: 1},
			},
		}
	case SType:
		return &Piece{
			Type: SType,
			Shape: &Shape{
				Point{row: 0, col: 0},
				Point{row: 0, col: 1},
				Point{row: 1, col: 1},
				Point{row: 1, col: 2},
			},
		}
	case ZType:
		return &Piece{
			Type: ZType,
			Shape: &Shape{
				Point{row: 1, col: 0},
				Point{row: 1, col: 1},
				Point{row: 0, col: 1},
				Point{row: 0, col: 2},
			},
		}
	case JType:
		return &Piece{
			Type: JType,
			Shape: &Shape{
				Point{row: 1, col: 0},
				Point{row: 0, col: 1},
				Point{row: 0, col: 0},
				Point{row: 0, col: 2},
			},
		}
	default:
		panic("unknown piece type")
	}
}
// offset returns a random horizontal spawn offset for the piece.
// The bound differs per type and matches each piece's spawn width
// (I is 4 columns wide, O is 2, the rest are 3) — presumably
// bound = boardWidth - width + 1 for a 10-column board; confirm
// against the board dimensions.
func (p *Piece) offset() int {
	switch p.Type {
	case IType:
		return rand.Intn(7)
	case OType:
		return rand.Intn(9)
	case JType, LType, SType, TType, ZType:
		return rand.Intn(8)
	default:
		return rand.Intn(8)
	}
} | pkg/game/piece.go | 0.687525 | 0.657318 | piece.go | starcoder |
package glm
import (
	"sort"
	"gonum.org/v1/gonum/optimize"
	"gonum.org/v1/gonum/stat/distuv"
)
// ScaleProfiler is used to do likelihood profile analysis on the scale
// parameter. Set the Results field to a fitted GLMResults value.
// This is suitable for models with no additional parameters, if there
// are other parameters (e.g. in the Tweedie or Negative Binomial
// case), they are held fixed at their values from the provided fit.
type ScaleProfiler struct {
	// The profile analysis is done with respect to this fitted
	// model.
	results *GLMResults
	// After calling GetMLE, this will hold the MLE of the scale
	// parameter.
	scaleMLE float64
	// This is the largest log-likelihood value that can be
	// obtained by varying the scale parameter.
	maxLogLike float64
	// A sequence of (scale, log-likelihood) values that lie on
	// the profile curve.
	Profile [][2]float64
	// The parameters of the original fit.
	params []float64
}
// NewScaleProfiler returns a ScaleProfiler value that can be used to
// profile the scale parameters.
// The scale MLE is computed eagerly, so construction performs several
// model refits.
func NewScaleProfiler(result *GLMResults) *ScaleProfiler {
	ps := &ScaleProfiler{
		results: result,
	}
	// Keep a private copy of the fitted coefficients; LogLike reuses
	// them as starting values for each refit.
	pa := result.Params()
	params := make([]float64, len(pa))
	copy(params, pa)
	ps.params = params
	ps.getScaleMLE()
	return ps
}
type profPoint [][2]float64
func (a profPoint) Len() int { return len(a) }
func (a profPoint) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a profPoint) Less(i, j int) bool { return a[i][0] < a[j][1] }
// LogLike returns the profile log likelihood value at the given scale
// parameter value.
// Note: this mutates the underlying model (fixes the dispersion at
// scale and resets the starting values) and performs a full refit.
func (ps *ScaleProfiler) LogLike(scale float64) float64 {
	model := ps.results.Model().(*GLM)
	model.dispersionMethod = DispersionFixed
	model.dispersionValue = scale
	copy(model.start, ps.params)
	result := model.Fit()
	return result.LogLike()
}
// bisectmax searches [x0, x2] for a maximizer of f by repeatedly probing
// the midpoint of the wider half of the bracket (x0, x1, x2), where
// y1 = f(x1) is the best value seen so far. It assumes f is unimodal on
// the bracket — otherwise it converges to a local maximum. It returns
// the final abscissa, its function value, and every (x, f(x)) pair
// evaluated along the way.
func bisectmax(f func(float64) float64, x0, x1, x2, y1 float64) (float64, float64, [][2]float64) {
	var hist [][2]float64
	for x2-x0 > 1e-4 {
		if x2-x1 > x1-x0 {
			// Probe the midpoint of the wider, right half.
			x := (x1 + x2) / 2
			y := f(x)
			hist = append(hist, [2]float64{x, y})
			if y > y1 {
				x0 = x1
				y1 = y
				x1 = x
			} else {
				x2 = x
			}
		} else {
			// Probe the midpoint of the wider, left half.
			x := (x0 + x1) / 2
			y := f(x)
			hist = append(hist, [2]float64{x, y})
			if y > y1 {
				x2 = x1
				y1 = y
				x1 = x
			} else {
				x0 = x
			}
		}
	}
	return x1, y1, hist
}
// bisectroot locates, by bisection, a point x in [x0, x1] at which f
// crosses the target level yt. y0 and y1 must equal f(x0) and f(x1) and
// must bracket yt (one above, one below); otherwise the function panics.
// It returns the midpoint of the final bracket together with every
// (x, f(x)) pair evaluated during the search.
func bisectroot(f func(float64) float64, x0, x1, y0, y1, yt float64) (float64, [][2]float64) {
	if (y0-yt)*(y1-yt) > 0 {
		panic("bisectroot invalid bracket")
	}
	var visited [][2]float64
	for x1-x0 > 1e-4 {
		mid := (x0 + x1) / 2
		ymid := f(mid)
		visited = append(visited, [2]float64{mid, ymid})
		// Keep the half-bracket whose endpoints still straddle yt.
		if (ymid-yt)*(y0-yt) > 0 {
			x0, y0 = mid, ymid
		} else {
			x1 = mid
		}
	}
	return (x0 + x1) / 2, visited
}
// ScaleMLE returns the maximum likelihood estimate of the scale parameter.
func (ps *ScaleProfiler) ScaleMLE() float64 {
	return ps.scaleMLE
}
// getScaleMLE brackets the profile-likelihood maximum around the fitted
// scale by geometric expansion, then refines it with bisectmax. All
// evaluated points are appended to the Profile curve, which is kept
// sorted by scale.
func (ps *ScaleProfiler) getScaleMLE() {
	// Center point
	scale1 := ps.results.scale
	ll1 := ps.LogLike(scale1)
	// Upper point: grow by 20% until the log-likelihood drops below the center.
	scale2 := 1.2 * scale1
	ll2 := ps.LogLike(scale2)
	for ll2 >= ll1 {
		scale2 *= 1.2
		ll2 = ps.LogLike(scale2)
	}
	// Lower point: shrink by 20% until the log-likelihood drops below the center.
	scale0 := 0.8 * scale1
	ll0 := ps.LogLike(scale0)
	for ll0 >= ll1 {
		scale0 *= 0.8
		ll0 = ps.LogLike(scale0)
	}
	var hist [][2]float64
	ps.scaleMLE, ps.maxLogLike, hist = bisectmax(ps.LogLike, scale0, scale1, scale2, ll1)
	ps.Profile = append(ps.Profile, hist...)
	sort.Sort(profPoint(ps.Profile))
}
// ConfInt identifies scale parameters scale1, scale2 that define a
// profile confidence interval for the scale parameter. All points on
// the profile likelihood visited during the search are added to the
// Profile field of the ScaleProfiler value.
func (ps *ScaleProfiler) ConfInt(prob float64) (float64, float64) {
	// Profile-likelihood interval: keep scales whose log-likelihood lies
	// within qp = chi^2_1(prob)/2 of the maximum (Wilks' theorem).
	qp := distuv.ChiSquared{K: 1}.Quantile(prob) / 2
	// Left side
	scale0 := 0.9 * ps.scaleMLE
	ll0 := ps.LogLike(scale0)
	for ll0 > ps.maxLogLike-qp {
		scale0 *= 0.9
		ll0 = ps.LogLike(scale0)
		ps.Profile = append(ps.Profile, [2]float64{scale0, ll0})
	}
	var hist [][2]float64
	scale0, hist = bisectroot(ps.LogLike, scale0, ps.scaleMLE, ll0, ps.maxLogLike, ps.maxLogLike-qp)
	ps.Profile = append(ps.Profile, hist...)
	// Right side
	scale1 := 1.1 * ps.scaleMLE
	ll1 := ps.LogLike(scale1)
	for ll1 > ps.maxLogLike-qp {
		scale1 *= 1.1
		ll1 = ps.LogLike(scale1)
		ps.Profile = append(ps.Profile, [2]float64{scale1, ll1})
	}
	scale1, hist = bisectroot(ps.LogLike, ps.scaleMLE, scale1, ps.maxLogLike, ll1, ps.maxLogLike-qp)
	ps.Profile = append(ps.Profile, hist...)
	sort.Sort(profPoint(ps.Profile))
	return scale0, scale1
}
// TweedieProfiler conducts profile likelihood analyses on a GLM with
// the Tweedie family.
type TweedieProfiler struct {
	// The profile analysis is done with respect to this fitted
	// model.
	results *GLMResults
	// The MLE of the scale parameter
	scaleMLE float64
	// The MLE of the variance power parameter
	varPowerMLE float64
	// Copy of the fitted coefficients, used as starting values for refits.
	params []float64
}
// NewTweedieProfiler returns a TweedieProfiler that can be used to
// profile the variance power parameter of a Tweedie GLM.
// The joint MLE of (variance power, scale) is computed eagerly.
func NewTweedieProfiler(result *GLMResults) *TweedieProfiler {
	tp := &TweedieProfiler{
		results: result,
	}
	pa := result.Params()
	tp.params = make([]float64, len(pa))
	copy(tp.params, pa)
	tp.getMLE()
	return tp
}
// ScaleMLE returns the maximum likelihood estimate of the scale parameter.
func (tp *TweedieProfiler) ScaleMLE() float64 {
	return tp.scaleMLE
}
// VarPowerMLE returns the maximum likelihood estimate of the variance power parameter..
func (tp *TweedieProfiler) VarPowerMLE() float64 {
	return tp.varPowerMLE
}
// LogLike returns the profile log likelihood value at the given
// variance power and scale parameter.
// Note: this mutates the underlying model (fixed dispersion, new
// Tweedie family) and performs a full refit.
func (tp *TweedieProfiler) LogLike(pw, scale float64) float64 {
	model := tp.results.Model().(*GLM)
	model.dispersionMethod = DispersionFixed
	model.dispersionValue = scale
	model.fam = NewTweedieFamily(pw, model.link)
	copy(model.start, tp.params)
	result := model.Fit()
	return result.LogLike()
}
// getMLE maximizes the profile likelihood over (variance power, scale)
// with derivative-free Nelder-Mead, starting from power 1.5 and the
// fitted scale.
func (tp *TweedieProfiler) getMLE() {
	p := optimize.Problem{
		Func: func(x []float64) float64 {
			// Minimize the negative log-likelihood.
			return -tp.LogLike(x[0], x[1])
		},
	}
	// Starting point for the search
	x0 := []float64{1.5, tp.results.scale}
	r, err := optimize.Minimize(p, x0, nil, &optimize.NelderMead{})
	if err != nil {
		panic(err)
	}
	tp.varPowerMLE = r.X[0]
	tp.scaleMLE = r.X[1]
}
// NegBinomProfiler conducts profile likelihood analyses on a GLM with
// the negative binomial family.
type NegBinomProfiler struct {
	// The profile analysis is done with respect to this fitted
	// model.
	results *GLMResults
	// The MLE of the dispersion parameter
	dispersionMLE float64
	// The maximum likelihood value at the MLE
	maxLogLike float64
	// A sequence of (dispersion, log-likelihood) values that lie on
	// the profile curve.
	Profile [][2]float64
	// Copy of the fitted coefficients, used as starting values for refits.
	params []float64
}
// NewNegBinomProfiler returns a NegBinomProfiler that can be used to
// profile the dispersion parameter of a negative binomial GLM.
// The dispersion MLE is computed eagerly.
func NewNegBinomProfiler(result *GLMResults) *NegBinomProfiler {
	nb := &NegBinomProfiler{
		results: result,
	}
	pa := result.Params()
	nb.params = make([]float64, len(pa))
	copy(nb.params, pa)
	nb.getMLE()
	return nb
}
// LogLike returns the profile log likelihood value at the given
// dispersion parameter value.
// Note: this mutates the underlying model (fixed unit dispersion, new
// negative binomial family with a log link) and performs a full refit.
func (nb *NegBinomProfiler) LogLike(disp float64) float64 {
	model := nb.results.Model().(*GLM)
	model.dispersionMethod = DispersionFixed
	model.dispersionValue = 1
	link := NewLink(LogLink)
	model.fam = NewNegBinomFamily(disp, link)
	copy(model.start, nb.params)
	result := model.Fit()
	return result.LogLike()
}
// DispersionMLE returns the maximum likelihood estimate of the dispersion parameter.
func (nb *NegBinomProfiler) DispersionMLE() float64 {
	return nb.dispersionMLE
}
// getMLE brackets the profile-likelihood maximum around the fitted
// dispersion by geometric expansion, then refines it with bisectmax,
// appending every evaluated point to the sorted Profile curve.
func (nb *NegBinomProfiler) getMLE() {
	model := nb.results.Model().(*GLM)
	// Center point
	disp1 := model.fam.alpha
	ll1 := nb.LogLike(disp1)
	// Upper point
	disp2 := 1.2 * disp1
	ll2 := nb.LogLike(disp2)
	for ll2 >= ll1 {
		disp2 *= 1.2
		ll2 = nb.LogLike(disp2)
	}
	// Lower point
	disp0 := 0.8 * disp1
	ll0 := nb.LogLike(disp0)
	for ll0 >= ll1 {
		disp0 *= 0.8
		ll0 = nb.LogLike(disp0)
	}
	var hist [][2]float64
	nb.dispersionMLE, nb.maxLogLike, hist = bisectmax(nb.LogLike, disp0, disp1, disp2, ll1)
	nb.Profile = append(nb.Profile, hist...)
	sort.Sort(profPoint(nb.Profile))
}
// ConfInt identifies dispersion parameters disp1, disp2 that define a
// profile confidence interval for the dispersion parameter. All
// points on the profile likelihood visited during the search are
// added to the Profile field of the NegBinomProfiler value.
func (nb *NegBinomProfiler) ConfInt(prob float64) (float64, float64) {
	// Interval bound: log-likelihood drop of chi^2_1(prob)/2 from the maximum.
	qp := distuv.ChiSquared{K: 1}.Quantile(prob) / 2
	// Left side
	disp0 := 0.9 * nb.dispersionMLE
	ll0 := nb.LogLike(disp0)
	for ll0 > nb.maxLogLike-qp {
		disp0 *= 0.9
		ll0 = nb.LogLike(disp0)
		nb.Profile = append(nb.Profile, [2]float64{disp0, ll0})
	}
	var hist [][2]float64
	disp0, hist = bisectroot(nb.LogLike, disp0, nb.dispersionMLE, ll0, nb.maxLogLike, nb.maxLogLike-qp)
	nb.Profile = append(nb.Profile, hist...)
	// Right side
	disp1 := 1.1 * nb.dispersionMLE
	ll1 := nb.LogLike(disp1)
	for ll1 > nb.maxLogLike-qp {
		disp1 *= 1.1
		ll1 = nb.LogLike(disp1)
		nb.Profile = append(nb.Profile, [2]float64{disp1, ll1})
	}
	disp1, hist = bisectroot(nb.LogLike, nb.dispersionMLE, disp1, nb.maxLogLike, ll1, nb.maxLogLike-qp)
	nb.Profile = append(nb.Profile, hist...)
	sort.Sort(profPoint(nb.Profile))
	return disp0, disp1
} | glm/profile.go | 0.812198 | 0.46223 | profile.go | starcoder |
package engine
import "errors"
type graph struct {
	// names contains the keys of the "edges" field.
	// It allows the vertices to be sorted.
	// It makes the structure deterministic.
	names []string
	// vertices ordered by name.
	vertices map[string]*graphVertex
}
// graphVertex contains the vertex data.
type graphVertex struct {
	// numIn in the number of incoming edges.
	numIn int
	// numInTmp is used by the TopologicalOrdering to avoid messing with numIn
	numInTmp int
	// out contains the name the outgoing edges.
	out []string
	// outMap is the same as "out", but in a map
	// to quickly check if a vertex is in the outgoing edges.
	outMap map[string]struct{}
}
// newGraph creates a new graph.
func newGraph() *graph {
	return &graph{
		names:    []string{},
		vertices: map[string]*graphVertex{},
	}
}
// AddVertex registers v as a vertex. Adding a vertex that already
// exists is a no-op.
func (g *graph) AddVertex(v string) {
	if _, exists := g.vertices[v]; exists {
		return
	}
	g.names = append(g.names, v)
	// numIn starts at its zero value.
	g.vertices[v] = &graphVertex{
		out:    []string{},
		outMap: map[string]struct{}{},
	}
}
// AddEdge inserts the directed edge from -> to, creating either vertex
// if needed. Duplicate edges are ignored.
func (g *graph) AddEdge(from, to string) {
	g.AddVertex(from)
	g.AddVertex(to)
	src := g.vertices[from]
	// Skip edges that are already registered.
	if _, dup := src.outMap[to]; dup {
		return
	}
	src.out = append(src.out, to)
	src.outMap[to] = struct{}{}
	g.vertices[to].numIn++
}
// TopologicalOrdering returns a valid topological sort.
// It implements Kahn's algorithm.
// If there is a cycle in the graph, an error is returned.
// The list of vertices is also returned even if it is not ordered.
func (g *graph) TopologicalOrdering() ([]string, error) {
	l := []string{} // result, in topological order
	q := []string{} // work stack of vertices with no remaining incoming edges
	for _, v := range g.names {
		if g.vertices[v].numIn == 0 {
			q = append(q, v)
		}
		// Work on a scratch counter so the graph can be sorted again later.
		g.vertices[v].numInTmp = g.vertices[v].numIn
	}
	for len(q) > 0 {
		// Pop from the end: q is used as a LIFO stack.
		n := q[len(q)-1]
		q = q[:len(q)-1]
		l = append(l, n)
		for _, m := range g.vertices[n].out {
			g.vertices[m].numInTmp--
			if g.vertices[m].numInTmp == 0 {
				q = append(q, m)
			}
		}
	}
	// If not every vertex was emitted, some edge was never released: a cycle.
	if len(l) != len(g.names) {
		return append([]string{}, g.names...), errors.New("a cycle has been found in the dependencies")
	}
	return l, nil
} | vendor/github.com/puper/ppgo/v2/engine/graph.go | 0.736116 | 0.545467 | graph.go | starcoder |
package lex
import (
	"fmt"
	"github.com/goki/ki/nptime"
	"github.com/goki/pi/token"
)
// Lex represents a single lexical element, with a token, and start and end rune positions
// within a line of a file. Critically it also contains the nesting depth computed from
// all the parens, brackets, braces. Todo: also support XML < > </ > tag depth.
type Lex struct {
	Tok token.KeyToken `desc:"token, includes cache of keyword for keyword types, and also has nesting depth: starting at 0 at start of file and going up for every increment in bracket / paren / start tag and down for every decrement. Is computed once and used extensively in parsing."`
	St int `desc:"start rune index within original source line for this token"`
	Ed int `desc:"end rune index within original source line for this token (exclusive -- ends one before this)"`
	Time nptime.Time `desc:"time when region was set -- used for updating locations in the text based on time stamp (using efficient non-pointer time)"`
}
// NewLex returns a Lex with the given token and start/end rune indexes.
func NewLex(tok token.KeyToken, st, ed int) Lex {
	lx := Lex{Tok: tok, St: st, Ed: ed}
	return lx
}
// Src returns the rune source for given lex item (does no validity checking)
func (lx *Lex) Src(src []rune) []rune {
	return src[lx.St:lx.Ed]
}
// Now sets the time stamp to now
func (lx *Lex) Now() {
	lx.Time.Now()
}
// String satisfies the fmt.Stringer interface
func (lx *Lex) String() string {
	return fmt.Sprintf("[+%d:%v:%v:%v]", lx.Tok.Depth, lx.St, lx.Ed, lx.Tok.String())
}
// ContainsPos returns true if the Lex element contains given character position
func (lx *Lex) ContainsPos(pos int) bool {
	return pos >= lx.St && pos < lx.Ed
}
// OverlapsReg returns true if the two regions overlap
func (lx *Lex) OverlapsReg(or Lex) bool {
	// start overlaps
	if (lx.St >= or.St && lx.St < or.Ed) || (or.St >= lx.St && or.St < lx.Ed) {
		return true
	}
	// end overlaps
	if (lx.Ed > or.St && lx.Ed <= or.Ed) || (or.Ed > lx.St && or.Ed <= lx.Ed) {
		return true
	}
	return false
}
// Region returns the region for this lexical element, at given line
func (lx *Lex) Region(ln int) Reg {
	return Reg{St: Pos{Ln: ln, Ch: lx.St}, Ed: Pos{Ln: ln, Ch: lx.Ed}}
} | lex/lex.go | 0.773216 | 0.413359 | lex.go | starcoder |
package reactor
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// QuantumFraction applies a quantum fraction to a rate given in minutes.
func QuantumFraction(rate float64, quantum time.Duration) float64 {
return rate * (float64(quantum) / float64(time.Minute))
}
// Thresholds returns a new severity provider based on (3) different
// severity states. The returned func maps a value to the highest
// severity whose threshold it strictly exceeds (fatal > critical >
// warning), or SeverityNone when it exceeds none.
func Thresholds(fatal, critical, warning float64) func(float64) Severity {
	return func(value float64) Severity {
		if value > fatal {
			return SeverityFatal
		}
		if value > critical {
			return SeverityCritical
		}
		if value > warning {
			return SeverityWarning
		}
		return SeverityNone
	}
}
// Transfer moves quantity from one value to another given a rate and quantum.
// NOTE(review): rate is applied twice — once inside QuantumFraction and
// again when forming effectiveRate — so the transfer scales with rate
// squared; confirm whether `effectiveRate := quantumFraction` was intended.
func Transfer(from, to *float64, rate float64, quantum time.Duration) {
	quantumFraction := QuantumFraction(rate, quantum)
	effectiveRate := rate * quantumFraction
	delta := (*from - *to)
	transferred := delta * effectiveRate
	// Clamp so no more than the current gap is moved in one step.
	if transferred > delta {
		transferred = delta
	}
	*from = *from - (transferred / 2.0)
	*to = *to + (transferred / 2.0)
}
// Percent returns value expressed as a whole percentage of the uint8
// maximum (255).
func Percent(value uint8) int {
	ratio := float64(value) / float64(math.MaxUint8)
	return int(ratio * 100)
}
// FormatOutput renders an output value (kw/hr) using the largest
// fitting unit: kw/hr, mw/hr, or gw/hr, with two decimal places.
func FormatOutput(output float64) string {
	switch {
	case output > 1000*1000:
		return fmt.Sprintf("%.2fgw/hr", output/(1000*1000))
	case output > 1000:
		return fmt.Sprintf("%.2fmw/hr", output/1000)
	default:
		return fmt.Sprintf("%.2fkw/hr", output)
	}
}
// FormatFields renders a fields map as space-separated key=value pairs.
// Map iteration order is not deterministic, so pair order varies
// between calls for multi-entry maps.
func FormatFields(fields map[string]string) string {
	pairs := make([]string, 0, len(fields))
	for k, v := range fields {
		pairs = append(pairs, k+"="+v)
	}
	return strings.Join(pairs, " ")
}
// RelativeQuantum returns a normalized quantum based on a from and to position change.
func RelativeQuantum(from, to, max float64, quantum time.Duration) time.Duration {
if from == to {
return 0
}
var a, b float64
if from > to {
a = from
b = to
} else {
a = to
b = from
}
delta := a - b
if delta == 0 {
return 0
}
pctChange := delta / max
return time.Duration(pctChange * float64(quantum))
}
// RoundMillis rounds a given duration to milliseconds
func RoundMillis(d time.Duration) time.Duration {
millis := int64(d) / int64(time.Millisecond)
return time.Duration(millis) * time.Millisecond
}
// ParseValue parses string as an int, and applies a given validator.
func ParseValue(validator func(int) error, value string) (int, error) {
parsed, err := strconv.Atoi(value)
if err != nil {
return 0, err
}
if validator != nil {
if err := validator(parsed); err != nil {
return 0, err
}
}
return parsed, nil
}
// ParseValues parses a list of strings as ints, and applies a given validator.
// It fails fast: the first parse or validation error aborts and is returned.
func ParseValues(validator func(int) error, values ...string) ([]int, error) {
	output := make([]int, len(values))
	for index, value := range values {
		parsed, err := strconv.Atoi(value)
		if err != nil {
			return nil, err
		}
		if validator != nil {
			if err := validator(parsed); err != nil {
				return nil, err
			}
		}
		output[index] = parsed
	}
	return output, nil
}
// ParseCommand splits a raw command line on spaces into the command
// word and its arguments.
func ParseCommand(rawCommand string) (command string, args []string) {
	i := strings.Index(rawCommand, " ")
	if i < 0 {
		// No spaces: the whole input is the command, with no arguments.
		return rawCommand, nil
	}
	return rawCommand[:i], strings.Split(rawCommand[i+1:], " ")
}
// Between returns a validator that accepts values in [min, max]
// (both bounds inclusive).
func Between(min, max int) func(int) error {
	return func(v int) error {
		if v < min || v > max {
			return fmt.Errorf("validation error: %d is not between %d and %d", v, min, max)
		}
		return nil
	}
}
// Below returns a validator that accepts values strictly below max.
func Below(max int) func(int) error {
	return func(v int) error {
		if v >= max {
			return fmt.Errorf("validation error: %d is not below %d", v, max)
		}
		return nil
	}
}
// ValidUint8 returns a validator for uint8s.
func ValidUint8(v int) error {
	return Between(0, int(math.MaxUint8))(v)
} | pkg/reactor/util.go | 0.842345 | 0.614076 | util.go | starcoder |
package spec
import (
	"fmt"
	. "github.com/sdboyer/gocheck"
	. "github.com/sdboyer/gogl"
)
/* DataGraphSuite - tests for data graphs */
type DataGraphSuite struct {
	Factory func(GraphSource) DataGraph
}
// SuiteLabel identifies the concrete graph type under test.
func (s *DataGraphSuite) SuiteLabel() string {
	return fmt.Sprintf("%T", s.Factory(NullGraph))
}
func (s *DataGraphSuite) TestEdges(c *C) {
	// This method is not redundant with the base Graph suite as it ensures that the edges
	// provided by the Edges() iterator actually do implement DataEdge.
	g := s.Factory(GraphFixtures["d-2e3v"])
	var we DataEdge
	g.Edges(func(e Edge) (terminate bool) {
		c.Assert(e, Implements, &we)
		return
	})
}
func (s *DataGraphSuite) TestHasDataEdge(c *C) {
	g := s.Factory(GraphFixtures["d-2e3v"])
	c.Assert(g.HasDataEdge(NewDataEdge(1, 2, "foo")), Equals, true)
	c.Assert(g.HasDataEdge(NewDataEdge(2, 1, "foo")), Equals, true) // both directions work
	c.Assert(g.HasDataEdge(NewDataEdge(1, 2, "qux")), Equals, false) // wrong data
}
type DataDigraphSuite struct {
	Factory func(GraphSource) DataGraph
}
// SuiteLabel identifies the concrete digraph type under test.
func (s *DataDigraphSuite) SuiteLabel() string {
	return fmt.Sprintf("%T", s.Factory(NullGraph))
}
func (s *DataDigraphSuite) TestArcSubtypeImplementation(c *C) {
	// This method is not redundant with the base Graph suite as it ensures that the edges
	// provided by the Arcs() iterator actually do implement DataArc.
	g := s.Factory(GraphFixtures["d-2e3v"]).(DataDigraph)
	var hit int // just internal safety check to ensure the fixture is good and hits
	var wa DataArc
	g.Arcs(func(e Arc) (terminate bool) {
		hit++
		c.Assert(e, Implements, &wa)
		return
	})
	g.ArcsFrom(2, func(e Arc) (terminate bool) {
		hit++
		c.Assert(e, Implements, &wa)
		return
	})
	// NOTE(review): ArcsFrom(2, ...) is invoked twice with identical
	// arguments; the second call may have been meant to be ArcsTo —
	// confirm against the fixture and intended coverage.
	g.ArcsFrom(2, func(e Arc) (terminate bool) {
		hit++
		c.Assert(e, Implements, &wa)
		return
	})
	c.Assert(hit, Equals, 4)
}
/* DataEdgeSetMutatorSuite - tests for mutable data graphs */
type DataEdgeSetMutatorSuite struct {
	Factory func(GraphSource) DataGraph
}
// SuiteLabel identifies the concrete mutable graph type under test.
func (s *DataEdgeSetMutatorSuite) SuiteLabel() string {
	return fmt.Sprintf("%T", s.Factory(NullGraph))
}
func (s *DataEdgeSetMutatorSuite) TestGracefulEmptyVariadics(c *C) {
	g := s.Factory(NullGraph)
	m := g.(DataEdgeSetMutator)
	m.AddEdges()
	c.Assert(Order(g), Equals, 0)
	c.Assert(Size(g), Equals, 0)
	m.RemoveEdges()
	c.Assert(Order(g), Equals, 0)
	c.Assert(Size(g), Equals, 0)
}
func (s *DataEdgeSetMutatorSuite) TestAddRemoveEdge(c *C) {
	g := s.Factory(NullGraph)
	m := g.(DataEdgeSetMutator)
	m.AddEdges(NewDataEdge(1, 2, "foo"))
	c.Assert(g.HasDataEdge(NewDataEdge(1, 2, "foo")), Equals, true)
	// Now test removal
	m.RemoveEdges(NewDataEdge(1, 2, "foo"))
	c.Assert(g.HasEdge(NewEdge(1, 2)), Equals, false)
	c.Assert(g.HasDataEdge(NewDataEdge(1, 2, "foo")), Equals, false)
}
func (s *DataEdgeSetMutatorSuite) TestMultiAddRemoveEdge(c *C) {
	g := s.Factory(NullGraph)
	m := g.(DataEdgeSetMutator)
	m.AddEdges(NewDataEdge(1, 2, "foo"), NewDataEdge(2, 3, "bar"))
	c.Assert(g.HasDataEdge(NewDataEdge(1, 2, "foo")), Equals, true)
	c.Assert(g.HasDataEdge(NewDataEdge(2, 3, "bar")), Equals, true)
	// Now test removal
	m.RemoveEdges(NewDataEdge(1, 2, "foo"), NewDataEdge(2, 3, "bar"))
	c.Assert(g.HasDataEdge(NewDataEdge(1, 2, "foo")), Equals, false)
	c.Assert(g.HasDataEdge(NewDataEdge(2, 3, "bar")), Equals, false)
}
/* DataArcSetMutatorSuite - tests for mutable data graphs */
type DataArcSetMutatorSuite struct {
	Factory func(GraphSource) DataGraph
}
// SuiteLabel identifies the concrete mutable digraph type under test.
func (s *DataArcSetMutatorSuite) SuiteLabel() string {
	return fmt.Sprintf("%T", s.Factory(NullGraph))
}
func (s *DataArcSetMutatorSuite) TestGracefulEmptyVariadics(c *C) {
	g := s.Factory(NullGraph).(DataDigraph)
	m := g.(DataArcSetMutator)
	m.AddArcs()
	c.Assert(Order(g), Equals, 0)
	c.Assert(Size(g), Equals, 0)
	m.RemoveArcs()
	c.Assert(Order(g), Equals, 0)
	c.Assert(Size(g), Equals, 0)
}
func (s *DataArcSetMutatorSuite) TestAddRemoveHasArc(c *C) {
	g := s.Factory(NullGraph).(DataDigraph)
	m := g.(DataArcSetMutator)
	m.AddArcs(NewDataArc(1, 2, "foo"))
	c.Assert(g.HasDataArc(NewDataArc(1, 2, "foo")), Equals, true)
	c.Assert(g.HasDataArc(NewDataArc(1, 2, "bar")), Equals, false) // wrong data
	// Now test removal
	m.RemoveArcs(NewDataArc(1, 2, "foo"))
	c.Assert(g.HasDataArc(NewDataArc(1, 2, "foo")), Equals, false)
}
func (s *DataArcSetMutatorSuite) TestMultiAddRemoveHasArc(c *C) {
	g := s.Factory(NullGraph).(DataDigraph)
	m := g.(DataArcSetMutator)
	m.AddArcs(NewDataArc(1, 2, "foo"), NewDataArc(2, 3, "bar"))
	c.Assert(g.HasDataArc(NewDataArc(1, 2, "foo")), Equals, true)
	c.Assert(g.HasDataArc(NewDataArc(2, 3, "bar")), Equals, true)
	m.RemoveArcs(NewDataArc(1, 2, "foo"), NewDataArc(2, 3, "bar"))
	c.Assert(g.HasDataArc(NewDataArc(1, 2, "foo")), Equals, false)
	c.Assert(g.HasDataArc(NewDataArc(2, 3, "bar")), Equals, false)
} | spec/suite_data.go | 0.662469 | 0.401805 | suite_data.go | starcoder |
package avatar
import (
	"crypto/sha1"
	"errors"
	"image"
	"image/color"
	"strings"
)
const nblock = 5
// DefaultBG is the default image background color.
var DefaultBG = color.NRGBA{0xed, 0xed, 0xed, 0xff}
// Avatar defines the properties to make an avatar image.
type Avatar struct {
	// Case insensitive text
	Text string
	// Non-zero positive image size
	Size int
	// The value is in percentage. Ranges from 0 to 10.
	// Off limits values will be clipped.
	Padding int
	// Customizable avatar colors
	BGColor color.NRGBA
	Color color.NRGBA
	palette palette
}
type palette [nblock][nblock]bool
// Create performs the algorithm to make an avatar image.
func (a *Avatar) Create() (image.Image, error) {
	if err := a.init(); err != nil {
		return nil, err
	}
	r := image.Rect(0, 0, a.Size, a.Size)
	m := image.NewNRGBA(r)
	fillRect(m, r, a.BGColor) // set background
	// Set avatar
	avatarRect := alignCenter(a.Size, a.Padding)
	blockSize := avatarRect.Dx() / nblock
	// Walk the nblock x nblock palette; paint a block for every true cell.
	vt := avatarRect.Min.Y
	for i := 0; i < nblock; i++ {
		hr := avatarRect.Min.X
		for j := 0; j < nblock; j++ {
			if a.palette[i][j] {
				b := image.Rect(hr, vt, hr+blockSize, vt+blockSize)
				fillRect(m, b, a.Color)
			}
			hr += blockSize
		}
		vt += blockSize
	}
	return m, nil
}
// init verifies the avatar properties and initalizes them.
func (a *Avatar) init() error {
	if a.Size < 1 {
		return errors.New("invalid Avatar.Size")
	}
	if a.Padding < 0 {
		a.Padding = 0
	} else if a.Padding > 10 {
		a.Padding = 10
	}
	// Convert the percentage to pixels.
	// NOTE(review): a.Size / 100 is integer division, so for Size < 100
	// the padding always becomes 0; confirm whether
	// a.Padding = a.Padding * a.Size / 100 was intended.
	a.Padding *= a.Size / 100
	// Derive deterministic colors/pattern from the (case-folded) text.
	s := sha1.Sum([]byte(strings.ToLower(a.Text)))
	sum := s[:]
	var zc color.NRGBA // zero value color
	if a.BGColor == zc {
		a.BGColor = DefaultBG
	}
	if a.Color == zc {
		a.Color = color.NRGBA{sum[0], sum[1], sum[2], 0xff}
	}
	a.palette = mixColor(sum[3:])
	return nil
}
// alignCenter returns the centered square region of a size x size image
// after reserving padding on each side, shrunk so its width is an exact
// multiple of nblock (the remainder is split between the two margins).
func alignCenter(size, padding int) image.Rectangle {
	inner := size - 2*padding
	rem := inner % nblock
	lo := padding + rem>>1
	hi := size - lo
	return image.Rect(lo, lo, hi, hi)
}
// fillRect colors a certain area of the image.
func fillRect(m *image.NRGBA, r image.Rectangle, c color.NRGBA) {
for y := r.Min.Y; y < r.Max.Y; y++ {
for x := r.Min.X; x < r.Max.X; x++ {
m.SetNRGBA(x, y, c)
}
}
}
// mixColor makes a symmetrical color palette but with boolean values.
// It fills the left half (columns 0..mid-1) of each row from the parity
// of successive recipe bytes and mirrors each cell onto the right half,
// producing a horizontally symmetric pattern. It consumes l*mid (15)
// recipe bytes; the caller passes the 17-byte tail of a SHA-1 sum.
func mixColor(recipe []byte) (p palette) {
	l := len(p)
	mid := l>>1 + 1
	z := 0
	for i := 0; i < l; i++ {
		for j := 0; j < mid; j++ {
			p[i][j] = recipe[z]%2 == 0
			// Mirror the bool to the other end.
			p[i][l-(j+1)] = p[i][j]
			z++
		}
	}
	return p
} | avatar/avatar.go | 0.720368 | 0.44571 | avatar.go | starcoder |
package spec
// Schema The Schema Object allows the definition of input and output data types.
// These types can be objects, but also primitives and arrays.
// This object is an extended subset of the JSON Schema Specification Wright Draft 00.
// For more information about the properties, see JSON Schema Core and JSON Schema Validation.
// Unless stated otherwise, the property definitions follow the JSON Schema.
type Schema struct {
	// Refable is inlined so a Schema may be either an inline definition
	// or a $ref to a reusable component.
	Refable `json:",inline"`
	// Allows sending a null value for the defined schema.
	// Default value is false.
	Nullable bool `json:"nullable,omitempty"`
	// Adds support for polymorphism.
	// The discriminator is an object name that is used to differentiate between other schemas which may satisfy the payload description.
	// See Composition and Inheritance for more details.
	Discriminator *Discriminator `json:"discriminator,omitempty"`
	// Relevant only for Schema "properties" definitions.
	// Declares the property as "read only".
	// This means that it MAY be sent as part of a response but SHOULD NOT be sent as part of the request.
	// If the property is marked as readOnly being true and is in the required list, the required will take effect on the response only.
	// A property MUST NOT be marked as both readOnly and writeOnly being true.
	// Default value is false.
	ReadOnly bool `json:"readOnly,omitempty"`
	// Relevant only for Schema "properties" definitions.
	// Declares the property as "write only".
	// Therefore, it MAY be sent as part of a request but SHOULD NOT be sent as part of the response.
	// If the property is marked as writeOnly being true and is in the required list, the required will take effect on the request only.
	// A property MUST NOT be marked as both readOnly and writeOnly being true.
	// Default value is false.
	WriteOnly bool `json:"writeOnly,omitempty"`
	// This MAY be used only on properties schemas.
	// It has no effect on root schemas.
	// Adds additional metadata to describe the XML representation of this property.
	XML *XML `json:"xml,omitempty"`
	// Additional external documentation for this schema.
	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
	// Any A free-form property to include an example of an instance for this schema.
	// To represent examples that cannot be naturally represented in JSON or YAML, a string value can be used to contain the example with escaping where necessary.
	Example Any `json:"example,omitempty"`
	// Specifies that a schema is deprecated and SHOULD be transitioned out of usage.
	// Default value is false.
	Deprecated bool `json:"deprecated,omitempty"`
	// The following properties are taken directly from the JSON Schema definition and follow the same specifications:
	Title string `json:"title,omitempty"`
	// Numbers.
	// ExclusiveMaximum/ExclusiveMinimum follow the Draft-00 boolean
	// form: they qualify Maximum/Minimum rather than carrying a value.
	MultipleOf       *float64 `json:"multipleOf,omitempty"`
	Maximum          *float64 `json:"maximum,omitempty"`
	ExclusiveMaximum bool     `json:"exclusiveMaximum,omitempty"`
	Minimum          *float64 `json:"minimum,omitempty"`
	ExclusiveMinimum bool     `json:"exclusiveMinimum,omitempty"`
	// Strings
	MaxLength *int64 `json:"maxLength,omitempty"`
	MinLength *int64 `json:"minLength,omitempty"`
	Pattern   string `json:"pattern,omitempty"`
	// Arrays
	MaxItems    *int64 `json:"maxItems,omitempty"`
	MinItems    *int64 `json:"minItems,omitempty"`
	UniqueItems bool   `json:"uniqueItems,omitempty"`
	// Objects
	MaxProperties *int64   `json:"maxProperties,omitempty"`
	MinProperties *int64   `json:"minProperties,omitempty"`
	Required      []string `json:"required,omitempty"`
	// All
	Enum []Any `json:"enum,omitempty"`
	// The following properties are taken from the JSON Schema definition but their definitions were adjusted to the OpenAPI Specification.
	// Value MUST be a string.
	// Multiple types via an array are not supported.
	Type OAPIType `json:"type,omitempty"`
	// Inline or referenced schema MUST be of a Schema Object and not a standard JSON Schema.
	AllOf []*Schema `json:"allOf,omitempty"`
	// Inline or referenced schema MUST be of a Schema Object and not a standard JSON Schema.
	OneOf []*Schema `json:"oneOf,omitempty"`
	// Inline or referenced schema MUST be of a Schema Object and not a standard JSON Schema.
	AnyOf []*Schema `json:"anyOf,omitempty"`
	// Inline or referenced schema MUST be of a Schema Object and not a standard JSON Schema.
	Not *Schema `json:"not,omitempty"`
	// Value MUST be an object and not an array.
	// Inline or referenced schema MUST be of a Schema Object and not a standard JSON Schema.
	// items MUST be present if the type is array.
	Items *Schema `json:"items,omitempty"`
	// Property definitions MUST be a Schema Object and not a standard JSON Schema (inline or referenced).
	Properties map[string]*Schema `json:"properties,omitempty"`
	// Value can be boolean or object.
	// Inline or referenced schema MUST be of a Schema Object and not a standard JSON Schema.
	AdditionalProperties *Schema `json:"additionalProperties,omitempty"`
	// CommonMark syntax MAY be used for rich text representation.
	Description string `json:"description,omitempty"`
	// See Data Type Formats for further details.
	// While relying on JSON Schema's defined formats, the OAS offers a few additional predefined formats.
	Format string `json:"format,omitempty"`
	// The default value represents what would be assumed by the consumer of the input as the value of the schema if one is not provided.
	// Unlike JSON Schema, the value MUST conform to the defined type for the Schema Object defined at the same level.
	// For example, if type is string, then default can be "foo" but cannot be 1.
	Default Any `json:"default,omitempty"`
}
// Entity satisfies componenter interface
// by reporting that every Schema is identified by SchemaKind.
func (s Schema) Entity() Entity {
	return SchemaKind
} | internal/oapi/spec/schema.go | 0.841793 | 0.472014 | schema.go | starcoder
package matrix
import (
"fmt"
"log"
"math"
"math/rand"
"time"
)
//Matrix type does matrix math.
// It wraps a row-major [][]float64; element (i, j) lives at slice[i][j].
type Matrix struct {
	slice [][]float64
}
//NewMatrix wraps the given row-major slice in a Matrix.
// The program is aborted when the slice is smaller than 2x2.
func NewMatrix(slice [][]float64) Matrix {
	rows, cols := sliceRows(slice), sliceColumns(slice)
	if cols < 2 || rows < 2 {
		log.Fatalf("This is not a matrix. Please, enter a proper number of elements.")
	}
	return Matrix{slice: slice}
}
//sliceColumns reports the length of the final row of slice.
func sliceColumns(slice [][]float64) int {
	last := slice[len(slice)-1]
	return len(last)
}
//sliceRows reports how many rows slice holds.
func sliceRows(slice [][]float64) int {
	rows := len(slice)
	return rows
}
//NumberOfColumns returns the number of columns.
func (m Matrix) NumberOfColumns() int {
	cols := sliceColumns(m.slice)
	return cols
}
//NumberOfRows returns the number of rows.
func (m Matrix) NumberOfRows() int {
	rows := sliceRows(m.slice)
	return rows
}
//Dimensions returns the number of rows and columns of m.
func (m Matrix) Dimensions() (rows, columns int) {
	return m.NumberOfRows(), m.NumberOfColumns()
}
//NumberOfElements returns the total count of elements in m.
func (m Matrix) NumberOfElements() int {
	rows, columns := m.Dimensions()
	return rows * columns
}
//RoundtoDecimals rounds every element of m (in place) to the given
// number of decimals and returns m.
func (m Matrix) RoundtoDecimals(decimals int) Matrix {
	for i, row := range m.slice {
		for j, e := range row {
			// Write back through the indices: the original assigned
			// to the range variable, which only changed a copy and
			// left the matrix untouched.
			m.slice[i][j] = roundTo(e, decimals)
		}
	}
	return m
}
// roundTo rounds number to the requested count of decimal places.
func roundTo(number float64, decimals int) float64 {
	scale := math.Pow(10, float64(decimals))
	return math.Round(number*scale) / scale
}
//Randomize overwrites every element of m with a random value in [0, 0.3).
func (m Matrix) Randomize() Matrix {
	rows, cols := m.Dimensions()
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			m.slice[i][j] = rand.Float64() * 0.3
		}
	}
	return m
}
//RandomValuedMatrix returns a row*column matrix of uniform random
// values in [0, 1).
func RandomValuedMatrix(row, column int) Matrix {
	rand.Seed(time.Now().UnixNano())
	slc := make([][]float64, row)
	for i := range slc {
		r := make([]float64, column)
		for j := range r {
			// Draw each element directly. The original regenerated a
			// full row*column random slice for every element and then
			// indexed it with i*j, which repeats indices (the whole
			// first row and column all shared index 0).
			r[j] = rand.Float64()
		}
		slc[i] = r
	}
	return Matrix{slice: slc}
}
// randFloats returns n uniform random values in [0, 1).
func randFloats(n int) []float64 {
	rand.Seed(time.Now().UnixNano())
	out := make([]float64, n)
	for i := range out {
		out[i] = rand.Float64()
	}
	return out
}
//Slice exposes the underlying row-major [][]float64.
// You can perform indexing with this method.
func (m Matrix) Slice() [][]float64 {
	raw := m.slice
	return raw
}
//PrintByRow prints the matrix one row per line.
func (m Matrix) PrintByRow() {
	for _, row := range m.slice {
		fmt.Println(row)
	}
}
//At returns the element stored at (rowIndex, columnIndex).
func (m *Matrix) At(rowIndex, columnIndex int) float64 {
	row := m.slice[rowIndex]
	return row[columnIndex]
}
//Identity function returns an n*n identity matrix.
func Identity(n int) Matrix {
	matrix := Matrix{}
	for i := 0; i < n; i++ {
		// Fresh zero row with a single 1 on the diagonal.
		row := make([]float64, n)
		row[i] = 1
		matrix.slice = append(matrix.slice, row)
	}
	return matrix
}
//Zeros returns a row*column matrix of zeros.
func Zeros(row, column int) Matrix {
	b := make([][]float64, row)
	for i := range b {
		// Allocate a fresh row per iteration. The original reused one
		// shared slice for every row, so all rows aliased the same
		// backing array and writing any cell changed every row.
		b[i] = make([]float64, column)
	}
	return Matrix{slice: b}
}
//Ones returns a row*column matrix of ones.
func Ones(row, column int) Matrix {
	b := make([][]float64, row)
	for i := range b {
		// Fresh row per iteration; the original shared one slice
		// across all rows (aliasing bug).
		r := make([]float64, column)
		for j := range r {
			r[j] = 1
		}
		b[i] = r
	}
	return Matrix{slice: b}
}
//AllSameNumber returns a row*column matrix with every element set to number.
func AllSameNumber(row, column int, number float64) Matrix {
	b := make([][]float64, row)
	for i := range b {
		// Fresh row per iteration; the original shared one slice
		// across all rows (aliasing bug).
		r := make([]float64, column)
		for j := range r {
			r[j] = number
		}
		b[i] = r
	}
	return Matrix{slice: b}
}
//FromValues returns a row*column matrix populated row-major from values.
// The program is aborted when too few values are supplied.
func FromValues(row, column int, values []float64) Matrix {
	// The original indexed values out of range, dropped the row
	// parameter and copied the wrong elements; rebuild row-major.
	if len(values) < row*column {
		log.Fatalf("FromValues needs %d values, got %d", row*column, len(values))
	}
	slice := make([][]float64, row)
	for i := 0; i < row; i++ {
		r := make([]float64, column)
		copy(r, values[i*column:(i+1)*column])
		slice[i] = r
	}
	return Matrix{slice: slice}
}
//Sigmoid returns the logistic sigmoid of x.
func Sigmoid(x float64) float64 {
	return 1.0 / (1.0 + math.Exp(-x))
}
//SigmoidPrime returns the derivative of the sigmoid at x.
func SigmoidPrime(x float64) float64 {
	s := Sigmoid(x)
	return s * (1 - s)
}
//Matmul returns the matrix product a*b.
// a's column count must equal b's row count, as documented.
func Matmul(a, b Matrix) Matrix {
	// The original never enforced its own precondition.
	if a.NumberOfColumns() != b.NumberOfRows() {
		log.Fatalf("Matmul: %d columns cannot multiply %d rows", a.NumberOfColumns(), b.NumberOfRows())
	}
	rows, cols, inner := a.NumberOfRows(), b.NumberOfColumns(), a.NumberOfColumns()
	// Allocate a zeroed result directly; the original built it with
	// RandomValuedMatrix, which reseeded the RNG and did pointless work.
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		out[i] = make([]float64, cols)
		for j := 0; j < cols; j++ {
			sum := 0.0
			for k := 0; k < inner; k++ {
				sum += a.slice[i][k] * b.slice[k][j]
			}
			out[i][j] = sum
		}
	}
	return Matrix{slice: out}
}
//Add performs elementwise matrix addition and returns a new matrix.
func (m Matrix) Add(mat Matrix) Matrix {
	// Allocate the result; the original wrote into a zero-value
	// Matrix's nil slice, which panics on the first assignment.
	rows, cols := m.Dimensions()
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		out[i] = make([]float64, cols)
		for j := 0; j < cols; j++ {
			out[i][j] = m.slice[i][j] + mat.slice[i][j]
		}
	}
	return Matrix{slice: out}
}
//Subtract performs elementwise matrix subtraction and returns a new matrix.
func (m Matrix) Subtract(mat Matrix) Matrix {
	// Allocate the result; the original indexed a nil slice (panic).
	rows, cols := m.Dimensions()
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		out[i] = make([]float64, cols)
		for j := 0; j < cols; j++ {
			out[i][j] = m.slice[i][j] - mat.slice[i][j]
		}
	}
	return Matrix{slice: out}
}
//Multiply performs elementwise (Hadamard) multiplication and returns a new matrix.
func (m Matrix) Multiply(mat Matrix) Matrix {
	// Allocate the result; the original indexed a nil slice (panic).
	rows, cols := m.Dimensions()
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		out[i] = make([]float64, cols)
		for j := 0; j < cols; j++ {
			out[i][j] = m.slice[i][j] * mat.slice[i][j]
		}
	}
	return Matrix{slice: out}
}
//Divide performs elementwise matrix division and returns a new matrix.
// Division by a zero element yields +/-Inf or NaN per IEEE-754.
func (m Matrix) Divide(mat Matrix) Matrix {
	// Allocate the result; the original indexed a nil slice (panic).
	rows, cols := m.Dimensions()
	out := make([][]float64, rows)
	for i := 0; i < rows; i++ {
		out[i] = make([]float64, cols)
		for j := 0; j < cols; j++ {
			out[i][j] = m.slice[i][j] / mat.slice[i][j]
		}
	}
	return Matrix{slice: out}
}
//ScalarMultiplication multiplies every element of m by scalar, in place.
func (m Matrix) ScalarMultiplication(scalar float64) Matrix {
	for _, row := range m.slice {
		for i := range row {
			row[i] *= scalar
		}
	}
	return m
}
//ScalarAdition adds a scalar to every element of m, in place.
func (m Matrix) ScalarAdition(scalar float64) Matrix {
	for _, row := range m.slice {
		for i := range row {
			row[i] += scalar
		}
	}
	return m
}
//Transpose returns the transpose of m as a new matrix.
func (m Matrix) Transpose() Matrix {
	// Build a fresh matrix. The original assigned m[i][j] = m[j][i]
	// in place over all i,j, which reads elements it already
	// overwrote (symmetrizing instead of transposing) and indexes
	// out of range for non-square matrices.
	rows, cols := m.Dimensions()
	out := make([][]float64, cols)
	for j := 0; j < cols; j++ {
		out[j] = make([]float64, rows)
		for i := 0; i < rows; i++ {
			out[j][i] = m.slice[i][j]
		}
	}
	return Matrix{slice: out}
}
//FindDeterminant returns the determinant of the (square) matrix via
// cofactor expansion along the first row.
func (m Matrix) FindDeterminant() float64 {
	dims := m.NumberOfRows()
	if dims == 1 {
		log.Fatalf("This is a single valued matrix.")
	}
	if dims == 2 {
		// Base case: evaluate the 2x2 directly. The original expanded
		// a 2x2 through a 1x1 minor fed to Find2x2Determinant, which
		// indexes out of range.
		return m.Find2x2Determinant()
	}
	var determinant float64
	sign := 1.0 // cofactor signs alternate +,-,+,... along the row
	for k := 0; k < dims; k++ {
		// The original set the sign to -1 for even k, inverting every
		// cofactor sign.
		determinant += sign * m.slice[0][k] * m.Shorten(0, k).FindDeterminant()
		sign = -sign
	}
	return determinant
}
//Find2x2Determinant returns ad-bc for a 2x2 matrix.
func (m Matrix) Find2x2Determinant() float64 {
	// The original multiplied m[1][0] by itself instead of using the
	// anti-diagonal product m[0][1]*m[1][0].
	return m.slice[0][0]*m.slice[1][1] - m.slice[0][1]*m.slice[1][0]
}
//Shorten returns the minor matrix of m obtained by deleting row
// rowIndex and column columnIndex.
func (m Matrix) Shorten(rowIndex, columnIndex int) Matrix {
	// The original zeroed cells of m itself and indexed [i-1][j-1]
	// out of range; it never actually removed the row and column.
	var out [][]float64
	for i, row := range m.slice {
		if i == rowIndex {
			continue
		}
		var r []float64
		for j, e := range row {
			if j == columnIndex {
				continue
			}
			r = append(r, e)
		}
		out = append(out, r)
	}
	return Matrix{slice: out}
}
//Adjoint returns the adjugate of m: the transpose of its cofactor
// matrix. The error is always nil and kept for interface stability.
func (m Matrix) Adjoint() (Matrix, error) {
	n := m.NumberOfRows()
	out := make([][]float64, n)
	for i := range out {
		out[i] = make([]float64, n)
	}
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			// The cofactor of (i,j) lands at (j,i): the adjugate is
			// the transposed cofactor matrix. The original overwrote
			// m while still computing cofactors from it and skipped
			// the transpose.
			out[j][i] = math.Pow(-1, float64(i+j)) * m.Shorten(i, j).FindDeterminant()
		}
	}
	return Matrix{slice: out}, nil
}
//Inverse returns the inverse of a matrix.
// The program is aborted when the matrix is singular.
func (m Matrix) Inverse() Matrix {
	det := m.FindDeterminant()
	// Guard against division by a zero determinant, which would
	// silently fill the result with +/-Inf.
	if det == 0 {
		log.Fatalf("matrix is singular and cannot be inverted")
	}
	adjoint, err := m.Adjoint()
	if err != nil {
		log.Fatalf("unable to create adjoint matrix :%v", err)
	}
	return adjoint.ScalarMultiplication(1 / det)
}
//Inverse2x2 returns the inverse of a 2x2 matrix.
// The program is aborted when m is not 2x2 or is singular.
func (m Matrix) Inverse2x2() Matrix {
	if m.NumberOfRows() != 2 {
		log.Fatalf("This is not a 2x2 matrix.")
	}
	det := m.Find2x2Determinant()
	if det == 0 {
		log.Fatalf("matrix is singular and cannot be inverted")
	}
	// Allocate the result: the original indexed a zero-value Matrix's
	// nil slice (guaranteed panic) and never divided by the
	// determinant, so it returned the adjugate, not the inverse.
	out := [][]float64{
		{m.slice[1][1] / det, -m.slice[0][1] / det},
		{-m.slice[1][0] / det, m.slice[0][0] / det},
	}
	return Matrix{slice: out}
}
//EinsteinConvention returns the matrix product of m and m2, summing
// over the repeated (shared) index as implied by Einstein notation.
//According to this convention, when an index variable appears twice in a single term and is not otherwise defined, it implies summation of that term over all the values of the index.
func (m Matrix) EinsteinConvention(m2 Matrix) Matrix {
	// The original wrote into a nil result slice (panic) and summed
	// the hard-coded indices m[2][i]*m2[j][3]; its dimension check
	// also compared row counts while claiming to compare columns.
	if m.NumberOfColumns() != m2.NumberOfRows() {
		log.Fatal("Rows of A must match columns of B")
	}
	return Matmul(m, m2)
}
//DotProduct returns the elementwise (Frobenius) inner product of m and m2.
func (m Matrix) DotProduct(m2 Matrix) float64 {
	rows, cols := m.Dimensions()
	total := 0.0
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			total += m.slice[i][j] * m2.slice[i][j]
		}
	}
	return total
}
//FromArray returns a len(arr) x 1 column matrix built from arr.
func FromArray(arr []float64) Matrix {
	m := Zeros(len(arr), 1)
	for i, v := range arr {
		// Copy element i; the original stored arr[0] into every row.
		m.slice[i][0] = v
	}
	return m
}
//ToArray flattens the matrix into a row-major []float64.
func (m Matrix) ToArray() []float64 {
	rows, cols := m.Dimensions()
	var out []float64
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			out = append(out, m.slice[i][j])
		}
	}
	return out
}
//MapFunc applies f to every element of m, in place.
func (m Matrix) MapFunc(f func(x float64) float64) Matrix {
	rows, cols := m.Dimensions()
	for i := 0; i < rows; i++ {
		for j := 0; j < cols; j++ {
			m.slice[i][j] = f(m.slice[i][j])
		}
	}
	return m
}
//TransformationInAChangedBasis function takes a given matrix as an input and outputs it in a changed basis
// (the intent appears to be inv(basis) * m * basis).
// NOTE(review): Multiply is elementwise (Hadamard), not a matrix
// product — a change of basis needs Matmul here; confirm intent.
func (m Matrix) TransformationInAChangedBasis(basis Matrix) Matrix {
	inv := basis.Inverse()
	transform := inv.Multiply(m)
	result := transform.Multiply(basis)
	return result
} | matrix.go | 0.777258 | 0.484014 | matrix.go | starcoder
package matrix
import (
"fmt"
)
// rowsToColumns converts a bit count x into the number of bytes
// needed to store it (the ceiling of x/8).
func rowsToColumns(x int) int {
	if x%8 == 0 {
		return x / 8
	}
	return x/8 + 1
}
// Matrix is a logical, or (0, 1)-matrix, stored as a slice of bit Rows.
type Matrix []Row
// Mul right-multiplies a matrix by a row.
// It panics when the row length does not match the matrix width.
func (e Matrix) Mul(f Row) Row {
	rows, cols := e.Size()
	if cols != f.Size() {
		panic("Can't multiply by row that is wrong size!")
	}
	res := NewRow(rows)
	for i, r := range e {
		// Over GF(2) the product bit is the dot product of row i with f.
		if r.DotProduct(f) {
			res.SetBit(i, true)
		}
	}
	return res
}
// Add adds two binary matrices from GF(2)^nxm.
func (e Matrix) Add(f Matrix) Matrix {
	n, _ := e.Size()
	out := make([]Row, n)
	// `for i := range` replaces the non-idiomatic `for i, _ := range`.
	for i := range out {
		out[i] = e[i].Add(f[i])
	}
	return out
}
// Compose returns the result of composing e with f (matrix product
// over GF(2)). It panics when the inner dimensions disagree.
func (e Matrix) Compose(f Matrix) Matrix {
	n, m := e.Size()
	p, q := f.Size()
	if m != p {
		panic("Can't multiply matrices of wrong size!")
	}
	out := GenerateEmpty(n, q)
	// Transposing f lets each output bit be a row-by-row dot product.
	ft := f.Transpose()
	for i, row := range e {
		for j, col := range ft {
			out[i].SetBit(j, row.DotProduct(col))
		}
	}
	return out
}
// Invert computes the multiplicative inverse of a matrix, if it exists.
// The boolean reports invertibility: no free variables after
// Gauss-Jordan elimination means full rank.
func (e Matrix) Invert() (Matrix, bool) {
	inv, _, frees := e.gaussJordan()
	ok := len(frees) == 0
	return inv, ok
}
// Transpose returns the transpose of a matrix.
func (e Matrix) Transpose() Matrix {
	n, m := e.Size()
	out := GenerateEmpty(m, n)
	// `for i := range` replaces the non-idiomatic `for i, _ := range`.
	for i := range out {
		for j := 0; j < n; j++ {
			out[i].SetBit(j, e[j].GetBit(i) == 1)
		}
	}
	return out
}
// Trace returns the trace (sum/parity of elements on the diagonal) of a matrix: 0x00 or 0x01.
func (e Matrix) Trace() byte {
	var out byte
	for i := range e {
		out ^= e[i].GetBit(i) // XOR accumulates parity over GF(2)
	}
	return out
}
// FindPivot finds a row with non-zero entry in column col, starting at the given row and moving down. It returns the
// index of the found row or -1 if one does not exist.
func (e Matrix) FindPivot(row, col int) int {
	for i := row; i < len(e); i++ {
		if e[i].GetBit(col) == 1 {
			return i
		}
	}
	return -1
}
// Equals returns true if two matrices are equal and false otherwise.
// NOTE(review): only the row counts are compared here; per-row width
// comparison is delegated to Row.Equals — confirm it checks length.
func (e Matrix) Equals(f Matrix) bool {
	n, _ := e.Size()
	p, _ := f.Size()
	if n != p {
		return false
	}
	// `for i := range` replaces the non-idiomatic `for i, _ := range`.
	for i := range e {
		if !e[i].Equals(f[i]) {
			return false
		}
	}
	return true
}
// Dup returns a deep copy of this matrix.
func (e Matrix) Dup() Matrix {
	n, m := e.Size()
	f := GenerateEmpty(n, m)
	// `for i := range` replaces the non-idiomatic `for i, _ := range`.
	for i := range e {
		copy(f[i], e[i])
	}
	return f
}
// Size returns the dimensions of the matrix in (Rows, Columns) order.
// The column count is taken from the first row.
func (e Matrix) Size() (int, int) {
	// Early return removes the redundant else-after-return branch.
	if len(e) == 0 {
		return 0, 0
	}
	return len(e), e[0].Size()
}
// String converts the matrix to space-and-dot notation, framed by a
// dashed bar above and below.
func (e Matrix) String() string {
	_, cols := e.Size()
	var out []rune
	bar := func() {
		// Two extra dashes pad the bar past the row width.
		for i := -2; i < cols; i++ {
			out = append(out, '-')
		}
		out = append(out, '\n')
	}
	bar()
	for _, row := range e {
		out = append(out, []rune(row.String())...)
	}
	bar()
	return string(out)
}
// OctaveString converts the matrix into a string that can be imported
// into Octave, one row rendering per Row.
func (e Matrix) OctaveString() string {
	var buf []rune
	for _, row := range e {
		buf = append(buf, []rune(row.OctaveString())...)
	}
	return string(buf)
}
// GoString renders the matrix as Go source: a matrix.Matrix literal
// with one matrix.Row per line and bytes formatted as 0xNN.
// NOTE(review): row[:len(row)-1] panics for a zero-length Row —
// confirm rows are always non-empty.
func (e Matrix) GoString() string {
	out := []rune("matrix.Matrix{\n")
	for _, row := range e {
		out = append(out, []rune("\tmatrix.Row{")...)
		for _, elem := range row[:len(row)-1] {
			out = append(out, []rune(fmt.Sprintf("0x%2.2x, ", elem))...)
		}
		// The final byte is printed without a trailing comma-space.
		out = append(out, []rune(fmt.Sprintf("0x%2.2x},\n", row[len(row)-1]))...)
	}
	out = append(out, '}')
	return string(out)
} | matrix/matrix.go | 0.859295 | 0.452596 | matrix.go | starcoder
// Interrupter encoder driver.
package hand
import (
"log"
)
// GetStep provides a method to read the absolute location of the stepper motor.
// Implementations report the current position as a signed step count.
type GetStep interface {
	GetStep() int64
}
// Syncer provides an interface for a callback when the encoder mark is hit.
// The measured steps in a revolution and the absolute location of the
// mark are provided.
type Syncer interface {
	Mark(int, int64)
}
// IO provides a method to return when an input changes.
// Get blocks until the next edge and returns the new input value.
type IO interface {
	Get() (int, error)
}
// debounce is the minimum step distance between sensor events; closer
// events are treated as noise and discarded.
const debounce = 5
// mAvgCount is the window length of the moving average applied to the
// measured steps-per-revolution.
const mAvgCount = 5
// Encoder is an interrupter encoder driver used to measure shaft rotations.
// The count of current step values is used to track the
// number of steps in a rotation between encoder signals, and
// this is used to calculate the actual number of steps in a revolution.
type Encoder struct {
	Name     string
	getStep  GetStep
	syncer   Syncer
	enc      IO   // I/O from encoder hardware
	Invert   bool // Invert input signal
	Measured int  // Measured steps per revolution
	size     int64 // Minimum span of sensor mark
	// lastEdge is the last location of the encoder mark.
	// NOTE(review): written by the driver goroutine and read by
	// Location without synchronization — confirm this race is benign.
	lastEdge int64
}
// NewEncoder creates a new Encoder structure and starts the goroutine
// that services the sensor input.
func NewEncoder(name string, stepper GetStep, syncer Syncer, io IO, size int) *Encoder {
	enc := &Encoder{
		Name:    name,
		getStep: stepper,
		syncer:  syncer,
		enc:     io,
		size:    int64(size),
	}
	go enc.driver()
	return enc
}
// Location returns the current location as a relative position from the encoder mark
// (steps advanced since the last 1->0 sensor transition).
// NOTE(review): lastEdge is updated concurrently by the driver
// goroutine without synchronization — confirm callers tolerate a
// racy/stale read.
func (e *Encoder) Location() int {
	return int(e.getStep.GetStep() - e.lastEdge)
}
// driver is the main goroutine for servicing the encoder.
// Edge triggered input values are read, and encoder marks are searched for.
// An encoder mark is a 0->1->0 transition of at least a minimum size, usually
// correlating to a physical sensor such as an interrupting shaft photo-sensor.
// The 1->0 transition is considered the reference point for measuring the
// number of steps in a revolution.
func (e *Encoder) driver() {
	last := int64(0)    // location of the previous sensor event
	e.lastEdge = int64(-1) // -1 flags that no mark has been seen yet
	lastMeasured := 0
	var mavg []int // moving-average window of measured revolutions
	avgTotal := 0  // running sum of the window
	avgIndex := 0  // next slot in the circular window
	for {
		// Retrieve the sensor value when it changes.
		s, err := e.enc.Get()
		if err != nil {
			log.Fatalf("%s: Encoder input: %v", e.Name, err)
		}
		if e.Invert {
			s = s ^ 1
		}
		// Retrieve the current absolute location.
		loc := e.getStep.GetStep()
		// Check for debounce, and discard if noisy.
		d := diff(loc, last)
		last = loc
		if debounce != 0 && d < debounce {
			continue
		}
		// Transitioned from 1 to 0, and the signal is large
		// enough to be considered as the real encoder mark.
		if s == 0 && d >= e.size {
			if e.lastEdge > 0 {
				// If the previous sensor edge has been seen,
				// calculate the difference between the current
				// mark and the previous mark.
				// This is the measured number of steps in a revolution.
				newM := int(diff(e.lastEdge, loc))
				if avgTotal == 0 {
					// If first time, init moving average.
					for i := 0; i < mAvgCount; i++ {
						mavg = append(mavg, newM)
					}
					avgTotal = newM * mAvgCount
				}
				// Recalculate moving average.
				// NOTE(review): mavg[avgIndex] is never overwritten
				// with newM, so the window forever subtracts the
				// initial seed values — looks like a ring-buffer bug;
				// confirm intended behaviour.
				avgTotal = avgTotal - mavg[avgIndex] + newM
				avgIndex = (avgIndex + 1) % mAvgCount
				newM = avgTotal / mAvgCount
				e.Measured = newM
				e.syncer.Mark(newM, loc)
				log.Printf("%s: Mark at %d (%d)", e.Name, e.Measured, e.Measured-lastMeasured)
				lastMeasured = newM
			}
			e.lastEdge = loc
		}
	}
}
// Get difference between 2 absolute locations.
// The result is the non-negative (absolute) distance between a and b.
func diff(a, b int64) int64 {
	d := a - b
	if d < 0 {
		d = -d
	}
	return d
} | hand/encoder.go | 0.664758 | 0.533397 | encoder.go | starcoder
package influxql
import (
"bytes"
"container/heap"
"fmt"
"math"
"sort"
)
/*
This file contains iterator implementations for each function call available
in InfluxQL. Call iterators are separated into two groups:
1. Map/reduce-style iterators - these are passed to IteratorCreator so that
processing can be at the low-level storage and aggregates are returned.
2. Raw aggregate iterators - these require the full set of data for a window.
These are handled by the select() function and raw points are streamed in
from the low-level storage.
There are helpers to aid in building aggregate iterators. For simple map/reduce
iterators, you can use the reduceIterator types and pass a reduce function. This
reduce function is passed a previous and current value and the new timestamp,
value, and auxilary fields are returned from it.
For raw aggregate iterators, you can use the reduceSliceIterators which pass
in a slice of all points to the function and return a point. For more complex
iterator types, you may need to create your own iterators by hand.
Once your iterator is complete, you'll need to add it to the NewCallIterator()
function if it is to be available to IteratorCreators and add it to the select()
function to allow it to be included during planning.
*/
// NewCallIterator returns a new iterator for a Call.
// It panics for call names that have no map/reduce implementation.
func NewCallIterator(input Iterator, opt IteratorOptions) Iterator {
	switch name := opt.Expr.(*Call).Name; name {
	case "count":
		return newCountIterator(input, opt)
	case "min":
		return newMinIterator(input, opt)
	case "max":
		return newMaxIterator(input, opt)
	case "sum":
		return newSumIterator(input, opt)
	case "first":
		return newFirstIterator(input, opt)
	case "last":
		return newLastIterator(input, opt)
	default:
		panic(fmt.Sprintf("unsupported function call: %s", name))
	}
}
// newCountIterator returns an iterator for operating on a count() call.
// FIXME: Wrap iterator in int-type iterator and always output int value.
func newCountIterator(input Iterator, opt IteratorOptions) Iterator {
	switch in := input.(type) {
	case FloatIterator:
		return &floatReduceIterator{input: newBufFloatIterator(in), opt: opt, fn: floatCountReduce}
	case IntegerIterator:
		return &integerReduceIterator{input: newBufIntegerIterator(in), opt: opt, fn: integerCountReduce}
	}
	panic(fmt.Sprintf("unsupported count iterator type: %T", input))
}
// floatCountReduce returns the count of points.
// The running tally rides in the point value; the window start time
// stamps the aggregate.
func floatCountReduce(prev, curr *FloatPoint, opt *reduceOptions) (int64, float64, []interface{}) {
	if prev != nil {
		return prev.Time, prev.Value + 1, nil
	}
	return opt.startTime, 1, nil
}
// integerCountReduce returns the count of points.
// The running tally rides in the point value; the window start time
// stamps the aggregate.
func integerCountReduce(prev, curr *IntegerPoint, opt *reduceOptions) (int64, int64, []interface{}) {
	if prev != nil {
		return prev.Time, prev.Value + 1, nil
	}
	return opt.startTime, 1, nil
}
// newMinIterator returns an iterator for operating on a min() call.
func newMinIterator(input Iterator, opt IteratorOptions) Iterator {
	switch in := input.(type) {
	case FloatIterator:
		return &floatReduceIterator{input: newBufFloatIterator(in), opt: opt, fn: floatMinReduce}
	case IntegerIterator:
		return &integerReduceIterator{input: newBufIntegerIterator(in), opt: opt, fn: integerMinReduce}
	}
	panic(fmt.Sprintf("unsupported min iterator type: %T", input))
}
// floatMinReduce returns the minimum value between prev & curr.
// Ties on value resolve to the earlier timestamp.
func floatMinReduce(prev, curr *FloatPoint, opt *reduceOptions) (int64, float64, []interface{}) {
	best := prev
	if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// integerMinReduce returns the minimum value between prev & curr.
// Ties on value resolve to the earlier timestamp.
func integerMinReduce(prev, curr *IntegerPoint, opt *reduceOptions) (int64, int64, []interface{}) {
	best := prev
	if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// newMaxIterator returns an iterator for operating on a max() call.
func newMaxIterator(input Iterator, opt IteratorOptions) Iterator {
	switch in := input.(type) {
	case FloatIterator:
		return &floatReduceIterator{input: newBufFloatIterator(in), opt: opt, fn: floatMaxReduce}
	case IntegerIterator:
		return &integerReduceIterator{input: newBufIntegerIterator(in), opt: opt, fn: integerMaxReduce}
	}
	panic(fmt.Sprintf("unsupported max iterator type: %T", input))
}
// floatMaxReduce returns the maximum value between prev & curr.
// Ties on value resolve to the earlier timestamp.
func floatMaxReduce(prev, curr *FloatPoint, opt *reduceOptions) (int64, float64, []interface{}) {
	best := prev
	if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// integerMaxReduce returns the maximum value between prev & curr.
// Ties on value resolve to the earlier timestamp.
func integerMaxReduce(prev, curr *IntegerPoint, opt *reduceOptions) (int64, int64, []interface{}) {
	best := prev
	if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// newSumIterator returns an iterator for operating on a sum() call.
func newSumIterator(input Iterator, opt IteratorOptions) Iterator {
	switch in := input.(type) {
	case FloatIterator:
		return &floatReduceIterator{input: newBufFloatIterator(in), opt: opt, fn: floatSumReduce}
	case IntegerIterator:
		return &integerReduceIterator{input: newBufIntegerIterator(in), opt: opt, fn: integerSumReduce}
	}
	panic(fmt.Sprintf("unsupported sum iterator type: %T", input))
}
// floatSumReduce returns the sum of the prev value & curr value.
// The first point seeds the sum and its timestamp.
func floatSumReduce(prev, curr *FloatPoint, opt *reduceOptions) (int64, float64, []interface{}) {
	if prev != nil {
		return prev.Time, prev.Value + curr.Value, nil
	}
	return curr.Time, curr.Value, nil
}
// integerSumReduce returns the sum of the prev value & curr value.
// The first point seeds the sum and its timestamp.
func integerSumReduce(prev, curr *IntegerPoint, opt *reduceOptions) (int64, int64, []interface{}) {
	if prev != nil {
		return prev.Time, prev.Value + curr.Value, nil
	}
	return curr.Time, curr.Value, nil
}
// newFirstIterator returns an iterator for operating on a first() call.
func newFirstIterator(input Iterator, opt IteratorOptions) Iterator {
	switch in := input.(type) {
	case FloatIterator:
		return &floatReduceIterator{input: newBufFloatIterator(in), opt: opt, fn: floatFirstReduce}
	case IntegerIterator:
		return &integerReduceIterator{input: newBufIntegerIterator(in), opt: opt, fn: integerFirstReduce}
	}
	panic(fmt.Sprintf("unsupported first iterator type: %T", input))
}
// floatFirstReduce returns the first point sorted by time.
// Ties on time resolve to the larger value.
func floatFirstReduce(prev, curr *FloatPoint, opt *reduceOptions) (int64, float64, []interface{}) {
	best := prev
	if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// integerFirstReduce returns the first point sorted by time.
// Ties on time resolve to the larger value.
func integerFirstReduce(prev, curr *IntegerPoint, opt *reduceOptions) (int64, int64, []interface{}) {
	best := prev
	if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// newLastIterator returns an iterator for operating on a last() call.
func newLastIterator(input Iterator, opt IteratorOptions) Iterator {
	switch in := input.(type) {
	case FloatIterator:
		return &floatReduceIterator{input: newBufFloatIterator(in), opt: opt, fn: floatLastReduce}
	case IntegerIterator:
		return &integerReduceIterator{input: newBufIntegerIterator(in), opt: opt, fn: integerLastReduce}
	}
	panic(fmt.Sprintf("unsupported last iterator type: %T", input))
}
// floatLastReduce returns the last point sorted by time.
// Ties on time resolve to the larger value.
func floatLastReduce(prev, curr *FloatPoint, opt *reduceOptions) (int64, float64, []interface{}) {
	best := prev
	if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// integerLastReduce returns the last point sorted by time.
// Ties on time resolve to the larger value.
func integerLastReduce(prev, curr *IntegerPoint, opt *reduceOptions) (int64, int64, []interface{}) {
	best := prev
	if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) {
		best = curr
	}
	return best.Time, best.Value, best.Aux
}
// NewDistinctIterator returns an iterator for operating on a distinct() call.
func NewDistinctIterator(input Iterator, opt IteratorOptions) Iterator {
	switch in := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(in), opt: opt, fn: floatDistinctReduceSlice}
	case IntegerIterator:
		return &integerReduceSliceIterator{input: newBufIntegerIterator(in), opt: opt, fn: integerDistinctReduceSlice}
	case StringIterator:
		return &stringReduceSliceIterator{input: newBufStringIterator(in), opt: opt, fn: stringDistinctReduceSlice}
	}
	panic(fmt.Sprintf("unsupported distinct iterator type: %T", input))
}
// floatDistinctReduceSlice returns the distinct values within a window,
// sorted, each stamped with the time of its first occurrence.
func floatDistinctReduceSlice(a []FloatPoint, opt *reduceOptions) []FloatPoint {
	uniq := make(map[float64]FloatPoint)
	for _, p := range a {
		if _, seen := uniq[p.Value]; !seen {
			uniq[p.Value] = p
		}
	}
	points := make([]FloatPoint, 0, len(uniq))
	for _, p := range uniq {
		// Aux fields are deliberately dropped from the output.
		points = append(points, FloatPoint{Time: p.Time, Value: p.Value})
	}
	sort.Sort(floatPoints(points))
	return points
}
// integerDistinctReduceSlice returns the distinct values within a window,
// sorted, each stamped with the time of its first occurrence.
func integerDistinctReduceSlice(a []IntegerPoint, opt *reduceOptions) []IntegerPoint {
	uniq := make(map[int64]IntegerPoint)
	for _, p := range a {
		if _, seen := uniq[p.Value]; !seen {
			uniq[p.Value] = p
		}
	}
	points := make([]IntegerPoint, 0, len(uniq))
	for _, p := range uniq {
		// Aux fields are deliberately dropped from the output.
		points = append(points, IntegerPoint{Time: p.Time, Value: p.Value})
	}
	sort.Sort(integerPoints(points))
	return points
}
// stringDistinctReduceSlice collapses a window to one point per distinct
// value. The first occurrence's timestamp is kept; the result is sorted
// with stringPoints ordering.
func stringDistinctReduceSlice(a []StringPoint, opt *reduceOptions) []StringPoint {
	seen := make(map[string]struct{}, len(a))
	points := make([]StringPoint, 0, len(a))
	for _, p := range a {
		if _, dup := seen[p.Value]; dup {
			continue
		}
		seen[p.Value] = struct{}{}
		points = append(points, StringPoint{Time: p.Time, Value: p.Value})
	}
	sort.Sort(stringPoints(points))
	return points
}
// newMeanIterator returns an iterator for operating on a mean() call.
// Integer inputs are routed through integerReduceSliceFloatIterator because
// a mean is fractional and must be emitted as float points.
func newMeanIterator(input Iterator, opt IteratorOptions) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: floatMeanReduceSlice}
	case IntegerIterator:
		return &integerReduceSliceFloatIterator{input: newBufIntegerIterator(input), opt: opt, fn: integerMeanReduceSlice}
	default:
		panic(fmt.Sprintf("unsupported mean iterator type: %T", input))
	}
}
// floatMeanReduceSlice returns one point holding the running (incremental)
// mean of the window's non-NaN values, stamped with the window start time.
func floatMeanReduceSlice(a []FloatPoint, opt *reduceOptions) []FloatPoint {
	mean, n := 0.0, 0
	for i := range a {
		v := a[i].Value
		if math.IsNaN(v) {
			continue
		}
		n++
		mean += (v - mean) / float64(n)
	}
	return []FloatPoint{{Time: opt.startTime, Value: mean}}
}
// integerMeanReduceSlice returns one float point holding the running
// (incremental) mean of the window's values, stamped with the window start.
func integerMeanReduceSlice(a []IntegerPoint, opt *reduceOptions) []FloatPoint {
	mean := 0.0
	for i, p := range a {
		// i+1 is the running count; identical arithmetic to a separate counter.
		mean += (float64(p.Value) - mean) / float64(i+1)
	}
	return []FloatPoint{{Time: opt.startTime, Value: mean}}
}
// newMedianIterator returns an iterator for operating on a median() call.
// Integer inputs are routed through integerReduceSliceFloatIterator because
// the median of an even-sized window is fractional.
func newMedianIterator(input Iterator, opt IteratorOptions) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: floatMedianReduceSlice}
	case IntegerIterator:
		return &integerReduceSliceFloatIterator{input: newBufIntegerIterator(input), opt: opt, fn: integerMedianReduceSlice}
	default:
		panic(fmt.Sprintf("unsupported median iterator type: %T", input))
	}
}
// floatMedianReduceSlice returns the window's median, stamped with the
// window start time. An even count yields the midpoint of the two middle
// values. The input slice is sorted in place by value as a side effect.
// OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1.
func floatMedianReduceSlice(a []FloatPoint, opt *reduceOptions) []FloatPoint {
	if len(a) == 1 {
		return []FloatPoint{{Time: opt.startTime, Value: a[0].Value}}
	}
	sort.Sort(floatPointsByValue(a))
	mid := len(a) / 2
	if len(a)%2 != 0 {
		return []FloatPoint{{Time: opt.startTime, Value: a[mid].Value}}
	}
	lo, hi := a[mid-1].Value, a[mid].Value
	return []FloatPoint{{Time: opt.startTime, Value: lo + (hi-lo)/2}}
}
// integerMedianReduceSlice returns the window's median as a float point,
// stamped with the window start time. An even count yields the midpoint
// of the two middle values. The input slice is sorted in place by value.
// OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1.
func integerMedianReduceSlice(a []IntegerPoint, opt *reduceOptions) []FloatPoint {
	if len(a) == 1 {
		return []FloatPoint{{Time: opt.startTime, Value: float64(a[0].Value)}}
	}
	sort.Sort(integerPointsByValue(a))
	mid := len(a) / 2
	if len(a)%2 != 0 {
		return []FloatPoint{{Time: opt.startTime, Value: float64(a[mid].Value)}}
	}
	lo, hi := a[mid-1].Value, a[mid].Value
	return []FloatPoint{{Time: opt.startTime, Value: float64(lo) + float64(hi-lo)/2}}
}
// newStddevIterator returns an iterator for operating on a stddev() call.
// Integer inputs emit float points; string inputs are accepted but always
// reduce to an empty string (see stringStddevReduceSlice).
func newStddevIterator(input Iterator, opt IteratorOptions) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: floatStddevReduceSlice}
	case IntegerIterator:
		return &integerReduceSliceFloatIterator{input: newBufIntegerIterator(input), opt: opt, fn: integerStddevReduceSlice}
	case StringIterator:
		return &stringReduceSliceIterator{input: newBufStringIterator(input), opt: opt, fn: stringStddevReduceSlice}
	default:
		panic(fmt.Sprintf("unsupported stddev iterator type: %T", input))
	}
}
// floatStddevReduceSlice returns the sample standard deviation of the
// window's non-NaN values, stamped with the window start time. Windows
// with fewer than two usable values yield a nil point.
func floatStddevReduceSlice(a []FloatPoint, opt *reduceOptions) []FloatPoint {
	// Sample stddev is undefined for fewer than two points.
	if len(a) < 2 {
		return []FloatPoint{{Time: opt.startTime, Nil: true}}
	}
	// Calculate the mean incrementally over non-NaN values.
	var mean float64
	var count int
	for _, p := range a {
		if math.IsNaN(p.Value) {
			continue
		}
		count++
		mean += (p.Value - mean) / float64(count)
	}
	// FIX: if all (or all but one) values were NaN, the original divided
	// by count-1 == 0 or -1, producing Inf/NaN; return a nil point instead.
	if count < 2 {
		return []FloatPoint{{Time: opt.startTime, Nil: true}}
	}
	// Calculate the variance over the same non-NaN values.
	var variance float64
	for _, p := range a {
		if math.IsNaN(p.Value) {
			continue
		}
		d := p.Value - mean
		variance += d * d // cheaper than math.Pow(d, 2)
	}
	return []FloatPoint{{
		Time:  opt.startTime,
		Value: math.Sqrt(variance / float64(count-1)),
	}}
}
// integerStddevReduceSlice returns the sample standard deviation of the
// window as a float point, stamped with the window start time. A window
// with fewer than two points yields a nil point.
func integerStddevReduceSlice(a []IntegerPoint, opt *reduceOptions) []FloatPoint {
	if len(a) < 2 {
		return []FloatPoint{{Time: opt.startTime, Nil: true}}
	}
	// Incremental mean; i+1 is the running count.
	mean := 0.0
	for i, p := range a {
		mean += (float64(p.Value) - mean) / float64(i+1)
	}
	// Sum of squared deviations, then the sample (n-1) normalization.
	variance := 0.0
	for _, p := range a {
		variance += math.Pow(float64(p.Value)-mean, 2)
	}
	return []FloatPoint{{
		Time:  opt.startTime,
		Value: math.Sqrt(variance / float64(len(a)-1)),
	}}
}
// stringStddevReduceSlice always returns a single empty string stamped with
// the window start time, since standard deviation is undefined for strings.
func stringStddevReduceSlice(a []StringPoint, opt *reduceOptions) []StringPoint {
	p := StringPoint{Time: opt.startTime, Value: ""}
	return []StringPoint{p}
}
// newSpreadIterator returns an iterator for operating on a spread() call
// (max - min within each window). Only float and integer inputs are
// supported; any other type panics.
func newSpreadIterator(input Iterator, opt IteratorOptions) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: floatSpreadReduceSlice}
	case IntegerIterator:
		return &integerReduceSliceIterator{input: newBufIntegerIterator(input), opt: opt, fn: integerSpreadReduceSlice}
	default:
		panic(fmt.Sprintf("unsupported spread iterator type: %T", input))
	}
}
// floatSpreadReduceSlice returns max-min of the window, stamped with the
// window start time. It assumes at least one point (a[0] would panic
// otherwise), which the caller guarantees by grouping.
func floatSpreadReduceSlice(a []FloatPoint, opt *reduceOptions) []FloatPoint {
	lo, hi := a[0].Value, a[0].Value
	for i := 1; i < len(a); i++ {
		lo = math.Min(lo, a[i].Value)
		hi = math.Max(hi, a[i].Value)
	}
	return []FloatPoint{{Time: opt.startTime, Value: hi - lo}}
}
// integerSpreadReduceSlice returns max-min of the window, stamped with the
// window start time. It assumes at least one point (a[0] would panic
// otherwise), which the caller guarantees by grouping.
func integerSpreadReduceSlice(a []IntegerPoint, opt *reduceOptions) []IntegerPoint {
	lo, hi := a[0].Value, a[0].Value
	for _, p := range a[1:] {
		// The two cases are mutually exclusive because lo <= hi always holds.
		switch {
		case p.Value < lo:
			lo = p.Value
		case p.Value > hi:
			hi = p.Value
		}
	}
	return []IntegerPoint{{Time: opt.startTime, Value: hi - lo}}
}
// newTopIterator returns an iterator for operating on a top() call.
// n is the number of points to keep per window; tags lists aux-field
// indexes used to deduplicate points per tagset before selection.
func newTopIterator(input Iterator, opt IteratorOptions, n *NumberLiteral, tags []int) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: newFloatTopReduceSliceFunc(int(n.Val), tags, opt.Interval)}
	case IntegerIterator:
		return &integerReduceSliceIterator{input: newBufIntegerIterator(input), opt: opt, fn: newIntegerTopReduceSliceFunc(int(n.Val), tags, opt.Interval)}
	default:
		panic(fmt.Sprintf("unsupported top iterator type: %T", input))
	}
}
// newFloatTopReduceSliceFunc returns a reducer selecting the n largest
// points in a window; value ties are broken by the earliest time.
func newFloatTopReduceSliceFunc(n int, tags []int, interval Interval) floatReduceSliceFunc {
	return func(a []FloatPoint, opt *reduceOptions) []FloatPoint {
		// Filter by tags if they exist, keeping the best point per tagset.
		if tags != nil {
			a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
				return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring higher values and breaking ties
		// based on the earliest time for a point.
		h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
			if a.Value != b.Value {
				return a.Value > b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]FloatPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(FloatPoint)
			points = append(points, p)
		}
		// Either zero out all values or sort the points by time
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			// Grouped windows: collapse every timestamp to the window start.
			for i := range points {
				points[i].Time = opt.startTime
			}
		} else {
			// Raw query: present the selected points in time order.
			sort.Stable(floatPoints(points))
		}
		return points
	}
}
// newIntegerTopReduceSliceFunc returns a reducer selecting the n largest
// points in a window; value ties are broken by the earliest time.
func newIntegerTopReduceSliceFunc(n int, tags []int, interval Interval) integerReduceSliceFunc {
	return func(a []IntegerPoint, opt *reduceOptions) []IntegerPoint {
		// Filter by tags if they exist, keeping the best point per tagset.
		if tags != nil {
			a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
				return p.Value > cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring higher values and breaking ties
		// based on the earliest time for a point.
		h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
			if a.Value != b.Value {
				return a.Value > b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]IntegerPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(IntegerPoint)
			points = append(points, p)
		}
		// Either zero out all values or sort the points by time
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			// Grouped windows: collapse every timestamp to the window start.
			for i := range points {
				points[i].Time = opt.startTime
			}
		} else {
			// Raw query: present the selected points in time order.
			sort.Stable(integerPoints(points))
		}
		return points
	}
}
// newBottomIterator returns an iterator for operating on a bottom() call.
// n is the number of points to keep per window; tags lists aux-field
// indexes used to deduplicate points per tagset before selection.
func newBottomIterator(input Iterator, opt IteratorOptions, n *NumberLiteral, tags []int) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: newFloatBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)}
	case IntegerIterator:
		return &integerReduceSliceIterator{input: newBufIntegerIterator(input), opt: opt, fn: newIntegerBottomReduceSliceFunc(int(n.Val), tags, opt.Interval)}
	default:
		panic(fmt.Sprintf("unsupported bottom iterator type: %T", input))
	}
}
// newFloatBottomReduceSliceFunc returns a reducer selecting the n smallest
// points in a window; value ties are broken by the earliest time.
func newFloatBottomReduceSliceFunc(n int, tags []int, interval Interval) floatReduceSliceFunc {
	return func(a []FloatPoint, opt *reduceOptions) []FloatPoint {
		// Filter by tags if they exist, keeping the best point per tagset.
		if tags != nil {
			a = filterFloatByUniqueTags(a, tags, func(cur, p *FloatPoint) bool {
				return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring lower values and breaking ties
		// based on the earliest time for a point.
		h := floatPointsSortBy(a, func(a, b *FloatPoint) bool {
			if a.Value != b.Value {
				return a.Value < b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]FloatPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(FloatPoint)
			points = append(points, p)
		}
		// Either zero out all values or sort the points by time
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			// Grouped windows: collapse every timestamp to the window start.
			for i := range points {
				points[i].Time = opt.startTime
			}
		} else {
			// Raw query: present the selected points in time order.
			sort.Stable(floatPoints(points))
		}
		return points
	}
}
// newIntegerBottomReduceSliceFunc returns a reducer selecting the n smallest
// points in a window; value ties are broken by the earliest time.
func newIntegerBottomReduceSliceFunc(n int, tags []int, interval Interval) integerReduceSliceFunc {
	return func(a []IntegerPoint, opt *reduceOptions) []IntegerPoint {
		// Filter by tags if they exist, keeping the best point per tagset.
		if tags != nil {
			a = filterIntegerByUniqueTags(a, tags, func(cur, p *IntegerPoint) bool {
				return p.Value < cur.Value || (p.Value == cur.Value && p.Time < cur.Time)
			})
		}
		// If we ask for more elements than exist, restrict n to be the length of the array.
		size := n
		if size > len(a) {
			size = len(a)
		}
		// Construct a heap preferring lower values and breaking ties
		// based on the earliest time for a point.
		h := integerPointsSortBy(a, func(a, b *IntegerPoint) bool {
			if a.Value != b.Value {
				return a.Value < b.Value
			}
			return a.Time < b.Time
		})
		heap.Init(h)
		// Pop the first n elements and then sort by time.
		points := make([]IntegerPoint, 0, size)
		for i := 0; i < size; i++ {
			p := heap.Pop(h).(IntegerPoint)
			points = append(points, p)
		}
		// Either zero out all values or sort the points by time
		// depending on if a time interval was given or not.
		if !interval.IsZero() {
			// Grouped windows: collapse every timestamp to the window start.
			for i := range points {
				points[i].Time = opt.startTime
			}
		} else {
			// Raw query: present the selected points in time order.
			sort.Stable(integerPoints(points))
		}
		return points
	}
}
// filterFloatByUniqueTags collapses a to at most one point per unique
// combination of the aux values selected by tags. When two points share a
// key, cmpFunc(cur, candidate) reports whether the candidate replaces the
// current winner. Output order is unspecified (map iteration).
func filterFloatByUniqueTags(a []FloatPoint, tags []int, cmpFunc func(cur, p *FloatPoint) bool) []FloatPoint {
	pointMap := make(map[string]FloatPoint)
	// Reuse one buffer across points instead of allocating a new one per
	// point; String() copies the bytes, so stored keys stay independent.
	var keyBuf bytes.Buffer
	for _, p := range a {
		keyBuf.Reset()
		for i, index := range tags {
			if i > 0 {
				keyBuf.WriteString(",")
			}
			fmt.Fprintf(&keyBuf, "%s", p.Aux[index])
		}
		key := keyBuf.String()
		// Insert unconditionally on first sight, or when cmpFunc prefers p.
		cur, ok := pointMap[key]
		if !ok || cmpFunc(&cur, &p) {
			pointMap[key] = p
		}
	}
	// Recreate the original array with our new filtered list.
	points := make([]FloatPoint, 0, len(pointMap))
	for _, p := range pointMap {
		points = append(points, p)
	}
	return points
}
// filterIntegerByUniqueTags collapses a to at most one point per unique
// combination of the aux values selected by tags. When two points share a
// key, cmpFunc(cur, candidate) reports whether the candidate replaces the
// current winner. Output order is unspecified (map iteration).
func filterIntegerByUniqueTags(a []IntegerPoint, tags []int, cmpFunc func(cur, p *IntegerPoint) bool) []IntegerPoint {
	pointMap := make(map[string]IntegerPoint)
	// Reuse one buffer across points instead of allocating a new one per
	// point; String() copies the bytes, so stored keys stay independent.
	var keyBuf bytes.Buffer
	for _, p := range a {
		keyBuf.Reset()
		for i, index := range tags {
			if i > 0 {
				keyBuf.WriteString(",")
			}
			fmt.Fprintf(&keyBuf, "%s", p.Aux[index])
		}
		key := keyBuf.String()
		// Insert unconditionally on first sight, or when cmpFunc prefers p.
		cur, ok := pointMap[key]
		if !ok || cmpFunc(&cur, &p) {
			pointMap[key] = p
		}
	}
	// Recreate the original array with our new filtered list.
	points := make([]IntegerPoint, 0, len(pointMap))
	for _, p := range pointMap {
		points = append(points, p)
	}
	return points
}
// newPercentileIterator returns an iterator for operating on a percentile() call.
// percentile is expressed in the range (0, 100]; the nearest-rank method is
// applied by the per-type reducers below.
func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: newFloatPercentileReduceSliceFunc(percentile)}
	case IntegerIterator:
		return &integerReduceSliceIterator{input: newBufIntegerIterator(input), opt: opt, fn: newIntegerPercentileReduceSliceFunc(percentile)}
	default:
		panic(fmt.Sprintf("unsupported percentile iterator type: %T", input))
	}
}
// newFloatPercentileReduceSliceFunc builds a reducer returning the window's
// percentile point (nearest-rank), stamped with the window start time.
// Out-of-range ranks yield a nil point; when in range, the input slice is
// sorted in place by value as a side effect.
func newFloatPercentileReduceSliceFunc(percentile float64) floatReduceSliceFunc {
	return func(a []FloatPoint, opt *reduceOptions) []FloatPoint {
		rank := int(math.Floor(float64(len(a))*percentile/100.0+0.5)) - 1
		if rank < 0 || rank >= len(a) {
			return []FloatPoint{{Time: opt.startTime, Nil: true}}
		}
		sort.Sort(floatPointsByValue(a))
		return []FloatPoint{{Time: opt.startTime, Value: a[rank].Value}}
	}
}
// newIntegerPercentileReduceSliceFunc builds a reducer returning the window's
// percentile point (nearest-rank), stamped with the window start time.
// NOTE(review): out-of-range ranks return nil here, while the float variant
// returns a nil-valued point — confirm the asymmetry is intentional.
// When in range, the input slice is sorted in place by value.
func newIntegerPercentileReduceSliceFunc(percentile float64) integerReduceSliceFunc {
	return func(a []IntegerPoint, opt *reduceOptions) []IntegerPoint {
		rank := int(math.Floor(float64(len(a))*percentile/100.0+0.5)) - 1
		if rank < 0 || rank >= len(a) {
			return nil
		}
		sort.Sort(integerPointsByValue(a))
		return []IntegerPoint{{Time: opt.startTime, Value: a[rank].Value}}
	}
}
// newDerivativeIterator returns an iterator for operating on a derivative() call.
// interval is the unit the rate of change is normalized to; isNonNegative
// drops points whose raw difference is negative (non_negative_derivative).
// Integer inputs emit float points because a rate is fractional.
func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) Iterator {
	switch input := input.(type) {
	case FloatIterator:
		return &floatReduceSliceIterator{input: newBufFloatIterator(input), opt: opt, fn: newFloatDerivativeReduceSliceFunc(interval, isNonNegative)}
	case IntegerIterator:
		return &integerReduceSliceFloatIterator{input: newBufIntegerIterator(input), opt: opt, fn: newIntegerDerivativeReduceSliceFunc(interval, isNonNegative)}
	default:
		panic(fmt.Sprintf("unsupported derivative iterator type: %T", input))
	}
}
// newFloatDerivativeReduceSliceFunc returns a reducer computing the rate of
// change between successive points, normalized to the given interval.
// The closure retains the last point seen (prev) across calls; Time == -1
// marks "no previous point yet".
// NOTE(review): when prev carries over from an earlier window, the loop
// starts at a[1], so a[0] is never differentiated against prev — confirm
// this is intended.
func newFloatDerivativeReduceSliceFunc(interval Interval, isNonNegative bool) floatReduceSliceFunc {
	prev := FloatPoint{Time: -1}
	return func(a []FloatPoint, opt *reduceOptions) []FloatPoint {
		if len(a) == 0 {
			return a
		} else if len(a) == 1 {
			// A single point has nothing to differentiate against.
			return []FloatPoint{{Time: a[0].Time, Nil: true}}
		}
		if prev.Time == -1 {
			prev = a[0]
		}
		output := make([]FloatPoint, 0, len(a)-1)
		for i := 1; i < len(a); i++ {
			p := &a[i]
			// Calculate the derivative of successive points by dividing the
			// difference of each value by the elapsed time normalized to the interval.
			diff := p.Value - prev.Value
			elapsed := p.Time - prev.Time
			value := 0.0
			if elapsed > 0 {
				value = diff / (float64(elapsed) / float64(interval.Duration))
			}
			// Advance prev before any filtering so the next delta is correct.
			prev = *p
			// Drop negative values for non-negative derivatives.
			if isNonNegative && diff < 0 {
				continue
			}
			output = append(output, FloatPoint{Time: p.Time, Value: value})
		}
		return output
	}
}
// newIntegerDerivativeReduceSliceFunc returns a reducer computing the rate of
// change between successive points as floats, normalized to the given
// interval. The closure retains the last point seen (prev) across calls;
// Time == -1 marks "no previous point yet".
// NOTE(review): when prev carries over from an earlier window, the loop
// starts at a[1], so a[0] is never differentiated against prev — confirm
// this is intended.
func newIntegerDerivativeReduceSliceFunc(interval Interval, isNonNegative bool) integerReduceSliceFloatFunc {
	prev := IntegerPoint{Time: -1}
	return func(a []IntegerPoint, opt *reduceOptions) []FloatPoint {
		if len(a) == 0 {
			return []FloatPoint{}
		} else if len(a) == 1 {
			// A single point has nothing to differentiate against.
			return []FloatPoint{{Time: a[0].Time, Nil: true}}
		}
		if prev.Time == -1 {
			prev = a[0]
		}
		output := make([]FloatPoint, 0, len(a)-1)
		for i := 1; i < len(a); i++ {
			p := &a[i]
			// Calculate the derivative of successive points by dividing the
			// difference of each value by the elapsed time normalized to the interval.
			diff := float64(p.Value - prev.Value)
			elapsed := p.Time - prev.Time
			value := 0.0
			if elapsed > 0 {
				value = diff / (float64(elapsed) / float64(interval.Duration))
			}
			// Advance prev before any filtering so the next delta is correct.
			prev = *p
			// Drop negative values for non-negative derivatives.
			if isNonNegative && diff < 0 {
				continue
			}
			output = append(output, FloatPoint{Time: p.Time, Value: value})
		}
		return output
	}
}
// integerReduceSliceFloatIterator executes a reducer on all points in a window and buffers the result.
// This iterator receives an integer iterator but produces a float iterator.
type integerReduceSliceFloatIterator struct {
	input  *bufIntegerIterator         // buffered source of integer points
	fn     integerReduceSliceFloatFunc // reducer applied to each window's points
	opt    IteratorOptions
	points []FloatPoint // buffered output; Next consumes from the end
}
// Close closes the iterator and all child iterators.
func (itr *integerReduceSliceFloatIterator) Close() error { return itr.input.Close() }
// Next returns the minimum value for the next available interval.
// Points are popped from the END of the buffer; reduce() stores them in
// reverse order, so popping yields them in forward order.
func (itr *integerReduceSliceFloatIterator) Next() *FloatPoint {
	// Calculate next window if we have no more points.
	if len(itr.points) == 0 {
		itr.points = itr.reduce()
		if len(itr.points) == 0 {
			// Input exhausted.
			return nil
		}
	}
	// Pop next point off the stack.
	p := itr.points[len(itr.points)-1]
	itr.points = itr.points[:len(itr.points)-1]
	return &p
}
// reduce executes fn once for every point in the next window.
// The previous value for the dimension is passed to fn.
func (itr *integerReduceSliceFloatIterator) reduce() []FloatPoint {
	// Calculate next window from the next unread point's timestamp.
	startTime, endTime := itr.opt.Window(itr.input.peekTime())
	var reduceOptions = reduceOptions{
		startTime: startTime,
		endTime:   endTime,
	}
	// Group points by name and tagset.
	groups := make(map[string]struct {
		name   string
		tags   Tags
		points []IntegerPoint
	})
	for {
		// Read next point; nil means the window (or input) is exhausted.
		p := itr.input.NextInWindow(startTime, endTime)
		if p == nil {
			break
		}
		tags := p.Tags.Subset(itr.opt.Dimensions)
		// Append point to dimension. The struct value must be written back
		// since map values are not addressable.
		id := tags.ID()
		g := groups[id]
		g.name = p.Name
		g.tags = tags
		g.points = append(g.points, *p)
		groups[id] = g
	}
	// Reduce each set into a set of values.
	results := make(map[string][]FloatPoint)
	for key, g := range groups {
		a := itr.fn(g.points, &reduceOptions)
		if len(a) == 0 {
			continue
		}
		// Update name and tags for each returned point.
		for i := range a {
			a[i].Name = g.name
			a[i].Tags = g.tags
		}
		results[key] = a
	}
	// Reverse sort points by name & tag.
	keys := make([]string, 0, len(results))
	for k := range results {
		keys = append(keys, k)
	}
	sort.Sort(sort.Reverse(sort.StringSlice(keys)))
	// Reverse order points within each key so that Next, which pops from
	// the end of this slice, yields points in forward order.
	a := make([]FloatPoint, 0, len(results))
	for _, k := range keys {
		for i := len(results[k]) - 1; i >= 0; i-- {
			a = append(a, results[k][i])
		}
	}
	return a
}
// integerReduceSliceFloatFunc is the function called by a IntegerPoint slice reducer that emits FloatPoint.
type integerReduceSliceFloatFunc func(a []IntegerPoint, opt *reduceOptions) []FloatPoint | vendor/github.com/influxdata/influxdb/influxql/call_iterator.go | 0.712132 | 0.421016 | call_iterator.go | starcoder |
package main
import (
	"bytes"
	"fmt"
	"math/bits"
)
// wordLength is the number of bits in uint on this platform:
// ^uint(0)>>63 is 1 on 64-bit targets and 0 on 32-bit ones, so this
// expression evaluates to 64 or 32 respectively.
const wordLength int = 32 << (^uint(0) >> 63)
// PopCount returns the number of set bits (population count) in x.
func PopCount(x uint) int {
	// math/bits provides the hardware popcount; the previous hand-rolled
	// loop cleared the lowest set bit until zero (Kernighan's method) with
	// an obscure termination test (x&(x-1) != x, i.e. x != 0).
	return bits.OnesCount(x)
}
// An IntSet is a set of small non-negative integers.
// Its zero value represents the empty set.
type IntSet struct {
	words []uint // words[i] bit j set means wordLength*i+j is a member
}
// Has reports whether the set contains the non-negative value x.
func (s *IntSet) Has(x int) bool {
	w := x / wordLength
	if w >= len(s.words) {
		return false
	}
	mask := uint(1) << uint(x%wordLength)
	return s.words[w]&mask != 0
}
// Add adds the non-negative value x to the set, growing the word slice
// as needed.
func (s *IntSet) Add(x int) {
	w := x / wordLength
	if grow := w + 1 - len(s.words); grow > 0 {
		s.words = append(s.words, make([]uint, grow)...)
	}
	s.words[w] |= uint(1) << uint(x%wordLength)
}
// UnionWith sets s to the union of s and t.
func (s *IntSet) UnionWith(t *IntSet) {
	for i := 0; i < len(t.words); i++ {
		if i >= len(s.words) {
			s.words = append(s.words, t.words[i])
			continue
		}
		s.words[i] |= t.words[i]
	}
}
// IntersectWith sets s to the intersection of s and t.
func (s *IntSet) IntersectWith(t *IntSet) {
	// FIX: words of s beyond len(t.words) represent values that cannot be
	// in t; the original left them untouched, so elements absent from t
	// survived the intersection. Truncate them away first.
	if len(s.words) > len(t.words) {
		s.words = s.words[:len(t.words)]
	}
	for i := range s.words {
		s.words[i] &= t.words[i]
	}
}
// DifferenceWith sets s to the difference of s and t (elements of s not in t).
// Words of s beyond t's length are untouched: nothing there is in t.
func (s *IntSet) DifferenceWith(t *IntSet) {
	n := len(s.words)
	if len(t.words) < n {
		n = len(t.words)
	}
	for i := 0; i < n; i++ {
		s.words[i] &^= t.words[i] // and-not clears t's bits from s
	}
}
// SymmetricDifferenceWith sets s to the symmetric difference of s and t
// (elements in exactly one of the two sets).
func (s *IntSet) SymmetricDifferenceWith(t *IntSet) {
	for i, w := range t.words {
		if i >= len(s.words) {
			// s has no counterpart words: the rest of t is copied verbatim.
			s.words = append(s.words, t.words[i:]...)
			break
		}
		s.words[i] ^= w
	}
}
// String returns the set as a string of the form "{1 2 3}".
func (s *IntSet) String() string {
	var buf bytes.Buffer
	buf.WriteByte('{')
	first := true
	for i, word := range s.words {
		// Skipping zero words is handled by the loop condition below.
		for j := 0; word != 0 && j < wordLength; j++ {
			if word&(1<<uint(j)) == 0 {
				continue
			}
			if !first {
				buf.WriteByte(' ')
			}
			first = false
			fmt.Fprintf(&buf, "%d", wordLength*i+j)
		}
	}
	buf.WriteByte('}')
	return buf.String()
}
// Len returns the number of elements in the set.
func (s *IntSet) Len() int {
	n := 0
	for i := range s.words {
		n += PopCount(s.words[i])
	}
	return n
}
// Remove removes an item x from the set; out-of-range values are a no-op.
func (s *IntSet) Remove(x int) {
	w := x / wordLength
	if w < len(s.words) {
		s.words[w] &^= uint(1) << uint(x%wordLength)
	}
}
// Clear removes all elements from the set, releasing the backing storage.
func (s *IntSet) Clear() {
	s.words = nil
}
// Copy returns an independent copy of the set.
func (s *IntSet) Copy() *IntSet {
	c := &IntSet{words: make([]uint, len(s.words))}
	copy(c.words, s.words)
	return c
}
// AddAll adds multiple non-negative integers to the set at once.
func (s *IntSet) AddAll(vals ...int) {
	for i := range vals {
		s.Add(vals[i])
	}
}
// Elems returns a slice of the elements of the set in ascending order,
// suitable for iteration with range.
func (s *IntSet) Elems() []int {
	var out []int
	for i, word := range s.words {
		// Skipping zero words is handled by the loop condition below.
		for j := 0; word != 0 && j < wordLength; j++ {
			if word&(1<<uint(j)) != 0 {
				out = append(out, wordLength*i+j)
			}
		}
	}
	return out
}
func main() {
// This main function represents extremely unstructured testing I used to verify the above
// work to myself during development.
x := &IntSet{}
x.Add(1)
x.Add(20)
x.Add(14)
x.Add(5000)
fmt.Println(x)
fmt.Println(x.Len())
x.Remove(150)
fmt.Println(x)
y := x.Copy()
x.Clear()
fmt.Println(x)
fmt.Println(y)
x.AddAll(1, 2, 3, 5, 8, 13, 21, 34)
fmt.Println(x)
z := &IntSet{}
z.AddAll(1, 2, 3, 5, 7, 13)
x.SymmetricDifferenceWith(z)
fmt.Println(x)
fmt.Println(x.Elems())
} | ch6/intset/main.go | 0.68056 | 0.439807 | main.go | starcoder |
package schemes
import "image/color"
// OMG is a gradient color scheme from purple through red to white.
// It is populated by init below; the visible head of the table starts at
// pure white (0xffffff) and shades toward red hues (the remainder of the
// gradient lies further down the table).
var OMG []color.Color
func init() {
OMG = []color.Color{
color.RGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff},
color.RGBA{R: 0xff, G: 0xfe, B: 0xfe, A: 0xff},
color.RGBA{R: 0xff, G: 0xfd, B: 0xfd, A: 0xff},
color.RGBA{R: 0xff, G: 0xfb, B: 0xfb, A: 0xff},
color.RGBA{R: 0xff, G: 0xfa, B: 0xfa, A: 0xff},
color.RGBA{R: 0xff, G: 0xf9, B: 0xf9, A: 0xff},
color.RGBA{R: 0xff, G: 0xf7, B: 0xf7, A: 0xff},
color.RGBA{R: 0xff, G: 0xf6, B: 0xf6, A: 0xff},
color.RGBA{R: 0xff, G: 0xf4, B: 0xf4, A: 0xff},
color.RGBA{R: 0xff, G: 0xf2, B: 0xf2, A: 0xff},
color.RGBA{R: 0xff, G: 0xf1, B: 0xf1, A: 0xff},
color.RGBA{R: 0xff, G: 0xef, B: 0xef, A: 0xff},
color.RGBA{R: 0xff, G: 0xed, B: 0xed, A: 0xff},
color.RGBA{R: 0xff, G: 0xeb, B: 0xeb, A: 0xff},
color.RGBA{R: 0xff, G: 0xe9, B: 0xe9, A: 0xff},
color.RGBA{R: 0xff, G: 0xe7, B: 0xe7, A: 0xff},
color.RGBA{R: 0xff, G: 0xe5, B: 0xe5, A: 0xff},
color.RGBA{R: 0xff, G: 0xe3, B: 0xe3, A: 0xff},
color.RGBA{R: 0xff, G: 0xe2, B: 0xe2, A: 0xff},
color.RGBA{R: 0xff, G: 0xe0, B: 0xe0, A: 0xff},
color.RGBA{R: 0xff, G: 0xde, B: 0xde, A: 0xff},
color.RGBA{R: 0xff, G: 0xdc, B: 0xdc, A: 0xff},
color.RGBA{R: 0xff, G: 0xd9, B: 0xd9, A: 0xff},
color.RGBA{R: 0xff, G: 0xd7, B: 0xd7, A: 0xff},
color.RGBA{R: 0xff, G: 0xd5, B: 0xd5, A: 0xff},
color.RGBA{R: 0xff, G: 0xd2, B: 0xd2, A: 0xff},
color.RGBA{R: 0xff, G: 0xd0, B: 0xd0, A: 0xff},
color.RGBA{R: 0xff, G: 0xce, B: 0xce, A: 0xff},
color.RGBA{R: 0xff, G: 0xcc, B: 0xcc, A: 0xff},
color.RGBA{R: 0xff, G: 0xca, B: 0xca, A: 0xff},
color.RGBA{R: 0xff, G: 0xc7, B: 0xc7, A: 0xff},
color.RGBA{R: 0xff, G: 0xc5, B: 0xc5, A: 0xff},
color.RGBA{R: 0xff, G: 0xc2, B: 0xc2, A: 0xff},
color.RGBA{R: 0xff, G: 0xc0, B: 0xc0, A: 0xff},
color.RGBA{R: 0xff, G: 0xbd, B: 0xbd, A: 0xff},
color.RGBA{R: 0xff, G: 0xbc, B: 0xbc, A: 0xff},
color.RGBA{R: 0xff, G: 0xb9, B: 0xb9, A: 0xff},
color.RGBA{R: 0xff, G: 0xb7, B: 0xb7, A: 0xff},
color.RGBA{R: 0xff, G: 0xb4, B: 0xb4, A: 0xff},
color.RGBA{R: 0xff, G: 0xb2, B: 0xb2, A: 0xff},
color.RGBA{R: 0xff, G: 0xb0, B: 0xb0, A: 0xff},
color.RGBA{R: 0xff, G: 0xad, B: 0xad, A: 0xff},
color.RGBA{R: 0xff, G: 0xab, B: 0xab, A: 0xff},
color.RGBA{R: 0xff, G: 0xa9, B: 0xa9, A: 0xff},
color.RGBA{R: 0xff, G: 0xa7, B: 0xa7, A: 0xff},
color.RGBA{R: 0xff, G: 0xa4, B: 0xa4, A: 0xff},
color.RGBA{R: 0xff, G: 0xa2, B: 0xa2, A: 0xff},
color.RGBA{R: 0xff, G: 0xa0, B: 0xa0, A: 0xff},
color.RGBA{R: 0xff, G: 0x9e, B: 0x9e, A: 0xff},
color.RGBA{R: 0xff, G: 0x9b, B: 0x9b, A: 0xff},
color.RGBA{R: 0xff, G: 0x99, B: 0x99, A: 0xff},
color.RGBA{R: 0xff, G: 0x97, B: 0x97, A: 0xff},
color.RGBA{R: 0xff, G: 0x95, B: 0x95, A: 0xff},
color.RGBA{R: 0xff, G: 0x93, B: 0x93, A: 0xff},
color.RGBA{R: 0xff, G: 0x91, B: 0x91, A: 0xff},
color.RGBA{R: 0xff, G: 0x8f, B: 0x8f, A: 0xff},
color.RGBA{R: 0xff, G: 0x8d, B: 0x8d, A: 0xff},
color.RGBA{R: 0xff, G: 0x8b, B: 0x8b, A: 0xff},
color.RGBA{R: 0xff, G: 0x89, B: 0x89, A: 0xff},
color.RGBA{R: 0xff, G: 0x88, B: 0x88, A: 0xff},
color.RGBA{R: 0xff, G: 0x86, B: 0x86, A: 0xff},
color.RGBA{R: 0xff, G: 0x84, B: 0x84, A: 0xff},
color.RGBA{R: 0xff, G: 0x83, B: 0x83, A: 0xff},
color.RGBA{R: 0xff, G: 0x81, B: 0x81, A: 0xff},
color.RGBA{R: 0xff, G: 0x80, B: 0x80, A: 0xff},
color.RGBA{R: 0xff, G: 0x7f, B: 0x7f, A: 0xff},
color.RGBA{R: 0xff, G: 0x7f, B: 0x7f, A: 0xff},
color.RGBA{R: 0xff, G: 0x7e, B: 0x7e, A: 0xff},
color.RGBA{R: 0xff, G: 0x7d, B: 0x7d, A: 0xff},
color.RGBA{R: 0xff, G: 0x7d, B: 0x7d, A: 0xff},
color.RGBA{R: 0xff, G: 0x7c, B: 0x7c, A: 0xff},
color.RGBA{R: 0xff, G: 0x7b, B: 0x7a, A: 0xff},
color.RGBA{R: 0xff, G: 0x7b, B: 0x7a, A: 0xff},
color.RGBA{R: 0xff, G: 0x7a, B: 0x79, A: 0xff},
color.RGBA{R: 0xff, G: 0x7a, B: 0x79, A: 0xff},
color.RGBA{R: 0xff, G: 0x79, B: 0x78, A: 0xff},
color.RGBA{R: 0xff, G: 0x78, B: 0x77, A: 0xff},
color.RGBA{R: 0xff, G: 0x77, B: 0x76, A: 0xff},
color.RGBA{R: 0xff, G: 0x77, B: 0x76, A: 0xff},
color.RGBA{R: 0xff, G: 0x76, B: 0x74, A: 0xff},
color.RGBA{R: 0xff, G: 0x75, B: 0x74, A: 0xff},
color.RGBA{R: 0xff, G: 0x75, B: 0x73, A: 0xff},
color.RGBA{R: 0xff, G: 0x73, B: 0x72, A: 0xff},
color.RGBA{R: 0xff, G: 0x73, B: 0x72, A: 0xff},
color.RGBA{R: 0xff, G: 0x72, B: 0x71, A: 0xff},
color.RGBA{R: 0xff, G: 0x72, B: 0x70, A: 0xff},
color.RGBA{R: 0xff, G: 0x71, B: 0x6f, A: 0xff},
color.RGBA{R: 0xff, G: 0x71, B: 0x6f, A: 0xff},
color.RGBA{R: 0xff, G: 0x70, B: 0x6e, A: 0xff},
color.RGBA{R: 0xff, G: 0x6f, B: 0x6c, A: 0xff},
color.RGBA{R: 0xff, G: 0x6f, B: 0x6c, A: 0xff},
color.RGBA{R: 0xff, G: 0x6e, B: 0x6b, A: 0xff},
color.RGBA{R: 0xff, G: 0x6e, B: 0x6b, A: 0xff},
color.RGBA{R: 0xff, G: 0x6d, B: 0x69, A: 0xff},
color.RGBA{R: 0xff, G: 0x6d, B: 0x69, A: 0xff},
color.RGBA{R: 0xff, G: 0x6c, B: 0x68, A: 0xff},
color.RGBA{R: 0xff, G: 0x6b, B: 0x68, A: 0xff},
color.RGBA{R: 0xff, G: 0x6b, B: 0x66, A: 0xff},
color.RGBA{R: 0xff, G: 0x6a, B: 0x66, A: 0xff},
color.RGBA{R: 0xff, G: 0x6a, B: 0x65, A: 0xff},
color.RGBA{R: 0xff, G: 0x69, B: 0x65, A: 0xff},
color.RGBA{R: 0xff, G: 0x68, B: 0x63, A: 0xff},
color.RGBA{R: 0xff, G: 0x68, B: 0x63, A: 0xff},
color.RGBA{R: 0xff, G: 0x67, B: 0x62, A: 0xff},
color.RGBA{R: 0xff, G: 0x67, B: 0x62, A: 0xff},
color.RGBA{R: 0xff, G: 0x66, B: 0x61, A: 0xff},
color.RGBA{R: 0xff, G: 0x66, B: 0x60, A: 0xff},
color.RGBA{R: 0xff, G: 0x65, B: 0x60, A: 0xff},
color.RGBA{R: 0xff, G: 0x65, B: 0x60, A: 0xff},
color.RGBA{R: 0xff, G: 0x64, B: 0x5e, A: 0xff},
color.RGBA{R: 0xff, G: 0x64, B: 0x5e, A: 0xff},
color.RGBA{R: 0xff, G: 0x63, B: 0x5d, A: 0xff},
color.RGBA{R: 0xff, G: 0x63, B: 0x5c, A: 0xff},
color.RGBA{R: 0xff, G: 0x62, B: 0x5b, A: 0xff},
color.RGBA{R: 0xff, G: 0x62, B: 0x5b, A: 0xff},
color.RGBA{R: 0xff, G: 0x61, B: 0x5a, A: 0xff},
color.RGBA{R: 0xff, G: 0x61, B: 0x59, A: 0xff},
color.RGBA{R: 0xff, G: 0x60, B: 0x59, A: 0xff},
color.RGBA{R: 0xff, G: 0x60, B: 0x59, A: 0xff},
color.RGBA{R: 0xff, G: 0x5f, B: 0x58, A: 0xff},
color.RGBA{R: 0xff, G: 0x5f, B: 0x58, A: 0xff},
color.RGBA{R: 0xff, G: 0x5e, B: 0x56, A: 0xff},
color.RGBA{R: 0xff, G: 0x5d, B: 0x56, A: 0xff},
color.RGBA{R: 0xff, G: 0x5d, B: 0x55, A: 0xff},
color.RGBA{R: 0xff, G: 0x5d, B: 0x55, A: 0xff},
color.RGBA{R: 0xff, G: 0x5c, B: 0x55, A: 0xff},
color.RGBA{R: 0xff, G: 0x5c, B: 0x54, A: 0xff},
color.RGBA{R: 0xff, G: 0x5b, B: 0x53, A: 0xff},
color.RGBA{R: 0xff, G: 0x5b, B: 0x53, A: 0xff},
color.RGBA{R: 0xff, G: 0x5a, B: 0x52, A: 0xff},
color.RGBA{R: 0xff, G: 0x5a, B: 0x52, A: 0xff},
color.RGBA{R: 0xff, G: 0x59, B: 0x51, A: 0xff},
color.RGBA{R: 0xff, G: 0x59, B: 0x52, A: 0xff},
color.RGBA{R: 0xff, G: 0x59, B: 0x50, A: 0xff},
color.RGBA{R: 0xff, G: 0x59, B: 0x50, A: 0xff},
color.RGBA{R: 0xff, G: 0x59, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0x59, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0x58, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0x58, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0x57, B: 0x4e, A: 0xff},
color.RGBA{R: 0xff, G: 0x57, B: 0x4e, A: 0xff},
color.RGBA{R: 0xff, G: 0x57, B: 0x4e, A: 0xff},
color.RGBA{R: 0xff, G: 0x57, B: 0x4d, A: 0xff},
color.RGBA{R: 0xff, G: 0x57, B: 0x4d, A: 0xff},
color.RGBA{R: 0xff, G: 0x56, B: 0x4d, A: 0xff},
color.RGBA{R: 0xff, G: 0x56, B: 0x4d, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4a, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4b, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4c, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4d, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4e, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4e, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4e, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x50, A: 0xff},
color.RGBA{R: 0xff, G: 0x53, B: 0x50, A: 0xff},
color.RGBA{R: 0xff, G: 0x54, B: 0x51, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x52, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x52, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x53, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x53, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x54, A: 0xff},
color.RGBA{R: 0xff, G: 0x55, B: 0x54, A: 0xff},
color.RGBA{R: 0xff, G: 0x56, B: 0x55, A: 0xff},
color.RGBA{R: 0xff, G: 0x56, B: 0x55, A: 0xff},
color.RGBA{R: 0xff, G: 0x57, B: 0x57, A: 0xff},
color.RGBA{R: 0xfe, G: 0x59, B: 0x59, A: 0xff},
color.RGBA{R: 0xfe, G: 0x5b, B: 0x5c, A: 0xff},
color.RGBA{R: 0xfd, G: 0x5c, B: 0x5d, A: 0xff},
color.RGBA{R: 0xfc, G: 0x5e, B: 0x60, A: 0xff},
color.RGBA{R: 0xfb, G: 0x60, B: 0x62, A: 0xff},
color.RGBA{R: 0xfb, G: 0x61, B: 0x64, A: 0xff},
color.RGBA{R: 0xf9, G: 0x63, B: 0x67, A: 0xff},
color.RGBA{R: 0xf9, G: 0x64, B: 0x69, A: 0xff},
color.RGBA{R: 0xf8, G: 0x66, B: 0x6c, A: 0xff},
color.RGBA{R: 0xf7, G: 0x68, B: 0x6f, A: 0xff},
color.RGBA{R: 0xf6, G: 0x69, B: 0x71, A: 0xff},
color.RGBA{R: 0xf5, G: 0x6b, B: 0x74, A: 0xff},
color.RGBA{R: 0xf4, G: 0x6d, B: 0x77, A: 0xff},
color.RGBA{R: 0xf3, G: 0x6e, B: 0x7a, A: 0xff},
color.RGBA{R: 0xf2, G: 0x70, B: 0x7d, A: 0xff},
color.RGBA{R: 0xf1, G: 0x71, B: 0x7f, A: 0xff},
color.RGBA{R: 0xf0, G: 0x73, B: 0x82, A: 0xff},
color.RGBA{R: 0xef, G: 0x75, B: 0x86, A: 0xff},
color.RGBA{R: 0xee, G: 0x76, B: 0x88, A: 0xff},
color.RGBA{R: 0xed, G: 0x78, B: 0x8c, A: 0xff},
color.RGBA{R: 0xec, G: 0x79, B: 0x8e, A: 0xff},
color.RGBA{R: 0xeb, G: 0x7b, B: 0x91, A: 0xff},
color.RGBA{R: 0xea, G: 0x7c, B: 0x94, A: 0xff},
color.RGBA{R: 0xe9, G: 0x7e, B: 0x97, A: 0xff},
color.RGBA{R: 0xe8, G: 0x7f, B: 0x9a, A: 0xff},
color.RGBA{R: 0xe8, G: 0x81, B: 0x9d, A: 0xff},
color.RGBA{R: 0xe6, G: 0x82, B: 0x9f, A: 0xff},
color.RGBA{R: 0xe6, G: 0x84, B: 0xa2, A: 0xff},
color.RGBA{R: 0xe5, G: 0x85, B: 0xa5, A: 0xff},
color.RGBA{R: 0xe4, G: 0x87, B: 0xa8, A: 0xff},
color.RGBA{R: 0xe3, G: 0x88, B: 0xaa, A: 0xff},
color.RGBA{R: 0xe3, G: 0x8a, B: 0xad, A: 0xff},
color.RGBA{R: 0xe2, G: 0x8b, B: 0xb0, A: 0xff},
color.RGBA{R: 0xe1, G: 0x8c, B: 0xb2, A: 0xff},
color.RGBA{R: 0xe0, G: 0x8e, B: 0xb5, A: 0xff},
color.RGBA{R: 0xdf, G: 0x8f, B: 0xb7, A: 0xff},
color.RGBA{R: 0xdf, G: 0x90, B: 0xb9, A: 0xff},
color.RGBA{R: 0xdf, G: 0x92, B: 0xbc, A: 0xff},
color.RGBA{R: 0xde, G: 0x93, B: 0xbe, A: 0xff},
color.RGBA{R: 0xdd, G: 0x94, B: 0xc0, A: 0xff},
color.RGBA{R: 0xdd, G: 0x96, B: 0xc3, A: 0xff},
color.RGBA{R: 0xdc, G: 0x97, B: 0xc5, A: 0xff},
color.RGBA{R: 0xdb, G: 0x98, B: 0xc7, A: 0xff},
color.RGBA{R: 0xdb, G: 0x99, B: 0xc9, A: 0xff},
color.RGBA{R: 0xdb, G: 0x9a, B: 0xca, A: 0xff},
color.RGBA{R: 0xdb, G: 0x9c, B: 0xcd, A: 0xff},
color.RGBA{R: 0xda, G: 0x9d, B: 0xcf, A: 0xff},
color.RGBA{R: 0xd9, G: 0x9e, B: 0xd0, A: 0xff},
color.RGBA{R: 0xd9, G: 0x9f, B: 0xd2, A: 0xff},
color.RGBA{R: 0xd9, G: 0xa0, B: 0xd3, A: 0xff},
color.RGBA{R: 0xd9, G: 0xa1, B: 0xd5, A: 0xff},
color.RGBA{R: 0xd8, G: 0xa2, B: 0xd6, A: 0xff},
color.RGBA{R: 0xd8, G: 0xa3, B: 0xd8, A: 0xff},
color.RGBA{R: 0xd8, G: 0xa4, B: 0xd9, A: 0xff},
color.RGBA{R: 0xd7, G: 0xa5, B: 0xda, A: 0xff},
color.RGBA{R: 0xd8, G: 0xa6, B: 0xdb, A: 0xff},
color.RGBA{R: 0xd7, G: 0xa6, B: 0xdc, A: 0xff},
color.RGBA{R: 0xd7, G: 0xa7, B: 0xde, A: 0xff},
color.RGBA{R: 0xd7, G: 0xa8, B: 0xdf, A: 0xff},
color.RGBA{R: 0xd7, G: 0xa9, B: 0xdf, A: 0xff},
color.RGBA{R: 0xd7, G: 0xaa, B: 0xe0, A: 0xff},
color.RGBA{R: 0xd7, G: 0xaa, B: 0xe1, A: 0xff},
}
}
package repairdroid
import (
	"fmt"
	"math"
	"strings"
	"time"

	"github.com/chr-ras/advent-of-code-2019/util/geometry"
	"github.com/chr-ras/advent-of-code-2019/util/intcode"
	q "github.com/enriquebris/goconcurrentqueue"
	"github.com/gosuri/uilive"
)
// FindShortestWayToOxygenTank controls the repair droid to explore the ship and find the shortest route to the oxygen tank.
// It starts the intcode remote-control program and the exploration as
// concurrent goroutines that communicate over the movement/result queues,
// and returns the discovered oxygen station (with its distance from the
// start) together with the fully explored ship map.
func FindShortestWayToOxygenTank(remoteControlProgram []int64) (OxygenStation, map[geometry.Vector]Position) {
	movementQueue := q.NewFIFO()
	resultQueue := q.NewFIFO()
	finalMemory := make(chan []int64)
	// The intcode computer reads movement commands from movementQueue and
	// writes status codes to resultQueue; its final memory is not used here.
	go intcode.ExecuteProgram(remoteControlProgram, finalMemory, movementQueue, resultQueue, 1024)
	oxygenStation := make(chan OxygenStation)
	explorationFinished := make(chan map[geometry.Vector]Position)
	go exploreShip(movementQueue, resultQueue, oxygenStation, explorationFinished)
	// exploreShip sends the station as soon as it is found, and the complete
	// map once the entire ship has been walked.
	station := <-oxygenStation
	shipMap := <-explorationFinished
	return station, shipMap
}
// exploreShip walks the droid through the whole ship starting at the origin,
// recording every visited cell in a map keyed by its coordinates. The oxygen
// station is reported on its channel when encountered; the finished map is
// sent on explorationFinished once the recursive walk returns.
func exploreShip(movementQueue, resultQueue q.Queue, oxygenStation chan OxygenStation, explorationFinished chan map[geometry.Vector]Position) {
	startPositionVector := geometry.Vector{X: 0, Y: 0}
	shipMap := make(map[geometry.Vector]Position)
	shipMap[startPositionVector] = Position{Status: StartPosition, AdjacentPositions: []geometry.Vector{}}
	// uilive rewrites the same terminal region, so repeated PrettyPrint calls
	// render the exploration as an animation.
	writer := uilive.New()
	writer.Start()
	moveDroid(0, geometry.Vector{}, startPositionVector, shipMap, movementQueue, resultQueue, oxygenStation, writer)
	writer.Stop()
	explorationFinished <- shipMap
}
// moveDroid probes all four neighbor cells of currentPosition in the fixed
// order north, east, south, west. Whenever a probe actually moved the droid
// (goIntoDirection explored recursively from the new cell and returned true),
// the droid is stepped back so the next direction is probed from the same
// position again.
func moveDroid(currentStepsTaken int64, previousDirection, currentPosition geometry.Vector, shipMap map[geometry.Vector]Position, movementQueue, resultQueue q.Queue, oxygenStation chan OxygenStation, writer *uilive.Writer) {
	// Each entry pairs a direction vector with the droid command that moves
	// that way and the command that undoes the move. The N, E, S, W order
	// matches the original exploration order.
	probes := []struct {
		dir     geometry.Vector
		cmd     int64
		reverse int64
	}{
		{geometry.Vector{X: 0, Y: -1}, northCommand, southCommand},
		{geometry.Vector{X: 1, Y: 0}, eastCommand, westCommand},
		{geometry.Vector{X: 0, Y: 1}, southCommand, northCommand},
		{geometry.Vector{X: -1, Y: 0}, westCommand, eastCommand},
	}
	for _, p := range probes {
		if goIntoDirection(currentStepsTaken, previousDirection, p.dir, currentPosition, p.cmd, shipMap, movementQueue, resultQueue, oxygenStation, writer) {
			reverseMove(currentPosition, shipMap, p.reverse, movementQueue, resultQueue, writer)
		}
	}
}
// goIntoDirection probes the cell adjacent to currentPosition in newDirection
// (unless that would immediately undo the previous move), records the result
// in shipMap and, if the droid actually moved, recursively continues the
// exploration from the new cell. It returns true when the droid moved and
// therefore has to be moved back by the caller, and false when it hit a wall,
// the cell was already known, or the direction was the backtracking one.
func goIntoDirection(currentStepsTaken int64, previousDirection, newDirection, currentPosition geometry.Vector, droidCommand int64, shipMap map[geometry.Vector]Position, movementQueue, resultQueue q.Queue, oxygenStation chan OxygenStation, writer *uilive.Writer) bool {
	// Never turn straight back the way we came; backtracking is handled by the caller.
	if previousDirection.ScalarMult(-1) != newDirection {
		newPosition := currentPosition.Add(newDirection)
		if _, alreadyVisited := shipMap[newPosition]; !alreadyVisited {
			movementQueue.Enqueue(droidCommand)
			resultElement, _ := resultQueue.DequeueOrWaitForNextElement()
			result := resultElement.(int64)
			// Walls get no adjacency; every other status links the new cell
			// back to the cell we probed from.
			var newPositionAdjacentPositions []geometry.Vector
			if result != HitWall {
				newPositionAdjacentPositions = []geometry.Vector{currentPosition}
			}
			shipMap[newPosition] = Position{Status: result, AdjacentPositions: newPositionAdjacentPositions}
			currentPositionInfo := shipMap[currentPosition]
			currentPositionInfo.AdjacentPositions = append(currentPositionInfo.AdjacentPositions, newPosition)
			shipMap[currentPosition] = currentPositionInfo
			if result == HitWall {
				// The droid did not move; just refresh the rendering.
				PrettyPrint(currentPosition, shipMap, writer, 10)
				return false
			}
			currentStepsTaken++
			PrettyPrint(newPosition, shipMap, writer, 10)
			if result == OxygenSystem {
				oxygenStation <- OxygenStation{Distance: currentStepsTaken, Position: newPosition}
			}
			moveDroid(currentStepsTaken, newDirection, newPosition, shipMap, movementQueue, resultQueue, oxygenStation, writer)
			return true
		}
	}
	return false
}
// reverseMove sends command to step the droid back onto currentPosition and
// re-renders the map. The droid's status reply is deliberately discarded:
// the cell it reports on is already recorded in shipMap.
func reverseMove(currentPosition geometry.Vector, shipMap map[geometry.Vector]Position, command int64, movementQueue, resultQueue q.Queue, writer *uilive.Writer) {
	movementQueue.Enqueue(command)
	_, _ = resultQueue.DequeueOrWaitForNextElement() // reply already known from the forward move
	PrettyPrint(currentPosition, shipMap, writer, 10)
}
// PrettyPrint renders the currently known ship map to writer and then sleeps
// for sleepTime milliseconds so successive calls appear as an animation.
// The droid's currentPosition is drawn as 'o'; oxygen-filled cells take
// precedence over every other marker.
func PrettyPrint(currentPosition geometry.Vector, shipMap map[geometry.Vector]Position, writer *uilive.Writer, sleepTime int) {
	// Determine the bounding box of everything discovered so far; coordinates
	// are relative to the start position and may be negative.
	minX, minY, maxX, maxY := math.MaxInt32, math.MaxInt32, math.MinInt32, math.MinInt32
	for position := range shipMap {
		if position.X < minX {
			minX = position.X
		}
		if position.X > maxX {
			maxX = position.X
		}
		if position.Y < minY {
			minY = position.Y
		}
		if position.Y > maxY {
			maxY = position.Y
		}
	}
	output := make([][]string, maxY-minY+1)
	for i := range output {
		output[i] = make([]string, maxX-minX+1)
		for j := range output[i] {
			output[i][j] = " "
		}
	}
	// Shift all coordinates into the non-negative grid range.
	xOffset := 0 - minX
	yOffset := 0 - minY
	for position, positionInfo := range shipMap {
		x := position.X + xOffset
		y := position.Y + yOffset
		if positionInfo.Status == Oxygen {
			output[y][x] = "O"
			continue
		}
		if position.X == currentPosition.X && position.Y == currentPosition.Y {
			output[y][x] = "o"
		} else {
			switch positionInfo.Status {
			case HitWall:
				output[y][x] = "░"
			case Moved:
				output[y][x] = "."
			case OxygenSystem:
				output[y][x] = "X"
			case StartPosition:
				output[y][x] = "S"
			}
		}
	}
	// Build the frame with a strings.Builder instead of quadratic string
	// concatenation in the loop.
	var rendered strings.Builder
	for _, row := range output {
		for _, cell := range row {
			rendered.WriteString(cell)
		}
		rendered.WriteByte('\n')
	}
	// fmt.Fprint, not Fprintf: the rendered map is data, not a format string,
	// so any '%' runes in it must not be interpreted as verbs (go vet printf).
	fmt.Fprint(writer, rendered.String())
	time.Sleep(time.Duration(sleepTime) * time.Millisecond)
}
const (
	// Droid movement commands as consumed by the intcode remote-control
	// program (see moveDroid / goIntoDirection).
	northCommand = int64(1)
	southCommand = int64(2)
	westCommand  = int64(3)
	eastCommand  = int64(4)
	// HitWall indicates that a position is occupied by a wall
	HitWall = int64(0)
	// Moved indicates that a position is accessible
	Moved = int64(1)
	// OxygenSystem indicates that a position is accessible and the oxygen system is there
	OxygenSystem = int64(2)
	// StartPosition indicates that a position is accessible and that it is the starting position
	StartPosition = int64(3)
	// Oxygen indicates that a position is accessible and that oxygen is available
	Oxygen = int64(4)
)
// OxygenStation defines the position of the oxygen station and its distance
// (in droid steps along the exploration path) from the starting position.
type OxygenStation struct {
	Distance int64
	Position geometry.Vector
}
// Position defines a position on the space ship and includes a statu (wall, ...) as well as the adjacent positions.
type Position struct {
Status int64
AdjacentPositions []geometry.Vector
} | 15-oxygen-system/repairdroid/repairdroid.go | 0.754644 | 0.539469 | repairdroid.go | starcoder |
package dynamodb
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/guregu/dynamo"
)
type (
	// Scan : Request to scan all the data in a table.
	// It mirrors dynamo.Scan's fluent builder API behind an interface so the
	// concrete scan can be substituted in tests.
	Scan interface {
		// StartFrom : Makes this scan continue from a previous one.
		StartFrom(key dynamo.PagingKey) Scan
		// Index : Specifies the name of the index that Scan will operate on.
		Index(name string) Scan
		// Project : Limits the result attributes to the given paths.
		Project(paths ...string) Scan
		// Filter : Takes an expression that all results will be evaluated against.
		Filter(expr string, args ...interface{}) Scan
		// Consistent : Set the read consistency to strong or not.
		Consistent(on bool) Scan
		// Limit : Specifies the maximum amount of results to return.
		Limit(limit int64) Scan
		// SearchLimit : Specifies a maximum amount of results to evaluate.
		SearchLimit(limit int64) Scan
		// ConsumedCapacity : Measure the throughput capacity consumed by this operation and add it to cc.
		ConsumedCapacity(cc *dynamo.ConsumedCapacity) Scan
		// Iter : Returns a results iterator for this request.
		Iter() dynamo.PagingIter
		// All : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		All(out interface{}) error
		// AllWithContext : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		AllWithContext(ctx aws.Context, out interface{}) error
		// AllWithLastEvaluatedKey : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		AllWithLastEvaluatedKey(out interface{}) (dynamo.PagingKey, error)
		// AllWithLastEvaluatedKeyContext : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
		AllWithLastEvaluatedKeyContext(ctx aws.Context, out interface{}) (dynamo.PagingKey, error)
		// Count : Executes this request and returns the number of items matching the scan.
		Count() (int64, error)
		// CountWithContext : Executes this request and returns the number of items matching the scan.
		CountWithContext(ctx aws.Context) (int64, error)
	}

	// scanWrap adapts a *dynamo.Scan to the Scan interface; each builder
	// method wraps the scan returned by dynamo again so calls can be chained.
	scanWrap struct {
		scan *dynamo.Scan
	}
)
// StartFrom : Makes this scan continue from a previous one.
func (sw *scanWrap) StartFrom(key dynamo.PagingKey) Scan {
	return &scanWrap{scan: sw.scan.StartFrom(key)}
}

// Index : Specifies the name of the index that Scan will operate on.
func (sw *scanWrap) Index(name string) Scan {
	return &scanWrap{scan: sw.scan.Index(name)}
}

// Project : Limits the result attributes to the given paths.
func (sw *scanWrap) Project(paths ...string) Scan {
	return &scanWrap{scan: sw.scan.Project(paths...)}
}

// Filter : Takes an expression that all results will be evaluated against.
func (sw *scanWrap) Filter(expr string, args ...interface{}) Scan {
	return &scanWrap{scan: sw.scan.Filter(expr, args...)}
}

// Consistent : Set the read consistency to strong or not.
func (sw *scanWrap) Consistent(on bool) Scan {
	return &scanWrap{scan: sw.scan.Consistent(on)}
}

// Limit : Specifies the maximum amount of results to return.
func (sw *scanWrap) Limit(limit int64) Scan {
	return &scanWrap{scan: sw.scan.Limit(limit)}
}

// SearchLimit : Specifies a maximum amount of results to evaluate.
func (sw *scanWrap) SearchLimit(limit int64) Scan {
	return &scanWrap{scan: sw.scan.SearchLimit(limit)}
}

// ConsumedCapacity : Measure the throughput capacity consumed by this operation and add it to cc.
func (sw *scanWrap) ConsumedCapacity(cc *dynamo.ConsumedCapacity) Scan {
	return &scanWrap{scan: sw.scan.ConsumedCapacity(cc)}
}
// Iter : Returns a results iterator for this request.
func (sw *scanWrap) Iter() dynamo.PagingIter {
return sw.scan.Iter()
}
// All : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
func (sw *scanWrap) All(out interface{}) error {
return sw.scan.All(out)
}
// AllWithContext : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
func (sw *scanWrap) AllWithContext(ctx aws.Context, out interface{}) error {
return sw.scan.AllWithContext(ctx, out)
}
// AllWithLastEvaluatedKey : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
func (sw *scanWrap) AllWithLastEvaluatedKey(out interface{}) (dynamo.PagingKey, error) {
return sw.scan.AllWithLastEvaluatedKey(out)
}
// AllWithLastEvaluatedKeyContext : Executes this request and unmarshals all results to out, which must be a pointer to a slice.
func (sw *scanWrap) AllWithLastEvaluatedKeyContext(ctx aws.Context, out interface{}) (dynamo.PagingKey, error) {
return sw.scan.AllWithLastEvaluatedKeyContext(ctx, out)
}
// Count : Executes this request and returns the number of items matching the scan.
func (sw *scanWrap) Count() (int64, error) {
return sw.scan.Count()
}
// CountWithContext : Executes this request and returns the number of items matching the scan.
func (sw *scanWrap) CountWithContext(ctx aws.Context) (int64, error) {
return sw.scan.CountWithContext(ctx)
} | scan.go | 0.761982 | 0.422028 | scan.go | starcoder |
package kcp
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/sha1"
"github.com/templexxx/xor"
"golang.org/x/crypto/blowfish"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/salsa20"
"golang.org/x/crypto/tea"
"golang.org/x/crypto/twofish"
"golang.org/x/crypto/xtea"
)
var (
	// initialVector seeds the keystream of the local CFB mode implemented by
	// encrypt/decrypt below.
	initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
	// saltxor is the PBKDF2 salt used by NewSimpleXORBlockCrypt to expand the key.
	saltxor = `sH3CIVoF#rWLtJo6`
)
// BlockCrypt defines encryption/decryption methods for a given byte slice.
// Notes on implementing: the data to be encrypted contains a builtin
// nonce at the first 16 bytes.
// Implementations in this file either pair a cipher.Block with the local CFB
// mode (encrypt/decrypt below) or apply a stream/XOR scheme directly.
type BlockCrypt interface {
	// Encrypt encrypts the whole block in src into dst.
	// Dst and src may point at the same memory.
	Encrypt(dst, src []byte)

	// Decrypt decrypts the whole block in src into dst.
	// Dst and src may point at the same memory.
	Decrypt(dst, src []byte)
}
type salsa20BlockCrypt struct {
	key [32]byte
}

// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
	c := &salsa20BlockCrypt{}
	copy(c.key[:], key)
	return c, nil
}

// Encrypt XOR-streams src[8:] into dst[8:]; the first 8 bytes act as the
// Salsa20 nonce and are passed through unchanged.
func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
	nonce := src[:8]
	salsa20.XORKeyStream(dst[8:], src[8:], nonce, &c.key)
	copy(dst[:8], nonce)
}

// Decrypt is identical to Encrypt: an XOR keystream cipher is its own inverse.
func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
	nonce := src[:8]
	salsa20.XORKeyStream(dst[8:], src[8:], nonce, &c.key)
	copy(dst[:8], nonce)
}
type twofishBlockCrypt struct {
	encbuf []byte
	decbuf []byte
	block  cipher.Block
}

// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
	block, err := twofish.NewCipher(key)
	if err != nil {
		return nil, err
	}
	return &twofishBlockCrypt{
		block:  block,
		encbuf: make([]byte, twofish.BlockSize),
		decbuf: make([]byte, 2*twofish.BlockSize),
	}, nil
}

// Encrypt applies the file-local CFB mode with the Twofish block cipher.
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }

// Decrypt reverses Encrypt.
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type tripleDESBlockCrypt struct {
	encbuf []byte
	decbuf []byte
	block  cipher.Block
}

// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
	block, err := des.NewTripleDESCipher(key)
	if err != nil {
		return nil, err
	}
	return &tripleDESBlockCrypt{
		block:  block,
		encbuf: make([]byte, des.BlockSize),
		decbuf: make([]byte, 2*des.BlockSize),
	}, nil
}

// Encrypt applies the file-local CFB mode with the 3DES block cipher.
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }

// Decrypt reverses Encrypt.
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type cast5BlockCrypt struct {
	encbuf []byte
	decbuf []byte
	block  cipher.Block
}

// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
	block, err := cast5.NewCipher(key)
	if err != nil {
		return nil, err
	}
	return &cast5BlockCrypt{
		block:  block,
		encbuf: make([]byte, cast5.BlockSize),
		decbuf: make([]byte, 2*cast5.BlockSize),
	}, nil
}

// Encrypt applies the file-local CFB mode with the CAST-128 block cipher.
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }

// Decrypt reverses Encrypt.
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type blowfishBlockCrypt struct {
	encbuf []byte
	decbuf []byte
	block  cipher.Block
}

// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
	block, err := blowfish.NewCipher(key)
	if err != nil {
		return nil, err
	}
	return &blowfishBlockCrypt{
		block:  block,
		encbuf: make([]byte, blowfish.BlockSize),
		decbuf: make([]byte, 2*blowfish.BlockSize),
	}, nil
}

// Encrypt applies the file-local CFB mode with the Blowfish block cipher.
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }

// Decrypt reverses Encrypt.
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type aesBlockCrypt struct {
	encbuf []byte
	decbuf []byte
	block  cipher.Block
}

// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	return &aesBlockCrypt{
		block:  block,
		encbuf: make([]byte, aes.BlockSize),
		decbuf: make([]byte, 2*aes.BlockSize),
	}, nil
}

// Encrypt applies the file-local CFB mode with the AES block cipher.
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }

// Decrypt reverses Encrypt.
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type teaBlockCrypt struct {
	encbuf []byte
	decbuf []byte
	block  cipher.Block
}

// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
	// 16 rounds matches the original implementation's chosen strength/speed
	// trade-off for TEA.
	block, err := tea.NewCipherWithRounds(key, 16)
	if err != nil {
		return nil, err
	}
	return &teaBlockCrypt{
		block:  block,
		encbuf: make([]byte, tea.BlockSize),
		decbuf: make([]byte, 2*tea.BlockSize),
	}, nil
}

// Encrypt applies the file-local CFB mode with the TEA block cipher.
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }

// Decrypt reverses Encrypt.
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type xteaBlockCrypt struct {
	encbuf []byte
	decbuf []byte
	block  cipher.Block
}

// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
	block, err := xtea.NewCipher(key)
	if err != nil {
		return nil, err
	}
	return &xteaBlockCrypt{
		block:  block,
		encbuf: make([]byte, xtea.BlockSize),
		decbuf: make([]byte, 2*xtea.BlockSize),
	}, nil
}

// Encrypt applies the file-local CFB mode with the XTEA block cipher.
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) }

// Decrypt reverses Encrypt.
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) }
type simpleXORBlockCrypt struct {
	xortbl []byte
}

// NewSimpleXORBlockCrypt simple xor with key expanding
func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
	// Expand the key into a full-MTU pad via PBKDF2 so every byte of a
	// packet gets its own mask byte.
	pad := pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
	return &simpleXORBlockCrypt{xortbl: pad}, nil
}

// Encrypt XORs src against the expanded key pad.
func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }

// Decrypt is identical to Encrypt: XOR is its own inverse.
func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
type noneBlockCrypt struct{}

// NewNoneBlockCrypt does nothing but copying
func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
	var c noneBlockCrypt
	return &c, nil
}

// Encrypt copies src into dst without any transformation.
func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }

// Decrypt copies src into dst without any transformation.
func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
// packet encryption with local CFB mode
// Each full block of plaintext is XORed with the encryption of the previous
// ciphertext block (the chain is seeded by encrypting initialVector), and
// the sub-block tail is XORed with the last keystream block. buf must hold
// at least one cipher block of scratch space; dst and src may alias.
func encrypt(block cipher.Block, dst, src, buf []byte) {
	blocksize := block.BlockSize()
	tbl := buf[:blocksize]
	// Seed the keystream from the fixed IV.
	block.Encrypt(tbl, initialVector)
	n := len(src) / blocksize
	base := 0
	for i := 0; i < n; i++ {
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		// The next keystream block is derived from the ciphertext just produced.
		block.Encrypt(tbl, dst[base:])
		base += blocksize
	}
	// Remaining tail shorter than one block.
	xor.BytesSrc0(dst[base:], src[base:], tbl)
}
func decrypt(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize()
tbl := buf[:blocksize]
next := buf[blocksize:]
block.Encrypt(tbl, initialVector)
n := len(src) / blocksize
base := 0
for i := 0; i < n; i++ {
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
}
xor.BytesSrc0(dst[base:], src[base:], tbl)
} | vendor/github.com/xtaci/kcp-go/crypt.go | 0.752922 | 0.403449 | crypt.go | starcoder |
package aoc2019
import (
"io"
"io/ioutil"
"math"
"os"
"sort"
"github.com/pkg/errors"
)
// day10MonitorGrid holds the asteroid map as a flat, row-major byte slice
// ('#' marks an asteroid) together with the row width used for converting
// between flat indices and (x, y) coordinates.
type day10MonitorGrid struct {
	asteroidMap []byte
	width       int
}
// asteroidCount reports how many cells of the map currently contain an
// asteroid ('#').
func (d day10MonitorGrid) asteroidCount() int {
	count := 0
	for _, cell := range d.asteroidMap {
		if cell != '#' {
			continue
		}
		count++
	}
	return count
}
// clone returns a deep copy of the grid so that destructive operations on
// the copy (like getCleanedGrid's blanking) do not modify the receiver.
func (d day10MonitorGrid) clone() *day10MonitorGrid {
	grid := &day10MonitorGrid{asteroidMap: make([]byte, len(d.asteroidMap)), width: d.width}
	// Builtin copy instead of an element-by-element loop.
	copy(grid.asteroidMap, d.asteroidMap)
	return grid
}
// getAsteroidPositions returns the flat indices of all asteroid cells.
func (d day10MonitorGrid) getAsteroidPositions() []int {
	var positions []int
	for pos, cell := range d.asteroidMap {
		if cell == '#' {
			positions = append(positions, pos)
		}
	}
	return positions
}
// getCleanedGrid returns a copy of the grid in which every asteroid that is
// hidden behind another asteroid, as seen from the observer at (x, y), has
// been blanked out ('-'), and the observer itself is marked '@'. Counting
// '#' on the result therefore yields the number of visible asteroids.
func (d day10MonitorGrid) getCleanedGrid(x, y int) *day10MonitorGrid {
	// Clone the map to work on
	var grid = d.clone()
	// Collect positions of all known asteroids
	var knownPositions = grid.getAsteroidPositions()
	// Mark observer (does not count into observable asteroids)
	grid.asteroidMap[grid.coordToPos(x, y)] = '@'
	// Iterate all positions and remove covered (invisible) asteroids
	for _, pos := range knownPositions {
		var aX, aY = d.posToCoord(pos)
		if grid.isObstructed(x, y, aX, aY) {
			grid.asteroidMap[pos] = '-'
		}
	}
	return grid
}
// isObstructed reports whether the line of sight from the observer at
// (observX, observY) to the asteroid at (x, y) is blocked by another
// asteroid. It walks from the observer towards the target in the smallest
// integer step lying exactly on the line (the delta divided by its gcd) and
// returns true at the first '#' encountered before reaching the target.
func (d *day10MonitorGrid) isObstructed(observX, observY, x, y int) bool {
	var distX, distY = x - observX, y - observY
	if distX == 0 && distY == 0 {
		// No steps, observer equals asteroid, needless calculation
		return false
	}
	var (
		div          = int(math.Abs(float64(greatestCommonDivisor(int64(distX), int64(distY)))))
		stepX, stepY = distX / div, distY / div
	)
	for i := 1; i < math.MaxInt64; i++ {
		var rPosX, rPosY = observX + stepX*i, observY + stepY*i
		if rPosX < 0 || rPosX >= d.width || rPosY < 0 || rPosY >= len(d.asteroidMap)/d.width {
			// Position outside grid, stop searching. This should be
			// unreachable: the step sequence passes through (x, y) before it
			// can leave the grid.
			panic(errors.Errorf("Observed position ran out of bounds (obsX=%d obsY=%d x=%d y=%d div=%d stepX=%d stepY=%d)", observX, observY, x, y, div, stepX, stepY))
		}
		if rPosX == x && rPosY == y {
			// Reached the target without hitting anything in between.
			return false
		}
		if d.asteroidMap[d.coordToPos(rPosX, rPosY)] == '#' {
			return true
		}
	}
	panic(errors.Errorf("Unreachable end was reached"))
}
// coordToPos converts grid coordinates into a flat row-major slice index.
func (d day10MonitorGrid) coordToPos(x, y int) int { return y*d.width + x }

// posToCoord converts a flat row-major slice index back into (x, y) coordinates.
func (d day10MonitorGrid) posToCoord(pos int) (int, int) { return pos % d.width, pos / d.width }
// step2deg maps a step vector (x, y) to the laser angle in degrees,
// arranged so the sweep starts pointing straight up and grows clockwise.
func (d day10MonitorGrid) step2deg(x, y int) float64 {
	rad := math.Atan2(float64(x), float64(y))
	return 180 - rad*(180/math.Pi)
}
// day10ReadAsteroidMap parses an ASCII asteroid map ('#' marks an asteroid)
// from in into a flat day10MonitorGrid. The grid width is inferred from the
// position of the first newline.
// NOTE(review): '\r' is not stripped, so the input is assumed to use plain
// LF line endings — confirm for Windows-sourced inputs.
func day10ReadAsteroidMap(in io.Reader) (*day10MonitorGrid, error) {
	var grid = &day10MonitorGrid{}
	raw, err := ioutil.ReadAll(in)
	if err != nil {
		return nil, errors.Wrap(err, "Unable to read asteroid map")
	}
	for _, c := range raw {
		if c == '\n' {
			// First newline fixes the row width; later ones must match.
			if grid.width == 0 {
				grid.width = len(grid.asteroidMap)
			}
			// Skip newlines for our representation
			continue
		}
		grid.asteroidMap = append(grid.asteroidMap, c)
	}
	return grid, nil
}
// solveDay10Part1Coordinate parses the asteroid map from inFile, finds the
// asteroid from which the most other asteroids are directly visible, and
// returns the parsed grid together with the flat index of that best
// monitoring position.
func solveDay10Part1Coordinate(inFile string) (*day10MonitorGrid, int, error) {
	f, err := os.Open(inFile)
	if err != nil {
		return nil, 0, errors.Wrap(err, "Unable to open input file")
	}
	defer f.Close()
	grid, err := day10ReadAsteroidMap(f)
	if err != nil {
		return nil, 0, errors.Wrap(err, "Unable to read asteroid map")
	}
	var (
		bestMonitorPos    int
		bestAsteroidCount int
	)
	// Try every asteroid as the observer and count what remains visible
	// after blanking obstructed asteroids.
	for _, pos := range grid.getAsteroidPositions() {
		var aX, aY = grid.posToCoord(pos)
		rGrid := grid.getCleanedGrid(aX, aY)
		if c := rGrid.asteroidCount(); c > bestAsteroidCount {
			bestMonitorPos = pos
			bestAsteroidCount = c
		}
	}
	return grid, bestMonitorPos, nil
}
// solveDay10Part1 returns the number of asteroids observable from the best
// monitoring position in the map stored at inFile.
func solveDay10Part1(inFile string) (int, error) {
	grid, bestMonitorPos, err := solveDay10Part1Coordinate(inFile)
	if err != nil {
		return 0, err
	}
	x, y := grid.posToCoord(bestMonitorPos)
	return grid.getCleanedGrid(x, y).asteroidCount(), nil
}
func solveDay10Part2(inFile string) (int, error) {
grid, bestMonitorPos, err := solveDay10Part1Coordinate(inFile)
if err != nil {
return 0, err
}
var (
mX, mY = grid.posToCoord(bestMonitorPos)
destroyed []int
)
// Mark monitor / laser -- cannot be destroyed
grid.asteroidMap[bestMonitorPos] = 'M'
// Gradually destroy asteroids
for grid.asteroidCount() > 0 {
asteroidsInSight := grid.getCleanedGrid(mX, mY).getAsteroidPositions()
var degPos [][2]int
for _, pos := range asteroidsInSight {
var aX, aY = grid.posToCoord(pos)
degPos = append(degPos, [2]int{
pos,
int(grid.step2deg(aX-mX, aY-mY) * 1000000), // Degree to asteroid in 6-digit precision
})
}
// Sort by degree low-to-high -- represents order of destruction
sort.Slice(degPos, func(i, j int) bool { return degPos[i][1] < degPos[j][1] })
for _, dp := range degPos {
grid.asteroidMap[dp[0]] = '*' // Mark asteroids destroyed
destroyed = append(destroyed, dp[0])
}
}
destr200X, destr200Y := grid.posToCoord(destroyed[199])
return 100*destr200X + destr200Y, nil
} | day10.go | 0.711431 | 0.495239 | day10.go | starcoder |
package compilergraph
import (
"fmt"
"github.com/cayleygraph/cayley/quad"
)
// Cayley type mappings:
// GraphNodeId <-> quad.Raw
// Predicate <-> quad.IRI
// TaggedValue <-> quad.Raw
// Other values <-> quad.Value
// nodeIdToValue returns a Cayley value for a Graph Node ID.
func nodeIdToValue(nodeId GraphNodeId) quad.Value {
	return quad.IRI(string(nodeId))
}

// valueToNodeId returns a Graph Node ID for a Cayley value.
func valueToNodeId(value quad.Value) GraphNodeId {
	return GraphNodeId(iriToString(value))
}

// predicateToValue converts a Predicate to a Cayley value.
func predicateToValue(predicate Predicate) quad.Value {
	return quad.IRI(predicate)
}

// valueToPredicateString returns the string form of the predicate for a Cayley value.
func valueToPredicateString(predicateValue quad.Value) string {
	return iriToString(predicateValue)
}

// valueToPredicate returns a Predicate for a Cayley value.
func valueToPredicate(predicateValue quad.Value) Predicate {
	return Predicate(string(predicateValue.(quad.IRI)))
}

// iriToString returns the string form of an IRI.
func iriToString(iri quad.Value) string {
	iriValue := iri.(quad.IRI)
	return string(iriValue)
}
// valueToTaggedValueData returns the internal string presentation of a
// tagged value for a Cayley value.
func valueToTaggedValueData(value quad.Value) string {
	str := value.(quad.String)
	return string(str)
}

// taggedValueDataToValue returns a Cayley value for the given internal
// tagged value representation.
func taggedValueDataToValue(taggedValue string) quad.Value {
	return quad.Raw(taggedValue)
}

// valueToOriginalString converts the Cayley value back into its original string value.
func valueToOriginalString(value quad.Value) string {
	str := value.(quad.String)
	return string(str)
}

// buildGraphValueForValue returns the GraphValue for a Cayley value.
func buildGraphValueForValue(value quad.Value) GraphValue {
	wrapped := GraphValue{value}
	return wrapped
}
// taggedToQuadValues converts a slice of TaggedValue's under a layer into
// their Cayley values.
func taggedToQuadValues(values []TaggedValue, gl *graphLayer) []quad.Value {
	converted := make([]quad.Value, 0, len(values))
	for _, v := range values {
		converted = append(converted, gl.getTaggedKey(v))
	}
	return converted
}
// graphIdsToQuadValues converts a slice of Graph Node IDs into their Cayley values.
func graphIdsToQuadValues(values []GraphNodeId) []quad.Value {
	converted := make([]quad.Value, len(values))
	for i := range values {
		converted[i] = nodeIdToValue(values[i])
	}
	return converted
}
// toQuadValues converts a slice of arbitrary values into their Cayley values.
// GraphNodeId and TaggedValue elements get their dedicated encodings; any
// other element must be representable by quad.AsValue, otherwise we panic.
func toQuadValues(values []interface{}, gl *graphLayer) []quad.Value {
	quadValues := make([]quad.Value, len(values))
	for index, v := range values {
		switch v := v.(type) {
		case GraphNodeId:
			quadValues[index] = nodeIdToValue(v)
		case TaggedValue:
			quadValues[index] = gl.getTaggedKey(v)
		default:
			vle, ok := quad.AsValue(v)
			if !ok {
				panic(fmt.Sprintf("Unknown value %v", v))
			}
			quadValues[index] = vle
		}
	}
	return quadValues
} | compilergraph/quad.go | 0.85931 | 0.464719 | quad.go | starcoder |
package voltage
import . "github.com/deinspanjer/units/unit"
// Voltage represents a unit of voltage (in volt, V)
type Voltage Unit

// SI prefixed multiples and submultiples of the volt, all expressed
// relative to the base unit Volt (== 1e0).
const (
	// SI
	Yoctovolt = Volt * 1e-24
	Zeptovolt = Volt * 1e-21
	Attovolt  = Volt * 1e-18
	Femtovolt = Volt * 1e-15
	Picovolt  = Volt * 1e-12
	Nanovolt  = Volt * 1e-9
	Microvolt = Volt * 1e-6
	Millivolt = Volt * 1e-3
	Centivolt = Volt * 1e-2
	Decivolt  = Volt * 1e-1
	Volt      Voltage = 1e0
	Decavolt  = Volt * 1e1
	Hectovolt = Volt * 1e2
	Kilovolt  = Volt * 1e3
	Megavolt  = Volt * 1e6
	Gigavolt  = Volt * 1e9
	Teravolt  = Volt * 1e12
	Petavolt  = Volt * 1e15
	Exavolt   = Volt * 1e18
	Zettavolt = Volt * 1e21
	Yottavolt = Volt * 1e24
)
// Conversion getters: each method below expresses the voltage in the
// named unit by dividing out that unit's constant.

// Yoctovolts returns the voltage in yV
func (v Voltage) Yoctovolts() float64 {
	return float64(v / Yoctovolt)
}

// Zeptovolts returns the voltage in zV
func (v Voltage) Zeptovolts() float64 {
	return float64(v / Zeptovolt)
}

// Attovolts returns the voltage in aV
func (v Voltage) Attovolts() float64 {
	return float64(v / Attovolt)
}

// Femtovolts returns the voltage in fV
func (v Voltage) Femtovolts() float64 {
	return float64(v / Femtovolt)
}

// Picovolts returns the voltage in pV
func (v Voltage) Picovolts() float64 {
	return float64(v / Picovolt)
}

// Nanovolts returns the voltage in nV
func (v Voltage) Nanovolts() float64 {
	return float64(v / Nanovolt)
}

// Microvolts returns the voltage in µV
func (v Voltage) Microvolts() float64 {
	return float64(v / Microvolt)
}

// Millivolts returns the voltage in mV
func (v Voltage) Millivolts() float64 {
	return float64(v / Millivolt)
}

// Centivolts returns the voltage in cV
func (v Voltage) Centivolts() float64 {
	return float64(v / Centivolt)
}

// Decivolts returns the voltage in dV
func (v Voltage) Decivolts() float64 {
	return float64(v / Decivolt)
}

// Volts returns the voltage in V (the base unit; identity conversion)
func (v Voltage) Volts() float64 {
	return float64(v)
}

// Decavolts returns the voltage in daV
func (v Voltage) Decavolts() float64 {
	return float64(v / Decavolt)
}

// Hectovolts returns the voltage in hV
func (v Voltage) Hectovolts() float64 {
	return float64(v / Hectovolt)
}

// Kilovolts returns the voltage in kV
func (v Voltage) Kilovolts() float64 {
	return float64(v / Kilovolt)
}

// Megavolts returns the voltage in MV
func (v Voltage) Megavolts() float64 {
	return float64(v / Megavolt)
}

// Gigavolts returns the voltage in GV
func (v Voltage) Gigavolts() float64 {
	return float64(v / Gigavolt)
}

// Teravolts returns the voltage in TV
func (v Voltage) Teravolts() float64 {
	return float64(v / Teravolt)
}

// Petavolts returns the voltage in PV
func (v Voltage) Petavolts() float64 {
	return float64(v / Petavolt)
}

// Exavolts returns the voltage in EV
func (v Voltage) Exavolts() float64 {
	return float64(v / Exavolt)
}

// Zettavolts returns the voltage in ZV
func (v Voltage) Zettavolts() float64 {
	return float64(v / Zettavolt)
}

// Yottavolts returns the voltage in YV
func (v Voltage) Yottavolts() float64 {
	return float64(v / Yottavolt)
} | voltage/voltage.go | 0.85183 | 0.637257 | voltage.go | starcoder |
package metric
import (
"math/rand"
"time"
)
// Tuning knobs for the sampled two-means split in GetSplittingVector.
const (
	cosineMetricsMaxIteration      = 200
	cosineMetricsMaxTargetSample   = 100
	cosineMetricsTwoMeansThreshold = 0.7
	cosineMetricsCentroidCalcRatio = 0.0001
)

// init seeds the global math/rand source used by the sampling below.
// NOTE(review): rand.Seed is deprecated and a no-op requirement since
// Go 1.20 (the global source is auto-seeded) — candidate for removal
// once the minimum toolchain allows.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// cosineDistance implements Metric using (negated) cosine similarity.
type cosineDistance struct {
	dim int // dimensionality of the vectors this metric operates on
}

// NewCosineMetric returns cosineDistance.
// NOTE: We assume that the given vectors are already normalized, i.e. the norm equals 1
// The returned error is always nil in the current implementation.
func NewCosineMetric(dim int) (Metric, error) {
	return &cosineDistance{
		dim: dim,
	}, nil
}
// CalcDistance returns the negated inner product of v1 and v2.
// For normalized vectors a larger inner product means a smaller angular
// distance, so negating it makes smaller results mean "closer".
func (c *cosineDistance) CalcDistance(v1, v2 []float64) float64 {
	var dot float64
	for i, a := range v1 {
		dot += a * v2[i]
	}
	return -dot
}
// GetSplittingVector runs a sampled two-means pass over vs and returns the
// difference of the two resulting centroids, i.e. a normal vector of a
// hyperplane separating the two clusters.
// NOTE(review): rand.Intn panics for len(vs) < 2 — callers presumably
// guarantee at least two vectors; confirm.
func (c *cosineDistance) GetSplittingVector(vs [][]float64) []float64 {
	lvs := len(vs)
	// init centroids: two distinct random members of vs
	k := rand.Intn(lvs)
	l := rand.Intn(lvs - 1)
	if k == l {
		l++
	}
	c0 := vs[k]
	c1 := vs[l]
	for i := 0; i < cosineMetricsMaxIteration; i++ {
		clusterToVecs := map[int][][]float64{}
		// Sample at most cosineMetricsMaxTargetSample vectors per round.
		iter := cosineMetricsMaxTargetSample
		if len(vs) < cosineMetricsMaxTargetSample {
			iter = len(vs)
		}
		// Assign each sampled vector to the nearer centroid.
		// (This inner i shadows the outer iteration counter.)
		for i := 0; i < iter; i++ {
			v := vs[rand.Intn(len(vs))]
			ip0 := c.CalcDistance(c0, v)
			ip1 := c.CalcDistance(c1, v)
			if ip0 > ip1 {
				clusterToVecs[0] = append(clusterToVecs[0], v)
			} else {
				clusterToVecs[1] = append(clusterToVecs[1], v)
			}
		}
		lc0 := len(clusterToVecs[0])
		lc1 := len(clusterToVecs[1])
		// Stop once neither cluster dominates the sample.
		if (float64(lc0)/float64(iter) <= cosineMetricsTwoMeansThreshold) &&
			(float64(lc1)/float64(iter) <= cosineMetricsTwoMeansThreshold) {
			break
		}
		// update centroids
		// A degenerate assignment restarts from two fresh random members.
		if lc0 == 0 || lc1 == 0 {
			k := rand.Intn(lvs)
			l := rand.Intn(lvs - 1)
			if k == l {
				l++
			}
			c0 = vs[k]
			c1 = vs[l]
			continue
		}
		c0 = make([]float64, c.dim)
		it0 := int(float64(lvs) * cosineMetricsCentroidCalcRatio)
		// NOTE(review): unlike it1 below, it0 has no "+1", so for small
		// inputs it0 == 0 and c0 remains the zero vector — confirm intended.
		for i := 0; i < it0; i++ {
			for d := 0; d < c.dim; d++ {
				c0[d] += clusterToVecs[0][rand.Intn(lc0)][d] / float64(it0)
			}
		}
		c1 = make([]float64, c.dim)
		it1 := int(float64(lvs)*cosineMetricsCentroidCalcRatio + 1)
		// NOTE(review): the loop count is derived from lc1 while the divisor
		// it1 is derived from lvs; these generally differ, so c1 is not a
		// plain average — confirm this asymmetry is intentional.
		for i := 0; i < int(float64(lc1)*cosineMetricsCentroidCalcRatio+1); i++ {
			for d := 0; d < c.dim; d++ {
				c1[d] += clusterToVecs[1][rand.Intn(lc1)][d] / float64(it1)
			}
		}
	}
	// The splitting vector is the centroid difference, component-wise.
	ret := make([]float64, c.dim)
	for d := 0; d < c.dim; d++ {
		v := c0[d] - c1[d]
		ret[d] += v
	}
	return ret
}
// CalcDirectionPriority returns the (non-negated) inner product of base
// and target, i.e. how strongly target points in base's direction.
func (c *cosineDistance) CalcDirectionPriority(base, target []float64) float64 {
	var ret float64
	for i := range base {
		ret += base[i] * target[i]
	}
	return ret
} | metric/cosine.go | 0.793506 | 0.434221 | cosine.go | starcoder |
package quantile
import (
"math/rand"
)
/*
"Space-Efficient Online Computation of Quantile Summaries" (<NAME> 2001)
http://infolab.stanford.edu/~datar/courses/cs361a/papers/quantiles.pdf
This implementation is backed by a skiplist to make inserting elements into the
summary faster. Querying is still O(n).
*/
// EPSILON is the precision of the rank returned by our quantile queries
const EPSILON float64 = 0.01

// Entry is an element of the skiplist, see GK paper for description
type Entry struct {
	V     float64 `json:"v"`
	G     int     `json:"g"`
	Delta int     `json:"delta"`
}

// SummarySlice represents how many values are in a [Start, End] range
type SummarySlice struct {
	Start  float64
	End    float64
	Weight int
}

// maxHeight bounds the number of levels a skiplist node can occupy.
const maxHeight = 31

// Skiplist is a pseudo-random data structure used to store nodes and find quickly what we want
type Skiplist struct {
	height int
	head   *SkiplistNode // sentinel node; its next[] slice spans all levels
}

// SkiplistNode is holding the actual value and pointers to the neighbor nodes
type SkiplistNode struct {
	value Entry
	next  []*SkiplistNode
	prev  []*SkiplistNode
}
// NewSkiplist returns a new empty Skiplist whose sentinel head spans
// every possible level.
func NewSkiplist() *Skiplist {
	sentinel := &SkiplistNode{next: make([]*SkiplistNode, maxHeight)}
	return &Skiplist{height: 0, head: sentinel}
}
// Insert adds a new Entry to the Skiplist and yields a pointer to the node where the data was inserted
func (s *Skiplist) Insert(e Entry) *SkiplistNode {
	// Draw a geometric level: count trailing one-bits of a random int,
	// so level k has probability 2^-(k+1).
	level := 0
	n := rand.Int31()
	for n&1 == 1 {
		level++
		n >>= 1
	}
	// Cap growth: the list height rises by at most one per insert.
	if level > s.height {
		s.height++
		level = s.height
	}
	node := &SkiplistNode{
		value: e,
		next:  make([]*SkiplistNode, level+1),
		prev:  make([]*SkiplistNode, level+1),
	}
	// Walk down from the top level, advancing while the next value is
	// <= e.V, and splice the node in at each level <= its drawn level.
	curr := s.head
	for i := s.height; i >= 0; i-- {
		for curr.next[i] != nil && e.V >= curr.next[i].value.V {
			curr = curr.next[i]
		}
		if i > level {
			continue
		}
		node.next[i] = curr.next[i]
		if curr.next[i] != nil && curr.next[i].prev[i] != nil {
			curr.next[i].prev[i] = node
		}
		curr.next[i] = node
		node.prev[i] = curr
	}
	return node
}
// Remove removes a node from the Skiplist by relinking its neighbors at
// every level the node occupies, then clearing the node's own links.
func (s *Skiplist) Remove(node *SkiplistNode) {
	// remove n from each level of the Skiplist
	for i := range node.next {
		prev := node.prev[i]
		next := node.next[i]
		if prev != nil {
			prev.next[i] = next
		}
		if next != nil {
			next.prev[i] = prev
		}
		node.next[i] = nil
		node.prev[i] = nil
	}
} | quantile/summary.go | 0.752013 | 0.459137 | summary.go | starcoder |
package set
import "sort"
// The Op type can be used to represent any of the mutating functions, such
// as Inter. The returned size is the length of the resulting set prefix.
type Op func(data sort.Interface, pivot int) (size int)
// Uniq swaps away duplicate elements in data, returning the size of the
// unique set. data is expected to be pre-sorted, and the resulting set in
// the range [0:size] will remain in sorted order. Uniq, following a
// sort.Sort call, can be used to prepare arbitrary inputs for use as sets.
func Uniq(data sort.Interface) (size int) {
p, l := 0, data.Len()
if l <= 1 {
return l
}
for i := 1; i < l; i++ {
if !data.Less(p, i) {
continue
}
p++
if p < i {
data.Swap(p, i)
}
}
return p + 1
}
// Inter performs an in-place intersection on the two sets [0:pivot] and
// [pivot:Len]; the resulting set will occupy [0:size]. Inter is both
// associative and commutative.
func Inter(data sort.Interface, pivot int) (size int) {
k, l := pivot, data.Len()
p, i, j := 0, 0, k
for i < k && j < l {
switch {
case data.Less(i, j):
i++
case data.Less(j, i):
j++
case p < i:
data.Swap(p, i)
fallthrough
default:
p, i, j = p+1, i+1, j+1
}
}
return p
}
// Union performs an in-place union on the two sets [0:pivot] and
// [pivot:Len]; the resulting set will occupy [0:size]. Union is both
// associative and commutative.
// Note that pivot is unused: the whole slice is simply re-sorted and
// deduplicated, which yields the union of any partition of it.
func Union(data sort.Interface, pivot int) (size int) {
	// BUG(extemporalgenome): Union currently uses a multi-pass implementation
	sort.Sort(data)
	return Uniq(data)
}
// Diff performs an in-place difference on the two sets [0:pivot] and
// [pivot:Len]; the resulting set will occupy [0:size]. Diff is neither
// associative nor commutative.
func Diff(data sort.Interface, pivot int) (size int) {
	k, l := pivot, data.Len()
	p, i, j := 0, 0, k
	for i < k && j < l {
		switch {
		case data.Less(i, j):
			// i only exists in the left set: keep it.
			if p < i {
				data.Swap(p, i)
			}
			p, i = p+1, i+1
		case data.Less(j, i):
			j++
		default:
			// Present in both sets: drop it from the result.
			i, j = i+1, j+1
		}
	}
	// xcopy (defined elsewhere in this package) presumably moves the
	// unconsumed left-set tail [i:k] down to position p and returns the
	// final size — confirm against its definition.
	return xcopy(data, p, i, k, k)
}
// SymDiff performs an in-place symmetric difference on the two sets
// [0:pivot] and [pivot:Len]; the resulting set will occupy [0:size].
// SymDiff is both associative and commutative.
// boundspan, span and slide are package helpers defined outside this chunk.
func SymDiff(data sort.Interface, pivot int) (size int) {
	// BUG(extemporalgenome): SymDiff currently uses a multi-pass implementation
	// First collect the intersection in [0:i], then sort the remainder,
	// dedupe it, and subtract the intersection from it.
	i := Inter(data, pivot)
	l := data.Len()
	b := boundspan{data, span{i, l}}
	sort.Sort(b)
	size = Uniq(b)
	slide(data, 0, i, size)
	l = i + size
	sort.Sort(boundspan{data, span{size, l}})
	return Diff(data, size)
} | vendor/github.com/xtgo/set/mutators.go | 0.737631 | 0.545346 | mutators.go | starcoder |
package oddsengine
// Unit represents a specific unit within a game, identifing a lot of specific
// information related to the unit.
type Unit struct {
	Alias            string // short key used throughout the engine (e.g. "inf", "tan")
	Name             string // human-readable display name
	Cost             int
	Attack           int
	Defend           int
	IsShip           bool
	IsAAA            bool
	IsSub            bool
	IsAircraft       bool
	IsBunker         bool
	CapitalShip      bool
	CanBombard       bool
	CanTakeTerritory bool
	// PlusOneRolls/PlusOneDefend compute how many of this unit's rolls get
	// a +1 boost, given a formation map of alias -> count.
	PlusOneRolls  func(map[string]int) int
	PlusOneDefend func(map[string]int) int
	// Number of units receiving a defensive boost if this is a bunker
	Capacity int
	// MultiRoll is the number of dice the unit can roll, and select the best
	// roll for it's hit.
	MultiRoll int
}
// Units is a container for multiple Unit structs
type Units []Unit

// Swap implementing Sortable
func (p Units) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}

// Len implementing Sortable
func (p Units) Len() int {
	return len(p)
}
// Create three different Sortable containers for our Units; each embeds
// Units (providing Len/Swap) and supplies its own Less.

// ByDefendingPower sorts the units by the higest Defend value of the unit
type ByDefendingPower struct{ Units }

// ByAttackingPower sorts the units by the higest Attack value of the unit
type ByAttackingPower struct{ Units }

// ByCost sorts the units by the lowest Cost value of the unit
type ByCost struct{ Units }
// isAAAAlias reports whether alias names an anti-aircraft artillery
// variant. These units always sort last, whatever the sort criterion.
// Factored out of the three Less methods below, which previously each
// repeated the same magic alias list.
func isAAAAlias(alias string) bool {
	switch alias {
	case "aaa", "raaa", "aag":
		return true
	}
	return false
}

// Less implementing Sortable: ascending Defend, ties broken by Cost,
// anti-aircraft units last.
func (p ByDefendingPower) Less(i, j int) bool {
	switch {
	case isAAAAlias(p.Units[i].Alias):
		return false
	case isAAAAlias(p.Units[j].Alias):
		return true
	case p.Units[i].Defend == p.Units[j].Defend:
		return p.Units[i].Cost < p.Units[j].Cost
	}
	return p.Units[i].Defend < p.Units[j].Defend
}

// Less implementing Sortable: ascending Attack, ties broken by Cost,
// anti-aircraft units last.
func (p ByAttackingPower) Less(i, j int) bool {
	switch {
	case isAAAAlias(p.Units[i].Alias):
		return false
	case isAAAAlias(p.Units[j].Alias):
		return true
	case p.Units[i].Attack == p.Units[j].Attack:
		return p.Units[i].Cost < p.Units[j].Cost
	}
	return p.Units[i].Attack < p.Units[j].Attack
}

// Less implementing Sortable: ascending Cost, ties broken by Attack,
// anti-aircraft units last.
func (p ByCost) Less(i, j int) bool {
	switch {
	case isAAAAlias(p.Units[i].Alias):
		return false
	case isAAAAlias(p.Units[j].Alias):
		return true
	case p.Units[i].Cost == p.Units[j].Cost:
		return p.Units[i].Attack < p.Units[j].Attack
	}
	return p.Units[i].Cost < p.Units[j].Cost
}
// getUnitsForGame returns a Units type containing all the units that are
// valid for a particular game identified by the game string passed in.
// An unrecognized game name yields a nil Units slice.
func getUnitsForGame(game string) (p Units) {
	switch game {
	case "1940deluxe":
		return get1940DeluxeUnits()
	case "deluxe":
		return getDeluxeUnits()
	case "1940":
		return get1940Units()
	case "1941":
		return get1941Units()
	case "1942":
		return get1942Units()
	}
	return p
}
// Delete removes every unit matching alias from p and returns the
// shortened slice (the backing array is reused).
//
// The previous implementation mutated p while ranging over it, which
// made the iteration read stale data after the first removal and could
// miss later duplicates of the same alias. This version filters in a
// single pass.
func (p Units) Delete(alias string) Units {
	kept := p[:0]
	for _, unit := range p {
		if unit.Alias != alias {
			kept = append(kept, unit)
		}
	}
	// Zero the vacated tail so removed entries do not pin memory
	// (mirrors the old implementation's clearing of the trailing slot).
	for i := len(kept); i < len(p); i++ {
		p[i] = Unit{}
	}
	return kept
}
// HasUnit tells us if a Units type has a unit identified by the alias
// provided
func (p Units) HasUnit(alias string) (has bool) {
	for _, u := range p {
		if u.Alias == alias {
			return true
		}
	}
	return false
}
// Find a specific unit, by alias, within the slice of Units
//
// NOTE(review): this returns a pointer to a local *copy* of the matched
// element, not into the slice, so mutations through the result never
// affect p. When no unit matches, the result points at a zero-valued
// Unit rather than being nil. Confirm callers rely on both behaviors
// before changing either.
func (p Units) Find(alias string) *Unit {
	var targetUnit Unit
	for _, unit := range p {
		if unit.Alias == alias {
			targetUnit = unit
			break
		}
	}
	return &targetUnit
}
// get1940Units returns the units that are valid for the game Axis and Allies
// 1940. It starts from the 1942 roster, replaces the carrier (now a capital
// ship), and adds the 1940-specific units and research variants.
func get1940Units() Units {
	p := get1942Units()
	// The carrier in 1940 is now a capital ship
	p = p.Delete("car")
	p = append(p,
		// While not really a "unit" per se, we will treat it like one. It
		// should be deleted from any unit mapping once it is used
		Unit{
			Alias:  "kam",
			Name:   "Kamikaze",
			Cost:   0,
			Attack: 0,
			Defend: 2,
		},
		Unit{
			Alias:  "mec",
			Name:   "Mechanized Infantry",
			Cost:   4,
			Attack: 1,
			Defend: 2,
			// The mec defers all it's +1 shots to the inf, only taking whatever
			// shots are available
			PlusOneRolls: func(u map[string]int) int {
				var shots int
				var remainingShots int
				var pairedArtilleryShotsAvailable int
				numInf := numAllUnitsInFormation(u, "inf")
				numMec := numAllUnitsInFormation(u, "mec")
				numArt := numAllUnitsInFormation(u, "art")
				numAArt := numAllUnitsInFormation(u, "aart")
				// Each artillery boosts one pairing; advanced artillery boosts two.
				pairedArtilleryShotsAvailable = numArt + (numAArt * 2)
				if pairedArtilleryShotsAvailable == 0 {
					return shots
				}
				// 2 mec 3 art
				// Infantry consume pairings first; mec take the remainder.
				if numInf > 0 {
					remainingShots = pairedArtilleryShotsAvailable - numInf
				} else {
					remainingShots = pairedArtilleryShotsAvailable
				}
				if remainingShots <= 0 {
					return shots
				}
				if remainingShots < numMec {
					shots = remainingShots
				} else {
					shots = numMec
				}
				return shots
			},
			CanTakeTerritory: true,
		},
		Unit{
			Alias:      "tac",
			Name:       "<NAME>",
			Cost:       11,
			Attack:     3,
			Defend:     3,
			IsAircraft: true,
			// Tactical bombers gain +1 when paired with a fighter or tank.
			PlusOneRolls: func(u map[string]int) int {
				var shots int
				numTac := numAllUnitsInFormation(u, "tac")
				numFig := numAllUnitsInFormation(u, "fig")
				numTan := numAllUnitsInFormation(u, "tan")
				totalNumBoosters := numFig + numTan
				if totalNumBoosters == 0 {
					return shots
				}
				// The number of shots are limited by the total number of tac
				// within the unit group
				shots = numTac
				if totalNumBoosters < numTac {
					shots = totalNumBoosters
				}
				return shots
			},
		},
		Unit{
			Alias:       "car",
			Name:        "Aircraft Carrier",
			Cost:        16,
			Attack:      0,
			Defend:      2,
			IsShip:      true,
			CanBombard:  false,
			CapitalShip: true,
		},
		Unit{
			Alias:      "hbom",
			Name:       "Heavy Bomber",
			Cost:       12,
			Attack:     4,
			Defend:     1,
			IsAircraft: true,
			MultiRoll:  2,
		},
		Unit{
			Alias:  "raaa",
			Name:   "Anti-Aircraft Artillery",
			Cost:   5,
			Attack: 0,
			Defend: 2,
			IsAAA:  true,
		},
		Unit{
			Alias:      "jfig",
			Name:       "Jet Fighters",
			Cost:       10,
			IsAircraft: true,
			Attack:     4,
			Defend:     4,
		},
		Unit{
			// NOTE(review): "Sumbarine" looks like a typo for "Submarine";
			// this is a display string, so confirm nothing keys on it
			// before correcting.
			Alias:  "ssub",
			Name:   "Super Sumbarine",
			IsShip: true,
			IsSub:  true,
			Cost:   6,
			Attack: 3,
			Defend: 1,
		},
		Unit{
			Alias:  "imec",
			Name:   "Mechanized Infantry",
			Cost:   4,
			Attack: 1,
			Defend: 2,
			// The imec defers all it's +1 shots to the inf, only taking
			// whatever shots are available or pairs with a tank for shots
			PlusOneRolls: func(u map[string]int) int {
				var shots int
				var remainingShots int
				var pairedArtilleryShotsAvailable int
				numInf := numAllUnitsInFormation(u, "inf")
				numTan := numAllUnitsInFormation(u, "tan")
				numArt := numAllUnitsInFormation(u, "art")
				numAArt := numAllUnitsInFormation(u, "aart")
				numMec := numAllUnitsInFormation(u, "imec")
				pairedArtilleryShotsAvailable = numArt + (numAArt * 2)
				// Artillery pairings left over after infantry, plus one per tank.
				remainingShots = pairedArtilleryShotsAvailable - numInf
				if remainingShots <= 0 {
					remainingShots = numTan
				} else {
					remainingShots += numTan
				}
				if remainingShots <= 0 {
					return shots
				}
				if remainingShots < numMec {
					shots = remainingShots
				} else {
					shots = numMec
				}
				return shots
			},
			CanTakeTerritory: true,
		},
		Unit{
			Alias:            "aart",
			Name:             "Advanced Artillery",
			Cost:             4,
			Attack:           2,
			Defend:           2,
			CanTakeTerritory: true,
		},
	)
	return p
}
// get1942Units returns the units that are valid for the game Axis and Allies
// 1942. It extends the 1941 roster, re-pricing the battleship and carrier
// and adding AAA, artillery and cruisers.
func get1942Units() Units {
	p := get1941Units()
	// The 1942 Battleship gains the ability to bombard, it is also more
	// expensive
	p = p.Delete("bat")
	// The 1942 Battleship is more expensive
	p = p.Delete("car")
	p = append(p,
		Unit{
			Alias:  "aaa",
			Name:   "Anti-Aircraft Artillery",
			Cost:   5,
			Attack: 0,
			Defend: 1,
			IsAAA:  true,
		},
		Unit{
			Alias:            "art",
			Name:             "Artillery",
			Cost:             4,
			Attack:           2,
			Defend:           2,
			CanTakeTerritory: true,
		},
		Unit{
			Alias:      "cru",
			Name:       "Cruiser",
			Cost:       12,
			Attack:     3,
			Defend:     3,
			IsShip:     true,
			CanBombard: true,
		},
		Unit{
			Alias:       "bat",
			Name:        "Battleship",
			Cost:        20,
			Attack:      4,
			Defend:      4,
			IsShip:      true,
			CanBombard:  true,
			CapitalShip: true,
		},
		Unit{
			Alias:       "car",
			Name:        "Aircraft Carrier",
			Cost:        14,
			Attack:      1,
			Defend:      2,
			IsShip:      true,
			CanBombard:  false,
			CapitalShip: false,
		},
	)
	return p
}
// get1941Units returns the units that are valid for the game Axis and Allies
// 1941. This is the base roster the later game variants build on.
func get1941Units() Units {
	return Units{
		Unit{
			Alias:  "inf",
			Name:   "Infantry",
			Cost:   3,
			Attack: 1,
			Defend: 2,
			// Even though in 1941, there is no +1 for INF, having this here
			// does no harm because the conditions will never be met
			PlusOneRolls: func(u map[string]int) int {
				var shots int
				var pairedArtilleryShotsAvailable int
				numInf := numAllUnitsInFormation(u, "inf")
				numArt := numAllUnitsInFormation(u, "art")
				numAArt := numAllUnitsInFormation(u, "aart")
				pairedArtilleryShotsAvailable = numArt + (numAArt * 2)
				if pairedArtilleryShotsAvailable == 0 {
					return shots
				}
				// Assume they will all be paired
				shots = numInf
				// If they can't all be paired, return the total number of
				// possible pairings
				if pairedArtilleryShotsAvailable < numInf {
					shots = pairedArtilleryShotsAvailable
				}
				return shots
			},
			CanTakeTerritory: true,
		},
		Unit{
			Alias:            "tan",
			Name:             "Tank",
			Cost:             6,
			Attack:           3,
			Defend:           3,
			CanTakeTerritory: true,
		},
		Unit{
			Alias:      "fig",
			Name:       "Fighter",
			Cost:       10,
			Attack:     3,
			Defend:     4,
			IsAircraft: true,
		},
		Unit{
			Alias:      "bom",
			Name:       "<NAME>",
			Cost:       12,
			Attack:     4,
			Defend:     1,
			IsAircraft: true,
		},
		Unit{
			Alias:  "sub",
			Name:   "Submarine",
			Cost:   6,
			IsShip: true,
			IsSub:  true,
			Attack: 2,
			Defend: 1,
		},
		Unit{
			Alias:  "des",
			Name:   "Destroyer",
			Cost:   8,
			Attack: 2,
			Defend: 2,
			IsShip: true,
		},
		Unit{
			Alias:       "car",
			Name:        "Aircraft Carrier",
			Cost:        12,
			Attack:      1,
			Defend:      2,
			IsShip:      true,
			CanBombard:  false,
			CapitalShip: false,
		},
		Unit{
			Alias:       "bat",
			Name:        "Battleship",
			Cost:        16,
			Attack:      4,
			Defend:      4,
			IsShip:      true,
			CanBombard:  false,
			CapitalShip: true,
		},
	}
} | units.go | 0.671363 | 0.617369 | units.go | starcoder |
package deep
import (
"fmt"
"unsafe"
"github.com/dhairyyas/leabra-sleepmod/leabra"
)
// deep.Neuron holds the extra neuron (unit) level variables for DeepLeabra computation.
// DeepLeabra includes both attentional and predictive learning functions of the deep layers
// and thalamocortical circuitry.
// These are maintained in a separate parallel slice from the leabra.Neuron variables.
//
// NOTE: the VarByIndex accessor below assumes this struct stays a flat,
// padding-free sequence of float32 fields in exactly this order.
type Neuron struct {
	ActNoAttn  float32 `desc:"non-attention modulated activation of the superficial-layer neurons -- i.e., the activation prior to any modulation by the DeepAttn modulatory signal. Using this as a driver of Burst when there is DeepAttn modulation of superficial-layer activations prevents a positive-feedback loop that can be problematic."`
	Burst      float32 `desc:"Deep layer bursting activation values, representing activity of layer 5b intrinsic bursting (5IB) neurons, which project into the thalamus (TRC) and other deep layers locally. Somewhat confusingly, this is computed on the Superficial layer neurons, as a thresholded function of the unit activation. Burst is only updated during the bursting quarter(s) (typically the 4th quarter) of the alpha cycle, and it is sent via BurstCtxt projections to Deep layers (representing activation of layer 6 CT corticothalamic neurons) to drive Ctxt value there, and via BurstTRC projections to TRC layers to drive the plus-phase outcome activation (e.g., in Pulvinar) for predictive learning."`
	BurstPrv   float32 `desc:"Burst from the previous alpha trial -- this is typically used for learning in the BurstCtxt projection."`
	CtxtGe     float32 `desc:"Current excitatory conductance for temporally-delayed local integration of Burst signals sent via BurstCtxt projection into separate Deep layer neurons, which represent the activation of layer 6 CT corticothalamic neurons. CtxtGe is updated at end of a DeepBurst quarter, and thus takes effect during subsequent quarter(s) until updated again."`
	TRCBurstGe float32 `desc:"Total excitatory conductance received from Burst activations into TRC neurons, continuously updated during the bursting quarter(s). This drives plus-phase, outcome activation of TRC neurons."`
	BurstSent  float32 `desc:"Last Burst activation value sent, for computing TRCBurstGe using efficient delta mechanism."`
	AttnGe     float32 `desc:"Total excitatory conductance received from from deep layer activations (representing layer 6 regular spiking CT corticothalamic neurons) via DeepAttn projections. This is sent continuously all quarters from deep layers using standard delta-based Ge computation, and drives both DeepAttn and DeepLrn values."`
	DeepAttn   float32 `desc:"DeepAttn = Min + (1-Min) * (AttnGe / MAX(AttnGe)). This is the current attention modulatory value in Super neurons, based on inputs from deep layer 6 CT corticothalamic, regular spiking neurons that represents the net attentional filter applied to the superficial layers. This value directly multiplies the superficial layer activations (Act) (ActNoAttn represents value prior to this multiplication). Value is computed from AttnGe received via DeepAttn projections from Deep layers."`
	DeepLrn    float32 `desc:"DeepLrn = AttnGe / MAX(AttnGe) across layer. This version of DeepAttn modulates learning rates instead of activations -- learning is assumed to be more strongly affected than activation, so it lacks the positive offset that DeepAttn has."`
}
var (
	// NeuronVars names the Neuron fields, in struct order; the index of a
	// name here is also its field offset index for VarByIndex.
	NeuronVars = []string{"ActNoAttn", "Burst", "BurstPrv", "CtxtGe", "TRCBurstGe", "BurstSent", "AttnGe", "DeepAttn", "DeepLrn"}
	// NeuronVarsMap maps a variable name to its index in NeuronVars.
	NeuronVarsMap map[string]int
	// NeuronVarsAll is the base leabra variable names followed by ours.
	NeuronVarsAll []string
)

// init builds the name->index map and the combined leabra+deep name list.
func init() {
	NeuronVarsMap = make(map[string]int, len(NeuronVars))
	for i, v := range NeuronVars {
		NeuronVarsMap[v] = i
	}
	ln := len(leabra.NeuronVars)
	NeuronVarsAll = make([]string, len(NeuronVars)+ln)
	copy(NeuronVarsAll, leabra.NeuronVars)
	copy(NeuronVarsAll[ln:], NeuronVars)
}
// VarNames returns the names of this neuron type's extra variables.
func (nrn *Neuron) VarNames() []string {
	return NeuronVars
}
// NeuronVarByName returns the index of the variable in the Neuron, or error
func NeuronVarByName(varNm string) (int, error) {
	if idx, ok := NeuronVarsMap[varNm]; ok {
		return idx, nil
	}
	return 0, fmt.Errorf("Neuron VarByName: variable name: %v not valid", varNm)
}
// VarByIndex returns variable using index (0 = first variable in NeuronVars list)
//
// NOTE(review): this reads the field via raw pointer arithmetic, relying
// on Neuron being a flat struct of float32 fields with no padding and on
// the hard-coded 4-byte (float32) field size. It must stay in sync with
// the field list; idx is not bounds-checked.
func (nrn *Neuron) VarByIndex(idx int) float32 {
	fv := (*float32)(unsafe.Pointer(uintptr(unsafe.Pointer(nrn)) + uintptr(4*idx)))
	return *fv
}
// VarByName returns variable by name, or error if the name is unknown.
func (nrn *Neuron) VarByName(varNm string) (float32, error) {
	i, err := NeuronVarByName(varNm)
	if err != nil {
		return 0, err
	}
	return nrn.VarByIndex(i), nil
} | deep/neuron.go | 0.758958 | 0.68305 | neuron.go | starcoder |
package margaid
import (
"fmt"
"io"
"math"
"github.com/erkkah/margaid/brackets"
"github.com/erkkah/margaid/svg"
)
// Margaid == diagraM
// It wraps an SVG canvas plus per-axis projection and range state, and
// records the plots drawn so a legend can be produced afterwards.
type Margaid struct {
	g *svg.SVG

	width   float64
	height  float64
	inset   float64 // distance between chart edge and plotting area
	padding float64 // padding [0..1]

	projections map[Axis]Projection
	ranges      map[Axis]minmax

	plots []string // per-plot labels, indexed by plot ID ("" = unnamed)

	colorScheme int // starting hue [0..359] for plot color selection

	titleFamily string
	titleSize   int

	labelFamily string
	labelSize   int
}
// Layout defaults and drawing constants (pixel units unless noted).
const (
	defaultPadding = 0
	defaultInset   = 64
	tickDistance   = 55
	tickSize       = 6
	textSpacing    = 4
)

// minmax is the range [min, max] of a chart axis
type minmax struct{ min, max float64 }

// Option is the base type for all series options
type Option func(*Margaid)
// New - Margaid constructor. All axes default to a linear projection
// over [0, 100]; the supplied options are applied in order and may
// override any default.
func New(width, height int, options ...Option) *Margaid {
	defaultRange := minmax{0, 100}
	self := &Margaid{
		g:       svg.New(width, height),
		inset:   defaultInset,
		width:   float64(width),
		height:  float64(height),
		padding: defaultPadding,

		projections: map[Axis]Projection{
			X1Axis: Lin,
			X2Axis: Lin,
			Y1Axis: Lin,
			Y2Axis: Lin,
		},

		ranges: map[Axis]minmax{
			X1Axis: defaultRange,
			Y1Axis: defaultRange,
			X2Axis: defaultRange,
			Y2Axis: defaultRange,
		},

		colorScheme: 198,

		titleFamily: "sans-serif",
		titleSize:   18,

		labelFamily: "sans-serif",
		labelSize:   12,
	}

	for _, o := range options {
		o(self)
	}

	return self
}
/// Options
// Projection is the type for the projection constants
type Projection int

// Projection constants
// (values start at 'p' — presumably just to make them distinct and
// printable; nothing in this chunk depends on the numeric values)
const (
	Lin Projection = iota + 'p'
	Log
)

// WithProjection sets the projection for a given axis
func WithProjection(axis Axis, proj Projection) Option {
	return func(m *Margaid) {
		m.projections[axis] = proj
	}
}

// WithRange sets a fixed plotting range for a given axis
func WithRange(axis Axis, min, max float64) Option {
	return func(m *Margaid) {
		m.ranges[axis] = minmax{min, max}
	}
}
// WithAutorange sets range for an axis from the values of a series.
// NOTE(review): behavior for an empty series depends on what
// Series.MinX/MaxX etc. return in that case — confirm before relying on it.
func WithAutorange(axis Axis, series *Series) Option {
	return func(m *Margaid) {
		var axisRange minmax

		if axis == X1Axis || axis == X2Axis {
			axisRange = minmax{
				series.MinX(),
				series.MaxX(),
			}
		}
		if axis == Y1Axis || axis == Y2Axis {
			axisRange = minmax{
				series.MinY(),
				series.MaxY(),
			}
		}
		// Degenerate (constant) series: widen the range so the projection
		// math never divides by zero.
		if axisRange.min == axisRange.max {
			axisRange.min -= 1.0
			axisRange.max += 1.0
		}
		m.ranges[axis] = axisRange
	}
}
// WithInset sets the distance between the chart boundaries and the
// charting area.
func WithInset(inset float64) Option {
	return func(m *Margaid) {
		m.inset = inset
	}
}

// WithPadding sets the padding inside the plotting area as a percentage
// [0..20] of the area width and height. Values outside that span are
// clamped.
func WithPadding(padding float64) Option {
	return func(m *Margaid) {
		factor := padding / 100
		m.padding = math.Max(0, math.Min(0.20, factor))
	}
}

// WithColorScheme sets the start color for selecting plot colors.
// The start color is selected as a hue value between 0 and 359.
// NOTE(review): a negative scheme yields a negative remainder here —
// confirm negative input is not expected.
func WithColorScheme(scheme int) Option {
	return func(m *Margaid) {
		m.colorScheme = scheme % 360
	}
}

// WithTitleFont sets title font family and size in pixels
func WithTitleFont(family string, size int) Option {
	return func(m *Margaid) {
		m.titleFamily = family
		m.titleSize = size
	}
}

// WithLabelFont sets label font family and size in pixels
func WithLabelFont(family string, size int) Option {
	return func(m *Margaid) {
		m.labelFamily = family
		m.labelSize = size
	}
}
/// Drawing
// Title draws a title top center, horizontally centered within the
// full chart width and vertically centered within the top inset.
func (m *Margaid) Title(title string) {
	encoded := svg.EncodeText(title, svg.HAlignMiddle)
	m.g.
		Font(m.titleFamily, fmt.Sprintf("%dpx", m.titleSize)).
		FontStyle(svg.StyleNormal, svg.WeightBold).
		Alignment(svg.HAlignMiddle, svg.VAlignCentral).
		Transform().
		Fill("black").
		Text(m.width/2, m.inset/2, encoded)
}
// LegendPosition decides where to draw the legend
type LegendPosition int

// LegendPosition constants
// (values start at 'l' — presumably just to make them distinct)
const (
	RightTop LegendPosition = iota + 'l'
	RightBottom
	BottomLeft
)
// Legend draws a legend for named plots: one color swatch plus label per
// plot that was registered with a non-empty name, laid out as a vertical
// list anchored at the requested position.
func (m *Margaid) Legend(position LegendPosition) {
	type namedPlot struct {
		name  string
		color string
	}

	// Collect only the named plots, keeping each plot's assigned color.
	var plots []namedPlot
	for i, label := range m.plots {
		if label != "" {
			color := m.getPlotColor(i)
			plots = append(plots, namedPlot{
				name:  label,
				color: color,
			})
		}
	}

	boxSize := float64(m.labelSize)
	lineHeight := float64(m.labelSize) * 1.5

	// Anchor of the list's top-left corner, per requested position.
	listStartX := 0.0
	listStartY := 0.0

	switch position {
	case RightTop:
		listStartX = m.width - m.inset + boxSize + textSpacing
		listStartY = m.inset + 0.5*boxSize
	case RightBottom:
		listStartX = m.width - m.inset + boxSize + textSpacing
		listStartY = m.height - m.inset - lineHeight*float64(len(plots))
	case BottomLeft:
		listStartX = m.inset + 0.5*boxSize
		listStartY = m.height - m.inset + lineHeight + boxSize + tickSize
	}

	// style switches the drawing state between swatch color and label text.
	style := func(color string) {
		m.g.
			Font(m.labelFamily, fmt.Sprintf("%dpx", m.labelSize)).
			FontStyle(svg.StyleNormal, svg.WeightNormal).
			Alignment(svg.HAlignStart, svg.VAlignTop).
			Color(color).
			StrokeWidth("1px")
	}

	for i, plot := range plots {
		floatIndex := float64(i)
		yPos := listStartY + floatIndex*lineHeight
		xPos := listStartX
		style(plot.color)
		m.g.Rect(xPos, yPos, boxSize, boxSize)
		style("black")
		m.g.Text(xPos+boxSize+textSpacing, yPos, brackets.XMLEscape(plot.name))
	}
}
// error renders an error message in red at the top-left of the chart,
// used to surface rendering problems directly in the output image.
func (m *Margaid) error(message string) {
	m.g.
		Font(m.titleFamily, fmt.Sprintf("%dpx", m.titleSize)).
		FontStyle(svg.StyleItalic, svg.WeightBold).
		Alignment(svg.HAlignStart, svg.VAlignCentral).
		Transform().
		StrokeWidth("0").Fill("red").
		Text(5, m.inset/2, brackets.XMLEscape(message))
}
// Frame draws a frame around the chart area
func (m *Margaid) Frame() {
	// Reset any transform, then stroke the inset rectangle that bounds
	// the plotting area.
	m.g.Transform()
	m.g.Fill("none").Stroke("black").StrokeWidth("2px")
	m.g.Rect(m.inset, m.inset, m.width-m.inset*2, m.height-m.inset*2)
}
// Render renders the graph to the given destination.
// It returns any error reported by the writer.
func (m *Margaid) Render(writer io.Writer) error {
	// io.WriteString avoids the intermediate []byte copy that the previous
	// writer.Write([]byte(rendered)) conversion made, and uses the
	// destination's StringWriter fast path when available.
	_, err := io.WriteString(writer, m.g.Render())
	return err
}
// Projects a value onto an axis using the current projection
// setting.
// The value returned is in user coordinates, [0..1] * width for the x-axis.
// Returns an error for non-positive values or ranges on a log axis.
func (m *Margaid) project(value float64, axis Axis) (float64, error) {
	min := m.ranges[axis].min
	max := m.ranges[axis].max

	projected := value
	projection := m.projections[axis]

	var axisLength float64

	switch {
	case axis == X1Axis || axis == X2Axis:
		axisLength = m.width - 2*m.inset
	case axis == Y1Axis || axis == Y2Axis:
		axisLength = m.height - 2*m.inset
	}

	axisPadding := m.padding * axisLength

	if projection == Log {
		if value <= 0 {
			return 0, fmt.Errorf("Cannot draw values <= 0 on log scale")
		}
		if min <= 0 || max <= 0 {
			return 0, fmt.Errorf("Cannot have axis range <= 0 on log scale")
		}
		// Project value and range into log space; the linear interpolation
		// below then yields logarithmic axis placement.
		projected = math.Log10(value)
		min = math.Log10(min)
		max = math.Log10(max)
	}

	// Linear interpolation within the padded axis length.
	projected = axisPadding + (axisLength-2*axisPadding)*(projected-min)/(max-min)
	return projected, nil
}
// getProjectedValues projects every value of a series onto the given axes.
// It stops at the first value that cannot be projected and returns that
// error. (Previously errors were silently overwritten each iteration and
// partially-projected points were appended regardless.)
func (m *Margaid) getProjectedValues(series *Series, xAxis, yAxis Axis) (points []struct{ X, Y float64 }, err error) {
	values := series.Values()
	for values.Next() {
		v := values.Get()
		if v.X, err = m.project(v.X, xAxis); err != nil {
			return nil, err
		}
		if v.Y, err = m.project(v.Y, yAxis); err != nil {
			return nil, err
		}
		points = append(points, v)
	}
	return points, nil
}
// addPlot adds a named plot and returns its ID
func (m *Margaid) addPlot(name string) int {
	m.plots = append(m.plots, name)
	return len(m.plots) - 1
}
// getPlotColor picks hues and saturations around the color wheel at prime indices.
// Kind of works for a quick selection of plotting colors.
func (m *Margaid) getPlotColor(id int) string {
color := 211*id + m.colorScheme
hue := color % 360
saturation := 47 + (id*41)%53
return fmt.Sprintf("hsl(%d, %d%%, 65%%)", hue, saturation)
} | margaid.go | 0.762689 | 0.420719 | margaid.go | starcoder |
package graph
import (
"errors"
"reflect"
"sort"
"strconv"
)
// VerSet represents a set of vertices.
type VerSet map[Vertex]struct{}

// NewVerSet constructs a new, empty VerSet.
func NewVerSet() VerSet {
	// A size hint of 0 is meaningless for maps; plain make reads cleaner.
	return make(VerSet)
}
// Contains reports whether `v` exists in `s`.
func (s VerSet) Contains(v Vertex) bool {
	// Two-value map index: the stored value is empty, only presence matters.
	_, ok := s[v]
	return ok
}
// Add inserts `v` into `s`, returning an error when it is already present.
func (s VerSet) Add(v Vertex) error {
	if s.Contains(v) {
		return errors.New(strconv.FormatUint(uint64(v), 10) + " already exists")
	}
	s[v] = struct{}{}
	return nil
}
// Remove deletes `v` from `s`, returning an error when it is absent.
func (s VerSet) Remove(v Vertex) error {
	if !s.Contains(v) {
		// Fixed message typo: "dose not exist" -> "does not exist".
		return errors.New(strconv.FormatUint(uint64(v), 10) + " does not exist")
	}
	delete(s, v)
	return nil
}
// Cardinality returns the number of elements in the Set.
func (s VerSet) Cardinality() int {
	return len(s)
}

// Equal reports whether `other` contains exactly the same vertices as `s`
// (deep comparison via reflect).
func (s VerSet) Equal(other VerSet) bool {
	return reflect.DeepEqual(s, other)
}
// Difference returns the set of vertices in `s` but not in `other`.
// Neither receiver nor argument is modified.
func (s VerSet) Difference(other VerSet) VerSet {
	d := NewVerSet()
	for v := range s {
		if !other.Contains(v) {
			// Insert directly: v cannot already be in the fresh set d,
			// so Add's error return (previously ignored) is unnecessary.
			d[v] = struct{}{}
		}
	}
	return d
}
// Intersect returns the set of vertices present in both `s` and `other`.
// Neither receiver nor argument is modified.
func (s VerSet) Intersect(other VerSet) VerSet {
	inter := NewVerSet()
	for v := range s {
		if other.Contains(v) {
			// Direct insert; Add's error return (previously ignored)
			// cannot trigger on a freshly built set.
			inter[v] = struct{}{}
		}
	}
	return inter
}
// Union returns a new set holding every vertex of `s` and `other`.
// Neither input is modified.
func (s VerSet) Union(other VerSet) VerSet {
	merged := make(VerSet, len(s)+len(other))
	for v := range s {
		merged[v] = struct{}{}
	}
	for v := range other {
		merged[v] = struct{}{}
	}
	return merged
}
// ExistsCycle check whether a cycle exists or not.
// You can specify the algorithm by method:
// "DisjointSet": Disjoint-Set Algorithm
// Any other method name returns false without inspecting the graph.
func (g *graph) ExistsCycle(method string) bool {
	if method == "DisjointSet" {
		return DisjointSetAlgorithm(g)
	}
	return false
}
// DisjointSetAlgorithm check whether a cycle exists or not.
// An edge whose endpoints already belong to the same set closes a cycle.
// NOTE(review): panics if the initial partition cannot be built.
func DisjointSetAlgorithm(g *graph) bool {
	d, err := g.NewDisjointSet()
	if err != nil {
		panic(err)
	}
	for _, e := range g.GetEdges() {
		F, T := d.FindSet(e)
		if F.Equal(T) {
			return true
		}
		// Endpoints in different sets: merge them and keep scanning.
		d = d.Union(F, T)
	}
	return false
}
// DisjointSet is a type consist of VerSets,
// each of the VerSet is disjoint.
type DisjointSet []VerSet

// NewDisjointSet builds the initial partition: one singleton set per vertex.
// Add can only fail here on a duplicate vertex, which would indicate an
// inconsistent graph.
func (g *graph) NewDisjointSet() (DisjointSet, error) {
	set := make([]VerSet, g.verticesCount)
	i := 0
	for _, v := range g.GetVertices() {
		s := NewVerSet()
		if err := s.Add(v); err != nil {
			return nil, err
		}
		set[i] = s
		i++
	}
	return set, nil
}
// Equal checks whether d is same as other or not.
func (d DisjointSet) Equal(other DisjointSet) bool {
sort.Slice(d, func(i, j int) bool {
var maxi, maxj Vertex
for k := range d[i] {
if k > maxi {
maxi = k
}
}
for k := range d[j] {
if k > maxj {
maxj = k
}
}
return maxi < maxj
})
sort.Slice(other, func(i, j int) bool {
var maxi, maxj Vertex
for k := range other[i] {
if k > maxi {
maxi = k
}
}
for k := range other[j] {
if k > maxj {
maxj = k
}
}
return maxi < maxj
})
return reflect.DeepEqual(d, other)
}
// FindSet returns the VerSets containing the vertices `e.From` and `e.To`,
// respectively.
func (d DisjointSet) FindSet(e Edge) (F, T VerSet) {
	for _, set := range d {
		if set.Contains(e.From) {
			F = set
		}
		if set.Contains(e.To) {
			T = set
		}
	}
	return F, T
}
// Union unions 2 VerSets to one.
func (d DisjointSet) Union(A, B VerSet) DisjointSet {
idxA, idxB := -1, -1
for i, set := range d {
if reflect.DeepEqual(set, A) {
idxA = i
}
if reflect.DeepEqual(set, B) {
idxB = i
}
if idxA >= 0 && idxB >= 0 {
break
}
}
// In generally, idxB is lager than idxA
if idxA > idxB {
idxA, idxB = idxB, idxA
}
AB := make(VerSet, len(d[idxA])+len(d[idxB]))
i := 0
for v := range d[idxA] {
AB[v] = struct{}{}
i++
}
for v := range d[idxB] {
AB[v] = struct{}{}
i++
}
copy(d[idxB:], d[idxB+1:])
d[len(d)-1] = nil
d = d[:len(d)-1]
copy(d[idxA:], d[idxA+1:])
d[len(d)-1] = nil
d = d[:len(d)-1]
d = append(d, AB)
return d
} | graph/undirect.go | 0.832645 | 0.460228 | undirect.go | starcoder |
package graphics
import (
"github.com/inkyblackness/shocked-client/opengl"
)
// BitmapTexture contains a bitmap stored as OpenGL texture.
type BitmapTexture struct {
	gl opengl.OpenGl
	width, height float32 // bitmap size in pixels (may be smaller than the texture)
	u, v float32 // max texture coordinates covering the bitmap portion
	handle uint32 // OpenGL texture name; 0 after Dispose
}

// BitmapRetriever is a thunk that retrieves a cached bitmap.
type BitmapRetriever func() *BitmapTexture
// powerOfTwo returns the smallest power of two (minimum 2) that is at least
// value, capped at 0x1000 (4096) even when value exceeds that cap.
func powerOfTwo(value int) int {
	size := 2
	for size < value && size < 0x1000 {
		size <<= 1
	}
	return size
}
// NewBitmapTexture downloads the provided raw data to OpenGL and returns a BitmapTexture instance.
// pixelData holds one byte per pixel (width*height); it is expanded to RGBA
// and padded up to power-of-two texture dimensions.
func NewBitmapTexture(gl opengl.OpenGl, width, height int, pixelData []byte) *BitmapTexture {
	textureWidth := powerOfTwo(width)
	textureHeight := powerOfTwo(height)
	tex := &BitmapTexture{
		gl: gl,
		width: float32(width),
		height: float32(height),
		handle: gl.GenTextures(1)[0]}
	// Fraction of the padded texture actually occupied by the bitmap.
	tex.u = tex.width / float32(textureWidth)
	tex.v = tex.height / float32(textureHeight)
	// The texture has to be blown up to use RGBA from the start;
	// OpenGL 3.2 doesn't know ALPHA format, Open GL ES 2.0 (WebGL) doesn't know RED or R8.
	rgbaData := make([]byte, textureWidth*textureHeight*BytesPerRgba)
	for y := 0; y < height; y++ {
		inStart := y * width
		outOffset := y * textureWidth * BytesPerRgba
		for x := 0; x < width; x++ {
			// Replicate the single source byte into all four channels.
			value := pixelData[inStart+x]
			rgbaData[outOffset+0] = value
			rgbaData[outOffset+1] = value
			rgbaData[outOffset+2] = value
			rgbaData[outOffset+3] = value
			outOffset += BytesPerRgba
		}
	}
	gl.BindTexture(opengl.TEXTURE_2D, tex.handle)
	gl.TexImage2D(opengl.TEXTURE_2D, 0, opengl.RGBA, int32(textureWidth), int32(textureHeight),
		0, opengl.RGBA, opengl.UNSIGNED_BYTE, rgbaData)
	gl.TexParameteri(opengl.TEXTURE_2D, opengl.TEXTURE_MAG_FILTER, opengl.NEAREST)
	gl.TexParameteri(opengl.TEXTURE_2D, opengl.TEXTURE_MIN_FILTER, opengl.NEAREST)
	gl.GenerateMipmap(opengl.TEXTURE_2D)
	gl.BindTexture(opengl.TEXTURE_2D, 0)
	return tex
}
// Dispose implements the GraphicsTexture interface. It releases the OpenGL
// texture; calling it more than once is safe.
func (tex *BitmapTexture) Dispose() {
	if tex.handle == 0 {
		return
	}
	tex.gl.DeleteTextures([]uint32{tex.handle})
	tex.handle = 0
}
// Size returns the dimensions of the bitmap, in pixels.
func (tex *BitmapTexture) Size() (width, height float32) {
	return tex.width, tex.height
}

// Handle returns the texture handle.
func (tex *BitmapTexture) Handle() uint32 {
	return tex.handle
}

// UV returns the maximum U and V values for the bitmap. The bitmap will be
// stored in a power-of-two texture, which may be larger than the bitmap.
func (tex *BitmapTexture) UV() (u, v float32) {
	return tex.u, tex.v
}
package math
import (
stdmath "math"
)
// Pow raises a to the power of b (a^b).
// If a and b are both (unsigned-)integers, then returns an int. Otherwise, returns a float64.
// Supports types uint8, int32, int64, int, and float64.
func Pow(a interface{}, b interface{}) (out interface{}, err error) {
// Catch and recover from runtime error, e.g., divide by zero.
defer func() {
if r := recover(); r != nil {
if re, ok := r.(error); ok {
out = nil
err = re
} else {
panic(r)
}
}
}()
switch a := a.(type) {
case uint8:
switch b := b.(type) {
case uint8:
return int(stdmath.Pow(float64(a), float64(b))), nil
case int32:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int64:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case float64:
return stdmath.Pow(float64(a), b), nil
}
case int32:
switch b := b.(type) {
case uint8:
return int(stdmath.Pow(float64(a), float64(b))), nil
case int32:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int64:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case float64:
return stdmath.Pow(float64(a), b), nil
}
case int64:
switch b := b.(type) {
case uint8:
return int(stdmath.Pow(float64(a), float64(b))), nil
case int32:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int64:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case float64:
return stdmath.Pow(float64(a), b), nil
}
case int:
switch b := b.(type) {
case uint8:
return int(stdmath.Pow(float64(a), float64(b))), nil
case int32:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int64:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case int:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return int(stdmath.Pow(float64(a), float64(b))), nil
case float64:
return stdmath.Pow(float64(a), b), nil
}
case float64:
switch b := b.(type) {
case uint8:
return stdmath.Pow(a, float64(b)), nil
case int32:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return stdmath.Pow(a, float64(b)), nil
case int64:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return stdmath.Pow(a, float64(b)), nil
case int:
if b < 0 {
return stdmath.Pow(float64(a), float64(b)), nil
}
return stdmath.Pow(a, float64(b)), nil
case float64:
return stdmath.Pow(a, b), nil
}
}
return 0, &ErrInvalidPower{A: a, B: b}
} | pkg/math/Pow.go | 0.708213 | 0.66124 | Pow.go | starcoder |
package graphics
import "image/color"
import "github.com/banthar/Go-SDL/sdl"
// A Primitive is a basic shape which can be drawn directly by the artist.
type Primitive interface {
	// draw renders the shape onto the given SDL surface.
	draw(s *sdl.Surface)
}
// A Point is as it sounds, a single point in space.
type Point struct {
	x, y int
	c color.Color
}

// Points are drawn by setting a single corresponding pixel.
func (p Point) draw(s *sdl.Surface) {
	// Local renamed from "color", which shadowed the image/color package.
	sdlColor := sdl.ColorFromGoColor(p.c)
	safeSet(s, p.x, p.y, sdlColor)
}
// A Rectangle is... a rectangle, drawn filled.
type Rectangle struct {
	x, y int16
	w, h uint16
	c color.Color
}

// Rectangles are drawn by directly calling FillRect on the surface.
func (r Rectangle) draw(s *sdl.Surface) {
	// Locals renamed so the image/color package is no longer shadowed.
	sdlColor := sdl.ColorFromGoColor(r.c)
	pixel := sdl.MapRGB(s.Format, sdlColor.R, sdlColor.G, sdlColor.B)
	s.FillRect(&sdl.Rect{r.x, r.y, r.w, r.h}, pixel)
}
// Circles are, you guessed it. Circles.
type Circle struct {
	x, y int16 // Location on screen
	r uint16 // Radius
	b int // Border thickness. For now only controls if there IS a border or not, not actually its thickness.
	c color.Color // Color
}

// Circles are filled when the border thickness is zero, outlined otherwise.
func (c Circle) draw(s *sdl.Surface) {
	if c.b != 0 {
		drawOutlineCircle(c.x, c.y, c.r, c.c, s)
		return
	}
	drawFilledCircle(c.x, c.y, c.r, c.c, s)
}
// Specifies a line to be drawn, from (x0, y0) to (x1, y1).
type Line struct {
	x0, y0, x1, y1 int16
	c color.Color
}

// Lines delegate to drawLine (Bresenham's algorithm).
func (l Line) draw(s *sdl.Surface) {
	drawLine(l.x0, l.y0, l.x1, l.y1, l.c, s)
}
// drawFilledCircle uses the integer midpoint circle algorithm to draw a filled
// circle to the given surface. Each step emits 1px-tall horizontal scanlines
// across the circle, filling it without per-pixel writes.
func drawFilledCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {
	format := s.Format
	color := sdl.ColorFromGoColor(c)
	colorVal := sdl.MapRGB(format, color.R, color.G, color.B)
	x := int16(r)
	y := int16(0)
	e := 1 - x // midpoint decision variable
	for x >= y {
		// Four symmetric scanlines per octant step.
		s.FillRect(&sdl.Rect{-x + x0, y + y0, uint16(2 * x), 1}, colorVal)
		s.FillRect(&sdl.Rect{-x + x0, -y + y0, uint16(2 * x), 1}, colorVal)
		s.FillRect(&sdl.Rect{-y + x0, x + y0, uint16(2 * y), 1}, colorVal)
		s.FillRect(&sdl.Rect{-y + x0, -x + y0, uint16(2 * y), 1}, colorVal)
		y++
		if e < 0 {
			e += 2*y + 1
		} else {
			x--
			e += 2 * (y - x + 1)
		}
	}
}
// drawOutlineCircle uses the integer midpoint circle algorithm to draw the outline
// of a circle (1 px thick) to the given surface. One octant is computed; the
// remaining seven are filled in by 8-way symmetry.
func drawOutlineCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {
	s.Lock()
	defer s.Unlock()
	color := sdl.ColorFromGoColor(c)
	x := int16(r)
	y := int16(0)
	e := 1 - x // midpoint decision variable
	for x >= y {
		safeSet(s, int(x+x0), int(y+y0), color)
		safeSet(s, int(x+x0), int(-y+y0), color)
		safeSet(s, int(-x+x0), int(y+y0), color)
		safeSet(s, int(-x+x0), int(-y+y0), color)
		safeSet(s, int(y+x0), int(x+y0), color)
		safeSet(s, int(y+x0), int(-x+y0), color)
		safeSet(s, int(-y+x0), int(x+y0), color)
		safeSet(s, int(-y+x0), int(-x+y0), color)
		y++
		if e < 0 {
			e += 2*y + 1
		} else {
			x--
			e += 2 * (y - x + 1)
		}
	}
}
// Uses Bresenham's algorithm to draw a line between two points.
func drawLine(x0, y0, x1, y1 int16, c color.Color, s *sdl.Surface) {
	s.Lock()
	defer s.Unlock()
	color := sdl.ColorFromGoColor(c)
	// Make sure the two ends are left-to-right.
	if x1 < x0 {
		x0, x1 = x1, x0
		y0, y1 = y1, y0
	}
	// This algorithm only works for curves where dx > -dy and dy < 0
	// So, prepare a coordinate transform to make this the case.
	// We will then reverse the transform when we plot the points.
	// There are 4 cases, all the transformations are self-inverse, which
	// makes our lives a little easier.
	dx := int(x1 - x0)
	dy := int(y1 - y0)
	var transform func(x, y int) (int, int)
	var inverse func(x, y int) (int, int)
	if dy < 0 {
		if dx < -dy {
			// Steep upward: rotate; this pair is NOT self-inverse,
			// hence the explicit inverse below.
			transform = func(x, y int) (int, int) { return -y, x }
			inverse = func(x, y int) (int, int) { return y, -x }
		} else {
			// Shallow upward: mirror across the x-axis.
			transform = func(x, y int) (int, int) { return x, -y }
			inverse = transform
		}
	} else {
		if dx < dy {
			// Steep downward: swap axes.
			transform = func(x, y int) (int, int) { return y, x }
			inverse = transform
		} else {
			// Shallow downward: already in canonical form.
			transform = func(x, y int) (int, int) { return x, y }
			inverse = transform
		}
	}
	// Transform coordinates.
	tx0, ty0 := transform(int(x0), int(y0))
	tx1, ty1 := transform(int(x1), int(y1))
	// Recalculate dx and dy.
	dx = tx1 - tx0
	dy = ty1 - ty0
	D := 2*dy - dx // Bresenham decision variable
	safeSet(s, int(x0), int(y0), color)
	y := ty0
	for x := tx0 + 1; x <= tx1; x++ {
		if D > 0 {
			y += 1
			D += 2*dy - 2*dx
		} else {
			D += 2 * dy
		}
		// Undo the transform before plotting.
		tx, ty := inverse(x, y)
		safeSet(s, tx, ty, color)
	}
}
func safeSet(s *sdl.Surface, x, y int, c sdl.Color) {
if x >= 0 && y >= 0 && x < int(s.W) && y < int(s.H) {
s.Set(x, y, c)
}
} | graphics/primitive.go | 0.708515 | 0.442094 | primitive.go | starcoder |
package core
import (
"database/sql/driver"
"fmt"
"strconv"
"time"
)
// DateType stores a calendar date as an embedded int64 unix timestamp
// (seconds) of midnight on that date.
type DateType struct {
	int64
}
// String returns the stored unix timestamp in decimal form.
func (tt DateType) String() string {
	return strconv.FormatInt(tt.int64, 10)
}
// NewDateType builds a DateType for the given calendar date, interpreted in
// the local time zone (midnight).
func NewDateType(year, month, day int) DateType {
	return DateType{
		time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.Local).Unix(),
	}
}
// NewDateTypeFromTime truncates toConvert to a calendar date. The date is
// taken in the America/New_York zone when that tzdata is available, falling
// back to the time's own location otherwise.
func NewDateTypeFromTime(toConvert time.Time) DateType {
	var year, day int
	var month time.Month
	location, err := time.LoadLocation("America/New_York")
	if err != nil {
		// Zone database unavailable: use the time's own location.
		year, month, day = toConvert.Date()
	} else {
		year, month, day = toConvert.In(location).Date()
	}
	return NewDateType(year, int(month), day)
}
// NewDateTypeFromUnix truncates a unix timestamp (seconds) to its date.
func NewDateTypeFromUnix(timestamp int64) DateType {
	return NewDateTypeFromTime(time.Unix(timestamp, 0).Local())
}

// Time returns the stored date as a local time.Time (midnight).
func (tt DateType) Time() time.Time {
	return time.Unix(tt.int64, 0).Local()
}

// Unix returns the stored unix timestamp in seconds.
func (tt DateType) Unix() int64 {
	return tt.int64
}
// Scan implements sql.Scanner, accepting integer, string, []byte and []int8
// representations of a unix timestamp. A nil value resets tt to zero.
//
// The previous implementation grouped all signed widths in one case and then
// asserted value.(int64) (and value.(uint64) for the unsigned group), which
// panicked for every width except int64/uint64. Each width now converts
// explicitly.
func (tt *DateType) Scan(value interface{}) error {
	if value == nil {
		*tt = DateType{0}
		return nil
	}
	switch v := value.(type) {
	case int:
		*tt = DateType{int64(v)}
		return nil
	case int8:
		*tt = DateType{int64(v)}
		return nil
	case int16:
		*tt = DateType{int64(v)}
		return nil
	case int32:
		*tt = DateType{int64(v)}
		return nil
	case int64:
		*tt = DateType{v}
		return nil
	case uint:
		return tt.scanUint64(uint64(v), value)
	case uint8:
		return tt.scanUint64(uint64(v), value)
	case uint16:
		return tt.scanUint64(uint64(v), value)
	case uint32:
		return tt.scanUint64(uint64(v), value)
	case uint64:
		return tt.scanUint64(v, value)
	case string:
		return tt.scanDecimal(v, value)
	case []byte:
		return tt.scanDecimal(string(v), value)
	case []int8:
		raw := make([]byte, len(v))
		for i, c := range v {
			raw[i] = byte(c)
		}
		return tt.scanDecimal(string(raw), value)
	}
	return fmt.Errorf("sql/driver: unsupported value %v (type %T) converting to DateType", value, value)
}

// scanUint64 stores u, rejecting values that overflow int64.
func (tt *DateType) scanUint64(u uint64, value interface{}) error {
	if u > (1<<63)-1 {
		return fmt.Errorf("sql/driver: value %v (type %T) overflows DateType", value, value)
	}
	*tt = DateType{int64(u)}
	return nil
}

// scanDecimal parses s as a base-10 int64 and stores it.
func (tt *DateType) scanDecimal(s string, value interface{}) error {
	i, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return fmt.Errorf("sql/driver: value %v (type %T) can't be converted to DateType", value, value)
	}
	*tt = DateType{i}
	return nil
}
// Value implements driver.Valuer, exposing the timestamp as a plain int64.
func (tt DateType) Value() (driver.Value, error) {
	//the driver requires a standard type,
	//which for this type is int64
	return int64(tt.int64), nil
}

// IsZero reports whether tt holds the zero timestamp.
func (tt DateType) IsZero() bool {
	return tt.int64 == 0
}
// Add shifts the date by a duration, producing a full TimestampType.
func (tt DateType) Add(duration time.Duration) TimestampType {
	return NewTimestampTypeFromTime(tt.Time().Add(duration))
}

// AddDate shifts the date by calendar years, months and days.
func (tt DateType) AddDate(years int, months int, days int) DateType {
	return NewDateTypeFromTime(tt.Time().AddDate(years, months, days))
}

// Before reports whether tt is strictly earlier than date.
func (tt DateType) Before(date DateType) bool {
	return tt.int64 < date.int64
}

// Equal reports whether tt and date are the same date.
func (tt DateType) Equal(date DateType) bool {
	return tt.int64 == date.int64
}

// IsWeekDay reports whether the date falls on Monday through Friday.
func (tt DateType) IsWeekDay() bool {
	t := time.Unix(tt.int64, 0).Weekday()
	return time.Sunday < t && t < time.Saturday
}

// Day returns the weekday of the date.
func (tt DateType) Day() time.Weekday {
	return time.Unix(tt.int64, 0).Weekday()
}
package array
/* -------------------------------------------------------------------------------- */
// -- Metadata: attributes that every array must carry.
/* -------------------------------------------------------------------------------- */
// Metadata contains information that must accompany an array, but not the actual
// data stored in the array. Each concrete array type (in `cat.go`) has a `Metadata`
// member.
type Metadata struct {
	dims []int // size along each dimension
	ndims int // number of dimensions (len(dims))
	numels int // total number of elements
	micf []int // (m)ulti-(i)ndex (c)onversion (f)actors
}
/* -------------------------------------------------------------------------------- */
// -- Metadata: methods
/* -------------------------------------------------------------------------------- */
/*
NOTE: The getters, below, are not defined because of some Pavlovian OO reflex.
Rather, at this point, it seems like a bad idea to give access to a state
that has to remain consistent while operations are applied to arrays.
EDIT: Benchmarks show that there is a noticeable performance hit by calling
methods instead of directly accessing the properties of a `Metadata`
struct. Since performance is important of an array library, the properties
of `Metadata` are now exported.
EDIT: Things got messy after switching to direct access to the properties: there
are name clashes between the setters of the array factory and the getters
of the Metadata (an `ArrayFactory` has `Metadata` so if, for example, the
field `Dims` is exported by `Metadata`, then `Dims()` cannot be used to
construct an array as in `A := array.Factory().Dims([2, 2])`). Back to
access via methods. Anoher way to resolve the name clashes is, of course,
to use less concise function / method names.
*/
// Dims returns the dimensions of an array.
// NOTE(review): this returns the internal slice itself, not a copy — callers
// could mutate the metadata through it.
func (A *Metadata) Dims() []int {
	return A.dims
}

// Ndims returns the number of dimensions of an array.
func (A *Metadata) Ndims() int {
	return A.ndims
}

// Numels returns the number of elements of an array.
func (A *Metadata) Numels() int {
	return A.numels
}

// Micf returns the multi-index conversion factors of an array.
// NOTE(review): also exposes the internal slice without copying.
func (A *Metadata) Micf() []int {
	return A.micf
}
package main
import (
"fmt"
"strings"
"github.com/fatih/color"
)
// 2d Direction vectors. Y grows downward (screen coordinates), so North
// decreases Y.
var (
	North = Point{X: 0, Y: -1}
	South = Point{X: 0, Y: 1}
	East = Point{X: 1, Y: 0}
	West = Point{X: -1, Y: 0}
	Up = North
	Down = South
	Left = West
	Right = East
)

// Point represents a point (or vector) in 2d space
type Point struct {
	X, Y int
}
// Distance returns the manhattan distance between two points.
func (p Point) Distance(p2 Point) int {
	dx := p.X - p2.X
	if dx < 0 {
		dx = -dx
	}
	dy := p.Y - p2.Y
	if dy < 0 {
		dy = -dy
	}
	return dx + dy
}
// Add adds the vector v to the point, returning a new point
func (p Point) Add(v Point) Point {
	return Point{
		X: p.X + v.X,
		Y: p.Y + v.Y,
	}
}

// FixedGrid is a fixed-size grid of runes backed by a flat slice in
// row-major order.
type FixedGrid struct {
	values []rune
	Size Point
}
// NewFixedGrid creates a fixed grid from a newline-separated string.
// All lines are assumed to be the same length as the first — TODO confirm
// callers guarantee this; ragged input silently corrupts row indexing.
func NewFixedGrid(str string) *FixedGrid {
	lines := strings.Split(str, "\n")
	size := Point{X: len(lines[0]), Y: len(lines)}
	// Preallocate the full backing array instead of growing it per line.
	values := make([]rune, 0, size.X*size.Y)
	for _, line := range lines {
		values = append(values, []rune(line)...)
	}
	return &FixedGrid{
		values: values,
		Size: size,
	}
}
// pointIndex maps a point to its offset in the row-major backing slice.
func (g *FixedGrid) pointIndex(p Point) int {
	return p.Y*g.Size.X + p.X
}

// GetPoint gets the value at point p.
// NOTE(review): p is not bounds-checked; an out-of-range point panics or
// silently aliases a neighboring row.
func (g *FixedGrid) GetPoint(p Point) rune {
	return g.values[g.pointIndex(p)]
}

// SetPoint sets the point specified and returns the previous value
func (g *FixedGrid) SetPoint(p Point, value rune) rune {
	num := g.pointIndex(p)
	old := g.values[num]
	g.values[num] = value
	return old
}
// AdjacentPoints returns the list of valid adjacent (NSEW) points to p,
// excluding any neighbor that falls outside the grid.
func (g *FixedGrid) AdjacentPoints(p Point) []Point {
	neighbors := []Point{}
	for _, dir := range []Point{North, South, East, West} {
		n := p.Add(dir)
		if n.X >= 0 && n.X < g.Size.X && n.Y >= 0 && n.Y < g.Size.Y {
			neighbors = append(neighbors, n)
		}
	}
	return neighbors
}
// String implements the Stringer interface, rendering the grid as
// newline-separated rows without a trailing newline.
func (g *FixedGrid) String() string {
	rows := make([]string, g.Size.Y)
	for y := 0; y < g.Size.Y; y++ {
		rows[y] = string(g.values[y*g.Size.X : (y+1)*g.Size.X])
	}
	return strings.Join(rows, "\n")
}
// Grid is a sparse grid of runes
type Grid struct {
	values map[Point]rune // only explicitly set cells are stored
	blank rune // rune reported for unset cells
	minPoint, maxPoint Point // bounding box of all set cells
	runeColor map[rune]*color.Color // optional per-rune print colors
}

// NewGrid creates a new grid with '.' as the blank rune.
func NewGrid() *Grid {
	g := &Grid{
		values: make(map[Point]rune),
		runeColor: make(map[rune]*color.Color),
		blank: '.',
	}
	return g
}
// NewGridFromInput takes a multiline string, building a grid out of it
func NewGridFromInput(in string) *Grid {
	g := NewGrid()
	for y, line := range strings.Split(in, "\n") {
		// x is a byte offset (range over string); non-ASCII input would
		// therefore skip columns.
		for x, r := range line {
			p := Point{X: x, Y: y}
			g.SetPoint(p, r)
		}
	}
	return g
}
// Copy copies a grid into new memory
func (g *Grid) Copy() *Grid {
	clone := NewGrid()
	clone.blank = g.blank
	clone.minPoint = g.minPoint
	clone.maxPoint = g.maxPoint
	for p, r := range g.values {
		clone.values[p] = r
	}
	for r, c := range g.runeColor {
		clone.runeColor[r] = c
	}
	return clone
}
// SetBlank sets the blank rune for a grid — the rune returned for unset cells.
func (g *Grid) SetBlank(r rune) {
	g.blank = r
}
// SetPoint sets the point specified and returns the previous value if any
func (g *Grid) SetPoint(p Point, value rune) rune {
	g.growBounds(p)
	prev := g.values[p]
	g.values[p] = value
	return prev
}

// growBounds widens the tracked bounding box so that it contains p.
func (g *Grid) growBounds(p Point) {
	if p.X < g.minPoint.X {
		g.minPoint.X = p.X
	}
	if p.Y < g.minPoint.Y {
		g.minPoint.Y = p.Y
	}
	if p.X > g.maxPoint.X {
		g.maxPoint.X = p.X
	}
	if p.Y > g.maxPoint.Y {
		g.maxPoint.Y = p.Y
	}
}
// GetPoint gets the value of the grid at p, or the blank rune when unset.
func (g *Grid) GetPoint(p Point) rune {
	if r, ok := g.values[p]; ok {
		return r
	}
	return g.blank
}
// Bounds returns the minimum and maximum points that bound this grid.
// NOTE(review): both start at the zero Point, so the box always contains the
// origin even when no cell near it was ever set.
func (g *Grid) Bounds() (minPoint, maxPoint Point) {
	return g.minPoint, g.maxPoint
}
// String implements the Stringer interface, rendering the bounded area row
// by row; each row (including the last) ends with a newline.
func (g *Grid) String() string {
	var sb strings.Builder
	lo, hi := g.Bounds()
	for y := lo.Y; y <= hi.Y; y++ {
		for x := lo.X; x <= hi.X; x++ {
			sb.WriteRune(g.GetPoint(Point{X: x, Y: y}))
		}
		sb.WriteByte('\n')
	}
	return sb.String()
}
// AddRuneColor adds a color that a rune should be
func (g *Grid) AddRuneColor(r rune, c *color.Color) {
	g.runeColor[r] = c
}

// ColorPrint prints in color directly to stdout
func (g *Grid) ColorPrint() {
	min, max := g.Bounds()
	defColor := color.New(color.Reset)
	for y := min.Y; y <= max.Y; y++ {
		for x := min.X; x <= max.X; x++ {
			c := g.GetPoint(Point{X: x, Y: y})
			// Fall back to the reset color for runes without a mapping.
			attrs, ok := g.runeColor[c]
			if !ok {
				attrs = defColor
			}
			attrs.Print(string(c))
		}
		defColor.Print("\n")
	}
}
package interval
// U64Span is the base interval type understood by the algorithms in this package.
// It is a half open interval that includes the lower bound, but not the upper.
type U64Span struct {
	Start uint64 // the value at which the interval begins
	End uint64 // the next value not included in the interval.
}

// U64Range is an interval specified by a beginning and size.
// It carries the same information as U64Span in First/Count form.
type U64Range struct {
	First uint64 // the first value in the interval
	Count uint64 // the count of values in the interval
}

// U64SpanList implements List for an array of U64Span intervals
type U64SpanList []U64Span

// U64RangeList implements List for an array of U64Range intervals
type U64RangeList []U64Range
// Range converts a U64Span to a U64Range covering the same values.
func (s U64Span) Range() U64Range { return U64Range{First: s.Start, Count: s.End - s.Start} }

// Span converts a U64Range to a U64Span covering the same values.
func (r U64Range) Span() U64Span { return U64Span{Start: r.First, End: r.First + r.Count} }
// List interface implementation for U64SpanList; spans are stored directly.
func (l U64SpanList) Length() int { return len(l) }
func (l U64SpanList) GetSpan(index int) U64Span { return l[index] }
func (l U64SpanList) SetSpan(index int, span U64Span) { l[index] = span }
func (l U64SpanList) New(index int, span U64Span) { l[index] = span }
func (l U64SpanList) Copy(to, from, count int) { copy(l[to:to+count], l[from:from+count]) }
// Resize sets the list length, preserving existing elements. The backing
// array is reused when it is already big enough; otherwise capacity grows to
// at least double the previous capacity.
func (l *U64SpanList) Resize(length int) {
	// ">=" instead of the previous ">": a capacity exactly equal to the
	// requested length no longer forces a needless reallocation.
	if cap(*l) >= length {
		*l = (*l)[:length]
		return
	}
	old := *l
	capacity := cap(*l) * 2
	if capacity < length {
		capacity = length
	}
	*l = make(U64SpanList, length, capacity)
	copy(*l, old)
}
// List interface implementation for U64RangeList; spans are converted to and
// from First/Count form on access.
func (l U64RangeList) Length() int { return len(l) }
func (l U64RangeList) GetSpan(index int) U64Span { return l[index].Span() }
func (l U64RangeList) SetSpan(index int, span U64Span) { l[index] = span.Range() }
func (l U64RangeList) New(index int, span U64Span) { l[index] = span.Range() }
func (l U64RangeList) Copy(to, from, count int) { copy(l[to:to+count], l[from:from+count]) }
// Resize sets the list length, preserving existing elements. The backing
// array is reused when it is already big enough; otherwise capacity grows to
// at least double the previous capacity.
func (l *U64RangeList) Resize(length int) {
	// ">=" instead of the previous ">": a capacity exactly equal to the
	// requested length no longer forces a needless reallocation.
	if cap(*l) >= length {
		*l = (*l)[:length]
		return
	}
	old := *l
	capacity := cap(*l) * 2
	if capacity < length {
		capacity = length
	}
	*l = make(U64RangeList, length, capacity)
	copy(*l, old)
}
func (l U64RangeList) Clone() U64RangeList {
res := make(U64RangeList, len(l))
copy(res, l)
return res
} | core/math/interval/u64.go | 0.749546 | 0.561275 | u64.go | starcoder |
package synthetics
import (
"encoding/json"
"time"
)
// V202101beta1MeshMetrics struct for V202101beta1MeshMetrics.
// NOTE(review): this file appears to be OpenAPI-generated; prefer
// regenerating over hand edits.
type V202101beta1MeshMetrics struct {
	Time *time.Time `json:"time,omitempty"`
	Latency *V202101beta1MeshMetric `json:"latency,omitempty"`
	PacketLoss *V202101beta1MeshMetric `json:"packetLoss,omitempty"`
	Jitter *V202101beta1MeshMetric `json:"jitter,omitempty"`
}
// NewV202101beta1MeshMetrics instantiates a new V202101beta1MeshMetrics object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewV202101beta1MeshMetrics() *V202101beta1MeshMetrics {
	this := V202101beta1MeshMetrics{}
	return &this
}

// NewV202101beta1MeshMetricsWithDefaults instantiates a new V202101beta1MeshMetrics object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewV202101beta1MeshMetricsWithDefaults() *V202101beta1MeshMetrics {
	this := V202101beta1MeshMetrics{}
	return &this
}
// Generated-style accessors for the Time field.

// GetTime returns the Time field value if set, zero value otherwise.
func (o *V202101beta1MeshMetrics) GetTime() time.Time {
	if o == nil || o.Time == nil {
		var ret time.Time
		return ret
	}
	return *o.Time
}

// GetTimeOk returns a tuple with the Time field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1MeshMetrics) GetTimeOk() (*time.Time, bool) {
	if o == nil || o.Time == nil {
		return nil, false
	}
	return o.Time, true
}

// HasTime returns a boolean if a field has been set.
func (o *V202101beta1MeshMetrics) HasTime() bool {
	if o != nil && o.Time != nil {
		return true
	}
	return false
}

// SetTime gets a reference to the given time.Time and assigns it to the Time field.
func (o *V202101beta1MeshMetrics) SetTime(v time.Time) {
	o.Time = &v
}

// Generated-style accessors for the Latency field.

// GetLatency returns the Latency field value if set, zero value otherwise.
func (o *V202101beta1MeshMetrics) GetLatency() V202101beta1MeshMetric {
	if o == nil || o.Latency == nil {
		var ret V202101beta1MeshMetric
		return ret
	}
	return *o.Latency
}

// GetLatencyOk returns a tuple with the Latency field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1MeshMetrics) GetLatencyOk() (*V202101beta1MeshMetric, bool) {
	if o == nil || o.Latency == nil {
		return nil, false
	}
	return o.Latency, true
}

// HasLatency returns a boolean if a field has been set.
func (o *V202101beta1MeshMetrics) HasLatency() bool {
	if o != nil && o.Latency != nil {
		return true
	}
	return false
}

// SetLatency gets a reference to the given V202101beta1MeshMetric and assigns it to the Latency field.
func (o *V202101beta1MeshMetrics) SetLatency(v V202101beta1MeshMetric) {
	o.Latency = &v
}

// Generated-style accessors for the PacketLoss field.

// GetPacketLoss returns the PacketLoss field value if set, zero value otherwise.
func (o *V202101beta1MeshMetrics) GetPacketLoss() V202101beta1MeshMetric {
	if o == nil || o.PacketLoss == nil {
		var ret V202101beta1MeshMetric
		return ret
	}
	return *o.PacketLoss
}

// GetPacketLossOk returns a tuple with the PacketLoss field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1MeshMetrics) GetPacketLossOk() (*V202101beta1MeshMetric, bool) {
	if o == nil || o.PacketLoss == nil {
		return nil, false
	}
	return o.PacketLoss, true
}

// HasPacketLoss returns a boolean if a field has been set.
func (o *V202101beta1MeshMetrics) HasPacketLoss() bool {
	if o != nil && o.PacketLoss != nil {
		return true
	}
	return false
}

// SetPacketLoss gets a reference to the given V202101beta1MeshMetric and assigns it to the PacketLoss field.
func (o *V202101beta1MeshMetrics) SetPacketLoss(v V202101beta1MeshMetric) {
	o.PacketLoss = &v
}

// Generated-style accessors for the Jitter field.

// GetJitter returns the Jitter field value if set, zero value otherwise.
func (o *V202101beta1MeshMetrics) GetJitter() V202101beta1MeshMetric {
	if o == nil || o.Jitter == nil {
		var ret V202101beta1MeshMetric
		return ret
	}
	return *o.Jitter
}

// GetJitterOk returns a tuple with the Jitter field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *V202101beta1MeshMetrics) GetJitterOk() (*V202101beta1MeshMetric, bool) {
	if o == nil || o.Jitter == nil {
		return nil, false
	}
	return o.Jitter, true
}

// HasJitter returns a boolean if a field has been set.
func (o *V202101beta1MeshMetrics) HasJitter() bool {
	if o != nil && o.Jitter != nil {
		return true
	}
	return false
}

// SetJitter gets a reference to the given V202101beta1MeshMetric and assigns it to the Jitter field.
func (o *V202101beta1MeshMetrics) SetJitter(v V202101beta1MeshMetric) {
	o.Jitter = &v
}
// MarshalJSON serializes the struct, emitting only fields that are non-nil
// (omitempty semantics for optional OpenAPI properties).
func (o V202101beta1MeshMetrics) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Time != nil {
		toSerialize["time"] = o.Time
	}
	if o.Latency != nil {
		toSerialize["latency"] = o.Latency
	}
	if o.PacketLoss != nil {
		toSerialize["packetLoss"] = o.PacketLoss
	}
	if o.Jitter != nil {
		toSerialize["jitter"] = o.Jitter
	}
	return json.Marshal(toSerialize)
}
// NullableV202101beta1MeshMetrics wraps a V202101beta1MeshMetrics pointer and
// tracks whether a value (including JSON null) has been explicitly set.
type NullableV202101beta1MeshMetrics struct {
	value *V202101beta1MeshMetrics
	isSet bool
}
// Get returns the wrapped value; nil when unset or explicitly null.
func (v NullableV202101beta1MeshMetrics) Get() *V202101beta1MeshMetrics {
	return v.value
}
// Set stores val and marks the wrapper as set (val may be nil for JSON null).
func (v *NullableV202101beta1MeshMetrics) Set(val *V202101beta1MeshMetrics) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableV202101beta1MeshMetrics) IsSet() bool {
	return v.isSet
}
// Unset clears the wrapped value and the set flag.
func (v *NullableV202101beta1MeshMetrics) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableV202101beta1MeshMetrics returns a wrapper around val that is
// already marked as set.
func NewNullableV202101beta1MeshMetrics(val *V202101beta1MeshMetrics) *NullableV202101beta1MeshMetrics {
	return &NullableV202101beta1MeshMetrics{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableV202101beta1MeshMetrics) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as
// set — even when src is the JSON literal null (value stays nil, isSet true).
func (v *NullableV202101beta1MeshMetrics) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package eval
import (
"github.com/dito/src/ast"
"github.com/dito/src/object"
"math/rand"
"time"
)
func init() {
	// need random number generator for builtin function rand.
	// NOTE(review): rand.Seed is deprecated since Go 1.20; if the module is
	// ever bumped past 1.20, consider math/rand/v2 — confirm toolchain first.
	rand.Seed(time.Now().UTC().UnixNano())
}
// Eval walks the AST rooted at node, evaluates it in env and returns the
// resulting object. Unknown node types return nil.
func Eval(node ast.Node, env *object.Environment) object.Object {
	switch node := node.(type) {
	case *ast.Program:
		return evalProgram(node.Statements, env)

	// Statements
	case *ast.AssignmentStatement:
		return evalAssignment(node, env)
	case *ast.ReAssignStatement:
		return evalReAssign(node, env)
	case *ast.IndexAssignmentStatement:
		return evalIndexAssignment(node, env)
	case *ast.ReturnStatement:
		return &object.ReturnValue{Value: Eval(node.Value, env)}
	case *ast.ExpressionStatement:
		return Eval(node.Expression, env)
	case *ast.IfStatement:
		return evalIfStatement(node, env)
	case *ast.ForStatement:
		return evalForStatement(node, env)
	case *ast.BlockStatement:
		return evalBlockStatement(node, env)
	case *ast.ImportStatement:
		return evalImportStatement(node, env)

	// Expressions
	case *ast.PrefixExpression:
		right := Eval(node.Right, env)
		// Propagate evaluation errors instead of applying the operator to an
		// Error object (consistent with evalInfixExpression).
		if isError(right) {
			return right
		}
		return evalPrefixExpression(node.Operator, right)
	case *ast.InfixExpression:
		return evalInfixExpression(node, env)
	case *ast.IfElseExpression:
		return evalIfElseExpression(node, env)
	case *ast.IndexExpression:
		return evalIndexExpression(node, env)
	case *ast.SliceExpression:
		return evalSliceExpression(node, env)

	// Functions
	case *ast.Function:
		return object.NewFunction(node, env)
	case *ast.LambdaFunction:
		return object.NewLambda(node.Parameters, node.Expr, env)
	case *ast.CallExpression:
		return evalFunctionCall(node.Function, node.Arguments, env)

	// Atoms
	case *ast.Identifier:
		return evalIdentifier(node, env)
	case *ast.StringLiteral:
		return object.NewString(node.Value)
	case *ast.IntegerLiteral:
		return object.NewInt(node.Value)
	case *ast.FloatLiteral:
		return object.NewFloat(node.Value)
	case *ast.BooleanLiteral:
		return object.NewBool(node.Value)
	case *ast.ArrayLiteral:
		elements := evalExpressions(node.Elements, env)
		return object.NewArray(elements, -1)
	}
	return nil
}
// evalProgram evaluates top-level statements in order, short-circuiting on a
// return value (unwrapped) or on the first error.
func evalProgram(stmts []ast.Statement, env *object.Environment) object.Object {
	var result object.Object
	for _, stmt := range stmts {
		result = Eval(stmt, env)
		if ret, ok := result.(*object.ReturnValue); ok {
			return ret.Value
		}
		if errObj, ok := result.(*object.Error); ok {
			return errObj
		}
	}
	return result
}
// evalIdentifier resolves a name: environment bindings shadow builtins; an
// unresolved name yields an Error object.
func evalIdentifier(node *ast.Identifier, env *object.Environment) object.Object {
	name := node.Value
	if val, ok := env.Get(name); ok {
		return val
	}
	if fn, ok := Builtins[name]; ok {
		return fn
	}
	return object.NewError("Identifier not found: '%s'", name)
}
// isError reports whether obj is a non-nil Error object.
func isError(obj object.Object) bool {
	return obj != nil && obj.Type() == object.ErrorType
}
// Infix expressions.

// evalInfixExpression looks up the binary operator first (unknown operators
// are reported before operands run), then evaluates operands left-to-right,
// propagating the first error.
func evalInfixExpression(node *ast.InfixExpression, env *object.Environment) object.Object {
	op := object.BinaryOps[node.Operator]
	if op == nil {
		return object.NewError("Unknown Binary operation: '%s'", node.Operator)
	}
	left := Eval(node.Left, env)
	if isError(left) {
		return left
	}
	right := Eval(node.Right, env)
	if isError(right) {
		return right
	}
	return op.EvalBinary(env, left, right)
}
package prestgo
// Presto column type names, as they appear in the "type" field of the
// queryColumn JSON below.
const (
	// This type captures boolean values true and false
	Boolean = "boolean"
	// A 64-bit signed two’s complement integer with a minimum value of -2^63 and a maximum value of 2^63 - 1.
	BigInt = "bigint"
	// Integer assumed to be an alias for BigInt.
	Integer = "integer"
	// A double is a 64-bit inexact, variable-precision implementing the IEEE Standard 754 for Binary Floating-Point Arithmetic.
	Double = "double"
	// Variable length character data.
	VarChar = "varchar"
	// Variable length binary data.
	VarBinary = "varbinary"
	// Variable length json data.
	JSON = "json"
	// Calendar date (year, month, day).
	// Example: DATE '2001-08-22'
	Date = "date"
	// Time of day (hour, minute, second, millisecond) without a time zone. Values of this type are parsed and rendered in the session time zone.
	// Example: TIME '01:02:03.456'
	Time = "time"
	// Instant in time that includes the date and time of day without a time zone. Values of this type are parsed and rendered in the session time zone.
	// Example: TIMESTAMP '2001-08-22 03:04:05.321'
	Timestamp = "timestamp"
	// Instant in time that includes the date and time of day with a time zone. Values of this type are parsed and rendered in the provided time zone.
	// Example: TIMESTAMP '2001-08-22 03:04:05.321' AT TIME ZONE 'America/Los_Angeles'
	TimestampWithTimezone = "timestamp with time zone"
	// MapVarchar is a map from string-keys to string-values.
	MapVarchar = "map(varchar,varchar)"
	// Array of variable length character data.
	ArrayVarchar = "array(varchar)"
)
// stmtResponse mirrors the JSON body Presto returns when a statement is
// submitted; NextURI is polled for results.
type stmtResponse struct {
	ID string `json:"id"`
	InfoURI string `json:"infoUri"`
	NextURI string `json:"nextUri"`
	Stats stmtStats `json:"stats"`
	Error stmtError `json:"error"`
}
// stmtStats carries execution progress counters reported alongside a
// statement or query response.
type stmtStats struct {
	State string `json:"state"`
	Scheduled bool `json:"scheduled"`
	Nodes int `json:"nodes"`
	TotalSplits int `json:"totalSplits"`
	QueuesSplits int `json:"queuedSplits"`
	RunningSplits int `json:"runningSplits"`
	CompletedSplits int `json:"completedSplits"`
	UserTimeMillis int `json:"userTimeMillis"`
	CPUTimeMillis int `json:"cpuTimeMillis"`
	WallTimeMillis int `json:"wallTimeMillis"`
	ProcessedRows int `json:"processedRows"`
	ProcessedBytes int `json:"processedBytes"`
	RootStage stmtStage `json:"rootStage"`
}
// stmtError is the error payload embedded in statement/query responses.
// It implements the error interface (see Error below).
type stmtError struct {
	Message string `json:"message"`
	ErrorCode int `json:"errorCode"`
	ErrorLocation stmtErrorLocation `json:"errorLocation"`
	FailureInfo stmtErrorFailureInfo `json:"failureInfo"`
	// Other fields omitted
}
// stmtErrorLocation points at the position in the SQL text that caused the error.
type stmtErrorLocation struct {
	LineNumber int `json:"lineNumber"`
	ColumnNumber int `json:"columnNumber"`
}
// stmtErrorFailureInfo identifies the server-side failure class (e.g. an
// exception type name).
type stmtErrorFailureInfo struct {
	Type string `json:"type"`
	// Other fields omitted
}
// Error formats the failure as "<type>: <message>", satisfying the error interface.
func (e stmtError) Error() string {
	return e.FailureInfo.Type + ": " + e.Message
}
// stmtStage describes one stage of the query execution plan; SubStages nests
// recursively under RootStage.
type stmtStage struct {
	StageID string `json:"stageId"`
	State string `json:"state"`
	Done bool `json:"done"`
	Nodes int `json:"nodes"`
	TotalSplits int `json:"totalSplits"`
	QueuedSplits int `json:"queuedSplits"`
	RunningSplits int `json:"runningSplits"`
	CompletedSplits int `json:"completedSplits"`
	UserTimeMillis int `json:"userTimeMillis"`
	CPUTimeMillis int `json:"cpuTimeMillis"`
	WallTimeMillis int `json:"wallTimeMillis"`
	ProcessedRows int `json:"processedRows"`
	ProcessedBytes int `json:"processedBytes"`
	SubStages []stmtStage `json:"subStages"`
}
// queryResponse is the JSON body returned while polling NextURI; Data rows
// are positional and align with Columns.
type queryResponse struct {
	ID string `json:"id"`
	InfoURI string `json:"infoUri"`
	PartialCancelURI string `json:"partialCancelUri"`
	NextURI string `json:"nextUri"`
	Columns []queryColumn `json:"columns"`
	Data []queryData `json:"data"`
	Stats stmtStats `json:"stats"`
	Error stmtError `json:"error"`
}
// queryColumn describes one result column; Type uses the names declared in
// the constants above.
type queryColumn struct {
	Name string `json:"name"`
	Type string `json:"type"`
	TypeSignature typeSignature `json:"typeSignature"`
}
// queryData is one row of results; element order matches queryResponse.Columns.
type queryData []interface{}
// typeSignature is Presto's structured description of a column type,
// including any parameterization of the raw type.
type typeSignature struct {
	RawType string `json:"rawType"`
	TypeArguments []interface{} `json:"typeArguments"`
	LiteralArguments []interface{} `json:"literalArguments"`
}
// infoResponse is the minimal subset of the query-info endpoint payload used
// to track a query's lifecycle state.
type infoResponse struct {
	QueryID string `json:"queryId"`
	State string `json:"state"`
}
// Query lifecycle states as reported in infoResponse.State and stmtStats.State.
const (
	QueryStateQueued = "QUEUED"
	QueryStatePlanning = "PLANNING"
	QueryStateStarting = "STARTING"
	QueryStateRunning = "RUNNING"
	QueryStateFinished = "FINISHED"
	QueryStateCanceled = "CANCELED"
	QueryStateFailed = "FAILED"
)
package counter
import (
"encoding/json"
"sort"
"github.com/marcsantiago/collections"
)
// DataMap is a counter keyed by collections.Data values.
type DataMap map[collections.Data]int
// NewDataMap copies the given hash of counts into a new DataMap and returns
// it. A nil input yields a nil DataMap.
//
// Fixes two defects in the previous version: it returned the input map
// (aliasing the caller's data) instead of the freshly built copy, and it used
// Update, which reset every count to 1 rather than preserving the originals.
func NewDataMap(hash map[collections.Data]int) DataMap {
	if hash == nil {
		return nil
	}
	nh := make(DataMap, len(hash))
	for key, count := range hash {
		nh[key] = count
	}
	return nh
}
// Get retrieves a data value from the internal map if it exists.
// When the key is missing, the returned value wraps zero and ok is false.
func (i DataMap) Get(key collections.Data) (collections.Data, bool) {
	count, ok := i[key]
	return collections.IntValue(count), ok
}
// Len returns the number of stored keys
func (i DataMap) Len() int {
	return len(i)
}
// Update updates the counter for a value or sets the value it if it does not exist
// (missing keys start from the zero value, so the first Update yields 1).
func (i DataMap) Update(key collections.Data) {
	i[key]++
}
// Set replaces a keys counter data with another integer or creates a new key with data
func (i DataMap) Set(key collections.Data, value collections.Data) {
	i[key] = value.Int()
}
// Subtract removes 1 from the counter if the key exists; missing keys are
// left untouched (no negative phantom entries are created).
func (i DataMap) Subtract(key collections.Data) {
	if count, ok := i[key]; ok {
		i[key] = count - 1
	}
}
// Delete removes the element from the internal map
func (i DataMap) Delete(key collections.Data) {
	delete(i, key)
}
// Items returns the internal map as a slice of key/value elements.
// Order is unspecified (map iteration order).
func (i DataMap) Items() []collections.Element {
	elements := make([]collections.Element, 0, len(i))
	for k, count := range i {
		elements = append(elements, collections.Element{Key: k, Value: collections.IntValue(count)})
	}
	return elements
}
// Iterate creates a channel to create an iterator for he Go range statement.
// The channel is closed once every element has been sent; the caller must
// drain it fully or the producing goroutine leaks.
func (i DataMap) Iterate() <-chan collections.Element {
	ch := make(chan collections.Element)
	go func() {
		for key, value := range i {
			ch <- collections.Element{Key: key, Value: collections.IntValue(value)}
		}
		close(ch)
	}()
	return ch
}
// MostCommon returns up to n elements ordered by descending count.
// When n <= 0 or n exceeds the number of keys, all elements are returned.
//
// Fixes an off-by-one: the previous clamp used len(i)-1, silently dropping
// one element whenever the caller asked for the full set.
func (i DataMap) MostCommon(n int) []collections.Element {
	elements := make([]collections.Element, 0, len(i))
	for key, value := range i {
		elements = append(elements, collections.Element{
			Key:   key,
			Value: collections.IntValue(value),
		})
	}
	if n <= 0 || n > len(i) {
		n = len(i)
	}
	sort.Sort(collections.ElementsByValueIntDesc(elements))
	return elements[:n]
}
// String returns the JSON string representation of the map data.
// Marshal errors are deliberately ignored; on failure the result is "".
func (i DataMap) String() string {
	b, _ := json.Marshal(i)
	return string(b)
}
package runtime
// RawEqual returns two values. The second one is true if raw equality makes
// sense for x and y. The first one returns whether x and y are raw equal.
// A mixed int/float pair is raw equal when the float holds the exact same
// integral value (see equalIntAndFloat).
func RawEqual(x, y Value) (bool, bool) {
	if x.Equals(y) {
		return true, true
	}
	// Not identical: they may still be numerically equal across int/float.
	switch x.NumberType() {
	case IntType:
		if fy, ok := y.TryFloat(); ok {
			return equalIntAndFloat(x.AsInt(), fy), true
		}
	case FloatType:
		if ny, ok := y.TryInt(); ok {
			return equalIntAndFloat(ny, x.AsFloat()), true
		}
	}
	return false, false
}
// isZero returns true if x is a number and is equal to 0.
func isZero(x Value) bool {
	if _, ok := x.iface.(int64); ok {
		return x.AsInt() == 0
	}
	if _, ok := x.iface.(float64); ok {
		return x.AsFloat() == 0
	}
	return false
}
// isPositive returns true if x is a number and is > 0.
func isPositive(x Value) bool {
	if _, ok := x.iface.(int64); ok {
		return x.AsInt() > 0
	}
	if _, ok := x.iface.(float64); ok {
		return x.AsFloat() > 0
	}
	return false
}
// numIsLessThan reports x < y for two numeric Values, dispatching on the
// int/float combination. Non-numeric operands yield false.
func numIsLessThan(x, y Value) bool {
	switch x.iface.(type) {
	case int64:
		switch y.iface.(type) {
		case int64:
			return x.AsInt() < y.AsInt()
		case float64:
			return ltIntAndFloat(x.AsInt(), y.AsFloat())
		}
	case float64:
		switch y.iface.(type) {
		case int64:
			return ltFloatAndInt(x.AsFloat(), y.AsInt())
		case float64:
			return x.AsFloat() < y.AsFloat()
		}
	}
	return false
}
// isLessThan is like numIsLessThan but the second result reports whether the
// comparison was meaningful (both operands numeric).
func isLessThan(x, y Value) (bool, bool) {
	switch x.iface.(type) {
	case int64:
		switch y.iface.(type) {
		case int64:
			return x.AsInt() < y.AsInt(), true
		case float64:
			return ltIntAndFloat(x.AsInt(), y.AsFloat()), true
		}
	case float64:
		switch y.iface.(type) {
		case int64:
			return ltFloatAndInt(x.AsFloat(), y.AsInt()), true
		case float64:
			return x.AsFloat() < y.AsFloat(), true
		}
	}
	return false, false
}
// equalIntAndFloat reports whether f is an exact integer whose value is n.
func equalIntAndFloat(n int64, f float64) bool {
	truncated := int64(f)
	return float64(truncated) == f && truncated == n
}
// eq implements Lua equality: raw equality first, then the __eq metamethod —
// but only when both operands are tables (userdata handling is still TODO).
// Operands of differing categories compare unequal without error.
func eq(t *Thread, x, y Value) (bool, *Error) {
	if res, ok := RawEqual(x, y); ok {
		return res, nil
	}
	if _, ok := x.TryTable(); ok {
		if _, ok := y.TryTable(); !ok {
			return false, nil
		}
	} else {
		// TODO: deal with UserData
		return false, nil
	}
	res, err, ok := metabin(t, "__eq", x, y)
	if ok {
		return Truth(res), err
	}
	return false, nil
}
// Lt returns whether x < y is true (and an error if it's not possible to
// compare them). Numbers compare numerically, strings lexicographically;
// otherwise the __lt metamethod is consulted before giving up.
func Lt(t *Thread, x, y Value) (bool, *Error) {
	lt, ok := isLessThan(x, y)
	if ok {
		return lt, nil
	}
	if sx, ok := x.TryString(); ok {
		if sy, ok := y.TryString(); ok {
			return sx < sy, nil
		}
	}
	res, err, ok := metabin(t, "__lt", x, y)
	if ok {
		return Truth(res), err
	}
	return false, compareError(x, y)
}
// ltIntAndFloat reports n < f; when f is an exact integer the comparison is
// done in integer space, otherwise in float space.
func ltIntAndFloat(n int64, f float64) bool {
	if fi := int64(f); float64(fi) == f {
		return n < fi
	}
	return float64(n) < f
}
// ltFloatAndInt reports f < n; when f is an exact integer the comparison is
// done in integer space, otherwise in float space.
func ltFloatAndInt(f float64, n int64) bool {
	if fi := int64(f); float64(fi) == f {
		return fi < n
	}
	return f < float64(n)
}
// leIntAndFloat reports n <= f; when f is an exact integer the comparison is
// done in integer space, otherwise in float space.
func leIntAndFloat(n int64, f float64) bool {
	if fi := int64(f); float64(fi) == f {
		return n <= fi
	}
	return float64(n) <= f
}
// leFloatAndInt reports f <= n; when f is an exact integer the comparison is
// done in integer space, otherwise in float space.
func leFloatAndInt(f float64, n int64) bool {
	if fi := int64(f); float64(fi) == f {
		return fi <= n
	}
	return f <= float64(n)
}
// le implements Lua's <= : numeric operands compare numerically (mixed
// int/float handled exactly), strings lexicographically; otherwise the __le
// metamethod is tried before reporting a comparison error.
func le(t *Thread, x, y Value) (bool, *Error) {
	switch x.NumberType() {
	case IntType:
		switch y.NumberType() {
		case IntType:
			return x.AsInt() <= y.AsInt(), nil
		case FloatType:
			return leIntAndFloat(x.AsInt(), y.AsFloat()), nil
		}
	case FloatType:
		switch y.NumberType() {
		case IntType:
			return leFloatAndInt(x.AsFloat(), y.AsInt()), nil
		case FloatType:
			return x.AsFloat() <= y.AsFloat(), nil
		}
	}
	if sx, ok := x.TryString(); ok {
		if sy, ok := y.TryString(); ok {
			return sx <= sy, nil
		}
	}
	res, err, ok := metabin(t, "__le", x, y)
	if ok {
		return Truth(res), err
	}
	return false, compareError(x, y)
}
// compareError builds the standard "attempt to compare" error for two
// incomparable values.
func compareError(x, y Value) *Error {
	return NewErrorF("attempt to compare a %s value with a %s value", x.CustomTypeName(), y.CustomTypeName())
}
package tokens
import (
"encoding/json"
"fmt"
"strconv"
alaTypes "github.com/onmax/go-alastria/types"
)
// stringInSlice reports whether a is present in list.
func stringInSlice(a string, list []string) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
// emptyPayloadField is the error format used when a mandatory field is empty.
var emptyPayloadField = "the value %s is empty and it is mandatory"
// Accepted JWT header values; the first entry of each is used as the default.
var validHeaderTypes = [...]string{"JWT"}
var validHeaderAlgorithms = [...]string{"ES256K"}
// ValidateHeader validates that the header is valid following the specification found here:
// https://github.com/alastria/alastria-identity/wiki/Artifacts-and-User-Stories-Definitions#0-artifacts-definition
// Sets default values to header.Type and header.Algorithm if they are empty.
// If header.Type or header.Algorithm are invalid also throws an error.
func ValidateHeader(header *alaTypes.Header) error {
	if header == nil {
		return fmt.Errorf("header is nil")
	}
	// If header.Type is empty, then sets a default value. Otherwise, checks if header.Type is valid.
	if header.Type == "" {
		header.Type = validHeaderTypes[0]
	} else if !stringInSlice(header.Type, validHeaderTypes[:]) {
		return fmt.Errorf("invalid Type equals to %s in header. Use: JWT", header.Type)
	}
	// If header.Algorithm is empty, then sets a default value. Otherwise, checks if header.Algorithm is valid.
	if header.Algorithm == "" {
		header.Algorithm = validHeaderAlgorithms[0]
	} else if !stringInSlice(header.Algorithm, validHeaderAlgorithms[:]) {
		return fmt.Errorf("invalid Algorithm equals to %s in header. Use: ES256K", header.Algorithm)
	}
	// TODO Check if header.KeyID is valid
	// TODO Check if header.JSONWebToken is valid
	return nil
}
// checkMandatoryStringFieldsAreNotEmpty returns an error naming the first
// field (map key) whose value is the empty string; nil when all are set.
func checkMandatoryStringFieldsAreNotEmpty(values map[string]string) error {
	for field, value := range values {
		if value == "" {
			return fmt.Errorf(emptyPayloadField, field)
		}
	}
	return nil
}
// checkMandatoryStringArrayFieldsNotEmpty returns an error naming the first
// field (map key) whose slice is empty or nil; nil when all are populated.
func checkMandatoryStringArrayFieldsNotEmpty(values map[string][]string) error {
	for field, value := range values {
		if len(value) == 0 {
			return fmt.Errorf(emptyPayloadField, field)
		}
	}
	return nil
}
// checkMandatoryStructFieldsAreNotEmpty returns an error naming the first
// field (map key) whose value is nil; nil when all are set.
func checkMandatoryStructFieldsAreNotEmpty(values map[string]interface{}) error {
	for field, value := range values {
		if value == nil {
			return fmt.Errorf(emptyPayloadField, field)
		}
	}
	return nil
}
// addDefaultValues appends each default value missing from values and returns
// the (possibly grown) slice. Existing entries are never duplicated.
func addDefaultValues(values []string, defaultValues []string) []string {
	for _, dv := range defaultValues {
		if !stringInSlice(dv, values) {
			values = append(values, dv)
		}
	}
	return values
}
// validateEnum checks that every element of values appears in validValues;
// the error message reports the offending field name and value.
func validateEnum(values []string, validValues []string, field string) error {
	for _, value := range values {
		if !stringInSlice(value, validValues) {
			return fmt.Errorf("the value %s=%s is invalid. Only the following values are accepted %s", field, value, validValues)
		}
	}
	return nil
}
// checkLevelOfAssurance validates that (*_data)["levelOfAssurance"] is
// present and numerically within [0, 3].
// NOTE(review): non-integer values such as 1.5 pass this range check even
// though the error message implies only 0, 1, 2, 3 are valid — confirm intent.
func checkLevelOfAssurance(_data *map[string]interface{}) error {
	if _data == nil {
		return fmt.Errorf("arg is nil")
	}
	data := *_data
	loaI := data["levelOfAssurance"]
	if loaI == nil {
		return fmt.Errorf("levelOfAssurance is empty")
	}
	loa, err := interfaceToInt(loaI)
	if err != nil {
		return err
	}
	if loa < 0 || loa > 3 {
		return fmt.Errorf("levelOfAssurance is invalid. Only 0, 1, 2, 3 are valid")
	}
	return nil
}
// interfaceToInt coerces a numeric-ish value (numbers, json.Number, string,
// []byte) to float64 by round-tripping through its decimal representation.
// Unsupported types produce a strconv parse error. Note the result is parsed
// with float32 precision (bitSize 32), as before.
func interfaceToInt(data interface{}) (float64, error) {
	var res string
	// Use the bound switch variable directly instead of re-asserting data in
	// every case (the previous version did data.(float64) etc. redundantly).
	switch v := data.(type) {
	case float64:
		res = strconv.FormatFloat(v, 'f', 6, 64)
	case float32:
		res = strconv.FormatFloat(float64(v), 'f', 6, 32)
	case int:
		res = strconv.FormatInt(int64(v), 10)
	case int64:
		res = strconv.FormatInt(v, 10)
	case uint:
		res = strconv.FormatUint(uint64(v), 10)
	case uint64:
		res = strconv.FormatUint(v, 10)
	case uint32:
		res = strconv.FormatUint(uint64(v), 10)
	case json.Number:
		res = v.String()
	case string:
		res = v
	case []byte:
		res = string(v)
	default:
		res = ""
	}
	val, err := strconv.ParseFloat(res, 32)
	if err != nil {
		return 0, err
	}
	return val, nil
}
package abi
import (
"math/big"
"reflect"
"github.com/ethereum/go-ethereum/common"
)
// Cached reflect.Types used by packNum/isSigned to classify values.
// NOTE(review): big_t/ubig_t and big_ts/ubig_ts are identical pairs — the
// signed/unsigned distinction is by convention only.
var big_t = reflect.TypeOf(&big.Int{})
var ubig_t = reflect.TypeOf(&big.Int{})
var byte_t = reflect.TypeOf(byte(0))
var byte_ts = reflect.TypeOf([]byte(nil))
var uint_t = reflect.TypeOf(uint(0))
var uint8_t = reflect.TypeOf(uint8(0))
var uint16_t = reflect.TypeOf(uint16(0))
var uint32_t = reflect.TypeOf(uint32(0))
var uint64_t = reflect.TypeOf(uint64(0))
var int_t = reflect.TypeOf(int(0))
var int8_t = reflect.TypeOf(int8(0))
var int16_t = reflect.TypeOf(int16(0))
var int32_t = reflect.TypeOf(int32(0))
var int64_t = reflect.TypeOf(int64(0))
var uint_ts = reflect.TypeOf([]uint(nil))
var uint8_ts = reflect.TypeOf([]uint8(nil))
var uint16_ts = reflect.TypeOf([]uint16(nil))
var uint32_ts = reflect.TypeOf([]uint32(nil))
var uint64_ts = reflect.TypeOf([]uint64(nil))
var ubig_ts = reflect.TypeOf([]*big.Int(nil))
var int_ts = reflect.TypeOf([]int(nil))
var int8_ts = reflect.TypeOf([]int8(nil))
var int16_ts = reflect.TypeOf([]int16(nil))
var int32_ts = reflect.TypeOf([]int32(nil))
var int64_ts = reflect.TypeOf([]int64(nil))
var big_ts = reflect.TypeOf([]*big.Int(nil))
// U256 will ensure unsigned 256bit on big nums, left-padding the byte
// representation to the 32-byte ABI word size.
func U256(n *big.Int) []byte {
	return common.LeftPadBytes(common.U256(n).Bytes(), 32)
}
// S256 packs n as a signed 256-bit ABI word (32 bytes, left-padded).
// For negative values the leading zero pad bytes are overwritten with 0x01 up
// to the first non-zero byte.
// NOTE(review): that is not standard two's-complement sign extension (which
// would pad with 0xff); confirm this matches what common.S256 emits.
func S256(n *big.Int) []byte {
	sint := common.S256(n)
	ret := common.LeftPadBytes(sint.Bytes(), 32)
	if sint.Cmp(common.Big0) < 0 {
		for i, b := range ret {
			if b == 0 {
				ret[i] = 1
				continue
			}
			break
		}
	}
	return ret
}
// U2U256 packs a uint64 as an unsigned 256-bit ABI word.
// (The previous comment here, "S256 will ensure signed 256bit on big nums",
// described the function above and was misplaced.)
func U2U256(n uint64) []byte {
	return U256(big.NewInt(int64(n)))
}
// S2S256 packs an int64 as a signed 256-bit ABI word.
func S2S256(n int64) []byte {
	return S256(big.NewInt(n))
}
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation.
// `to` selects the target ABI kind (UintTy or signed); unsupported reflect
// kinds return nil.
func packNum(value reflect.Value, to byte) []byte {
	switch kind := value.Kind(); kind {
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		if to == UintTy {
			return U2U256(value.Uint())
		} else {
			return S2S256(int64(value.Uint()))
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if to == UintTy {
			return U2U256(uint64(value.Int()))
		} else {
			return S2S256(value.Int())
		}
	case reflect.Ptr:
		// This only takes care of packing and casting. No type checking is done here. It should be done prior to using this function.
		if to == UintTy {
			return U256(value.Interface().(*big.Int))
		} else {
			return S256(value.Interface().(*big.Int))
		}
	}
	return nil
}
// isSigned checks whether the given reflect value is signed. This also works for slices with a number type.
// Note: big.Int types (and their slices) are always treated as signed here,
// since big_t/ubig_t are the same reflect.Type.
func isSigned(v reflect.Value) bool {
	switch v.Type() {
	case ubig_ts, big_ts, big_t, ubig_t:
		return true
	case int_ts, int8_ts, int16_ts, int32_ts, int64_ts, int_t, int8_t, int16_t, int32_t, int64_t:
		return true
	}
	return false
}
package ztype
import (
"math/bits"
zserio "github.com/woven-planet/go-zserio"
)
const (
	// maxBitNumberBits is the descriptor field width used to encode maxBitNumber.
	maxBitNumberBits = 6
	// maxBitNumberLimit is the largest delta bit count that still fits packing.
	maxBitNumberLimit = 62
)
// DeltaContext is a packing context used when writing data using delta
// packing, i.e. instead of storing all values, only stores the deltas.
type DeltaContext[T any] struct {
	// specifies if delta packing is actually used (it may be skipped if normal
	// packing is more efficient)
	isPacked bool
	// maxBitNumber specifies the number of bits needed per delta element
	maxBitNumber int
	// previousElement is the value of the previously stored element
	previousElement *uint64
	// processingStarted is set once the first element has been read/written.
	processingStarted bool
	// unpackedBitSize accumulates the total bit size of the unpacked encoding.
	unpackedBitSize int
	// firstElementBitSize is the bit size of the first (always unpacked) element.
	firstElementBitSize int
	// numElements counts the elements seen during Init.
	numElements int
}
// arrayTraitsBitsizeOf returns the bit size of an array element at the given
// bit position, delegating to the traits.
func arrayTraitsBitsizeOf[T any](arrayTraits IArrayTraits[T], bitPosition int, element T) int {
	return arrayTraits.BitSizeOf(element, bitPosition)
}
// absDiff returns |lhs - rhs| without unsigned underflow.
func absDiff(lhs, rhs uint64) uint64 {
	if lhs >= rhs {
		return lhs - rhs
	}
	return rhs - lhs
}
// Init initializes a delta context array, and calculates the needed space per element in the final array.
// It must be called once per element before encoding; it tracks the widest
// delta seen and disables packing when a delta exceeds 62 bits.
func (context *DeltaContext[T]) Init(arrayTraits IArrayTraits[T], element T) {
	context.numElements++
	context.unpackedBitSize += arrayTraitsBitsizeOf(arrayTraits, 0, element)
	if context.previousElement == nil {
		// First element: stored unpacked, becomes the delta baseline.
		elementAsUint64 := arrayTraits.AsUint64(element)
		context.previousElement = &elementAsUint64
		context.firstElementBitSize = context.unpackedBitSize
	} else if context.maxBitNumber <= maxBitNumberLimit {
		context.isPacked = true
		// Calculate the delta to the previous value, and calculate how many
		// bits are needed to store the delta.
		delta := absDiff(arrayTraits.AsUint64(element), *context.previousElement)
		maxBitNumber := bits.Len64(delta)
		if maxBitNumber > context.maxBitNumber {
			context.maxBitNumber = maxBitNumber
			// cannot store using delta packing if the 64bit range is
			// exhausted
			if maxBitNumber > maxBitNumberLimit {
				context.isPacked = false
			}
		}
		*context.previousElement = arrayTraits.AsUint64(element)
	}
}
// BitSizeOfDescriptor returns the bit size of a delta context array descriptor:
// 1 bit for the isPacked flag plus, when packed, 6 bits for maxBitNumber.
func (context *DeltaContext[T]) BitSizeOfDescriptor() int {
	context.finishInit()
	if context.isPacked {
		return 1 + maxBitNumberBits
	}
	return 1
}
// BitSizeOf returns the size of the next element in bits: the first element
// (or every element when unpacked) uses its full trait size; subsequent
// packed elements use maxBitNumber+1 bits (delta plus sign), or 0 when all
// deltas are zero.
func (context *DeltaContext[T]) BitSizeOf(arrayTraits IArrayTraits[T], bitPosition int, element T) (int, error) {
	if !context.processingStarted || !context.isPacked {
		context.processingStarted = true
		return arrayTraitsBitsizeOf(arrayTraits, bitPosition, element), nil
	}
	if context.maxBitNumber > 0 {
		return context.maxBitNumber + 1, nil
	}
	return 0, nil
}
// ReadDescriptor reads the descriptor of a delta context array: the isPacked
// flag and, when packed, the maxBitNumber field.
//
// Fixes a dropped error: the previous version assigned the error from
// reader.ReadBits but then unconditionally returned nil, silently ignoring
// read failures.
func (context *DeltaContext[T]) ReadDescriptor(reader zserio.Reader) error {
	isPacked, err := reader.ReadBool()
	if err != nil {
		return err
	}
	context.isPacked = isPacked
	if context.isPacked {
		numOfBits, err := reader.ReadBits(maxBitNumberBits)
		if err != nil {
			return err
		}
		context.maxBitNumber = int(numOfBits)
	}
	return nil
}
// Read reads the next element of an array encoded using delta contexts.
// The first element (or every element when unpacked) is read via the traits;
// later packed elements are reconstructed by adding a signed delta of
// maxBitNumber+1 bits to the previous value.
func (context *DeltaContext[T]) Read(arrayTraits IArrayTraits[T], reader zserio.Reader) (T, error) {
	if !context.processingStarted || !context.isPacked {
		context.processingStarted = true
		element, err := arrayTraits.Read(reader, 0)
		elementAsUint64 := arrayTraits.AsUint64(element)
		context.previousElement = &elementAsUint64
		return element, err
	}
	if context.maxBitNumber > 0 {
		delta, err := ReadSignedBits(reader, uint8(context.maxBitNumber+1))
		if err != nil {
			return arrayTraits.FromUint64(0), err
		}
		// Deltas may be negative; the addition wraps in uint64 space.
		*context.previousElement = uint64(int64(*context.previousElement) + delta)
	}
	value := arrayTraits.FromUint64(*context.previousElement)
	return value, nil
}
// WriteDescriptor decides between packed/unpacked encoding (finishInit) and
// writes the descriptor: the isPacked flag plus, when packed, maxBitNumber.
//
// Fixes an ignored error: the previous version discarded the error returned
// by writer.WriteBits.
func (context *DeltaContext[T]) WriteDescriptor(writer zserio.Writer) error {
	context.finishInit()
	if err := writer.WriteBool(context.isPacked); err != nil {
		return err
	}
	if context.isPacked {
		return writer.WriteBits(uint64(context.maxBitNumber), maxBitNumberBits)
	}
	return nil
}
// Write writes an element of an delta context array: the first element (or
// every element when unpacked) via the traits, later packed elements as a
// maxBitNumber+1 bit delta (wrapping uint64 subtraction encodes the sign).
// NOTE(review): the return value of arrayTraits.Write is not checked here —
// confirm whether the traits' Write can fail and should be propagated.
func (context *DeltaContext[T]) Write(arrayTraits IArrayTraits[T], writer zserio.Writer, element T) error {
	if !context.processingStarted || !context.isPacked {
		context.processingStarted = true
		context.previousElement = new(uint64)
		*context.previousElement = arrayTraits.AsUint64(element)
		arrayTraits.Write(writer, element)
	} else {
		if context.maxBitNumber > 0 {
			delta := arrayTraits.AsUint64(element) - *context.previousElement
			err := writer.WriteBits(delta, uint8(context.maxBitNumber+1))
			if err != nil {
				return err
			}
			*context.previousElement = arrayTraits.AsUint64(element)
		}
	}
	return nil
}
// finishInit decided if the array should be written packed or unpacked,
// depending on which variant is more space-efficient.
// It compares the total bit size of both encodings (including descriptors)
// and clears isPacked when packing would not save space.
func (context *DeltaContext[T]) finishInit() {
	if context.isPacked {
		deltaBitsize := 0
		if context.maxBitNumber > 0 {
			deltaBitsize = context.maxBitNumber + 1
		}
		// decide if this array should be packed or not by comparing the array
		// bit sizes of both methods. Packed is usually more efficient if the
		// the array values are not differing too much from each other.
		packedBitsizeWithDescriptor := 1 + maxBitNumberBits +
			context.firstElementBitSize + (context.numElements-1)*deltaBitsize
		unpackedBitsizeWithDescriptor := 1 + context.unpackedBitSize
		if packedBitsizeWithDescriptor >= unpackedBitsizeWithDescriptor {
			context.isPacked = false
		}
	}
}
package distance_calculator
import (
"math"
)
// Distance units accepted by Calculate.
const (
	// Unit in Meter
	UnitMeter = "METER"
	// Unit in Mile
	UnitMile = "MILE"
	// Unit in Kilometer
	UnitKilometer = "KILOMETER"
	// Unit in Nautical
	UnitNauticalMile = "NAUTICAL_MILE"
)
// Coordinate is a geographic point in decimal degrees.
type Coordinate struct {
	Latitude float64
	Longitude float64
}
// deg2rad converts decimal degrees to radians
func deg2rad(deg float64) float64 {
return deg * math.Pi / 180.0
}
// rad2deg converts radians to decimal degrees
func rad2deg(rad float64) float64 {
return (rad * 180.0) / math.Pi
}
// Calculate returns the great-circle distance between two coordinates in the
// given unit, or -1 for an unknown unit.
//
// The spherical-law-of-cosines result is first expressed in statute miles
// (arc minutes * 1.1515) and then converted.
//
// Fixes the meter conversion: the previous factor 0.00160934 was off by a
// factor of one million (1 mile = 1609.34 m, consistent with the kilometer
// factor 1.60934 below).
func Calculate(first Coordinate, second Coordinate, unit string) (dist float64) {
	theta := first.Longitude - second.Longitude
	dist = math.Sin(deg2rad(first.Latitude))*math.Sin(deg2rad(second.Latitude)) + math.Cos(deg2rad(first.Latitude))*math.Cos(deg2rad(second.Latitude))*math.Cos(deg2rad(theta))
	dist = math.Acos(dist)
	dist = rad2deg(dist)
	dist = dist * 60 * 1.1515 // statute miles
	switch unit {
	case UnitMile:
		// already in miles
	case UnitMeter:
		dist = dist * 1609.34
	case UnitKilometer:
		dist = dist * 1.60934
	case UnitNauticalMile:
		dist = dist * 0.8684
	default:
		dist = -1
	}
	return
}
// CalcMeters calculates the distance between two coordinates in Meters.
// NOTE(review): the parameter order places secondLongitude before
// secondLatitude, unlike the first pair — callers must match this exact
// order; confirm it is intentional before relying on positional arguments.
func CalcMeters(firstLatitude float64, firstLongitude float64, secondLongitude float64, secondLatitude float64) float64 {
	first := Coordinate{Latitude: firstLatitude, Longitude: firstLongitude}
	second := Coordinate{Latitude: secondLatitude, Longitude: secondLongitude}
	return Calculate(first, second, UnitMeter)
}
// CalcMiles calculates the distance between two coordinates in Miles.
// NOTE(review): secondLongitude precedes secondLatitude in the signature —
// confirm intentional; positional callers must match this order.
func CalcMiles(firstLatitude float64, firstLongitude float64, secondLongitude float64, secondLatitude float64) float64 {
	first := Coordinate{Latitude: firstLatitude, Longitude: firstLongitude}
	second := Coordinate{Latitude: secondLatitude, Longitude: secondLongitude}
	return Calculate(first, second, UnitMile)
}
// CalcKilometers calculates the distance between two coordinates in Kilometers.
// NOTE(review): secondLongitude precedes secondLatitude in the signature —
// confirm intentional; positional callers must match this order.
func CalcKilometers(firstLatitude float64, firstLongitude float64, secondLongitude float64, secondLatitude float64) float64 {
	first := Coordinate{Latitude: firstLatitude, Longitude: firstLongitude}
	second := Coordinate{Latitude: secondLatitude, Longitude: secondLongitude}
	return Calculate(first, second, UnitKilometer)
}
// CalcNauticalMiles calculates the distance between two coordinates in Nautical Miles.
// NOTE(review): secondLongitude precedes secondLatitude in the signature —
// confirm intentional; positional callers must match this order.
func CalcNauticalMiles(firstLatitude float64, firstLongitude float64, secondLongitude float64, secondLatitude float64) float64 {
	first := Coordinate{Latitude: firstLatitude, Longitude: firstLongitude}
	second := Coordinate{Latitude: secondLatitude, Longitude: secondLongitude}
	return Calculate(first, second, UnitNauticalMile)
}
package timekit
import (
"time"
)
// FirstDayOfLastYear returns first date (with 0:00 hour) from last calendar year.
func FirstDayOfLastYear(now func() time.Time) time.Time {
dt := now()
return time.Date(dt.Year()-1, 1, 1, 0, 0, 0, 0, dt.Location())
}
// FirstDayOfThisYear returns the date (with 0:00 hour) from the first date of this calendar year.
func FirstDayOfThisYear(now func() time.Time) time.Time {
dt := now()
return time.Date(dt.Year(), 1, 1, 0, 0, 0, 0, dt.Location())
}
// FirstDayOfNextYear returns date (12AM) of the first date of next calendar year.
func FirstDayOfNextYear(now func() time.Time) time.Time {
dt := now()
return time.Date(dt.Year()+1, 1, 1, 0, 0, 0, 0, dt.Location())
}
// FirstDayOfLastMonth returns the date (with 0:00 hour) of the first day from last month.
func FirstDayOfLastMonth(now func() time.Time) time.Time {
dt := now()
return time.Date(dt.Year(), dt.Month()-1, 1, 0, 0, 0, 0, dt.Location())
}
// FirstDayOfThisMonth returns the first date (with 0:00 hour) from this month.
func FirstDayOfThisMonth(now func() time.Time) time.Time {
dt := now()
return time.Date(dt.Year(), dt.Month(), 1, 0, 0, 0, 0, dt.Location())
}
// FirstDayOfNextMonth returns next months first day (in 12 AM hours).
func FirstDayOfNextMonth(now func() time.Time) time.Time {
dt := now()
return time.Date(dt.Year(), dt.Month()+1, 1, 0, 0, 0, 0, dt.Location())
}
// MidnightYesterday returns yesterday's date at 0:00 (midnight).
// time.Date normalizes Day()-1, so the 1st of a month rolls back correctly.
func MidnightYesterday(now func() time.Time) time.Time {
	dt := now()
	return time.Date(dt.Year(), dt.Month(), dt.Day()-1, 0, 0, 0, 0, dt.Location())
}

// Midnight returns today's date at 12 o'clock (or 0:00) during the night.
func Midnight(now func() time.Time) time.Time {
	dt := now()
	return time.Date(dt.Year(), dt.Month(), dt.Day(), 0, 0, 0, 0, dt.Location())
}

// MidnightTomorrow returns tomorrow's date at 12 o'clock (or 0:00) during
// the night. time.Date normalizes Day()+1 across month/year boundaries.
func MidnightTomorrow(now func() time.Time) time.Time {
	dt := now()
	return time.Date(dt.Year(), dt.Month(), dt.Day()+1, 0, 0, 0, 0, dt.Location())
}

// Noon returns today's date at 12 o'clock (12:00) during the day.
func Noon(now func() time.Time) time.Time {
	dt := now()
	return time.Date(dt.Year(), dt.Month(), dt.Day(), 12, 0, 0, 0, dt.Location())
}
// FirstDayOfLastISOWeek returns the previous week's monday date.
func FirstDayOfLastISOWeek(now func() time.Time) time.Time {
dt := now()
// iterate back to Monday
for dt.Weekday() != time.Monday {
dt = dt.AddDate(0, 0, -1)
}
dt = dt.AddDate(0, 0, -1) // Skip the current monday we are on!
// iterate ONCE AGAIN back to Monday
for dt.Weekday() != time.Monday {
dt = dt.AddDate(0, 0, -1)
}
return dt
}
// FirstDayOfThisISOWeek return monday's date of this week. Please note monday is considered the first day of the week according to ISO 8601 and not sunday (which is what is used in Canada and USA).
func FirstDayOfThisISOWeek(now func() time.Time) time.Time {
dt := now()
// iterate back to Monday
for dt.Weekday() != time.Monday {
dt = dt.AddDate(0, 0, -1)
}
return dt
}
// LastDayOfThisISOWeek return sunday's date of this week. Please note sunday is considered the last day of the week according to ISO 8601.
func LastDayOfThisISOWeek(now func() time.Time) time.Time {
dt := now()
// iterate forward to Sunday
for dt.Weekday() != time.Sunday {
dt = dt.AddDate(0, 0, 1)
}
return dt
}
// FirstDayOfNextISOWeek return date of the upcoming monday.
func FirstDayOfNextISOWeek(now func() time.Time) time.Time {
dt := now()
// iterate forward to next Monday
for dt.Weekday() != time.Monday {
dt = dt.AddDate(0, 0, 1)
}
return dt
}
// IsFirstDayOfYear returns true or false depending on whether the date inputted falls on the very first day of the year.
func IsFirstDayOfYear(dt time.Time) bool {
return dt.Day() == 1 && dt.Month() == 1
}
// GetWeekNumberFromDate will return the week number for the inputted date.
func GetWeekNumberFromDate(dt time.Time) int {
_, week := dt.ISOWeek()
return week
}
// GetFirstDateFromWeekAndYear returns the first date within the given
// calendar year whose ISO week number equals wk, scanning forward one day at
// a time from January 1st. If wk never occurs, the last stepped date is
// returned.
//
// NOTE(review): the scan uses hour 1 rather than midnight — presumably to
// sidestep DST edge cases; confirm. NewTimeStepper is assumed to iterate in
// one-day steps over [start, end).
func GetFirstDateFromWeekAndYear(wk int, year int, loc *time.Location) time.Time {
	start := time.Date(year, 1, 1, 1, 0, 0, 0, loc)
	end := time.Date(year+1, 1, 1, 1, 0, 0, 0, loc)
	ts := NewTimeStepper(start, end, 0, 0, 1, 0, 0, 0) // Step by day.
	dt := ts.Get()                                     // Get first day.
	// Please note, there may be cases where week 52 (or 53) happens in
	// January; see the docs via https://pkg.go.dev/time#Time.ISOWeek.
	// CASE 1: January 1st already lies in the requested week.
	week := GetWeekNumberFromDate(dt)
	if week == wk {
		return dt
	}
	// CASE 2: otherwise walk forward until the week number matches.
	for ts.Next() {
		dt = ts.Get()
		week := GetWeekNumberFromDate(dt)
		if week == wk {
			break
		}
	}
	return dt
}
// GetFirstDateFromMonthAndYear returns the first day in the month/year
// specified.
//
// NOTE(review): the returned time is at hour 1, not midnight — consistent
// with GetFirstDateFromWeekAndYear above but inconsistent with
// FirstDayOfThisMonth (which uses 0:00). Confirm whether 1:00 is intentional.
func GetFirstDateFromMonthAndYear(month int, year int, loc *time.Location) time.Time {
	return time.Date(year, time.Month(month), 1, 1, 0, 0, 0, loc)
}
package level
const (
	// ObjectCrossReferenceEntrySize describes the size, in bytes, of a
	// ObjectCrossReferenceEntry (presumably five 16-bit fields — confirm
	// the width of ObjectID).
	ObjectCrossReferenceEntrySize = 10
	// defaultObjectCrossReferenceEntryCount is the table size used by
	// DefaultObjectCrossReferenceTable.
	defaultObjectCrossReferenceEntryCount = 1600
)
// offMapReferencePosition is the sentinel position used for references that
// are not placed on the map; X=0xFF is the byte view of the -1 stored in
// TileX by SetTilePosition.
func offMapReferencePosition() TilePosition {
	return TilePosition{X: 0xFF, Y: 0}
}

// ObjectCrossReferenceEntry links objects and tiles.
type ObjectCrossReferenceEntry struct {
	TileX          int16
	TileY          int16
	ObjectID       ObjectID
	NextInTile     int16 // next entry on the same tile; in the reserved entry 0 this is the free-chain head
	NextTileForObj int16 // next tile occupied by the same object
}

// TilePosition returns the position for that entry. The int16 -1 stored for
// off-map entries truncates to byte 0xFF, matching offMapReferencePosition.
func (entry ObjectCrossReferenceEntry) TilePosition() TilePosition {
	return TilePosition{
		X: byte(entry.TileX),
		Y: byte(entry.TileY),
	}
}

// SetTilePosition sets the position for that entry; the off-map sentinel is
// stored as TileX = -1, TileY = 0.
func (entry *ObjectCrossReferenceEntry) SetTilePosition(pos TilePosition) {
	if pos == offMapReferencePosition() {
		entry.TileX = -1
		entry.TileY = 0
	} else {
		entry.TileX = int16(pos.X)
		entry.TileY = int16(pos.Y)
	}
}

// Reset clears the members of the entry back to their zero values.
func (entry *ObjectCrossReferenceEntry) Reset() {
	*entry = ObjectCrossReferenceEntry{}
}
// ObjectCrossReferenceTable is a list of entries.
// The first entry is reserved for internal use: its NextInTile member refers
// to the head of the single-linked free chain, and index 0 terminates the
// chain (see Allocate/Release).
type ObjectCrossReferenceTable []ObjectCrossReferenceEntry

// DefaultObjectCrossReferenceTable returns an initialized table with a
// default size, with every entry threaded onto the free chain.
func DefaultObjectCrossReferenceTable() ObjectCrossReferenceTable {
	table := make(ObjectCrossReferenceTable, defaultObjectCrossReferenceEntryCount)
	table.Reset()
	return table
}

// Reset wipes the entire table and initializes all links: each entry points
// at its successor, and the last entry links back to 0, terminating the
// free chain.
func (table ObjectCrossReferenceTable) Reset() {
	tableLen := len(table)
	for i := 0; i < tableLen; i++ {
		entry := &table[i]
		entry.Reset()
		entry.NextInTile = int16(i + 1)
	}
	if tableLen > 0 {
		table[tableLen-1].NextInTile = 0
	}
}
// Allocate attempts to reserve a free entry in the table and return its
// index, popping the head of the free chain stored in entry 0.
// Returns 0 if exhausted (or if the table is too small to hold any entry
// besides the reserved one).
func (table ObjectCrossReferenceTable) Allocate() int {
	if len(table) < 2 {
		return 0
	}
	start := &table[0]
	if start.NextInTile == 0 {
		// Free chain is empty.
		return 0
	}
	index := start.NextInTile
	entry := &table[index]
	start.NextInTile = entry.NextInTile
	entry.Reset()
	return int(index)
}

// Release frees the entry with given index, pushing it back onto the front
// of the free chain. Index 0 (the reserved entry) and out-of-range indices
// are silently ignored.
func (table ObjectCrossReferenceTable) Release(index int) {
	if (index < 1) || (index >= len(table)) {
		return
	}
	start := &table[0]
	entry := &table[index]
	entry.Reset()
	entry.NextInTile = start.NextInTile
	start.NextInTile = int16(index)
}
package jsonschema
import (
"reflect"
)
// File named in respect to https://json-schema.org/latest/json-schema-validation.html#rfc.section.6.7
// File named in respect to https://json-schema.org/latest/json-schema-validation.html#rfc.section.6.7

// Cached reflect.Type handles for the marker interfaces below, so that
// Implements checks do not rebuild them on every call.
var andAnyOfType = reflect.TypeOf((*andAnyOf)(nil)).Elem()
var anyOfType = reflect.TypeOf((*anyOf)(nil)).Elem()
var andOneOfType = reflect.TypeOf((*andOneOf)(nil)).Elem()
var oneOfType = reflect.TypeOf((*oneOf)(nil)).Elem()
var andAllOfType = reflect.TypeOf((*andAllOf)(nil)).Elem()
var allOfType = reflect.TypeOf((*allOf)(nil)).Elem()

// andAnyOf will generate the anyOf rule and retain the jsonschema rules for the struct that implements it.
// `anyOf` is used to ensure that the data must be valid against *at least one* of the cases *or more*:
//	{ "type": "number", "anyOf": [ { "multipleOf": 5 }, { "multipleOf": 3 } ]}
// In the example above, the input must be a number and can be either a multiple of 5 or 3 or both, but never neither.
type andAnyOf interface {
	AndAnyOf() []reflect.StructField
}

// anyOf will overrule all jsonschema rules for the struct that implements it.
// `anyOf` is used to ensure that the data must be valid against *at least one* of the cases *or more*:
//	{ "anyOf": [ { "type": "number", "multipleOf": 5 }, { "type": "number", "multipleOf": 3 } ] }
// In the example above, the input must be a number and can be either a multiple of 5 or 3 or both, but never neither.
type anyOf interface {
	AnyOf() []reflect.StructField
}

// andOneOf will generate the oneOf rule and retain the jsonschema rules for the struct that implements it.
// `oneOf` can be used to factor out common parts of subschema and when *only one case* must be valid:
//	{ "type": "number", "oneOf": [ { "multipleOf": 5 }, { "multipleOf": 3 } ]}
// In the example above, the input must be a number and must be either a multiple of 5 or 3, but not both and never neither.
type andOneOf interface {
	AndOneOf() []reflect.StructField
}

// oneOf will overrule all jsonschema rules for the struct that implements it.
// `oneOf` can be used to factor out common parts of subschema and when *only one case* must be valid:
//	{ "oneOf": [ { "type": "number", "multipleOf": 5 }, { "type": "number", "multipleOf": 3 } ] }
// In the example above, the input must be a number and must be either a multiple of 5 or 3, but not both and never neither.
type oneOf interface {
	OneOf() []reflect.StructField
}

// andAllOf will generate the allOf rule and retain the jsonschema rules for the struct that implements it.
// `allOf` is used to ensure that the data must be valid against *all cases*:
//	{ "type": "number", "allOf": [ { "multipleOf": 5 }, { "multipleOf": 3 } ]}
// In the example above, the input must be a number and a multiple of 5 *and* 3.
type andAllOf interface {
	AndAllOf() []reflect.StructField
}

// allOf will overrule all jsonschema rules for the struct that implements it.
// `allOf` is used to ensure that the data must be valid against *all cases*:
//	{ "allOf": [ { "type": "number", "multipleOf": 5 }, { "type": "number", "multipleOf": 3 } ] }
// In the example above, the input must be a number and a multiple of 5 *and* 3.
type allOf interface {
	AllOf() []reflect.StructField
}
// getExclusiveSubschemaForBooleanCases: when AnyOf/OneOf/AllOf is
// implemented, the jsonschema for the implementing struct is supplanted with
// exclusive anyOf/oneOf/allOf rules. Returns nil when none of the three
// interfaces is implemented. Only the first matching interface (checked in
// anyOf, oneOf, allOf order) is honored.
func (r *Reflector) getExclusiveSubschemaForBooleanCases(definitions Definitions, t reflect.Type) *Type {
	var nonNilPointer interface{}
	// getNonNilPointerTypeAndInterface (defined elsewhere in this package)
	// presumably yields a pointer type plus a callable non-nil instance —
	// confirm its contract there.
	t, nonNilPointer = getNonNilPointerTypeAndInterface(t)
	if t.Implements(anyOfType) {
		s := nonNilPointer.(anyOf).AnyOf()
		return &Type{AnyOf: r.getSubschemasForBooleanCases(definitions, s)}
	}
	if t.Implements(oneOfType) {
		s := nonNilPointer.(oneOf).OneOf()
		return &Type{OneOf: r.getSubschemasForBooleanCases(definitions, s)}
	}
	if t.Implements(allOfType) {
		s := nonNilPointer.(allOf).AllOf()
		return &Type{AllOf: r.getSubschemasForBooleanCases(definitions, s)}
	}
	return nil
}
// addSubschemasForBooleanCases appends jsonschema rules from the
// AndOneOf/AndAnyOf/AndAllOf interfaces to the already-generated schema of
// the struct that implements them. Unlike the exclusive variant above, all
// matching interfaces are applied, and a nil schema is left untouched.
func (r *Reflector) addSubschemasForBooleanCases(schema *Type, definitions Definitions, t reflect.Type) {
	if schema == nil {
		return
	}
	var nonNilPointer interface{}
	t, nonNilPointer = getNonNilPointerTypeAndInterface(t)
	if t.Implements(andAnyOfType) {
		s := nonNilPointer.(andAnyOf).AndAnyOf()
		schema.AnyOf = r.getSubschemasForBooleanCases(definitions, s)
	}
	if t.Implements(andOneOfType) {
		s := nonNilPointer.(andOneOf).AndOneOf()
		schema.OneOf = r.getSubschemasForBooleanCases(definitions, s)
	}
	if t.Implements(andAllOfType) {
		s := nonNilPointer.(andAllOf).AndAllOf()
		schema.AllOf = r.getSubschemasForBooleanCases(definitions, s)
	}
}
// getSubschemasForBooleanCases reflects each case's type into a subschema.
// A case whose Type is nil maps to the JSON-schema "null" type.
func (r *Reflector) getSubschemasForBooleanCases(definitions Definitions, s []reflect.StructField) []*Type {
	oneOfList := make([]*Type, 0)
	for _, oneType := range s {
		if oneType.Type == nil {
			oneOfList = append(oneOfList, &Type{Type: "null"})
		} else {
			oneOfList = append(oneOfList, r.reflectTypeToSchema(definitions, oneType.Type))
		}
	}
	return oneOfList
}
package bcns
import (
	"math/bits"
	"unsafe"
)
// fftCtx holds the scratch matrices used by nussbaumerFFT for one
// multiplication of 1024-coefficient polynomials: 64x64 working copies of
// the two inputs and the result, plus a 64-long temporary row.
type fftCtx struct {
	x1 [64][64]uint32
	y1 [64][64]uint32
	z1 [64][64]uint32
	t1 [64]uint32
}
// Reduction modulo p = 2^32 - 1.
// This is not a prime since 2^32-1 = (2^1+1)*(2^2+1)*(2^4+1)*(2^8+1)*(2^16+1),
// but since 2 is a unit in Z/pZ we can use it for computing FFTs in
// Z/pZ[X]/(X^(2^7)+1).
//
// Caution: we use a redundant representation where the integer 0 is
// represented both by 0 and by 2^32-1.
//
// This approach follows the description from the paper:
// Bos, Costello, Hisil, Lauter: "Fast Cryptography in Genus 2",
// EUROCRYPT 2013, LNCS 7881, pp. 194-210, Springer, 2013
// (Section 3, modular addition/subtraction).

// boolToInt converts a bool to 1 (true) or 0 (false).
//
// The previous implementation reinterpreted the bool's underlying byte via
// unsafe to avoid a branch. The plain conditional below is typically
// compiled to branch-free code anyway, drops the unsafe dependency, and
// removes the layout assumptions that init() has to guard.
func boolToInt(b bool) uint32 {
	if b {
		return 1
	}
	return 0
}
// modadd returns (a + b) mod 2^32-1 in the redundant representation.
// Writing a+b = t1*2^32 + t0, we have a+b ≡ t0 + t1 (mod 2^32-1), so the
// wraparound carry of the 32-bit addition is simply folded back in.
func modadd(a, b uint32) uint32 {
	sum := a + b
	if sum < a { // 32-bit overflow occurred: fold the carry back in
		sum++
	}
	return sum
}
// modsub returns (a - b) mod 2^32-1 in the redundant representation: when
// the subtraction borrows, one extra unit is subtracted so the wrapped value
// lands on the correct residue.
func modsub(a, b uint32) uint32 {
	diff := a - b
	if b > a { // borrow occurred
		diff--
	}
	return diff
}
// modmul returns (a * b) mod 2^32-1 in the redundant representation: the
// 64-bit product hi*2^32 + lo is reduced as hi + lo (with carry fold-back),
// since 2^32 ≡ 1 (mod 2^32-1).
func modmul(a, b uint32) uint32 {
	prod := uint64(a) * uint64(b)
	lo, hi := uint32(prod), uint32(prod>>32)
	c := lo + hi
	if c < lo { // fold the carry of the reduction step
		c++
	}
	return c
}
// modmuladd returns (c + a*b) mod 2^32-1 in the redundant representation,
// fusing the addition into the 64-bit product before the hi+lo reduction.
func modmuladd(c, a, b uint32) uint32 {
	t := uint64(a)*uint64(b) + uint64(c)
	lo, hi := uint32(t), uint32(t>>32)
	r := lo + hi
	if r < lo { // fold the carry of the reduction step
		r++
	}
	return r
}
// div2 halves a modulo 2^32-1: even values shift right directly, while odd
// values first get p = 2^32-1 added (in 64-bit space) so the shift is exact.
func div2(a uint32) uint32 {
	odd := uint64(uint32(0 - (a & 1))) // 0xFFFFFFFF when a is odd, 0 when even
	return uint32((uint64(a) + odd) >> 1)
}
// normalize maps the redundant zero 0xFFFFFFFF back to the canonical 0 and
// leaves every other residue unchanged.
func normalize(a uint32) uint32 {
	if a == 0xFFFFFFFF {
		return 0
	}
	return a
}
func moddiv2(a uint32) (c uint32) {
c = normalize(a)
c = div2(c)
return
}
// neg returns the additive inverse of a modulo 2^32-1 in canonical form:
// (2^32-1) - a, with the redundant zero result mapped back to 0 (so the
// negation of 0 stays 0).
func neg(a uint32) uint32 {
	c := 0xFFFFFFFF - a
	if c == 0xFFFFFFFF { // a was 0: keep the canonical zero
		return 0
	}
	return c
}
// reverse returns x with the order of its 32 bits reversed.
//
// The previous implementation hand-rolled the "Bit Twiddling Hacks" swap
// ladder (https://graphics.stanford.edu/~seander/bithacks.html); the
// standard library's math/bits.Reverse32 computes the same value and is
// lowered to a dedicated instruction on architectures that have one.
func reverse(x uint32) uint32 {
	return bits.Reverse32(x)
}
// Nussbaumer approach, see:
// H.J. Nussbaumer: Fast polynomial transform algorithms for digital
// convolution. IEEE Trans. on Acoustics, Speech and Signal Processing,
// 28(2):205-215, 1980.
// We followed the description from Knuth:
// D.E. Knuth: Seminumerical Algorithms. The Art of Computer Programming,
// Addison-Wesley, 3rd edition, 1997, exercise 4.6.4.59.

// naive computes the schoolbook negacyclic convolution z = x*y modulo
// (X^n + 1), coefficients reduced mod 2^32-1. For output index i, A sums
// the products x[j]*y[i-j] with j <= i, and B sums the wrapped products
// x[j]*y[n+i-j] for j > i, which pick up a minus sign because X^n ≡ -1 —
// hence the final modsub(A, B). Only the first n slots of the 64-long
// arrays are used.
func naive(z, x, y *[64]uint32, n uint) {
	for i := uint(0); i < n; i++ {
		B := uint32(0)
		A := modmul(x[0], y[i])
		var j uint
		for j = 1; j <= i; j++ {
			A = modmuladd(A, x[j], y[i-j])
		}
		for k := uint(1); j < n; j, k = j+1, k+1 {
			B = modmuladd(B, x[j], y[n-k])
		}
		z[i] = modsub(A, B)
	}
}
// nussbaumerFFT multiplies the 1024-coefficient polynomials x and y modulo
// (X^1024 + 1), coefficients in Z/(2^32-1)Z, writing the result to z.
// Each input is folded into a 32x32 coefficient matrix (duplicated to 64
// rows), a radix-2 polynomial transform is run over the rows — where
// multiplication by a root of unity is a negacyclic rotation of the 32-long
// row — then 64 naive negacyclic convolutions are performed, the transform
// is inverted (halving at each of the 6 levels, 2^6 = 64), and the rows are
// recombined into the 1024 outputs.
//
// NOTE(review): ctx is pure scratch space whose previous contents are
// overwritten; a single ctx must not be shared between concurrent calls.
func nussbaumerFFT(z []uint32, x []uint32, y []uint32, ctx *fftCtx) {
	X1 := &ctx.x1
	Y1 := &ctx.y1
	// Fold the 1024 coefficients into 32 columns of 32, duplicated into the
	// upper 32 rows so the transform has 64 rows to butterfly over.
	for i := 0; i < 32; i++ {
		for j := 0; j < 32; j++ {
			X1[i][j] = x[32*j+i]
			X1[i+32][j] = x[32*j+i]
			Y1[i][j] = y[32*j+i]
			Y1[i+32][j] = y[32*j+i]
		}
	}
	Z1 := &ctx.z1
	T1 := &ctx.t1
	// Forward transform over both inputs.
	for j := 4; j >= 0; j-- {
		jj := uint(j)
		for i := uint32(0); i < (1 << (5 - jj)); i++ {
			ssr := reverse(i)
			for t := uint32(0); t < (1 << jj); t++ {
				s := i
				sr := ssr >> (32 - 5 + jj)
				sr <<= jj
				s <<= (jj + 1)
				// X_i(w) = X_i(w) + w^kX_l(w) can be computed as
				// X_ij = X_ij - X_l(j-k+r) for 0 <= j < k
				// X_ij = X_ij + X_l(j-k) for k <= j < r
				I := s + t
				L := s + t + (1 << jj)
				// T1 = row L rotated by sr, negating the wrapped part
				// (negacyclic rotation = multiplication by w^sr).
				for a := sr; a < 32; a++ {
					T1[a] = X1[L][a-sr]
				}
				for a := uint32(0); a < sr; a++ {
					T1[a] = neg(X1[L][32+a-sr])
				}
				for a := 0; a < 32; a++ {
					X1[L][a] = modsub(X1[I][a], T1[a])
					X1[I][a] = modadd(X1[I][a], T1[a])
				}
				for a := sr; a < 32; a++ {
					T1[a] = Y1[L][a-sr]
				}
				for a := uint32(0); a < sr; a++ {
					T1[a] = neg(Y1[L][32+a-sr])
				}
				for a := 0; a < 32; a++ {
					Y1[L][a] = modsub(Y1[I][a], T1[a])
					Y1[I][a] = modadd(Y1[I][a], T1[a])
				}
			}
		}
	}
	// Pointwise step: 64 small negacyclic convolutions.
	for i := 0; i < 2*32; i++ {
		naive(&Z1[i], &X1[i], &Y1[i], 32)
	}
	// Inverse transform, dividing by 2 at each of the 6 levels.
	for j := uint32(0); j <= 5; j++ {
		for i := uint32(0); i < (1 << (5 - j)); i++ {
			ssr := reverse(i)
			for t := uint32(0); t < (1 << j); t++ {
				s := i
				sr := (ssr >> (32 - 5 + j))
				sr <<= j
				s <<= (j + 1)
				A := s + t
				B := s + t + (1 << j)
				for a := 0; a < 32; a++ {
					T1[a] = modsub(Z1[A][a], Z1[B][a])
					T1[a] = moddiv2(T1[a])
					Z1[A][a] = modadd(Z1[A][a], Z1[B][a])
					Z1[A][a] = moddiv2(Z1[A][a])
				}
				// w^{-(r/m)s'} (Z_{s+t}(w)-Z_{s+t+2^j}(w))
				for a := uint32(0); a < 32-sr; a++ {
					Z1[B][a] = T1[a+sr]
				}
				for a := 32 - sr; a < 32; a++ {
					Z1[B][a] = neg(T1[a-(32-sr)])
				}
			}
		}
	}
	// Recombine the row pairs into the 1024 output coefficients.
	for i := 0; i < 32; i++ {
		z[i] = modsub(Z1[i][0], Z1[32+i][32-1])
		for j := 1; j < 32; j++ {
			z[32*j+i] = modadd(Z1[i][j], Z1[32+i][j-1])
		}
	}
}
// multiply computes z = x*y in Z/(2^32-1)Z[X]/(X^1024+1), using the context
// as scratch space for the FFT.
func (f *fftCtx) multiply(z, x, y *[1024]uint32) {
	nussbaumerFFT(z[:], x[:], y[:], f)
}

// add computes the coefficient-wise sum z = x + y modulo 2^32-1.
func (f *fftCtx) add(z, x, y *[1024]uint32) {
	for i := 0; i < 1024; i++ {
		z[i] = modadd(x[i], y[i])
	}
}
// init validates the memory-layout assumptions behind boolToInt's unsafe
// bool-to-byte reinterpretation, so a future runtime/compiler change fails
// loudly at startup instead of silently corrupting the arithmetic.
func init() {
	// Validate the assumptions made regarding bool/unsafe, in case the
	// developers decide to torment me further in the future.
	if unsafe.Sizeof(true) != 1 {
		panic("sizeof(bool) != 1")
	}
	if boolToInt(true) != 1 || boolToInt(false) != 0 {
		panic("bool primitive type data format is unexpected.")
	}
}
package sorts
import (
"math/bits"
"sort"
)
// Just some useful utility functions. Most of them
// wind up being inlined.

// lte reports whether element a <= element b under the ordering defined by
// s, derived from the single Less primitive: a <= b iff !(b < a).
func lte(s sort.Interface, a, b int) bool {
	return !s.Less(b, a)
}
// log2 returns floor(log2(s)), or -1 when s is 0.
// bits.Len64(s) equals 64 - bits.LeadingZeros64(s), so this matches the
// original leading-zeros formulation exactly.
func log2(s uint64) int {
	return bits.Len64(s) - 1
}
// median returns the midpoint of a and b; the unsigned intermediate keeps
// the sum from overflowing for large non-negative indices.
func median(a, b int) int {
	return int(uint(a+b) / 2)
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
func rotateRightwards(sd sort.Interface, left, right int) {
for ; right > left; right-- {
sd.Swap(right, right-1)
}
}
func rotateLeftwards(sd sort.Interface, left, right int) {
for ; left < right; left++ {
sd.Swap(left, left+1)
}
}
func reverse(sd sort.Interface, left, right int) {
for left < right {
sd.Swap(left, right)
left++
right--
}
}
// swap the blocks at [a:a+count] and [b:b+count]
// Assumes ranges do not overlap.
func swapRange(sd sort.Interface, a, b, count int) {
for i := 0; i < count; i++ {
sd.Swap(a+i, b+i)
}
}
// It would be great if the compiler were able to inline these.
// Just saying.

// blockSwap swaps the internal blocks [left:mid] and [mid:right+1] using
// the smallest number of swap operations. Assumes that left < mid < right.
// It repeatedly swaps the shorter block with the adjacent part of the
// longer one, shrinking the problem until both pieces are equal-length
// (akin to the Gries–Mills rotation scheme).
func blockSwap(sd sort.Interface, left, mid, right int) {
	i := mid - left      // length of the left block
	j := right - mid + 1 // length of the right block
	for i != j {
		if i > j {
			swapRange(sd, mid-i, mid, j)
			i -= j
		} else {
			swapRange(sd, mid-i, mid+j-i, i)
			j -= i
		}
	}
	// Both pieces now have length i; one final range swap finishes.
	swapRange(sd, mid-i, mid, i)
}
// smallestGreaterThan binary-searches [left, right] for the lowest index
// whose element compares strictly greater than the element at index ref —
// an "upper bound" search. When every element is <= the reference, the
// result converges to right itself.
//
// NOTE(review): presumably requires sd[left:right+1] to be sorted ascending
// — confirm at the call sites.
func smallestGreaterThan(sd sort.Interface, left, right, ref int) int {
	for left < right {
		m := median(left, right)
		if lte(sd, m, ref) {
			left = m + 1
		} else {
			right = m
		}
	}
	return left
}
// largestLessThan binary-searches [left, right] and returns the lowest
// index whose element is NOT less than the element at index ref — i.e. one
// past the run of smaller elements (a "lower bound" search, searching up to
// right+1 when all elements are smaller).
//
// NOTE(review): despite the name, the returned index points at the first
// element >= the reference, not at the largest smaller element itself —
// confirm the intended contract at the call sites. Presumably the range
// must be sorted ascending.
func largestLessThan(sd sort.Interface, left, right, ref int) int {
	right++
	for left < right {
		m := median(left, right)
		if sd.Less(m, ref) {
			left = m + 1
		} else {
			right = m
		}
	}
	return left
}
package listsandstrings
import (
"fmt"
"math/rand"
"time"
)
// Implement the following sorting algorithms:
// Selection sort,
// Insertion sort,
// Merge sort,
// Quick sort,
// Stooge Sort.
// Check Wikipedia for descriptions.

// exercise17 times each sorting algorithm on a fresh random slice of 30000
// ints and prints the elapsed wall-clock duration for each. The stooge-sort
// run is disabled (dead "if false" branch) because it is far too slow at
// this input size.
func exercise17() {
	size := 30000
	slice := makeRandIntSlice(size)
	fmt.Println("number of elements for each sort: ", size)
	start := time.Now()
	selectionSort(slice)
	duration := time.Since(start)
	fmt.Println("selectionSort took ", duration)
	slice = makeRandIntSlice(size)
	start = time.Now()
	insertionSort(slice)
	duration = time.Since(start)
	fmt.Println("insertionSort took ", duration)
	slice = makeRandIntSlice(size)
	start = time.Now()
	mergeSort(slice)
	duration = time.Since(start)
	fmt.Println("mergeSort took ", duration)
	slice = makeRandIntSlice(size)
	start = time.Now()
	quickSort(slice, 0, len(slice)-1)
	duration = time.Since(start)
	fmt.Println("quickSort took ", duration)
	if false { // too slow, don't run
		slice = makeRandIntSlice(size)
		start = time.Now()
		stoogeSort(slice, 0, len(slice)-1)
		duration = time.Since(start)
		fmt.Println("stoogeSort took (elements^(1/2))", duration)
	}
}
// stoogeSort sorts slice[low:high+1] in ascending order using the
// (deliberately inefficient) stooge sort recursion, and returns the slice
// for convenience. Adapted from the Wikipedia description.
func stoogeSort(slice []int, low, high int) []int {
	if slice[low] > slice[high] {
		slice[low], slice[high] = slice[high], slice[low]
	}
	if high-low+1 > 2 {
		third := (high - low + 1) / 3
		stoogeSort(slice, low, high-third)
		stoogeSort(slice, low+third, high)
		stoogeSort(slice, low, high-third)
	}
	return slice
}
// quickSort sorts slice[low:high+1] in ascending order using Lomuto-style
// partitioning with the last element as pivot. Adapted from the Wikipedia
// pseudo-code.
func quickSort(slice []int, low, high int) {
	if low >= high {
		return
	}
	p := quickPartition(slice, low, high)
	quickSort(slice, low, p-1)
	quickSort(slice, p+1, high)
}

// quickPartition moves every element smaller than the pivot (slice[high])
// before index i, places the pivot at i, and returns i.
func quickPartition(slice []int, low, high int) int {
	pivot := slice[high]
	i := low
	for j := low; j <= high; j++ {
		if slice[j] < pivot {
			slice[i], slice[j] = slice[j], slice[i]
			i++
		}
	}
	slice[i], slice[high] = slice[high], slice[i]
	return i
}
// mergeSort sorts slice in place in ascending order via top-down merge
// sort: recursively sort copies of the two halves, then merge them back.
// Adapted from the GeeksforGeeks Python version.
func mergeSort(slice []int) {
	if len(slice) < 2 {
		return
	}
	mid := len(slice) / 2
	left := append([]int(nil), slice[:mid]...)
	right := append([]int(nil), slice[mid:]...)
	mergeSort(left)
	mergeSort(right)
	// Merge the two sorted halves back into slice; when one half is
	// exhausted the remainder of the other is drained.
	i, j := 0, 0
	for k := range slice {
		if i < len(left) && (j >= len(right) || left[i] < right[j]) {
			slice[k] = left[i]
			i++
		} else {
			slice[k] = right[j]
			j++
		}
	}
}
// insertionSort sorts slice in place in ascending order.
//
// Note: the previous implementation was actually a bubble sort in disguise
// — n full passes of adjacent swaps from the tail — and always performed
// O(n^2) comparisons. This is a true insertion sort: each element is
// shifted left into its position within the already-sorted prefix, so
// nearly-sorted input finishes in close to linear time. The observable
// result (the slice sorted ascending, in place) is unchanged.
func insertionSort(slice []int) {
	for i := 1; i < len(slice); i++ {
		key := slice[i]
		j := i - 1
		for j >= 0 && slice[j] > key {
			slice[j+1] = slice[j]
			j--
		}
		slice[j+1] = key
	}
}
// selectionSort sorts slice in place in ascending order: for each position
// it locates the minimum of the unsorted suffix and swaps it in. O(n^2)
// comparisons, at most n-1 swaps.
func selectionSort(slice []int) {
	for i := range slice {
		m := i
		for j := i + 1; j < len(slice); j++ {
			if slice[j] < slice[m] {
				m = j
			}
		}
		if m != i {
			slice[i], slice[m] = slice[m], slice[i]
		}
	}
}
// makeRandIntSlice returns a slice of the given length filled with random
// ints in [0, length), seeded from the current wall-clock time.
func makeRandIntSlice(length int) []int {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]int, length)
	for i := range out {
		out[i] = rng.Intn(length)
	}
	return out
}
package types
import (
"fmt"
"strings"
)
// guardState encodes which low-level operation (Read or Write) a DelayLine
// expects next, so that misuse of the Read/Write pair panics immediately.
type guardState bool

const (
	readyToRead  guardState = false
	readyToWrite guardState = true
)

// DelayLine is a circular buffer that delays a signal by a fixed number of
// samples: each value written reappears exactly len(data) steps later.
type DelayLine struct {
	data  []float32
	head  uintptr
	guard guardState
}

// NewDelayLine creates a DelayLine delaying by the given number of samples;
// it panics for sizes below 1.
func NewDelayLine(size int) *DelayLine {
	if size < 1 {
		panic("DelayLine cannot have length < 1")
	}
	return &DelayLine{data: make([]float32, size)}
}

// Step pops the oldest sample off the line and pushes in its replacement.
func (d *DelayLine) Step(in float32) float32 {
	out := d.Read()
	d.Write(in)
	return out
}

// Read only outputs the value at the end of the delay line, and must be
// followed by a Write.
func (d *DelayLine) Read() float32 {
	if d.guard != readyToRead {
		panic("Cannot read twice without writing")
	}
	d.guard = readyToWrite
	return d.data[d.head]
}

// Write writes the input value to the buffer and must be preceded by a Read.
func (d *DelayLine) Write(in float32) {
	if d.guard != readyToWrite {
		panic("Cannot write without reading first")
	}
	d.data[d.head] = in
	d.head = (d.head + 1) % uintptr(len(d.data))
	d.guard = readyToRead
}

// String renders the buffer contents with a caret marking the current
// read/write position.
func (d DelayLine) String() string {
	var b strings.Builder
	fmt.Fprintf(&b, "%v\n", d.data)
	b.WriteString(" ")
	for i := range d.data {
		if d.head == uintptr(i) {
			b.WriteString("^ ")
		} else {
			b.WriteString(" ")
		}
	}
	return b.String()
}
// SampleDelayLine is a delay line that operates on samples (with multiple
// channels), keeping one independent DelayLine per channel.
type SampleDelayLine struct {
	bufs  []*DelayLine
	guard guardState
}

// NewSampleDelayLine creates a new SampleDelayLine with the given channel
// count and per-channel delay length.
func NewSampleDelayLine(channels, len int) *SampleDelayLine {
	bufs := make([]*DelayLine, channels)
	for i := range bufs {
		bufs[i] = NewDelayLine(len)
	}
	return &SampleDelayLine{
		bufs: bufs,
	}
}

// Step returns the output sample and writes the next input sample back to
// the front of the delay line. Panics when the channel counts differ.
func (d *SampleDelayLine) Step(in Sample) (out Sample) {
	if in.Channels() != len(d.bufs) {
		panic("Input sample must have same number of channels as SampleDelayLine")
	}
	out = NewSample(len(d.bufs))
	for i := range d.bufs {
		out[i] = d.bufs[i].Step(in[i])
	}
	return out
}

// Read only outputs the sample at the end of the delay line, and must be
// followed by a Write.
//
// NOTE(review): after a Read, the per-channel guards are left in the
// write-expected state, so interleaving Step with Read/Write on the same
// instance will trip a per-channel guard panic — confirm the intended
// usage pattern.
func (d *SampleDelayLine) Read() (out Sample) {
	if d.guard != readyToRead {
		panic("Cannot read twice without writing")
	}
	out = NewSample(len(d.bufs))
	for i := range d.bufs {
		out[i] = d.bufs[i].Read()
	}
	d.guard = readyToWrite
	return out
}

// Write writes the input sample to the buffer and must be preceded by a
// Read. Panics when the channel counts differ.
func (d *SampleDelayLine) Write(in Sample) {
	if in.Channels() != len(d.bufs) {
		panic("Input sample must have same number of channels as SampleDelayLine")
	}
	if d.guard != readyToWrite {
		panic("Cannot write without reading first")
	}
	for i := range d.bufs {
		d.bufs[i].Write(in[i])
	}
	d.guard = readyToRead
}
package snapio
/* This file handles codes related to the generic Buffer object. Much of this
code is just type switches. It'll be obsolete if Guppy is ever ported to Go 2.
After writing and testing this, I realized that the code can be made much
simpler if Buffer just has an array of interface{} values instead of different
arrays for each type, so if someone does a rewrite of this, just do that.
*/
import (
"encoding/binary"
"fmt"
"io"
"reflect"
"unsafe"
)
type Buffer struct {
byteOrder binary.ByteOrder
varType map[string]string
index map[string]int
isRead map[string]bool
f32 [][]float32
f64 [][]float64
v32 [][][3]float32
v64 [][][3]float64
u32 [][]uint32
u64 [][]uint64
}
// NewBuffer creates a buffer object which can read files with the given Header.
func NewBuffer(hd Header) (*Buffer, error) {
return newBuffer(hd.ByteOrder(), hd.Names(), hd.Types())
}
// newBuffer returns a Buffer object that can read a set of variables with the
// specified types ("f32", "f64", "u32", "u64", "v32", "v64" for floats, uints,
// and 3-vectors with 32- and 64-bit widths, respectively). The byte order of
// the files this buffer will be used to read needs to also be specified.
// Variable names cannot be used more than once, and "id" must be specified
// and it must be "u32" or "u64".
func newBuffer(
byteOrder binary.ByteOrder, varNames, varTypes []string,
) (*Buffer, error) {
buf := &Buffer{
byteOrder: byteOrder, varType: map[string]string{ },
index: map[string]int{ }, isRead: map[string]bool{ },
}
for i, name := range varNames {
if _, ok := buf.varType[name]; ok {
return nil, fmt.Errorf(
"The property name '%s' is used more than once.", name,
)
} else if name == "id" && varTypes[i] != "u32" && varTypes[i] != "u64" {
return nil, fmt.Errorf(
"'id' is associated with '%s', which is not an integer type.",
varTypes[i],
)
}
buf.varType[name] = varTypes[i]
buf.isRead[name] = false
switch varTypes[i] {
case "f32":
buf.f32 = append(buf.f32, []float32{ })
buf.index[name] = len(buf.f32) - 1
case "f64":
buf.f64 = append(buf.f64, []float64{ })
buf.index[name] = len(buf.f64) - 1
case "v32":
buf.v32 = append(buf.v32, [][3]float32{ })
buf.index[name] = len(buf.v32) - 1
case "v64":
buf.v64 = append(buf.v64, [][3]float64{ })
buf.index[name] = len(buf.v64) - 1
case "u32":
buf.u32 = append(buf.u32, []uint32{ })
buf.index[name] = len(buf.u32) - 1
case "u64":
buf.u64 = append(buf.u64, []uint64{ })
buf.index[name] = len(buf.u64) - 1
default:
return nil, fmt.Errorf("'%s' is not a valid type. Only 'f32', 'f64', 'v32', 'v64', 'u32', and 'u64' are valid.", varTypes[i])
}
}
if _, ok := buf.varType["id"]; !ok {
return nil, fmt.Errorf("No 'id' property was specified.")
}
return buf, nil
}
// Reset resets a buffer so that a new file can be read into it. This allows
// informative internal errors to be thrown.
func (buf *Buffer) Reset() {
for name := range buf.isRead {
buf.isRead[name] = false
}
}
// read reads the data associated with a given variable name to Buffer. n values
// are read.
func (buf *Buffer) read(rd io.Reader, name string, n int) error {
varType, ok := buf.varType[name]
if !ok {
return fmt.Errorf("The property name '%s' hasn't been registered to the file.", name)
}
if buf.isRead[name] {
return fmt.Errorf("The property name '%s' is being read multiple times without a call to Reset().", name)
}
i := buf.index[name]
var err error
switch varType {
case "f32":
buf.f32[i], _ = expand(buf.f32[i], n).([]float32)
err = buf.readPrimitive(rd, buf.f32[i])
case "f64":
buf.f64[i], _ = expand(buf.f64[i], n).([]float64)
err = buf.readPrimitive(rd, buf.f64[i])
case "u32":
buf.u32[i], _ = expand(buf.u32[i], n).([]uint32)
err = buf.readPrimitive(rd, buf.u32[i])
case "u64":
buf.u64[i], _ = expand(buf.u64[i], n).([]uint64)
err = buf.readPrimitive(rd, buf.u64[i])
case "v32":
buf.v32[i], _ = expand(buf.v32[i], n).([][3]float32)
err = buf.readPrimitive(rd, buf.v32[i])
case "v64":
buf.v64[i], _ = expand(buf.v64[i], n).([][3]float64)
err = buf.readPrimitive(rd, buf.v64[i])
default:
return fmt.Errorf("'%s' is not a valid type. Only 'f32', 'f64', 'v32', 'v64', 'u32', and 'u64' are valid.", varType)
}
buf.isRead[name] = true
return err
}
// readPrimitive reads data from a reader into x, an interface around an array.
// Supported types are []float32, []float64, []uint32, []uint64, [][3]float32,
// [][3]float64. Returns an error if given an unsupported type or an I/O error.
func (buf *Buffer) readPrimitive(rd io.Reader, x interface{}) error {
var err error
switch xx := x.(type) {
case []float32: err = binary.Read(rd, buf.byteOrder, xx)
case []float64: err = binary.Read(rd, buf.byteOrder, xx)
case []uint32: err = binary.Read(rd, buf.byteOrder, xx)
case []uint64: err = binary.Read(rd, buf.byteOrder, xx)
case [][3]float32:
// This is done this way because binary.Read does a bunch of heap
// allocations when used on [][3]float32 arrays.
hd := *(*reflect.SliceHeader)(unsafe.Pointer(&xx))
hd.Len *= 3
hd.Cap *= 3
f32x := *(*[]float32)(unsafe.Pointer(&hd))
err = binary.Read(rd, buf.byteOrder, f32x)
hd.Len /= 3
hd.Cap /= 3
case [][3]float64:
// This is done this way because binary.Read does a bunch of heap
// allocations when used on [][3]float32 arrays.
hd := *(*reflect.SliceHeader)(unsafe.Pointer(&xx))
hd.Len *= 3
hd.Cap *= 3
f64x := *(*[]float64)(unsafe.Pointer(&hd))
err = binary.Read(rd, buf.byteOrder, f64x)
hd.Len /= 3
hd.Cap /= 3
default:
panic("(Supposedly) impossible type configuration")
}
return err
}
// expand expands an array to have size n.
func expand(x interface{}, n int) interface{} {
switch xx := x.(type) {
case []float32:
m := len(xx)
if m < n { xx = append(xx, make([]float32, n-m)...) }
return xx[:n]
case []float64:
m := len(xx)
if m < n { xx = append(xx, make([]float64, n-m)...) }
return xx[:n]
case [][3]float32:
m := len(xx)
if m < n { xx = append(xx, make([][3]float32, n-m)...) }
return xx[:n]
case [][3]float64:
m := len(xx)
if m < n { xx = append(xx, make([][3]float64, n-m)...) }
return xx[:n]
case []uint32:
m := len(xx)
if m < n { xx = append(xx, make([]uint32, n-m)...) }
return xx[:n]
case []uint64:
m := len(xx)
if m < n { xx = append(xx, make([]uint64, n-m)...) }
return xx[:n]
}
panic("(Supposedly) impossible type configuration.")
}
// Get returns an interface pointing to the slice associated with a given
// variable name.
func (buf *Buffer) Get(name string) (interface{}, error) {
varType, ok := buf.varType[name]
if !ok {
return nil, fmt.Errorf("'%s' is not a recognized variable name.", name)
} else if !buf.isRead[name] {
return nil, fmt.Errorf("'%s' has not been read.", name)
}
idx := buf.index[name]
switch varType {
case "f32": return buf.f32[idx], nil
case "f64": return buf.f64[idx], nil
case "v32": return buf.v32[idx], nil
case "v64": return buf.v64[idx], nil
case "u32": return buf.u32[idx], nil
case "u64": return buf.u64[idx], nil
}
panic("(Supposedly) impossible type configuration")
} | lib/snapio/buffer.go | 0.609408 | 0.433262 | buffer.go | starcoder |
package buffer
import (
"math"
xmath2 "github.com/drakos74/go-ex-machina/xmath"
)
// Ring is a fixed-size circular buffer that retains the last pushed
// float64 elements, overwriting the oldest one once full.
type Ring struct {
	index  int       // next write position, always in [0, len(values))
	count  int       // total number of pushes performed so far
	values []float64 // fixed backing storage
}

// Size returns the number of non-nil elements within the ring.
func (r *Ring) Size() int {
	if r.index == r.count {
		// The write cursor has never wrapped, so only count slots are filled.
		return r.count
	}
	return len(r.values)
}

// NewRing creates a new ring with the given buffer size.
func NewRing(size int) *Ring {
	return &Ring{values: make([]float64, size)}
}

// Push adds an element to the ring, overwriting the oldest one once full.
func (r *Ring) Push(v float64) {
	r.values[r.index] = v
	r.index = r.next(r.index)
	r.count++
}

// next returns the write position that follows index, wrapping around.
func (r *Ring) next(index int) int {
	return (index + 1) % len(r.values)
}

// Get returns an ordered slice of the ring elements.
func (r *Ring) Get() []float64 {
	out := make([]float64, len(r.values))
	for i := range out {
		pos := i
		if r.count > len(r.values) {
			// The buffer has wrapped: start reading at the oldest slot.
			pos = r.next(r.index - 1 + i)
		}
		out[i] = r.values[pos]
	}
	return out
}
// Aggregate folds every slot of the ring with process (including any
// zero-valued slots that were never pushed) and divides the result by the
// buffer length, i.e. it returns the mean of the processed values.
func (r *Ring) Aggregate(process Func) float64 {
	s := 0.0
	for i := 0; i < len(r.values); i++ {
		s = process(s, r.values[i])
	}
	return s / float64(len(r.values))
}
// Transform is an operation acting on a bucket and returning a value.
// It is used to extract the relevant bucket metric, without the need to
// make repeated iterations.
type Transform func(bucket *Bucket) interface{}

// BucketRing is a fixed-size circular buffer that retains the last
// pushed *Bucket elements, overwriting the oldest one once full.
type BucketRing struct {
	index  int       // next write position, always in [0, len(values))
	count  int       // total number of pushes performed so far
	values []*Bucket // fixed backing storage
}

// Size returns the number of non-nil elements within the ring.
func (r *BucketRing) Size() int {
	if r.index == r.count {
		// The write cursor has never wrapped, so only count slots are filled.
		return r.count
	}
	return len(r.values)
}

// NewBucketRing creates a new ring with the given buffer size.
func NewBucketRing(size int) *BucketRing {
	return &BucketRing{values: make([]*Bucket, size)}
}

// Push adds an element to the ring, overwriting the oldest one once full.
func (r *BucketRing) Push(v *Bucket) {
	r.values[r.index] = v
	r.index = r.next(r.index)
	r.count++
}

// next returns the write position that follows index, wrapping around.
func (r *BucketRing) next(index int) int {
	return (index + 1) % len(r.values)
}

// Get applies transform to each populated bucket and returns the results
// as an ordered slice.
func (r *BucketRing) Get(transform Transform) []interface{} {
	n := len(r.values)
	if r.count < n {
		n = r.count
	}
	out := make([]interface{}, n)
	for i := 0; i < n; i++ {
		pos := i
		if r.count > n {
			// The buffer has wrapped: start reading at the oldest slot.
			pos = r.next(r.index - 1 + i)
		}
		out[i] = transform(r.values[pos])
	}
	return out
}
// Func folds a new value v into an accumulator p.
type Func func(p, v float64) float64

// Sum accumulates the plain sum of the values.
func Sum(s, v float64) float64 {
	return s + v
}

// Pow returns a Func that accumulates the sum of values raised to the
// power p.
func Pow(p float64) Func {
	return func(s, v float64) float64 {
		raised := math.Pow(v, p)
		return s + raised
	}
}
// VectorRing is a temporary cache of vectors
// it re-uses a slice of vectors (matrix) and keeps track of the starting index.
// In that sense it s effectively a ring.
// A major differentiating factor is the (+1) logic,
// where the last element is handled separately.
type VectorRing struct {
idx int
mem xmath2.Matrix
}
func NewVectorRing(n int) *VectorRing {
return &VectorRing{
mem: xmath2.Mat(n),
}
}
func NewSplitVectorRing(n int) *VectorRing {
return &VectorRing{
mem: xmath2.Mat(n + 1),
}
}
// Push adds an element to the window.
func (w *VectorRing) Push(v xmath2.Vector) (xmath2.Matrix, bool) {
w.mem[w.idx%len(w.mem)] = v
w.idx++
if w.isReady() {
batch := w.batch()
return batch, true
}
return nil, false
}
// isReady returns true if we completed the batch requirements.
func (w *VectorRing) isReady() bool {
return w.idx >= len(w.mem)
}
// batch returns the current batch.
func (w VectorRing) batch() xmath2.Matrix {
m := xmath2.Mat(len(w.mem))
for i := 0; i < len(w.mem); i++ {
ii := w.next(i)
m[i] = w.mem[ii]
}
return m
}
func (w VectorRing) Copy() VectorRing {
m := w.batch()
return VectorRing{
idx: w.idx,
mem: m,
}
}
func (w VectorRing) next(i int) int {
return (w.idx + i) % len(w.mem)
}
func (w VectorRing) Size() int {
return len(w.mem)
} | xmath/buffer/ring.go | 0.862163 | 0.617599 | ring.go | starcoder |
package ui
import (
	"image"
	"math/rand"
	"sync"
	"time"

	"gioui.org/io/event"
	"gioui.org/io/key"
	"gioui.org/layout"
	"gioui.org/op"
)
// blockID identifies one of the seven tetromino shapes.
type blockID uint8

// The seven shapes, named after the letter each one resembles.
const (
	I blockID = iota
	J
	L
	O
	S
	T
	Z
)

// blockRotation encodes a block's orientation in quarter turns.
type blockRotation uint8

// clockwise block rotations.
const (
	block0 blockRotation = iota
	block90
	block180
	block270
)
// block is one falling piece: its shape data, texture, and current
// position and rotation on the grid.
type block struct {
	KeyMap  func(string) int // maps a key name to a game action (moveLeft, dropHard, ...)
	Texture texture          // texture applied to the block's cells
	ready   bool             // whether or not the block has been laid out at least once
	id      blockID          // which of the seven shapes this block is
	pos     image.Point      // position on the grid, including padding
	rot     blockRotation    // current orientation
	data    [][]texture      // cell layout of the shape, including padding
	width   int              // width in cells, without padding
	height  int              // height in cells, without padding
}
// Next returns the rotation one quarter turn clockwise.
func (a blockRotation) Next() blockRotation {
	return (a + 1) % 4
}

// Prev returns the rotation one quarter turn counter-clockwise; adding 3
// modulo 4 avoids unsigned underflow.
func (a blockRotation) Prev() blockRotation {
	return (a + 3) % 4
}
// NextGradient advances a gradient texture by one rotation step. It
// recovers the gradient index relative to its anchor (textures at or
// above gradientNWT use the gradientNWT anchor, others gradientNT),
// increments it modulo 4, and re-encodes it against the same anchor.
// NOTE(review): assumes gradients are laid out in groups of four starting
// at the anchors, texturePatternBits apart — confirm against the texture
// constants.
func (a blockRotation) NextGradient(t texture) texture {
	anchor := gradientNT
	if t >= gradientNWT {
		anchor = gradientNWT
	}
	g := (t.gradient() - anchor) >> texturePatternBits
	g = (g + 1) % 4
	return g<<texturePatternBits + anchor
}
// Dims returns the full dimensions of the block,
// including its padding.
func (b *block) Dims() (fullWidth, fullHeight int) {
	return len(b.data[0]), len(b.data)
}

// Init sets the block's data to the one at blocks index idx, applying
// texture t while preserving the configured key mapping.
func (b *block) Init(idx blockID, t texture) {
	km := b.KeyMap
	*b = blocks[idx]
	b.KeyMap = km
	b.Texture = t
}
// seedOnce guards the one-time seeding of the global math/rand source.
// Re-seeding with the wall clock on every call (the previous behavior)
// can hand out the same "random" shape repeatedly when calls land within
// the clock's resolution, and per-call Seed is a known anti-pattern.
var seedOnce sync.Once

// InitRandom sets the block's data to a randomly chosen shape, applying
// texture t.
func (b *block) InitRandom(t texture) {
	seedOnce.Do(func() {
		rand.Seed(time.Now().UnixNano())
	})
	idx := rand.Intn(len(blocks))
	b.Init(blockID(idx), t)
}
// ID returns the shape identifier of the block.
func (b *block) ID() blockID {
	return b.id
}

// Width returns the block's width, without padding.
func (b *block) Width() int {
	return b.width
}

// Height returns the block's height, without padding.
func (b *block) Height() int {
	return b.height
}

// Pos returns the current block position on the grid,
// including its padding. The Y coordinate is clamped to 0 because the
// block may start partially above the grid.
func (b *block) Pos() image.Point {
	return image.Pt(b.pos.X, max(0, b.pos.Y))
}
// MoveDown moves the block one line down and returns
// whether or not it was possible. On failure the block is restored and
// re-drawn at its old position; on success it is left cleared (callers
// are expected to re-draw it).
func (b *block) MoveDown(g *grid) (ok bool) {
	if !b.ready {
		// Consider that the move is successful while getting ready.
		return true
	}
	// Clear the block from the grid, try the move, and revert on collision.
	b.layout(g, true)
	b.pos.Y++
	ok = b.check(g)
	if !ok {
		b.pos.Y--
		b.layout(g, false)
	}
	return
}
// walk visits every non-transparent cell of the block under its current
// rotation and calls fn with the rotated coordinates and the cell's
// effective texture. Iteration stops as soon as fn returns true.
func (b *block) walk(fn func(x, y int, t texture) bool) {
	xn, yn := b.Dims()
	isGradient := b.Texture.gradient() != uniformT
	for y := 0; y < yn; y++ {
		for x := 0; x < xn; x++ {
			xx, yy := x, y
			var t texture
			// Pick the source cell according to the rotation; quarter
			// rotations (90/270) also swap the emitted coordinates.
			switch b.rot {
			case block0:
				t = b.data[y][x]
			case block90:
				t = b.data[yn-1-y][x]
				xx, yy = y, x
			case block180:
				t = b.data[yn-1-y][xn-1-x]
			case block270:
				t = b.data[y][xn-1-x]
				xx, yy = y, x
			}
			// Skip transparent textures.
			if t == transparentT {
				continue
			}
			if isGradient {
				if t.gradient() != uniformT {
					// Rotate the gradient direction once per quarter turn.
					switch b.rot {
					case block0:
					case block90:
						t = b.rot.NextGradient(t)
					case block180:
						t = b.rot.NextGradient(t)
						t = b.rot.NextGradient(t)
					case block270:
						t = b.rot.NextGradient(t)
						t = b.rot.NextGradient(t)
						t = b.rot.NextGradient(t)
					}
				}
				t |= b.Texture.color()
			} else {
				t = b.Texture
			}
			if fn(xx, yy, t) {
				return
			}
		}
	}
}
// check reports whether or not all non transparent textures
// of the block do not collide with anything on the grid.
func (b *block) check(g *grid) (ok bool) {
	ok = true
	b.walk(func(x, y int, t texture) bool {
		// A cell collides when it sits above the grid (negative row) or
		// the grid already holds a non-transparent texture there.
		if b.pos.Y+y < 0 || g.Get(b.pos.X+x, b.pos.Y+y) != transparentT {
			ok = false
			return true // stop walking at the first collision
		}
		return false
	})
	return
}
// init positions the block for the first time. It may fail (return
// false) when the spawn position is already occupied.
func (b *block) init(gtx layout.Context, g *grid) (ok bool) {
	if b.ready {
		return true
	}
	// First attempt at positioning the block.
	b.ready = true
	// Make a block starts mid width.
	cols := g.Size().X
	b.pos.X = (cols - b.Width()) / 2
	b.pos.Y = 1 // the grid's first line is hidden.
	// Skip first empty lines so that the block gets displayed at the top edge.
	var skip int
	b.walk(func(x, y int, t texture) bool {
		if skip == y {
			// Stop as soon as the current row holds a visible cell.
			return t != transparentT
		}
		skip = y
		return false
	})
	b.pos.Y -= skip
	return b.check(g)
}
// layout draws (clear=false) or clears (clear=true) the block's cells on
// the grid at its current position.
func (b *block) layout(g *grid, clear bool) {
	b.walk(func(x, y int, t texture) bool {
		if clear {
			t = transparentT
		}
		// Rows above the grid (negative Y) are not drawn.
		if b.pos.Y+y >= 0 {
			g.Set(b.pos.X+x, b.pos.Y+y, t)
		}
		return false
	})
}
// update applies the queued key events to the block: horizontal moves,
// rotations, and soft/hard drops. It returns the number of soft drops
// performed and ok=false when the block could not move down (it landed).
func (b *block) update(evs []event.Event, g *grid) (softDrops int, ok bool) {
	if len(evs) == 0 {
		// Nothing to do: just keep the block drawn.
		b.layout(g, false)
		return 0, true
	}
	for _, ev := range evs {
		e, k := ev.(key.Event)
		// You get one event for a key press and one for its release, ignore the first one.
		if !k || e.State != key.Release {
			continue
		}
		switch b.KeyMap(e.Name) {
		case moveLeft:
			// Clear, try the move, revert on collision, re-draw.
			b.layout(g, true)
			b.pos.X--
			if !b.check(g) {
				b.pos.X++
			}
			b.layout(g, false)
		case moveRight:
			b.layout(g, true)
			b.pos.X++
			if !b.check(g) {
				b.pos.X--
			}
			b.layout(g, false)
		case dropHard:
			// Fall until the block lands; the bare return yields ok=false.
			for b.MoveDown(g) {
				softDrops++
			}
			return
		case dropSoft:
			if !b.MoveDown(g) {
				return
			}
			softDrops++
		case rotateLeft:
			// Clear, try the rotation, revert on collision, re-draw.
			rot := b.rot
			b.layout(g, true)
			b.rot = b.rot.Prev()
			if !b.check(g) {
				b.rot = rot
			}
			b.layout(g, false)
		case rotateRight:
			rot := b.rot
			b.layout(g, true)
			b.rot = b.rot.Next()
			if !b.check(g) {
				b.rot = rot
			}
			b.layout(g, false)
		}
	}
	return softDrops, true
}
func (b *block) Layout(gtx layout.Context, g *grid, update func(int), over func()) layout.Dimensions {
if !b.init(gtx, g) {
over()
op.InvalidateOp{}.Add(gtx.Ops)
return layout.Dimensions{}
}
if softDrops, ok := b.update(gtx.Queue.Events(b), g); !ok {
// The user could not move the block down: update the game loop.
update(softDrops)
op.InvalidateOp{}.Add(gtx.Ops)
return layout.Dimensions{}
}
xn, yn := b.Dims()
cell := g.CellSize()
size := image.Point{
X: xn * cell.X,
Y: yn * cell.Y,
}
return layout.Dimensions{Size: size}
} | blocks/internal/ui/block.go | 0.636918 | 0.446495 | block.go | starcoder |
package hex
// HexByteToString maps every byte value to its two-character lowercase
// hexadecimal representation ("00" through "ff"). The table is computed
// once at package initialization instead of being spelled out literally.
var HexByteToString = func() (table [256]string) {
	const digits = "0123456789abcdef"
	for i := 0; i < 256; i++ {
		table[i] = string([]byte{digits[i>>4], digits[i&0x0f]})
	}
	return table
}()
package gofun
// Unzippable is the interface for unzipping.
type Unzippable interface {
	// Unzip creates two Zippables where two elements from the two results
	// are contained in a pair from the Unzippable. fail must be a failure
	// Zippable, used by implementations when an element is not a pair.
	Unzip(fail Zippable) (Zippable, Zippable)
}
// UnzippableOrElse returns x if x is Unzippable, otherwise y.
func UnzippableOrElse(x interface{}, y Unzippable) Unzippable {
	if z, isOk := x.(Unzippable); isOk {
		return z
	}
	return y
}
// Unzip splits a Some holding a *Pair into two Somes holding its halves.
// Any other case yields None twice. Note the fail argument is ignored
// here — None serves as this type's failure value (presumably by design;
// other implementations use fail).
func (xs *Option) Unzip(fail Zippable) (Zippable, Zippable) {
	if xs.IsSome() {
		p, isOk := xs.Get().(*Pair)
		if isOk {
			return Some(p.First), Some(p.Second)
		} else {
			return None(), None()
		}
	} else {
		return None(), None()
	}
}
// Unzip splits a Right holding a *Pair into two Rights holding its
// halves. A Left is propagated to both results; a Right that does not
// hold a *Pair yields the provided failure Zippable twice.
func (xs *Either) Unzip(fail Zippable) (Zippable, Zippable) {
	if xs.IsRight() {
		p, isOk := xs.GetRight().(*Pair)
		if isOk {
			return Right(p.First), Right(p.Second)
		} else {
			return fail, fail
		}
	} else {
		return Left(xs.GetLeft()), Left(xs.GetLeft())
	}
}
// Unzip splits a list of *Pair elements into two lists holding the first
// and second components respectively. Elements that are not *Pair are
// skipped. The fail argument is unused; the empty list (Nil) is the
// failure value here.
func (xs *List) Unzip(fail Zippable) (Zippable, Zippable) {
	var ys *List = Nil()
	var prev1 *List = nil // last cons cell appended to ys
	var zs *List = Nil()
	var prev2 *List = nil // last cons cell appended to zs
	for l := xs; l.IsCons(); l = l.Tail() {
		p, isOk := l.Head().(*Pair)
		if isOk {
			l2 := Cons(p.First, Nil())
			l3 := Cons(p.Second, Nil())
			// Append in place by linking the previous tail so that both
			// result lists are built in a single forward pass.
			if prev1 != nil {
				prev1.SetTail(l2)
			} else {
				ys = l2
			}
			if prev2 != nil {
				prev2.SetTail(l3)
			} else {
				zs = l3
			}
			prev1 = l2
			prev2 = l3
		}
	}
	return ys, zs
}
// Unzip splits a slice of *Pair elements into two slices holding the
// first and second components; non-pair elements are skipped. The fail
// argument is unused — an empty slice is the failure value here.
func (xs InterfaceSlice) Unzip(fail Zippable) (Zippable, Zippable) {
	firsts := make([]interface{}, 0, len(xs))
	seconds := make([]interface{}, 0, len(xs))
	for _, x := range xs {
		if p, isOk := x.(*Pair); isOk {
			firsts = append(firsts, p.First)
			seconds = append(seconds, p.Second)
		}
	}
	return InterfaceSlice(firsts), InterfaceSlice(seconds)
}
func (xs InterfacePairFunction) Unzip(fail Zippable) (Zippable, Zippable) {
f := InterfacePairFunction(func(x interface{}) interface{} {
p, isOk := xs(x).(*Pair)
if isOk {
return p.First
} else {
return x
}
})
g := InterfacePairFunction(func(x interface{}) interface{} {
p, isOk := xs(x).(*Pair)
if isOk {
return p.Second
} else {
return x
}
})
return f, g
} | unzippable.go | 0.677261 | 0.454533 | unzippable.go | starcoder |
package steganography
import (
"bytes"
"errors"
"fmt"
"github.com/stegoer/server/gqlgen"
"github.com/stegoer/server/pkg/util"
)
const (
	// metadataLength is the serialized Metadata size in bytes:
	// 8 (length) + 1 (lsbUsed) + 4 (boolean flags).
	metadataLength       = 13
	metadataBinaryLength = metadataLength * util.BitLength
	// metadataPixelOffset is the pixel index where the metadata starts.
	metadataPixelOffset = 0
	// metadataLsbPos is the number of least significant bits used to
	// store the metadata itself.
	metadataLsbPos byte = 1
	// metadataDistributionDivisor: metadata is written to consecutive
	// pixels (divisor 1), never spread across the image.
	metadataDistributionDivisor = 1
)
// Metadata represents information which was used to encode data into an image.
type Metadata struct {
	length           uint64 // payload length in bytes
	lsbUsed          byte   // least significant bits used per channel
	red              bool   // whether the red channel carries data
	green            bool   // whether the green channel carries data
	blue             bool   // whether the blue channel carries data
	evenDistribution bool   // whether data is spread evenly across the image
}
// GetBinaryLength returns the expected binary length (in bits) of the
// data represented.
func (md Metadata) GetBinaryLength() uint64 {
	return md.length * util.BitLength
}

// GetChannel returns the model.Channel represented by the Metadata's
// red/green/blue flags.
func (md Metadata) GetChannel() util.Channel {
	switch {
	case md.red && md.green && md.blue:
		return util.ChannelRedGreenBlue
	case md.red && md.green && !md.blue:
		return util.ChannelRedGreen
	case md.red && !md.green && md.blue:
		return util.ChannelRedBlue
	case md.red && !md.green && !md.blue:
		return util.ChannelRed
	case !md.red && md.green && md.blue:
		return util.ChannelGreenBlue
	case !md.red && md.green && !md.blue:
		return util.ChannelGreen
	case !md.red && !md.green && md.blue:
		return util.ChannelBlue
	default:
		// All three flags false; fall back to all channels.
		// should be unreachable
		return util.ChannelRedGreenBlue
	}
}

// PixelsNeeded returns needed pixels for encoding data: total bits
// divided by bits-per-channel and by the number of channels used.
func (md Metadata) PixelsNeeded() uint64 {
	return md.GetBinaryLength() / uint64(
		md.lsbUsed) / uint64(md.GetChannel().Count())
}
// ToByteArr serializes the Metadata: 8 bytes of payload length, one byte
// of LSB count, then four boolean bytes (red, green, blue,
// evenDistribution) — metadataLength bytes in total.
func (md Metadata) ToByteArr() []byte {
	result := util.Uint64ToBytes(md.length)
	result = append(result, md.lsbUsed)
	result = append(
		result,
		[]byte{
			util.BoolToBit(md.red),
			util.BoolToBit(md.green),
			util.BoolToBit(md.blue),
			util.BoolToBit(md.evenDistribution),
		}...,
	)
	return result
}
// GetDistributionDivisor calculates the pixel-step divisor used to
// spread the encoded data over the image. When even distribution is off,
// or the image is too small to spread the data, the divisor is 1
// (consecutive pixels).
func (md Metadata) GetDistributionDivisor(imageData util.ImageData) int {
	if md.evenDistribution {
		pixelsAvailable := imageData.PixelCount() - pixelDataOffset
		if divisor := int(pixelsAvailable / md.PixelsNeeded()); divisor > 0 {
			return divisor
		}
	}
	return 1
}
// EncodeIntoImageData writes the serialized Metadata into the image's
// first pixels, using one LSB on all three channels with no spreading.
func (md Metadata) EncodeIntoImageData(imageData util.ImageData) {
	SetNRGBAValues(
		imageData,
		md.ToByteArr(),
		metadataPixelOffset,
		metadataLsbPos,
		util.ChannelRedGreenBlue,
		metadataDistributionDivisor,
	)
}
// MetadataFromEncodeInput creates Metadata from generated.EncodeImageInput
// and the length (in bytes) of the message to encode.
func MetadataFromEncodeInput(
	input gqlgen.EncodeImageInput,
	messageLength int,
) Metadata {
	return Metadata{
		length:           uint64(messageLength),
		lsbUsed:          byte(input.LsbUsed),
		red:              input.Channel.IncludesRed(),
		green:            input.Channel.IncludesGreen(),
		blue:             input.Channel.IncludesBlue(),
		evenDistribution: input.EvenDistribution,
	}
}
// MetadataFromBinaryBuffer creates a new Metadata from bytes.Buffer,
// validating the serialized length, the LSB count, and the boolean
// flag bytes before deserializing.
func MetadataFromBinaryBuffer(binaryBuffer *bytes.Buffer) (*Metadata, error) {
	byteSlice, err := util.BinaryBufferToBytes(binaryBuffer)
	if err != nil {
		return nil, fmt.Errorf("metadata: %w", err)
	}
	if len(byteSlice) != metadataLength {
		return nil, errors.New(
			"metadata: buffer length does not match expected metadata length",
		)
	}
	// Byte 8 holds the number of least significant bits used.
	if !ValidateLSB(byteSlice[8]) {
		return nil, fmt.Errorf(
			"metadata: invalid number of least significant bits: %d",
			byteSlice[8],
		)
	}
	// Bytes 9-12 are boolean flags and must each be 0 or 1.
	for _, idx := range getBoolIndices() {
		if !zeroOrOne(byteSlice[idx]) {
			return nil, fmt.Errorf(
				"metadata: invalid boolean byte: %d",
				byteSlice[idx],
			)
		}
	}
	return &Metadata{
		length:           util.BytesToUint64(byteSlice[0:8]),
		lsbUsed:          byteSlice[8],
		red:              util.BitToBool(byteSlice[9]),
		green:            util.BitToBool(byteSlice[10]),
		blue:             util.BitToBool(byteSlice[11]),
		evenDistribution: util.BitToBool(byteSlice[12]),
	}, nil
}
// MetadataFromImageData reads the metadata bits back from the image's
// first pixels (mirroring EncodeIntoImageData) and deserializes them.
func MetadataFromImageData(imageData util.ImageData) (*Metadata, error) {
	binaryBuffer, err := GetNRGBAValues(
		imageData,
		metadataPixelOffset,
		metadataLsbPos,
		util.ChannelRedGreenBlue,
		metadataDistributionDivisor,
		metadataBinaryLength,
	)
	if err != nil {
		return nil, err
	}
	return MetadataFromBinaryBuffer(binaryBuffer)
}
// getBoolIndices returns the byte offsets of the four boolean flags
// (red, green, blue, evenDistribution) within the serialized metadata.
func getBoolIndices() []byte {
	indices := []byte{9, 10, 11, 12}
	return indices
}

// zeroOrOne reports whether b is a valid boolean byte (0 or 1).
func zeroOrOne(b byte) bool {
	return b < 2
}
package object
import (
"math"
"github.com/carlosroman/aun-otra-ray-tracer/go/internal/ray"
)
// cylinder is a unit-radius cylinder around the y axis in object space,
// truncated to the open interval (minimum, maximum) on y and optionally
// closed with end caps.
type cylinder struct {
	obj
	minimum, maximum float64 // y-axis truncation bounds
	closed           bool    // whether end caps are rendered
}
// checkCap reports whether the ray at parameter t hits a cylinder end
// cap: the intersection point must lie within the unit radius, i.e.
// x² + z² <= 1.
func checkCap(r ray.Ray, t float64) bool {
	x := r.Origin().GetX() + t*r.Direction().GetX()
	z := r.Origin().GetZ() + t*r.Direction().GetZ()
	// x*x is cheaper and at least as accurate as math.Pow(x, 2).
	return x*x+z*z <= 1
}
// LocalIntersect returns the intersections of ray r with the cylinder in
// object space, including cap hits when the cylinder is closed.
//
// Note the quadratic coefficients are scaled: a holds twice the textbook
// value, so disc = b² - 2*a*cc equals the usual b² - 4ac and the roots
// reduce to (-b ± sqrt(disc)) / a.
func (c cylinder) LocalIntersect(r ray.Ray) (xs Intersections) {
	a := 2 * (math.Pow(r.Direction().GetX(), 2) + math.Pow(r.Direction().GetZ(), 2))
	if a < epsilon {
		// The ray is (nearly) parallel to the y axis: only caps can be hit.
		return c.intersectCaps(r, xs)
	}
	b := 2 * (r.Origin().GetX()*r.Direction().GetX() +
		r.Origin().GetZ()*r.Direction().GetZ())
	cc := math.Pow(r.Origin().GetX(), 2) +
		math.Pow(r.Origin().GetZ(), 2) - 1
	disc := math.Pow(b, 2) - 2*a*cc
	if disc < 0 {
		// Negative discriminant: the ray misses the infinite cylinder.
		return xs
	}
	sqrtDisc := math.Sqrt(disc) / a
	x := -b / a
	t0 := x - sqrtDisc
	t1 := x + sqrtDisc
	// Ensure t0 <= t1 so the nearer hit is considered first.
	if t0 > t1 {
		t := t0
		t0 = t1
		t1 = t
	}
	// Keep each root only when its hit point lies strictly between the
	// truncation planes on y.
	y0 := r.Origin().GetY() + t0*r.Direction().GetY()
	if (c.minimum < y0) && (y0 < c.maximum) {
		xs = append(xs, Intersection{
			T:   t0,
			Obj: &c,
		})
	}
	y1 := r.Origin().GetY() + t1*r.Direction().GetY()
	if (c.minimum < y1) && (y1 < c.maximum) {
		xs = append(xs, Intersection{
			T:   t1,
			Obj: &c,
		})
	}
	return c.intersectCaps(r, xs)
}
// intersectCaps appends intersections with the end caps to xs. Caps only
// matter when the cylinder is closed and the ray is not (nearly)
// parallel to the caps' plane.
func (c cylinder) intersectCaps(r ray.Ray, xs Intersections) Intersections {
	if !c.closed || math.Abs(r.Direction().GetY()) <= epsilon {
		return xs
	}
	// t at which the ray crosses the lower cap plane y = c.minimum.
	t0 := (c.minimum - r.Origin().GetY()) / r.Direction().GetY()
	if checkCap(r, t0) {
		xs = append(xs, Intersection{
			T:   t0,
			Obj: &c,
		})
	}
	// t at which the ray crosses the upper cap plane y = c.maximum.
	t1 := (c.maximum - r.Origin().GetY()) / r.Direction().GetY()
	if checkCap(r, t1) {
		xs = append(xs, Intersection{
			T:   t1,
			Obj: &c,
		})
	}
	return xs
}
// LocalNormalAt returns the object-space normal at worldPoint: straight
// up or down when the point lies on an end cap, otherwise radially
// outward from the y axis.
func (c cylinder) LocalNormalAt(worldPoint ray.Vector, hit Intersection) ray.Vector {
	// Squared distance of the point from the y axis.
	dist := math.Pow(worldPoint.GetX(), 2) + math.Pow(worldPoint.GetZ(), 2)
	if dist < 1 && worldPoint.GetY() >= (c.maximum-epsilon) {
		return ray.NewVec(0, 1, 0)
	}
	if dist < 1 && worldPoint.GetY() <= (c.minimum+epsilon) {
		return ray.NewVec(0, -1, 0)
	}
	return ray.NewVec(worldPoint.GetX(), 0, worldPoint.GetZ())
}
func DefaultCylinder() Object {
return NewCylinder(math.Inf(-1), math.Inf(1), false)
}
func NewCylinder(minimum, maximum float64, closed bool) Object {
c := cylinder{
minimum: minimum,
maximum: maximum,
closed: closed,
}
_ = c.SetTransform(ray.DefaultIdentityMatrix())
c.m = DefaultMaterial()
return &c
} | go/internal/object/cylinder.go | 0.833019 | 0.432483 | cylinder.go | starcoder |
package ermahgerd
import (
"fmt"
"regexp"
"strings"
)
// beginNotWords matches a run of non-word characters at the start of a word.
const beginNotWords string = `^\W+`

// endNotWords matches a run of non-word characters at the end of a word.
const endNotWords string = `\W+$`
// replace rewrites s in place, substituting every match of regex with
// replaceWith.
func replace(regex, replaceWith string, s *string) {
	*s = regexp.MustCompile(regex).ReplaceAllString(*s, replaceWith)
}
// removeDuplicates collapses consecutive runs of the same letter in word
// into a single occurrence. (strings.Map is used because Go lacks
// lookahead regular expressions.)
func removeDuplicates(word string) string {
	prev := rune(0)
	return strings.Map(func(r rune) rune {
		if r == prev {
			// Returning a negative value drops the repeated letter.
			return -1
		}
		prev = r
		return r
	}, word)
}
// parse converts a single upper-case word to the ERMAHGERD lexicon.
func parse(word string) string {
	// Word is too short to translate.
	if len(word) < 2 {
		return word
	}
	// Common words that already have a direct translation.
	switch word {
	case "AWESOME":
		return "ERSUM"
	case "BANANA":
		return "BERNERNER"
	case "BAYOU":
		return "BERU"
	case "FAVORITE", "FAVOURITE":
		return "FRAVRIT"
	case "GOOSEBUMPS":
		return "GERSBERMS"
	case "LONG":
		return "LERNG"
	case "MY":
		return "MAH"
	case "THE":
		return "DA"
	case "THEY":
		return "DEY"
	case "WE'RE":
		return "WER"
	case "YOU":
		return "U"
	case "YOU'RE":
		return "YER"
	}
	original := word
	// Remove vowels that occur at the end of the word.
	// Only for words whose length is greater than 2 to prevent single character words.
	if len(original) > 2 {
		replace(`[AEIOU]$`, "", &word)
	}
	// Reduce duplicate letters.
	word = removeDuplicates(word)
	// Reduce consecutive vowels (and Y) to just one.
	replace(`[AEIOUY]{2,}`, "E", &word)
	// Retain a single 'Y'.
	replace(`Y{2,}`, "Y", &word)
	// DOWN -> DERN
	replace(`OW`, "ER", &word)
	// PANCAKES -> PERNKERKS
	replace(`AKES`, "ERKS", &word)
	// Replace vowels (and Y) with ER.
	replace(`[AEIOUY]`, "ER", &word)
	// Other conversions that are direct.
	replace(`ERH`, "ER", &word)
	replace(`MER`, "MAH", &word)
	replace(`ERNG`, "IN", &word)
	replace(`ERPERD`, "ERPED", &word)
	replace(`MAHM`, "MERM", &word)
	// If the word begins with Y, retain it. (Was the magic number 89.)
	if original[0] == 'Y' {
		word = "Y" + word
	}
	// Reduce any duplicate letters (again).
	word = removeDuplicates(word)
	// Words originally ending in LOW keep their trailing O.
	r := regexp.MustCompile(`LOW$`)
	l := regexp.MustCompile(`LER$`)
	if r.MatchString(original) && l.MatchString(word) {
		replace(`LER`, "LO", &word)
	}
	return word
}
/*
Public function that will convert the provided sentence to an
ERMAHGERD lexicon
*/
func Gert(sentence string) string {
var translatedWords []string
prefix := regexp.MustCompile(beginNotWords)
suffix := regexp.MustCompile(endNotWords)
sentence = strings.ToUpper(sentence)
words := strings.Split(sentence, " ")
for _, word := range words {
wordCopy := word
replace(beginNotWords, "", &word)
replace(endNotWords, "", &word)
if len(word) > 0 {
// Ensure that we do not lose out on any non alphabet character
// (exclamation, question marks etc)
beginString := prefix.FindAllString(wordCopy, 1)
endString := suffix.FindAllString(wordCopy, 1)
word = parse(word)
if beginString != nil {
word = beginString[0] + word
}
if endString != nil {
word = word + endString[0]
}
} else {
word = parse(word)
}
translatedWords = append(translatedWords, word)
}
return strings.Join(translatedWords, " ")
} | ermahgerd.go | 0.723016 | 0.401629 | ermahgerd.go | starcoder |
package main
import (
"bufio"
"errors"
"log"
"strings"
"time"
)
// measure defines the information needed to analyse the data passed
// to the reader.
type measure struct {
	start             string          // substring marking the start of the task to measure
	end               string          // substring marking the end of the task to measure
	timeForm          string          // time format to look for (passed to interval.updateStart/updateEnd)
	startPositionTime int             // byte offset where the timestamp starts within a line
	endPositionTime   int             // byte offset where the timestamp ends within a line
	reader            *strings.Reader // the data to measure
}
// Intervals scans the reader data line by line and computes the average
// duration of the complete intervals found, together with their count.
func (m measure) Intervals() (avg time.Duration, count int) {
	s := bufio.NewScanner(m.reader)
	s.Split(bufio.ScanLines)
	var i interval
	var err error
	var ints intervals
	for s.Scan() {
		if i, err = m.line(s.Text(), i); err != nil {
			// Inconsistent line sequence: discard the partial interval.
			i = interval{}
		} else if i.isReady() {
			// Both endpoints found: record the interval and start over.
			i.duration = i.end.Sub(i.start)
			ints = append(ints, i)
			i = interval{}
		}
	}
	avg = ints.avg()
	count = len(ints)
	return
}
// line scans a line with respect to an interval. It returns an updated
// interval object and an error if the line scanned is inconsistent with
// the interval passed by parameter (e.g. two starts without an end).
func (m measure) line(line string, i interval) (interval, error) {
	if m.isStart(line) {
		if i.hasStart {
			// A second start before the previous interval ended.
			return i, errors.New("inconsistent interval")
		}
		if err := i.updateStart(m.timeForm, line[m.startPositionTime:m.endPositionTime]); err != nil {
			log.Println(err)
			return i, errors.New("unable to update start time in interval")
		}
		// NOTE(review): endLine is set in the start branch here — should
		// this be a startLine field instead? Confirm intent.
		i.endLine = line
	} else if m.isEnd(line) && i.hasStart {
		if i.hasEnd {
			return i, errors.New("inconsistent interval")
		}
		if err := i.updateEnd(m.timeForm, line[m.startPositionTime:m.endPositionTime]); err != nil {
			log.Println(err)
			return i, errors.New("unable to update end time in interval")
		}
		i.endLine = line
	}
	return i, nil
}
// isStart tells you if a line is the begining of a task.
func (m measure) isStart(line string) bool {
return strings.Contains(line, m.start)
}
// isEnd tells you if a line is the end of a task.
func (m measure) isEnd(line string) bool {
return strings.Contains(line, m.end)
} | measure.go | 0.660063 | 0.463262 | measure.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.