code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package scheduler
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/kennydo/automatic-light-controller/lib"
)
// LocationConfig describes the coordinates of a geographical location and the timezone in that location.
type LocationConfig struct {
	// Timezone is decoded from an IANA timezone name (see Timezone.UnmarshalText).
	Timezone Timezone `toml:"timezone"`
	// Latitude of the location (presumably decimal degrees — confirm against the sun-event caller).
	Latitude float64 `toml:"latitude"`
	// Longitude of the location.
	Longitude float64 `toml:"longitude"`
}
// Timezone is a TOML-friendly wrapper around *time.Location.
type Timezone struct {
	*time.Location
}

// UnmarshalText parses an IANA timezone name (e.g. "America/Los_Angeles")
// via time.LoadLocation and stores the result in the embedded Location.
func (t *Timezone) UnmarshalText(text []byte) error {
	location, loadErr := time.LoadLocation(string(text))
	if loadErr != nil {
		return loadErr
	}
	t.Location = location
	return nil
}
// Rule describes under which conditions which lights should be set into which state.
type Rule struct {
	Days        []Weekday      `toml:"days"`         // days of the week the rule applies to
	LightGroups []string       `toml:"light_groups"` // names of the light groups to control
	TimeTrigger TimeTrigger    `toml:"time_trigger"` // when the rule fires (solar event or fixed local time)
	LightState  lib.LightState `toml:"light_state"`  // desired state for the lights
	Conditions  []Condition    `toml:"conditions"`   // conditions that must be true for the rule to take effect
}

// TimeTrigger describes either a solar event or a hard-coded local time for something to happen.
// Presumably exactly one of the two fields is set — confirm against the scheduler logic.
type TimeTrigger struct {
	SolarEvent *SolarEventWrapper `toml:"solar_event"`
	LocalTime  *TimeInDay         `toml:"local_time"`
}
// TimeInDay describes a certain time in a day
type TimeInDay struct {
Hour int
Minute int
}
// UnmarshalText converts text in "HH:MM" format into a TimeInDay
func (t *TimeInDay) UnmarshalText(text []byte) error {
var err error
stringText := string(text)
if strings.Count(stringText, ":") != 1 {
return fmt.Errorf("Time in day must contain exactly one \":\": %v", stringText)
}
elements := strings.Split(stringText, ":")
t.Hour, err = strconv.Atoi(elements[0])
if err != nil {
return err
}
t.Minute, err = strconv.Atoi(elements[1])
return err
}
// String returns a string in the form "HH:MM" for a time in day
func (t *TimeInDay) String() string {
return fmt.Sprintf("%02d:%02d", t.Hour, t.Minute)
}
// ForTime returns a timestamp where the year, month, day, and timezone are the same as the given object, but with the time of the argument
func (t *TimeInDay) ForTime(ti time.Time) time.Time {
return time.Date(
ti.Year(),
ti.Month(),
ti.Day(),
t.Hour,
t.Minute,
0,
0,
ti.Location(),
)
}
// SolarEventWrapper wraps a SolarEvent so it can be decoded from TOML text.
type SolarEventWrapper struct {
	SolarEvent
}

// SolarEvent is an enum for solar events.
type SolarEvent int

const (
	// Sunrise refers to the instant the sun rises
	Sunrise SolarEvent = iota
	// Sunset refers to the instant the sun sets
	Sunset
)

// solarEvents holds the English names, indexed by SolarEvent value.
var solarEvents = [...]string{
	"Sunrise",
	"Sunset",
}

// String returns the English name of the solar event. Out-of-range values
// yield "SolarEvent(n)" instead of panicking with an index error.
func (s SolarEvent) String() string {
	if s < 0 || int(s) >= len(solarEvents) {
		return fmt.Sprintf("SolarEvent(%d)", int(s))
	}
	return solarEvents[s]
}

// UnmarshalText converts text ("sunrise" or "sunset") to the appropriate
// SolarEvent enum.
func (s *SolarEventWrapper) UnmarshalText(text []byte) error {
	stringText := string(text)
	var solarEvent SolarEvent
	switch stringText {
	case "sunrise":
		solarEvent = Sunrise
	case "sunset":
		solarEvent = Sunset
	default:
		return fmt.Errorf("Unrecognized solar event: %v", stringText)
	}
	s.SolarEvent = solarEvent
	return nil
}
// Weekday refers to day of week
type Weekday struct {
time.Weekday
}
// UnmarshalText converts text into Weekday objects
func (w *Weekday) UnmarshalText(text []byte) error {
stringText := string(text)
var weekday time.Weekday
switch stringText {
case "MO":
weekday = time.Monday
case "TU":
weekday = time.Tuesday
case "WE":
weekday = time.Wednesday
case "TH":
weekday = time.Thursday
case "FR":
weekday = time.Friday
case "SA":
weekday = time.Saturday
case "SU":
weekday = time.Sunday
default:
return fmt.Errorf("Unrecognized weekday: %v", stringText)
}
w.Weekday = weekday
return nil
}
// Condition describes things that must be true for a rule to take effect.
type Condition struct {
	Type *ConditionTypeWrapper `toml:"type"`
}

// ConditionTypeWrapper wraps ConditionType so it can be decoded from TOML text.
type ConditionTypeWrapper struct {
	ConditionType
}

// ConditionType is an enum representing types of conditions.
type ConditionType int

const (
	// LightsAreOn is a condition type requiring the lights to be on
	LightsAreOn ConditionType = iota
	// LightsAreOff is a condition type requiring the lights to be off
	LightsAreOff
)

// conditions holds the English names, indexed by ConditionType value.
var conditions = [...]string{
	"LightsAreOn",
	"LightsAreOff",
}

// String returns the English name of the condition type. Out-of-range values
// yield "ConditionType(n)" instead of panicking with an index error.
func (c ConditionType) String() string {
	if c < 0 || int(c) >= len(conditions) {
		return fmt.Sprintf("ConditionType(%d)", int(c))
	}
	return conditions[c]
}

// UnmarshalText converts text ("lights_are_on" or "lights_are_off") to the
// appropriate ConditionType enum.
func (c *ConditionTypeWrapper) UnmarshalText(text []byte) error {
	stringText := string(text)
	var conditionType ConditionType
	switch stringText {
	case "lights_are_on":
		conditionType = LightsAreOn
	case "lights_are_off":
		conditionType = LightsAreOff
	default:
		return fmt.Errorf("Unrecognized condition type: %v", stringText)
	}
	c.ConditionType = conditionType
	return nil
}
// ScheduledRule represents a specific instance of a rule execution that should be executed at a certain time
type ScheduledRule struct {
Rule Rule
ScheduledFor time.Time
} | lib/scheduler/models.go | 0.851691 | 0.429669 | models.go | starcoder |
package horizon
import (
"container/heap"
"github.com/golang/geo/s1"
"github.com/golang/geo/s2"
"github.com/google/btree"
)
// S2Storage Spatial datastore backed by an S2 cell covering index.
/*
	storageLevel - fixed S2 cell level used for both indexing and queries
	edges - map of edge ID -> edge (no locking: not safe for concurrent writes — confirm usage)
	BTree - embedded b-tree mapping cell IDs to the edges covering them
*/
type S2Storage struct {
	storageLevel int
	edges        map[uint64]*Edge
	*btree.BTree
}
// NewS2Storage Returns pointer to created S2Storage
/*
	storageLevel - level for S2
	degree - degree of b-tree
*/
func NewS2Storage(storageLevel int, degree int) *S2Storage {
	storage := &S2Storage{
		storageLevel: storageLevel,
		edges:        map[uint64]*Edge{},
		BTree:        btree.New(degree),
	}
	return storage
}
// indexedItem Object in datastore: an S2 cell plus the IDs of every edge
// whose covering includes that cell.
type indexedItem struct {
	s2.CellID
	edgesInCell []uint64
}

// Less orders indexedItems by raw CellID value so they fit the b-tree.
// Note: the type assertion panics if compared against any other btree.Item type.
func (ii indexedItem) Less(than btree.Item) bool {
	return uint64(ii.CellID) < uint64(than.(indexedItem).CellID)
}

// AddEdge Add edge (polyline) to storage.
// The polyline is covered with cells at the fixed storage level and the edge
// ID is appended to every covering cell's index entry.
/*
	edgeID - unique identifier
	edge - edge
*/
func (storage *S2Storage) AddEdge(edgeID uint64, edge *Edge) error {
	coverer := s2.RegionCoverer{MinLevel: storage.storageLevel, MaxLevel: storage.storageLevel}
	cells := coverer.Covering(edge.Polyline)
	for _, cell := range cells {
		ii := indexedItem{CellID: cell}
		// Reuse the existing entry for this cell, if any, so edge IDs accumulate.
		item := storage.BTree.Get(ii)
		if item != nil {
			ii = item.(indexedItem)
		}
		ii.edgesInCell = append(ii.edgesInCell, edgeID)
		storage.BTree.ReplaceOrInsert(ii)
	}
	storage.edges[edgeID] = edge
	return nil
}
// SearchInRadiusLonLat returns the edges within radius of the given
// longitude/latitude, mapped to their minimal distance from the query point.
/*
	lon - longitude
	lat - latitude
	radius - radius of search
*/
func (storage *S2Storage) SearchInRadiusLonLat(lon, lat float64, radius float64) (map[uint64]float64, error) {
	// Delegates to SearchInRadius: s2.CellFromLatLng(ll) is defined as the cell
	// of s2.PointFromLatLng(ll), so converting the query to a point first yields
	// identical results and removes a verbatim copy of the search body.
	return storage.SearchInRadius(s2.PointFromLatLng(s2.LatLngFromDegrees(lat, lon)), radius)
}
// SearchInRadius returns the edges within radius of the given point, mapped
// to their minimal distance (in the same units as EarthRadius) from the
// query point's cell.
/*
	pt - s2.Point
	radius - radius of search
*/
func (storage *S2Storage) SearchInRadius(pt s2.Point, radius float64) (map[uint64]float64, error) {
	queryCell := s2.CellFromPoint(pt)
	// Cover a spherical cap of the requested radius with cells at the storage level.
	searchCap := s2.CapFromCenterAngle(pt, s1.Angle(radius/EarthRadius))
	coverer := s2.RegionCoverer{MaxLevel: storage.storageLevel, MinLevel: storage.storageLevel}
	result := make(map[uint64]float64)
	for _, cellID := range coverer.Covering(searchCap) {
		item := storage.BTree.Get(indexedItem{CellID: cellID})
		if item == nil {
			continue
		}
		for _, edgeID := range item.(indexedItem).edgesInCell {
			polyline := storage.edges[edgeID]
			// Minimal chord angle from the query cell to any segment of the polyline.
			minDist := s1.ChordAngle(0)
			for i := 0; i < polyline.Polyline.NumEdges(); i++ {
				edge := polyline.Polyline.Edge(i)
				distance := queryCell.DistanceToEdge(edge.V0, edge.V1)
				if i == 0 || distance < minDist {
					minDist = distance
				}
			}
			result[edgeID] = minDist.Angle().Radians() * EarthRadius
		}
	}
	return result, nil
}
// NearestObject Nearest object to given point
/*
edgeID - unique identifier
distanceTo - distance to object
*/
type NearestObject struct {
edgeID uint64
distanceTo float64
}
// Implement heap (for getting top-N elements)
type s2Heap []NearestObject
func (h s2Heap) Less(i, j int) bool { return h[i].distanceTo < h[j].distanceTo }
func (h s2Heap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h s2Heap) Len() int { return len(h) }
func (h *s2Heap) Push(x interface{}) {
*h = append(*h, x.(NearestObject))
}
func (h *s2Heap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
// NearestNeighborsInRadius returns up to n edges within radius of pt,
// ordered closest-first (KNN restricted to a search radius).
/*
	pt - s2.Point
	radius - radius of search
	n - first N closest edges
*/
func (storage *S2Storage) NearestNeighborsInRadius(pt s2.Point, radius float64, n int) ([]NearestObject, error) {
	found, err := storage.SearchInRadius(pt, radius)
	if err != nil {
		return nil, err
	}
	// Build a distance-ordered min-heap over all matches, then pop the n closest.
	h := &s2Heap{}
	heap.Init(h)
	for k, v := range found {
		heap.Push(h, NearestObject{k, v})
	}
	// Clamp n to the number of results actually found.
	l := h.Len()
	if l < n {
		n = l
	}
	ans := make([]NearestObject, n)
	for i := 0; i < n; i++ {
		ans[i] = heap.Pop(h).(NearestObject)
	}
	return ans, nil
} | s2_datastore.go | 0.6705 | 0.404566 | s2_datastore.go | starcoder |
package pointer
import (
"time"
)
/*
Order as in spec:
bool byte complex64 complex128 error float32 float64
int int8 int16 int32 int64 rune string
uint uint8 uint16 uint32 uint64 uintptr
time.Duration time.Time
*/
// GetBool returns the value of the bool pointer passed in or false if the pointer is nil.
func GetBool(b *bool) bool {
	if b != nil {
		return *b
	}
	return false
}

// GetByte returns the value of the byte pointer passed in or 0 if the pointer is nil.
func GetByte(b *byte) byte {
	if b != nil {
		return *b
	}
	return 0
}

// GetComplex64 returns the value of the complex64 pointer passed in or 0 if the pointer is nil.
func GetComplex64(c *complex64) complex64 {
	if c != nil {
		return *c
	}
	return 0
}

// GetComplex128 returns the value of the complex128 pointer passed in or 0 if the pointer is nil.
func GetComplex128(c *complex128) complex128 {
	if c != nil {
		return *c
	}
	return 0
}

// GetError returns the value of the error pointer passed in or nil if the pointer is nil.
func GetError(e *error) error {
	if e != nil {
		return *e
	}
	return nil
}

// GetFloat32 returns the value of the float32 pointer passed in or 0 if the pointer is nil.
func GetFloat32(f *float32) float32 {
	if f != nil {
		return *f
	}
	return 0
}

// GetFloat64 returns the value of the float64 pointer passed in or 0 if the pointer is nil.
func GetFloat64(f *float64) float64 {
	if f != nil {
		return *f
	}
	return 0
}
// GetInt returns the value of the int pointer passed in or 0 if the pointer is nil.
func GetInt(i *int) int {
	if i != nil {
		return *i
	}
	return 0
}

// GetInt8 returns the value of the int8 pointer passed in or 0 if the pointer is nil.
func GetInt8(i *int8) int8 {
	if i != nil {
		return *i
	}
	return 0
}

// GetInt16 returns the value of the int16 pointer passed in or 0 if the pointer is nil.
func GetInt16(i *int16) int16 {
	if i != nil {
		return *i
	}
	return 0
}

// GetInt32 returns the value of the int32 pointer passed in or 0 if the pointer is nil.
func GetInt32(i *int32) int32 {
	if i != nil {
		return *i
	}
	return 0
}

// GetInt64 returns the value of the int64 pointer passed in or 0 if the pointer is nil.
func GetInt64(i *int64) int64 {
	if i != nil {
		return *i
	}
	return 0
}

// GetRune returns the value of the rune pointer passed in or 0 if the pointer is nil.
func GetRune(r *rune) rune {
	if r != nil {
		return *r
	}
	return 0
}
// GetString returns the value of the string pointer passed in or empty string if the pointer is nil.
func GetString(s *string) string {
if s == nil {
return ""
}
return *s
}
// GetUint returns the value of the uint pointer passed in or 0 if the pointer is nil.
func GetUint(u *uint) uint {
if u == nil {
return 0
}
return *u
}
// GetUint8 returns the value of the uint8 pointer passed in or 0 if the pointer is nil.
func GetUint8(u *uint8) uint8 {
if u == nil {
return 0
}
return *u
}
// GetUint16 returns the value of the uint16 pointer passed in or 0 if the pointer is nil.
func GetUint16(u *uint16) uint16 {
if u == nil {
return 0
}
return *u
}
// GetUint32 returns the value of the uint32 pointer passed in or 0 if the pointer is nil.
func GetUint32(u *uint32) uint32 {
if u == nil {
return 0
}
return *u
}
// GetUint64 returns the value of the uint64 pointer passed in or 0 if the pointer is nil.
func GetUint64(u *uint64) uint64 {
if u == nil {
return 0
}
return *u
}
// GetUintptr returns the value of the uintptr pointer passed in or 0 if the pointer is nil.
func GetUintptr(u *uintptr) uintptr {
if u == nil {
return 0
}
return *u
}
// GetDuration returns the value of the duration pointer passed in or 0 if the pointer is nil.
func GetDuration(d *time.Duration) time.Duration {
if d == nil {
return 0
}
return *d
}
// GetTime returns the value of the time pointer passed in or zero time.Time if the pointer is nil.
func GetTime(t *time.Time) time.Time {
	if t == nil {
		// the zero time.Time (January 1, year 1, 00:00:00 UTC)
		return time.Time{}
	}
	return *t
} | vendor/github.com/AlekSi/pointer/value.go | 0.687945 | 0.414129 | value.go | starcoder |
package versioning
import (
"context"
"github.com/ipfs/go-datastore"
)
// MigrationFunc is a function to transform a single element of one type of data into
// a single element of another type of data. It has the following form:
//	func<T extends cbg.CBORUnmarshaller, U extends cbg.CBORMarshaller>(old T) (new U, error)
type MigrationFunc interface{}

// DatastoreMigration can run a migration of a datastore that is a table
// of one kind of structured data and write it to a table that is another kind of
// structured data. Up returns the keys that were migrated.
type DatastoreMigration interface {
	Up(ctx context.Context, oldDs datastore.Batching, newDS datastore.Batching) ([]datastore.Key, error)
}

// ReversableDatastoreMigration is a DatastoreMigration that can also be run
// in reverse via Down. (The historical "Reversable" spelling is kept because
// the name is exported.)
type ReversableDatastoreMigration interface {
	DatastoreMigration
	Down(ctx context.Context, newDs datastore.Batching, oldDS datastore.Batching) ([]datastore.Key, error)
}

// VersionKey is an identifier for a database version.
type VersionKey string

// VersionedMigration is a migration that migrates data in a single database
// between versions.
type VersionedMigration interface {
	OldVersion() VersionKey
	NewVersion() VersionKey
	Up(ctx context.Context, ds datastore.Batching) ([]datastore.Key, error)
}

// ReversibleVersionedMigration is a migration that migrates data in a single database
// between versions, and can be reversed.
type ReversibleVersionedMigration interface {
	VersionedMigration
	Down(ctx context.Context, ds datastore.Batching) ([]datastore.Key, error)
}

// VersionedMigrationList is a sortable list of versioned migrations.
type VersionedMigrationList []VersionedMigration

// Len is the number of elements in the collection.
func (vml VersionedMigrationList) Len() int {
	return len(vml)
}

// Less reports whether the element with
// index i should sort before the element with index j
// (lexicographic comparison of the target version keys).
func (vml VersionedMigrationList) Less(i int, j int) bool {
	return vml[i].NewVersion() < vml[j].NewVersion()
}

// Swap swaps the elements with indexes i and j.
func (vml VersionedMigrationList) Swap(i int, j int) {
	vml[i], vml[j] = vml[j], vml[i]
}
// MigrationState is an interface that returns the current state of migrations being run.
type MigrationState interface {
	ReadyError() error
}

// readyError is a string-backed error type used for migration-state sentinel errors.
type readyError string

// Error implements the error interface.
func (re readyError) Error() string {
	return string(re)
}

// ErrMigrationsNotRun is returned when ReadyError is called before migrations
// have completed (either not started yet or still running).
// (Original message was garbled: "Database not migrations must be run...")
const ErrMigrationsNotRun = readyError("database migrations must be run or are still running")
// ErrContextCancelled means the context the migrations were run in was cancelled
const ErrContextCancelled = readyError("context cancelled") | pkg/types.go | 0.593138 | 0.554953 | types.go | starcoder |
package gcode
// ExtrusionTracker accumulates E-axis (extruder) state from a stream of
// G-code commands, handling both relative and absolute extrusion modes.
type ExtrusionTracker struct {
	RelativeExtrusion      bool    // true == relative, false == absolute
	TotalExtrusion         float32 // cumulative net E movement (note: decreases during retraction, restored by the re-prime)
	CurrentExtrusionValue  float32 // current position of the E axis
	PreviousExtrusionValue float32 // last position of the E axis
	LastExtrudeWasRetract  bool    // true == last E movement was negative, false == positive E
	LastRetractDistance    float32 // most recent E axis delta of a negative E move
	CurrentRetraction      float32 // current total retraction (negative; need this much positive E to be primed)
	LastCommandWasG92      bool    // true if the last E modification was a manual position being set
}
// TrackInstruction updates the tracker's extrusion state from a single parsed
// command. It handles E-axis moves (linear and arc), extrusion-mode changes,
// and manual position sets (G92 — see LastCommandWasG92).
func (et *ExtrusionTracker) TrackInstruction(instruction Command) {
	if len(instruction.Command) == 0 {
		return
	}
	if instruction.IsLinearMove() || instruction.IsArcMove() {
		if eValue, ok := instruction.Params["e"]; ok {
			et.PreviousExtrusionValue = et.CurrentExtrusionValue
			et.CurrentExtrusionValue = eValue
			if et.RelativeExtrusion {
				// relative extrusion: eValue is itself the delta
				et.TotalExtrusion += eValue
				if eValue < 0 {
					// retraction
					et.LastExtrudeWasRetract = true
					et.LastRetractDistance = eValue
					et.CurrentRetraction += eValue
				} else if eValue > 0 {
					et.LastExtrudeWasRetract = false
					if et.CurrentRetraction+eValue >= 0 {
						// normal extrusion: the move fully re-primes the nozzle
						et.CurrentRetraction = 0
					} else {
						// restart: only partially undoes the retraction
						et.CurrentRetraction += eValue
					}
				}
			} else {
				// absolute extrusion: delta is eValue minus the previous position
				et.TotalExtrusion += eValue - et.PreviousExtrusionValue
				if et.CurrentExtrusionValue < et.PreviousExtrusionValue {
					// retraction
					et.LastExtrudeWasRetract = true
					et.LastRetractDistance = eValue - et.PreviousExtrusionValue
					et.CurrentRetraction += et.LastRetractDistance
				} else if et.CurrentExtrusionValue > et.PreviousExtrusionValue {
					et.LastExtrudeWasRetract = false
					if et.CurrentRetraction+(eValue-et.PreviousExtrusionValue) >= 0 {
						// normal extrusion: the move fully re-primes the nozzle
						et.CurrentRetraction = 0
					} else {
						// restart: only partially undoes the retraction
						et.CurrentRetraction += eValue - et.PreviousExtrusionValue
					}
				}
			}
		}
	} else if setExtrusionMode, relative := instruction.IsSetExtrusionMode(); setExtrusionMode {
		et.RelativeExtrusion = relative
	} else if instruction.IsSetPosition() {
		hasParamsOrFlags := len(instruction.Params) > 0 || len(instruction.Flags) > 0
		if hasParamsOrFlags {
			// Set-position with an explicit E/A/B axis value moves the logical
			// position without consuming filament (TotalExtrusion unchanged).
			if eValue, ok := instruction.Params["e"]; ok {
				et.LastCommandWasG92 = true
				et.CurrentExtrusionValue = eValue
			} else if aValue, ok := instruction.Params["a"]; ok {
				et.LastCommandWasG92 = true
				et.CurrentExtrusionValue = aValue
			} else if bValue, ok := instruction.Params["b"]; ok {
				et.LastCommandWasG92 = true
				et.CurrentExtrusionValue = bValue
			}
		} else {
			// bare set-position resets the logical E position to zero
			et.LastCommandWasG92 = true
			et.CurrentExtrusionValue = 0
		}
	}
} | gcode/extrusion-tracker.go | 0.680879 | 0.406214 | extrusion-tracker.go | starcoder |
package num
import (
"fmt"
)
// Matrix is a slice of slices of Int
type Matrix []Set

// NewMatrix creates an empty matrix of 0s with dimensions rows x cols.
func NewMatrix(rows Int, cols Int) Matrix {
	matrix := Matrix{}
	for row := Int(0); row < rows; row++ {
		matrix = append(matrix, make(Set, cols))
	}
	return matrix
}
// Matrix implements sort.Interface, ordering rows by their length.
func (m Matrix) Len() int           { return len(m) }
func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
func (m Matrix) Less(i, j int) bool { return len(m[i]) < len(m[j]) }
// Find returns a channel that yields every set in m containing n.
// The channel is closed once all rows have been scanned.
func (m Matrix) Find(n Int) chan Set {
	matches := make(chan Set)
	go func() {
		defer close(matches)
		for _, candidate := range m {
			if candidate.Contains(n) {
				matches <- candidate
			}
		}
	}()
	return matches
}
// MaxPathSum returns the maximum value available in a top-to-bottom path
// through a numerical grid (a Set of Sets), where each step moves to one of
// the two adjacent entries in the row below.
//
// NOTE: the computation is destructive — partial sums are accumulated in
// place, so m is modified. Returns 0 for an empty or zero-width matrix
// (the original panicked with an index error in that case).
func (m Matrix) MaxPathSum() Int {
	if len(m) == 0 || len(m[0]) == 0 {
		return 0
	}
	for row := len(m) - 2; row >= 0; row-- {
		for col := 0; col < len(m[row])-1; col++ {
			// Fold the larger of the two children below into this cell.
			if m[row+1][col] > m[row+1][col+1] {
				m[row][col] += m[row+1][col]
			} else {
				m[row][col] += m[row+1][col+1]
			}
		}
	}
	return m[0][0]
}
// Coord represents the values of coordinates within a grid.
type Coord struct {
	Row Int
	Col Int
}

// Direction represents an identifier for vector direction.
type Direction int

// Vector Directions constants
const (
	RIGHT  Direction = iota // Left To Right
	LEFT                    // Right To Left
	UP                      // Up
	DOWN                    // Down
	RIGHTU                  // Left To Right Up (diagonal)
	RIGHTD                  // Left To Right Down (diagonal)
	LEFTU                   // Right To Left Up (diagonal)
	LEFTD                   // Right To Left Down (diagonal)
)

// Vector returns a ln length set of values starting at row/col extending in Direction d.
// Vector also returns the coordinates of those values.
// If supplied, Vector will overwrite the visited cells with replaceWith (in order),
// mutating the matrix in place.
// As soon as the walk leaves the matrix bounds, an error is returned along
// with the partial results gathered so far.
func (m Matrix) Vector(pos Coord, ln Int, d Direction, replaceWith ...Int) (Set, []Coord, error) {
	var (
		res  Set
		crds []Coord
	)
	for i := Int(0); i < ln; i++ {
		crd := Coord{}
		// Compute the i-th coordinate along the requested direction.
		switch d {
		case RIGHT:
			crd = Coord{pos.Row, pos.Col + i}
		case LEFT:
			crd = Coord{pos.Row, pos.Col - i}
		case DOWN:
			crd = Coord{pos.Row + i, pos.Col}
		case UP:
			crd = Coord{pos.Row - i, pos.Col}
		case RIGHTD:
			crd = Coord{pos.Row + i, pos.Col + i}
		case LEFTD:
			crd = Coord{pos.Row + i, pos.Col - i}
		case RIGHTU:
			crd = Coord{pos.Row - i, pos.Col + i}
		case LEFTU:
			crd = Coord{pos.Row - i, pos.Col - i}
		}
		// Bounds check against the (possibly ragged) matrix.
		if crd.Row >= Int(len(m)) || crd.Row < 0 || crd.Col >= Int(len(m[crd.Row])) || crd.Col < 0 {
			return res, crds, fmt.Errorf("Vector out of bounds [ROW|COL]:[%d|%d]", crd.Row, crd.Col)
		}
		// Optionally overwrite the cell before reading it back.
		if i < Int(len(replaceWith)) {
			m[crd.Row][crd.Col] = replaceWith[i]
		}
		res = append(res, m[crd.Row][crd.Col])
		crds = append(crds, crd)
	}
	return res, crds, nil
}
// Spiral creates a square grid number spiral of width size. If size is even it is incremented
// to become odd (so the spiral has a well-defined center cell).
func Spiral(size Int) Matrix {
	if size%2 == 0 {
		size++
	}
	var (
		m = NewMatrix(size, size)
		r = size / 2 // start at the center cell
		c = size / 2
	)
	// Walk outward from the center: runs of length inc+1 alternate between
	// (RIGHT, DOWN) for odd inc and (LEFT, UP) for even inc, writing consecutive
	// integers via Vector's replaceWith until a run walks off the grid.
	m[r][c] = 1
	for inc := Int(1); true; inc++ {
		done := false
		if inc%2 != 0 {
			for _, d := range []Direction{RIGHT, DOWN} {
				_, vec, err := m.Vector(Coord{r, c}, inc+1, d, Range(m[r][c], m[r][c]+inc)...)
				if err != nil {
					// Ran off the edge of the grid: the spiral is complete.
					done = true
					break
				}
				last := len(vec) - 1
				r, c = vec[last].Row, vec[last].Col
			}
		} else {
			for _, d := range []Direction{LEFT, UP} {
				_, vec, err := m.Vector(Coord{r, c}, inc+1, d, Range(m[r][c], m[r][c]+inc)...)
				if err != nil {
					done = true
					break
				}
				last := len(vec) - 1
				r, c = vec[last].Row, vec[last].Col
			}
		}
		if done {
			break
		}
	}
	return m
} | matrix.go | 0.793826 | 0.537891 | matrix.go | starcoder |
package metrics
import (
"errors"
"fmt"
"math/rand"
"net"
"runtime"
"strings"
"sync"
"time"
)
// Client is a statsd metrics client over UDP.
type Client interface {
	// Close closes the connection and cleans up.
	Close() error
	// Inc increments a statsd count type.
	// stat is a string name for the metric.
	// value is the integer value
	// rate is the sample rate (0.0 to 1.0)
	Inc(stat string, value int64, rate float32) error
	// Dec decrements a statsd count type.
	// stat is a string name for the metric.
	// value is the integer value.
	// rate is the sample rate (0.0 to 1.0).
	Dec(stat string, value int64, rate float32) error
	// Gauge submits/updates a statsd gauge type.
	// stat is a string name for the metric.
	// value is the integer value.
	// rate is the sample rate (0.0 to 1.0).
	Gauge(stat string, value int64, rate float32) error
	// GaugeDelta submits a delta to a statsd gauge.
	// stat is the string name for the metric.
	// value is the (positive or negative) change.
	// rate is the sample rate (0.0 to 1.0).
	GaugeDelta(stat string, value int64, rate float32) error
	// Timing submits a statsd timing type.
	// stat is a string name for the metric.
	// delta is the integer value in milliseconds.
	// rate is the sample rate (0.0 to 1.0).
	Timing(stat string, delta int64, rate float32) error
	// TimingMs emits a duration as a timing in milliseconds.
	TimingMs(stat string, tm time.Duration, rate float32) error
	// UniqueString submits a statsd set type, where value is the unique string.
	// rate is the sample rate (0.0 to 1.0).
	UniqueString(stat string, value string, rate float32) error
	// UniqueInt64 submits a statsd set type with an integer member.
	// rate is the sample rate (0.0 to 1.0).
	UniqueInt64(stat string, value int64, rate float32) error
	// ReportRuntimeMetrics reports Go runtime metrics (goroutines, memory, GC).
	ReportRuntimeMetrics(prefix string, rate float32) error
	// SetPrefix sets/updates the statsd client prefix.
	SetPrefix(prefix string)
}
// Metric joins the given name parts with "." into a single statsd metric name.
func Metric(parts ...string) string {
	return strings.Join(parts, ".")
}

// Escape replaces every "." in the input with "_" so the value can be embedded
// in a metric name without introducing extra hierarchy levels.
func Escape(in string) string {
	return strings.Map(func(r rune) rune {
		if r == '.' {
			return '_'
		}
		return r
	}, in)
}
// client is the UDP-backed implementation of Client.
type client struct {
	// underlying connection
	c net.PacketConn
	// resolved udp address
	ra *net.UDPAddr
	// prefix for statsd name
	prefix string
	// guards the GC bookkeeping below so memory stats are reported correctly
	// when ReportRuntimeMetrics is called concurrently
	mtx *sync.Mutex
	// previously reported garbage collection number (-1 until the first report)
	prevNumGC int32
	// last garbage collection time
	lastGC uint64
}
// Close closes the underlying UDP connection.
func (s *client) Close() error {
	// Return the error directly; the intermediate variable added nothing.
	return s.c.Close()
}
// Inc increments a statsd counter.
// stat is the metric name, value the amount, rate the sample rate (0.0 to 1.0).
func (s *client) Inc(stat string, value int64, rate float32) error {
	return s.submit(stat, fmt.Sprintf("%d|c", value), rate)
}

// Dec decrements a statsd counter by submitting the negated value.
func (s *client) Dec(stat string, value int64, rate float32) error {
	return s.Inc(stat, -value, rate)
}

// Gauge submits/updates a statsd gauge.
func (s *client) Gauge(stat string, value int64, rate float32) error {
	return s.submit(stat, fmt.Sprintf("%d|g", value), rate)
}

// GaugeDelta submits a signed delta to a statsd gauge.
func (s *client) GaugeDelta(stat string, value int64, rate float32) error {
	return s.submit(stat, fmt.Sprintf("%+d|g", value), rate)
}

// Timing submits a statsd timing value in milliseconds.
func (s *client) Timing(stat string, delta int64, rate float32) error {
	return s.submit(stat, fmt.Sprintf("%d|ms", delta), rate)
}

// TimingMs emits a duration as a statsd timing in milliseconds.
func (s *client) TimingMs(stat string, tm time.Duration, rate float32) error {
	return s.Timing(stat, int64(tm/time.Millisecond), rate)
}

// UniqueString submits a statsd set member (string form).
func (s *client) UniqueString(stat string, value string, rate float32) error {
	return s.submit(stat, fmt.Sprintf("%s|s", value), rate)
}

// UniqueInt64 submits a statsd set member (integer form).
func (s *client) UniqueInt64(stat string, value int64, rate float32) error {
	return s.submit(stat, fmt.Sprintf("%d|s", value), rate)
}
// ReportRuntimeMetrics reports Go runtime metrics (goroutine count, memory
// stats, and GC pause/period timings) as gauges and timings under prefix.
// Errors from the individual gauge/timing submissions are ignored.
func (s *client) ReportRuntimeMetrics(prefix string, rate float32) error {
	stats := &runtime.MemStats{}
	runtime.ReadMemStats(stats)
	// Lock so the prevNumGC/lastGC bookkeeping stays consistent across
	// concurrent reporters.
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.Gauge(Metric(prefix, "runtime", "goroutines"), int64(runtime.NumGoroutine()), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "alloc"), int64(stats.Alloc), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "sys"), int64(stats.Sys), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "lookups"), int64(stats.Lookups), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "mallocs"), int64(stats.Mallocs), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "frees"), int64(stats.Frees), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "heap", "alloc"), int64(stats.HeapAlloc), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "heap", "sys"), int64(stats.HeapSys), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "heap", "idle"), int64(stats.HeapIdle), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "heap", "inuse"), int64(stats.HeapInuse), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "heap", "released"), int64(stats.HeapReleased), rate)
	s.Gauge(Metric(prefix, "runtime", "mem", "heap", "objects"), int64(stats.HeapObjects), rate)
	prevNumGC := s.prevNumGC
	lastGC := s.lastGC
	s.prevNumGC = int32(stats.NumGC)
	s.lastGC = stats.LastGC
	// First report since construction: no baseline to diff against yet.
	if prevNumGC == -1 {
		return nil
	}
	countGC := int32(stats.NumGC) - prevNumGC
	if countGC < 0 {
		return fmt.Errorf("Invalid number of garbage collections: %d", countGC)
	}
	// Nothing changed since last call, nothing to report
	if countGC == 0 {
		return nil
	}
	// PauseNs is a 256-entry circular buffer; if more GCs than that happened
	// since the last report, the older pause data was overwritten.
	if countGC > 256 {
		countGC = 256
	}
	s.Timing(Metric(prefix, "runtime", "gc", "periodns"), int64(stats.LastGC-lastGC), rate)
	for i := int32(0); i < countGC; i += 1 {
		// Index into the circular PauseNs buffer, newest entries first.
		idx := int((stats.NumGC-uint32(i))+255) % 256
		s.Timing(Metric(prefix, "runtime", "gc", "pausens"), int64(stats.PauseNs[idx]), rate)
	}
	return nil
}

// SetPrefix sets/updates the statsd client prefix.
func (s *client) SetPrefix(prefix string) {
	s.prefix = prefix
}
// submit formats the statsd event data, handles sampling, and prepares it,
// and sends it to the server.
func (s *client) submit(stat string, value string, rate float32) error {
if rate < 1 {
if rand.Float32() < rate {
value = fmt.Sprintf("%s|@%f", value, rate)
} else {
return nil
}
}
if s.prefix != "" {
stat = fmt.Sprintf("%s.%s", s.prefix, stat)
}
data := fmt.Sprintf("%s:%s", stat, value)
_, err := s.send([]byte(data))
if err != nil {
return err
}
return nil
}
// send writes the payload to the statsd server endpoint and returns the
// number of bytes written.
func (s *client) send(data []byte) (int, error) {
	// No locking needed here: the underlying net.UDPConn already serializes
	// writes. (The original wrapped data in a redundant []byte() conversion,
	// forcing an extra copy of an already-[]byte value.)
	n, err := s.c.(*net.UDPConn).WriteToUDP(data, s.ra)
	if err != nil {
		return 0, err
	}
	if n == 0 {
		return n, errors.New("Wrote no bytes")
	}
	return n, nil
}
// NewStatsd returns a new Client and an error.
// addr is a string of the format "hostname:port", and must be parsable by
// net.ResolveUDPAddr.
// prefix is the statsd client prefix. Can be "" if no prefix is desired.
func NewStatsd(addr, prefix string) (Client, error) {
	// Bind an ephemeral local UDP port; packets are addressed to ra on send.
	c, err := net.ListenPacket("udp", ":0")
	if err != nil {
		return nil, err
	}
	ra, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return nil, err
	}
	client := &client{
		c:      c,
		ra:     ra,
		prefix: prefix,
		mtx:    &sync.Mutex{},
		// -1 is the sentinel meaning "no GC stats reported yet";
		// see ReportRuntimeMetrics.
		prevNumGC: -1,
	}
	return client, nil
} | Godeps/_workspace/src/github.com/mailgun/metrics/client.go | 0.730866 | 0.479382 | client.go | starcoder |
package hash
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/base64"
"encoding/hex"
)
// HmacMd5 computes a Hash-based Message Authentication Code (HMAC) by using
// the MD5 hash function. The HMAC process mixes a secret key with the message
// data, hashes the result with the hash function, mixes that hash value with
// the secret key again, and then applies the hash function a second time.
func HmacMd5(message string, secret string) []byte {
key := []byte(secret)
hash := hmac.New(md5.New, key)
hash.Write([]byte(message))
return hash.Sum(nil)
}
// HmacMd5Hex mixes a secret key with the message data, hashes the result with the
// MD5 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using hexadecimal encoding.
func HmacMd5Hex(message string, secret string) string {
bytes := HmacMd5(message, secret)
return hex.EncodeToString(bytes)
}
// HmacMd5Base64StdEnc mixes a secret key with the message data, hashes the result with
// the MD5 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using base64 standard encoding.
func HmacMd5Base64StdEnc(message string, secret string) string {
bytes := HmacMd5(message, secret)
return base64.StdEncoding.EncodeToString(bytes)
}
// HmacMd5Base64RawStdEnc mixes a secret key with the message data, hashes the result with
// the MD5 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using raw base64 standard encoding.
func HmacMd5Base64RawStdEnc(message string, secret string) string {
bytes := HmacMd5(message, secret)
return base64.RawStdEncoding.EncodeToString(bytes)
}
// HmacMd5Base64URLEnc mixes a secret key with the message data, hashes the result with
// the MD5 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using base64 URL encoding.
func HmacMd5Base64URLEnc(message string, secret string) string {
bytes := HmacMd5(message, secret)
return base64.URLEncoding.EncodeToString(bytes)
}
// HmacMd5Base64RawURLEnc returns the HMAC-MD5 digest of message, keyed with
// secret, encoded with URL-safe base64 without padding.
func HmacMd5Base64RawURLEnc(message string, secret string) string {
	return base64.RawURLEncoding.EncodeToString(HmacMd5(message, secret))
}
// HmacSha1 computes a Hash-based Message Authentication Code (HMAC) over
// message, using SHA-1 as the underlying hash and secret as the key, and
// returns the raw 20-byte digest.
func HmacSha1(message string, secret string) []byte {
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(message))
	return mac.Sum(nil)
}
// HmacSha1Hex returns the HMAC-SHA1 digest of message, keyed with secret,
// encoded as a lowercase hexadecimal string.
func HmacSha1Hex(message string, secret string) string {
	return hex.EncodeToString(HmacSha1(message, secret))
}
// HmacSha1Base64StdEnc returns the HMAC-SHA1 digest of message, keyed with
// secret, encoded with standard (padded) base64.
func HmacSha1Base64StdEnc(message string, secret string) string {
	return base64.StdEncoding.EncodeToString(HmacSha1(message, secret))
}
// HmacSha1Base64RawStdEnc returns the HMAC-SHA1 digest of message, keyed with
// secret, encoded with standard base64 without padding.
func HmacSha1Base64RawStdEnc(message string, secret string) string {
	return base64.RawStdEncoding.EncodeToString(HmacSha1(message, secret))
}
// HmacSha1Base64URLEnc returns the HMAC-SHA1 digest of message, keyed with
// secret, encoded with URL-safe (padded) base64.
func HmacSha1Base64URLEnc(message string, secret string) string {
	return base64.URLEncoding.EncodeToString(HmacSha1(message, secret))
}
// HmacSha1Base64RawURLEnc returns the HMAC-SHA1 digest of message, keyed with
// secret, encoded with URL-safe base64 without padding.
func HmacSha1Base64RawURLEnc(message string, secret string) string {
	return base64.RawURLEncoding.EncodeToString(HmacSha1(message, secret))
}
// HmacSha256 computes a Hash-based Message Authentication Code (HMAC) over
// message, using SHA-256 as the underlying hash and secret as the key, and
// returns the raw 32-byte digest.
func HmacSha256(message string, secret string) []byte {
	mac := hmac.New(sha256.New, []byte(secret))
	mac.Write([]byte(message))
	return mac.Sum(nil)
}
// HmacSha256Hex returns the HMAC-SHA256 digest of message, keyed with secret,
// encoded as a lowercase hexadecimal string.
func HmacSha256Hex(message string, secret string) string {
	return hex.EncodeToString(HmacSha256(message, secret))
}
// HmacSha256Base64StdEnc returns the HMAC-SHA256 digest of message, keyed with
// secret, encoded with standard (padded) base64.
func HmacSha256Base64StdEnc(message string, secret string) string {
	return base64.StdEncoding.EncodeToString(HmacSha256(message, secret))
}
// HmacSha256Base64RawStdEnc returns the HMAC-SHA256 digest of message, keyed
// with secret, encoded with standard base64 without padding.
func HmacSha256Base64RawStdEnc(message string, secret string) string {
	return base64.RawStdEncoding.EncodeToString(HmacSha256(message, secret))
}
// HmacSha256Base64URLEnc mixes a secret key with the message data, hashes the result with
// the SHA-256 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using base64 URL encoding.
func HmacSha256Base64URLEnc(message string, secret string) string {
	// URL-safe padded base64; the previous comment wrongly claimed raw standard encoding.
	return base64.URLEncoding.EncodeToString(HmacSha256(message, secret))
}
// HmacSha256Base64RawURLEnc returns the HMAC-SHA256 digest of message, keyed
// with secret, encoded with URL-safe base64 without padding.
func HmacSha256Base64RawURLEnc(message string, secret string) string {
	return base64.RawURLEncoding.EncodeToString(HmacSha256(message, secret))
}
// HmacSha224 computes a Hash-based Message Authentication Code (HMAC) over
// message, using SHA-224 as the underlying hash and secret as the key, and
// returns the raw 28-byte digest.
func HmacSha224(message string, secret string) []byte {
	mac := hmac.New(sha256.New224, key(secret))
	mac.Write([]byte(message))
	return mac.Sum(nil)
}

// key converts a secret string to the byte slice form required by hmac.New.
func key(secret string) []byte {
	return []byte(secret)
}
// HmacSha224Hex returns the HMAC-SHA224 digest of message, keyed with secret,
// encoded as a lowercase hexadecimal string.
func HmacSha224Hex(message string, secret string) string {
	return hex.EncodeToString(HmacSha224(message, secret))
}
// HmacSha224Base64StdEnc returns the HMAC-SHA224 digest of message, keyed with
// secret, encoded with standard (padded) base64.
func HmacSha224Base64StdEnc(message string, secret string) string {
	return base64.StdEncoding.EncodeToString(HmacSha224(message, secret))
}
// HmacSha224Base64RawStdEnc returns the HMAC-SHA224 digest of message, keyed
// with secret, encoded with standard base64 without padding.
func HmacSha224Base64RawStdEnc(message string, secret string) string {
	return base64.RawStdEncoding.EncodeToString(HmacSha224(message, secret))
}
// HmacSha224Base64URLEnc mixes a secret key with the message data, hashes the result with
// the SHA-224 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using base64 URL encoding.
func HmacSha224Base64URLEnc(message string, secret string) string {
	// URL-safe padded base64; the previous comment wrongly claimed raw standard encoding.
	return base64.URLEncoding.EncodeToString(HmacSha224(message, secret))
}
// HmacSha224Base64RawURLEnc returns the HMAC-SHA224 digest of message, keyed
// with secret, encoded with URL-safe base64 without padding.
func HmacSha224Base64RawURLEnc(message string, secret string) string {
	return base64.RawURLEncoding.EncodeToString(HmacSha224(message, secret))
}
// HmacSha512 computes a Hash-based Message Authentication Code (HMAC) over
// message, using SHA-512 as the underlying hash and secret as the key, and
// returns the raw 64-byte digest.
func HmacSha512(message string, secret string) []byte {
	mac := hmac.New(sha512.New, []byte(secret))
	mac.Write([]byte(message))
	return mac.Sum(nil)
}
// HmacSha512Hex returns the HMAC-SHA512 digest of message, keyed with secret,
// encoded as a lowercase hexadecimal string.
func HmacSha512Hex(message string, secret string) string {
	return hex.EncodeToString(HmacSha512(message, secret))
}
// HmacSha512Base64StdEnc returns the HMAC-SHA512 digest of message, keyed with
// secret, encoded with standard (padded) base64.
func HmacSha512Base64StdEnc(message string, secret string) string {
	return base64.StdEncoding.EncodeToString(HmacSha512(message, secret))
}
// HmacSha512Base64RawStdEnc returns the HMAC-SHA512 digest of message, keyed
// with secret, encoded with standard base64 without padding.
func HmacSha512Base64RawStdEnc(message string, secret string) string {
	return base64.RawStdEncoding.EncodeToString(HmacSha512(message, secret))
}
// HmacSha512Base64URLEnc mixes a secret key with the message data, hashes the result with
// the SHA-512 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using base64 URL encoding.
func HmacSha512Base64URLEnc(message string, secret string) string {
	// URL-safe padded base64; the previous comment wrongly claimed standard encoding.
	return base64.URLEncoding.EncodeToString(HmacSha512(message, secret))
}
// HmacSha512Base64RawURLEnc returns the HMAC-SHA512 digest of message, keyed
// with secret, encoded with URL-safe base64 without padding.
func HmacSha512Base64RawURLEnc(message string, secret string) string {
	return base64.RawURLEncoding.EncodeToString(HmacSha512(message, secret))
}
// HmacSha384 computes a Hash-based Message Authentication Code (HMAC) over
// message, using SHA-384 as the underlying hash and secret as the key, and
// returns the raw 48-byte digest.
func HmacSha384(message string, secret string) []byte {
	mac := hmac.New(sha512.New384, []byte(secret))
	mac.Write([]byte(message))
	return mac.Sum(nil)
}
// HmacSha384Hex returns the HMAC-SHA384 digest of message, keyed with secret,
// encoded as a lowercase hexadecimal string.
func HmacSha384Hex(message string, secret string) string {
	return hex.EncodeToString(HmacSha384(message, secret))
}
// HmacSha384Base64StdEnc returns the HMAC-SHA384 digest of message, keyed with
// secret, encoded with standard (padded) base64.
func HmacSha384Base64StdEnc(message string, secret string) string {
	return base64.StdEncoding.EncodeToString(HmacSha384(message, secret))
}
// HmacSha384Base64RawStdEnc returns the HMAC-SHA384 digest of message, keyed
// with secret, encoded with standard base64 without padding.
func HmacSha384Base64RawStdEnc(message string, secret string) string {
	return base64.RawStdEncoding.EncodeToString(HmacSha384(message, secret))
}
// HmacSha384Base64URLEnc mixes a secret key with the message data, hashes the result with
// the SHA-384 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using base64 URL encoding.
func HmacSha384Base64URLEnc(message string, secret string) string {
	// URL-safe padded base64; the previous comment wrongly claimed standard encoding.
	return base64.URLEncoding.EncodeToString(HmacSha384(message, secret))
}
// HmacSha384Base64RawURLEnc mixes a secret key with the message data, hashes the result with
// the SHA-384 hash function, mixes that hash value with the secret key again, then applies
// the hash function a second time, and encodes the result using base64 raw URL encoding.
func HmacSha384Base64RawURLEnc(message string, secret string) string {
bytes := HmacSha384(message, secret)
return base64.RawURLEncoding.EncodeToString(bytes)
} | hash/hmac.go | 0.848957 | 0.593786 | hmac.go | starcoder |
package main
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
)
/*
You have three stacks of cylinders where each cylinder has the same diameter, but they may vary in height. You can change the height of a stack by removing and discarding its topmost cylinder any number of times.
Find the maximum possible height of the stacks such that all of the stacks are exactly the same height. This means you must remove zero or more cylinders from the top of zero or more of the three stacks until they are all the same height, then return the height.
Example
h1 = [1, 2, 1, 1]
h2 = [1, 1, 2]
h3 = [1, 1]
There are 4,3 and 2 cylinders in the three stacks, with their heights in the three arrays. Remove the top 2 cylinders from h1 (heights = [1, 2]) and from h2 (heights = [1, 1]) so that the three stacks all are 2 units tall. Return 2 as the answer.
Note: An empty stack is still a stack.
Function Description
Complete the equalStacks function in the editor below.
equalStacks has the following parameters:
int h1[n1]: the first array of heights
int h2[n2]: the second array of heights
int h3[n3]: the third array of heights
Returns
int: the height of the stacks when they are equalized
*/
// calcH returns the total height of a stack, i.e. the sum of all the
// cylinder heights in a.
func calcH(a []int32) int32 {
	var total int32
	for _, v := range a {
		total += v
	}
	return total
}
// equalStacks returns the maximum height at which all three stacks can be
// equalized by repeatedly discarding the topmost cylinder (the first slice
// element) from a tallest stack. It returns 0 when no common non-zero height
// exists; an empty stack is still a stack.
//
// Fixed: removed leftover debug fmt.Printf output, the dead `done` loop flag
// (it was never set), and the unreachable trailing return.
func equalStacks(h1 []int32, h2 []int32, h3 []int32) int32 {
	// Local sum so this function does not depend on helpers.
	sum := func(a []int32) int32 {
		var s int32
		for _, v := range a {
			s += v
		}
		return s
	}
	he1, he2, he3 := sum(h1), sum(h2), sum(h3)
	for {
		if he1 == he2 && he2 == he3 {
			return he1
		}
		if he1 == 0 || he2 == 0 || he3 == 0 {
			return 0
		}
		// Pop the top cylinder from (one of) the tallest stacks so the
		// heights converge toward the common height.
		switch {
		case he1 >= he2 && he1 >= he3:
			he1 -= h1[0]
			h1 = h1[1:]
		case he2 >= he1 && he2 >= he3:
			he2 -= h2[0]
			h2 = h2[1:]
		default:
			he3 -= h3[0]
			h3 = h3[1:]
		}
	}
}
// parseInts parses a line of space-separated integers into a slice of
// int32 values. It panics on malformed input.
func parseInts(line string) []int32 {
	fields := strings.Split(line, " ")
	result := make([]int32, 0, len(fields))
	for _, field := range fields {
		v, err := strconv.ParseInt(field, 10, 64)
		if err != nil {
			panic(err)
		}
		result = append(result, int32(v))
	}
	return result
}
func main() {
bytesRead, _ := ioutil.ReadFile("/mnt/data/projects/gotuto/github.com/lemenendez/gotuto/tuto/algo-25-input.txt")
file_content := string(bytesRead)
lines := strings.Split(file_content, "\n")
h1 := parseInts(lines[1])
h2 := parseInts(lines[2])
h3 := parseInts(lines[3])
fmt.Printf("heights h1:%v, h2:%v, h3:%v\n", calcH(h1), calcH(h2), calcH(h3))
h := equalStacks(h1, h2, h3)
fmt.Printf("height %v", h)
} | github.com/lemenendez/gotuto/tuto/algo-25-equal-stacks.go | 0.654232 | 0.60964 | algo-25-equal-stacks.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// FilterClause models a single filter condition: a named operator applied to
// a source operand (an attribute on the object being tested) and a target
// operand holding the values to test against.
type FilterClause struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Name of the operator to be applied to the source and target operands. Must be one of the supported operators. Supported operators can be discovered.
    operatorName *string
    // Name of source operand (the operand being tested). The source operand name must match one of the attribute names on the source object.
    sourceOperandName *string
    // Values that the source operand will be tested against.
    targetOperand FilterOperandable
}
// NewFilterClause instantiates a new filterClause and sets the default values.
func NewFilterClause()(*FilterClause) {
    instance := &FilterClause{}
    instance.SetAdditionalData(make(map[string]interface{}))
    return instance
}
// CreateFilterClauseFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// The parse node is accepted for interface compatibility but not inspected.
func CreateFilterClauseFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    result := NewFilterClause()
    return result, nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Returns nil when called on a nil receiver.
func (m *FilterClause) GetAdditionalData()(map[string]interface{}) {
    // Early return instead of else-after-return (staticcheck S1008 style).
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetFieldDeserializers the deserialization information for the current model.
// Each entry maps a wire-format field name to a closure that reads the value
// from the parse node and stores it on the receiver; nil values are skipped.
func (m *FilterClause) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    // operatorName: plain string property.
    res["operatorName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetOperatorName(val)
        }
        return nil
    }
    // sourceOperandName: plain string property.
    res["sourceOperandName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetSourceOperandName(val)
        }
        return nil
    }
    // targetOperand: nested object, built via its discriminator factory.
    res["targetOperand"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreateFilterOperandFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetTargetOperand(val.(FilterOperandable))
        }
        return nil
    }
    return res
}
// GetOperatorName gets the operatorName property value. Name of the operator to be applied to the source and target operands. Must be one of the supported operators. Supported operators can be discovered.
// Returns nil when called on a nil receiver.
func (m *FilterClause) GetOperatorName()(*string) {
    // Early return instead of else-after-return (staticcheck S1008 style).
    if m == nil {
        return nil
    }
    return m.operatorName
}
// GetSourceOperandName gets the sourceOperandName property value. Name of source operand (the operand being tested). The source operand name must match one of the attribute names on the source object.
// Returns nil when called on a nil receiver.
func (m *FilterClause) GetSourceOperandName()(*string) {
    // Early return instead of else-after-return (staticcheck S1008 style).
    if m == nil {
        return nil
    }
    return m.sourceOperandName
}
// GetTargetOperand gets the targetOperand property value. Values that the source operand will be tested against.
// Returns nil when called on a nil receiver.
func (m *FilterClause) GetTargetOperand()(FilterOperandable) {
    // Early return instead of else-after-return (staticcheck S1008 style).
    if m == nil {
        return nil
    }
    return m.targetOperand
}
// Serialize serializes information the current object.
// Each property is written under its wire-format name; the additional-data
// map is written last so unknown fields survive a deserialize/serialize
// round trip.
func (m *FilterClause) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    {
        err := writer.WriteStringValue("operatorName", m.GetOperatorName())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("sourceOperandName", m.GetSourceOperandName())
        if err != nil {
            return err
        }
    }
    {
        // Nested object property.
        err := writer.WriteObjectValue("targetOperand", m.GetTargetOperand())
        if err != nil {
            return err
        }
    }
    {
        // Round-trip any fields not described in the OpenAPI description.
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// A nil receiver is a no-op.
func (m *FilterClause) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetOperatorName sets the operatorName property value. Name of the operator to be applied to the source and target operands. Must be one of the supported operators. Supported operators can be discovered.
// A nil receiver is a no-op.
func (m *FilterClause) SetOperatorName(value *string)() {
    if m == nil {
        return
    }
    m.operatorName = value
}
// SetSourceOperandName sets the sourceOperandName property value. Name of source operand (the operand being tested). The source operand name must match one of the attribute names on the source object.
// A nil receiver is a no-op.
func (m *FilterClause) SetSourceOperandName(value *string)() {
    if m == nil {
        return
    }
    m.sourceOperandName = value
}
// SetTargetOperand sets the targetOperand property value. Values that the source operand will be tested against.
func (m *FilterClause) SetTargetOperand(value FilterOperandable)() {
if m != nil {
m.targetOperand = value
}
} | models/filter_clause.go | 0.628521 | 0.414247 | filter_clause.go | starcoder |
package vmath
import (
"fmt"
"github.com/maja42/vmath/math32"
"github.com/maja42/vmath/mathi"
)
// Recti represents a 2D, axis-aligned rectangle.
// Min is intended to hold the smaller corner and Max the larger one in both
// dimensions; the constructors maintain this invariant and Normalize can
// restore it for manually built values.
type Recti struct {
	Min Vec2i
	Max Vec2i
}
// RectiFromCorners creates a new rectangle given two opposite corners.
// The corners may be supplied in any order; coordinates are swapped per
// dimension so that the result has Min <= Max.
func RectiFromCorners(c1, c2 Vec2i) Recti {
	for dim := 0; dim < 2; dim++ {
		if c1[dim] > c2[dim] {
			c1[dim], c2[dim] = c2[dim], c1[dim]
		}
	}
	return Recti{c1, c2}
}
// RectiFromPosSize creates a new rectangle with the given size and position.
// Negative dimensions are correctly inverted by flipping the size and moving
// the position accordingly.
func RectiFromPosSize(pos, size Vec2i) Recti {
	for dim := 0; dim < 2; dim++ {
		if size[dim] < 0 {
			size[dim] = -size[dim]
			pos[dim] -= size[dim]
		}
	}
	return Recti{pos, pos.Add(size)}
}
// RectiFromEdges creates a new rectangle with the given edge positions.
// (The comment previously misnamed this function "RectfFromEdges".)
func RectiFromEdges(left, right, bottom, top int) Recti {
	return RectiFromCorners(Vec2i{left, bottom}, Vec2i{right, top})
}
// Normalize ensures that the Min position is smaller than the Max position
// in every dimension, swapping coordinates where needed, and returns the
// normalized copy.
func (r Recti) Normalize() Recti {
	for dim := 0; dim < 2; dim++ {
		if r.Min[dim] > r.Max[dim] {
			r.Min[dim], r.Max[dim] = r.Max[dim], r.Min[dim]
		}
	}
	return r
}
// String returns a human-readable representation of the rectangle.
func (r Recti) String() string {
	return fmt.Sprintf("Recti([%d x %d]-[%d x %d])", r.Min[0], r.Min[1], r.Max[0], r.Max[1])
}
// Rectf returns a float representation of the rectangle.
func (r Recti) Rectf() Rectf {
	min := r.Min.Vec2f()
	max := r.Max.Vec2f()
	return Rectf{min, max}
}
// Size returns the rectangle's dimensions (Max - Min).
// Uses a value receiver for consistency with the other read-only methods
// on Recti; the previous pointer receiver was the only outlier.
func (r Recti) Size() Vec2i {
	return r.Max.Sub(r.Min)
}
// Area returns the rectangle's area (width times height).
func (r Recti) Area() int {
	d := r.Max.Sub(r.Min)
	return d[0] * d[1]
}
// Left returns the rectangle's left edge coordinate (the smaller X, Min[0]).
func (r Recti) Left() int {
	return r.Min[0]
}
// Right returns the rectangle's right edge coordinate (the bigger X, Max[0]).
func (r Recti) Right() int {
	return r.Max[0]
}
// Bottom returns the rectangle's bottom edge coordinate (the smaller Y, Min[1]).
func (r Recti) Bottom() int {
	return r.Min[1]
}
// Top returns the rectangle's top edge coordinate (the bigger Y, Max[1]).
func (r Recti) Top() int {
	return r.Max[1]
}
// SetPos changes the rectangle position by moving Min to pos while keeping
// the rectangle's size.
// Fixed: the receiver must be a pointer — the previous value receiver
// mutated a copy, so the call silently had no effect.
func (r *Recti) SetPos(pos Vec2i) {
	size := r.Size()
	r.Min = pos
	r.Max = pos.Add(size)
}
// SetSize changes the rectangle size while keeping the min-position.
// Fixed: the receiver must be a pointer — the previous value receiver
// mutated a copy, so the call silently had no effect.
func (r *Recti) SetSize(size Vec2i) {
	r.Max = r.Min.Add(size)
}
// Add returns a copy of the rectangle translated by v (added to both
// Min and Max).
func (r Recti) Add(v Vec2i) Recti {
	r.Min = r.Min.Add(v)
	r.Max = r.Max.Add(v)
	return r
}
// Sub returns a copy of the rectangle translated by -v (subtracted from
// both Min and Max).
func (r Recti) Sub(v Vec2i) Recti {
	r.Min = r.Min.Sub(v)
	r.Max = r.Max.Sub(v)
	return r
}
// Overlaps checks if this rectangle overlaps another rectangle.
// Touching rectangles whose edges are exactly equal are not considered
// to overlap.
func (r Recti) Overlaps(other Recti) bool {
	// Separating-axis test: no overlap if disjoint along either axis.
	if r.Max[0] <= other.Min[0] || other.Max[0] <= r.Min[0] {
		return false
	}
	if r.Max[1] <= other.Min[1] || other.Max[1] <= r.Min[1] {
		return false
	}
	return true
}
// OverlapsOrTouches checks if this rectangle overlaps or touches another
// rectangle (shared edges count).
func (r Recti) OverlapsOrTouches(other Recti) bool {
	// Separating-axis test with strict inequality so touching still passes.
	if r.Max[0] < other.Min[0] || other.Max[0] < r.Min[0] {
		return false
	}
	if r.Max[1] < other.Min[1] || other.Max[1] < r.Min[1] {
		return false
	}
	return true
}
// ContainsPoint checks if a given point resides within the rectangle.
// Points lying exactly on an edge are considered to be contained.
// (The comment previously misnamed this function "Contains".)
func (r Recti) ContainsPoint(point Vec2i) bool {
	for dim := 0; dim < 2; dim++ {
		if point[dim] < r.Min[dim] || point[dim] > r.Max[dim] {
			return false
		}
	}
	return true
}
// ContainsRecti checks if this rectangle completely contains another
// rectangle (shared edges count as contained).
func (r Recti) ContainsRecti(other Recti) bool {
	for dim := 0; dim < 2; dim++ {
		if other.Min[dim] < r.Min[dim] || other.Max[dim] > r.Max[dim] {
			return false
		}
	}
	return true
}
// Merge returns the smallest rectangle that contains both input rectangles
// (their bounding box).
func (r Recti) Merge(other Recti) Recti {
	return Recti{
		Min: Vec2i{mathi.Min(r.Min[0], other.Min[0]), mathi.Min(r.Min[1], other.Min[1])},
		Max: Vec2i{mathi.Max(r.Max[0], other.Max[0]), mathi.Max(r.Max[1], other.Max[1])},
	}
}
// SquarePointDistance returns the squared distance between the rectangle and a point.
// If the point is contained within the rectangle, 0 is returned.
// Otherwise, the squared distance between the point and the nearest edge or corner is returned.
// Source: "Nearest Neighbor Queries", ACM SIGMOD, pages 71-79, 1995.
func (r Recti) SquarePointDistance(pos Vec2i) int {
	sum := 0
	for dim, val := range pos {
		if val < r.Min[dim] {
			// Point is below/left of the rectangle along this axis.
			d := val - r.Min[dim]
			sum += d * d
		} else if val > r.Max[dim] {
			// Point is above/right of the rectangle along this axis.
			d := val - r.Max[dim]
			sum += d * d
		}
		// Inside the interval on this axis: contributes nothing
		// (removed the previous no-op `sum += 0` branch).
	}
	return sum
}
// PointDistance returns the distance between the rectangle and a point.
// If the point is contained within the rectangle, 0 is returned.
// Otherwise, the distance between the point and the nearest edge or corner is returned.
func (r Recti) PointDistance(pos Vec2i) float32 {
return math32.Sqrt(float32(r.SquarePointDistance(pos)))
} | recti.go | 0.910721 | 0.677741 | recti.go | starcoder |
package av
import (
"encoding/csv"
"io"
"sort"
"time"
"github.com/pkg/errors"
)
// TimeSeries specifies a given time series to query for.
// For valid options, see the TimeSeries* package constants.
// The zero value is TimeSeriesDaily.
type TimeSeries uint8

const (
	TimeSeriesDaily TimeSeries = iota
	TimeSeriesDailyAdjusted
	TimeSeriesWeekly
	TimeSeriesWeeklyAdjusted
	TimeSeriesMonthly
	TimeSeriesMonthlyAdjusted
	timeSeriesIntraday // intentionally not exported
)
// String returns the Go-style name of the time series; values outside the
// defined constants yield "TimeSeriesUnknown".
func (t TimeSeries) String() string {
	// Ordered to match the iota declaration of the TimeSeries constants.
	names := [...]string{
		"TimeSeriesDaily",
		"TimeSeriesDailyAdjusted",
		"TimeSeriesWeekly",
		"TimeSeriesWeeklyAdjusted",
		"TimeSeriesMonthly",
		"TimeSeriesMonthlyAdjusted",
		"TimeSeriesIntraday",
	}
	if int(t) < len(names) {
		return names[t]
	}
	return "TimeSeriesUnknown"
}
// keyName returns the name of the TimeSeries used for Alpha Vantage API;
// values outside the defined constants yield "UNKNOWN".
func (t TimeSeries) keyName() string {
	// Ordered to match the iota declaration of the TimeSeries constants.
	keys := [...]string{
		"TIME_SERIES_DAILY",
		"TIME_SERIES_DAILY_ADJUSTED",
		"TIME_SERIES_WEEKLY",
		"TIME_SERIES_WEEKLY_ADJUSTED",
		"TIME_SERIES_MONTHLY",
		"TIME_SERIES_MONTHLY_ADJUSTED",
		"TIME_SERIES_INTRADAY",
	}
	if int(t) < len(keys) {
		return keys[t]
	}
	return "UNKNOWN"
}
// TimeInterval specifies a frequency to query for intraday stock data.
// For valid options, see the TimeInterval* package constants.
// The zero value is TimeIntervalOneMinute.
type TimeInterval uint8

const (
	TimeIntervalOneMinute TimeInterval = iota
	TimeIntervalFiveMinute
	TimeIntervalFifteenMinute
	TimeIntervalThirtyMinute
	TimeIntervalSixtyMinute
)
// String returns the Go-style name of the time interval; values outside the
// defined constants yield "TimeIntervalUnknown".
func (t TimeInterval) String() string {
	// Ordered to match the iota declaration of the TimeInterval constants.
	names := [...]string{
		"TimeIntervalOneMinute",
		"TimeIntervalFiveMinute",
		"TimeIntervalFifteenMinute",
		"TimeIntervalThirtyMinute",
		"TimeIntervalSixtyMinute",
	}
	if int(t) < len(names) {
		return names[t]
	}
	return "TimeIntervalUnknown"
}
// keyName returns the name of the TimeInterval used for Alpha Vantage API;
// values outside the defined constants yield "unknown".
func (t TimeInterval) keyName() string {
	// Ordered to match the iota declaration of the TimeInterval constants.
	keys := [...]string{
		"1min",
		"5min",
		"15min",
		"30min",
		"60min",
	}
	if int(t) < len(keys) {
		return keys[t]
	}
	return "unknown"
}
var (
	// timeSeriesDateFormats are the expected date formats in time series data:
	// a bare date for daily/weekly/monthly series and a date-time for
	// intraday series. Presumably parseDate tries them in order — TODO confirm.
	timeSeriesDateFormats = []string{
		"2006-01-02",
		"2006-01-02 15:04:05",
	}
)
// TimeSeriesValue is a single OHLCV data point: the open/high/low/close
// prices and trading volume for one bar at a given time.
type TimeSeriesValue struct {
	Time   time.Time
	Open   float64
	High   float64
	Low    float64
	Close  float64
	Volume float64
}
// sortTimeSeriesValuesByDate implements sort.Interface so TimeSeriesValue
// slices can be sorted by date in ascending order.
type sortTimeSeriesValuesByDate []*TimeSeriesValue

func (s sortTimeSeriesValuesByDate) Len() int {
	return len(s)
}

func (s sortTimeSeriesValuesByDate) Less(i, j int) bool {
	return s[i].Time.Before(s[j].Time)
}

func (s sortTimeSeriesValuesByDate) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// parseTimeSeriesData will parse csv data from a reader into a slice of
// values sorted by date (ascending). The first row is assumed to be a
// header and is discarded; a completely empty input yields a nil slice.
func parseTimeSeriesData(r io.Reader) ([]*TimeSeriesValue, error) {
	reader := csv.NewReader(r)
	reader.ReuseRecord = true // optimization: avoid one allocation per row
	reader.LazyQuotes = true
	reader.TrimLeadingSpace = true
	// Removed the deprecated csv.Reader.TrailingComma assignment; the
	// standard library documents it as "no longer used", so setting it
	// was a no-op.

	// strip header
	if _, err := reader.Read(); err != nil {
		if err == io.EOF {
			return nil, nil
		}
		return nil, err
	}
	values := make([]*TimeSeriesValue, 0, 64)
	for {
		record, err := reader.Read()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		value, err := parseTimeSeriesRecord(record)
		if err != nil {
			return nil, err
		}
		values = append(values, value)
	}
	// sort values by date
	sort.Sort(sortTimeSeriesValuesByDate(values))
	return values, nil
}
// parseDigitalCurrencySeriesRecord will parse an individual csv record
func parseTimeSeriesRecord(s []string) (*TimeSeriesValue, error) {
// these are the expected columns in the csv record
const (
timestamp = iota
open
high
low
close
volume
)
value := &TimeSeriesValue{}
d, err := parseDate(s[timestamp], timeSeriesDateFormats...)
if err != nil {
return nil, errors.Wrapf(err, "error parsing timestamp %s", s[timestamp])
}
value.Time = d
f, err := parseFloat(s[open])
if err != nil {
return nil, errors.Wrapf(err, "error parsing open %s", s[open])
}
value.Open = f
f, err = parseFloat(s[high])
if err != nil {
return nil, errors.Wrapf(err, "error parsing high %s", s[high])
}
value.High = f
f, err = parseFloat(s[low])
if err != nil {
return nil, errors.Wrapf(err, "error parsing low %s", s[low])
}
value.Low = f
f, err = parseFloat(s[close])
if err != nil {
return nil, errors.Wrapf(err, "error parsing close %s", s[close])
}
value.Close = f
f, err = parseFloat(s[volume])
if err != nil {
return nil, errors.Wrapf(err, "error parsing volume %s", s[volume])
}
value.Volume = f
return value, nil
} | vendor/github.com/cmckee-dev/go-alpha-vantage/time_series.go | 0.754644 | 0.550728 | time_series.go | starcoder |
package iso20022
// Cash movements from or to a fund as a result of investment funds transactions, eg, subscriptions or redemptions.
type EstimatedFundCashForecast5 struct {
	// Unique technical identifier for an instance of a fund cash forecast within a fund cash forecast report as assigned by the issuer of the report.
	Identification *Max35Text `xml:"Id"`
	// Date and, if required, the time, at which the price will be applied.
	TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"`
	// Previous date and time at which the price was applied.
	PreviousTradeDateTime *DateAndDateTimeChoice `xml:"PrvsTradDtTm,omitempty"`
	// Investment fund class to which the cash flow is related.
	FinancialInstrumentDetails *FinancialInstrument9 `xml:"FinInstrmDtls"`
	// Estimated total value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
	EstimatedTotalNAV []*ActiveOrHistoricCurrencyAndAmount `xml:"EstmtdTtlNAV,omitempty"`
	// Previous value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
	PreviousTotalNAV []*ActiveOrHistoricCurrencyAndAmount `xml:"PrvsTtlNAV,omitempty"`
	// Estimated total number of investment fund class units that have been issued.
	EstimatedTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"EstmtdTtlUnitsNb,omitempty"`
	// Previous total number of investment fund class units that have been issued.
	PreviousTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"PrvsTtlUnitsNb,omitempty"`
	// Rate of change of the net asset value.
	EstimatedTotalNAVChangeRate *PercentageRate `xml:"EstmtdTtlNAVChngRate,omitempty"`
	// Currency of the investment fund class.
	InvestmentCurrency []*ActiveOrHistoricCurrencyCode `xml:"InvstmtCcy,omitempty"`
	// Information about the designation of the share class currency, that is, whether it is for onshore or offshore purposes and other information that may be required. This is typically only required for CNY funds.
	CurrencyStatus *CurrencyDesignation1 `xml:"CcySts,omitempty"`
	// Indicates whether the estimated net cash flow is exceptional.
	ExceptionalNetCashFlowIndicator *YesNoIndicator `xml:"XcptnlNetCshFlowInd"`
	// Price per unit of the previous trade date.
	Price *UnitPrice19 `xml:"Pric,omitempty"`
	// Foreign exchange rate.
	ForeignExchangeRate *ForeignExchangeTerms19 `xml:"FXRate,omitempty"`
	// Estimated net cash flow expressed as a percentage of the previous total NAV for the share class.
	EstimatedPercentageOfShareClassTotalNAV *PercentageRate `xml:"EstmtdPctgOfShrClssTtlNAV,omitempty"`
	// Estimated cash flow by party.
	BreakdownByParty []*BreakdownByParty3 `xml:"BrkdwnByPty,omitempty"`
	// Estimated cash flow by country.
	BreakdownByCountry []*BreakdownByCountry2 `xml:"BrkdwnByCtry,omitempty"`
	// Estimated cash flow by currency.
	BreakdownByCurrency []*BreakdownByCurrency2 `xml:"BrkdwnByCcy,omitempty"`
	// Estimated cash flow by a user defined parameter/s.
	BreakdownByUserDefinedParameter []*BreakdownByUserDefinedParameter3 `xml:"BrkdwnByUsrDfndParam,omitempty"`
	// Estimated net cash movements per financial instrument.
	EstimatedNetCashForecastDetails []*NetCashForecast4 `xml:"EstmtdNetCshFcstDtls,omitempty"`
}
// SetIdentification sets the technical identifier of the forecast.
func (e *EstimatedFundCashForecast5) SetIdentification(value string) {
	e.Identification = (*Max35Text)(&value)
}

// AddTradeDateTime allocates and returns the trade date/time choice.
func (e *EstimatedFundCashForecast5) AddTradeDateTime() *DateAndDateTimeChoice {
	e.TradeDateTime = new(DateAndDateTimeChoice)
	return e.TradeDateTime
}

// AddPreviousTradeDateTime allocates and returns the previous trade date/time choice.
func (e *EstimatedFundCashForecast5) AddPreviousTradeDateTime() *DateAndDateTimeChoice {
	e.PreviousTradeDateTime = new(DateAndDateTimeChoice)
	return e.PreviousTradeDateTime
}

// AddFinancialInstrumentDetails allocates and returns the instrument details.
func (e *EstimatedFundCashForecast5) AddFinancialInstrumentDetails() *FinancialInstrument9 {
	e.FinancialInstrumentDetails = new(FinancialInstrument9)
	return e.FinancialInstrumentDetails
}

// AddEstimatedTotalNAV appends an estimated total NAV amount in the given currency.
func (e *EstimatedFundCashForecast5) AddEstimatedTotalNAV(value, currency string) {
	e.EstimatedTotalNAV = append(e.EstimatedTotalNAV, NewActiveOrHistoricCurrencyAndAmount(value, currency))
}

// AddPreviousTotalNAV appends a previous total NAV amount in the given currency.
func (e *EstimatedFundCashForecast5) AddPreviousTotalNAV(value, currency string) {
	e.PreviousTotalNAV = append(e.PreviousTotalNAV, NewActiveOrHistoricCurrencyAndAmount(value, currency))
}

// AddEstimatedTotalUnitsNumber allocates and returns the estimated total units quantity.
func (e *EstimatedFundCashForecast5) AddEstimatedTotalUnitsNumber() *FinancialInstrumentQuantity1 {
	e.EstimatedTotalUnitsNumber = new(FinancialInstrumentQuantity1)
	return e.EstimatedTotalUnitsNumber
}

// AddPreviousTotalUnitsNumber allocates and returns the previous total units quantity.
func (e *EstimatedFundCashForecast5) AddPreviousTotalUnitsNumber() *FinancialInstrumentQuantity1 {
	e.PreviousTotalUnitsNumber = new(FinancialInstrumentQuantity1)
	return e.PreviousTotalUnitsNumber
}

// SetEstimatedTotalNAVChangeRate sets the NAV rate-of-change.
func (e *EstimatedFundCashForecast5) SetEstimatedTotalNAVChangeRate(value string) {
	e.EstimatedTotalNAVChangeRate = (*PercentageRate)(&value)
}

// AddInvestmentCurrency appends an investment currency code.
func (e *EstimatedFundCashForecast5) AddInvestmentCurrency(value string) {
	e.InvestmentCurrency = append(e.InvestmentCurrency, (*ActiveOrHistoricCurrencyCode)(&value))
}

// AddCurrencyStatus allocates and returns the currency designation.
func (e *EstimatedFundCashForecast5) AddCurrencyStatus() *CurrencyDesignation1 {
	e.CurrencyStatus = new(CurrencyDesignation1)
	return e.CurrencyStatus
}

// SetExceptionalNetCashFlowIndicator sets the exceptional-flow flag.
func (e *EstimatedFundCashForecast5) SetExceptionalNetCashFlowIndicator(value string) {
	e.ExceptionalNetCashFlowIndicator = (*YesNoIndicator)(&value)
}

// AddPrice allocates and returns the unit price.
func (e *EstimatedFundCashForecast5) AddPrice() *UnitPrice19 {
	e.Price = new(UnitPrice19)
	return e.Price
}

// AddForeignExchangeRate allocates and returns the FX terms.
func (e *EstimatedFundCashForecast5) AddForeignExchangeRate() *ForeignExchangeTerms19 {
	e.ForeignExchangeRate = new(ForeignExchangeTerms19)
	return e.ForeignExchangeRate
}

// SetEstimatedPercentageOfShareClassTotalNAV sets the estimated flow as a
// percentage of the previous total NAV.
func (e *EstimatedFundCashForecast5) SetEstimatedPercentageOfShareClassTotalNAV(value string) {
	e.EstimatedPercentageOfShareClassTotalNAV = (*PercentageRate)(&value)
}
func (e *EstimatedFundCashForecast5) AddBreakdownByParty() *BreakdownByParty3 {
newValue := new (BreakdownByParty3)
e.BreakdownByParty = append(e.BreakdownByParty, newValue)
return newValue
}
func (e *EstimatedFundCashForecast5) AddBreakdownByCountry() *BreakdownByCountry2 {
newValue := new (BreakdownByCountry2)
e.BreakdownByCountry = append(e.BreakdownByCountry, newValue)
return newValue
}
func (e *EstimatedFundCashForecast5) AddBreakdownByCurrency() *BreakdownByCurrency2 {
newValue := new (BreakdownByCurrency2)
e.BreakdownByCurrency = append(e.BreakdownByCurrency, newValue)
return newValue
}
func (e *EstimatedFundCashForecast5) AddBreakdownByUserDefinedParameter() *BreakdownByUserDefinedParameter3 {
newValue := new (BreakdownByUserDefinedParameter3)
e.BreakdownByUserDefinedParameter = append(e.BreakdownByUserDefinedParameter, newValue)
return newValue
}
func (e *EstimatedFundCashForecast5) AddEstimatedNetCashForecastDetails() *NetCashForecast4 {
newValue := new (NetCashForecast4)
e.EstimatedNetCashForecastDetails = append(e.EstimatedNetCashForecastDetails, newValue)
return newValue
} | EstimatedFundCashForecast5.go | 0.880341 | 0.586049 | EstimatedFundCashForecast5.go | starcoder |
package spaniel
import (
"encoding/json"
"time"
)
// TimeSpan represents a simple span of time, with no additional properties. It should be constructed with NewEmpty.
type TimeSpan struct {
	start     time.Time    // inclusive or exclusive per startType
	end       time.Time    // inclusive or exclusive per endType
	startType EndPointType // whether the start endpoint is Open or Closed
	endType   EndPointType // whether the end endpoint is Open or Closed
}

// Start returns the start time of a span
func (ts TimeSpan) Start() time.Time { return ts.start }

// End returns the end time of a span
func (ts TimeSpan) End() time.Time { return ts.end }

// StartType returns the type of the start of the interval (Open in this case)
func (ts TimeSpan) StartType() EndPointType { return ts.startType }

// EndType returns the type of the end of the interval (Closed in this case)
func (ts TimeSpan) EndType() EndPointType { return ts.endType }
// String renders the span in interval notation: a '[' or '(' bracket for a
// closed or open start, the start time, optionally a comma and the end time
// when the span is not an instant, and a ']' or ')' bracket for the end.
func (ts TimeSpan) String() string {
	lb, rb := "(", ")"
	if ts.StartType() == Closed {
		lb = "["
	}
	if ts.EndType() == Closed {
		rb = "]"
	}
	body := ts.Start().String()
	if ts.Start() != ts.End() {
		body += "," + ts.End().String()
	}
	return lb + body + rb
}
// MarshalJSON implements json.Marshal
// The span is encoded as {"start", "end", "start_included", "end_included"},
// translating the endpoint types to booleans via endPointInclusionMarshal.
func (ts TimeSpan) MarshalJSON() ([]byte, error) {
	o := struct {
		Start         time.Time `json:"start"`
		End           time.Time `json:"end"`
		StartIncluded bool      `json:"start_included"`
		EndIncluded   bool      `json:"end_included"`
	}{
		Start: ts.start,
		End:   ts.end,
	}
	o.StartIncluded = endPointInclusionMarshal(ts.startType)
	o.EndIncluded = endPointInclusionMarshal(ts.endType)
	return json.Marshal(o)
}

// UnmarshalJSON implements json.Unmarshal
// It accepts the same object shape produced by MarshalJSON and maps the
// boolean inclusion flags back to EndPointType values.
func (ts *TimeSpan) UnmarshalJSON(b []byte) (err error) {
	var i struct {
		Start         time.Time `json:"start"`
		End           time.Time `json:"end"`
		StartIncluded bool      `json:"start_included"`
		EndIncluded   bool      `json:"end_included"`
	}
	err = json.Unmarshal(b, &i)
	if err != nil {
		return err
	}
	ts.start = i.Start
	ts.end = i.End
	ts.startType = endPointInclusionUnmarhsal(i.StartIncluded)
	ts.endType = endPointInclusionUnmarhsal(i.EndIncluded)
	return
}
// endPointInclusionMarshal reports whether an endpoint type is inclusive
// (i.e. anything other than Open) for JSON serialization.
func endPointInclusionMarshal(e EndPointType) bool {
	return e != Open
}

// endPointInclusionUnmarhsal converts the JSON "included" flag back to an
// EndPointType. (NOTE(review): the name misspells "Unmarshal"; kept as-is
// because renaming would break its callers.)
func endPointInclusionUnmarhsal(b bool) EndPointType {
	if b {
		return Closed
	}
	return Open
}
// NewWithTypes creates a span with just a start and end time, and associated types, and is used when no handlers are provided to Union or Intersection.
func NewWithTypes(start, end time.Time, startType, endType EndPointType) *TimeSpan {
	return &TimeSpan{start, end, startType, endType}
}

// NewInstant creates a span with just a single time.
// Both endpoints are the given time; New makes the interval fully closed.
func NewInstant(time time.Time) *TimeSpan {
	return New(time, time)
}
// New creates a span with a start and end time, with the types set to [] for instants and [) for spans.
func New(start time.Time, end time.Time) *TimeSpan {
if start.Equal(end) {
// An instantaneous event has to be Closed (i.e. inclusive)
return NewWithTypes(start, end, Closed, Closed)
}
return NewWithTypes(start, end, Closed, Open)
} | timespan.go | 0.824991 | 0.520557 | timespan.go | starcoder |
package jsonmuncher
import (
"io"
"strconv"
"unicode/utf8"
"unsafe"
)
// JsonType represents the data type of a JsonValue.
type JsonType byte

const (
	// Null values are always 'null'.
	Null JsonType = iota
	// Bool values are 'true' or 'false'.
	Bool
	// Number values are double-precision floating point numbers.
	Number
	// String values are unicode strings.
	String
	// Array values are ordered collections of arbitrary JSON values.
	Array
	// Object values are maps from strings to arbitrary JSON values.
	Object
)

// JsonStatus represents the current read status of a JsonValue.
type JsonStatus byte

const (
	// Incomplete means there was a read or parse error while parsing the value.
	Incomplete JsonStatus = iota
	// Working means the value is currently in the process of being parsed.
	Working
	// Complete means the value has been parsed successfully in its entirety.
	Complete
)

// buffer is a read buffer for the JSON parser. It holds the raw chunk read
// from the stream, a one-byte lookahead (curr), the error state of the
// underlying reader, the file offset bookkeeping, the current nesting depth,
// and up to four pending bytes of a decoded unicode escape
// (escapes/escape1..escape4).
type buffer struct {
	data    []byte
	stream  io.Reader
	foffs   uint64 // file offset of the start of the chunk after data
	err     error  // error currently reported at the read cursor
	readerr error  // error returned by the last Read of the stream
	offs    uint32 // read cursor within data
	erroffs uint32 // number of valid bytes in data
	depth   uint32 // current nesting depth of Working values
	escapes byte   // index of the next pending escape byte (5 = none)
	escape1 byte
	escape2 byte
	escape3 byte
	escape4 byte
	curr byte // one-byte lookahead
}

// JsonValue represents a JSON value. This is the primary structure used in this
// library.
type JsonValue struct {
	// buffer is a pointer to the read buffer.
	buffer *buffer
	// numval is the parsed value, assuming this is a Number.
	numval float64
	// depth is the nesting depth of this value.
	depth uint32
	// Type is the data type of this value.
	Type JsonType
	// Status is the read status of this value.
	Status JsonStatus
	// boolval is the parsed value, assuming this is a Bool. If this is an
	// Object or Array, whether the first element has been parsed yet.
	boolval bool
	// keynext (assuming this is an Object) is true if the next thing to read is
	// a key, false if it's a value.
	keynext bool
}
// noescape prevents escape to the heap (unsafe, use with caution)
// only used to avoid heap allocations when we know they're not necessary
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

// feedq, feed, and next should all be the same function, but they've been
// separated for the sake of performance. feedq and next can be inlined, and so
// will run much more quickly than they otherwise would. These functions should
// be called like:
//   _ = feedq(buf) && feed(buf)
//   next(buf)

// feedq checks if the next byte is beyond the end of the buffer. Can be
// inlined; separated from `feed' to make inlining possible.
func feedq(buf *buffer) bool {
	return int(buf.offs) >= len(buf.data)
}

// feed feeds the buffer with the next chunk, assuming feedq is true. Cannot be
// inlined, because of the call to Read().
// It repeatedly calls Read until the buffer is full or the stream reports an
// error, records the error and the count of valid bytes, and rewinds the
// cursor to the start of the new chunk. Always returns false so that it can
// be used in the `_ = feedq(buf) && feed(buf)` idiom.
func feed(buf *buffer) bool {
	buf.foffs += uint64(len(buf.data))
	var erroffs, readoffs int
	var readerr error
	for erroffs < len(buf.data) && readerr == nil {
		readoffs, readerr = buf.stream.Read(buf.data[erroffs:])
		erroffs += readoffs
	}
	buf.readerr = readerr
	buf.erroffs = uint32(erroffs)
	buf.err = nil
	buf.offs = 0
	return false
}
// next consumes the next byte from the input stream, storing it in the
// lookahead. Can be inlined; separated from the above to make inlining
// possible. Past the end of the valid data it leaves curr at 0 and surfaces
// the stored read error.
// (Fix: dropped the unused variadic parameter `e ...byte`; every call site
// invokes next with the buffer alone.)
func next(buf *buffer) {
	if buf.offs < buf.erroffs {
		buf.curr = buf.data[buf.offs]
		buf.offs++
	} else {
		buf.curr = 0
		buf.err = buf.readerr
	}
}
// foffs calculates a file offset based on the buffer.
// It is the absolute position of the lookahead byte in the input stream.
func foffs(buf *buffer) uint64 {
	return buf.foffs - uint64(len(buf.data)) + uint64(buf.offs) - 1
}

// newErrUnexpected is a slightly easier way to make an ErrUnexpectedChar.
// At EOF it produces an unexpected-EOF error instead.
func newErrUnexpected(buf *buffer, e ...byte) ErrUnexpectedChar {
	if buf.err == io.EOF {
		return newErrUnexpectedEOF(1+foffs(buf), e...)
	}
	return newErrUnexpectedChar(foffs(buf), buf.curr, e...)
}
// skipSpace skips whitespace until the next significant character.
// Returns the first non-whitespace lookahead byte, or any hard read error.
func skipSpace(buf *buffer) (byte, error) {
	if buf.err != nil && buf.err != io.EOF {
		return 0, buf.err
	}
	c := buf.curr
	for {
		switch c {
		case ' ', '\t', '\r', '\n':
			_ = feedq(buf) && feed(buf)
			next(buf)
			if buf.err != nil && buf.err != io.EOF {
				return 0, buf.err
			}
			c = buf.curr
		default:
			return c, nil
		}
	}
}

// readKeyword reads a boolean or null value from the stream.
// The first byte of the keyword ('n', 't', or 'f') is already in the
// lookahead; the remaining letters are verified one at a time.
func readKeyword(buf *buffer) (JsonValue, error) {
	var kw string
	var typ = Bool
	var val = false
	switch buf.curr {
	case 'n':
		kw = "null"
		typ = Null
	case 't':
		kw = "true"
		val = true
	case 'f':
		kw = "false"
	}
	for i := 1; i < len(kw); i++ {
		_ = feedq(buf) && feed(buf)
		next(buf)
		if buf.err != nil && buf.err != io.EOF {
			return JsonValue{}, buf.err
		} else if kw[i] != buf.curr {
			return JsonValue{}, newErrUnexpected(buf, kw[i])
		}
	}
	_ = feedq(buf) && feed(buf)
	next(buf)
	return JsonValue{buf, 0, buf.depth + 1, typ, Complete, val, false}, nil
}

// readStream reads a string, array, or object from the stream.
// Only the opening delimiter is consumed here (for strings, the opening
// quote); the value is returned in the Working state for lazy consumption.
func readStream(buf *buffer) (JsonValue, error) {
	var typ JsonType
	switch buf.curr {
	case '"':
		typ = String
		_ = feedq(buf) && feed(buf)
		next(buf)
	case '{':
		typ = Object
	case '[':
		typ = Array
	}
	buf.depth++
	return JsonValue{buf, 0, buf.depth, typ, Working, false, typ == Object}, nil
}

// readValue reads any value from the stream.
// It dispatches on the first significant byte: delimiters go to readStream,
// keyword starts to readKeyword, and digits/minus begin a lazy Number.
func readValue(buf *buffer) (JsonValue, error) {
	_, err := skipSpace(buf)
	if err != nil {
		return JsonValue{}, err
	}
	switch buf.curr {
	case '{', '[', '"':
		return readStream(buf)
	case 'n', 't', 'f':
		return readKeyword(buf)
	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		buf.depth++
		return JsonValue{buf, 0, buf.depth, Number, Working, false, false}, nil
	default:
		return JsonValue{}, newErrUnexpected(buf, '{', '[', '"', 'n', 't', 'f',
			'-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
	}
}

// Parse takes an io.Reader and begins to parse from it, returning a JsonValue.
// This function also takes a size (in bytes) to use when creating the read
// buffer.
func Parse(r io.Reader, size int) (JsonValue, error) {
	data := make([]byte, size)
	buf := buffer{data, r, 0, nil, nil, uint32(size), 0, 0, 0, 0, 0, 0, 0, 0}
	_ = feedq(&buf) && feed(&buf)
	next(&buf)
	return readValue(&buf)
}
// escapemap is a mapping from escape sequences to escaped character values.
var escapemap = [...]byte{
	'"':  '"',
	'/':  '/',
	'\\': '\\',
	'b':  '\b',
	'f':  '\f',
	'n':  '\n',
	'r':  '\r',
	't':  '\t',
}

// Read implements the io.Reader interface for JsonValues (specifically, String
// values). Reading from this interface provides the value of the string.
// Escape sequences are decoded as they are encountered; a unicode escape may
// produce more bytes than remain in b, so leftover bytes are parked in the
// buffer's escape slots and drained by streamEscape on the following call.
// io.EOF is returned when the closing quote is consumed.
func (data *JsonValue) Read(b []byte) (int, error) {
	if data.Type != String {
		return 0, newErrTypeMismatch(data.Type, String)
	} else if data.Status == Complete {
		return 0, io.EOF
	} else if data.Status != Working {
		return 0, ErrIncomplete
	}
	i := 0
	if data.buffer.escapes > 0 {
		// Drain escape bytes left over from a previous Read call.
		i = streamEscape(data.buffer, b, 0)
	}
	for ; i < len(b); i++ {
		if data.buffer.err != nil && data.buffer.err != io.EOF {
			data.Status = Incomplete
			return i, data.buffer.err
		}
		c := data.buffer.curr
		switch {
		case c == '"':
			// Closing quote: consume it and finish the string.
			_ = feedq(data.buffer) && feed(data.buffer)
			next(data.buffer)
			data.Status = Complete
			data.buffer.depth--
			return i, io.EOF
		case c == '\\':
			_ = feedq(data.buffer) && feed(data.buffer)
			next(data.buffer)
			if data.buffer.err != nil && data.buffer.err != io.EOF {
				data.Status = Incomplete
				return i, data.buffer.err
			}
			k := data.buffer.curr
			switch k {
			case 'u':
				err := readUnicode(data.buffer)
				if err != nil {
					data.Status = Incomplete
					return i, err
				}
				// streamEscape advances i past what it wrote; the loop's
				// i++ compensates for the -1 here.
				i = streamEscape(data.buffer, b, i) - 1
			case '"', '/', '\\', 'b', 'f', 'n', 'r', 't':
				_ = feedq(data.buffer) && feed(data.buffer)
				next(data.buffer)
				b[i] = escapemap[k]
			default:
				data.Status = Incomplete
				return i, newErrUnexpected(data.buffer,
					'"', '/', '\\', 'u', 'b', 'f', 'n', 'r', 't')
			}
		case c <= '\x1F':
			// Raw control characters (and the 0 produced at EOF) are invalid
			// inside a JSON string.
			data.Status = Incomplete
			err := newErrUnexpected(data.buffer)
			if data.buffer.err == io.EOF {
				err.CustomMsg = "premature EOF while attempting to read string"
			} else {
				err.CustomMsg = "control characters are not allowed in string values"
			}
			return i, err
		default:
			_ = feedq(data.buffer) && feed(data.buffer)
			next(data.buffer)
			b[i] = c
		}
	}
	return len(b), nil
}
// readUnicode reads a unicode escape (\uXXXX) from within a string value. Also
// supports reading UTF-16 surrogate pairs.
// The UTF-8 encoding of the code point is stashed in the buffer's escape
// slots so the caller can stream it out via streamEscape.
func readUnicode(buf *buffer) error {
	_ = feedq(buf) && feed(buf)
	next(buf)
	pt1, _, _, _, _, err := parseHex(buf)
	if err != nil {
		return err
	}
	var cp rune
	if pt1 >= 0xD800 && pt1 <= 0xDFFF {
		// High surrogate: a second \uXXXX escape must follow.
		pt2, err := readSurrogate(buf)
		if err != nil {
			return err
		}
		cp = 0x10000 + rune(pt1-0xD800)<<10 + rune(pt2-0xDC00)
	} else {
		cp = rune(pt1)
	}
	var b [4]byte
	ct := utf8.EncodeRune(b[:], cp)
	// escapes indexes the first pending slot; slots escape1..escape4 hold the
	// encoded bytes, and 5 means "drained".
	buf.escapes = byte(5 - ct)
	j := 0
	for i := buf.escapes; i < 5; i++ {
		switch i {
		case 1:
			buf.escape1 = b[j]
		case 2:
			buf.escape2 = b[j]
		case 3:
			buf.escape3 = b[j]
		case 4:
			buf.escape4 = b[j]
		}
		j++
	}
	return nil
}

// readSurrogate reads the second part of a UTF-16 surrogate pair encoded as a
// pair of unicode character escapes.
// It expects a literal `\uXXXX` where XXXX lies in the low-surrogate range
// DC00-DFFF.
func readSurrogate(buf *buffer) (uint16, error) {
	if buf.err != nil && buf.err != io.EOF {
		return 0, buf.err
	}
	if buf.curr != '\\' {
		return 0, newErrUnexpected(buf, '\\')
	}
	_ = feedq(buf) && feed(buf)
	next(buf)
	if buf.err != nil && buf.err != io.EOF {
		return 0, buf.err
	}
	if buf.curr != 'u' {
		return 0, newErrUnexpected(buf, 'u')
	}
	_ = feedq(buf) && feed(buf)
	next(buf)
	pt2, cx, cy, _, _, err := parseHex(buf)
	if err != nil {
		return 0, err
	}
	if pt2 < 0xD000 || pt2 > 0xDFFF {
		return 0, newErrUnexpectedChar(foffs(buf)-4, cx, 'D', 'd')
	} else if pt2 < 0xDC00 {
		return 0, newErrUnexpectedChar(foffs(buf)-3, cy, 'C', 'D', 'E', 'F', 'c', 'd', 'e', 'f')
	}
	return pt2, nil
}

// parseHex reads the next four digits from the buffer, and parses them as a
// 16 bit hexadecimal value.
// It also returns the four raw digit bytes so callers can report precise
// positions in error messages.
func parseHex(buf *buffer) (uint16, byte, byte, byte, byte, error) {
	var n uint16
	var cs [4]byte
	for i := 0; i < 4; i++ {
		if buf.err != nil && buf.err != io.EOF {
			return 0, 0, 0, 0, 0, buf.err
		}
		switch {
		case buf.curr <= '9' && buf.curr >= '0':
			n = n<<4 + uint16(buf.curr-'0')
		case buf.curr <= 'F' && buf.curr >= 'A':
			n = n<<4 + uint16(buf.curr-'A'+10)
		case buf.curr <= 'f' && buf.curr >= 'a':
			n = n<<4 + uint16(buf.curr-'a'+10)
		default:
			return 0, 0, 0, 0, 0, newErrUnexpected(buf,
				'A', 'B', 'C', 'D', 'E', 'F', 'a', 'b', 'c', 'd', 'e', 'f',
				'0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
		}
		cs[i] = buf.curr
		_ = feedq(buf) && feed(buf)
		next(buf)
	}
	return n, cs[0], cs[1], cs[2], cs[3], nil
}

// streamEscape streams unicode escapes out of the buffer.
// It copies pending escape bytes into b starting at index i and returns the
// index just past the last byte written; escapes resets to 0 once drained.
func streamEscape(buf *buffer, b []byte, i int) int {
	for buf.escapes < 5 && i < len(b) {
		switch buf.escapes {
		case 1:
			b[i] = buf.escape1
		case 2:
			b[i] = buf.escape2
		case 3:
			b[i] = buf.escape3
		case 4:
			b[i] = buf.escape4
		}
		buf.escapes++
		i++
	}
	if buf.escapes >= 5 {
		buf.escapes = 0
	}
	return i
}
// readInt is a special case of readNumber, and is designed to parse integers.
// This parses integers in about half the time compared to strconv.ParseInt().
// Only called for inputs shorter than 19 digits (see readNumber), so the
// int64 accumulator cannot overflow.
func readInt(data *JsonValue, sl []byte) error {
	var val int64
	neg := false
	idx := 0
	if sl[0] == '-' {
		if len(sl) == 1 {
			data.Status = Incomplete
			return newErrUnexpected(data.buffer,
				'0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
		}
		neg = true
		idx = 1
	}
	for ; idx < len(sl); idx++ {
		if sl[idx] <= '9' && sl[idx] >= '0' {
			val = 10*val + int64(sl[idx]-'0')
		} else {
			data.Status = Incomplete
			offs := foffs(data.buffer) + uint64(1+idx-len(sl))
			return newErrUnexpectedChar(offs, sl[idx],
				'0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
		}
	}
	if neg {
		val = -val
	}
	data.numval = float64(val)
	data.Status = Complete
	data.buffer.depth--
	return nil
}

// readNumber reads a numeric value from the stream.
// Bytes are collected until a non-numeric character; short pure-integer
// inputs take the fast readInt path, everything else goes through
// strconv.ParseFloat.
func readNumber(data *JsonValue) error {
	var b [32]byte
	sl := b[:0]
	simple := true
L:
	for {
		if data.buffer.err != nil {
			if data.buffer.err == io.EOF {
				break L
			}
			data.Status = Incomplete
			return data.buffer.err
		}
		switch data.buffer.curr {
		case '+', '.', 'e', 'E':
			// Any of these makes the number non-integral.
			simple = false
			fallthrough
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			sl = append(sl, data.buffer.curr)
			_ = feedq(data.buffer) && feed(data.buffer)
			next(data.buffer)
		default:
			break L
		}
	}
	if simple && len(sl) < 19 {
		return readInt(data, sl)
	}
	// strconv.ParseFloat takes a string, but we only have a []byte.
	// Converting to string requires a new alloc and a copy, plus an escape
	// to heap for the underlying array. This line does an unsafe cast and
	// sidesteps escape analysis, avoiding those expensive extra steps.
	f, err := strconv.ParseFloat(*(*string)(noescape(unsafe.Pointer(&sl))), 64)
	if err != nil {
		data.Status = Incomplete
		return err
	}
	data.numval = f
	data.Status = Complete
	data.buffer.depth--
	return nil
}

// ValueNum returns the value of a Number.
// The number is parsed lazily on the first call; later calls return the
// cached value.
func (data *JsonValue) ValueNum() (float64, error) {
	if data.Type != Number {
		return 0, newErrTypeMismatch(data.Type, Number)
	} else if data.Status == Complete {
		return data.numval, nil
	} else if data.Status != Working {
		return 0, ErrIncomplete
	}
	err := readNumber(data)
	if err != nil {
		return 0, err
	}
	return data.numval, nil
}
// ValueBool returns the value of a Bool. A type-mismatch error is returned
// for any non-Bool value.
func (data *JsonValue) ValueBool() (bool, error) {
	if data.Type != Bool {
		return false, newErrTypeMismatch(data.Type, Bool)
	}
	return data.boolval, nil
}
// readNext reads the next key or value from an Object or Array, respectively.
// This is a shared function, because the logic is the same in both cases.
// data.boolval records whether the first element was already consumed: the
// separator expected before the next element is the opening delimiter for
// the first element, and ',' afterwards. EndOfValue is returned when the
// closing delimiter is found.
func readNext(data *JsonValue, open byte, close byte) error {
	c, err := skipSpace(data.buffer)
	if err != nil {
		data.Status = Incomplete
		return err
	}
	if c == close {
		_ = feedq(data.buffer) && feed(data.buffer)
		next(data.buffer)
		data.Status = Complete
		data.buffer.depth--
		return EndOfValue
	}
	var expect byte = ','
	if data.boolval == false {
		expect = open
	}
	if c != expect {
		data.Status = Incomplete
		return newErrUnexpected(data.buffer, expect, close)
	}
	_ = feedq(data.buffer) && feed(data.buffer)
	next(data.buffer)
	if data.boolval == false {
		// Immediately after the opening delimiter the collection may be
		// empty; check for the closing delimiter before the first element.
		c, err = skipSpace(data.buffer)
		if err != nil {
			data.Status = Incomplete
			return err
		}
		if c == close {
			_ = feedq(data.buffer) && feed(data.buffer)
			next(data.buffer)
			data.Status = Complete
			data.buffer.depth--
			return EndOfValue
		}
	}
	return nil
}

// NextKey reads the next key from an Object, and returns it as a JsonValue. If
// the next part of the Object to parse is a value, that value is discarded, and
// the following key is returned. If the end of the Object is found, an
// EndOfValue error is returned.
func (data *JsonValue) NextKey() (JsonValue, error) {
	if data.Type != Object {
		return JsonValue{}, newErrTypeMismatch(data.Type, Object)
	} else if data.Status == Complete {
		return JsonValue{}, EndOfValue
	} else if data.Status != Working {
		return JsonValue{}, ErrIncomplete
	} else if data.depth != data.buffer.depth {
		// A child value is still being consumed; refuse to skip past it.
		return JsonValue{}, ErrWorkingChild
	}
	if data.keynext == false {
		// The pending element is a value: consume and discard it.
		val, err := objectNextValue(data)
		if err != nil {
			data.Status = Incomplete
			return JsonValue{}, err
		}
		err = val.Close()
		if err != nil {
			data.Status = Incomplete
			return JsonValue{}, err
		}
	}
	err := readNext(data, '{', '}')
	if err != nil {
		return JsonValue{}, err
	}
	_, err = skipSpace(data.buffer)
	if err != nil {
		data.Status = Incomplete
		return JsonValue{}, err
	}
	if data.buffer.curr != '"' {
		data.Status = Incomplete
		return JsonValue{}, newErrUnexpected(data.buffer, '"')
	}
	val, _ := readStream(data.buffer)
	data.boolval = true
	data.keynext = false
	return val, nil
}
// objectNextValue reads the next value from an Object.
// If a key is pending, it is consumed and discarded first; then the ':'
// separator is expected before the value itself.
func objectNextValue(data *JsonValue) (JsonValue, error) {
	if data.depth != data.buffer.depth {
		return JsonValue{}, ErrWorkingChild
	}
	if data.keynext == true {
		key, err := data.NextKey()
		if err != nil {
			data.Status = Incomplete
			return JsonValue{}, err
		}
		err = key.Close()
		if err != nil {
			data.Status = Incomplete
			return JsonValue{}, err
		}
	}
	c, err := skipSpace(data.buffer)
	if err != nil {
		data.Status = Incomplete
		return JsonValue{}, err
	}
	if c != ':' {
		data.Status = Incomplete
		return JsonValue{}, newErrUnexpected(data.buffer, ':')
	}
	_ = feedq(data.buffer) && feed(data.buffer)
	next(data.buffer)
	val, err1 := readValue(data.buffer)
	if err1 != nil {
		data.Status = Incomplete
		return JsonValue{}, err1
	}
	data.keynext = true
	return val, nil
}

// arrayNextValue reads the next value from an Array.
func arrayNextValue(data *JsonValue) (JsonValue, error) {
	if data.depth != data.buffer.depth {
		return JsonValue{}, ErrWorkingChild
	}
	err := readNext(data, '[', ']')
	if err != nil {
		return JsonValue{}, err
	}
	val, err1 := readValue(data.buffer)
	if err1 != nil {
		data.Status = Incomplete
		return JsonValue{}, err1
	}
	data.boolval = true
	return val, nil
}

// NextValue reads the next value from an Object or Array, and returns it as a
// JsonValue. If this is used on an Object, and the next part of the Object to
// parse is a key, that key is discarded and the corresponding value is
// returned. If the end of the Object or Array is found, an EndOfValue error is
// returned.
func (data *JsonValue) NextValue() (JsonValue, error) {
	if data.Type == Array && data.Status == Working {
		return arrayNextValue(data)
	} else if data.Type == Object && data.Status == Working {
		return objectNextValue(data)
	} else if data.Type != Array && data.Type != Object {
		return JsonValue{}, newErrTypeMismatch(data.Type, Array, Object)
	} else if data.Status == Complete {
		return JsonValue{}, EndOfValue
	}
	return JsonValue{}, ErrIncomplete
}
// Close implements the io.Closer interface for JsonValues. Closing a JsonValue
// discards the remainder of that value from the stream. This is a fast way to
// ignore unimportant parts of the input to reach useful information.
func (data *JsonValue) Close() error {
	if data.Status == Complete {
		return nil
	} else if data.Status != Working {
		return ErrIncomplete
	} else if data.depth != data.buffer.depth {
		// A nested child is mid-parse; closing now would desynchronize.
		return ErrWorkingChild
	}
	if data.Type == Number {
		return closeNumber(data)
	}
	if data.boolval == false {
		// The opening delimiter of this Object/Array has not been consumed
		// yet; consume it so the delimiter counting below balances.
		if data.Type == Object && data.buffer.curr == '{' ||
			data.Type == Array && data.buffer.curr == '[' {
			_ = feedq(data.buffer) && feed(data.buffer)
			next(data.buffer)
		}
	}
	return closeObjectArray(data)
}

// closeNumber is a special case for Close, and works on Numbers.
// It scans past all numeric characters directly in the raw buffer.
// NOTE(review): on clean termination this still sets Status = Incomplete and
// returns nil — the numeric value is discarded without parsing; confirm this
// status choice is intentional.
func closeNumber(data *JsonValue) error {
	for {
		if data.buffer.err != nil {
			if data.buffer.err == io.EOF {
				data.Status = Incomplete
				data.buffer.depth--
				return nil
			}
			data.Status = Incomplete
			return data.buffer.err
		}
		i := data.buffer.offs - 1
		for i < data.buffer.erroffs {
			switch data.buffer.data[i] {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '+', '-', 'e', 'E':
				i++
			default:
				// First non-numeric byte: resynchronize the lookahead here.
				data.buffer.offs = i
				_ = feedq(data.buffer) && feed(data.buffer)
				next(data.buffer)
				data.Status = Incomplete
				data.buffer.depth--
				return nil
			}
		}
		data.buffer.offs = data.buffer.erroffs
		_ = feedq(data.buffer) && feed(data.buffer)
		next(data.buffer)
	}
}

// closeObjectArray is the general case for Close, and works on Objects and
// Arrays.
// It scans raw buffer bytes, tracking string state (so delimiters inside
// strings are ignored, with backslash escapes skipped) and nesting depth,
// until the matching closing delimiter is found.
func closeObjectArray(data *JsonValue) error {
	instr := data.Type == String
	depth := 0
	for {
		if data.buffer.err != nil {
			data.Status = Incomplete
			if data.buffer.err == io.EOF {
				err := newErrUnexpected(data.buffer)
				err.CustomMsg = "premature EOF while attempting to close value"
				return err
			}
			return data.buffer.err
		}
		i := data.buffer.offs - 1
	InStr:
		if instr {
			for i < data.buffer.erroffs {
				switch data.buffer.data[i] {
				case '\\':
					// Skip the escaped character.
					i++
				case '"':
					if data.Type == String {
						data.buffer.offs = i + 1
						_ = feedq(data.buffer) && feed(data.buffer)
						next(data.buffer)
						data.Status = Complete
						data.buffer.depth--
						return nil
					}
					instr = false
					i++
					goto InStr
				}
				i++
			}
		} else {
			for i < data.buffer.erroffs {
				switch data.buffer.data[i] {
				case '{', '[':
					depth++
				case '}', ']':
					depth--
					if depth < 0 {
						// Matching closing delimiter of the value itself.
						data.buffer.offs = i + 1
						_ = feedq(data.buffer) && feed(data.buffer)
						next(data.buffer)
						data.Status = Complete
						data.buffer.depth--
						return nil
					}
				case '"':
					instr = true
					i++
					goto InStr
				}
				i++
			}
		}
		data.buffer.offs = data.buffer.erroffs
		if feedq(data.buffer) {
			feed(data.buffer)
			data.buffer.offs = i - uint32(len(data.buffer.data))
		}
		next(data.buffer)
	}
}
// simpleSort sorts the inputs in-place. Usually this is a short list, and may
// already be sorted (or mostly sorted), so a simple insertion sort is a good
// choice here.
func simpleSort(vals []string) {
	for i := 1; i < len(vals); i++ {
		key := vals[i]
		j := i - 1
		for j >= 0 && vals[j] > key {
			vals[j+1] = vals[j]
			j--
		}
		vals[j+1] = key
	}
}
// compareRead reads a String value and does a comparison against the given
// slice of strings. The strings must be sorted for this to work properly.
// The value is streamed in 16-byte chunks and matched incrementally: x is the
// index of the current candidate, and [y, z) is the chunk just read. When the
// current candidate stops matching, the next candidate is tried as long as it
// shares the already-matched prefix. On mismatch the rest of the string is
// discarded with Close.
func compareRead(data *JsonValue, vals []string) (string, bool, error) {
	var buf [16]byte
	x, y, z := 0, 0, 0
	for {
		l, err := data.Read(buf[:])
		if err != nil && err != io.EOF {
			return "", false, err
		}
		y = z
		z += l
		for {
			if len(vals[x]) >= z && vals[x][y:z] == string(buf[:l]) {
				break
			} else if x+1 >= len(vals) || vals[x][:y] != vals[x+1][:y] {
				// No remaining candidate can match; drain the string.
				return "", false, data.Close()
			}
			x++
		}
		if err == io.EOF && z == len(vals[x]) {
			return vals[x], true, nil
		} else if err == io.EOF {
			return "", false, nil
		}
	}
}

// Compare is a helper function, designed to read a String value and compare it
// against one or more arguments. If the String value matches none of the
// arguments, false is returned. Otherwise, true is returned along with the
// matched string.
func (data *JsonValue) Compare(vals ...string) (string, bool, error) {
	if len(vals) <= 0 {
		return "", false, ErrNoParamsSpecified
	}
	simpleSort(vals)
	return compareRead(data, vals)
}
// FindKey is a helper function, designed to find a specific value in an Object.
// It reads the object until it finds the first key that matches one of the
// provided arguments. If no keys match, false is returned. Otherwise, true is
// returned along with the matched string and the value associated with the
// matched key. Note that this will discard everything in the stream prior to
// the matched key/value pair.
func (data *JsonValue) FindKey(keys ...string) (string, JsonValue, bool, error) {
if len(keys) <= 0 {
return "", JsonValue{}, false, ErrNoParamsSpecified
}
simpleSort(keys)
for {
key, err := data.NextKey()
if err == EndOfValue {
return "", JsonValue{}, false, nil
} else if err != nil {
return "", JsonValue{}, false, err
}
k, match, err1 := compareRead(&key, keys)
if err1 != nil {
return "", JsonValue{}, false, err1
} else if match {
val, err := data.NextValue()
if err != nil {
return "", JsonValue{}, false, err
}
return k, val, true, nil
}
}
} | parser.go | 0.50952 | 0.424173 | parser.go | starcoder |
package fourq
import (
"math/big"
"github.com/cloudflare/circl/internal/conv"
)
// Size of scalars used for point multiplication, in bytes.
const Size = 32
// Point represents an affine point of the curve. The identity is (0,1).
type Point struct{ X, Y Fq }
// CurveParams contains the parameters of the elliptic curve.
type CurveParams struct {
	Name string // The canonical name of the curve.
	P *big.Int // The order of the underlying field Fp.
	N *big.Int // The order of the generator point.
	G Point // This is the generator point.
}
// Params returns the parameters for the curve.
func Params() *CurveParams {
params := CurveParams{Name: "FourQ"}
params.P = conv.Uint64Le2BigInt(prime[:])
params.N = conv.Uint64Le2BigInt(orderGenerator[:])
params.G.X = genX
params.G.Y = genY
return ¶ms
}
// IsOnCurve reports whether the given P=(x,y) lies on the curve.
func (P *Point) IsOnCurve() bool {
	var q pointR1
	P.toR1(&q)
	return q.IsOnCurve()
}

// SetGenerator assigns to P the generator point G.
func (P *Point) SetGenerator() {
	P.X = genX
	P.Y = genY
}

// SetIdentity assigns to P the identity element (0,1).
func (P *Point) SetIdentity() {
	var q pointR1
	q.SetIdentity()
	P.fromR1(&q)
}

// IsIdentity returns true if P is the identity element.
func (P *Point) IsIdentity() bool {
	var q pointR1
	P.toR1(&q)
	return q.IsIdentity()
}
// Add calculates a point addition P = Q + R.
func (P *Point) Add(Q, R *Point) {
	var left, right pointR1
	var rightR2 pointR2
	Q.toR1(&left)
	R.toR1(&right)
	// The addition routine consumes its second operand in the R2
	// representation, so convert before accumulating into left.
	rightR2.FromR1(&right)
	left.add(&rightR2)
	P.fromR1(&left)
}

// ScalarMult calculates P = k*Q, where Q is an N-torsion point.
func (P *Point) ScalarMult(k *[Size]byte, Q *Point) {
	var acc, base pointR1
	Q.toR1(&base)
	base.ClearCofactor()
	acc.ScalarMult(k, &base)
	P.fromR1(&acc)
}

// ScalarBaseMult calculates P = k*G, where G is the generator point.
func (P *Point) ScalarBaseMult(k *[Size]byte) {
	var acc pointR1
	acc.ScalarBaseMult(k)
	P.fromR1(&acc)
}
// fromR1 normalizes the projective point Q to affine coordinates and
// stores the result in P.
func (P *Point) fromR1(Q *pointR1) {
	Q.ToAffine()
	P.X = Q.X
	P.Y = Q.Y
}
// toR1 lifts the affine point P into the projective representation used
// internally, with Z normalized to one.
// NOTE(review): Ta and Tb are seeded with X and Y, presumably so that
// Ta*Tb equals the extended coordinate T = X*Y when Z=1 — confirm against
// pointR1's contract.
func (P *Point) toR1(projP *pointR1) {
	projP.X = P.X
	projP.Y = P.Y
	projP.Ta = P.X
	projP.Tb = P.Y
	projP.Z.setOne()
}
package goiaf
import (
"net/url"
"strconv"
)
// HouseRequest contains methods which can be used to filter the response.
// Each setter returns a modified copy (value receivers), so calls can be
// chained; Convert produces the final URL query parameters.
type HouseRequest interface {
	ParamConverter
	// Limit sets the maximum houses to return.
	// NOTE(review): Convert currently sends this value under the "page"
	// query key, which looks swapped with the page-number field — verify
	// against the API's pagination parameters.
	Limit(int) HouseRequest
	// Name can be used to filter the returned houses by their name.
	Name(string) HouseRequest
	// Region sets the value for the region parameter.
	// Only houses that belong in the given region are included in the response.
	Region(string) HouseRequest
	// Words sets the value for the words parameter.
	// Only houses that has the given words are included in the response.
	Words(string) HouseRequest
	// HasWords sets the value for the hasWords parameter.
	// Only houses that have words are included in the response.
	HasWords(bool) HouseRequest
	// HasTitles sets the value for the hasTitles parameter.
	// Only houses that have titles are included in the response.
	HasTitles(bool) HouseRequest
	// HasSeats sets the value for the hasSeats parameter.
	// Only houses that have seats are included in the response.
	HasSeats(bool) HouseRequest
	// HasDiedOut sets the value for the hasDiedOut parameter.
	// Only houses that are extinct are included in the response.
	HasDiedOut(bool) HouseRequest
	// HasAncestralWeapons sets the value for the hasAncestralWeapons parameter.
	// Only houses that have ancestral weapons are included in the response.
	HasAncestralWeapons(bool) HouseRequest
}
// NewHouseRequest returns a new HouseRequest which can be used to filter
// houses. The limit defaults to 10.
func NewHouseRequest() HouseRequest {
	req := houseRequest{}
	req.limit = 10
	return req
}
// houseRequest is the concrete HouseRequest implementation. The embedded
// request supplies the limit and page fields. All filter fields are
// pointers: nil means "parameter not set, omit from the query string".
type houseRequest struct {
	request
	name *string
	region *string
	words *string
	hasWords *bool
	hasTitles *bool
	hasSeats *bool
	hasDiedOut *bool
	hasAncestralWeapons *bool
}
// Limit sets the maximum number of houses to return and yields the
// modified copy of the request.
func (request houseRequest) Limit(n int) HouseRequest {
	request.limit = n
	return request
}
// Convert serializes the configured filters into URL query parameters.
// The configured limit maps to the API's "pageSize" parameter and the
// requested page number to "page".
// BUG FIX: these two keys were previously swapped (limit was sent as
// "page" and page as "pageSize"), so pagination requests were wrong.
func (request houseRequest) Convert() url.Values {
	params := url.Values{}
	params.Set("pageSize", strconv.Itoa(request.limit))
	if request.page != nil {
		params.Set("page", strconv.Itoa(*request.page))
	}
	if request.name != nil {
		params.Set("name", *request.name)
	}
	if request.region != nil {
		params.Set("region", *request.region)
	}
	if request.words != nil {
		params.Set("words", *request.words)
	}
	if request.hasWords != nil {
		params.Set("hasWords", strconv.FormatBool(*request.hasWords))
	}
	if request.hasTitles != nil {
		params.Set("hasTitles", strconv.FormatBool(*request.hasTitles))
	}
	if request.hasSeats != nil {
		params.Set("hasSeats", strconv.FormatBool(*request.hasSeats))
	}
	if request.hasDiedOut != nil {
		params.Set("hasDiedOut", strconv.FormatBool(*request.hasDiedOut))
	}
	if request.hasAncestralWeapons != nil {
		params.Set("hasAncestralWeapons", strconv.FormatBool(*request.hasAncestralWeapons))
	}
	return params
}
func (request houseRequest) Name(value string) HouseRequest {
request.name = &value
return request
}
func (request houseRequest) Region(value string) HouseRequest {
request.region = &value
return request
}
func (request houseRequest) Words(value string) HouseRequest {
request.words = &value
return request
}
func (request houseRequest) HasWords(value bool) HouseRequest {
request.hasWords = &value
return request
}
func (request houseRequest) HasTitles(value bool) HouseRequest {
request.hasTitles = &value
return request
}
func (request houseRequest) HasSeats(value bool) HouseRequest {
request.hasSeats = &value
return request
}
func (request houseRequest) HasDiedOut(value bool) HouseRequest {
request.hasDiedOut = &value
return request
}
func (request houseRequest) HasAncestralWeapons(value bool) HouseRequest {
request.hasAncestralWeapons = &value
return request
} | house_request.go | 0.631594 | 0.405154 | house_request.go | starcoder |
package input
import (
"github.com/Jragonmiris/mathgl"
"github.com/go-gl/glfw"
"math"
)
// Camera tracks a free-look camera: its world position, its view direction
// as horizontal/vertical angles (radians), and the time of the previous
// update so movement can be scaled by frame time. A time of -1 marks
// "no previous frame yet" (see NewCamera).
type Camera struct {
	pos mathgl.Vec3f
	hAngle, vAngle float64
	time float64
}
const (
	speed float64 = 3.0 // movement speed, world units per second
	mouseSpeed = .005 // radians of rotation per pixel of mouse travel
	width = 1024.0 // window width in pixels; assumed to match the created window
	height = 768.0 // window height in pixels
	initialFOV = 45.0 // vertical field of view in degrees, before wheel zoom
)
// NewCamera returns a camera positioned at (0,0,5) looking back along -Z
// (hAngle = pi). The stored time of -1 can never occur naturally and acts
// as a "first frame" flag: the first ComputeViewPerspective call replaces
// it with the current clock instead of computing a huge delta.
func NewCamera() *Camera {
	return &Camera{
		pos:    mathgl.Vec3f{0., 0., 5.},
		hAngle: math.Pi,
		vAngle: 0.0,
		time:   -1.0,
	}
}
// Since go has multiple return values, I just went ahead and made it return the view and perspective matrices (in that order) rather than messing with getter methods
//
// ComputeViewPerspective polls mouse/keyboard input, updates the camera's
// angles and position scaled by the elapsed frame time, and returns the
// resulting view and perspective matrices. It has side effects: it warps
// the cursor back to the window center every call.
func (c *Camera) ComputeViewPerspective() (mathgl.Mat4f, mathgl.Mat4f) {
	// First call: seed the clock so the initial deltaT is ~0, not huge.
	if mathgl.FloatEqual(-1.0, c.time) {
		c.time = glfw.Time()
	}
	currTime := glfw.Time()
	deltaT := currTime - c.time
	// Mouse movement is measured as the offset from the window center,
	// then the cursor is re-centered for the next frame.
	xPos, yPos := glfw.MousePos()
	glfw.SetMousePos(width/2.0, height/2.0)
	c.hAngle += mouseSpeed * ((width / 2.0) - float64(xPos))
	c.vAngle += mouseSpeed * ((height / 2.0) - float64(yPos))
	// Spherical-to-Cartesian conversion of the two view angles.
	dir := mathgl.Vec3f{
		float32(math.Cos(c.vAngle) * math.Sin(c.hAngle)),
		float32(math.Sin(c.vAngle)),
		float32(math.Cos(c.vAngle) * math.Cos(c.hAngle))}
	// Right vector lies in the horizontal plane, 90 degrees from forward.
	right := mathgl.Vec3f{
		float32(math.Sin(c.hAngle - math.Pi/2.0)),
		0.0,
		float32(math.Cos(c.hAngle - math.Pi/2.0))}
	up := right.Cross(dir)
	// Movement: arrows/WASD, scaled by deltaT for frame-rate independence.
	if glfw.Key(glfw.KeyUp) == glfw.KeyPress || glfw.Key('W') == glfw.KeyPress {
		c.pos = c.pos.Add(dir.Mul(float32(deltaT * speed)))
	}
	if glfw.Key(glfw.KeyDown) == glfw.KeyPress || glfw.Key('S') == glfw.KeyPress {
		c.pos = c.pos.Sub(dir.Mul(float32(deltaT * speed)))
	}
	if glfw.Key(glfw.KeyRight) == glfw.KeyPress || glfw.Key('D') == glfw.KeyPress {
		c.pos = c.pos.Add(right.Mul(float32(deltaT * speed)))
	}
	if glfw.Key(glfw.KeyLeft) == glfw.KeyPress || glfw.Key('A') == glfw.KeyPress {
		c.pos = c.pos.Sub(right.Mul(float32(deltaT * speed)))
	}
	// Adding to the original tutorial, Space goes up
	if glfw.Key(glfw.KeySpace) == glfw.KeyPress {
		c.pos = c.pos.Add(up.Mul(float32(deltaT * speed)))
	}
	// Adding to the original tutorial, left control goes down
	if glfw.Key(glfw.KeyLctrl) == glfw.KeyPress {
		c.pos = c.pos.Sub(up.Mul(float32(deltaT * speed)))
	}
	// Mouse wheel zooms by shrinking/growing the field of view.
	fov := initialFOV - 5.0*float64(glfw.MouseWheel())
	proj := mathgl.Perspective(fov, 4.0/3.0, 0.1, 100.0)
	view := mathgl.LookAtV(c.pos, c.pos.Add(dir), up)
	c.time = currTime
	return view, proj
}
package main
import (
"github.com/jeffail/gabs"
"io/ioutil"
"log"
"math"
)
//These represent the tile order in Tiled sprite
const (
	TypeGround = 1 // walkable floor
	TypeGoal = 2 // pathfinding destination
	TypeStart = 3 // pathfinding origin
	TypeBlock = 4 // impassable wall
)
// TypeNames maps each tile type constant to a human-readable label.
var TypeNames = map[int]string{
	TypeStart: "Start",
	TypeGoal: "Goal",
	TypeGround: "Ground",
	TypeBlock: "Block",
}
// Two dimensional map, consisting of Tiles. The outer key is X, the inner
// key is Y; absent entries mean "no tile at that coordinate".
type Map map[int]map[int]*Tile
// Tile is one cell of the map. M points back at the containing Map so a
// tile can look up its own neighbours.
type Tile struct {
	X, Y int
	Type int
	M *Map
}
// addTile inserts t at its own coordinates, creating the column map on
// first use, and links the tile back to its containing Map.
func (m Map) addTile(t *Tile) {
	col, ok := m[t.X]
	if !ok {
		col = map[int]*Tile{}
		m[t.X] = col
	}
	col[t.Y] = t
	t.M = &m
}
// getTile returns the tile at (x, y), or nil when that coordinate holds
// no tile.
func (m Map) getTile(x, y int) *Tile {
	col := m[x]
	if col == nil {
		return nil
	}
	return col[y]
}
// calcTileCoords converts a flat tile index into (x, y) coordinates: x
// advances once per mapWidth entries and y is the remainder.
// NOTE(review): relative to Tiled's row-major layout (index = row*width +
// col) these names look transposed, but the rest of this file uses them
// consistently, so pathfinding is unaffected.
func calcTileCoords(index, mapWidth int) (int, int) {
	return index / mapWidth, index % mapWidth
}
// parseMapFile loads a Tiled JSON export from filePath and builds a Map
// from the first tile layer's data array.
// NOTE(review): any read or parse failure calls log.Fatal and terminates
// the process — consider returning an error instead for testability.
func parseMapFile(filePath string) *Map {
	mapData, err := ioutil.ReadFile(filePath)
	if err != nil {
		log.Fatal(err)
	}
	jsonParsed, err := gabs.ParseJSON(mapData)
	if err != nil {
		log.Fatal(err)
	}
	mapWidth := int(jsonParsed.Path("width").Data().(float64))
	// Only first layer from Tiled data is used, rest ignored
	firstMapLayer := jsonParsed.Search("layers", "data").Index(0)
	tileTypes, _ := firstMapLayer.Children()
	m := Map{}
	// Each entry of the layer's data array is one tile type id; its
	// position in the array determines the tile's coordinates.
	for index, tileType := range tileTypes {
		x, y := calcTileCoords(index, mapWidth)
		tile := Tile{X: x, Y: y, Type: int(tileType.Data().(float64))}
		m.addTile(&tile)
	}
	return &m
}
// getWalkableNeighbours returns the orthogonally adjacent tiles of t that
// are walkable ground. Diagonal neighbours are deliberately excluded so
// paths cannot cut corners.
func (t *Tile) getWalkableNeighbours() []*Tile {
	var neighbours []*Tile
	offsets := [][]int{
		{-1, 0},
		{0, -1},
		{0, 1},
		{1, 0},
	}
	for _, off := range offsets {
		// Named nb rather than shadowing the receiver t, as the original did.
		nb := t.M.getTile(t.X+off[0], t.Y+off[1])
		if nb != nil && nb.Type == TypeGround {
			neighbours = append(neighbours, nb)
		}
	}
	return neighbours
}
// calcDistance returns the Manhattan distance between the two tiles.
func (from *Tile) calcDistance(to *Tile) int {
	dx := math.Abs(float64(to.X - from.X))
	dy := math.Abs(float64(to.Y - from.Y))
	return int(dx + dy)
}
// getStartTile returns the tile marked as the pathfinding origin, or nil
// when the map has none.
func (m *Map) getStartTile() *Tile {
	return m.findTileOfType(TypeStart)
}
// getGoalTile returns the pathfinding destination tile, or nil when the
// map has none.
func (m *Map) getGoalTile() *Tile {
	return m.findTileOfType(TypeGoal)
}
func (m *Map) findTileOfType(tileType int) *Tile {
for _, row := range *m {
for _, tile := range row {
if (tile.Type == tileType) {
return tile
}
}
}
return nil
} | map.go | 0.586641 | 0.425546 | map.go | starcoder |
// api.go
// Implements the methods interacting between the model and the server
package hipmodel
import (
"errors"
"fmt"
"github.com/emer/etable/etable"
"github.com/emer/etable/etensor"
"time"
)
/* Update the data used for training/testing the model and re-init the network */
// RestUpdateTrainingData replaces the model's AB training/testing patterns
// either from a CSV file on disk (Source == "file") or from tensors carried
// in the request body (Source == "body"). Any other Source is an error.
// NOTE(review): the "body" branch calls ss.Init() but the "file" branch
// does not — confirm this asymmetry is intentional.
func (ss *Sim) RestUpdateTrainingData(update *DatasetUpdate) error {
	dataset := update.Dataset
	DPrintf("Update dataset: %v\n", dataset)
	var err error = nil
	// update dataset from file
	if update.Source == "file" {
		DPrintf("updating training data with file: %v\n", update.Filename)
		err = ss.OpenPatComma(ss.TrainAB, update.Filename, "Training Patterns", "Training Patterns")
		if err != nil {
			return err
		}
		err = ss.OpenPatComma(ss.TestAB, update.Filename, "Testing Patterns", "Same as Training Patterns")
	} else if update.Source == "body" {
		DPrintf("protobuf dimensions: %v", dataset.Dimensions)
		DPrintf("protobuf patterns: %v", dataset.Data)
		// parse patterns to array of etensors
		patterns := etensor.NewFloat32(convert_slice_to_int(dataset.Dimensions), nil, nil)
		patterns.SetFloats(dataset.Data)
		// Create training etable
		trainpats := etable.NewTable("Training Patterns")
		trainpats.SetMetaData("desc", "Training data from API")
		// The first tensor dimension is the number of pattern rows.
		num_patterns := patterns.Shape.Shp[0]
		trainpats.SetNumRows(num_patterns)
		// setup columns for filling
		rowNames := etensor.NewString([]int{num_patterns}, nil, []string{"row"})
		col := etensor.NewFloat32(patterns.Shape.Shp, nil, []string{"row"})
		col.CopyFrom(patterns)
		DPrintf("Column: \n\n%v\n\n", col)
		// add the pattern columns to the table
		// NOTE(review): Input and ECout share the same col tensor, and
		// TestAB aliases TrainAB below — any in-place mutation will affect
		// both; confirm that is acceptable.
		trainpats.AddCol(rowNames, "Name")
		trainpats.AddCol(col, "Input")
		trainpats.AddCol(col, "ECout")
		// Set training etable in model
		DPrintf("\n\nTrainAB BEFORE\n\n%v\n", ss.TrainAB)
		ss.TrainAB = trainpats
		ss.TestAB = trainpats
		DPrintf("\n\nTrainAB AFTER\n\n%v\n", ss.TrainAB)
		// re-init model
		ss.Init()
		DPrintf("\n\nTrain Env AFTER: \n\n%v\n", ss.TrainEnv)
		DPrintf("\n\nTest Env AFTER: \n\n%v\n", ss.TestEnv)
	} else {
		return errors.New("Invalid update method")
	}
	return err
}
/* Test an item */
// RestTestPattern runs a single recall test: it installs the corrupted
// pattern from the request into the test environment, tests it, and
// returns the resulting name-error statistics.
// NOTE(review): the IsRunning check-then-set is not atomic; concurrent
// HTTP handlers could both pass the check — confirm the server serializes
// these calls or add a mutex.
func (ss *Sim) RestTestPattern(tr *TestItem) (*NameError, error) {
	if ss.IsRunning {
		return nil, errors.New("Model is already running, couldn't test item yet")
	}
	ss.IsRunning = true
	DPrintf("testing pattern: %v\n", tr.CorruptedPattern)
	// setup the environment as we want it
	corrupted_tensor := etensor.NewFloat32(convert_slice_to_int(tr.CorruptedPattern.Dimensions), nil, nil)
	corrupted_tensor.SetFloats(tr.CorruptedPattern.Data)
	ss.UpdateTestEnvWithTestPattern(corrupted_tensor)
	ss.TestItem(0) // always use 0, that's where we'll put the item
	ss.IsRunning = false
	return ss.NameErrorResult, nil
}
/* Start the model's training process */
// RestStartTraining re-initializes the model with the requested run/epoch
// limits, trains it to completion, and reports the elapsed wall time.
// BUG FIX: IsRunning was never reset after training finished, so every
// subsequent call failed with "Training is already running". It is now
// cleared via defer. (This assumes ss.Train() does not manage the flag
// itself — RestTestPattern manages it explicitly, suggesting it does not.)
func (ss *Sim) RestStartTraining(tr *TrainRequest) (string, error) {
	if ss.IsRunning {
		return "", errors.New("Training is already running")
	}
	ss.MaxRuns = tr.MaxRuns
	ss.MaxEpcs = tr.MaxEpcs
	// re-init model to clear previous weights and reset training parameters
	ss.Init()
	ss.IsRunning = true
	defer func() { ss.IsRunning = false }()
	// start the training in a goroutine with a channel for completion
	doneCh := make(chan bool)
	go func() { ss.Train(); doneCh <- true }()
	// wait for training to complete, recording time
	start := time.Now()
	<-doneCh
	elapsed := time.Since(start)
	return fmt.Sprintf("Training completed in %v seconds. Max Runs: %v, Max Epochs: %v\n", elapsed.Seconds(), ss.MaxRuns, ss.MaxEpcs), nil
}
/* Check on the model's training status */
func (ss *Sim) GetTrainingStatus() string {
if ss.IsRunning {
return "Training"
} else {
return "Not Training"
}
} | pkg/hipmodel/api.go | 0.530723 | 0.415788 | api.go | starcoder |
package model
import (
"errors"
"gopkg.in/yaml.v2"
"strings"
)
/*
The Api structure is the fundamental type exposed by the skilldrill model
package, and provides CRUD interfaces to do things like adding skills or people
into the model and registering a person as having a particular skill. All
model editing operations should be done via Api calls rather than accessing the
internal objects directly, so that the integrity of various supplemental look
up tables is preserved. The design intent is that none of Api fields are
exported, but the reason that some are, is solely to facilitate automated
serialization by yaml.Marshal().
*/
type Api struct {
	SerializeVers int
	Skills []*skillNode
	People []*person
	SkillRoot int // root of taxonomy tree (skill.Uid)
	SkillHoldings *skillHoldings // who has what skill?
	NextSkill int
	UiStates map[string]*uiState
	// Supplemental, (duplicate) data for quick lookups. These are not
	// serialized; finishBuildFromDeSerialize() rebuilds them on load.
	skillFromId map[int]*skillNode
	persFromMail map[string]*person
}
// NewApi is the compulsory constructor for an initialized but empty Api.
// Use it rather than a struct literal so that the internal lookup tables
// always exist.
func NewApi() *Api {
	api := &Api{
		SerializeVers: 1,
		Skills:        make([]*skillNode, 0),
		People:        make([]*person, 0),
		SkillRoot:     -1,
		SkillHoldings: newSkillHoldings(),
		NextSkill:     1,
		UiStates:      make(map[string]*uiState),
		// Supplemental lookup tables.
		skillFromId:  make(map[int]*skillNode),
		persFromMail: make(map[string]*person),
	}
	return api
}
// NewFromSerialized is a factory that rebuilds an Api from bytes
// previously produced by Api.Serialize(), then regenerates the
// non-serialized lookup tables.
func NewFromSerialized(in []byte) (api *Api, err error) {
	api = NewApi()
	if err = yaml.Unmarshal(in, api); err != nil {
		return
	}
	api.finishBuildFromDeSerialize()
	return
}
//--------------------------------------------------------------------------
// Methods For Adding things to the model
//--------------------------------------------------------------------------
// AddPerson registers a new person identified by the user-name part of
// their email address (coerced to lower case). Adding an email that is
// already registered yields the PersonExists error.
func (api *Api) AddPerson(email string) (err error) {
	email = strings.ToLower(email)
	// disallow duplicate additions
	if _, present := api.persFromMail[email]; present {
		return errors.New(PersonExists)
	}
	// Mirror of RemovePerson(): keep every lookup structure in sync.
	incomer := newPerson(email)
	api.People = append(api.People, incomer)
	api.persFromMail[email] = incomer
	api.SkillHoldings.registerPerson(email)
	api.UiStates[email] = newUiState()
	return nil
}
/*
The AddSkill() method adds a skill into the model's hierachy of skills. You
specify the skill in terms of description and title strings. These strings
should describe how they additionally qualify their context in the hierachy,
and should not duplicate context information. You specify the tree location by
providing the Uid of the parent skill, and the new Uid for the added skill is
returned. The role parameter should be one of the constants Skill or Category.
When the skill tree is empty, this skill will be added as the root, and the
parentUid parameter is ignored. Errors are generated if you attempt to add a
skill to a node that is not a Category, or if the parent skill you provide is
not recognized.
*/
func (api *Api) AddSkill(role string, title string, desc string,
	parent int) (uid int, err error) {
	// Be sure to keep this symmetrical with RemoveSkill
	// Sanitize parent except when adding root skill
	if api.SkillRoot != -1 {
		parentSkill, ok := api.skillFromId[parent]
		if !ok {
			err = errors.New(UnknownParent)
			return
		}
		if parentSkill.Role != Category {
			err = errors.New(ParentNotCategory)
			return
		}
	}
	// Uids are allocated from a simple monotonically increasing counter.
	uid = api.NextSkill
	api.NextSkill++
	newSkill := newSkillNode(uid, role, title, desc, parent, api)
	// NOTE(review): an earlier comment claimed children are kept in
	// alphabetical order of title, but nothing here sorts — confirm
	// whether addChild() (defined elsewhere) maintains that ordering.
	api.Skills = append(api.Skills, newSkill)
	api.skillFromId[uid] = newSkill
	api.SkillHoldings.registerSkill(uid)
	// First skill ever added becomes the tree root; parent is ignored.
	if api.SkillRoot == -1 {
		api.SkillRoot = uid
		return
	}
	parentSkill := api.skillFromId[parent]
	parentSkill.addChild(newSkill.Uid)
	return
}
/*
GivePersonSkill records that a person holds the given skill. Only leaf
skills may be bestowed, never CATEGORY nodes. The email provided is
lower-cased before use. Possible errors: UnknownPerson, UnknownSkill,
CannotBestowCategory.
*/
func (api *Api) GivePersonSkill(email string, skillId int) (err error) {
	if err = api.tweakParams(&email, &skillId); err != nil {
		return
	}
	skill := api.skillFromId[skillId]
	if skill.Role == Category {
		return errors.New(CannotBestowCategory)
	}
	holder := api.persFromMail[email]
	api.SkillHoldings.bind(skill.Uid, holder.Email)
	return
}
//--------------------------------------------------------------------------
// Methods For Editing the UXP State
//--------------------------------------------------------------------------
/*
CollapseSkill collapses the given node in the per-person tree display of
the skills hierarchy (part of the abstracted user-experience state).
Possible errors: UnknownPerson, UnknownSkill.
*/
func (api *Api) CollapseSkill(email string, skillId int) (err error) {
	if err = api.tweakParams(&email, &skillId); err != nil {
		return
	}
	api.UiStates[email].collapseNode(api.skillFromId[skillId])
	return
}
//--------------------------------------------------------------------------
// Getter Style Methods
//--------------------------------------------------------------------------
/*
SkillWording returns the title and description of the given skill. The
description comes in three forms: in isolation, appended to a description
of the node's ancestry, and that ancestry part alone. Possible error:
UnknownSkill.
*/
func (api *Api) SkillWording(skillId int) (title string, desc string,
	descInContext string, contextAlone string, err error) {
	if err = api.tweakParams(nil, &skillId); err != nil {
		return
	}
	ops := &skillTreeOps{api}
	title, desc, descInContext, contextAlone = ops.skillWording(api.skillFromId[skillId])
	return
}
/*
PeopleWithSkill lists the email addresses of everyone holding the given
skill. Only leaf skills can be held, so querying a CATEGORY is an error.
Possible errors: UnknownSkill, CannotBestowCategory.
*/
func (api *Api) PeopleWithSkill(skillId int) (emails []string, err error) {
	if err = api.tweakParams(nil, &skillId); err != nil {
		return
	}
	if api.skillFromId[skillId].Role == Category {
		return nil, errors.New(CannotBestowCategory)
	}
	return api.SkillHoldings.PeopleWithSkill[skillId].AsSlice(), nil
}
/*
PersonExists reports whether the given person is registered.
NOTE(review): unlike the other Api methods, the address is not coerced to
lower case here — callers must pass it already lower-cased.
*/
func (api *Api) PersonExists(email string) bool {
	_, found := api.persFromMail[email]
	return found
}
/*
PersonHasSkill reports whether the given person is registered as holding
the given skill. Only leaf skills can be held, so querying a CATEGORY is
an error. Possible errors: UnknownSkill, UnknownPerson,
CannotBestowCategory.
*/
func (api *Api) PersonHasSkill(email string, skillId int) (
	hasSkill bool, err error) {
	if err = api.tweakParams(&email, &skillId); err != nil {
		return
	}
	if api.skillFromId[skillId].Role == Category {
		err = errors.New(CannotBestowCategory)
		return
	}
	// BUG FIX: removed leftover debugging code that recomputed this exact
	// lookup step-by-step into unused locals (sh/sop/set/present) before
	// discarding the result.
	hasSkill = api.SkillHoldings.SkillsOfPerson[email].Contains(skillId)
	return
}
/*
EnumerateTree lists skill Uids in display order for the given person's
tree view, together with the depth of each node. Nodes the person has
collapsed (via CollapseSkill) are omitted along with their children.
Possible error: UnknownPerson.
*/
func (api *Api) EnumerateTree(email string) (skills []int,
	depths []int, err error) {
	if err = api.tweakParams(&email, nil); err != nil {
		return
	}
	ops := &skillTreeOps{api}
	skills, depths = ops.enumerateTree(api.UiStates[email].CollapsedNodes)
	return
}
//--------------------------------------------------------------------------
// Methods That Change Existing Content
//--------------------------------------------------------------------------
/*
SetSkillTitle replaces the given skill's title with the text given.
Possible errors: UnknownSkill, TooLong.
*/
func (api *Api) SetSkillTitle(skillId int, newTitle string) (err error) {
	if err = api.tweakParams(nil, &skillId); err != nil {
		return
	}
	// Consistency: validate the input before touching the model, mirroring
	// SetSkillDesc (previously the skill was fetched before validation —
	// harmless, but needlessly inconsistent).
	if len(newTitle) > MaxSkillTitle {
		err = errors.New(TooLong)
		return
	}
	api.skillFromId[skillId].Title = newTitle
	return
}
/*
SetSkillDesc replaces the given skill's description with the text given.
Possible errors: UnknownSkill, TooLong.
*/
func (api *Api) SetSkillDesc(skillId int, newDesc string) (err error) {
	if err = api.tweakParams(nil, &skillId); err != nil {
		return
	}
	if len(newDesc) > MaxSkillDesc {
		return errors.New(TooLong)
	}
	api.skillFromId[skillId].Desc = newDesc
	return
}
/*
The method ReParentSkill() moves a skill node and all its children to a
different position in the tree. The new parent given must be a skill node with
the CATEGORY role. The following errors can be generated: UnknownSkill,
IllegalWithRoot, and ParentNotCategory.
*/
func (api *Api) ReParentSkill(toMove int, newParent int) (err error) {
	if err = api.tweakParams(nil, &toMove); err != nil {
		return
	}
	if err = api.tweakParams(nil, &newParent); err != nil {
		return
	}
	// The root has no parent to detach from, so it may never be moved.
	if toMove == api.SkillRoot {
		return errors.New(IllegalWithRoot)
	}
	// NOTE(review): nothing prevents re-parenting a category underneath one
	// of its own descendants, which would create a cycle in the tree —
	// confirm whether callers guard against this.
	childSkill := api.skillFromId[toMove]
	oldParentSkill := api.skillFromId[childSkill.Parent]
	newParentSkill := api.skillFromId[newParent]
	if newParentSkill.Role != Category {
		return errors.New(ParentNotCategory)
	}
	// Detach from the old parent, attach to the new, update the back-link.
	oldParentSkill.removeChild(toMove)
	newParentSkill.addChild(toMove)
	childSkill.Parent = newParent
	return
}
/*
RemovePerson deletes a previously registered person, identified by the
user-name part of their email (coerced to lower case). Passing an
unregistered person is an error (UnknownPerson).
*/
func (api *Api) RemovePerson(email string) (err error) {
	if err = api.tweakParams(&email, nil); err != nil {
		return
	}
	// Mirror of AddPerson(): unwind every lookup structure.
	leaver := api.persFromMail[email]
	kept := make([]*person, 0, len(api.People))
	for _, p := range api.People {
		if p != leaver {
			kept = append(kept, p)
		}
	}
	api.People = kept
	delete(api.persFromMail, email)
	api.SkillHoldings.UnRegisterPerson(*leaver)
	delete(api.UiStates, email)
	return
}
/*
The RemoveSkill() method removes a skill from the model's hierachy of skills.
It can generate the following errors: UnknownSkill,
CannotRemoveSkillWithChildren, CannotRemoveRootSkill. CannotRemoveSkillHeld.
*/
func (api *Api) RemoveSkill(skillId int) (err error) {
	// Be sure to keep this symmetrical with AddSkill
	if err = api.tweakParams(nil, &skillId); err != nil {
		return
	}
	// The order of the following tests makes it easier to design tests.
	if skillId == api.SkillRoot {
		err = errors.New(CannotRemoveRootSkill)
		return
	}
	departingSkill := api.skillFromId[skillId]
	// Only leaf nodes may be removed, and only when nobody holds them.
	if len(departingSkill.Children) != 0 {
		err = errors.New(CannotRemoveSkillWithChildren)
		return
	}
	if len(api.SkillHoldings.PeopleWithSkill[skillId].AsSlice()) != 0 {
		err = errors.New(CannotRemoveSkillHeld)
		return
	}
	// Detach from the parent, then drop from the master list and lookups.
	parentSkill := api.skillFromId[departingSkill.Parent]
	parentSkill.removeChild(skillId)
	oldList := api.Skills
	api.Skills = []*skillNode{}
	for _, incumbentSkill := range oldList {
		if incumbentSkill != departingSkill {
			api.Skills = append(api.Skills, incumbentSkill)
		}
	}
	delete(api.skillFromId, skillId)
	// For all people, remove this skillid from their collapsed nodes
	for _, skillHolder := range api.People {
		api.UiStates[skillHolder.Email].NotifySkillIsRemoved(skillId)
	}
	api.SkillHoldings.UnRegisterSkill(*departingSkill)
	return
}
//--------------------------------------------------------------------------
// Serialize Methods
//--------------------------------------------------------------------------
/*
The function Serialize() makes a machine-readable (YAML) representation of
the Api object and packages it into a slice of bytes. The supplemental
lookup tables are unexported and so excluded. See also NewFromSerialized().
*/
func (api *Api) Serialize() (out []byte, err error) {
	return yaml.Marshal(api)
}
/*
finishBuildFromDeSerialize rebuilds the supplemental lookup tables (the
uid-to-skill and email-to-person maps) that are deliberately excluded from
serialization, completing an Api that was partly initialized from
de-serialized state.
*/
func (api *Api) finishBuildFromDeSerialize() {
	for _, skill := range api.Skills {
		api.skillFromId[skill.Uid] = skill
	}
	for _, pers := range api.People {
		api.persFromMail[pers.Email] = pers
	}
}
//--------------------------------------------------------------------------
// Module Private Methods
//--------------------------------------------------------------------------
/*
tweakParams validates (and normalizes) the optional email / skillId
arguments shared by most Api methods. A non-nil email is lower-cased in
place and must be registered; a non-nil skillId must exist. Possible
errors: UnknownPerson, UnknownSkill.
*/
func (api *Api) tweakParams(email *string, skillId *int) (err error) {
	if email != nil {
		// coerce caller's email to lower case
		*email = strings.ToLower(*email)
		if _, known := api.persFromMail[*email]; !known {
			return errors.New(UnknownPerson)
		}
	}
	if skillId != nil {
		if _, known := api.skillFromId[*skillId]; !known {
			return errors.New(UnknownSkill)
		}
	}
	return
}
// The method titleFromId() exists to satisfy the titleMapper interface.
// It assumes the uid is valid; an unknown uid would dereference nil.
func (api *Api) titleFromId(skillUid int) (title string) {
	return api.skillFromId[skillUid].Title
}
package check
import (
"reflect"
)
// elem unwraps a single level of pointer indirection: for a non-nil
// pointer it returns the pointed-to value and false; for a nil pointer it
// returns (nil, true); any non-pointer value is returned unchanged with
// false.
func elem(a interface{}) (interface{}, bool) {
	v := reflect.ValueOf(a)
	if v.Kind() != reflect.Ptr {
		return a, false
	}
	if v.IsNil() {
		return nil, true
	}
	return v.Elem().Interface(), false
}
// The compare* family below performs the same three-way comparison for
// every numeric type this package supports, returning 1, -1 or 0 as a is
// greater than, less than, or equal to b.

// compareInt three-way-compares two int values.
func compareInt(a, b int) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareInt8 three-way-compares two int8 values.
func compareInt8(a, b int8) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareInt16 three-way-compares two int16 values.
func compareInt16(a, b int16) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareInt32 three-way-compares two int32 values.
func compareInt32(a, b int32) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareInt64 three-way-compares two int64 values.
func compareInt64(a, b int64) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareUint three-way-compares two uint values.
func compareUint(a, b uint) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareUint8 three-way-compares two uint8 values.
func compareUint8(a, b uint8) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareUint16 three-way-compares two uint16 values.
func compareUint16(a, b uint16) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareUint32 three-way-compares two uint32 values.
func compareUint32(a, b uint32) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareUint64 three-way-compares two uint64 values.
func compareUint64(a, b uint64) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareFloat32 three-way-compares two float32 values.
func compareFloat32(a, b float32) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}

// compareFloat64 three-way-compares two float64 values.
func compareFloat64(a, b float64) int {
	if a > b {
		return 1
	}
	if a < b {
		return -1
	}
	return 0
}
// compare three-way-compares two dynamically typed numeric values. The
// bool result reports whether b has the same concrete type as a; when it
// does not (or the type is unsupported), the int result is not meaningful
// and callers must ignore it.
func compare(a, b interface{}) (int, bool) {
	switch av := a.(type) {
	case int:
		bv, ok := b.(int)
		return compareInt(av, bv), ok
	case int8:
		bv, ok := b.(int8)
		return compareInt8(av, bv), ok
	case int16:
		bv, ok := b.(int16)
		return compareInt16(av, bv), ok
	case int32:
		bv, ok := b.(int32)
		return compareInt32(av, bv), ok
	case int64:
		bv, ok := b.(int64)
		return compareInt64(av, bv), ok
	case uint:
		bv, ok := b.(uint)
		return compareUint(av, bv), ok
	case uint8:
		bv, ok := b.(uint8)
		return compareUint8(av, bv), ok
	case uint16:
		bv, ok := b.(uint16)
		return compareUint16(av, bv), ok
	case uint32:
		bv, ok := b.(uint32)
		return compareUint32(av, bv), ok
	case uint64:
		bv, ok := b.(uint64)
		return compareUint64(av, bv), ok
	case float32:
		bv, ok := b.(float32)
		return compareFloat32(av, bv), ok
	case float64:
		bv, ok := b.(float64)
		return compareFloat64(av, bv), ok
	}
	return 0, false
}
// maybeCompare dereferences both operands, three-way-compares them, and
// feeds the result through the supplied predicate, producing a check
// failure (via internalMsgAndArgs) when the predicate rejects it.
// If either operand is a nil pointer the comparison trivially passes.
// NOTE(review): the nil case wins even when the operand types would have
// been incomparable — confirm that precedence is intended.
func maybeCompare(
	actual, expected interface{}, comparator func(v int) bool,
	msgAndArgs []interface{}, internalMsgAndArgs ...interface{},
) error {
	actualValue, actualIsNil := elem(actual)
	expectedValue, expectedIsNil := elem(expected)
	comparison, ok := compare(actualValue, expectedValue)
	switch {
	case actualIsNil || expectedIsNil:
		return nil
	case !ok:
		return check(false, msgAndArgs, "incomparable types %s(%v) and %s(%v)",
			reflect.TypeOf(actual).Kind(), actual, reflect.TypeOf(expected).Kind(), expected)
	case !comparator(comparison):
		return check(false, msgAndArgs, internalMsgAndArgs...)
	default:
		return nil
	}
}
// GreaterThan checks whether `actual` is greater than `expected`.
func GreaterThan(actual, expected interface{}, msgAndArgs ...interface{}) error {
return maybeCompare(actual, expected, func(comparison int) bool { return comparison > 0 },
msgAndArgs, "%v is not greater than %v", actual, expected)
}
// GreaterThanOrEqualTo checks whether `actual` is greater than or equal to `expected`.
func GreaterThanOrEqualTo(actual, expected interface{}, msgAndArgs ...interface{}) error {
return maybeCompare(actual, expected, func(comparison int) bool { return comparison >= 0 },
msgAndArgs, "%v is not greater than or equal to %v", actual, expected)
}
// LessThan checks whether `actual` is less than `expected`.
func LessThan(actual, expected interface{}, msgAndArgs ...interface{}) error {
	isLess := func(comparison int) bool { return comparison < 0 }
	return maybeCompare(actual, expected, isLess,
		msgAndArgs, "%v is not less than %v", actual, expected)
}
// LessThanOrEqualTo checks whether `actual` is less than or equal to `expected`.
func LessThanOrEqualTo(actual, expected interface{}, msgAndArgs ...interface{}) error {
return maybeCompare(actual, expected, func(comparison int) bool { return comparison <= 0 },
msgAndArgs, "%v is not less than or equal to %v", actual, expected)
} | master/pkg/check/check_numbers.go | 0.655336 | 0.525247 | check_numbers.go | starcoder |
package drago
import (
"fmt"
"github.com/gonum/matrix/mat64"
)
// Network struct represents the neural network
// Values are exported but should not be messed with during training,
// are exported simply for ease of examining the network state
type Network struct {
	Activators   []Activator    // activation function per layer (input/output get Linear)
	Activations  []*mat64.Dense // column vector of activations per layer
	Weights      []*mat64.Dense // weight matrix between each pair of adjacent layers
	Errors       []*mat64.Dense // per-layer error terms from the last backward pass
	Topology     []int          // node count per layer, input first, output last
	Layers       int            // number of layers, equals len(Topology)
	LearningRate float64        // gradient-descent step size
	Iterations   int            // number of passes over the dataset in Learn
	Loss         Criterion      // loss function used for training (MSE by default)
	currentErr   float64        // accumulated loss for the current iteration
	// Controls logging output during training.
	Verbose bool
}
// New creates a new neural network.
// Topology specifies number of hidden layers and nodes in each, as well as
// size of samples and labels (first and last values, respectively).
// Acts should hold one activator for each hidden layer.
func New(learnRate float64, iterations int, topology []int, acts []Activator) *Network {
	layerCount := len(topology)
	n := &Network{
		LearningRate: learnRate,
		Iterations:   iterations,
		Activators:   make([]Activator, layerCount),
		Activations:  make([]*mat64.Dense, layerCount),
		Errors:       make([]*mat64.Dense, layerCount),
		Weights:      make([]*mat64.Dense, layerCount-1),
		Topology:     topology,
		Layers:       layerCount,
		Loss:         new(MSE),
		Verbose:      true,
	}
	n.initActivations(topology)
	n.initWeights(topology)
	n.initActivators(acts)
	n.initErrors(topology)
	return n
}
// verbosePrint writes the given values to standard output, but only when
// the network's Verbose flag is set.
func (n *Network) verbosePrint(a ...interface{}) {
	if !n.Verbose {
		return
	}
	fmt.Println(a...)
}
// initActivations allocates one column vector of activations per layer,
// sized to that layer's node count.
func (n *Network) initActivations(topology []int) {
	for layer, nodes := range topology {
		n.Activations[layer] = mat64.NewDense(nodes, 1, nil)
	}
}
// initErrors allocates one column vector of error terms for each layer.
func (n *Network) initErrors(topology []int) {
	for layer := 0; layer < n.Layers; layer++ {
		n.Errors[layer] = mat64.NewDense(topology[layer], 1, nil)
	}
}
// initWeights creates a randomly initialized weight matrix between each
// pair of adjacent layers; matrix i maps layer i's activations to layer i+1.
func (n *Network) initWeights(topology []int) {
	for layer := 0; layer < n.Layers-1; layer++ {
		n.Weights[layer] = randomMatrix(topology[layer+1], topology[layer])
	}
}
// initActivators installs the per-layer activation functions: the
// caller-supplied hidden-layer activators, bracketed by Linear activators
// for the input and output layers.
func (n *Network) initActivators(acts []Activator) {
	acts = append([]Activator{new(Linear)}, append(acts, new(Linear))...)
	for i, act := range acts {
		n.Activators[i] = act
	}
}
// Predict returns the predicted value of the provided sample.
// Dimensions must match those from the provided topology.
// Only use after training the network.
func (n *Network) Predict(sample []float64) *mat64.Dense {
	n.Forward(sample)
	return n.Activations[len(n.Activations)-1]
}
// Learn trains the network using the provided dataset for the configured
// number of iterations. Each dataset entry is a pair: entry[0] holds the
// features and entry[1] the labels, sized per the network topology.
func (n *Network) Learn(dataset [][][]float64) {
	n.verbosePrint("Learning...")
	for iter := 0; iter < n.Iterations; iter++ {
		n.verbosePrint("=== Iteration ", iter+1, " ===")
		n.currentErr = 0
		for _, pair := range dataset {
			n.Forward(pair[0])
			n.Back(pair[1])
		}
		// Report the mean loss over the dataset for this iteration.
		n.verbosePrint("Error : ", n.currentErr/float64(len(dataset)))
	}
}
// Forward propagates the given sample through the network, filling in the
// activations of every layer in turn.
func (n *Network) Forward(sample []float64) {
	n.Activations[0].SetCol(0, sample)
	for layer := range n.Weights {
		n.activateLayer(layer)
	}
}
// activateLayer computes the activations of layer+1: the layer's weight
// matrix times the current activations, with the next layer's activation
// function applied element-wise.
func (n *Network) activateLayer(layer int) {
	next := n.Activations[layer+1]
	next.Mul(n.Weights[layer], n.Activations[layer])
	next.Apply(n.Activators[layer+1].Apply, next)
}
// Back performs one backpropagation step for the given label: it computes
// the per-layer error terms and then updates the weights accordingly.
func (n *Network) Back(label []float64) {
	n.calculateErrors(label)
	n.updateWeights()
}
// calculateErrors fills in the per-layer error terms for one sample: the
// output layer's error is the loss derivative against the expected label,
// and earlier layers are computed by backpropagating it.
func (n *Network) calculateErrors(label []float64) {
	actual := mat64.NewDense(len(label), 1, label)
	n.Errors[n.Layers-1] = n.Loss.Derivative(n.Activations[n.Layers-1], actual)
	// Accumulate the scalar loss so Learn can report a per-iteration mean.
	n.currentErr += n.Loss.Apply(n.Activations[n.Layers-1], actual)
	// Walk backwards from the last hidden layer down to the input layer.
	for i := n.Layers - 2; i >= 0; i-- {
		n.calculateErrorForLayer(i)
	}
}
// calculateErrorForLayer backpropagates the error from layer+1 into layer:
// the next layer's error is pulled back through the transposed weights,
// then scaled element-wise by this layer's activations and by the
// activator's derivative evaluated at those activations.
func (n *Network) calculateErrorForLayer(layer int) {
	n.Errors[layer].Mul(n.Weights[layer].T(), n.Errors[layer+1])
	// NOTE(review): the error is multiplied element-wise by the raw
	// activations here *in addition to* the derivative term below; textbook
	// backprop uses only the derivative factor. Confirm this extra factor is
	// intentional (e.g. the Derivative being expressed in terms of the
	// activation) before relying on the gradients.
	n.Errors[layer].MulElem(n.Errors[layer], n.Activations[layer])
	mat := &mat64.Dense{}
	mat.Apply(n.Activators[layer].Derivative, n.Activations[layer])
	n.Errors[layer].MulElem(mat, n.Errors[layer])
}
func (n *Network) updateWeights() {
for i := 0; i < n.Layers-1; i++ {
mat := &mat64.Dense{}
mat.Mul(n.Errors[i+1], n.Activations[i].T())
mat.Scale(n.LearningRate, mat)
n.Weights[i].Sub(n.Weights[i], mat)
}
} | Network.go | 0.68595 | 0.507019 | Network.go | starcoder |
package ann
import (
"math/rand"
"sort"
mat "github.com/gonum/matrix/mat64"
)
// mrpt indexes the dataset with a forest of random projection trees;
// queries gather candidate neighbors from every tree and then re-rank them
// exhaustively.
type mrpt struct {
	xs    [][]float64 // original data vectors, addressed by position
	trees []*tree     // the random projection trees used for candidate lookup
}

// tree pairs one random projection tree with the projection matrix that was
// used to build it (needed again at query time).
type tree struct {
	root *node
	r    mat.Matrix
}

// node is a single tree node: internal nodes carry a split threshold and
// two children, leaves carry the indices of the points that landed there.
type node struct {
	split   float64
	left    *node
	right   *node
	indices []int
}
// NewMRPTANNer creates a NN index using random projection trees
// See https://arxiv.org/pdf/1509.06957.pdf for additional details
// t -> number of trees, l -> depth of tree
// Panics when xs is empty (xs[0] is read to infer the dimensionality);
// all rows of xs must share the same length.
func NewMRPTANNer(t int, l int, xs [][]float64) ANNer {
	a := 0.5 // TODO(temporary)
	// Infer dimensions from input matrix
	n, d := len(xs), len(xs[0])
	// Convert xs to a gonum matrix
	// Note the layout: one column per data point (d rows, n columns).
	X := mat.NewDense(d, n, nil)
	for i := 0; i < n; i++ {
		X.SetCol(i, xs[i])
	}
	return &mrpt{
		xs:    xs,
		trees: growTrees(X, t, l, a),
	}
}
// growTrees builds t random projection trees of depth l over the columns of
// X (one column per data point). For each tree it draws one random
// direction per tree level, projects every point onto those directions, and
// recursively splits the point set at the median projection.
// The parameter a is currently unused pending sparse projection support.
func growTrees(X mat.Matrix, t, l int, a float64) []*tree {
	// feature vector dimension
	d, n := X.Dims()
	trees := []*tree{}
	// Create t trees
	for i := 0; i < t; i++ {
		// Create a RP matrix
		R := mat.NewDense(d, l-1, nil)
		// Create one random vector per tree level
		for j := 0; j < (l - 1); j++ {
			rs := []float64{}
			for k := 0; k < d; k++ {
				// TODO: only use non-zero value with prob a
				// TODO: Use a sparse vector strategy
				rs = append(rs, rand.NormFloat64())
			}
			// Set the random vector into the matrix
			R.SetCol(j, rs)
		}
		// Calculate the projections
		// P is n x (l-1): column j holds the level-j projection of every point.
		var P mat.Dense
		P.Mul(X.T(), R)
		// Create indices list
		indices := []int{}
		for i := 0; i < n; i++ {
			indices = append(indices, i)
		}
		trees = append(trees, &tree{
			r:    R,
			root: growTree(&P, l, 0, indices),
		})
	}
	return trees
}
// growTree recursively builds one subtree. P holds the projections of ALL
// n points (row = point, column = level), so ps below is indexed by the
// original point index; only entries listed in indices matter at this node.
// Recursion stops at depth l-1, where the surviving indices become a leaf.
func growTree(P mat.Matrix, l, level int, indices []int) *node {
	// Stop if we're at the leaf
	if level == l-1 {
		return &node{indices: indices}
	}
	// Get the projections for this level
	ps := mat.Col(nil, level, P)
	// Get the median of the projections
	// NOTE(review): the median is taken over all n points, not just the
	// points in indices -- confirm this matches the intended MRPT split.
	m := median(ps)
	// Divide indices to left and right based on median value
	leftIndices := []int{}
	rightIndices := []int{}
	for _, i := range indices {
		if ps[i] <= m {
			leftIndices = append(leftIndices, i)
		} else {
			rightIndices = append(rightIndices, i)
		}
	}
	return &node{
		left:  growTree(P, l, level+1, leftIndices),
		right: growTree(P, l, level+1, rightIndices),
		split: m,
	}
}
// ANN returns the indices (into the original dataset) of approximately the
// k nearest neighbors of q. Each tree votes for the points in the leaf the
// query lands in; candidates with enough votes are re-ranked exactly.
func (nn *mrpt) ANN(q []float64, k int) []int {
	// Keep track of votes
	votesMap := map[int]int{}
	// How many votes does a vector need to be included in the output set
	// With reqVotes of 1, any point returned by any tree is a candidate.
	reqVotes := 1
	// Convert q to a vector
	qVec := mat.NewVector(len(q), q)
	// Query the trees to get candidates
	for _, tree := range nn.trees {
		// Calculate projections for the query vector
		var psVec mat.Vector
		psVec.MulVec(tree.r.T(), qVec)
		ps := mat.Col(nil, 0, &psVec)
		indices := querySubtree(tree.root, ps, 0, k)
		for _, i := range indices {
			// Count vote
			votesMap[i]++
		}
	}
	xsCandidates := [][]float64{}
	xsIndices := []int{}
	for i, votes := range votesMap {
		if votes >= reqVotes {
			xsCandidates = append(xsCandidates, nn.xs[i])
			// Track the index of each vector so we can retrieve it later
			xsIndices = append(xsIndices, i)
		}
	}
	// Perform naive k-nearest-neighbor search on candidates set
	knn := NewExhaustiveNNer(xsCandidates)
	knnIndices := knn.ANN(q, k)
	// Convert the above knnIndices to the indices of the vectors in the scope
	// of all of our data
	indices := []int{}
	for _, i := range knnIndices {
		indices = append(indices, xsIndices[i])
	}
	return indices
}
// querySubtree walks one random projection tree using the query's per-level
// projections ps, descending left when the projection is at or below the
// node's split and right otherwise. If the chosen branch yields fewer than
// k candidates, the sibling branch is searched too and its results appended.
func querySubtree(node *node, ps []float64, level int, k int) []int {
	// A leaf holds its candidate indices directly.
	if level == len(ps) {
		return node.indices
	}
	// Pick the primary branch by the split test; the other is the fallback.
	first, second := node.left, node.right
	if ps[level] > node.split {
		first, second = node.right, node.left
	}
	indices := querySubtree(first, ps, level+1, k)
	if len(indices) < k {
		indices = append(indices, querySubtree(second, ps, level+1, k)...)
	}
	return indices
}
// median returns the median of vals without modifying the input: the middle
// element for an odd count, or the mean of the two middle elements for an
// even count. vals must be non-empty (an empty slice panics on indexing).
// (Fixed: stray dataset-metadata text fused onto the closing brace has been
// removed; it made the file uncompilable.)
func median(vals []float64) float64 {
	// Sort a copy so the caller's slice is left untouched.
	vs := make([]float64, len(vals))
	copy(vs, vals)
	sort.Float64s(vs)
	mid := len(vs) / 2
	if len(vs)%2 == 0 {
		return (vs[mid-1] + vs[mid]) / 2
	}
	return vs[mid]
}
package explainer
import (
"fmt"
"log"
"strings"
"github.com/mgutz/ansi"
"github.com/luhring/reach/reach"
"github.com/luhring/reach/reach/aws"
"github.com/luhring/reach/reach/helper"
)
// An Explainer provides mechanisms to explain the business logic behind analyses to users via natural language.
type Explainer struct {
	analysis reach.Analysis // the completed analysis being explained
}
// New returns a reference to a new Explainer for the given analysis.
func New(analysis reach.Analysis) *Explainer {
	return &Explainer{analysis: analysis}
}
// Explain returns a natural language representation of the logic used
// during an analysis to compute the final result, one section per network
// vector, separated by newlines.
func (ex *Explainer) Explain() string {
	explanations := make([]string, 0, len(ex.analysis.NetworkVectors))
	for _, v := range ex.analysis.NetworkVectors {
		explanations = append(explanations, ex.ExplainNetworkVector(v))
	}
	return strings.Join(explanations, "\n")
}
// ExplainNetworkVector returns the part of an analysis explanation that's specific to an individual network vector.
// The output is assembled as ordered sections -- header, source factors,
// destination factors, and the computed traffic results -- joined by
// newlines, so the exact append order here is the user-visible format.
func (ex *Explainer) ExplainNetworkVector(v reach.NetworkVector) string {
	var outputSections []string
	// setting the stage: the source and destination
	var vectorHeader string
	vectorHeader += fmt.Sprintf("%s %s\n", helper.Bold("source:"), ex.NetworkPointName(v.Source))
	vectorHeader += fmt.Sprintf("%s %s\n", helper.Bold("destination:"), ex.NetworkPointName(v.Destination))
	outputSections = append(outputSections, vectorHeader)
	// explain source
	sourceHeader := helper.Bold("source factors:")
	outputSections = append(outputSections, sourceHeader)
	sourceContent := ex.ExplainNetworkPoint(v.Source, v.SourcePerspective())
	outputSections = append(outputSections, helper.Indent(sourceContent, 2))
	// explain destination
	destinationHeader := helper.Bold("destination factors:")
	outputSections = append(outputSections, destinationHeader)
	destinationContent := ex.ExplainNetworkPoint(v.Destination, v.DestinationPerspective())
	outputSections = append(outputSections, helper.Indent(destinationContent, 2))
	// final results
	// NOTE(review): forward traffic uses the colored renderer while return
	// traffic uses the uncolored one -- confirm the asymmetry is intentional.
	forwardResults := fmt.Sprintf("%s\n%s", helper.Bold("network traffic allowed from source to destination:"), v.Traffic.ColorStringWithSymbols())
	outputSections = append(outputSections, forwardResults)
	returnResults := fmt.Sprintf("%s\n%s", helper.Bold("network traffic allowed to return from destination to source:"), v.ReturnTraffic.StringWithSymbols())
	outputSections = append(outputSections, returnResults)
	return strings.Join(outputSections, "\n")
}
// ExplainCapabilityChecks returns a report on whether or not Reach's capabilities are sufficient to handle the requested analysis.
// NOTE(review): each unsupported configuration calls log.Fatal, which
// terminates the entire process from inside a library method -- consider
// returning an error to the caller instead.
func (ex *Explainer) ExplainCapabilityChecks(v reach.NetworkVector) string {
	var outputItems []string
	var checksItems []string
	checksHeader := helper.Bold("analysis capability checks:")
	outputItems = append(outputItems, checksHeader)
	awsEx := aws.NewExplainer(ex.analysis)
	// Each check either records a passing line or aborts the process.
	if awsEx.CheckBothInAWS(v) {
		checksItems = append(checksItems, "✓ both source and destination are in AWS")
	} else {
		log.Fatal("source and/or destination is not in AWS, and this is not yet supported")
	}
	if awsEx.CheckBothInSameVPC(v) {
		checksItems = append(checksItems, "✓ both source and destination are in same VPC")
	} else {
		log.Fatal("source and/or destination are not in same VPC, and this is not yet supported")
	}
	if awsEx.CheckBothInSameSubnet(v) {
		checksItems = append(checksItems, "✓ both source and destination are in same subnet")
	} else {
		log.Fatal("source and/or destination are not in same subnet, and this is not yet supported")
	}
	outputItems = append(outputItems, checksItems...)
	return strings.Join(outputItems, "\n")
}
// ExplainNetworkPoint returns the part of an analysis explanation that's specific to an individual network point (within a network vector).
func (ex *Explainer) ExplainNetworkPoint(point reach.NetworkPoint, p reach.Perspective) string {
	// Only AWS-backed points can currently be explained.
	if !aws.IsUsedByNetworkPoint(point) {
		return fmt.Sprintf("unable to explain analysis for network point with IP address '%s'", point.IPAddress)
	}
	return aws.NewExplainer(ex.analysis).NetworkPoint(point, p)
}
// NetworkPointName returns an understandable string representation of a
// network point: the IP address, prefixed by the ENI name and, when known,
// the EC2 instance name ("instance -> eni -> ip").
func (ex *Explainer) NetworkPointName(point reach.NetworkPoint) string {
	// Lookup errors are deliberately ignored: a missing resource kind in
	// the lineage just means that prefix is omitted from the name.
	eni, _ := aws.GetENIFromLineage(point.Lineage, ex.analysis.Resources)
	ec2Instance, _ := aws.GetEC2InstanceFromLineage(point.Lineage, ex.analysis.Resources)
	name := point.IPAddress.String()
	if eni == nil {
		return name
	}
	name = fmt.Sprintf("%s -> %s", eni.Name(), name)
	if ec2Instance != nil {
		name = fmt.Sprintf("%s -> %s", ec2Instance.Name(), name)
	}
	return name
}
// WarningsFromRestrictedReturnPath returns a slice of warning strings based on the input slice of restricted protocols.
func WarningsFromRestrictedReturnPath(restrictedProtocols []reach.RestrictedProtocol) (bool, string) {
if len(restrictedProtocols) == 0 {
return false, ""
}
var warnings []string
for _, rp := range restrictedProtocols {
var warning string
if rp.Protocol == reach.ProtocolTCP { // We have a specific message based on the knowledge that the protocol is TCP.
if rp.NoReturnTraffic {
warning = ansi.Color("All TCP connection attempts will be unsuccessful. No TCP traffic is allowed to return to the source.", "red+b")
} else {
warning = ansi.Color("TCP connection attempts might be unsuccessful. TCP traffic is allowed to return to the source only at particular source ports.", "yellow+b")
}
} else {
firstSentence := fmt.Sprintf("%s-based communication might be unsuccessful.", rp.Protocol)
var secondSentence string
if rp.NoReturnTraffic {
secondSentence = fmt.Sprintf("No %s traffic is able to return to the source.", rp.Protocol)
} else {
secondSentence = fmt.Sprintf("Some %s traffic is unable to return to the source.", rp.Protocol)
}
warning = ansi.Color(fmt.Sprintf("%s %s", firstSentence, secondSentence), "yellow+b")
}
warnings = append(warnings, warning)
}
return true, "warnings from return traffic obstructions:\n" + strings.Join(warnings, "\n")
} | reach/explainer/explainer.go | 0.72331 | 0.462594 | explainer.go | starcoder |
package fontman
var consolasRegular65 font = font{
Height: 135,
Description: fontDescription{
Family: "Consolas",
Style: "Regular",
Size: 65,
},
Metricts: fontMetrics{
Ascender: 86,
Descender: -30,
Height: 135,
},
Texture: fontTexture{
Name: "t_consolas_regular_65",
Width: 1024,
Height: 1024,
},
Chars: []fontChar{
{Char: ' ', Width: 64, X: 1, Y: 86, W: 0, H: 0, OX: 0, OY: 0},
{Char: '!', Width: 64, X: 2, Y: 6, W: 15, H: 81, OX: 24, OY: 80},
{Char: '\'', Width: 64, X: 18, Y: 6, W: 36, H: 27, OX: 14, OY: 80},
{Char: '#', Width: 64, X: 55, Y: 12, W: 59, H: 74, OX: 2, OY: 74},
{Char: '$', Width: 64, X: 115, Y: 1, W: 51, H: 98, OX: 6, OY: 85},
{Char: '%', Width: 64, X: 167, Y: 5, W: 62, H: 82, OX: 1, OY: 81},
{Char: '&', Width: 64, X: 230, Y: 6, W: 61, H: 81, OX: 3, OY: 80},
{Char: '"', Width: 64, X: 292, Y: 6, W: 13, H: 27, OX: 26, OY: 80},
{Char: '(', Width: 64, X: 306, Y: 2, W: 33, H: 108, OX: 16, OY: 84},
{Char: ')', Width: 64, X: 340, Y: 2, W: 33, H: 108, OX: 15, OY: 84},
{Char: '*', Width: 64, X: 374, Y: 6, W: 47, H: 48, OX: 9, OY: 80},
{Char: '+', Width: 64, X: 422, Y: 27, W: 54, H: 55, OX: 5, OY: 59},
{Char: ',', Width: 64, X: 477, Y: 68, W: 28, H: 37, OX: 13, OY: 18},
{Char: '-', Width: 64, X: 506, Y: 51, W: 34, H: 9, OX: 15, OY: 35},
{Char: '.', Width: 64, X: 541, Y: 68, W: 19, H: 19, OX: 22, OY: 18},
{Char: '/', Width: 64, X: 561, Y: 6, W: 49, H: 92, OX: 7, OY: 80},
{Char: '0', Width: 64, X: 611, Y: 11, W: 54, H: 76, OX: 5, OY: 75},
{Char: '1', Width: 64, X: 666, Y: 12, W: 48, H: 74, OX: 8, OY: 74},
{Char: '2', Width: 64, X: 715, Y: 11, W: 49, H: 75, OX: 8, OY: 75},
{Char: '3', Width: 64, X: 765, Y: 11, W: 47, H: 76, OX: 9, OY: 75},
{Char: '4', Width: 64, X: 813, Y: 12, W: 58, H: 74, OX: 2, OY: 74},
{Char: '5', Width: 64, X: 872, Y: 12, W: 46, H: 75, OX: 10, OY: 74},
{Char: '6', Width: 64, X: 919, Y: 12, W: 52, H: 75, OX: 7, OY: 74},
{Char: '7', Width: 64, X: 972, Y: 12, W: 50, H: 74, OX: 7, OY: 74},
{Char: '8', Width: 64, X: 1, Y: 117, W: 51, H: 76, OX: 7, OY: 75},
{Char: '9', Width: 64, X: 53, Y: 117, W: 52, H: 75, OX: 6, OY: 75},
{Char: ':', Width: 64, X: 106, Y: 134, W: 18, H: 59, OX: 23, OY: 58},
{Char: ';', Width: 64, X: 125, Y: 134, W: 28, H: 77, OX: 14, OY: 58},
{Char: '<', Width: 64, X: 154, Y: 129, W: 44, H: 64, OX: 8, OY: 63},
{Char: ':', Width: 64, X: 199, Y: 147, W: 49, H: 27, OX: 8, OY: 45},
{Char: '>', Width: 64, X: 249, Y: 129, W: 44, H: 64, OX: 12, OY: 63},
{Char: '?', Width: 64, X: 294, Y: 112, W: 35, H: 81, OX: 17, OY: 80},
{Char: '@', Width: 64, X: 330, Y: 111, W: 63, H: 105, OX: 0, OY: 81},
{Char: 'A', Width: 64, X: 394, Y: 118, W: 64, H: 74, OX: 0, OY: 74},
{Char: 'B', Width: 64, X: 459, Y: 118, W: 49, H: 74, OX: 9, OY: 74},
{Char: 'C', Width: 64, X: 509, Y: 117, W: 52, H: 76, OX: 5, OY: 75},
{Char: 'D', Width: 64, X: 562, Y: 118, W: 54, H: 74, OX: 6, OY: 74},
{Char: 'E', Width: 64, X: 617, Y: 118, W: 42, H: 74, OX: 11, OY: 74},
{Char: 'F', Width: 64, X: 660, Y: 118, W: 42, H: 74, OX: 12, OY: 74},
{Char: 'G', Width: 64, X: 703, Y: 117, W: 54, H: 76, OX: 4, OY: 75},
{Char: 'H', Width: 64, X: 758, Y: 118, W: 52, H: 74, OX: 6, OY: 74},
{Char: 'I', Width: 64, X: 811, Y: 118, W: 45, H: 74, OX: 10, OY: 74},
{Char: 'J', Width: 64, X: 857, Y: 118, W: 40, H: 75, OX: 10, OY: 74},
{Char: 'K', Width: 64, X: 898, Y: 118, W: 51, H: 74, OX: 9, OY: 74},
{Char: 'L', Width: 64, X: 950, Y: 118, W: 43, H: 74, OX: 13, OY: 74},
{Char: 'M', Width: 64, X: 1, Y: 225, W: 60, H: 74, OX: 2, OY: 74},
{Char: 'N', Width: 64, X: 62, Y: 225, W: 50, H: 74, OX: 7, OY: 74},
{Char: 'O', Width: 64, X: 113, Y: 224, W: 58, H: 76, OX: 3, OY: 75},
{Char: 'P', Width: 64, X: 172, Y: 225, W: 49, H: 74, OX: 9, OY: 74},
{Char: 'Q', Width: 64, X: 222, Y: 224, W: 61, H: 95, OX: 3, OY: 75},
{Char: 'R', Width: 64, X: 284, Y: 225, W: 51, H: 74, OX: 10, OY: 74},
{Char: 'S', Width: 64, X: 336, Y: 224, W: 51, H: 76, OX: 6, OY: 75},
{Char: 'T', Width: 64, X: 388, Y: 225, W: 55, H: 74, OX: 5, OY: 74},
{Char: 'U', Width: 64, X: 444, Y: 225, W: 52, H: 75, OX: 6, OY: 74},
{Char: 'V', Width: 64, X: 497, Y: 225, W: 64, H: 74, OX: 0, OY: 74},
{Char: 'W', Width: 64, X: 562, Y: 225, W: 60, H: 74, OX: 2, OY: 74},
{Char: 'X', Width: 64, X: 623, Y: 225, W: 62, H: 74, OX: 1, OY: 74},
{Char: 'Y', Width: 64, X: 686, Y: 225, W: 65, H: 74, OX: 0, OY: 74},
{Char: 'Z', Width: 64, X: 752, Y: 225, W: 52, H: 74, OX: 6, OY: 74},
{Char: '[', Width: 64, X: 805, Y: 217, W: 29, H: 105, OX: 19, OY: 82},
{Char: '\\', Width: 64, X: 835, Y: 219, W: 49, H: 92, OX: 9, OY: 80},
{Char: ']', Width: 64, X: 885, Y: 217, W: 28, H: 105, OX: 17, OY: 82},
{Char: '^', Width: 64, X: 914, Y: 225, W: 51, H: 36, OX: 7, OY: 74},
{Char: '_', Width: 64, X: 1, Y: 419, W: 64, H: 8, OX: 0, OY: -15},
{Char: '`', Width: 64, X: 66, Y: 324, W: 38, H: 23, OX: 0, OY: 80},
{Char: 'a', Width: 64, X: 105, Y: 346, W: 47, H: 59, OX: 8, OY: 58},
{Char: 'b', Width: 64, X: 153, Y: 324, W: 50, H: 81, OX: 9, OY: 80},
{Char: 'c', Width: 64, X: 204, Y: 346, W: 44, H: 59, OX: 9, OY: 58},
{Char: 'd', Width: 64, X: 249, Y: 324, W: 48, H: 81, OX: 6, OY: 80},
{Char: 'e', Width: 64, X: 298, Y: 346, W: 51, H: 59, OX: 7, OY: 58},
{Char: 'f', Width: 64, X: 350, Y: 323, W: 60, H: 81, OX: 0, OY: 81},
{Char: 'g', Width: 64, X: 411, Y: 346, W: 54, H: 82, OX: 5, OY: 58},
{Char: 'h', Width: 64, X: 466, Y: 324, W: 46, H: 80, OX: 9, OY: 80},
{Char: 'i', Width: 64, X: 513, Y: 323, W: 46, H: 81, OX: 10, OY: 81},
{Char: 'j', Width: 64, X: 560, Y: 323, W: 42, H: 105, OX: 8, OY: 81},
{Char: 'k', Width: 64, X: 603, Y: 324, W: 50, H: 80, OX: 10, OY: 80},
{Char: 'l', Width: 64, X: 654, Y: 324, W: 46, H: 80, OX: 10, OY: 80},
{Char: 'm', Width: 64, X: 701, Y: 346, W: 54, H: 58, OX: 5, OY: 58},
{Char: 'n', Width: 64, X: 756, Y: 346, W: 46, H: 58, OX: 9, OY: 58},
{Char: 'o', Width: 64, X: 803, Y: 346, W: 54, H: 59, OX: 5, OY: 58},
{Char: 'p', Width: 64, X: 858, Y: 346, W: 50, H: 81, OX: 9, OY: 58},
{Char: 'q', Width: 64, X: 909, Y: 346, W: 48, H: 81, OX: 6, OY: 58},
{Char: 'r', Width: 64, X: 958, Y: 346, W: 48, H: 58, OX: 11, OY: 58},
{Char: 's', Width: 64, X: 1, Y: 464, W: 45, H: 59, OX: 10, OY: 58},
{Char: 't', Width: 64, X: 47, Y: 447, W: 51, H: 76, OX: 4, OY: 75},
{Char: 'u', Width: 64, X: 99, Y: 465, W: 46, H: 58, OX: 9, OY: 57},
{Char: 'v', Width: 64, X: 146, Y: 465, W: 57, H: 57, OX: 4, OY: 57},
{Char: 'w', Width: 64, X: 204, Y: 465, W: 61, H: 57, OX: 1, OY: 57},
{Char: 'x', Width: 64, X: 266, Y: 465, W: 57, H: 57, OX: 4, OY: 57},
{Char: 'y', Width: 64, X: 324, Y: 465, W: 57, H: 81, OX: 3, OY: 57},
{Char: 'z', Width: 64, X: 382, Y: 465, W: 46, H: 57, OX: 9, OY: 57},
{Char: '{', Width: 64, X: 429, Y: 440, W: 42, H: 105, OX: 9, OY: 82},
{Char: '|', Width: 64, X: 472, Y: 429, W: 10, H: 116, OX: 27, OY: 93},
{Char: '}', Width: 64, X: 483, Y: 440, W: 42, H: 105, OX: 13, OY: 82},
{Char: '~', Width: 64, X: 526, Y: 479, W: 59, H: 23, OX: 3, OY: 43},
},
} | fontman/consolas_regular_65.go | 0.603231 | 0.629162 | consolas_regular_65.go | starcoder |
package sudogo
// Stack is a generic LIFO container backed by a reusable slice: popped
// slots keep their storage so later pushes can reuse it without
// reallocating.
type Stack[T any] struct {
	items []T
	size  int
}

// NewStack returns an empty stack whose backing slice is pre-filled with
// initialCapacity zero-value slots ready for reuse.
func NewStack[T any](initialCapacity int) Stack[T] {
	return Stack[T]{items: make([]T, initialCapacity)}
}

// Peek returns a pointer to the top value, or nil when the stack is empty.
func (s *Stack[T]) Peek() *T {
	if s.size == 0 {
		return nil
	}
	return &s.items[s.size-1]
}

// Pop removes the top value and returns a pointer to it, or nil when the
// stack is empty. The pointed-at slot may be overwritten by a later Push.
func (s *Stack[T]) Pop() *T {
	if s.size == 0 {
		return nil
	}
	s.size--
	return &s.items[s.size]
}

// Next reserves the next slot on top of the stack and returns a pointer to
// it, extending the backing slice with a zero value when needed.
func (s *Stack[T]) Next() *T {
	if s.size >= len(s.items) {
		var zero T
		s.items = append(s.items, zero)
	}
	slot := &s.items[s.size]
	s.size++
	return slot
}

// Push places value on top of the stack.
func (s *Stack[T]) Push(value T) {
	*s.Next() = value
}

// Empty reports whether the stack holds no values.
func (s *Stack[T]) Empty() bool {
	return s.size == 0
}

// Size returns the number of values currently on the stack.
func (s *Stack[T]) Size() int {
	return s.size
}

// Clear discards all values while keeping the backing storage for reuse.
func (s *Stack[T]) Clear() {
	s.size = 0
}

// At returns a pointer to the value at the given position (0 is the
// bottom of the stack), or nil when the index is out of range.
func (s *Stack[T]) At(index int) *T {
	if index < 0 || index >= s.size {
		return nil
	}
	return &s.items[index]
}
// Queue is a generic FIFO container implemented as a singly linked list
// with head and tail pointers.
// (Fixed: stray dataset-metadata text fused onto the closing brace of
// Clear has been removed; it made the file uncompilable.)
type Queue[T any] struct {
	head *QueueNode[T] // front of the queue; nil when empty
	tail *QueueNode[T] // back of the queue; nil when empty
	size int           // number of values currently queued
}

// NewQueue returns an empty queue.
func NewQueue[T any]() Queue[T] {
	return Queue[T]{
		head: nil,
		tail: nil,
		size: 0,
	}
}

// QueueNode is a single link in the queue's internal list.
type QueueNode[T any] struct {
	value T
	next  *QueueNode[T]
}

// Peek returns a pointer to the value at the front of the queue, or nil
// when the queue is empty.
func (queue *Queue[T]) Peek() *T {
	if queue.size == 0 {
		return nil
	}
	return &queue.head.value
}

// Poll removes the front value and returns a pointer to it, or nil when
// the queue is empty.
func (queue *Queue[T]) Poll() *T {
	if queue.size == 0 {
		return nil
	}
	queue.size--
	value := &queue.head.value
	if queue.size == 0 {
		// Last element removed: reset head and tail together.
		queue.Clear()
	} else {
		queue.head = queue.head.next
	}
	return value
}

// Next appends a fresh zero-value node at the back of the queue and
// returns a pointer to its value slot.
func (queue *Queue[T]) Next() *T {
	node := &QueueNode[T]{}
	if queue.head == nil {
		queue.head = node
	}
	if queue.tail != nil {
		queue.tail.next = node
	}
	queue.tail = node
	queue.size++
	return &node.value
}

// Offer appends value at the back of the queue.
func (queue *Queue[T]) Offer(value T) {
	next := queue.Next()
	*next = value
}

// Empty reports whether the queue holds no values.
func (queue *Queue[T]) Empty() bool {
	return queue.size == 0
}

// Size returns the number of values currently in the queue.
func (queue *Queue[T]) Size() int {
	return queue.size
}

// Clear discards all queued values, releasing the linked nodes.
func (queue *Queue[T]) Clear() {
	queue.size = 0
	queue.tail = nil
	queue.head = nil
}
package metadata
import (
"time"
"go.opentelemetry.io/collector/model/pdata"
)
// MetricSettings provides common settings for a particular metric.
type MetricSettings struct {
	Enabled bool `mapstructure:"enabled"` // whether data points are recorded and emitted for this metric
}
// MetricsSettings provides settings for polygonreceiver metrics.
// One MetricSettings entry per metric, keyed by the metric's full name in
// the receiver configuration.
type MetricsSettings struct {
	PolygonBorAverageBlockTime                MetricSettings `mapstructure:"polygon.bor.average_block_time"`
	PolygonBorLastBlock                       MetricSettings `mapstructure:"polygon.bor.last_block"`
	PolygonEthStateSync                       MetricSettings `mapstructure:"polygon.eth.state_sync"`
	PolygonEthSubmitCheckpointTime            MetricSettings `mapstructure:"polygon.eth.submit_checkpoint_time"`
	PolygonHeimdallAverageBlockTime           MetricSettings `mapstructure:"polygon.heimdall.average_block_time"`
	PolygonHeimdallCheckpointValidatorsSigned MetricSettings `mapstructure:"polygon.heimdall.checkpoint_validators_signed"`
	PolygonHeimdallCurrentSpanEndBlock        MetricSettings `mapstructure:"polygon.heimdall.current_span_end_block"`
	PolygonHeimdallLastBlock                  MetricSettings `mapstructure:"polygon.heimdall.last_block"`
	PolygonHeimdallTotalTxs                   MetricSettings `mapstructure:"polygon.heimdall.total_txs"`
	PolygonHeimdallUnconfirmedTxs             MetricSettings `mapstructure:"polygon.heimdall.unconfirmed_txs"`
	PolygonPolygonStateSync                   MetricSettings `mapstructure:"polygon.polygon.state_sync"`
}
// DefaultMetricsSettings returns the default metric settings, with every
// polygonreceiver metric enabled.
func DefaultMetricsSettings() MetricsSettings {
	enabled := MetricSettings{Enabled: true}
	return MetricsSettings{
		PolygonBorAverageBlockTime:                enabled,
		PolygonBorLastBlock:                       enabled,
		PolygonEthStateSync:                       enabled,
		PolygonEthSubmitCheckpointTime:            enabled,
		PolygonHeimdallAverageBlockTime:           enabled,
		PolygonHeimdallCheckpointValidatorsSigned: enabled,
		PolygonHeimdallCurrentSpanEndBlock:        enabled,
		PolygonHeimdallLastBlock:                  enabled,
		PolygonHeimdallTotalTxs:                   enabled,
		PolygonHeimdallUnconfirmedTxs:             enabled,
		PolygonPolygonStateSync:                   enabled,
	}
}
// metricPolygonBorAverageBlockTime accumulates data points for the
// polygon.bor.average_block_time gauge and emits them on demand.
// NOTE(review): this follows the OpenTelemetry collector "mdatagen"
// boilerplate pattern; if this file is generated, edit the metadata source
// rather than this code.
type metricPolygonBorAverageBlockTime struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.bor.average_block_time metric with initial data.
func (m *metricPolygonBorAverageBlockTime) init() {
	m.data.SetName("polygon.bor.average_block_time")
	m.data.SetDescription("The average block time.")
	m.data.SetUnit("seconds")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps,
// value, and chain attribute; it is a no-op when the metric is disabled.
func (m *metricPolygonBorAverageBlockTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleVal(val)
	// A is the package's attribute-name table, defined elsewhere in this package.
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonBorAverageBlockTime) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonBorAverageBlockTime) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonBorAverageBlockTime builds the metric recorder; the data
// buffer is only allocated when the metric is enabled in settings.
func newMetricPolygonBorAverageBlockTime(settings MetricSettings) metricPolygonBorAverageBlockTime {
	m := metricPolygonBorAverageBlockTime{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
// metricPolygonBorLastBlock accumulates data points for the
// polygon.bor.last_block non-monotonic cumulative sum and emits them on
// demand.
type metricPolygonBorLastBlock struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.bor.last_block metric with initial data.
func (m *metricPolygonBorLastBlock) init() {
	m.data.SetName("polygon.bor.last_block")
	m.data.SetDescription("The current block number.")
	m.data.SetUnit("block")
	m.data.SetDataType(pdata.MetricDataTypeSum)
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps,
// value, and chain attribute; it is a no-op when the metric is disabled.
func (m *metricPolygonBorLastBlock) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonBorLastBlock) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonBorLastBlock) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonBorLastBlock builds the metric recorder; the data buffer
// is only allocated when the metric is enabled in settings.
func newMetricPolygonBorLastBlock(settings MetricSettings) metricPolygonBorLastBlock {
	m := metricPolygonBorLastBlock{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
// metricPolygonEthStateSync accumulates data points for the
// polygon.eth.state_sync gauge and emits them on demand.
type metricPolygonEthStateSync struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.eth.state_sync metric with initial data.
func (m *metricPolygonEthStateSync) init() {
	m.data.SetName("polygon.eth.state_sync")
	m.data.SetDescription("Total number of StateSync transactions emited.")
	m.data.SetUnit("txs")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps,
// value, and chain attribute; it is a no-op when the metric is disabled.
func (m *metricPolygonEthStateSync) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonEthStateSync) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonEthStateSync) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonEthStateSync builds the metric recorder; the data buffer
// is only allocated when the metric is enabled in settings.
func newMetricPolygonEthStateSync(settings MetricSettings) metricPolygonEthStateSync {
	m := metricPolygonEthStateSync{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
// metricPolygonEthSubmitCheckpointTime accumulates data points for the
// polygon.eth.submit_checkpoint_time gauge and emits them on demand.
type metricPolygonEthSubmitCheckpointTime struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.eth.submit_checkpoint_time metric with initial data.
func (m *metricPolygonEthSubmitCheckpointTime) init() {
	m.data.SetName("polygon.eth.submit_checkpoint_time")
	m.data.SetDescription("Latest checkpoint transaction time.")
	m.data.SetUnit("seconds")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends one data point carrying the given timestamps,
// value, and chain attribute; it is a no-op when the metric is disabled.
func (m *metricPolygonEthSubmitCheckpointTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonEthSubmitCheckpointTime) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonEthSubmitCheckpointTime) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonEthSubmitCheckpointTime builds the metric recorder; the
// data buffer is only allocated when the metric is enabled in settings.
func newMetricPolygonEthSubmitCheckpointTime(settings MetricSettings) metricPolygonEthSubmitCheckpointTime {
	m := metricPolygonEthSubmitCheckpointTime{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
type metricPolygonHeimdallAverageBlockTime struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.heimdall.average_block_time metric with initial data.
func (m *metricPolygonHeimdallAverageBlockTime) init() {
	m.data.SetName("polygon.heimdall.average_block_time")
	m.data.SetDescription("The average block time.")
	m.data.SetUnit("seconds")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends a data point carrying the given timestamps, value,
// and chain attribute. It is a no-op when the metric is disabled.
func (m *metricPolygonHeimdallAverageBlockTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetDoubleVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonHeimdallAverageBlockTime) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonHeimdallAverageBlockTime) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonHeimdallAverageBlockTime returns a new polygon.heimdall.average_block_time
// metric; its data buffer is only allocated and initialized when the metric is enabled.
func newMetricPolygonHeimdallAverageBlockTime(settings MetricSettings) metricPolygonHeimdallAverageBlockTime {
	m := metricPolygonHeimdallAverageBlockTime{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
type metricPolygonHeimdallCheckpointValidatorsSigned struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.heimdall.checkpoint_validators_signed metric with initial data.
func (m *metricPolygonHeimdallCheckpointValidatorsSigned) init() {
	m.data.SetName("polygon.heimdall.checkpoint_validators_signed")
	m.data.SetDescription("Number of validators who signed last checkpoint.")
	m.data.SetUnit("")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends a data point carrying the given timestamps, value,
// and chain/validator attributes. It is a no-op when the metric is disabled.
func (m *metricPolygonHeimdallCheckpointValidatorsSigned) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string, validatorAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
	dp.Attributes().Insert(A.Validator, pdata.NewAttributeValueString(validatorAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonHeimdallCheckpointValidatorsSigned) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonHeimdallCheckpointValidatorsSigned) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonHeimdallCheckpointValidatorsSigned returns a new
// polygon.heimdall.checkpoint_validators_signed metric; its data buffer is
// only allocated and initialized when the metric is enabled.
func newMetricPolygonHeimdallCheckpointValidatorsSigned(settings MetricSettings) metricPolygonHeimdallCheckpointValidatorsSigned {
	m := metricPolygonHeimdallCheckpointValidatorsSigned{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
type metricPolygonHeimdallCurrentSpanEndBlock struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.heimdall.current_span_end_block metric with initial data.
func (m *metricPolygonHeimdallCurrentSpanEndBlock) init() {
	m.data.SetName("polygon.heimdall.current_span_end_block")
	m.data.SetDescription("The end block of the current span.")
	m.data.SetUnit("block")
	m.data.SetDataType(pdata.MetricDataTypeSum)
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends a data point carrying the given timestamps, value,
// and chain attribute. It is a no-op when the metric is disabled.
func (m *metricPolygonHeimdallCurrentSpanEndBlock) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonHeimdallCurrentSpanEndBlock) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonHeimdallCurrentSpanEndBlock) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonHeimdallCurrentSpanEndBlock returns a new
// polygon.heimdall.current_span_end_block metric; its data buffer is only
// allocated and initialized when the metric is enabled.
func newMetricPolygonHeimdallCurrentSpanEndBlock(settings MetricSettings) metricPolygonHeimdallCurrentSpanEndBlock {
	m := metricPolygonHeimdallCurrentSpanEndBlock{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
type metricPolygonHeimdallLastBlock struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.heimdall.last_block metric with initial data.
func (m *metricPolygonHeimdallLastBlock) init() {
	m.data.SetName("polygon.heimdall.last_block")
	m.data.SetDescription("The current block number.")
	m.data.SetUnit("block")
	m.data.SetDataType(pdata.MetricDataTypeSum)
	m.data.Sum().SetIsMonotonic(false)
	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends a data point carrying the given timestamps, value,
// and chain attribute. It is a no-op when the metric is disabled.
func (m *metricPolygonHeimdallLastBlock) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonHeimdallLastBlock) updateCapacity() {
	if m.data.Sum().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Sum().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonHeimdallLastBlock) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonHeimdallLastBlock returns a new polygon.heimdall.last_block
// metric; its data buffer is only allocated and initialized when the metric is enabled.
func newMetricPolygonHeimdallLastBlock(settings MetricSettings) metricPolygonHeimdallLastBlock {
	m := metricPolygonHeimdallLastBlock{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
type metricPolygonHeimdallTotalTxs struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.heimdall.total_txs metric with initial data.
func (m *metricPolygonHeimdallTotalTxs) init() {
	m.data.SetName("polygon.heimdall.total_txs")
	m.data.SetDescription("Total number of transactions.")
	m.data.SetUnit("txs")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends a data point carrying the given timestamps, value,
// and chain attribute. It is a no-op when the metric is disabled.
func (m *metricPolygonHeimdallTotalTxs) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonHeimdallTotalTxs) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonHeimdallTotalTxs) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonHeimdallTotalTxs returns a new polygon.heimdall.total_txs
// metric; its data buffer is only allocated and initialized when the metric is enabled.
func newMetricPolygonHeimdallTotalTxs(settings MetricSettings) metricPolygonHeimdallTotalTxs {
	m := metricPolygonHeimdallTotalTxs{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
type metricPolygonHeimdallUnconfirmedTxs struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.heimdall.unconfirmed_txs metric with initial data.
func (m *metricPolygonHeimdallUnconfirmedTxs) init() {
	m.data.SetName("polygon.heimdall.unconfirmed_txs")
	m.data.SetDescription("Number of unconfirmed transactions.")
	m.data.SetUnit("txs")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends a data point carrying the given timestamps, value,
// and chain attribute. It is a no-op when the metric is disabled.
func (m *metricPolygonHeimdallUnconfirmedTxs) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonHeimdallUnconfirmedTxs) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonHeimdallUnconfirmedTxs) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonHeimdallUnconfirmedTxs returns a new polygon.heimdall.unconfirmed_txs
// metric; its data buffer is only allocated and initialized when the metric is enabled.
func newMetricPolygonHeimdallUnconfirmedTxs(settings MetricSettings) metricPolygonHeimdallUnconfirmedTxs {
	m := metricPolygonHeimdallUnconfirmedTxs{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
type metricPolygonPolygonStateSync struct {
	data     pdata.Metric   // data buffer for generated metric.
	settings MetricSettings // metric settings provided by user.
	capacity int            // max observed number of data points added to the metric.
}

// init fills polygon.polygon.state_sync metric with initial data.
func (m *metricPolygonPolygonStateSync) init() {
	m.data.SetName("polygon.polygon.state_sync")
	m.data.SetDescription("Total number of StateSync transactions received.")
	m.data.SetUnit("txs")
	m.data.SetDataType(pdata.MetricDataTypeGauge)
	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
}

// recordDataPoint appends a data point carrying the given timestamps, value,
// and chain attribute. It is a no-op when the metric is disabled.
func (m *metricPolygonPolygonStateSync) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, chainAttributeValue string) {
	if !m.settings.Enabled {
		return
	}
	dp := m.data.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(start)
	dp.SetTimestamp(ts)
	dp.SetIntVal(val)
	dp.Attributes().Insert(A.Chain, pdata.NewAttributeValueString(chainAttributeValue))
}

// updateCapacity saves max length of data point slices that will be used for the slice capacity.
func (m *metricPolygonPolygonStateSync) updateCapacity() {
	if m.data.Gauge().DataPoints().Len() > m.capacity {
		m.capacity = m.data.Gauge().DataPoints().Len()
	}
}

// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
func (m *metricPolygonPolygonStateSync) emit(metrics pdata.MetricSlice) {
	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
		m.updateCapacity()
		m.data.MoveTo(metrics.AppendEmpty())
		m.init()
	}
}

// newMetricPolygonPolygonStateSync returns a new polygon.polygon.state_sync
// metric; its data buffer is only allocated and initialized when the metric is enabled.
func newMetricPolygonPolygonStateSync(settings MetricSettings) metricPolygonPolygonStateSync {
	m := metricPolygonPolygonStateSync{settings: settings}
	if settings.Enabled {
		m.data = pdata.NewMetric()
		m.init()
	}
	return m
}
// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
// required to produce metric representation defined in metadata and user settings.
type MetricsBuilder struct {
	startTime pdata.Timestamp // start timestamp stamped onto every recorded data point.
	metricPolygonBorAverageBlockTime                metricPolygonBorAverageBlockTime
	metricPolygonBorLastBlock                       metricPolygonBorLastBlock
	metricPolygonEthStateSync                       metricPolygonEthStateSync
	metricPolygonEthSubmitCheckpointTime            metricPolygonEthSubmitCheckpointTime
	metricPolygonHeimdallAverageBlockTime           metricPolygonHeimdallAverageBlockTime
	metricPolygonHeimdallCheckpointValidatorsSigned metricPolygonHeimdallCheckpointValidatorsSigned
	metricPolygonHeimdallCurrentSpanEndBlock        metricPolygonHeimdallCurrentSpanEndBlock
	metricPolygonHeimdallLastBlock                  metricPolygonHeimdallLastBlock
	metricPolygonHeimdallTotalTxs                   metricPolygonHeimdallTotalTxs
	metricPolygonHeimdallUnconfirmedTxs             metricPolygonHeimdallUnconfirmedTxs
	metricPolygonPolygonStateSync                   metricPolygonPolygonStateSync
}

// metricBuilderOption applies changes to default metrics builder.
type metricBuilderOption func(*MetricsBuilder)

// WithStartTime sets startTime on the metrics builder.
func WithStartTime(startTime pdata.Timestamp) metricBuilderOption {
	return func(mb *MetricsBuilder) {
		mb.startTime = startTime
	}
}

// NewMetricsBuilder creates a MetricsBuilder whose per-metric builders are
// configured from the given settings. The start time defaults to time.Now()
// and can be overridden with WithStartTime.
func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder {
	mb := &MetricsBuilder{
		startTime:                             pdata.NewTimestampFromTime(time.Now()),
		metricPolygonBorAverageBlockTime:      newMetricPolygonBorAverageBlockTime(settings.PolygonBorAverageBlockTime),
		metricPolygonBorLastBlock:             newMetricPolygonBorLastBlock(settings.PolygonBorLastBlock),
		metricPolygonEthStateSync:             newMetricPolygonEthStateSync(settings.PolygonEthStateSync),
		metricPolygonEthSubmitCheckpointTime:  newMetricPolygonEthSubmitCheckpointTime(settings.PolygonEthSubmitCheckpointTime),
		metricPolygonHeimdallAverageBlockTime: newMetricPolygonHeimdallAverageBlockTime(settings.PolygonHeimdallAverageBlockTime),
		metricPolygonHeimdallCheckpointValidatorsSigned: newMetricPolygonHeimdallCheckpointValidatorsSigned(settings.PolygonHeimdallCheckpointValidatorsSigned),
		metricPolygonHeimdallCurrentSpanEndBlock:        newMetricPolygonHeimdallCurrentSpanEndBlock(settings.PolygonHeimdallCurrentSpanEndBlock),
		metricPolygonHeimdallLastBlock:                  newMetricPolygonHeimdallLastBlock(settings.PolygonHeimdallLastBlock),
		metricPolygonHeimdallTotalTxs:                   newMetricPolygonHeimdallTotalTxs(settings.PolygonHeimdallTotalTxs),
		metricPolygonHeimdallUnconfirmedTxs:             newMetricPolygonHeimdallUnconfirmedTxs(settings.PolygonHeimdallUnconfirmedTxs),
		metricPolygonPolygonStateSync:                   newMetricPolygonPolygonStateSync(settings.PolygonPolygonStateSync),
	}
	for _, op := range options {
		op(mb)
	}
	return mb
}
// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording
// another set of data points. This function will be doing all transformations required to produce metric representation
// defined in metadata and user settings, e.g. delta/cumulative translation.
func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) {
	mb.metricPolygonBorAverageBlockTime.emit(metrics)
	mb.metricPolygonBorLastBlock.emit(metrics)
	mb.metricPolygonEthStateSync.emit(metrics)
	mb.metricPolygonEthSubmitCheckpointTime.emit(metrics)
	mb.metricPolygonHeimdallAverageBlockTime.emit(metrics)
	mb.metricPolygonHeimdallCheckpointValidatorsSigned.emit(metrics)
	mb.metricPolygonHeimdallCurrentSpanEndBlock.emit(metrics)
	mb.metricPolygonHeimdallLastBlock.emit(metrics)
	mb.metricPolygonHeimdallTotalTxs.emit(metrics)
	mb.metricPolygonHeimdallUnconfirmedTxs.emit(metrics)
	mb.metricPolygonPolygonStateSync.emit(metrics)
}

// RecordPolygonBorAverageBlockTimeDataPoint adds a data point to polygon.bor.average_block_time metric.
func (mb *MetricsBuilder) RecordPolygonBorAverageBlockTimeDataPoint(ts pdata.Timestamp, val float64, chainAttributeValue string) {
	mb.metricPolygonBorAverageBlockTime.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonBorLastBlockDataPoint adds a data point to polygon.bor.last_block metric.
func (mb *MetricsBuilder) RecordPolygonBorLastBlockDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string) {
	mb.metricPolygonBorLastBlock.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonEthStateSyncDataPoint adds a data point to polygon.eth.state_sync metric.
func (mb *MetricsBuilder) RecordPolygonEthStateSyncDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string) {
	mb.metricPolygonEthStateSync.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonEthSubmitCheckpointTimeDataPoint adds a data point to polygon.eth.submit_checkpoint_time metric.
func (mb *MetricsBuilder) RecordPolygonEthSubmitCheckpointTimeDataPoint(ts pdata.Timestamp, val float64, chainAttributeValue string) {
	mb.metricPolygonEthSubmitCheckpointTime.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonHeimdallAverageBlockTimeDataPoint adds a data point to polygon.heimdall.average_block_time metric.
func (mb *MetricsBuilder) RecordPolygonHeimdallAverageBlockTimeDataPoint(ts pdata.Timestamp, val float64, chainAttributeValue string) {
	mb.metricPolygonHeimdallAverageBlockTime.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonHeimdallCheckpointValidatorsSignedDataPoint adds a data point to polygon.heimdall.checkpoint_validators_signed metric.
func (mb *MetricsBuilder) RecordPolygonHeimdallCheckpointValidatorsSignedDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string, validatorAttributeValue string) {
	mb.metricPolygonHeimdallCheckpointValidatorsSigned.recordDataPoint(mb.startTime, ts, val, chainAttributeValue, validatorAttributeValue)
}

// RecordPolygonHeimdallCurrentSpanEndBlockDataPoint adds a data point to polygon.heimdall.current_span_end_block metric.
func (mb *MetricsBuilder) RecordPolygonHeimdallCurrentSpanEndBlockDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string) {
	mb.metricPolygonHeimdallCurrentSpanEndBlock.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonHeimdallLastBlockDataPoint adds a data point to polygon.heimdall.last_block metric.
func (mb *MetricsBuilder) RecordPolygonHeimdallLastBlockDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string) {
	mb.metricPolygonHeimdallLastBlock.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonHeimdallTotalTxsDataPoint adds a data point to polygon.heimdall.total_txs metric.
func (mb *MetricsBuilder) RecordPolygonHeimdallTotalTxsDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string) {
	mb.metricPolygonHeimdallTotalTxs.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonHeimdallUnconfirmedTxsDataPoint adds a data point to polygon.heimdall.unconfirmed_txs metric.
func (mb *MetricsBuilder) RecordPolygonHeimdallUnconfirmedTxsDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string) {
	mb.metricPolygonHeimdallUnconfirmedTxs.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// RecordPolygonPolygonStateSyncDataPoint adds a data point to polygon.polygon.state_sync metric.
func (mb *MetricsBuilder) RecordPolygonPolygonStateSyncDataPoint(ts pdata.Timestamp, val int64, chainAttributeValue string) {
	mb.metricPolygonPolygonStateSync.recordDataPoint(mb.startTime, ts, val, chainAttributeValue)
}

// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
// and metrics builder should update its startTime and reset its internal state accordingly.
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
	mb.startTime = pdata.NewTimestampFromTime(time.Now())
	for _, op := range options {
		op(mb)
	}
}
// Attributes contains the possible metric attributes that can be used.
// The values are the attribute keys written onto data points.
var Attributes = struct {
	// Chain (The name of a chain.)
	Chain string
	// Validator (The address of a validator.)
	Validator string
}{
	"chain",
	"validator",
}

// A is an alias for Attributes.
var A = Attributes
package progresscli
import (
	"fmt"
	"io"
	"math"
	"os"
	"regexp"
	"strings"
	"unicode/utf8"

	"github.com/nathan-fiscaletti/consolesize-go"
)
// Style represents the style that can be applied to a progress bar.
// Character fields may contain ANSI escape sequences; escapes are not
// counted when the bar's width is measured (see strLen).
type Style struct {
	// The open and close characters are the characters on either end
	// of the progress bar. They can be used to encapsulate the
	// progress bar itself. Either may be empty.
	OpenChar  string
	CloseChar string
	// The done character is the character used to represent a
	// completed section of the progress bar.
	DoneChar string
	// The not-done character is the character used to represent a
	// section of the progress bar that has not yet been completed.
	NotDoneChar string
	// The in-progress character is the character used to represent a
	// section of the progress bar that is currently in progress.
	InProgressChar string
	// The percentage color is the text that can be placed immediately
	// before the percentage print out and is most commonly used for
	// ANSI escape sequences to change the color of the text.
	PercentageColor string
}
// ProgressBar represents an instance of a Progress Bar. You should
// initialize a new progress-bar using the New() or NewWithStyle()
// functions.
type ProgressBar struct {
	style                 Style     // visual style used when rendering the bar.
	max                   float64   // value at which the bar is considered complete.
	showPercentage        bool      // whether to render the percentage readout.
	showPercentageDecimal bool      // whether the percentage uses two-decimal precision.
	label                 string    // optional label shown left of the bar.
	showLabel             bool      // whether the label is rendered (set when label is non-empty).
	writer                io.Writer // destination the bar is drawn to (set by Show/ShowIn).
	value                 float64   // current progress value, clamped to [0, max].
	maxWidth              int       // custom width in columns; only used when useCustomMaxWidth.
	useCustomMaxWidth     bool      // whether maxWidth overrides the console width.
	finished              bool      // set once the bar reaches 100%; further Increments are ignored.
	visible               bool      // set once Show/ShowIn has been called.
}
// redraw repaints the bar with its current value, but only once the bar
// has been made visible via Show/ShowIn.
func (pb *ProgressBar) redraw() {
	if pb.visible {
		pb.Increment(0)
	}
}

// SetLabel sets the label for the progress bar. The label will be
// displayed on the left side of the progress bar.
func (pb *ProgressBar) SetLabel(label string) {
	pb.label = label
	pb.showLabel = strLen(label) > 0
	pb.redraw()
}

// SetShowPercentage will tell the progress bar to either display the
// current percentage or not to display it.
func (pb *ProgressBar) SetShowPercentage(show bool) {
	pb.showPercentage = show
	pb.redraw()
}

// SetShowPercentageDecimal will tell the progress bar to display the
// percentage with two character decimal precision. When called, this
// function will automatically force the percentage to be displayed,
// so it is not required that you also call SetShowPercentage(true).
func (pb *ProgressBar) SetShowPercentageDecimal(show bool) {
	if show {
		pb.showPercentage = true
	}
	pb.showPercentageDecimal = show
	pb.redraw()
}

// SetMax will set the maximum value for the progress bar. The default
// maximum value is 100.
func (pb *ProgressBar) SetMax(max float64) {
	pb.max = max
	pb.redraw()
}

// GetMax will retrieve the current max value for the progress bar.
func (pb *ProgressBar) GetMax() float64 {
	return pb.max
}

// SetMaxWidth will set the maximum width for the progress bar in
// columns. The default value is the current width of the console.
func (pb *ProgressBar) SetMaxWidth(maxWidth int) {
	pb.maxWidth = maxWidth
	pb.useCustomMaxWidth = true
	pb.redraw()
}

// UseFullWidth will set the progress bar to use the current width in
// columns of the open console window. This is the default setting.
func (pb *ProgressBar) UseFullWidth() {
	pb.maxWidth = 0
	pb.useCustomMaxWidth = false
	pb.redraw()
}

// GetMaxWidth will retrieve the current maximum width of the
// progress bar in columns. If no custom maximum width has been set,
// the current width of the open console window will be returned.
func (pb *ProgressBar) GetMaxWidth() int {
	if !pb.useCustomMaxWidth {
		cols, _ := consolesize.GetConsoleSize()
		return cols
	}
	return pb.maxWidth
}

// GetValue will retrieve the current value of the progress bar.
func (pb *ProgressBar) GetValue() float64 {
	return pb.value
}

// SetValue will set the current value of the progress bar.
func (pb *ProgressBar) SetValue(value float64) {
	pb.value = value
	pb.redraw()
}

// Show will show the progress bar in STDOUT.
func (pb *ProgressBar) Show() {
	pb.ShowIn(os.Stdout)
}

// ShowIn will show the progress bar in the specified io.Writer,
// resetting its value to zero and drawing the first frame.
func (pb *ProgressBar) ShowIn(w io.Writer) {
	pb.writer = w
	pb.visible = true
	pb.finished = false
	pb.value = 0
	pb.Increment(0)
}
// Increment will increment the progress bar by the specified count.
// The value of the progress bar will be constrained to 0-max where
// max is the current max value for the progress bar. Calling
// Increment(0) simply redraws the bar. It is a no-op once the bar has
// finished or before Show/ShowIn has been called.
func (pb *ProgressBar) Increment(count float64) {
	if pb.finished || !pb.visible {
		return
	}

	// Apply the delta and clamp the value to [0, max].
	pb.value += count
	if pb.value > pb.max {
		pb.value = pb.max
	}
	if pb.value < 0 {
		pb.value = 0
	}

	// Guard against a zero/negative max, which previously produced a
	// NaN percentage (0/0) and garbage rendering.
	var percent float64
	if pb.max > 0 {
		percent = (pb.value / pb.max) * 100.0
	}
	if !pb.showPercentageDecimal {
		percent = math.Trunc(percent)
	}

	// Width consumed by the label and the percentage readout,
	// including their single-space separators.
	var labelsLength int
	if pb.showLabel {
		labelsLength += strLen(pb.label) + 1
	}
	var percentLabel string
	if pb.showPercentage {
		if pb.showPercentageDecimal {
			percentLabel = fmt.Sprintf("%.2f%%", percent)
			// Reserve room for the widest possible readout ("100.00%").
			labelsLength += strLen(fmt.Sprintf("%.2f%%", 100.0)) + 1
		} else {
			percentLabel = fmt.Sprintf("%.0f%%", percent)
			labelsLength += strLen(fmt.Sprintf("%.0f%%", 100.0)) + 1
		}
	}

	// Smallest bar that can render at least one of each character.
	progressBarMinimumLength := strLen(pb.style.DoneChar) +
		strLen(pb.style.NotDoneChar) +
		strLen(pb.style.InProgressChar)

	cols, _ := consolesize.GetConsoleSize()
	width := cols
	if pb.useCustomMaxWidth {
		width = pb.maxWidth
	}
	progressBarAvailableLength := width -
		labelsLength -
		strLen(pb.style.CloseChar) -
		strLen(pb.style.OpenChar)

	// Build the frame in a strings.Builder instead of repeated string
	// concatenation (the old code was O(n^2) in the console width).
	var out strings.Builder

	// Clear the line before writing to it.
	out.WriteString("\r")
	if cols > 0 {
		out.WriteString(strings.Repeat(" ", cols))
	}
	out.WriteString("\r")

	if progressBarAvailableLength < progressBarMinimumLength {
		// Not enough room for a bar; fall back to a text-only readout.
		switch {
		case pb.showLabel && pb.showPercentage:
			out.WriteString(pb.label)
			out.WriteString(" ")
			out.WriteString(percentLabel)
		case pb.showPercentage:
			out.WriteString(percentLabel)
		default:
			out.WriteString("Loading...")
		}
	} else {
		if pb.showLabel {
			out.WriteString(pb.label)
			out.WriteString(" ")
		}
		out.WriteString(pb.style.OpenChar)

		// One cell is reserved for the in-progress character.
		progressFillSize := progressBarAvailableLength -
			strLen(pb.style.InProgressChar)
		filledBarLength := int(math.Trunc((percent / 100) *
			float64(progressFillSize)))
		if filledBarLength > 0 {
			out.WriteString(strings.Repeat(pb.style.DoneChar, filledBarLength))
		}
		if strLen(pb.style.InProgressChar) > 0 {
			if percent < 100 {
				out.WriteString(pb.style.InProgressChar)
			} else {
				out.WriteString(pb.style.DoneChar)
			}
		}
		remaining := progressBarAvailableLength -
			filledBarLength -
			strLen(pb.style.InProgressChar)
		if remaining > 0 {
			out.WriteString(strings.Repeat(pb.style.NotDoneChar, remaining))
		}
		if strLen(pb.style.CloseChar) > 0 {
			out.WriteString(pb.style.CloseChar)
		}
		if pb.showPercentage {
			fmt.Fprintf(&out, " %s%4s", pb.style.PercentageColor, percentLabel)
		}
	}

	if percent >= 100 {
		// Final frame: terminate the line and stop accepting updates.
		pb.finished = true
		out.WriteByte('\n')
	}
	// Write errors are intentionally ignored, matching the original
	// fire-and-forget rendering behavior.
	io.WriteString(pb.writer, out.String())
}
// New will create a new progress bar rendered with the default style.
func New() *ProgressBar {
	return NewWithStyle(DefaultStyle())
}

// NewWithStyle will create a new progress bar rendered with the given
// style. The bar defaults to a maximum of 100, a visible percentage
// readout, and no label.
func NewWithStyle(style Style) *ProgressBar {
	pb := &ProgressBar{
		max:            100.0,
		style:          style,
		showPercentage: true,
		showLabel:      false,
	}
	return pb
}
// DefaultStyle will retrieve the default Style for progress bars:
// colored block characters with no end caps.
func DefaultStyle() Style {
	// gofmt: composite literals are written Style{...}, not Style {...}.
	return Style{
		OpenChar:       "",
		CloseChar:      "",
		DoneChar:       "\033[1;32m█\033[0m",
		NotDoneChar:    "\033[1;37m░\033[0m",
		InProgressChar: "\033[1;37m░\033[0m",
	}
}

// DefaultStyleNoColor will retrieve the default Style for progress
// bars without any ANSI color escape sequences.
func DefaultStyleNoColor() Style {
	return Style{
		OpenChar:       "",
		CloseChar:      "",
		DoneChar:       "█",
		NotDoneChar:    "░",
		InProgressChar: "░",
	}
}

// LineStyle will retrieve a line type Style for progress bars.
func LineStyle() Style {
	return Style{
		OpenChar:       "\033[1;37m╠\033[0m",
		CloseChar:      "\033[1;37m╣\033[0m",
		DoneChar:       "\033[1;32m═\033[0m",
		NotDoneChar:    "\033[1;37m─\033[0m",
		InProgressChar: "\033[1;37m─\033[0m",
	}
}

// LineStyleNoColor will retrieve a line type Style for progress bars
// without any ANSI color escape sequences.
func LineStyleNoColor() Style {
	return Style{
		OpenChar:       "╠",
		CloseChar:      "╣",
		DoneChar:       "═",
		NotDoneChar:    "─",
		InProgressChar: "─",
	}
}
// ansi matches ANSI/VT100 escape sequences (CSI color/cursor codes and
// BEL-terminated OSC sequences) so they can be stripped before measuring
// display width.
const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"

// ansiRE is compiled once at package init; strLen runs on every redraw,
// so per-call compilation would be wasteful. (Renamed from ansi_re to
// follow Go's MixedCaps naming convention.)
var ansiRE = regexp.MustCompile(ansi)

// strLen returns the number of display runes in s, ignoring ANSI escape
// sequences and counting multi-byte characters as a single rune.
func strLen(s string) int {
	return utf8.RuneCountInString(ansiRE.ReplaceAllString(s, ""))
}
package main
import (
"fmt"
"math"
"math/rand"
"sync"
)
/* 3D Vectors */

// Vec3 is a 3-component float32 vector used both for points/directions
// (x, y, z) and for RGB colors (r, g, b).
type Vec3 struct {
	e [3]float32
}

// x, y, z access the spatial components.
func (v Vec3) x() float32 { return v.e[0] }
func (v Vec3) y() float32 { return v.e[1] }
func (v Vec3) z() float32 { return v.e[2] }

// r, g, b access the same storage interpreted as a color.
func (v Vec3) r() float32 { return v.e[0] }
func (v Vec3) g() float32 { return v.e[1] }
func (v Vec3) b() float32 { return v.e[2] }

// add returns the component-wise sum v + other.
func (v Vec3) add(other Vec3) Vec3 {
	return Vec3{
		e: [3]float32{
			v.e[0] + other.e[0],
			v.e[1] + other.e[1],
			v.e[2] + other.e[2],
		},
	}
}

// prod returns the component-wise (Hadamard) product v * other.
func (v Vec3) prod(other Vec3) Vec3 {
	return Vec3{
		e: [3]float32{
			v.e[0] * other.e[0],
			v.e[1] * other.e[1],
			v.e[2] * other.e[2],
		},
	}
}

// sub returns the component-wise difference v - other.
func (v Vec3) sub(other Vec3) Vec3 {
	return Vec3{
		e: [3]float32{
			v.e[0] - other.e[0],
			v.e[1] - other.e[1],
			v.e[2] - other.e[2],
		},
	}
}

// dot returns the scalar (dot) product of v and other.
func (v Vec3) dot(other Vec3) float32 {
	return v.e[0]*other.e[0] + v.e[1]*other.e[1] + v.e[2]*other.e[2]
}

// cross returns the vector (cross) product v x other.
func (v Vec3) cross(other Vec3) Vec3 {
	return Vec3{
		e: [3]float32{
			v.e[1]*other.e[2] - v.e[2]*other.e[1],
			-v.e[0]*other.e[2] + v.e[2]*other.e[0],
			v.e[0]*other.e[1] - v.e[1]*other.e[0],
		},
	}
}

// scalar_mult returns v scaled by s.
func (v Vec3) scalar_mult(s float32) Vec3 {
	return Vec3{e: [3]float32{s * v.e[0], s * v.e[1], s * v.e[2]}}
}

// norm returns the Euclidean length of v.
func (v Vec3) norm() float32 {
	return float32(math.Sqrt(float64(v.e[0]*v.e[0] + v.e[1]*v.e[1] + v.e[2]*v.e[2])))
}

// normalize returns the unit-length vector pointing in v's direction.
// The zero vector yields NaN components (division by zero length).
func (v Vec3) normalize() Vec3 {
	l := v.norm()
	return Vec3{
		e: [3]float32{
			v.e[0] / l,
			v.e[1] / l,
			v.e[2] / l,
		},
	}
}

// r2 returns the squared length of v (cheaper than norm when only a
// comparison is needed).
func (v Vec3) r2() float32 {
	return v.e[0]*v.e[0] + v.e[1]*v.e[1] + v.e[2]*v.e[2]
}

// gamma applies per-component gamma correction, raising each component
// to the power 1/g.
func (v Vec3) gamma(g float32) Vec3 {
	return Vec3{e: [3]float32{
		float32(math.Pow(float64(v.e[0]), 1.0/float64(g))),
		float32(math.Pow(float64(v.e[1]), 1.0/float64(g))),
		float32(math.Pow(float64(v.e[2]), 1.0/float64(g))),
	}}
}

// reflect returns v mirrored about the surface normal n: v - 2(v.n)n.
func (v Vec3) reflect(n Vec3) Vec3 {
	// Renormalize n when it is not approximately unit length. The
	// original check (n.norm()-1.0 > 0.001) only caught normals that
	// were too long; math.Abs also catches normals shorter than unit.
	if math.Abs(float64(n.norm())-1.0) > 0.001 {
		n = n.normalize()
	}
	return v.sub(n.scalar_mult(2 * v.dot(n)))
}

// refract computes the refraction of v through a surface with normal n
// and refraction-index ratio ni_over_nt (Snell's law). It returns true
// and stores the refracted direction in *refracted when refraction is
// possible; it returns false on total internal reflection.
func (v Vec3) refract(n Vec3, ni_over_nt float32, refracted *Vec3) bool {
	uv := v.normalize()
	dt := uv.dot(n)
	discriminant := float64(1.0 - (ni_over_nt*ni_over_nt)*(1-dt*dt))
	if discriminant > 0 {
		*refracted = uv.sub(n.scalar_mult(dt)).scalar_mult(ni_over_nt).sub(n.scalar_mult(float32(math.Sqrt(discriminant))))
		return true
	}
	return false
}
func vec3(x, y, z float32) Vec3 {
return Vec3{
e: [3]float32{x, y, z},
}
}
/* 3D Rays */

// Ray is the half-line A + t*B with origin A and (not necessarily unit)
// direction B.
type Ray struct{ A, B Vec3 }

// ray constructs a Ray from an origin and a direction.
func ray(a, b Vec3) Ray {
	return Ray{A: a, B: b}
}

// origin returns the ray's starting point.
func (r Ray) origin() Vec3 { return r.A }

// direction returns the ray's direction vector.
func (r Ray) direction() Vec3 { return r.B }

// point evaluates the ray at parameter t.
func (r Ray) point(t float32) Vec3 { return r.origin().add(r.direction().scalar_mult(t)) }
/* 3D Spheres */

// sphere constructs a Sphere with the given center, radius, and material.
// A negative radius is used elsewhere in this file to model hollow glass
// (the normal points inward).
func sphere(center Vec3, radius float32, mat Material) Sphere {
	return Sphere{
		center,
		radius,
		mat,
	}
}

// hit reports whether ray r intersects the sphere at a parameter t in
// (t_min, t_max). On a hit, rec receives t, the hit point, the unit normal
// at that point, and the sphere's material.
//
// The quadratic uses b = oc·dir (half the textbook b), so the discriminant
// is b*b - a*c and the roots are (-b ± sqrt(disc)) / a.
func (s Sphere) hit(r Ray, t_min, t_max float32, rec *HitRecord) bool {
	oc := r.origin().sub(s.center)
	a := r.direction().r2()
	b := oc.dot(r.direction())
	c := oc.r2() - s.radius*s.radius
	discriminant := b*b - a*c
	if discriminant > 0 {
		// Try the nearer root first; fall back to the farther one.
		tmp := (-b - float32(math.Sqrt(float64(discriminant)))) / a
		if tmp < t_max && tmp > t_min {
			rec.t = tmp
			rec.p = r.point(rec.t)
			// Dividing by radius normalizes; a negative radius flips the normal.
			rec.normal = rec.p.sub(s.center).scalar_mult(1.0 / s.radius)
			rec.material = s.material
			return true
		}
		tmp = (-b + float32(math.Sqrt(float64(discriminant)))) / a
		if tmp < t_max && tmp > t_min {
			rec.t = tmp
			rec.p = r.point(rec.t)
			rec.normal = rec.p.sub(s.center).scalar_mult(1.0 / s.radius)
			rec.material = s.material
			return true
		}
	}
	return false
}
// random_sphere_point returns a point uniformly distributed inside the unit
// sphere, via rejection sampling. The seed vec3(1,1,1) has r2 == 3, which
// guarantees the loop body runs at least once.
func random_sphere_point() Vec3 {
	p := vec3(1, 1, 1)
	for p.r2() >= 1.0 {
		p = vec3(rand.Float32(), rand.Float32(), rand.Float32()).scalar_mult(2.0).sub(vec3(1, 1, 1))
	}
	return p
}

// random_disk_point returns a point inside the unit disk in the z=0 plane,
// also by rejection sampling; used for depth-of-field lens sampling.
func random_disk_point() Vec3 {
	p := vec3(1, 1, 1)
	for p.r2() >= 1.0 {
		p = vec3(rand.Float32(), rand.Float32(), 0).scalar_mult(2.0).sub(vec3(1, 1, 0))
	}
	return p
}
// Sphere is a hitable sphere with a center, radius, and surface material.
type Sphere struct {
	center Vec3
	radius float32
	material Material
}

/* Camera */

// Camera holds a precomputed viewing frustum: the image-plane corner and
// spanning vectors, the orthonormal basis (u, v, w), and the lens radius
// used for depth-of-field sampling.
type Camera struct {
	origin,
	lower_left_corner,
	horizontal,
	vertical,
	u, v, w Vec3
	lens_radius float32
}
// get_ray returns the ray through normalized image coordinates (s, t),
// jittered by a random point on the lens disk to produce depth of field.
func (c Camera) get_ray(s, t float32) Ray {
	rd := random_disk_point().scalar_mult(c.lens_radius)
	offset := c.u.scalar_mult(rd.x()).add(c.v.scalar_mult(rd.y()))
	direction := c.lower_left_corner
	direction = direction.add(c.horizontal.scalar_mult(s))
	direction = direction.add(c.vertical.scalar_mult(t))
	direction = direction.sub(c.origin)
	direction = direction.sub(offset)
	return Ray{
		A: c.origin.add(offset),
		B: direction,
	}
}

// camera builds a Camera from a look-from/look-at pair, an up vector, a
// vertical field of view in degrees, the image aspect ratio, the aperture
// diameter, and the focus distance. Rays originating on the lens converge
// exactly at the focus plane.
func camera(lookfrom, lookat, vup Vec3, vfov, aspect, aperture, focus_dist float32) Camera {
	theta := float64(vfov * math.Pi / 180.0)
	half_height := float32(math.Tan(theta / 2.0))
	half_width := aspect * half_height
	origin := lookfrom
	// Orthonormal basis: w points backwards from the view direction.
	w := lookfrom.sub(lookat).normalize()
	u := vup.cross(w).normalize()
	v := w.cross(u)
	// Corner and extents of the image plane, scaled to the focus plane.
	lower_left_corner := origin.sub(u.scalar_mult(half_width * focus_dist))
	lower_left_corner = lower_left_corner.sub(v.scalar_mult(half_height * focus_dist))
	lower_left_corner = lower_left_corner.sub(w.scalar_mult(focus_dist))
	horizontal := u.scalar_mult(2.0 * half_width * focus_dist)
	vertical := v.scalar_mult(2.0 * half_height * focus_dist)
	return Camera{
		origin,
		lower_left_corner,
		horizontal,
		vertical,
		u, v, w,
		aperture / 2.0,
	}
}
// schlick is Schlick's polynomial approximation to the Fresnel reflectance
// of a dielectric with refraction index ref_idx at incidence-angle cosine.
func schlick(cosine float32, ref_idx float32) float32 {
	base := (1.0 - ref_idx) / (1.0 + ref_idx)
	base *= base
	return base + (1.0-base)*float32(math.Pow(float64(1.0-cosine), 5.0))
}
/* example scenes to render */

// random_scene builds the classic cover scene: a large ground sphere plus a
// 22x22 grid of small random spheres (80% diffuse, 15% metal, 5% glass) and
// three large feature spheres. Grid spheres too close to the metal feature
// sphere at (4, 0.2, 0) are skipped.
func random_scene() HitableList {
	var world HitableList
	world = append(world, sphere(vec3(0, -1000, 0), 1000, lambertian(0.5, 0.5, 0.5)))
	for a := -11; a < 11; a++ {
		for b := -11; b < 11; b++ {
			mat_prob := rand.Float32()
			center := vec3(float32(a)+0.9*rand.Float32(), 0.2, float32(b)+0.9*rand.Float32())
			if center.sub(vec3(4, 0.2, 0)).norm() > 0.9 {
				if mat_prob < 0.8 {
					world = append(world, sphere(center, 0.2, lambertian(
						rand.Float32()*rand.Float32(),
						rand.Float32()*rand.Float32(),
						rand.Float32()*rand.Float32())))
				} else if mat_prob < 0.95 {
					world = append(world, sphere(center, 0.2, metal(
						0.5*(1+rand.Float32()),
						0.5*(1+rand.Float32()),
						0.5*(1+rand.Float32()),
						0.5*(1+rand.Float32()),
					)))
				} else {
					world = append(world, sphere(center, 0.2, dielectric(1.5)))
				}
			}
		}
	}
	world = append(world, sphere(vec3(0, 1, 0), 1.0, dielectric(1.5)))
	world = append(world, sphere(vec3(-4, 1, 0), 1.0, lambertian(0.4, 0.2, 0.1)))
	world = append(world, sphere(vec3(4, 1, 0), 1.0, metal(0.7, 0.6, 0.5, 0.0)))
	return world
}

// basic_scene builds a small test scene: diffuse ground and center sphere, a
// metal sphere, and a hollow glass sphere (the negative-radius inner shell
// flips its normal to create the hollow look).
func basic_scene() HitableList {
	var world HitableList
	world = append(world, sphere(vec3(0, 0, -1), 0.5, lambertian(0.1, 0.2, 0.5)))
	world = append(world, sphere(vec3(0, -100.5, -1), 100, lambertian(0.8, 0.8, 0.0)))
	world = append(world, sphere(vec3(1, 0, -1), 0.5, metal(0.8, 0.6, 0.2, 0.0)))
	world = append(world, sphere(vec3(-1, 0, -1), 0.5, dielectric(1.5)))
	world = append(world, sphere(vec3(-1, 0, -1), -0.45, dielectric(1.5)))
	return world
}

// pos_camera_scene builds two touching spheres (blue left, red right), a
// minimal scene for checking camera positioning.
func pos_camera_scene() HitableList {
	var world HitableList
	R := float32(math.Cos(math.Pi / 4.0))
	world = append(world, sphere(vec3(-R, 0, -1), R, lambertian(0, 0, 1)))
	world = append(world, sphere(vec3(R, 0, -1), R, lambertian(1, 0, 0)))
	return world
}
/* main ray-tracing code */
/* Hitables */

// HitRecord carries the result of a ray/object intersection: the ray
// parameter t, the hit point p, the surface normal there, and the surface
// material.
type HitRecord struct {
	t float32
	p, normal Vec3
	material Material
}

// Hitable is anything a ray can intersect within (t_min, t_max).
type Hitable interface {
	hit(r Ray, t_min, t_max float32, rec *HitRecord) bool
}

// HitableList is a collection of Hitables that is itself a Hitable.
type HitableList []Hitable

// hit finds the closest intersection among all members by shrinking the
// search window to the nearest hit found so far.
func (h HitableList) hit(r Ray, t_min, t_max float32, rec *HitRecord) bool {
	tmp_rec := new_hitrec()
	hit_anything := false
	closest_so_far := t_max
	for i := 0; i < len(h); i++ {
		if h[i].hit(r, t_min, closest_so_far, &tmp_rec) {
			hit_anything = true
			closest_so_far = tmp_rec.t
			*rec = tmp_rec
		}
	}
	return hit_anything
}
// color returns the radiance carried along ray r by recursively scattering
// it through the world. Recursion is capped at 5 bounces, after which (or
// when a material absorbs the ray) black is returned. t_min of 0.001 avoids
// self-intersection "shadow acne". Missed rays sample a white-to-blue
// vertical sky gradient.
func color(r Ray, world Hitable, depth int) Vec3 {
	rec := new_hitrec()
	if world.hit(r, 0.001, math.MaxFloat32, &rec) {
		scattered := ray(vec3(0, 0, 0), vec3(0, 0, 0))
		attenuation := vec3(0, 0, 0)
		if depth < 5 && rec.material.scatter(r, &rec, &attenuation, &scattered) {
			return attenuation.prod(color(scattered, world, depth+1))
		} else {
			return vec3(0, 0, 0)
		}
	} else {
		// Background: lerp white -> sky blue on the ray's y direction.
		unit_dir := r.direction().normalize()
		t := 0.5 * (unit_dir.y() + 1.0)
		return vec3(1.0, 1.0, 1.0).scalar_mult(1.0 - t).add(vec3(0.5, 0.7, 1.0).scalar_mult(t))
	}
}
/* Materials */

// MaterialType selects the scatter behavior inside Material (a tagged union
// rather than an interface).
type MaterialType int

const (
	NullMaterial MaterialType = 0
	Lambertian MaterialType = 1
	Metal MaterialType = 2
	Dielectric MaterialType = 3
)

// Material bundles the parameters for every material kind; which fields are
// meaningful depends on mat (fuzz for Metal, ref_idx for Dielectric).
type Material struct {
	mat MaterialType
	albedo Vec3
	fuzz, ref_idx float32
}

// lambertian builds a diffuse material with the given RGB albedo.
func lambertian(ax, ay, az float32) Material {
	return Material{
		mat: Lambertian,
		albedo: vec3(ax, ay, az),
	}
}

// metal builds a reflective material with the given RGB albedo and fuzz
// (0 = perfect mirror; larger values blur reflections).
func metal(ax, ay, az, fuzz float32) Material {
	return Material{
		mat: Metal,
		albedo: vec3(ax, ay, az),
		fuzz: fuzz,
	}
}

// dielectric builds a clear glass-like material with the given refraction
// index; albedo is fixed at white (no absorption).
func dielectric(ref_idx float32) Material {
	return Material{
		mat: Dielectric,
		albedo: vec3(1.0, 1.0, 1.0),
		fuzz: 0,
		ref_idx: ref_idx,
	}
}
// scatter computes how ray r bounces off the surface described by rec,
// writing the bounced ray to *scattered and the color filter to
// *attenuation. It returns false when the ray is absorbed (only possible
// for Metal when the fuzzed reflection points into the surface).
func (m Material) scatter(r Ray, rec *HitRecord, attenuation *Vec3, scattered *Ray) bool {
	switch m.mat {
	case Lambertian:
		// Diffuse: bounce toward a random point in the unit sphere tangent
		// to the hit point.
		target := rec.p.add(rec.normal).add(random_sphere_point())
		*scattered = ray(rec.p, target.sub(rec.p))
		*attenuation = m.albedo
		return true
	case Metal:
		// Mirror reflection, perturbed by fuzz; absorb if it ends up below
		// the surface.
		reflected := r.direction().normalize().reflect(rec.normal)
		*scattered = ray(rec.p, reflected.add(random_sphere_point().scalar_mult(m.fuzz)))
		*attenuation = m.albedo
		return scattered.direction().dot(rec.normal) > 0
	case Dielectric:
		// Glass: choose probabilistically between reflection and refraction
		// using Schlick's approximation; refraction fails on total internal
		// reflection.
		outward_normal := vec3(0, 0, 0)
		reflected := r.direction().reflect(rec.normal)
		ni_over_nt := float32(0.0)
		*attenuation = vec3(1, 1, 1)
		refracted := vec3(0, 0, 0)
		var reflect_prob float32
		var cosine float32
		if r.direction().dot(rec.normal) > 0 {
			// Ray exits the glass: flip the normal, invert the index ratio.
			outward_normal = rec.normal.scalar_mult(-1.0)
			ni_over_nt = m.ref_idx
			cosine = m.ref_idx * r.direction().dot(rec.normal) / r.direction().norm()
		} else {
			outward_normal = rec.normal
			ni_over_nt = float32(1.0 / m.ref_idx)
			cosine = -1.0 * r.direction().dot(rec.normal) / r.direction().norm()
		}
		if r.direction().refract(outward_normal, ni_over_nt, &refracted) {
			reflect_prob = schlick(cosine, m.ref_idx)
		} else {
			// Total internal reflection: always reflect. (The assignment
			// here is redundant with the rand branch below, but harmless.)
			*scattered = ray(rec.p, reflected)
			reflect_prob = 1.0
		}
		if rand.Float32() < reflect_prob {
			*scattered = ray(rec.p, reflected)
		} else {
			*scattered = ray(rec.p, refracted)
		}
		return true
	default:
		// NullMaterial and unknown types scatter nothing but report success
		// with zero attenuation set by the caller's defaults.
		return true
	}
}
// new_hitrec returns a sentinel HitRecord: t is -1 (no hit recorded yet)
// and the material is the NullMaterial placeholder.
func new_hitrec() HitRecord {
	rec := HitRecord{t: -1.0}
	rec.p = vec3(0, 0, 0)
	rec.normal = vec3(0, 0, 0)
	rec.material = Material{mat: NullMaterial, albedo: vec3(0, 0, 0)}
	return rec
}
// main renders the random scene to stdout as a plain-text PPM (P3) image.
// The frame is split into tile_x × tile_y tiles, each rendered by its own
// goroutine into a shared row-major buffer; every tile writes a disjoint
// set of cells, so no locking is needed. Rows are emitted bottom-up.
func main() {
	scale := 0.4
	nx := int(1920 * scale)
	ny := int(1080 * scale)
	tile_x := 4
	tile_y := 2
	tile_w := nx / tile_x
	tile_h := ny / tile_y
	ns := 40 // samples per pixel
	fmt.Print("P3\n", nx, " ", ny, "\n255\n")
	lookfrom := vec3(13, 2, 3)
	lookat := vec3(0, 0, 0)
	// focus_dist := lookfrom.sub(lookat).norm()
	focus_dist := float32(10.0)
	aperture := float32(0.1)
	cam := camera(lookfrom, lookat, vec3(0, 1, 0), 20, float32(nx)/float32(ny), aperture, focus_dist)
	world := random_scene()
	buf := make([][]Vec3, ny)
	for i := range buf {
		buf[i] = make([]Vec3, nx)
	}
	var wg sync.WaitGroup
	wg.Add(tile_x * tile_y)
	for j := 0; j < tile_y; j++ {
		for i := 0; i < tile_x; i++ {
			// buf is a slice header and already shares its backing arrays;
			// the previous *[][]Vec3 pointer added needless indirection.
			go func(i, j, tileW, tileH int, buf [][]Vec3) {
				defer wg.Done()
				for y := 0; y < tileH; y++ {
					for x := 0; x < tileW; x++ {
						col := vec3(0, 0, 0)
						for s := 0; s < ns; s++ {
							// Jittered sample within the pixel.
							u := (float32(i*tileW+x) + rand.Float32()) / float32(nx)
							v := (float32(j*tileH+y) + rand.Float32()) / float32(ny)
							r := cam.get_ray(u, v)
							col = col.add(color(r, world, 0))
						}
						// Average samples and gamma-correct (gamma 2).
						col = col.scalar_mult(1.0 / float32(ns)).gamma(2.0)
						ir := float32(255.99 * col.x())
						ig := float32(255.99 * col.y())
						ib := float32(255.99 * col.z())
						buf[j*tileH+y][i*tileW+x] = vec3(ir, ig, ib)
					}
				}
			}(i, j, tile_w, tile_h, buf)
		}
	}
	wg.Wait()
	for j := ny - 1; j >= 0; j-- {
		for i := 0; i < nx; i++ {
			color := buf[j][i]
			fmt.Print(int(color.r()), " ",
				int(color.g()), " ",
				int(color.b()), "\n")
		}
	}
}
package main
// usage is the help text printed by toml-test; %[1]s is replaced with the
// program name.
const usage = `Usage: %[1]s parser-cmd [ parser-cmd-flags ]
toml-test is a tool to verify the correctness of TOML parsers and writers.
https://github.com/BurntSushi/toml-test
The parser-cmd positional argument should be a program that accepts TOML data
on stdin until EOF, and is expected to write the corresponding JSON encoding on
stdout. Please see 'README.md' for details on how to satisfy the interface
expected by 'toml-test' with your own parser.
Any positional arguments are used as the parser-cmd; to pass flags, remember to
stop toml-test's flag parsing with --
   $ %[1]s -- my-parser -x -y
There are two tests:
   decoder       This is the default.
   encoder       When -encoder is given.
Tests are split into two groups:
   valid            Valid TOML files
   invalid          Invalid TOML files that should be rejected with an error.
   invalid-encoder  Invalid input for the encoder
All tests are referred to relative to the tests/ directory: valid/dir/name or
invalid/dir/name.
Flags:
   -help        Show this help and exit.
   -version     Show version and exit.
   -encoder     The given parser-cmd will be tested as a TOML encoder.
                The parser-cmd will be sent JSON on stdin and is expected to
                write TOML to stdout. The JSON will be in the same format as
                specified in the toml-test README. Note that this depends on the
                correctness of my TOML parser!
   -v           List all tests, even passing ones. Add twice to show detailed
                output for passing tests.
   -run         Specify a list of tests to run; the default is to run all tests.
                Test names include the directory, i.e. "valid/test-name" or
                "invalid/test-name". You can use globbing patterns, for example
                to run all string tests:
                    $ toml-test toml-test-decoder -run 'valid/string*'
                You can specify this argument more than once, and/or specify
                multiple tests by separating them with a comma:
                    $ toml-test toml-test-decoder \
                        -run valid/string-empty \
                        -run valid/string-nl,valid/string-simple
                This will run three tests (string-empty, string-nl,
                string-simple).
                Globbing patterns: https://pkg.go.dev/path/filepath#Match
   -skip        Tests to skip, this uses the same syntax as the -run flag.
   -color       Output color; possible values:
                    always   Show test failures in bold and red (default).
                    bold     Show test failures in bold only.
                    never    Never output any escape codes.
   -testdir     Location of the tests; the default is to use the tests compiled
                in the binary; this is only useful if you want to add or modify
                tests.
                A test in the invalid directory is a toml file that is known to
                be invalid and should be rejected by the parser.
                A test in the valid directory is a toml and json file with the
                same name, where the json file is the JSON representation of the
                TOML file according to the syntax described in the README.
                For encoders, the same directory scheme above is used, except
                the invalid-encoder directory is used instead of the invalid
                directory.
`
package components
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"strings"
"sync"
)
const (
	// TypeMesh represents a mesh component's type.
	TypeMesh = "mesh"
	// MeshSrcDir is the expected location of meshes, relative to the
	// working directory (note the trailing slash: file names are appended
	// directly).
	MeshSrcDir = "assets/models/"
)

// Mesh represents a component that holds the data representing a mesh.
// Implementations are expected to be safe for concurrent Data/Set calls.
type Mesh interface {
	Component
	// Data retrieves the mesh data.
	Data() MeshData
	// Set updates the mesh data with the new data passed in as a parameter.
	Set(MeshData)
	// Load loads the mesh data from the named file; see the mesh
	// implementation for path-resolution rules.
	Load(string) error
}
// NewMesh creates a new, empty Mesh component; call Load or Set to
// populate it.
func NewMesh() Mesh {
	m := mesh{}
	return &m
}

// mesh is the default Mesh implementation. dataLock guards data; isLoaded
// marks that Load has succeeded once and must not run again.
type mesh struct {
	data MeshData
	dataLock sync.RWMutex
	isLoaded bool
}

// Type retrieves the type of this component (always TypeMesh).
func (m *mesh) Type() string {
	return TypeMesh
}

// Data retrieves a copy of the mesh data under a read lock.
func (m *mesh) Data() MeshData {
	m.dataLock.RLock()
	defer m.dataLock.RUnlock()
	return m.data
}

// Set replaces the mesh data under the write lock.
func (m *mesh) Set(md MeshData) {
	m.dataLock.Lock()
	defer m.dataLock.Unlock()
	m.data = md
}
// Load loads a mesh from file, returning an error if reading or JSON
// decoding fails. If the mesh has already been loaded an "Already Loaded"
// error is returned. File names may be given bare (resolved under
// MeshSrcDir) or as a path that already contains MeshSrcDir.
func (m *mesh) Load(fileName string) error {
	m.dataLock.Lock()
	defer m.dataLock.Unlock()
	if m.isLoaded {
		return errors.New("Already Loaded")
	}
	fullFileName := fileName
	if !strings.Contains(fileName, MeshSrcDir) {
		fullFileName = MeshSrcDir + fileName
	}
	// NOTE: ioutil.ReadFile is deprecated in favor of os.ReadFile; kept so
	// this change does not touch the file's import block.
	data, err := ioutil.ReadFile(fullFileName)
	if err != nil {
		// Wrap with context instead of printing and returning: callers
		// decide how (and whether) to report the failure.
		return fmt.Errorf("reading mesh %s: %w", fullFileName, err)
	}
	if err = json.Unmarshal(data, &m.data); err != nil {
		return fmt.Errorf("decoding mesh %s: %w", fullFileName, err)
	}
	m.isLoaded = true
	return nil
}
// MeshData represents the data needed to construct a 3D mesh, as decoded
// from the JSON mesh files under MeshSrcDir.
type MeshData struct {
	Indexed bool `json:"indexed"` // whether Indices are used for drawing
	Verts []float32 `json:"verts"` // interleaved vertex attributes
	Indices []uint32 `json:"indices"` // element indices (when Indexed)
	VertSize int32 `json:"vertSize"` // floats per vertex in Verts
	TextureFile string `json:"textureFile"`
	FragShaderFile string `json:"fragShaderFile"`
	VertShaderFile string `json:"vertShaderFile"`
}
package circlinebuffer
import (
"errors"
"fmt"
"unicode/utf8"
)
// circLineBuffer is a fixed-size circular buffer of NUL-terminated lines,
// optimised for writing and frequent small reads. Reading the entire
// buffer at once is comparatively expensive.
type circLineBuffer struct {
	size int // total capacity in bytes
	bufHalfSize int // size/2; the largest single write accepted
	cursorOff int // Read Cursor for Partial Reads or only newest
	readOff int // Read Offset aligned to nearest end of line
	writeOff int // Write Offset aligned to nearest end of line
	buf []byte
}

var (
	// ErrEmptyWrite - Empty write to buffer
	ErrEmptyWrite = errors.New("Cannot write empty slice")
	// ErrWriteTooBig - Can only write half the size of CLB
	ErrWriteTooBig = errors.New("Cannot write more than half the size")
	// ErrBuffTooSmallForLine - Cannot read entire line into buffer
	ErrBuffTooSmallForLine = errors.New("Cannot read entire line into buffer")
)

// makeCircLineBuffer returns an empty buffer with the given byte capacity,
// with all offsets at zero.
func makeCircLineBuffer(bufsize int) *circLineBuffer {
	clb := &circLineBuffer{
		size: bufsize,
		bufHalfSize: bufsize / 2,
		buf: make([]byte, bufsize, bufsize),
		readOff: 0,
		writeOff: 0,
		cursorOff: 0,
	}
	return clb
}
// dec moves a buffer offset one byte backwards, wrapping at 0.
func (clb *circLineBuffer) dec(pos *int) {
	*pos--
	if *pos < 0 {
		*pos += clb.size
	}
}

// inc moves a buffer offset one byte forwards, wrapping at size.
func (clb *circLineBuffer) inc(pos *int) {
	*pos++
	if *pos >= clb.size {
		*pos -= clb.size
	}
}

// crossed reports whether pos lies within the circular half-open interval
// (start, end] — i.e. whether advancing from start to end passes pos.
// pos == start is explicitly excluded.
func (clb *circLineBuffer) crossed(pos int, start int, end int) bool {
	if pos == start {
		return false
	}
	pos -= start
	if pos < 0 {
		pos += clb.size
	}
	l := end - start
	if l < 0 {
		l += clb.size
	}
	return pos <= l
}

// Size returns the buffer's total capacity in bytes.
func (clb *circLineBuffer) Size() int {
	return clb.size
}

// Len returns the number of bytes currently stored (write offset minus
// read offset, circularly).
func (clb *circLineBuffer) Len() int {
	l := clb.writeOff - clb.readOff
	if l < 0 {
		l += clb.size
	}
	return l
}
// readInteral copies the circular range [pos, endpos) into dst, splitting
// the copy in two when the range wraps past the end of the buffer.
// NOTE(review): the name is a typo of "readInternal"; renaming would touch
// every call site in this file, so it is left as-is here.
func (clb *circLineBuffer) readInteral(pos int, endpos int, dst []byte) {
	wrapCopy := (endpos < pos)
	// Copy Accross
	if wrapCopy {
		mp := clb.size - pos
		copy(dst[0:mp], clb.buf[pos:])
		copy(dst[mp:], clb.buf[0:endpos])
	} else {
		copy(dst, clb.buf[pos:endpos])
	}
}

// writeInternal copies src into the circular range [pos, endpos), the
// mirror of readInteral.
func (clb *circLineBuffer) writeInternal(pos int, endpos int, src []byte) {
	wrapCopy := (endpos < pos)
	// Copy Accross
	if wrapCopy {
		mp := clb.size - pos
		copy(clb.buf[pos:], src[0:mp])
		copy(clb.buf[0:endpos], src[mp:])
	} else {
		copy(clb.buf[pos:endpos], src)
	}
}

// Bytes returns a copy of the buffer's current contents, from the read
// offset to the write offset, including the NUL line terminators.
func (clb *circLineBuffer) Bytes() []byte {
	bLen := clb.writeOff - clb.readOff
	if bLen < 0 {
		bLen += clb.size
	}
	outBuf := make([]byte, bLen, bLen)
	clb.readInteral(clb.readOff, clb.writeOff, outBuf)
	return outBuf
}

// String renders the contents for display: scanning backwards, each NUL
// terminator becomes a newline, and a '\n' immediately preceding one is
// softened to a space so lines are not doubled.
func (clb *circLineBuffer) String() string {
	b := clb.Bytes()
	for i := len(b) - 1; i > 0; i-- {
		if b[i] == 0 {
			b[i] = '\n'
			i--
			if b[i] == '\n' {
				b[i] = ' '
			}
		}
	}
	return string(b)
}
// NextLine returns the next NUL-terminated line at the cursor and advances
// the cursor past it, or "" when the cursor has caught up with the writer.
// Bytes are accumulated in rune-sized chunks (flushed at each UTF-8 rune
// start) and a trailing doubled newline is collapsed to one.
func (clb *circLineBuffer) NextLine() string {
	if clb.cursorOff == clb.writeOff {
		return ""
	}
	retString := ""
	byteChunk := []byte{}
	c := clb.buf[clb.cursorOff]
	if c == 0 {
		// Skip a leading terminator left by the previous line.
		clb.inc(&clb.cursorOff)
		c = clb.buf[clb.cursorOff]
	}
	for c != 0 {
		if len(byteChunk) > 0 && utf8.RuneStart(c) {
			retString += string(byteChunk)
			byteChunk = []byte{c}
		} else {
			byteChunk = append(byteChunk, c)
		}
		clb.inc(&clb.cursorOff)
		c = clb.buf[clb.cursorOff]
	}
	retString += string(byteChunk)
	strL := len(retString)
	if strL > 3 && retString[strL-1] == '\n' && retString[strL-2] == '\n' {
		retString = retString[:strL-1]
	}
	clb.inc(&clb.cursorOff)
	return retString
}

// Reset empties the buffer by rewinding the read and write offsets.
// (The cursor offset is intentionally untouched here.)
func (clb *circLineBuffer) Reset() {
	clb.writeOff = 0
	clb.readOff = 0
}
// Write appends p as one NUL-terminated record. It rejects empty writes and
// writes larger than half the buffer. When the write overruns the read
// head, the read offset is advanced past the clobbered partial line (the
// stale bytes are zeroed as it goes); if the cursor was overrun too, it is
// snapped back to the read offset. Implements io.Writer.
func (clb *circLineBuffer) Write(p []byte) (n int, err error) {
	wl := len(p)
	if wl == 0 {
		return 0, ErrEmptyWrite
	}
	if wl > clb.bufHalfSize {
		return 0, ErrWriteTooBig
	}
	// Update Cursors
	pos := clb.writeOff
	endPos := clb.writeOff + wl
	if endPos >= clb.size {
		endPos -= clb.size
	}
	// Do Actual Write
	clb.writeInternal(pos, endPos, p)
	// If we dont have a zero at the end
	if p[wl-1] != 0 {
		clb.buf[endPos] = 0
		clb.inc(&endPos)
	}
	clb.writeOff = endPos
	// Did we cross read head
	if clb.crossed(clb.readOff, pos, endPos) {
		clb.inc(&endPos)
		// Advance 0 to avoid partial string
		for clb.buf[endPos] != 0 {
			clb.buf[endPos] = 0 // Slight perf cost but saves a lot of headaches
			clb.inc(&endPos)
		}
		clb.readOff = endPos + 1
		// Did we cross cursor head as well
		if clb.crossed(clb.cursorOff, pos, endPos) {
			clb.ResetCursor()
		}
	}
	// fmt.Printf("%2d %2d - %v\n", clb.readOff, clb.writeOff, clb.buf)
	return wl, nil
}

// ResetCursor - rewinds the cursor to the oldest retained line.
func (clb *circLineBuffer) ResetCursor() {
	clb.cursorOff = clb.readOff
}

// EndCursor - moves the cursor to the write head, skipping all content.
func (clb *circLineBuffer) EndCursor() {
	clb.cursorOff = clb.writeOff
}
// Read copies whole lines from the cursor position into p, advancing the
// cursor. If everything fits, all pending bytes are returned; otherwise the
// copy is truncated back to the last complete NUL-terminated line that fits
// in p. Returns ErrBuffTooSmallForLine when not even one line fits.
// Implements io.Reader (returning 0, nil when nothing is pending).
func (clb *circLineBuffer) Read(p []byte) (n int, err error) {
	if clb.cursorOff == clb.writeOff {
		return 0, nil
	}
	// Setup
	maxLen := len(p)
	startPos := clb.cursorOff
	endPos := clb.writeOff
	// Figure out Actual Read Length
	readLen := endPos - startPos
	if readLen < 0 {
		readLen += clb.size
	}
	// Do full read
	if maxLen >= readLen {
		clb.readInteral(startPos, endPos-1, p)
		clb.cursorOff = endPos
		return readLen, nil
	}
	endPos = startPos + maxLen
	if endPos >= clb.size {
		endPos -= clb.size
	}
	// Wind back up to the last 0 so we never hand out a partial line.
	for (clb.buf[endPos] != 0) && endPos > startPos {
		clb.dec(&endPos)
	}
	readLen = endPos - startPos
	if readLen < 0 {
		readLen += clb.size
	}
	// NOTE(review): leftover debug print; it is also this file's only use of
	// the fmt import, so removing it must be paired with dropping "fmt"
	// from the import block in a follow-up.
	fmt.Println(startPos, endPos, readLen)
	if readLen == 0 {
		return 0, ErrBuffTooSmallForLine
	}
	// Update Cursor then Read
	clb.readInteral(startPos, endPos, p)
	clb.cursorOff = endPos + 1
	return readLen, nil
}
package entity
import (
"fmt"
"math"
"sync"
)
// ExperienceManager manages experience and levels for entities, and provides functions to add, remove, and calculate
// experience needed for upcoming levels. Levels and progress are always
// derived from the single experience total, guarded by mu.
type ExperienceManager struct {
	mu sync.RWMutex
	experience int
}

// NewExperienceManager returns a new ExperienceManager with no experience.
func NewExperienceManager() *ExperienceManager {
	return &ExperienceManager{}
}
// Experience returns the amount of experience the manager currently has.
func (e *ExperienceManager) Experience() int {
	e.mu.RLock()
	defer e.mu.RUnlock()
	return e.experience
}

// Add adds experience to the total experience and returns the resulting
// level and progress.
func (e *ExperienceManager) Add(amount int) (level int, progress float64) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.experience += amount
	return progressFromExperience(e.experience)
}

// Remove removes experience from the total experience and returns the
// resulting level and progress.
// NOTE(review): nothing clamps the total at zero, so removing more than is
// held drives experience negative and progressFromExperience can then
// yield a negative level — confirm whether callers guard against this.
func (e *ExperienceManager) Remove(amount int) (level int, progress float64) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.experience -= amount
	return progressFromExperience(e.experience)
}

// Level returns the current experience level.
func (e *ExperienceManager) Level() int {
	e.mu.RLock()
	defer e.mu.RUnlock()
	level, _ := progressFromExperience(e.experience)
	return level
}

// SetLevel sets the level of the manager, preserving the current
// fractional progress toward the next level. Panics outside [0, 2^31-1].
func (e *ExperienceManager) SetLevel(level int) {
	if level < 0 || level > math.MaxInt32 {
		panic(fmt.Sprintf("level must be between 0 and 2,147,483,647, got %d", level))
	}
	e.mu.Lock()
	defer e.mu.Unlock()
	_, progress := progressFromExperience(e.experience)
	e.experience = experienceForLevels(level) + int(float64(experienceForLevel(level))*progress)
}

// Progress returns the progress towards the next level, in [0, 1).
func (e *ExperienceManager) Progress() float64 {
	e.mu.RLock()
	defer e.mu.RUnlock()
	_, progress := progressFromExperience(e.experience)
	return progress
}

// SetProgress sets the progress of the manager at the current level.
// Panics outside [0, 1].
func (e *ExperienceManager) SetProgress(progress float64) {
	if progress < 0 || progress > 1 {
		panic(fmt.Sprintf("progress must be between 0 and 1, got %f", progress))
	}
	e.mu.Lock()
	defer e.mu.Unlock()
	currentLevel, _ := progressFromExperience(e.experience)
	e.experience = experienceForLevels(currentLevel) + int(float64(experienceForLevel(currentLevel))*progress)
}

// Reset resets the total experience, level, and progress of the manager to zero.
func (e *ExperienceManager) Reset() {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.experience = 0
}
// progressFromExperience returns the level and fractional progress implied
// by a total experience amount. It inverts the piecewise quadratic
// experienceForLevels by solving a*l^2 + b*l + c = experience for l and
// taking the larger root.
func progressFromExperience(experience int) (level int, progress float64) {
	var a, b, c float64
	switch {
	case experience <= experienceForLevels(16):
		a, b = 1.0, 6.0
	case experience <= experienceForLevels(31):
		a, b, c = 2.5, -40.5, 360.0
	default:
		a, b, c = 4.5, -162.5, 2220.0
	}
	var root float64
	if disc := b*b - 4*a*(c-float64(experience)); disc > 0 {
		s := math.Sqrt(disc)
		root = math.Max((-b+s)/(2*a), (-b-s)/(2*a))
	} else if disc == 0 {
		root = -b / (2 * a)
	}
	// Integer part is the level; fractional part is progress to the next.
	return int(root), root - math.Trunc(root)
}

// experienceForLevels calculates the amount of experience needed in total
// to reach a certain level (piecewise quadratic, matching vanilla rules).
func experienceForLevels(level int) int {
	switch {
	case level <= 16:
		return level*level + level*6
	case level <= 31:
		return int(float64(level*level)*2.5 - 40.5*float64(level) + 360)
	default:
		return int(float64(level*level)*4.5 - 162.5*float64(level) + 2220)
	}
}

// experienceForLevel returns the amount of experience needed to go from
// level to level + 1.
func experienceForLevel(level int) int {
	switch {
	case level <= 15:
		return 2*level + 7
	case level <= 30:
		return 5*level - 38
	default:
		return 9*level - 158
	}
}
package dist
import (
"github.com/jesand/stats"
"math"
"math/rand"
)
// Represents a probability distribution
type Dist interface {
// Return a "score" (density or probability) for the given values
Score(vars, params []float64) float64
// The number of random variables the distribution is over
NumVars() int
// The number of parameters in the distribution
NumParams() int
// Update the distribution parameters
SetParams(vals []float64)
}
// Represents a distribution over reals for a random variable
type RealDist interface {
// The mean, or expected value, of the random variable
Mean() float64
// The mode of the random variable
Mode() float64
// The variance of the random variable
Variance() float64
}
// Represents a continuous distribution over a subset of reals
type ContinuousDist interface {
Dist
RealDist
// Sample an outcome from the distribution
Sample() float64
// Sample a sequence of n outcomes from the distribution
SampleN(n int) []float64
// Return the corresponding sample space
Space() RealSpace
// The value of the CDF: Pr(X <= val) for random variable X over this space
CDF(val float64) float64
// Return the density at a given value
PDF(val float64) float64
// Return the probability of a given interval
Prob(from, to float64) float64
// Return the log probability (base 2) of a given interval
LgProb(from, to float64) float64
}
// Represents a discrete distribution over a sample space
type DiscreteDist interface {
Dist
// Sample an outcome from the distribution
Sample() Outcome
// Sample a sequence of n outcomes from the distribution
SampleN(n int) []Outcome
// Return the corresponding sample space
Space() DiscreteSpace
// Return the probability of a given outcome
Prob(outcome Outcome) float64
// Return the log probability (base 2) of a given outcome
LgProb(outcome Outcome) float64
}
// A discrete distribution over the reals
type DiscreteRealDist interface {
DiscreteDist
RealDist
}
// A discrete distribution whose underlying probability measure can change
type MutableDiscreteDist interface {
// A mutable dist is a dist
DiscreteDist
// Set all probabilities to zero
Reset()
// Set the probability of a particular outcome
SetProb(outcome Outcome, prob float64)
// Set the unnormalized measure for a particular outcome. It is
// up to the particular distribution to normalize these weights.
SetWeight(outcome Outcome, weight float64)
// Normalize all weights, assuming 0 weight for outcomes not assigned with
// SetWeight() since the last call to Normalize().
Normalize()
// Normalize all weights, assigning `rest` weight uniformly to all outcomes
// currently assigned zero weight.
NormalizeWithExtra(rest float64)
}
// A default implementation of Sample() for a DiscreteDist
type DefDiscreteDistSample struct{ dist DiscreteDist }
func (dist DefDiscreteDistSample) Sample() Outcome {
var remaining = rand.Float64()
for i := Outcome(0); int(i) < dist.dist.Space().Size(); i++ {
remaining -= dist.dist.Prob(i)
if remaining <= 0 {
return i
}
}
panic(stats.ErrNotNormalized)
}
// A default implementation of SampleN() for a DiscreteDist
type DefDiscreteDistSampleN struct{ dist DiscreteDist }
func (dist DefDiscreteDistSampleN) SampleN(n int) []Outcome {
var outcomes []Outcome
for i := 0; i < n; i++ {
outcomes = append(outcomes, dist.dist.Sample())
}
return outcomes
}
// A default implementation of LgProb() for a DiscreteDist
type DefDiscreteDistLgProb struct{ dist DiscreteDist }
// Return the log probability (base 2) of a given outcome
func (dist DefDiscreteDistLgProb) LgProb(outcome Outcome) float64 {
return math.Log2(dist.dist.Prob(outcome))
}
// A default implementation of SampleN() for a ContinuousDist
type DefContinuousDistSampleN struct{ dist ContinuousDist }
func (dist DefContinuousDistSampleN) SampleN(n int) []float64 {
var outcomes []float64
for i := 0; i < n; i++ {
outcomes = append(outcomes, dist.dist.Sample())
}
return outcomes
}
// A default implementation of Prob() for a ContinuousDist
type DefContinuousDistProb struct{ dist ContinuousDist }
// Return the log probability (base 2) of a given outcome
func (dist DefContinuousDistProb) Prob(from, to float64) float64 {
return dist.dist.CDF(to) - dist.dist.CDF(from)
}
// A default implementation of LgProb() for a ContinuousDist
type DefContinuousDistLgProb struct{ dist ContinuousDist }
// Return the log probability (base 2) of a given outcome
func (dist DefContinuousDistLgProb) LgProb(from, to float64) float64 {
return math.Log2(dist.dist.Prob(from, to))
} | dist/dist.go | 0.850375 | 0.697854 | dist.go | starcoder |
package crom
import (
"fmt"
"reflect"
"strings"
)
// CockroachDialect implements the dialect interface for CockroachDB.
type CockroachDialect struct {
	suffix string
}

// Name identifies this dialect.
func (d CockroachDialect) Name() string { return "CockroachDialect" }

// QuerySuffix is appended to every generated statement.
func (d CockroachDialect) QuerySuffix() string { return ";" }
// ToSqlType maps a Go type to a CockroachDB column type. Pointers map to
// their element type; auto-increment integer columns become SERIAL /
// bigserial. Unrecognized types fall through to STRING.
// NOTE(review): casing is inconsistent ("real", "bigserial" vs. the
// upper-case names elsewhere) — harmless to the database but worth
// normalizing if generated DDL is diffed.
func (d CockroachDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string {
	switch val.Kind() {
	case reflect.String:
		return "STRING"
	case reflect.Ptr:
		// Recurse on the pointee so *int, **int, etc. all resolve.
		return d.ToSqlType(val.Elem(), maxsize, isAutoIncr)
	case reflect.Bool:
		return "BOOL"
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		if isAutoIncr {
			return "SERIAL"
		}
		return "INT"
	case reflect.Int64, reflect.Uint64:
		if isAutoIncr {
			return "bigserial"
		}
		return "INT"
	case reflect.Float64:
		return "FLOAT"
	case reflect.Float32:
		return "real"
	case reflect.Slice:
		// []byte is raw bytes; []string and []int64 map to ARRAY columns.
		if val.Elem().Kind() == reflect.Uint8 {
			return "BYTES"
		}
		if val.Elem().Kind() == reflect.String {
			return "ARRAY"
		}
		if val.Elem().Kind() == reflect.Int64 {
			return "ARRAY"
		}
	case reflect.Map:
		return "JSONB"
	}
	// database/sql null wrappers and time types, matched by type name.
	switch val.Name() {
	case "NullInt64":
		return "INT"
	case "NullFloat64":
		return "FLOAT"
	case "NullBool":
		return "BOOL"
	case "Time", "NullTime":
		return "TIMESTAMPTZ"
	}
	return "STRING"
}
// AutoIncrStr returns the empty string: CockroachDB declares auto-increment
// via the SERIAL column type rather than a column suffix.
func (d CockroachDialect) AutoIncrStr() string {
	return ""
}

// AutoIncrBindValue is the placeholder inserted for an auto-increment
// column's value ("default" lets the database generate it).
func (d CockroachDialect) AutoIncrBindValue() string {
	return "default"
}

// AutoIncrInsertSuffix appends a RETURNING clause so the generated serial
// value can be scanned back after an insert.
func (d CockroachDialect) AutoIncrInsertSuffix(col *ColumnMap) string {
	return " returning " + d.QuoteField(col.ColumnName)
}

// CreateIndexSuffix is the keyword introducing an index method clause.
func (d CockroachDialect) CreateIndexSuffix() string {
	return "using"
}

// TruncateClause is the statement keyword used to empty a table.
func (d CockroachDialect) TruncateClause() string {
	return "truncate"
}

// BindVar returns the positional placeholder for parameter i ("$1" for
// i == 0, and so on).
func (d CockroachDialect) BindVar(i int) string {
	return fmt.Sprintf("$%d", i+1)
}
// InsertAutoIncrToTarget executes an INSERT ... RETURNING statement and
// scans the single returned auto-increment value into target. It is an
// error for the statement to return zero rows or more than one row.
func (d CockroachDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error {
	rows, err := exec.Query(insertSql, params...)
	if err != nil {
		return err
	}
	defer rows.Close()
	if !rows.Next() {
		return fmt.Errorf("No serial value returned for insert: %s Encountered error: %s", insertSql, rows.Err())
	}
	if err := rows.Scan(target); err != nil {
		return err
	}
	// Bug fix: the message previously said "more than two"; a second row
	// already means more than ONE serial value was returned.
	if rows.Next() {
		return fmt.Errorf("more than one serial value returned for insert: %s", insertSql)
	}
	return rows.Err()
}
// QuoteField lower-cases a field name and wraps it in double quotes so it
// can be used safely as an SQL identifier.
func (d CockroachDialect) QuoteField(f string) string {
	lowered := strings.ToLower(f)
	return "\"" + lowered + "\""
}
func (d CockroachDialect) QuotedTableForQuery(schema string, table string) string {
if strings.TrimSpace(schema) == "" {
return d.QuoteField(table)
}
return schema + "." + d.QuoteField(table)
}
func (d CockroachDialect) IfSchemaNotExists(command, schema string) string {
return fmt.Sprintf("%s if not exists", command)
}
func (d CockroachDialect) IfTableExists(command, schema, table string) string {
return fmt.Sprintf("%s if exists", command)
}
func (d CockroachDialect) IfTableNotExists(command, schema, table string) string {
return fmt.Sprintf("%s if not exists", command)
}
package voronoi
import (
"fmt"
"math"
"sort"
"github.com/quasoft/dcel"
)
// CloseTwins adds a vertex to the specified edges: for each half-edge, the
// vertex becomes the target of its twin if the twin's target is still open;
// otherwise it becomes the edge's own target (again, only if still open).
func (v *Voronoi) CloseTwins(list []*dcel.HalfEdge, vertex *dcel.Vertex) {
	for i := 0; i < len(list); i++ {
		he := list[i]
		// Prefer closing the twin; fall back to the edge's own target.
		if he.Twin != nil && he.Twin.Target == nil {
			he.Twin.Target = vertex
		} else if he.Target == nil {
			he.Target = vertex
		}
	}
}
// halfEdgesByCCW implements a slice of half-edges that sort in counter-clockwise order.
type halfEdgesByCCW []*dcel.HalfEdge
func (s halfEdgesByCCW) Len() int {
return len(s)
}
func (s halfEdgesByCCW) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less orders two half-edges by the angle of their target vertices around
// the centroid of all non-nil targets in the slice (descending angle).
// Edges with a nil target sort after all edges that have one.
//
// NOTE(review): the centroid is recomputed on every comparison, making a
// full sort O(n^2 log n). Per-face edge lists appear small, but hoisting
// the centroid computation would be a worthwhile optimization.
func (s halfEdgesByCCW) Less(i, j int) bool {
	if s[i].Target == nil {
		return false
	} else if s[j].Target == nil {
		return true
	}
	// Find center of polygon
	var sumX int64
	var sumY int64
	var cnt int
	for _, v := range s {
		if v.Target != nil {
			sumX += int64(v.Target.X)
			sumY += int64(v.Target.Y)
			cnt++
		}
	}
	centerX := float64(sumX) / float64(cnt)
	centerY := float64(sumY) / float64(cnt)
	a1 := math.Atan2(float64(s[i].Target.Y)-centerY, float64(s[i].Target.X)-centerX)
	a2 := math.Atan2(float64(s[j].Target.Y)-centerY, float64(s[j].Target.X)-centerX)
	// Bug fix: was "a1 >= a2". Less must implement a strict weak ordering;
	// with ">=", Less(i, i) reported true for equal angles, violating the
	// sort.Interface contract and risking incorrect sort results.
	return a1 > a2
}
// UpdateLinks rewires the Prev/Next pointers of the half-edges so that they
// form a closed ring in slice order. A single edge is left unlinked (nil
// Prev and Next); for two or more edges, the last edge links back to the
// first.
func (s halfEdgesByCCW) UpdateLinks() {
	for i := 0; i < len(s); i++ {
		// Link each edge to its predecessor and successor in slice order.
		if i > 0 {
			s[i].Prev, s[i-1].Next = s[i-1], s[i]
		}
		if i < len(s)-1 {
			s[i].Next, s[i+1].Prev = s[i+1], s[i]
		}
	}
	// Close the ring (or degenerate to an unlinked single edge).
	if len(s) == 1 {
		s[0].Prev, s[0].Next = nil, nil
	} else if len(s) > 1 {
		s[0].Prev, s[len(s)-1].Next = s[len(s)-1], s[0]
	}
}
// ReorderFaceEdges collects the half-edges bounding the face, sorts them by
// angle around their centroid (see halfEdgesByCCW), and relinks their
// Prev/Next pointers into a ring in the new order.
//
// NOTE(review): despite an earlier comment, no duplicate removal happens
// here — the dedup code had been commented out and is now dropped.
func (v *Voronoi) ReorderFaceEdges(face *dcel.Face) {
	var boundary []*dcel.HalfEdge
	for he := face.HalfEdge; he != nil; {
		boundary = append(boundary, he)
		he = he.Next
		// Stop once the walk wraps around to the starting edge.
		if he == face.HalfEdge {
			break
		}
	}
	sort.Sort(halfEdgesByCCW(boundary))
	halfEdgesByCCW(boundary).UpdateLinks()
}
// GetFaceHalfEdges returns the half-edges that form the boundary of a face (cell).
// The edges are first reordered and relinked by ReorderFaceEdges, then
// collected by walking the Next pointers until the walk wraps around to the
// starting edge.
func (v *Voronoi) GetFaceHalfEdges(face *dcel.Face) []*dcel.HalfEdge {
	v.ReorderFaceEdges(face)
	var edges []*dcel.HalfEdge
	edge := face.HalfEdge
	for edge != nil {
		edges = append(edges, edge)
		edge = edge.Next
		// Stop once the ring closes.
		if edge == face.HalfEdge {
			break
		}
	}
	return edges
}
// verticesByCCW implements a slice of vertices that sort in counter-clockwise order.
type verticesByCCW []*dcel.Vertex
func (s verticesByCCW) Len() int {
return len(s)
}
func (s verticesByCCW) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less orders two vertices by their angle around the centroid of the whole
// vertex set (descending angle).
func (s verticesByCCW) Less(i, j int) bool {
	// Find center of polygon
	var sumX float64
	var sumY float64
	for _, v := range s {
		sumX += float64(v.X)
		sumY += float64(v.Y)
	}
	centerX := sumX / float64(len(s))
	centerY := sumY / float64(len(s))
	a1 := math.Atan2(float64(s[i].Y)-centerY, float64(s[i].X)-centerX)
	a2 := math.Atan2(float64(s[j].Y)-centerY, float64(s[j].X)-centerX)
	// Bug fix: was "a1 >= a2". Less must implement a strict weak ordering;
	// ">=" made Less(i, i) true for equal angles, violating the
	// sort.Interface contract.
	return a1 > a2
}
// GetFaceVertices returns the vertices that form the boundary of a face (cell),
// sorted in counter-clockwise order.
func (v *Voronoi) GetFaceVertices(face *dcel.Face) []*dcel.Vertex {
var vertices []*dcel.Vertex
exists := make(map[string]bool)
edge := face.HalfEdge
for edge != nil {
if edge.Target != nil {
id := fmt.Sprintf("%v", edge.Target)
if !exists[id] {
exists[id] = true
vertices = append(vertices, edge.Target)
}
}
if edge.Twin != nil && edge.Twin.Target != nil {
id := fmt.Sprintf("%v", edge.Twin.Target)
if !exists[id] {
exists[id] = true
vertices = append(vertices, edge.Twin.Target)
}
}
edge = edge.Next
if edge == face.HalfEdge {
break
}
}
sort.Sort(verticesByCCW(vertices))
return vertices
}
package glicko2
import (
"math"
)
type Result struct {
Opponent *Rating
Score float64
}
type Rating struct {
Mu float64
Phi float64
Sigma float64
Results []Result
}
type System struct {
Tau float64
Epsilon float64
Mu float64
Phi float64
Players []*Rating
}
// AddResult appends a game result to the player's pending result list.
func (p *Rating) AddResult(r Result) {
	p.Results = append(p.Results, r)
}
func (p *Rating) estimateFunctionG() (g float64) {
denom := math.Sqrt(1 + ((3 * (math.Pow(p.Phi, 2))) / math.Pow(math.Pi, 2)))
return 1 / denom
}
func (p *Rating) estimateFunctionE(r *Rating) (e float64) {
denom := 1 + math.Exp(-r.estimateFunctionG()*(p.Mu-r.Mu))
return 1 / denom
}
func (p *Rating) estimateVariance() (v float64) {
denom := 0.0
for _, res := range p.Results {
e := p.estimateFunctionE(res.Opponent)
denom += (math.Pow(res.Opponent.estimateFunctionG(), 2)) * e * (1 - e)
}
return 1 / denom
}
func (p *Rating) estimateDelta(v float64) (delta float64) {
sum := 0.0
for _, res := range p.Results {
sum += res.Opponent.estimateFunctionG() * (res.Score - p.estimateFunctionE(res.Opponent))
}
return v * sum
}
func (s *System) ScaleDown(p *Rating) {
mu := (p.Mu - s.Mu) / 173.7178
phi := p.Phi / 173.7178
p.Mu = mu
p.Phi = phi
}
func (s *System) ScaleUp(p *Rating) {
mu := p.Mu*173.7178 + s.Mu
phi := p.Phi * 173.7178
p.Mu = mu
p.Phi = phi
}
// convergenceFunction is the function f(x) whose zero is sought when the
// system updates a player's volatility (the iterative step of the Glicko-2
// algorithm). x is the candidate log-variance, delta/phi/v the estimated
// improvement, deviation and variance, tau the system constant and a the
// log of the current volatility squared.
func convergenceFunction(x float64, delta float64, phi float64, v float64, tau float64, a float64) (f float64) {
	ex := math.Exp(x)
	phi2 := math.Pow(phi, 2)
	left := ex * (math.Pow(delta, 2) - phi2 - v - ex) / (2 * math.Pow(phi2+v+ex, 2))
	right := (x - a) / math.Pow(tau, 2)
	return left - right
}
// updateVolatility computes the player's new volatility (Sigma) by finding
// the zero of convergenceFunction with an Illinois-style regula-falsi
// iteration, stopping when the bracket [A, B] is narrower than the system's
// convergence tolerance Epsilon. This matches the volatility-update step of
// the Glicko-2 algorithm. v is the estimated variance and delta the
// estimated rating improvement.
func (s *System) updateVolatility(p *Rating, v float64, delta float64) {
	// a = ln(sigma^2) is both the starting upper bracket and a parameter of f.
	a := math.Log(math.Pow(p.Sigma, 2))
	bigA := a
	bigB := 0.0
	// Choose the initial lower bracket B: either directly from the log
	// expression when it is defined, or by stepping down from a in units of
	// Tau until f becomes non-negative.
	if math.Pow(delta, 2) > (math.Pow(p.Phi, 2) + v) {
		bigB = math.Log(math.Pow(delta, 2) - math.Pow(p.Phi, 2) - v)
	} else {
		k := 1.0
		for convergenceFunction(a-(k*s.Tau), delta, p.Phi, v, s.Tau, a) < 0 {
			k += 1.0
		}
		bigB = a - (k * s.Tau)
	}
	fA := convergenceFunction(bigA, delta, p.Phi, v, s.Tau, a)
	fB := convergenceFunction(bigB, delta, p.Phi, v, s.Tau, a)
	for math.Abs(bigB-bigA) > s.Epsilon {
		// Secant step from the current bracket.
		bigC := bigA + (bigA-bigB)*fA/(fB-fA)
		fC := convergenceFunction(bigC, delta, p.Phi, v, s.Tau, a)
		if fC*fB < 0 {
			bigA = bigB
			fA = fB
		} else {
			// Illinois modification: halve fA to guarantee convergence.
			fA = fA / 2
		}
		bigB = bigC
		fB = fC
	}
	// sigma' = e^(A/2)
	p.Sigma = math.Exp(bigA / 2)
}
func (p *Rating) determinePhiStar() (phistar float64) {
return math.Sqrt(math.Pow(p.Phi, 2) + math.Pow(p.Sigma, 2))
}
func (p *Rating) updatePhi(phistar float64, v float64) {
denom := math.Sqrt((1 / math.Pow(phistar, 2)) + (1 / v))
phiprime := 1 / denom
p.Phi = phiprime
}
func (p *Rating) updateMu() {
sum := 0.0
for _, res := range p.Results {
sum += res.Opponent.estimateFunctionG() * (res.Score - p.estimateFunctionE(res.Opponent))
}
p.Mu = p.Mu + (math.Pow(p.Phi, 2) * sum)
}
func (s *System) ratePlayer(p *Rating) (r *Rating) {
r = &Rating{Mu: p.Mu, Phi: p.Phi, Sigma: p.Sigma, Results: p.Results}
variance := r.estimateVariance()
delta := r.estimateDelta(variance)
s.updateVolatility(r, variance, delta)
phistar := r.determinePhiStar()
r.updatePhi(phistar, variance)
r.updateMu()
return r
}
func (s *System) Update() {
updatedplayers := make([]*Rating, 0)
for _, player := range s.Players {
s.ScaleDown(player)
}
for _, player := range s.Players {
updatedplayers = append(updatedplayers, s.ratePlayer(player))
}
s.Players = updatedplayers
for _, player := range s.Players {
s.ScaleUp(player)
}
}
package graph
import (
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/simple"
"gonum.org/v1/gonum/graph/topo"
)
// simpleNode is used internally to present the graph.Node interface
// to the graph library used here
type simpleNode struct {
id int64
node Node
}
func (n *simpleNode) ID() int64 {
return n.id
}
func (n *simpleNode) unwrap() Node {
return n.node
}
func wrap(n Node, id int64) *simpleNode {
return &simpleNode{id: id, node: n}
}
// Edge is a simple graph edge.
type Edge struct {
F, T Node
}
// From returns the from-node of the edge.
func (e Edge) From() Node { return e.F }
// To returns the to-node of the edge.
func (e Edge) To() Node { return e.T }
// Node defines NodeID() which is used to identify a Node in a Graph
type Node interface {
NodeID() string
}
// Graph used to sort Components based on dependencies
type Graph struct {
graph *simple.DirectedGraph
nodes map[string]Node
wrapped map[string]*simpleNode
index int64
}
// NewGraph returns an empty Graph
func NewGraph() *Graph {
return &Graph{
graph: simple.NewDirectedGraph(),
nodes: map[string]Node{},
wrapped: map[string]*simpleNode{},
}
}
// Add a Node to the Graph
func (g *Graph) Add(n ...Node) *Graph {
for _, node := range n {
g.add(node)
}
return g
}
// Count returns the number of Nodes in the Graph
func (g *Graph) Count() int {
return len(g.nodes)
}
func (g *Graph) add(n Node) *simpleNode {
nodeID := n.NodeID()
if wrapped, found := g.wrapped[nodeID]; found {
return wrapped
}
g.index++
wrapped := wrap(n, g.index)
g.nodes[nodeID] = n
g.wrapped[nodeID] = wrapped
g.graph.AddNode(wrapped)
return wrapped
}
// Remove deletes the given nodes (and their incident edges) from the Graph,
// silently ignoring nodes that are not present, and returns the Graph for
// chaining.
func (g *Graph) Remove(n ...Node) *Graph {
	for _, node := range n {
		nodeID := node.NodeID()
		wrapped, found := g.get(node)
		if found {
			// Drop the node from the underlying graph and both lookup maps.
			g.graph.RemoveNode(wrapped.id)
			delete(g.nodes, nodeID)
			delete(g.wrapped, nodeID)
		}
	}
	return g
}
func (g *Graph) get(n Node) (*simpleNode, bool) {
wrapped, found := g.wrapped[n.NodeID()]
return wrapped, found
}
// Connect declares a directional link two nodes in the Graph
func (g *Graph) Connect(from, to Node) *Graph {
f := g.add(from)
t := g.add(to)
g.graph.SetEdge(simple.Edge{F: f, T: t})
return g
}
// GetNode returns the Node with the specified ID and a boolean indicating
// whether it was found
func (g *Graph) GetNode(nodeID string) (Node, bool) {
if node, found := g.nodes[nodeID]; found {
return node, true
}
return nil, false
}
// Sort returns a topological sort of the Graph. It returns the underlying
// topo error when the graph cannot be sorted (e.g. it contains a cycle).
func (g *Graph) Sort() ([]Node, error) {
	sorted, err := topo.Sort(g.graph)
	if err != nil {
		return nil, err
	}
	// Unwrap the internal simpleNode wrappers back into caller-facing Nodes.
	resolved := make([]Node, len(sorted))
	for i, n := range sorted {
		resolved[i] = n.(*simpleNode).unwrap()
	}
	return resolved, nil
}
// From returns all nodes in the graph that can be reached directly from the
// specified Node
func (g *Graph) From(n Node) []Node {
wrapped, ok := g.get(n)
if !ok {
return nil
}
return nodesFromIterator(g.graph.From(wrapped.ID()))
}
// To returns all nodes in the graph that can reach the specified node
func (g *Graph) To(n Node) []Node {
wrapped, ok := g.get(n)
if !ok {
return nil
}
return nodesFromIterator(g.graph.To(wrapped.ID()))
}
// Visit all nodes in the Graph and call the provided callback for each
func (g *Graph) Visit(callback VisitCallback) {
for _, n := range g.nodes {
result := callback(n)
// Return early if the callback responded with false
if !result {
return
}
}
}
// VisitCallback is a function signature for visiting nodes in the graph
type VisitCallback func(n Node) bool
// Filter nodes in the Graph
func (g *Graph) Filter(filter NodeFilter) (result []Node) {
for _, n := range g.nodes {
if filter(n) {
result = append(result, n)
}
}
return
}
// NodeFilter is a filter function for Graph nodes
type NodeFilter func(n Node) bool
func nodesFromIterator(iter graph.Nodes) []Node {
nodes := make([]Node, 0, iter.Len())
for iter.Next() {
nodes = append(nodes, iter.Node().(*simpleNode).unwrap())
}
return nodes
}
package toolkit
import (
"bytes"
"encoding/binary"
"reflect"
)
/**
* Collected methods which allow easy implementation of <code>hashCode</code>.
*
* Example use case:
* <pre>
* public int hashCode(){
* int result = HashCodeUtil.SEED;
* //collect the contributions of various fields
* result = HashCodeUtil.hash(result, fPrimitive);
* result = HashCodeUtil.hash(result, fObject);
* result = HashCodeUtil.hash(result, fArray);
* return result;
* }
* </pre>
*/
/**
* An initial value for a <code>hashCode</code>, to which is added contributions
* from fields. Using a non-zero value decreases collisons of <code>hashCode</code>
* values.
*/
const HASH_SEED = 23
const prime_number = 37
/**
* booleans.
*/
func HashBool(aSeed int, aBoolean bool) int {
b := 0
if aBoolean {
b = 1
}
return firstTerm(aSeed) + b
}
func HashInt(aSeed int, aInt int) int {
return firstTerm(aSeed) + aInt
}
func HashUnit(aSeed int, aInt byte) int {
return firstTerm(aSeed) + numberHashCode(aInt)
}
func HashTiny(aSeed int, aInt int8) int {
return firstTerm(aSeed) + numberHashCode(aInt)
}
func HashShort(aSeed int, aInt int16) int {
return firstTerm(aSeed) + numberHashCode(aInt)
}
func HashInteger(aSeed int, aInt int32) int {
return firstTerm(aSeed) + numberHashCode(aInt)
}
func HashLong(aSeed int, aLong int64) int {
return firstTerm(aSeed) + numberHashCode(aLong)
}
func HashFloat(aSeed int, aFloat float32) int {
return firstTerm(aSeed) + numberHashCode(aFloat)
}
func HashDouble(aSeed int, aDouble float64) int {
return firstTerm(aSeed) + numberHashCode(aDouble)
}
func HashString(aSeed int, aString string) int {
return firstTerm(aSeed) + hashCode([]byte(aString))
}
func HashBytes(aSeed int, aBytes []byte) int {
return firstTerm(aSeed) + hashCode(aBytes)
}
func HashBase(aSeed int, a Base) int {
return firstTerm(aSeed) + a.HashCode()
}
func HashType(aSeed int, aType interface{}) int {
typ := reflect.TypeOf(aType)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
var t string
if typ.PkgPath() != "" {
t = typ.PkgPath() + "/" + typ.Name()
} else {
t = typ.Name()
}
return HashString(aSeed, t)
}
func Hash(aSeed int, aObject interface{}) int {
result := hash(aSeed, aObject)
if result == 0 {
result = aSeed
}
return result
}
// hash folds aObject into aSeed, dispatching on the value's dynamic kind.
// It returns 0 for unsupported kinds; the exported wrapper Hash then falls
// back to returning the seed unchanged.
func hash(aSeed int, aObject interface{}) int {
	result := 0
	if aObject == nil {
		result = HashInt(aSeed, 0)
	} else if t, ok := aObject.(Hasher); ok {
		result = HashInt(aSeed, t.HashCode())
	} else {
		v := reflect.ValueOf(aObject)
		k := v.Kind()
		if k == reflect.Array || k == reflect.Slice {
			// Bug fix: the loop previously started at index 1 (skipping the
			// first element) and hashed each element against the ORIGINAL
			// seed, so only the last element ever contributed. Now every
			// element is folded into a running hash seeded with aSeed.
			r := aSeed
			for i := 0; i < v.Len(); i++ {
				r = Hash(r, v.Index(i).Interface())
			}
			result = r
		} else if k == reflect.Bool {
			result = HashBool(aSeed, aObject.(bool))
		} else if k >= reflect.Int && k <= reflect.Complex128 {
			// All numeric kinds go through their binary representation.
			result = HashInt(aSeed, numberHashCode(aObject))
		} else if k == reflect.String {
			result = HashString(aSeed, aObject.(string))
		} else if k == reflect.Ptr {
			// Try the pointed-to element first.
			o := v.Elem().Interface()
			r := hash(aSeed, o)
			if r == 0 {
				// No luck with the element; use the pointer address value.
				r = HashInt(aSeed, int(v.Pointer()))
			}
			// Bug fix: r was previously computed but never assigned to
			// result, so pointer values always hashed as "unsupported".
			result = r
		}
	}
	return result
}
func numberHashCode(aObject interface{}) int {
h := 0
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.LittleEndian, aObject)
if err == nil {
h = hashCode(buf.Bytes())
}
return h
}
// hashCode computes a Java-style 31-based polynomial hash over the bytes.
// 31*h is expressed directly; the compiler generates the same strength-
// reduced form as the previous (h<<5)-h, with identical wraparound.
func hashCode(what []byte) int {
	h := 0
	for _, b := range what {
		h = 31*h + int(b)
	}
	return h
}
func firstTerm(aSeed int) int {
return prime_number * aSeed
}
package chesseract
import "fmt"
func init() {
RegisterRuleSet("Boring2D", func() RuleSet {
return Boring2D{}
})
}
// A position2D represents a position on the old-fashioned 2D chess board
type position2D [2]int
func (p position2D) String() string {
return fmt.Sprintf("%c%d", 'a'+rune(p[0]), p[1]+1)
}
func (p position2D) Equals(q Position) bool {
if q0, ok := q.(position2D); ok {
return p == q0
}
return false
}
func (p position2D) CellColour() Colour {
if (p[0]+p[1])%2 == 0 {
return BLACK
} else {
return WHITE
}
}
func (p position2D) WorldPosition() (x, y, z float32) {
y = 0.0
x = float32(p[0]) - 3.5
z = float32(p[1]) - 3.5
return
}
// The Boring2D type implements the old 2D 8x8 board we're so used to by now
type Boring2D struct{}
func (Boring2D) String() string {
return "Boring2D"
}
func (Boring2D) PlayerColours() []Colour {
return []Colour{WHITE, BLACK}
}
// DefaultBoard sets up the initial board configuration
func (Boring2D) DefaultBoard() Board {
return Board{
Pieces: []Piece{
{ROOK, WHITE, position2D{0, 0}},
{KNIGHT, WHITE, position2D{1, 0}},
{BISHOP, WHITE, position2D{2, 0}},
{QUEEN, WHITE, position2D{3, 0}},
{KING, WHITE, position2D{4, 0}},
{BISHOP, WHITE, position2D{5, 0}},
{KNIGHT, WHITE, position2D{6, 0}},
{ROOK, WHITE, position2D{7, 0}},
{PAWN, WHITE, position2D{0, 1}},
{PAWN, WHITE, position2D{1, 1}},
{PAWN, WHITE, position2D{2, 1}},
{PAWN, WHITE, position2D{3, 1}},
{PAWN, WHITE, position2D{4, 1}},
{PAWN, WHITE, position2D{5, 1}},
{PAWN, WHITE, position2D{6, 1}},
{PAWN, WHITE, position2D{7, 1}},
{PAWN, BLACK, position2D{0, 6}},
{PAWN, BLACK, position2D{1, 6}},
{PAWN, BLACK, position2D{2, 6}},
{PAWN, BLACK, position2D{3, 6}},
{PAWN, BLACK, position2D{4, 6}},
{PAWN, BLACK, position2D{5, 6}},
{PAWN, BLACK, position2D{6, 6}},
{PAWN, BLACK, position2D{7, 6}},
{ROOK, BLACK, position2D{0, 7}},
{KNIGHT, BLACK, position2D{1, 7}},
{BISHOP, BLACK, position2D{2, 7}},
{QUEEN, BLACK, position2D{3, 7}},
{KING, BLACK, position2D{4, 7}},
{BISHOP, BLACK, position2D{5, 7}},
{KNIGHT, BLACK, position2D{6, 7}},
{ROOK, BLACK, position2D{7, 7}},
},
Turn: WHITE,
}
}
// AllPositions returns an iterator that allows one to range over all possible positions on the board in this variant
func (Boring2D) AllPositions() []Position {
rv := make([]Position, 8*8)
for i := 0; i < 8; i++ {
for j := 0; j < 8; j++ {
rv[8*i+j] = position2D{j, i}
}
}
return rv
}
// ParsePosition converts a string representation into a Position of the correct type.
// The expected form is a file letter 'a'-'h' followed by a rank digit
// '1'-'8' (e.g. "e4"); anything else yields errInvalidFormat.
func (Boring2D) ParsePosition(s string) (Position, error) {
	// NOTE(review): len(s) counts bytes, and the range loop yields byte
	// offsets; a single 2-byte rune would be rejected below by the 0..7
	// range check, but confirm inputs are ASCII.
	if len(s) != 2 {
		return invalidPosition{}, errInvalidFormat
	}
	rv := position2D{}
	for i, r := range s {
		if i%2 == 0 {
			// Even offset: file letter, 0-based from 'a'.
			rv[i] = int(r - 'a')
		} else {
			// Odd offset: rank digit, 0-based from '1'.
			rv[i] = int(r - '1')
		}
		if rv[i] < 0 || rv[i] >= 8 {
			return invalidPosition{}, errInvalidFormat
		}
	}
	return rv, nil
}
// CanMove tests whether a piece can move to the specified new position on the board.
// Note: this only tests movement rules; the check check is performed elsewhere.
//
// The test proceeds in stages: position-type and boundary validation,
// capture legality, per-piece movement shape, and finally a clear-trajectory
// scan for sliding pieces (kings and knights return before that stage).
func (Boring2D) CanMove(board Board, piece Piece, pos Position) bool {
	var oldPos, newPos position2D
	var ok bool
	// Both positions must be 2D positions for this rule set.
	if oldPos, ok = piece.Position.(position2D); !ok {
		return false
	}
	if newPos, ok = pos.(position2D); !ok {
		return false
	}
	// Check board boundaries
	if newPos[0] < 0 || newPos[0] >= 8 || newPos[1] < 0 || newPos[1] >= 8 {
		return false
	}
	// Pieces have to move
	dx, dy := newPos[0]-oldPos[0], newPos[1]-oldPos[1]
	if dx == 0 && dy == 0 {
		return false
	}
	capture := false
	// You can't capture your own pieces
	if op, ok := board.At(newPos); ok {
		if op.Colour == piece.Colour {
			return false
		} else {
			capture = true
		}
	}
	if piece.PieceType == KING {
		// TODO: castling
		// Move one square in any direction
		if dx*dx > 1 || dy*dy > 1 {
			return false
		}
		return true
	} else if piece.PieceType == QUEEN {
		// Diagonal or straight
		if dx*dx != 0 && dy*dy != 0 && dx*dx != dy*dy {
			return false
		}
	} else if piece.PieceType == BISHOP {
		// Diagonal only
		if dx*dx != dy*dy {
			return false
		}
	} else if piece.PieceType == KNIGHT {
		// quit horsin' around: exactly an (1,2) or (2,1) jump in absolute
		// terms; knights jump over pieces, so no trajectory check is needed.
		dx = dx * dx
		dy = dy * dy
		return (dx == 1 && dy == 4) || (dx == 4 && dy == 1)
	} else if piece.PieceType == ROOK {
		// Straight only
		if dx*dx != 0 && dy*dy != 0 {
			return false
		}
	} else if piece.PieceType == PAWN {
		// Check direction: pawns only advance (white up, black down).
		if (piece.Colour == WHITE && dy <= 0) || (piece.Colour == BLACK && dy >= 0) {
			return false
		}
		if capture {
			// Captures are exactly one step diagonally forward.
			return dy*dy == 1 && dx*dx == 1
		} else {
			if dx != 0 {
				return false
			} else if dy*dy == 1 {
				return true
			} else if dy*dy == 4 {
				// Two-square advance only from the pawn's starting rank.
				if (piece.Colour == WHITE && oldPos[1] != 1) || (piece.Colour == BLACK && oldPos[1] != 6) {
					return false
				}
				// Check trajectory below
			} else {
				return false
			}
		}
	} else {
		// Unknown piece
		return false
	}
	// Check the trajectory in between: every intermediate square must be empty.
	var r int
	dx, dy, r = normalise2d(dx, dy)
	for i := 1; i < r; i++ {
		p := position2D{oldPos[0] + i*dx, oldPos[1] + i*dy}
		if _, ok := board.At(p); ok {
			return false
		}
	}
	return true
}
// normalise2d reduces a 2D displacement to a unit step direction (vx, vy)
// plus a step count r. r is set from whichever axis was processed last with
// a non-zero displacement (dy wins over dx); for straight or diagonal chess
// moves this is the number of squares travelled.
func normalise2d(dx, dy int) (vx, vy, r int) {
	switch {
	case dx > 0:
		vx, r = 1, dx
	case dx < 0:
		vx, r = -1, -dx
	}
	switch {
	case dy > 0:
		vy, r = 1, dy
	case dy < 0:
		vy, r = -1, -dy
	}
	return
}
// ApplyMove performs a move on the board, and returns the resulting board
func (rs Boring2D) ApplyMove(board Board, move Move) (Board, error) {
piece, ok := board.At(move.From)
if !ok {
return Board{}, errIllegalMove
}
if !rs.CanMove(board, piece, move.To) {
return Board{}, errIllegalMove
}
newBoard := board.movePiece(move)
// TODO: pawn promotion
// TODO: castling
// TODO: check if this results in the player being in check. Reject with errIllegalMove if it does.
if newBoard.Turn == BLACK {
newBoard.Turn = WHITE
} else {
newBoard.Turn = BLACK
}
return newBoard, nil
}
package stdlib
import (
"fmt"
"math"
"math/big"
"github.com/pieterclaerhout/go-log"
"gonum.org/v1/gonum/floats"
"github.com/vulogov/derBund/internal/vm"
)
func AddOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
if e1.Type != e2.Type {
return nil, fmt.Errorf("Datatypes for operation '+' must be the same")
}
switch e1.Type {
case "int":
return &vm.Elem{Type: "int", Value: (e1.Value.(int64) + e2.Value.(int64))}, nil
case "uint":
return &vm.Elem{Type: "uint", Value: (e1.Value.(uint64) + e2.Value.(uint64))}, nil
case "big":
d1 := e1.Value.(*big.Int)
d2 := e2.Value.(*big.Int)
d1.Add(d1, d2)
return &vm.Elem{Type: "big", Value: d1}, nil
case "flt":
return &vm.Elem{Type: "flt", Value: (e1.Value.(float64) + e2.Value.(float64))}, nil
case "uflt":
return &vm.Elem{Type: "uflt", Value: (e1.Value.(float64) + e2.Value.(float64))}, nil
case "str":
return &vm.Elem{Type: "str", Value: (e1.Value.(string) + e2.Value.(string))}, nil
case "fblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '+' must be the same arity")
}
floats.Add(ar1, ar2)
return vm.ArrayToBlock("fblock", ar1), nil
case "iblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '+' must be the same arity")
}
floats.Add(ar1, ar2)
return vm.ArrayToBlock("iblock", ar1), nil
case "uiblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '+' must be the same arity")
}
floats.Add(ar1, ar2)
return vm.ArrayToBlock("uiblock", ar1), nil
}
return nil, fmt.Errorf("I do not know how to perform '+' for this data")
}
func AddEqualOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
if e1.Type == e2.Type {
return AddOperator(v, e1, e2)
}
switch e1.Type {
case "iblock":
if e2.Type == "int" {
ar1 := vm.BlockToArray(e1)
floats.AddConst(float64(e2.Value.(int64)), ar1)
return vm.ArrayToBlock("iblock", ar1), nil
}
case "uiblock":
if e2.Type == "uint" {
ar1 := vm.BlockToArray(e1)
floats.AddConst(float64(e2.Value.(uint64)), ar1)
return vm.ArrayToBlock("uiblock", ar1), nil
}
case "fblock":
if e2.Type == "flt" {
ar1 := vm.BlockToArray(e1)
floats.AddConst(e2.Value.(float64), ar1)
return vm.ArrayToBlock("fblock", ar1), nil
}
}
return nil, fmt.Errorf("I do not know how to perform '+=' for this data: %v %v", e1.Type, e2.Type)
}
func MulOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
if e1.Type != e2.Type {
return nil, fmt.Errorf("Datatypes for operation '×' must be the same")
}
switch e1.Type {
case "int":
return &vm.Elem{Type: "int", Value: (e1.Value.(int64) * e2.Value.(int64))}, nil
case "uint":
return &vm.Elem{Type: "uint", Value: (e1.Value.(uint64) * e2.Value.(uint64))}, nil
case "big":
d1 := e1.Value.(*big.Int)
d2 := e2.Value.(*big.Int)
d1.Mul(d1, d2)
return &vm.Elem{Type: "big", Value: d1}, nil
case "flt":
return &vm.Elem{Type: "flt", Value: (e1.Value.(float64) * e2.Value.(float64))}, nil
case "uflt":
return &vm.Elem{Type: "uflt", Value: (e1.Value.(float64) * e2.Value.(float64))}, nil
case "fblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '*' must be the same arity")
}
floats.Mul(ar1, ar2)
return vm.ArrayToBlock("fblock", ar1), nil
case "iblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '*' must be the same arity")
}
floats.Mul(ar1, ar2)
return vm.ArrayToBlock("iblock", ar1), nil
case "uiblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '*' must be the same arity")
}
floats.Mul(ar1, ar2)
return vm.ArrayToBlock("uiblock", ar1), nil
}
return nil, fmt.Errorf("I do not know how to perform '×' for this data")
}
// MulEqualOperator implements '*=' / '×=': same-typed operands are delegated
// to MulOperator; otherwise a numeric block (e1) may be scaled in place by a
// matching scalar constant (e2).
func MulEqualOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
	if e1.Type == e2.Type {
		return MulOperator(v, e1, e2)
	}
	switch e1.Type {
	case "iblock":
		if e2.Type == "int" {
			ar1 := vm.BlockToArray(e1)
			vm.MulConst(float64(e2.Value.(int64)), ar1)
			return vm.ArrayToBlock("iblock", ar1), nil
		}
	case "uiblock":
		if e2.Type == "uint" {
			ar1 := vm.BlockToArray(e1)
			vm.MulConst(float64(e2.Value.(uint64)), ar1)
			return vm.ArrayToBlock("uiblock", ar1), nil
		}
	case "fblock":
		if e2.Type == "flt" {
			ar1 := vm.BlockToArray(e1)
			vm.MulConst(e2.Value.(float64), ar1)
			return vm.ArrayToBlock("fblock", ar1), nil
		}
	}
	// Bug fix: the error previously reported operator '+=' (copy-paste from
	// AddEqualOperator); report the actual operator.
	return nil, fmt.Errorf("I do not know how to perform '*=' for this data: %v %v", e1.Type, e2.Type)
}
// DivOperator implements '÷' / '\' for same-typed operands. Integer,
// unsigned and big-integer division by zero now returns an error instead of
// the runtime panic Go would otherwise raise; float division keeps IEEE-754
// semantics (±Inf / NaN). Block operands are divided element-wise and must
// have equal length.
func DivOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
	if e1.Type != e2.Type {
		return nil, fmt.Errorf("Datatypes for operation '÷' must be the same")
	}
	switch e1.Type {
	case "int":
		if e2.Value.(int64) == 0 {
			return nil, fmt.Errorf("Division by zero in operation '÷'")
		}
		return &vm.Elem{Type: "int", Value: (e1.Value.(int64) / e2.Value.(int64))}, nil
	case "uint":
		if e2.Value.(uint64) == 0 {
			return nil, fmt.Errorf("Division by zero in operation '÷'")
		}
		return &vm.Elem{Type: "uint", Value: (e1.Value.(uint64) / e2.Value.(uint64))}, nil
	case "big":
		d1 := e1.Value.(*big.Int)
		d2 := e2.Value.(*big.Int)
		if d2.Sign() == 0 {
			// big.Int.Div panics on a zero divisor.
			return nil, fmt.Errorf("Division by zero in operation '÷'")
		}
		// Note: mutates e1's value in place, matching the other operators.
		d1.Div(d1, d2)
		return &vm.Elem{Type: "big", Value: d1}, nil
	case "flt":
		return &vm.Elem{Type: "flt", Value: (e1.Value.(float64) / e2.Value.(float64))}, nil
	case "uflt":
		return &vm.Elem{Type: "uflt", Value: (e1.Value.(float64) / e2.Value.(float64))}, nil
	case "fblock":
		ar1 := vm.BlockToArray(e1)
		ar2 := vm.BlockToArray(e2)
		if len(ar1) != len(ar2) {
			return nil, fmt.Errorf("Datatypes for operation '÷' must be the same arity")
		}
		floats.Div(ar1, ar2)
		return vm.ArrayToBlock("fblock", ar1), nil
	case "iblock":
		ar1 := vm.BlockToArray(e1)
		ar2 := vm.BlockToArray(e2)
		if len(ar1) != len(ar2) {
			return nil, fmt.Errorf("Datatypes for operation '÷' must be the same arity")
		}
		floats.Div(ar1, ar2)
		return vm.ArrayToBlock("iblock", ar1), nil
	case "uiblock":
		ar1 := vm.BlockToArray(e1)
		ar2 := vm.BlockToArray(e2)
		if len(ar1) != len(ar2) {
			return nil, fmt.Errorf("Datatypes for operation '÷' must be the same arity")
		}
		floats.Div(ar1, ar2)
		return vm.ArrayToBlock("uiblock", ar1), nil
	}
	return nil, fmt.Errorf("I do not know how to perform '÷' for this data")
}
// DivEqualOperator implements '÷=' / '\=': same-typed operands are delegated
// to DivOperator; otherwise a numeric block (e1) may be divided in place by
// a matching scalar constant (e2).
func DivEqualOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
	if e1.Type == e2.Type {
		return DivOperator(v, e1, e2)
	}
	switch e1.Type {
	case "iblock":
		if e2.Type == "int" {
			ar1 := vm.BlockToArray(e1)
			vm.DivConst(float64(e2.Value.(int64)), ar1)
			return vm.ArrayToBlock("iblock", ar1), nil
		}
	case "uiblock":
		if e2.Type == "uint" {
			ar1 := vm.BlockToArray(e1)
			vm.DivConst(float64(e2.Value.(uint64)), ar1)
			return vm.ArrayToBlock("uiblock", ar1), nil
		}
	case "fblock":
		if e2.Type == "flt" {
			ar1 := vm.BlockToArray(e1)
			vm.DivConst(e2.Value.(float64), ar1)
			return vm.ArrayToBlock("fblock", ar1), nil
		}
	}
	// Bug fix: the error previously reported operator '+=' (copy-paste from
	// AddEqualOperator); report the actual operator.
	return nil, fmt.Errorf("I do not know how to perform '÷=' for this data: %v %v", e1.Type, e2.Type)
}
func SubOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
if e1.Type != e2.Type {
return nil, fmt.Errorf("Datatypes for operation '-' must be the same")
}
switch e1.Type {
case "int":
return &vm.Elem{Type: "int", Value: (e1.Value.(int64) - e2.Value.(int64))}, nil
case "uint":
return &vm.Elem{Type: "uint", Value: (e1.Value.(uint64) - e2.Value.(uint64))}, nil
case "big":
d1 := e1.Value.(*big.Int)
d2 := e2.Value.(*big.Int)
d1.Sub(d1, d2)
return &vm.Elem{Type: "big", Value: d1}, nil
case "flt":
return &vm.Elem{Type: "flt", Value: (e1.Value.(float64) - e2.Value.(float64))}, nil
case "uflt":
return &vm.Elem{Type: "uflt", Value: (e1.Value.(float64) - e2.Value.(float64))}, nil
case "fblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '-' must be the same arity")
}
floats.Sub(ar1, ar2)
return vm.ArrayToBlock("fblock", ar1), nil
case "iblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '-' must be the same arity")
}
floats.Sub(ar1, ar2)
return vm.ArrayToBlock("iblock", ar1), nil
case "uiblock":
ar1 := vm.BlockToArray(e1)
ar2 := vm.BlockToArray(e2)
if len(ar1) != len(ar2) {
return nil, fmt.Errorf("Datatypes for operation '-' must be the same arity")
}
floats.Sub(ar1, ar2)
return vm.ArrayToBlock("uiblock", ar1), nil
}
return nil, fmt.Errorf("I do not know how to perform '-' for this data")
}
// SubEqualOperator implements '-=': same-typed operands are delegated to
// SubOperator; otherwise a numeric block (e1) may have a matching scalar
// constant (e2) subtracted in place.
func SubEqualOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
	if e1.Type == e2.Type {
		return SubOperator(v, e1, e2)
	}
	switch e1.Type {
	case "iblock":
		if e2.Type == "int" {
			ar1 := vm.BlockToArray(e1)
			vm.SubConst(float64(e2.Value.(int64)), ar1)
			return vm.ArrayToBlock("iblock", ar1), nil
		}
	case "uiblock":
		if e2.Type == "uint" {
			ar1 := vm.BlockToArray(e1)
			vm.SubConst(float64(e2.Value.(uint64)), ar1)
			return vm.ArrayToBlock("uiblock", ar1), nil
		}
	case "fblock":
		if e2.Type == "flt" {
			ar1 := vm.BlockToArray(e1)
			vm.SubConst(e2.Value.(float64), ar1)
			return vm.ArrayToBlock("fblock", ar1), nil
		}
	}
	// Bug fix: the error previously reported operator '+=' (copy-paste from
	// AddEqualOperator); report the actual operator.
	return nil, fmt.Errorf("I do not know how to perform '-=' for this data: %v %v", e1.Type, e2.Type)
}
// ExpOperator implements exponentiation ('**' / '↑') for same-typed
// operands. int/uint exponentiation goes through float64 math.Pow, so very
// large results lose precision or overflow on conversion back; big integers
// are computed exactly with big.Int.Exp.
func ExpOperator(v *vm.VM, e1 *vm.Elem, e2 *vm.Elem) (*vm.Elem, error) {
	if e1.Type == e2.Type {
		switch e1.Type {
		case "int":
			r1 := e1.Value.(int64)
			r2 := e2.Value.(int64)
			return &vm.Elem{Type: "int", Value: int64(math.Pow(float64(r1), float64(r2)))}, nil
		case "uint":
			r1 := e1.Value.(uint64)
			r2 := e2.Value.(uint64)
			return &vm.Elem{Type: "uint", Value: uint64(math.Pow(float64(r1), float64(r2)))}, nil
		case "flt":
			r1 := e1.Value.(float64)
			r2 := e2.Value.(float64)
			return &vm.Elem{Type: "flt", Value: math.Pow(r1, r2)}, nil
		case "big":
			res := big.NewInt(0)
			r1 := e1.Value.(*big.Int)
			r2 := e2.Value.(*big.Int)
			return &vm.Elem{Type: "big", Value: res.Exp(r1, r2, nil)}, nil
		}
	}
	// Bug fix: the error previously reported operator '+=' (copy-paste);
	// report the actual operator.
	return nil, fmt.Errorf("I do not know how to perform '**' for this data: %v %v", e1.Type, e2.Type)
}
func InitMathOperators() {
log.Debug("[ BUND ] bund.InitMathOperators() reached")
vm.AddOperator("+", AddOperator)
vm.AddOperator("*", MulOperator)
vm.AddOperator("×", MulOperator)
vm.AddOperator("÷", DivOperator)
vm.AddOperator("\\", DivOperator)
vm.AddOperator("-", SubOperator)
vm.AddOperator("+=", AddEqualOperator)
vm.AddOperator("*=", MulEqualOperator)
vm.AddOperator("×=", MulEqualOperator)
vm.AddOperator("\\=", DivEqualOperator)
vm.AddOperator("÷=", DivEqualOperator)
vm.AddOperator("-=", SubEqualOperator)
vm.AddOperator("**", ExpOperator)
vm.AddOperator("↑", ExpOperator)
}
package cios
import (
"encoding/json"
)
// SeriesDataBulkRequest struct for SeriesDataBulkRequest
// SeriesDataBulkRequest struct for SeriesDataBulkRequest.
//
// NOTE(review): this file appears to be OpenAPI-generator output; if the spec
// is regenerated these manual cleanups will need to be re-applied.
type SeriesDataBulkRequest struct {
	// Data is the array of time-series data points; the array length limit
	// is 36000. (Translated from the original Japanese comment.)
	Data []SeriesDataLocationUnix `json:"data"`
}

// NewSeriesDataBulkRequest instantiates a new SeriesDataBulkRequest object.
// It makes sure properties required by the API are set, but the set of
// arguments will change when the set of required properties is changed.
// (gofmt fix: removed the generated trailing comma in the parameter list.)
func NewSeriesDataBulkRequest(data []SeriesDataLocationUnix) *SeriesDataBulkRequest {
	return &SeriesDataBulkRequest{Data: data}
}

// NewSeriesDataBulkRequestWithDefaults instantiates a new
// SeriesDataBulkRequest object. It only assigns default values to properties
// that have them defined and does not guarantee that required properties are
// set.
func NewSeriesDataBulkRequestWithDefaults() *SeriesDataBulkRequest {
	return &SeriesDataBulkRequest{}
}

// GetData returns the Data field value. A nil receiver yields a nil slice.
func (o *SeriesDataBulkRequest) GetData() []SeriesDataLocationUnix {
	if o == nil {
		var ret []SeriesDataLocationUnix
		return ret
	}
	return o.Data
}

// GetDataOk returns a pointer to the Data field value and a boolean that is
// false only for a nil receiver.
func (o *SeriesDataBulkRequest) GetDataOk() (*[]SeriesDataLocationUnix, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Data, true
}

// SetData sets the Data field value.
func (o *SeriesDataBulkRequest) SetData(v []SeriesDataLocationUnix) {
	o.Data = v
}

// MarshalJSON serializes the request as {"data": ...}.
func (o SeriesDataBulkRequest) MarshalJSON() ([]byte, error) {
	// The generated `if true { ... }` wrapper was removed: "data" is a
	// required field and is always serialized, so the output is unchanged.
	return json.Marshal(map[string]interface{}{"data": o.Data})
}
// NullableSeriesDataBulkRequest wraps a SeriesDataBulkRequest together with a
// flag recording whether the value was explicitly set, so that "unset" and
// "set to null" can be distinguished when (un)marshalling JSON.
type NullableSeriesDataBulkRequest struct {
	value *SeriesDataBulkRequest
	isSet bool
}

// Get returns the wrapped value (possibly nil).
func (v NullableSeriesDataBulkRequest) Get() *SeriesDataBulkRequest {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableSeriesDataBulkRequest) Set(val *SeriesDataBulkRequest) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or unmarshalling) has stored a value.
func (v NullableSeriesDataBulkRequest) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableSeriesDataBulkRequest) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableSeriesDataBulkRequest returns a wrapper that is already marked
// as set, holding val.
func NewNullableSeriesDataBulkRequest(val *SeriesDataBulkRequest) *NullableSeriesDataBulkRequest {
	return &NullableSeriesDataBulkRequest{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value; a nil value encodes as JSON null.
func (v NullableSeriesDataBulkRequest) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as
// set (even for JSON null, which leaves value nil).
func (v *NullableSeriesDataBulkRequest) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package common
import (
"encoding/json"
"math/big"
"time"
"github.com/KyberNetwork/reserve-stats/lib/timeutil"
)
// ReserveRateEntry hold 4 float number represent necessary data for a rate entry
// ReserveRateEntry holds 4 float numbers representing the necessary data for
// a rate entry: buy/sell reserve rates and their corresponding sanity rates,
// all expressed in ETH (converted from wei by NewReserveRateEntry).
type ReserveRateEntry struct {
	BuyReserveRate  float64 `json:"buy_reserve_rate"`
	BuySanityRate   float64 `json:"buy_sanity_rate"`
	SellReserveRate float64 `json:"sell_reserve_rate"`
	SellSanityRate  float64 `json:"sell_sanity_rate"`
}
// LastRate store last rate
type LastRate struct {
FromBlock uint64 `json:"from_block"`
ToBlock uint64 `json:"to_block"`
Rate *ReserveRateEntry
}
// NewReserveRateEntry returns new ReserveRateEntry from results of GetReserveRate method.
// The reserve rates are stored in following structure:
//   - reserveRate: [sellReserveRate(index: 0)]-[buyReserveRate)(index: 0)]-[sellReserveRate(index: 1)]-[buyReserveRate)(index: 1)]...
//   - sanityRate:  [sellSanityRate(index: 0)]-[buySanityRate)(index: 0)]-[sellSanityRate(index: 1)]-[buySanityRate)(index: 1)]...
//
// Both slices must therefore contain at least 2*(index+1) elements; shorter
// slices cause an index-out-of-range panic.
func NewReserveRateEntry(reserveRates, sanityRates []*big.Int, index int) ReserveRateEntry {
	return ReserveRateEntry{
		BuyReserveRate:  fromWeiETH(reserveRates[index*2+1]),
		BuySanityRate:   fromWeiETH(sanityRates[index*2+1]),
		SellReserveRate: fromWeiETH(reserveRates[index*2]),
		SellSanityRate:  fromWeiETH(sanityRates[index*2]),
	}
}
func fromWeiETH(amount *big.Int) float64 {
const ethDecimal = 18
if amount == nil {
return 0
}
f := new(big.Float).SetInt(amount)
power := new(big.Float).SetInt(new(big.Int).Exp(
big.NewInt(10), big.NewInt(ethDecimal), nil,
))
res := new(big.Float).Quo(f, power)
result, _ := res.Float64()
return result
}
// ReserveRates hold all the pairs's rate for a particular reserve and metadata
type ReserveRates struct {
Timestamp time.Time `json:"timestamp"`
FromBlock uint64 `json:"from_block"`
ToBlock uint64 `json:"to_block"`
Rates ReserveRateEntry `json:"rates"`
}
// MarshalJSON implements custom JSON marshaler for ReserveRates to format timestamp in unix millis instead of RFC3339.
func (rr ReserveRates) MarshalJSON() ([]byte, error) {
type AliasReserveRates ReserveRates
return json.Marshal(struct {
Timestamp uint64 `json:"timestamp"`
AliasReserveRates
}{
AliasReserveRates: (AliasReserveRates)(rr),
Timestamp: timeutil.TimeToTimestampMs(rr.Timestamp),
})
}
// UnmarshalJSON implements custom JSON unmarshaler for ReserveRates to format timestamp in unix millis instead of RFC3339.
func (rr *ReserveRates) UnmarshalJSON(data []byte) error {
type AliasReserveRates ReserveRates
decoded := new(struct {
Timestamp uint64 `json:"timestamp"`
AliasReserveRates
})
if err := json.Unmarshal(data, decoded); err != nil {
return err
}
rr.Timestamp = timeutil.TimestampMsToTime(decoded.Timestamp)
rr.FromBlock = decoded.FromBlock
rr.ToBlock = decoded.ToBlock
rr.Rates = decoded.Rates
return nil
} | reserverates/common/types.go | 0.744842 | 0.487307 | types.go | starcoder |
package gomfa
import (
"math"
)
func Rm2v(r *[3][3]float64, w *[3]float64) {
/*
** - - - - -
** R m 2 v
** - - - - -
**
** Express an r-matrix as an r-vector.
**
** Given:
** r *[3][3] float64 rotation matrix
**
** Returned:
** w *[3] float64 rotation vector (Note 1)
**
** Notes:
**
** 1) A rotation matrix describes a rotation through some angle about
** some arbitrary axis called the Euler axis. The "rotation vector"
** returned by this function has the same direction as the Euler axis,
** and its magnitude is the angle in radians. (The magnitude and
** direction can be separated by means of the function Pn.)
**
** 2) If r is null, so is the result. If r is not a rotation matrix
** the result is undefined; r must be proper (i.e. have a positive
** determinant) and real orthogonal (inverse = transpose).
**
** 3) The reference frame rotates clockwise as seen looking along
** the rotation vector from the origin.
**
** This revision: 2021 May 11
*/
var x, y, z, s2, c2, phi, f float64
x = r[1][2] - r[2][1]
y = r[2][0] - r[0][2]
z = r[0][1] - r[1][0]
s2 = math.Sqrt(x*x + y*y + z*z)
if s2 > 0 {
c2 = r[0][0] + r[1][1] + r[2][2] - 1.0
phi = math.Atan2(s2, c2)
f = phi / s2
w[0] = x * f
w[1] = y * f
w[2] = z * f
} else {
w[0] = 0.0
w[1] = 0.0
w[2] = 0.0
}
/* Finished. */
}
/*----------------------------------------------------------------------
**
**
** Copyright (C) 2021, <NAME>
** All rights reserved.
**
** This library is derived, with permission, from the International
** Astronomical Union's "Standards of Fundamental Astronomy" library,
** available from http://www.iausofa.org.
**
** The GOMFA version is intended to retain identical functionality to
** the SOFA library, but made distinct through different namespaces and
** file names, as set out in the SOFA license conditions. The SOFA
** original has a role as a reference standard for the IAU and IERS,
** and consequently redistribution is permitted only in its unaltered
** state. The GOMFA version is not subject to this restriction and
** therefore can be included in distributions which do not support the
** concept of "read only" software.
**
** Although the intent is to replicate the SOFA API (other than
** replacement of prefix names) and results (with the exception of
** bugs; any that are discovered will be fixed), SOFA is not
** responsible for any errors found in this version of the library.
**
** If you wish to acknowledge the SOFA heritage, please acknowledge
** that you are using a library derived from SOFA, rather than SOFA
** itself.
**
**
** TERMS AND CONDITIONS
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
**
** 1 Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
**
** 2 Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
**
** 3 Neither the name of the Standards Of Fundamental Astronomy Board,
** the International Astronomical Union nor the names of its
** contributors may be used to endorse or promote products derived
** from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
** FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
** COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
** BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
** CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
** ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
** POSSIBILITY OF SUCH DAMAGE.
**
*/ | rm2v.go | 0.7641 | 0.668664 | rm2v.go | starcoder |
package day14
import (
"fmt"
"github.com/chigley/advent2021"
)
// Pair is an adjacent pair of polymer elements.
type Pair [2]rune

// Polymer tracks a polymer compactly by pair frequencies rather than the
// full element string, which keeps growth linear in distinct pairs instead
// of exponential in steps.
type Polymer struct {
	// pairFreq tracks the frequency of each pair of elements in the polymer.
	pairFreq map[Pair]int

	// lastChar stores the last element from the input string used to
	// initialsise the polymer, used to compute its score.
	lastChar rune
}
// NewPolymer builds a Polymer from a template string of at least two
// elements by counting the frequency of every adjacent element pair.
func NewPolymer(template string) (*Polymer, error) {
	if len(template) < 2 {
		return nil, fmt.Errorf("day14: got template length %d, expected 2+", len(template))
	}

	freq := make(map[Pair]int)
	for i := 1; i < len(template); i++ {
		freq[Pair{rune(template[i-1]), rune(template[i])}]++
	}

	return &Polymer{
		pairFreq: freq,
		lastChar: rune(template[len(template)-1]),
	}, nil
}
// StepN applies n successive pair-insertion steps using rules r, stopping at
// the first step that fails (e.g. a pair with no matching rule).
func (p *Polymer) StepN(r Rules, n int) error {
	for i := 0; i < n; i++ {
		if err := p.Step(r); err != nil {
			return fmt.Errorf("day14: step %d: %w", i, err)
		}
	}
	return nil
}
// Step applies one round of pair insertion: every pair AB with rule AB -> C
// becomes the two pairs AC and CB, each inheriting AB's frequency. It
// returns an error if any present pair has no rule, leaving p unchanged.
func (p *Polymer) Step(r Rules) error {
	nextPairFreq := make(map[Pair]int)
	for pair, freq := range p.pairFreq {
		element, ok := r[pair]
		if !ok {
			return fmt.Errorf("day14: missing rule for %c%c", pair[0], pair[1])
		}
		// Inserting element between the pair splits it into two new pairs.
		nextPairFreq[Pair{pair[0], element}] += freq
		nextPairFreq[Pair{element, pair[1]}] += freq
	}
	p.pairFreq = nextPairFreq
	return nil
}
// Score returns the difference between the frequency of the most common and
// least common elements in the polymer.
func (p *Polymer) Score() int {
	// Count occurrences of the first character in every pair, then manually
	// increment the count for the last character from the initial template
	// input (it is the only element not counted as a pair's first member).
	elementFreqs := make(map[rune]int)
	for pair, freq := range p.pairFreq {
		elementFreqs[pair[0]] += freq
	}
	elementFreqs[p.lastChar]++

	// Collect the frequencies into a slice for the min/max helpers.
	var i int
	occs := make([]int, len(elementFreqs))
	for _, f := range elementFreqs {
		occs[i] = f
		i++
	}

	return advent2021.MaxInts(occs) - advent2021.MinInts(occs)
}
// Clone returns a deep copy of the polymer: the pair-frequency map is copied
// so that stepping the clone does not mutate the original.
func (p *Polymer) Clone() *Polymer {
	pairFreqCopy := make(map[Pair]int, len(p.pairFreq))
	for k, v := range p.pairFreq {
		pairFreqCopy[k] = v
	}
	return &Polymer{
		pairFreq: pairFreqCopy,
		lastChar: p.lastChar,
	}
}
type Rules map[Pair]rune
// https://www.youtube.com/watch?v=k2qgadSvNyU
func NewRules(rules []string) (Rules, error) {
r := make(Rules, len(rules))
for _, rr := range rules {
var p1, p2, out rune
if _, err := fmt.Sscanf(rr, "%c%c -> %c", &p1, &p2, &out); err != nil {
return nil, fmt.Errorf("day14: failed to parse %q", rr)
}
r[Pair{p1, p2}] = out
}
return r, nil
} | day14/polymer.go | 0.674694 | 0.434761 | polymer.go | starcoder |
package main
// Layer is the storyboard layer type to use for osu!.
type Layer int

const (
	// Back is the rear storyboard layer (presumably drawn behind Front —
	// confirm against the osu! storyboard output).
	Back Layer = iota
	// Front is the forward storyboard layer.
	Front
)
// BMSFileData shows most things you'd want to know about a bms file.
type BMSFileData struct {
	// Metadata contains the BMS file's metadata.
	Metadata BMSMetadata `json:"metadata"`

	// StartingBPM is what BPM the first track will start with unless it is changed.
	StartingBPM float64

	// LNObject is the designated LN object for this file, unless it is LNTYPE 2,
	// where it will not be used.
	LNObject string

	// TrackLines contains a map of track numbers and their lines.
	TrackLines map[int][]Line

	// HitObjects contains a map of lane # as the key, and hit objects as the value.
	HitObjects map[int][]HitObject

	// TimingPoints contains a map of starting times (in ms) for timing points, and what BPM to
	// change to at that time.
	TimingPoints map[float64]float64

	// SoundEffects contains an array of sound effects to use in the chart.
	SoundEffects []SoundEffect

	// BGAFrames contains an array of background animation frames to use.
	// This is only applicable to osu! or if the file is being output to JSON.
	BGAFrames []BGAFrame

	// Audio contains information about the file's audio. Ref AudioData for more information.
	Audio AudioData

	// Indices contains a list of indexes mapping hexadecimal codes to values.
	Indices IndexData
}

// AudioData contains data about the BMS file's audio, EXCEPT for sound effects, which
// are included at the highest level. This simply contains two lists which map out a hexadecimal
// code to an audio file. Both are separated due to each having a separate purpose.
type AudioData struct {
	// StringArray maps sample indices to audio file paths.
	StringArray []string
	// HexadecimalArray maps hexadecimal codes to audio file paths.
	HexadecimalArray []string
}

// IndexData contains indices which map hexadecimal codes to values.
type IndexData struct {
	// BPMChanges maps hexadecimal codes to new BPM values.
	BPMChanges map[string]float64
	// Stops maps hexadecimal codes to STOP values.
	Stops map[string]float64
	// BGA maps hexadecimal codes to a file path.
	BGA map[string]string
}

// BGAFrame is a specific BGA (background animation) frame of the chart.
type BGAFrame struct {
	// StartTime is the precise time, in milliseconds, when this BGA frame should appear.
	StartTime float64 `json:"start_time"`
	// File is the location of the background animation frame as an image.
	File string `json:"file"`
	// Layer is what hypothetical z-index should be used (only applicable to osu!).
	Layer Layer `json:"layer"`
}
// BMSMetadata contains the general metadata of the map, and does not contain any technical
// information.
type BMSMetadata struct {
	// Title is the name of the song.
	Title string `json:"title"`
	// Artist is the composer of the song.
	Artist string `json:"artist"`
	// Tags are used to help find the map wherever it is posted.
	Tags string `json:"tags"`
	// Difficulty is the name of the chart. (This is synonymous to osu!'s "Difficulty Name"
	// metadata field in the editor)
	Difficulty string `json:"difficulty"`
	// StageFile is the main background for the chart, presumably used in BMS clients on
	// song select screen.
	StageFile string `json:"stage_file"`
	// Subtitle is any additional comments left by the map creator and/or artist.
	Subtitle string `json:"subtitle"`
	// SubArtists is an array of usernames who have assisted with map creation.
	// Typically, object/playtester/etc. people are put here.
	SubArtists []string `json:"subartists"`
	// Banner is used in the song select screen and, for some clients, the image
	// which appears while the chart is loading.
	Banner string `json:"banner"`
}

// SoundEffect is a sound effect which will always play at the start time given.
// The player does not have to hit a note for this to trigger.
type SoundEffect struct {
	// StartTime is the time, in milliseconds, where the sound effect should play.
	StartTime float64 `json:"start_time"`
	// Sample is the index of the sample which should be played (see AudioData).
	Sample int `json:"sample"`
	// Volume is the volume of this sound effect. Can be from 0 to 100.
	Volume int `json:"volume"`
}

// HitObject is a note or long note in the chart.
type HitObject struct {
	// StartTime defines the exact millisecond timestamp where the note should be hit at.
	StartTime float64 `json:"start_time"`
	// EndTime is only used if IsLongNote is true, and denotes when the long note ends.
	EndTime float64 `json:"end_time"`
	// IsLongNote is true if the hit object type is a long note (hold and release).
	IsLongNote bool `json:"is_long_note"`
	// KeySounds contains data about the key sound which plays when the player hits this note.
	// It is nil when the note has no key sound.
	KeySounds *KeySound `json:"key_sounds,omitempty"`
}
// KeySound represents a sound effect which should be played when the player
// hits a note.
type KeySound struct {
	// Sample is the index of the sample which should be played (see BMSFileData.StringArray).
	Sample int `json:"sample"`
	// Volume is the volume of this key sound. Can be from 0 to 100.
	Volume int `json:"volume"`
}

// Line is a #, followed by channel #, followed by the message.
// A Line is associated with a specific track.
// This is not a definition for headers.
type Line struct {
	// What channel this line represents.
	Channel string `json:"channel"`
	// The message of the line.
	Message string `json:"message"`
}

// LocalBPMChange represents a BPM change (or exBPM) which occurs within a specific track.
// See both https://hitkey.nekokan.dyndns.info/cmds.htm#BPMXX and
// https://hitkey.nekokan.dyndns.info/cmds.htm#EXBPMXX for more information on this.
type LocalBPMChange struct {
	// The precise location of where the BPM change occurs.
	Position float64 `json:"position"`
	// The new BPM value.
	Bpm float64 `json:"bpm"`
	// Whether the BPM value is negative or not. If the BPM is 0.0 this will still be false.
	IsNegative bool `json:"is_negative"`
}

// LocalStop represents a #STOP directive which occurs within a specific track.
// See https://hitkey.nekokan.dyndns.info/cmds.htm#STOPXX for more information on this
// directive.
type LocalStop struct {
	// Duration is how long the #STOP should last for.
	Duration float64 `json:"duration"`
	// Position is the precise location of where the #STOP occurs.
	Position float64 `json:"position"`
}

// LocalTrackData holds information about a track's measure scale, BPM changes, and #STOP
// directives. It does not contain information on where notes should be in the track.
type LocalTrackData struct {
	// MeasureScale is the definition of the length of this track.
	// It is based on 4/4 meter.
	MeasureScale float64 `json:"measure_scale"`
	// A record of all BPM changes which occur in this track.
	BPMChanges []LocalBPMChange `json:"bpm_changes"`
	// A record of all #STOP directives which occur in this track.
	Stops []LocalStop `json:"stops"`
}

// ConversionStatus summarizes the per-folder outcome of a conversion run.
type ConversionStatus struct {
	// Name of the folder.
	Name string
	// Success indicates how many files within the folder succeeded.
	Success int
	// Fail indicates how many files within the folder failed or did not convert.
	Fail int
	// Skip is true when the folder was skipped altogether.
	Skip bool
}
package format
import (
"math"
"time"
)
// fractionDivisor is the resolution of the Fraction field: one second is
// divided into 0x10000 (65536) equal parts.
const fractionDivisor = 0x10000

// Timestamp represents a point in time using fixed resolution.
type Timestamp struct {
	// Second is the whole-second component (0-255).
	Second uint8
	// Fraction counts 1/65536ths of a second past Second.
	Fraction uint16
}

// TimestampLimit returns the highest possible timestamp value.
func TimestampLimit() Timestamp {
	return Timestamp{
		Second:   math.MaxUint8,
		Fraction: math.MaxUint16,
	}
}

// TimestampFromSeconds creates a timestamp instance from given floating point value.
// NOTE(review): assumes 0 <= value < 256; negative or larger values overflow
// the uint8/uint16 conversions — confirm callers guarantee this range.
func TimestampFromSeconds(value float32) Timestamp {
	second := uint8(value)
	return Timestamp{
		Second:   second,
		Fraction: uint16((value - float32(second)) * fractionDivisor),
	}
}
// TimestampFromDuration creates a timestamp instance from given duration
// value. Durations at or beyond the representable maximum saturate to
// TimestampLimit.
func TimestampFromDuration(d time.Duration) Timestamp {
	limit := TimestampLimit()
	if d >= limit.ToDuration() {
		return limit
	}
	return Timestamp{
		Second: uint8(d / time.Second),
		// BUG FIX: the fractional part is the remainder after removing whole
		// seconds (d % time.Second). The previous code used (d - time.Second),
		// which subtracts exactly one second and therefore produced a wrong
		// (negative or overflowing) fraction for any duration outside [1s, 2s).
		Fraction: uint16(((d % time.Second) * fractionDivisor) / time.Second),
	}
}
// ToDuration returns the equivalent duration for this timestamp: whole
// seconds plus Fraction/65536ths of a second.
func (ts Timestamp) ToDuration() time.Duration {
	return (time.Duration(ts.Second) * time.Second) + (time.Duration(ts.Fraction)*time.Second)/fractionDivisor
}

// IsAfter returns true if this timestamp is later than the given one.
func (ts Timestamp) IsAfter(other Timestamp) bool {
	return (ts.Second > other.Second) || ((ts.Second == other.Second) && (ts.Fraction > other.Fraction))
}

// IsBefore returns true if this timestamp is before the given one.
// Note: for equal timestamps both IsAfter and IsBefore return false.
func (ts Timestamp) IsBefore(other Timestamp) bool {
	return (ts.Second < other.Second) || ((ts.Second == other.Second) && (ts.Fraction < other.Fraction))
}
// Plus returns a timestamp with the given one added to the current one.
// The result is saturated if the addition is larger than the timestamp can hold.
func (ts Timestamp) Plus(other Timestamp) Timestamp {
	return TimestampFromDuration(ts.ToDuration() + other.ToDuration())
}

// Minus returns a timestamp with given one removed from the current one.
// The result is clipped to 0 if subtraction would result in a negative number
// (or if both are equal).
func (ts Timestamp) Minus(other Timestamp) Timestamp {
	otherLinear := other.toLinear()
	tsLinear := ts.toLinear()
	if otherLinear >= tsLinear {
		return timestampFromLinear(0)
	}
	return timestampFromLinear(tsLinear - otherLinear)
}

// toLinear packs the timestamp into a single uint32 with Second in the upper
// bits and Fraction in the lower 16, preserving ordering and arithmetic.
func (ts Timestamp) toLinear() uint32 {
	return uint32(ts.Second)<<16 + uint32(ts.Fraction)
}

// timestampFromLinear is the inverse of toLinear.
func timestampFromLinear(value uint32) Timestamp {
	return Timestamp{
		Second:   uint8(value >> 16),
		Fraction: uint16(value),
	}
}
package bulletproofs
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"github.com/btcsuite/btcd/btcec"
"math/big"
)
var (
	// prefix1 is the ascii string "1st generation: "
	prefix1, _ = hex.DecodeString("3173742067656e65726174696f6e3a20")
	// prefix2 is the ascii string "2nd generation: "
	prefix2, _ = hex.DecodeString("326e642067656e65726174696f6e3a20")
	// curve is secp256k1
	curve = btcec.S256()
	// sqrtMinusThree is sqrt(-3) as a field element in secp256k1,
	// precomputed once at package init (panics if no root exists).
	sqrtMinusThree = computeSqrtMinusThree()
	// sqrtMinusThreeSubOneOverTwo is (sqrt(-3) - 1) / 2 as a field element
	// in secp256k1, used by the Shallue–van de Woestijne encoding.
	sqrtMinusThreeSubOneOverTwo = subOneOverTwo(sqrtMinusThree)
)
// newRFC6979 generates an ECDSA nonce deterministically according to RFC 6979.
// It performs steps B through G of section 3.2 and returns the resulting
// (V, K) HMAC-DRBG state; candidate values are then drawn with generate.
func newRFC6979(hash []byte) ([]byte, []byte) {
	oneInitializer := []byte{0x01}

	// Step B: V = 0x01 0x01 ... 0x01 (32 bytes, matching SHA-256 output).
	v := bytes.Repeat(oneInitializer, 32)

	// Step C: K = 0x00 0x00 ... 0x00 (32 bytes).
	k := make([]byte, 32)

	// Step D: K = HMAC_K(V || 0x00 || hash)
	k = mac(k, append(append(v, 0x00), hash...))

	// Step E: V = HMAC_K(V)
	v = mac(k, v)

	// Step F: K = HMAC_K(V || 0x01 || hash)
	k = mac(k, append(append(v, 0x01), hash...))

	// Step G: V = HMAC_K(V)
	v = mac(k, v)

	return v, k
}
// update computes and returns the next values for V and K. This is the
// RFC 6979 state update performed between successive candidate generations
// (section 3.2, step h.3).
func update(v, k []byte) ([]byte, []byte) {
	// K = HMAC_K(V || 0x00)
	k = mac(k, append(v, 0x00))

	// V = HMAC_K(V)
	v = mac(k, v)

	return v, k
}
// generate returns candidate bytes from the (v, k) DRBG state along with the
// updated state. It concatenates HMAC outputs until at least qlen bits (the
// bit length of the secp256k1 group order) have been produced.
func generate(v, k []byte) ([]byte, []byte, []byte) {
	qlen := curve.Params().N.BitLen()

	// Step H1
	var t []byte

	// Step H2
	for len(t)*8 < qlen {
		// V = HMAC_K(V)
		v = mac(k, v)

		// T = T || V
		t = append(t, v...)
	}

	return t, v, k
}
// mac returns an HMAC of the given key and message.
func mac(k, m []byte) []byte {
h := hmac.New(sha256.New, k)
h.Write(m)
return h.Sum(nil)
}
// computeSqrtMinusThree returns the field element a such that a*a = -3 mod p,
// where p is the order of the field secp256k1. It panics if no square root
// exists; it is only called once, at package initialization.
func computeSqrtMinusThree() *big.Int {
	curveOrder := curve.P
	// -3 mod p == p - 3.
	curveOrderMinus3 := new(big.Int).Sub(curveOrder, new(big.Int).SetUint64(3))
	sqrt := new(big.Int)
	if sqrt.ModSqrt(curveOrderMinus3, curveOrder) == nil {
		panic("failed to find root")
	}
	return sqrt
}
// subOneOverTwo computes (c - 1)/2 on the secp256k1 field.
// NOTE(review): this uses integer DivMod by 2 and discards the remainder
// (placed into the curveOrder copy); presumably c - 1 is always even for the
// inputs used here — confirm for new callers.
func subOneOverTwo(c *big.Int) *big.Int {
	// This is modified by DivMod (it receives the remainder).
	curveOrder := new(big.Int).Set(curve.P)

	// (c - 1) mod p == (c + p - 1) mod p
	result := new(big.Int).Add(c, new(big.Int).Sub(curve.P, big.NewInt(1)))
	result.DivMod(result, big.NewInt(2), curveOrder)
	return result
}
// EncodeFieldElementToCurve uses the Shallue–van de Woestijne encoding from the
// paper "Indifferentiable Hashing to Barreto-Naehrig Curves" to map the given
// field element to a point on secp256k1. Note that this implementation is not
// constant time.
//
// Three candidate x coordinates are derived from t; at least one is
// guaranteed by the construction to have a corresponding y on the curve.
// ModSqrtFast and IsQuadraticResidue are package helpers defined elsewhere.
func EncodeFieldElementToCurve(t *big.Int) *Point {
	// Calculate the following:
	// w  = sqrt(-3) * t / (1 + b + t²)
	// x1 = (-1 + sqrt(-3))/2 - t*w
	// x2 = -(x1 + 1)
	// x3 = 1 + 1/w^2
	tt := new(big.Int).Mul(t, t)
	tt.Mod(tt, curve.P)

	// Note that b = 7 for secp256k1, so 1 + b + t² = 8 + t².
	wd2 := new(big.Int).Add(big.NewInt(7+1), tt)
	wd2.ModInverse(wd2, curve.P)

	w := new(big.Int).Mul(sqrtMinusThree, t)
	w.Mul(w, wd2)
	w.Mod(w, curve.P)

	x1 := new(big.Int).Sub(sqrtMinusThreeSubOneOverTwo, new(big.Int).Mul(t, w))
	x1.Mod(x1, curve.P)

	x2 := new(big.Int).Add(big.NewInt(1), x1)
	x2.Sub(curve.P, x2)
	x2.Mod(x2, curve.P)

	wwInv := new(big.Int).Mul(w, w)
	wwInv.ModInverse(wwInv, curve.P)
	x3 := new(big.Int).Add(big.NewInt(1), wwInv)
	x3.Mod(x3, curve.P)

	// Compute y² = x³ + 7 for each x coordinate (alpha, beta, gamma) and
	// take square roots; residue flags pick which candidate is valid.
	alpha := new(big.Int).Mul(x1, x1)
	alpha.Mul(alpha, x1)
	alpha.Add(alpha, curve.Params().B)
	alpha.Mod(alpha, curve.P)
	y1 := ModSqrtFast(alpha)
	alphaQuadraticResidue := IsQuadraticResidue(alpha)

	beta := new(big.Int).Mul(x2, x2)
	beta.Mul(beta, x2)
	beta.Add(beta, curve.Params().B)
	beta.Mod(beta, curve.P)
	y2 := ModSqrtFast(beta)
	betaQuadraticResidue := IsQuadraticResidue(beta)

	gamma := new(big.Int).Mul(x3, x3)
	gamma.Mul(gamma, x3)
	gamma.Add(gamma, curve.Params().B)
	gamma.Mod(gamma, curve.P)
	y3 := ModSqrtFast(gamma)

	// Select x1 if it is valid, otherwise x2, otherwise x3.
	var x, y *big.Int
	if !alphaQuadraticResidue && betaQuadraticResidue {
		x = x2
		y = y2
	} else if !alphaQuadraticResidue && !betaQuadraticResidue {
		x = x3
		y = y3
	} else {
		x = x1
		y = y1
	}

	// The parity of t selects the sign of y so the map covers both halves.
	if isOdd(t) {
		y.Sub(curve.P, y) // negate y
	}

	xBytes := x.Bytes()
	yBytes := y.Bytes()
	return &Point{
		new(big.Int).SetBytes(xBytes[:]),
		new(big.Int).SetBytes(yBytes[:]),
	}
}
// createSingleGenerator computes a group element from hashing key32. Two
// domain-separated hashes ("1st generation: ", "2nd generation: ") are each
// mapped to the curve and summed, yielding a point with no known discrete
// log relative to the base point.
func createSingleGenerator(key32 []byte) *Point {
	var h []byte
	h = append(h, prefix1...)
	h = append(h, key32...)
	b32 := sha256.Sum256(h)

	var h2 []byte
	h2 = append(h2, prefix2...)
	h2 = append(h2, key32...)
	b322 := sha256.Sum256(h2)

	G1 := EncodeFieldElementToCurve(new(big.Int).SetBytes(b32[:]))
	G2 := EncodeFieldElementToCurve(new(big.Int).SetBytes(b322[:]))

	accumx, accumy := curve.Add(G1.X, G1.Y, G2.X, G2.Y)
	// Sanity check: the sum of two curve points must stay on the curve.
	if !curve.IsOnCurve(accumx, accumy) {
		panic("should be on curve")
	}

	return &Point{accumx, accumy}
}
// GeneratorsCreate creates and returns a list of n nothing-up-my-sleeve
// generator points. The RFC 6979 DRBG is seeded deterministically from the
// secp256k1 base point coordinates, so the same n always yields the same
// generators.
func GeneratorsCreate(n int) []*Point {
	var seed []byte
	seed = append(seed, curve.Gx.Bytes()...)
	seed = append(seed, curve.Gy.Bytes()...)

	var points []*Point
	var t []byte
	v, k := newRFC6979(seed)
	for i := 0; i < n; i++ {
		t, v, k = generate(v, k)
		points = append(points, createSingleGenerator(t))
		v, k = update(v, k)
	}
	return points
}
package bitfield
// https://cs.opensource.google/go/x/text/+/master:internal/gen/bitfield/bitfield.go
// https://github.com/HewlettPackard/structex
// maskMap maps a bit length n (0-63) to a mask with the n lowest bits set.
// The entries are generated at init time instead of being spelled out as a
// 64-line binary literal; the contents are identical. Lookups for keys
// outside 0-63 yield the map zero value, 0, just as before.
var maskMap = func() map[int]uint64 {
	m := make(map[int]uint64, 64)
	for n := 0; n < 64; n++ {
		m[n] = (uint64(1) << uint(n)) - 1
	}
	return m
}()
// Extract returns the len-bit field of v starting at bit position start
// (bit 0 is the least significant bit). len <= 0 yields 0; len >= 64 yields
// all bits of v at and above start. start must be non-negative (a negative
// shift count panics, as before).
func Extract(v uint64, start, len int) uint64 {
	if len <= 0 {
		return 0
	}
	if len >= 64 {
		// BUG FIX: the previous map-based mask had no entry for 64, so a
		// full-width extract silently returned 0.
		return v >> uint(start)
	}
	// Computed mask replaces the package-level lookup table; identical for
	// len in 0-63.
	return (v >> uint(start)) & ((uint64(1) << uint(len)) - 1)
}
package cast
import (
"fmt"
"strconv"
)
// ToFloat64 casts an interface to a float64 type, discarding any errors.
// It is the error-swallowing convenience wrapper around ToFloat64E.
func ToFloat64(i interface{}) float64 {
	v, _ := ToFloat64E(i)
	return v
}
// ToFloat64E casts an interface to a float64 type. Pointers are first
// dereferenced via indirect; numeric types are converted directly,
// strings are parsed with strconv.ParseFloat, and booleans map to 1/0.
// Any other type (or an unparseable string) yields an error.
func ToFloat64E(i interface{}) (float64, error) {
	i = indirect(i)
	switch v := i.(type) {
	case float64:
		return v, nil
	case float32:
		return float64(v), nil
	case int:
		return float64(v), nil
	case int8:
		return float64(v), nil
	case int16:
		return float64(v), nil
	case int32:
		return float64(v), nil
	case int64:
		return float64(v), nil
	case uint:
		return float64(v), nil
	case uint8:
		return float64(v), nil
	case uint16:
		return float64(v), nil
	case uint32:
		return float64(v), nil
	case uint64:
		return float64(v), nil
	case string:
		if f, err := strconv.ParseFloat(v, 64); err == nil {
			return f, nil
		}
		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
	case bool:
		if v {
			return 1, nil
		}
		return 0, nil
	default:
		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
	}
}
// ToFloat32 casts an interface to a float32 type, discarding any errors.
// It is the error-swallowing convenience wrapper around ToFloat32E.
func ToFloat32(i interface{}) float32 {
	v, _ := ToFloat32E(i)
	return v
}
// ToFloat32E casts an interface to a float32 type. Pointers are first
// dereferenced via indirect; numeric types are converted directly,
// strings are parsed with strconv.ParseFloat (32-bit precision), and
// booleans map to 1/0. Any other type, or an unparseable string,
// yields an error.
func ToFloat32E(i interface{}) (float32, error) {
	i = indirect(i)
	switch s := i.(type) {
	case float64:
		return float32(s), nil
	case float32:
		return s, nil
	case int:
		return float32(s), nil
	case int64:
		return float32(s), nil
	case int32:
		return float32(s), nil
	case int16:
		return float32(s), nil
	case int8:
		return float32(s), nil
	case uint:
		return float32(s), nil
	case uint64:
		return float32(s), nil
	case uint32:
		return float32(s), nil
	case uint16:
		return float32(s), nil
	case uint8:
		return float32(s), nil
	case string:
		// ParseFloat with bitSize 32 rounds to the nearest float32.
		v, err := strconv.ParseFloat(s, 32)
		if err == nil {
			return float32(v), nil
		}
		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
	case bool:
		if s {
			return 1, nil
		}
		return 0, nil
	default:
		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
	}
} | float.go | 0.788461 | 0.42054 | float.go | starcoder
package models
// Dataset: Earnings by place of work
// Dimensions: Sex | Employment status
// Variables: gross weekly pay, weekly pay excluding overtime
// Table
// Observation | Variable (Earnings)| Sex | Employment status
// 123 | Gross weekly pay | Male | Full Time
// 70 | Gross weekly pay | Male | Part Time
// 119 | Gross weekly pay | Female| Full Time
// 70 | Gross weekly pay | Female| Part Time
// 123 | weekly pay excluding overtime | Male | Full Time
// 70 | weekly pay excluding overtime | Male | Part Time
// 119 | weekly pay excluding overtime | Female| Full Time
// 70 | weekly pay excluding overtime | Female| Part Time
// Table - Observation | DimensionID | DimensionOptionID | DimensionOptionLabel
// Observation | Geography ID | Geography Label | Sex ID | Sex Label | Age ID | Age Label | Residence Type ID | Residence Type Label
// 105 | K04000001 | England and Wales | 1 | Females | 1 | 50+ | 1 | Lives in a communal establishment
// human readable flag - include labels / replace them?
// The CSV output should not rely on fixed column locations. Columns should be identified by their header value/label.
// by doing this we can remove all of the blank
//Denormalisation
// ---------------
// The ID / details / label for the dimension can be held at the dataset level - it should not be repeated throughout the table.
// Fixed dimension options - if the dimension only ever has one value for the dataset, we can define it as a fixed dimension.
// This prevents it being repeated for each observation.
// Create ID's for dimensions and their options
// when multiple datasets have the same dimensions they can be the same ID instead of having various string representations of the same thing.
// e.g. year can mean different things in different datasets - financial year or calendar year
// can we translate dimensions to ID's as part of the data baker step??
// Datasets is a paginated collection of Dataset items. First/Next/
// Prev/Last hold navigation references for the page window (presumably
// URLs — confirm against the API handlers).
type Datasets struct {
	Items        []*Dataset `json:"items,omitempty"`
	Count        int        `json:"count"`
	Total        int        `json:"total"`
	StartIndex   int        `json:"startIndex"`
	ItemsPerPage int        `json:"itemsPerPage"`
	First        string     `json:"first"`
	Next         string     `json:"next,omitempty"`
	Prev         string     `json:"prev,omitempty"`
	Last         string     `json:"last"`
	Page         int        `json:"page"`
	TotalPages   int        `json:"totalPages"`
}

// DataResource groups related datasets under a single titled resource.
type DataResource struct {
	ID       string    `json:"id"`
	Title    string    `json:"title"`
	Metadata *Metadata `json:"metadata"`
	Datasets []string  `json:"datasets"`
}

// Dataset is a single dataset: its metadata, its dimensions and,
// optionally, the observation table itself.
type Dataset struct {
	ID               string       `json:"id"`
	CustomerFacingID string       `json:"customerFacingId"`
	Title            string       `json:"title"`
	URL              string       `json:"url,omitempty"`
	Metadata         *Metadata    `json:"metadata,omitempty"`
	Dimensions       []*Dimension `json:"dimensions,omitempty"`
	Data             *Table       `json:"data,omitempty"`
}

// GeoArea is a geographical area with its type/level classification
// and the populations recorded for it.
type GeoArea struct {
	ID          string        `json:"id"`
	Name        string        `json:"name"`
	ExtCode     string        `json:"extCode"`
	Metadata    *Metadata     `json:"metadata,omitempty"`
	RelGeoArea  string        `json:"relid,omitempty"` // related geo area id — confirm semantics with producers
	AreaType    string        `json:"areaType"`
	AreaLevel   string        `json:"areaLevel"`
	Populations []*Population `json:"populations,omitempty"`
}

// GeoHierarchy is a named collection of geographical areas.
type GeoHierarchy struct {
	Name     string     `json:"name"`
	GeoAreas []*GeoArea `json:"geoAreas,omitempty"`
}

// Population links a geographical area to a time period.
type Population struct {
	GeoAreaID    string `json:"geoAreaID"`
	ExtCode      string `json:"extCode"`
	TimePeriodID string `json:"timePeriodID"`
}

// TimePeriod is a named date range with a reference to its time type.
type TimePeriod struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	StartDate  string `json:"startDate"`
	EndDate    string `json:"endDate"`
	TimeTypeID string `json:"timeTypeID"`
}

// TimeType identifies a kind of time period (see the header comments:
// e.g. financial year vs calendar year).
type TimeType struct {
	ID string `json:"id"`
}

// Metadata holds the descriptive and publication metadata attached to
// datasets, resources and geographical areas.
type Metadata struct {
	Description        string         `json:"description,omitempty"`
	Taxonomies         []string       `json:"taxonomies,omitempty"`
	Contact            *Contact       `json:"contact"`
	ReleaseDate        string         `json:"releaseDate"`
	NextRelease        string         `json:"nextReleaseDate"`
	NationalStatistics bool           `json:"nationalStatistics"` // whether these are official National Statistics
	Publications       []string       `json:"associatedPublications"`
	Methodology        []*Methodology `json:"methodology"`
	TermsAndConditions string         `json:"termsAndConditions,omitempty"`
}

// Contact is the publication contact for a piece of metadata.
type Contact struct {
	Name  string `json:"name"`
	Email string `json:"email,omitempty"`
	Phone string `json:"phone,omitempty"`
}

// Methodology is a titled link to a methodology document.
type Methodology struct {
	Title string `json:"title"`
	URL   string `json:"url"`
}

// Dimension describes one axis of a dataset and its available options.
type Dimension struct {
	ID             string             `json:"id"`
	Name           string             `json:"name"` // eg.: Sex
	Type           string             `json:"type"` // standard, classification, time, geography
	URL            string             `json:"url"`
	Hierarchical   bool               `json:"hierarchical"`
	Options        []*DimensionOption `json:"options,omitempty"`
	SelectedOption *DimensionOption   `json:"selectedOption,omitempty"`
}

// DimensionOption is one value of a dimension; nested Options support
// hierarchical dimensions.
type DimensionOption struct {
	ID      string             `json:"id"`
	Name    string             `json:"name"` // Male
	Options []*DimensionOption `json:"options,omitempty"`
}

// Hierarchy is a typed tree of hierarchy entries.
type Hierarchy struct {
	ID      string            `json:"id"`
	Name    string            `json:"name"`
	Type    string            `json:"type"`
	Options []*HierarchyEntry `json:"options,omitempty"`
}

// HierarchyEntry is a node in a Hierarchy tree.
type HierarchyEntry struct {
	ID       string              `json:"id"`
	Name     string              `json:"name"`
	LevelType *HierarchyLevelType `json:"levelType,omitempty"`
	HasData  bool                `json:"hasData,omitempty"` // used only for sparsely populated hierarchy
	Options  []*HierarchyEntry   `json:"options,omitempty"`
}

// HierarchyLevelType labels the depth/kind of a hierarchy level.
type HierarchyLevelType struct {
	Code  string `json:"code"`
	Name  string `json:"name"`
	Level int    `json:"level"`
}

// Row is a single observation and the dimension options it applies to.
type Row struct {
	Observation interface{}        // 123
	Dimensions  []*DimensionOption // Sex=Male
}

// Table is the observation table of a dataset.
type Table struct {
	Rows []*Row
} | models/dataset.go | 0.678859 | 0.54056 | dataset.go | starcoder
package pt
import (
"math"
"math/rand"
)
// Camera is a perspective camera with optional depth of field.
// p is the eye position; u, v, w form the right/up/forward basis built
// by LookAt; m = 1/tan(fovy/2) scales the forward axis for the field
// of view. focalDistance and apertureRadius are set by SetFocus and
// enable depth-of-field sampling in CastRay.
type Camera struct {
	p, u, v, w     Vector
	m              float64
	focalDistance  float64
	apertureRadius float64
}
// LookAt builds a Camera at eye looking towards center. up orients the
// camera vertically and fovy is the vertical field of view in degrees.
func LookAt(eye, center, up Vector, fovy float64) Camera {
	forward := center.Sub(eye).Normalize()
	right := up.Cross(forward).Normalize()
	return Camera{
		p: eye,
		w: forward,
		u: right,
		v: forward.Cross(right).Normalize(),
		// 1/tan(fovy/2), with fovy converted from degrees to radians.
		m: 1 / math.Tan(fovy*math.Pi/360),
	}
}
// SetFocus enables depth of field: primary rays are focused at
// focalPoint and jittered across an aperture of the given radius.
func (c *Camera) SetFocus(focalPoint Vector, apertureRadius float64) {
	c.apertureRadius = apertureRadius
	c.focalDistance = focalPoint.Sub(c.p).Length()
}
// CastRay builds the primary ray through pixel (x, y) of a w×h image.
// u and v are sub-pixel jitter offsets in [0, 1); rnd supplies the
// randomness for depth-of-field aperture sampling.
func (c *Camera) CastRay(x, y, w, h int, u, v float64, rnd *rand.Rand) Ray {
	aspect := float64(w) / float64(h)
	// Normalized device coordinates in [-1, 1].
	px := ((float64(x)+u-0.5)/(float64(w)-1))*2 - 1
	py := ((float64(y)+v-0.5)/(float64(h)-1))*2 - 1
	d := c.u.MulScalar(-px * aspect).
		Add(c.v.MulScalar(-py)).
		Add(c.w.MulScalar(c.m)).
		Normalize()
	p := c.p
	if c.apertureRadius > 0 {
		// Depth of field: pick a random point on the aperture disk and
		// re-aim the ray so that all rays converge at the focal point.
		focalPoint := c.p.Add(d.MulScalar(c.focalDistance))
		angle := rnd.Float64() * 2 * math.Pi
		radius := rnd.Float64() * c.apertureRadius
		p = p.Add(c.u.MulScalar(math.Cos(angle) * radius))
		p = p.Add(c.v.MulScalar(math.Sin(angle) * radius))
		d = focalPoint.Sub(p).Normalize()
	}
	return Ray{p, d}
}
// OrthogonalCamera implements a simple orthogonal camera.
// pos is the camera center, dir the (shared) ray direction, up/right
// the orthonormal image-plane axes, and width the image width in
// world units.
type OrthogonalCamera struct {
	up, right, pos, dir Vector
	width               float64
}
// OrthoLookAt sets up a new orthogonal camera with its center at
// location, pointing at target (i.e. target is centered in the image).
// up defines the up direction and width the width of the image.
func OrthoLookAt(location, up, target Vector, width float64) OrthogonalCamera {
	dir := target.Sub(location).Normalize()
	cam := OrthogonalCamera{
		pos:   location,
		dir:   dir,
		up:    up.Normalize(),
		width: width,
	}
	// Re-orthogonalize the basis: right from dir×up, then up from
	// dir×right so up is exactly perpendicular to dir.
	cam.right = dir.Cross(cam.up).Normalize()
	cam.up = dir.Cross(cam.right).Normalize()
	return cam
}
// CalculateOrthoSize computes width and height that would be needed by the camera to record an image that covers
// all given points in front of the camera. points behind the camera are ignored
// width can either be adjusted to fit the desired aspect ratio or the aspect ratio can be choosen to fit the size
func CalculateOrthoSize(location, up, target Vector, points ...Vector) (w, h float64) {
	// create camera to have accurate precomputed values
	oc := OrthoLookAt(location, up, target, 1.0)
	// max values
	dir := oc.dir.MulScalar(-1)
	for _, p := range points {
		// t is the signed distance from p to the camera plane along -dir.
		t := oc.pos.Sub(p).Dot(dir)
		if t < 0 {
			// we can not work with points behind the plane
			continue
		}
		// compute point in plane
		pp := p.Add(dir.MulScalar(t))
		// we are only interested in the absolute since we keep the image symetric
		// NOTE(review): u and v are dot products against pp's absolute
		// position, not pp relative to oc.pos — for a camera away from
		// the origin this looks off; confirm intended behavior.
		u := math.Abs(oc.up.Dot(pp))
		v := math.Abs(oc.right.Dot(pp))
		w = math.Max(w, v)
		h = math.Max(h, u)
	}
	// since we calculated distance from the center, this is both
	return w * 2, h * 2
}
// CastRay implements the RenderCamera interface and creates the ray used for rendering.
// (x, y) is the pixel, (w, h) the image size and (u, v) a sub-pixel
// offset. All rays share the camera direction; only the origin is
// translated across the image plane, centered on the camera position.
func (oc *OrthogonalCamera) CastRay(x, y, w, h int, u, v float64, rnd *rand.Rand) Ray {
	wf, xf := float64(w), float64(x)
	hf, yf := float64(h), float64(y)
	// size is the world-space extent of one pixel.
	size := oc.width / wf
	right := oc.right.MulScalar(size * (xf - wf/2 + u))
	up := oc.up.MulScalar(size * (yf - hf/2 + v))
	return Ray{
		Origin:    oc.pos.Add(up).Add(right),
		Direction: oc.dir,
	}
} | pt/camera.go | 0.857619 | 0.657353 | camera.go | starcoder
package expect
import (
	"fmt"
	"path/filepath"
	"reflect"
	"regexp"
	"runtime"
	"strings"
	"testing"
)
// Expectation is an expectation to be tested.
type Expectation struct {
	value interface{} // the value under test
	test  *testing.T  // sink for assertion failures
}

// NegativeExpectation is a negative expectation to be tested; it is
// produced by Expectation.Not and inverts each assertion.
type NegativeExpectation struct {
	value interface{}
	test  *testing.T
}

// expectationBuilder binds a *testing.T so many expectations can be
// created without repeating it.
type expectationBuilder struct {
	test *testing.T
}

// New returns a new expectationBuilder,
// useful if a test suite contains many assertions.
func New(t *testing.T) *expectationBuilder {
	return &expectationBuilder{t}
}

// Expect returns a new Expectation bound to the builder's *testing.T.
func (e *expectationBuilder) Expect(val interface{}) *Expectation {
	return Expect(val, e.test)
}
// ToEqual expects the two values to be equal. Comparison uses
// reflect.DeepEqual so slices, maps and structs are compared by
// content; the previous plain != comparison panicked at runtime for
// uncomparable types such as slices and maps.
func (e *Expectation) ToEqual(val interface{}) {
	if !reflect.DeepEqual(e.value, val) {
		e.test.Errorf("%+v should be equal to %+v", e.value, val)
	}
}
// ToPanic expects the wrapped value (a func()) to panic when executed.
func (e *Expectation) ToPanic() {
	defer func() {
		// recover() returns nil when no panic occurred.
		if recover() == nil {
			e.test.Errorf("%v should panic", e.value)
		}
	}()
	e.value.(func())()
}
// ToBe expects 2 values to be equal; it is an alias for ToEqual.
func (e *Expectation) ToBe(val interface{}) {
	e.ToEqual(val)
}

// ToMatch expects a value (a string) to match a regular expression.
func (e *Expectation) ToMatch(val string) {
	if match, err := regexp.MatchString(val, e.value.(string)); err != nil {
		e.test.Error(err)
	} else if match == false {
		e.test.Errorf("%+v should match to %+v", e.value, val)
	}
}

// ToBeNil expects a value to be nil.
// NOTE(review): an interface holding a typed nil pointer is non-nil in
// Go, so ToBeNil fails for values like (*T)(nil).
func (e *Expectation) ToBeNil() {
	if e.value != nil {
		e.test.Errorf("%+v should be nil", e.value)
	}
}

// ToBeTrue expects a value to be true.
// The type assertion panics if the wrapped value is not a bool.
func (e *Expectation) ToBeTrue() {
	if e.value.(bool) != true {
		e.test.Errorf("%+v should be true", e.value)
	}
}

// ToBeFalse expects a value to be false.
// The type assertion panics if the wrapped value is not a bool.
func (e *Expectation) ToBeFalse() {
	if e.value.(bool) != false {
		e.test.Errorf("%+v should be false", e.value)
	}
}

// ToContain expects a string to be a substring of value.
func (e *Expectation) ToContain(word string) {
	if strings.Contains(e.value.(string), word) == false {
		e.test.Errorf("%+v should contain %+v", e.value, word)
	}
}
// ToBeLessThan expects value to be strictly less than number. Both
// values are coerced to float64 via toFloat64 before comparison.
// Exported so library consumers can use it — every sibling assertion
// is exported, but this one was accidentally left unexported.
func (e *Expectation) ToBeLessThan(number interface{}) {
	e.toBeLessThan(number)
}

// toBeLessThan is the legacy unexported variant, kept so any existing
// in-package callers continue to work; prefer ToBeLessThan.
// (The error message typo "less then" is also fixed here.)
func (e *Expectation) toBeLessThan(number interface{}) {
	if toFloat64(e.value) >= toFloat64(number) {
		e.test.Errorf("%+v should be less than %+v", e.value, number)
	}
}
// ToBeGreaterThan expects value to be strictly greater than number.
// Both values are coerced to float64 via toFloat64 before comparison.
func (e *Expectation) ToBeGreaterThan(number interface{}) {
	if toFloat64(e.value) <= toFloat64(number) {
		e.test.Errorf("%+v should greater than %+v", e.value, number)
	}
}

// Not reverses expectations: the returned NegativeExpectation fails
// exactly when the corresponding positive assertion would pass.
func (e *Expectation) Not() *NegativeExpectation {
	return &NegativeExpectation{e.value, e.test}
}
// ToEqual expects the two values not to be equal.
func (e *NegativeExpectation) ToEqual(val interface{}) {
	if e.value == val {
		e.test.Errorf("%+v should not be equal to %+v", e.value, val)
	}
}

// ToBe expects the two values not to be equal; alias for ToEqual.
func (e *NegativeExpectation) ToBe(val interface{}) {
	e.ToEqual(val)
}

// ToMatch expects the value not to match the regular expression.
func (e *NegativeExpectation) ToMatch(val string) {
	if match, err := regexp.MatchString(val, e.value.(string)); err != nil {
		e.test.Error(err)
	} else if match == true {
		e.test.Errorf("%+v should not match to %+v", e.value, val)
	}
}

// ToBeNil expects the value not to be nil.
func (e *NegativeExpectation) ToBeNil() {
	if e.value == nil {
		e.test.Errorf("%+v should not be nil", e.value)
	}
}

// ToBeTrue expects the value (a bool) not to be true.
func (e *NegativeExpectation) ToBeTrue() {
	if e.value.(bool) == true {
		e.test.Errorf("%+v should not be true", e.value)
	}
}

// ToBeFalse expects the value (a bool) not to be false.
func (e *NegativeExpectation) ToBeFalse() {
	if e.value.(bool) == false {
		e.test.Errorf("%+v should not be false", e.value)
	}
}

// ToContain expects word not to be a substring of the value.
func (e *NegativeExpectation) ToContain(word string) {
	if strings.Contains(e.value.(string), word) == true {
		e.test.Errorf("%+v should not contain %+v", e.value, word)
	}
}
// ToBeLessThan expects value not to be less than number. Exported for
// consistency with the other assertions, and it takes interface{} like
// its positive counterpart (the old unexported variant took float64).
func (e *NegativeExpectation) ToBeLessThan(number interface{}) {
	if toFloat64(e.value) < toFloat64(number) {
		e.test.Errorf("%+v should not be less than %+v", e.value, number)
	}
}

// toBeLessThan is the legacy unexported variant, kept so any existing
// in-package callers continue to work; it delegates to ToBeLessThan.
func (e *NegativeExpectation) toBeLessThan(number float64) {
	e.ToBeLessThan(number)
}
// ToBeGreaterThan expects value not to be greater than number.
func (e *NegativeExpectation) ToBeGreaterThan(number interface{}) {
	if toFloat64(e.value) > toFloat64(number) {
		e.test.Errorf("%+v should not be greater than %+v", e.value, number)
	}
}

// ToPanic expects a function not to panic when executed.
func (e *NegativeExpectation) ToPanic() {
	defer func() {
		if err := recover(); err != nil {
			e.test.Errorf("%+v should not panic", e.value)
		}
	}()
	e.value.(func())()
}

// Expect returns a new Expectation,
// useful if a test suite contains only one assertion.
func Expect(val interface{}, t *testing.T) *Expectation {
	return &Expectation{val, t}
}
// Equal is a helper used to reduce the boilerplate during tests: it
// fails fatally when got differs from want, reporting the caller's
// file and line. comments[0], when provided, overrides the default
// "Expect" label.
// NOTE(review): the format uses \r rather than \n as a separator, and
// comparing interfaces with != panics for uncomparable types (slices,
// maps) — confirm both are intentional.
func Equal(t *testing.T, got, want interface{}, comments ...string) {
	var comment string
	if want != got {
		if len(comments) > 0 {
			comment = comments[0]
		} else {
			comment = "Expect"
		}
		// The inner Sprintf builds a format string that still contains
		// the %v verbs, which Fatalf then fills with want and got.
		_, file, line, _ := runtime.Caller(1)
		t.Fatalf(fmt.Sprintf("Expect\r%s:%d:\r\t%s : %s", filepath.Base(file), line, comment, "want '%v' got '%v'."), want, got)
	}
} | expect.go | 0.653901 | 0.602734 | expect.go | starcoder
package arg
import (
"encoding"
"fmt"
"reflect"
"unicode"
"unicode/utf8"
scalar "github.com/alexflint/go-scalar"
)
// textUnmarshalerType is the reflect.Type of encoding.TextUnmarshaler,
// used to detect types that provide their own text parsing.
var textUnmarshalerType = reflect.TypeOf([]encoding.TextUnmarshaler{}).Elem()

// cardinality tracks how many tokens are expected for a given spec
// - zero is a boolean, which does not expect any value
// - one is an ordinary option that will be parsed from a single token
// - multiple is a slice or map that can accept zero or more tokens
type cardinality int

const (
	zero cardinality = iota
	one
	multiple
	unsupported
)
// String returns the human-readable name of the cardinality value, or
// "unknown(n)" for values outside the defined range.
func (k cardinality) String() string {
	names := [...]string{
		zero:        "zero",
		one:         "one",
		multiple:    "multiple",
		unsupported: "unsupported",
	}
	if k >= 0 && int(k) < len(names) {
		return names[k]
	}
	return fmt.Sprintf("unknown(%d)", int(k))
}
// cardinalityOf reports how many command-line tokens a value of type t
// consumes: zero for booleans, one for other scalars, multiple for
// slices and maps of scalars. It returns unsupported plus an error for
// anything else. (The previous doc comment wrongly said it "returns
// true".)
func cardinalityOf(t reflect.Type) (cardinality, error) {
	if scalar.CanParse(t) {
		if isBoolean(t) {
			return zero, nil
		}
		return one, nil
	}

	// look inside pointer types
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}

	// look inside slice and map types
	switch t.Kind() {
	case reflect.Slice:
		if !scalar.CanParse(t.Elem()) {
			return unsupported, fmt.Errorf("cannot parse into %v because %v not supported", t, t.Elem())
		}
		return multiple, nil
	case reflect.Map:
		if !scalar.CanParse(t.Key()) {
			// Bug fix: report t.Key() here — the key type is what failed,
			// but the message previously printed t.Elem().
			return unsupported, fmt.Errorf("cannot parse into %v because key type %v not supported", t, t.Key())
		}
		if !scalar.CanParse(t.Elem()) {
			return unsupported, fmt.Errorf("cannot parse into %v because value type %v not supported", t, t.Elem())
		}
		return multiple, nil
	default:
		return unsupported, fmt.Errorf("cannot parse into %v", t)
	}
}
// isBoolean reports whether t is a bool or a pointer to bool that does
// not implement encoding.TextUnmarshaler (such types get zero tokens).
func isBoolean(t reflect.Type) bool {
	if t.Implements(textUnmarshalerType) {
		return false
	}
	kind := t.Kind()
	if kind == reflect.Ptr {
		kind = t.Elem().Kind()
	}
	return kind == reflect.Bool
}
// isExported reports whether the struct field name is exported, i.e.
// begins with an upper-case letter. DecodeRuneInString yields
// utf8.RuneError for an empty string or invalid UTF-8, which is not a
// letter, so those cases return false.
func isExported(field string) bool {
	first, _ := utf8.DecodeRuneInString(field)
	if !unicode.IsLetter(first) {
		return false
	}
	return unicode.IsUpper(first)
}
// isZero returns true if v contains the zero value for its type.
// Slices and maps are zero when nil; other uncomparable types always
// report false because they cannot be compared against the zero value.
func isZero(v reflect.Value) bool {
	t := v.Type()
	if t.Kind() == reflect.Slice || t.Kind() == reflect.Map {
		return v.IsNil()
	}
	if !t.Comparable() {
		return false
	}
	return v.Interface() == reflect.Zero(t).Interface()
} | reflect.go | 0.707708 | 0.422624 | reflect.go | starcoder
package neuralnet
import (
"encoding/json"
"fmt"
"github.com/gookit/color"
"gopkg.in/cheggaaa/pb.v1"
"math/rand"
"os"
"time"
)
// NeuralNetwork is a three-layer (input/hidden/output) feed-forward
// network trained by backpropagation with momentum. The input and
// hidden layers each carry one extra bias node (see CreateNetwork and
// FeedForward). Rate1 is the learning rate and Rate2 the momentum
// coefficient (see the weight-update formulas in FeedBack).
type NeuralNetwork struct {
	HiddenLayer      []float64   // hidden-layer activations (last element is the bias node)
	InputLayer       []float64   // input values (last element is the bias node)
	OutputLayer      []float64   // output activations (linear — no sigmoid applied)
	WeightHidden     [][]float64 // input→hidden weights, [hidden][input]
	WeightOutput     [][]float64 // hidden→output weights, [output][hidden]
	ErrOutput        []float64   // per-output error from the last FeedBack
	ErrHidden        []float64   // per-hidden-node back-propagated error
	LastChangeHidden [][]float64 // previous hidden-weight deltas (for momentum)
	LastChangeOutput [][]float64 // previous output-weight deltas (for momentum)
	Rate1            float64     // learning rate
	Rate2            float64     // momentum factor
}
// LoadNeuralNetwork reads a JSON-serialized network (as written by
// Save) from fileName. It panics if the file cannot be opened or the
// JSON cannot be decoded.
func LoadNeuralNetwork(fileName string) *NeuralNetwork {
	inF, err := os.Open(fileName)
	if err != nil {
		// Include the underlying error instead of discarding it — the
		// old message hid whether the file was missing, unreadable, etc.
		panic("failed to load " + fileName + ": " + err.Error())
	}
	defer inF.Close()

	neuralNetwork := &NeuralNetwork{}
	if err := json.NewDecoder(inF).Decode(neuralNetwork); err != nil {
		panic(err)
	}
	return neuralNetwork
}
// CreateNetwork returns a new network whose layers have the given
// number of input, hidden and output nodes, with rate1 as the learning
// rate and rate2 as the momentum factor.
func CreateNetwork(input, hidden, output int, rate1, rate2 float64) *NeuralNetwork {
	// One extra node per layer serves as the bias node.
	input, hidden = input+1, hidden+1
	rand.Seed(time.Now().UnixNano())
	nn := &NeuralNetwork{
		Rate1:            rate1,
		Rate2:            rate2,
		InputLayer:       make([]float64, input),
		HiddenLayer:      make([]float64, hidden),
		OutputLayer:      make([]float64, output),
		ErrOutput:        make([]float64, output),
		ErrHidden:        make([]float64, hidden),
		WeightHidden:     RandomMatrix(hidden, input, -1.0, 1.0),
		WeightOutput:     RandomMatrix(output, hidden, -1.0, 1.0),
		LastChangeHidden: MakeMatrix(hidden, input, 0.0),
		LastChangeOutput: MakeMatrix(output, hidden, 0.0),
	}
	return nn
}
// Save serializes the network as JSON into fileName, replacing any
// previous content. It panics on I/O or encoding failure.
func (neuralNetwork NeuralNetwork) Save(fileName string) {
	// O_TRUNC is required: without it, writing a smaller network over a
	// larger existing file left trailing bytes of the old JSON behind,
	// producing a corrupt file that LoadNeuralNetwork could not decode.
	// NOTE(review): 0777 is unusually permissive for a data file —
	// consider 0644 (kept as-is to avoid changing observable behavior).
	outF, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0777)
	if err != nil {
		panic("failed to dump the network to " + fileName + ": " + err.Error())
	}
	defer outF.Close()

	if err := json.NewEncoder(outF).Encode(neuralNetwork); err != nil {
		panic(err)
	}
}
// FeedForward makes forward propagation for a single input.
// It copies input into the input layer, sets the bias nodes to 1,
// applies the hidden weights plus a sigmoid, then the output weights.
// Note the output layer is left linear: no activation is applied to it.
// Panics if len(input) does not match the network's input size.
func (neuralNetwork *NeuralNetwork) FeedForward(input []float64) (output []float64) {
	if len(input)+1 != len(neuralNetwork.InputLayer) {
		panic("amount of input variable doesn't match")
	}
	for i, inputValue := range input {
		neuralNetwork.InputLayer[i] = inputValue
	}
	neuralNetwork.InputLayer[len(neuralNetwork.InputLayer)-1] = 1.0 // Bias node for input layer

	// Apply weights on the input layer to give the hidden layer
	hiddenLayer := ApplyWeights(
		len(neuralNetwork.HiddenLayer)-1,
		neuralNetwork.InputLayer,
		neuralNetwork.WeightHidden,
	)
	neuralNetwork.HiddenLayer = ApplyFunc(hiddenLayer, Sigmoid)
	neuralNetwork.HiddenLayer[len(neuralNetwork.HiddenLayer)-1] = 1.0 // Bias node for hidden layer

	// Apply weights on the hidden layer to give the output layer
	neuralNetwork.OutputLayer = ApplyWeights(
		len(neuralNetwork.OutputLayer),
		neuralNetwork.HiddenLayer,
		neuralNetwork.WeightOutput,
	)
	return neuralNetwork.OutputLayer[:]
}
// FeedBack makes back propagation for a single target.
// It must be called after FeedForward so the layer activations are
// populated. Weight updates use learning rate Rate1 plus momentum
// Rate2 times the previous change.
// NOTE(review): ErrOutput uses the raw (output - target) difference
// with no activation derivative, consistent with the linear output
// units produced by FeedForward.
func (neuralNetwork *NeuralNetwork) FeedBack(target []float64) {
	// Insert output errors in the array
	for i := 0; i < len(neuralNetwork.OutputLayer); i++ {
		neuralNetwork.ErrOutput[i] = neuralNetwork.OutputLayer[i] - target[i]
	}

	// Calculate the errors in the hidden layer (bias node excluded)
	for i := 0; i < len(neuralNetwork.HiddenLayer)-1; i++ {
		err := 0.0
		for j := 0; j < len(neuralNetwork.OutputLayer); j++ {
			err += neuralNetwork.ErrOutput[j] * neuralNetwork.WeightOutput[j][i]
		}
		neuralNetwork.ErrHidden[i] = err
	}

	// Apply the changes to the output weights
	for i := 0; i < len(neuralNetwork.OutputLayer); i++ {
		for j := 0; j < len(neuralNetwork.HiddenLayer); j++ {
			delta := neuralNetwork.ErrOutput[i]
			change := neuralNetwork.Rate1*delta*neuralNetwork.HiddenLayer[j] +
				neuralNetwork.Rate2*neuralNetwork.LastChangeOutput[i][j]
			neuralNetwork.WeightOutput[i][j] -= change
			neuralNetwork.LastChangeOutput[i][j] = change
		}
	}

	// Apply the changes to the hidden weights, scaling by the sigmoid
	// derivative of each hidden activation
	for i := 0; i < len(neuralNetwork.HiddenLayer)-1; i++ {
		for j := 0; j < len(neuralNetwork.InputLayer); j++ {
			delta := neuralNetwork.ErrHidden[i] * SigmoidDerivative(neuralNetwork.HiddenLayer[i])
			change := neuralNetwork.Rate1*delta*neuralNetwork.InputLayer[j] +
				neuralNetwork.Rate2*neuralNetwork.LastChangeHidden[i][j]
			neuralNetwork.WeightHidden[i][j] -= change
			neuralNetwork.LastChangeHidden[i][j] = change
		}
	}
}
// CalculateError returns the summed squared error between the current
// output layer and the target vector: sum of 0.5*(out-target)^2.
func (neuralNetwork *NeuralNetwork) CalculateError(target []float64) float64 {
	var sum float64
	for i, out := range neuralNetwork.OutputLayer {
		diff := out - target[i]
		sum += 0.5 * diff * diff
	}
	return sum
}
// RandomIndexes returns the integers [0, length) in uniformly random
// order; Train uses it to shuffle the sample order each epoch.
// The previous hand-rolled Fisher–Yates shuffle (driven by
// rand.Float64) is exactly what rand.Perm implements, so delegate to
// the standard library.
func RandomIndexes(length int) []int {
	return rand.Perm(length)
}
// Train runs `iterations` epochs of stochastic training: each epoch
// shuffles the sample order and runs FeedForward + FeedBack per sample,
// showing a progress bar. The reported error rate is the mean squared
// error accumulated over the final epoch only.
// Panics if input or target widths do not match the network layers.
func (neuralNetwork *NeuralNetwork) Train(inputs [][]float64, targets [][]float64, iterations int) {
	if len(inputs[0])+1 != len(neuralNetwork.InputLayer) {
		panic("The amount of input variable doesn't match.")
	}
	if len(targets[0]) != len(neuralNetwork.OutputLayer) {
		panic("The amount of output variable doesn't match.")
	}

	blue := color.FgBlue.Render
	bar := pb.New(iterations).Postfix(fmt.Sprintf(" - %s", blue("Creating the neural network")))
	bar.Format("(██  )")
	bar.SetMaxWidth(60)
	bar.ShowCounters = false
	bar.Start()

	currentError := 0.0
	for i := 0; i < iterations; i++ {
		indexesArray := RandomIndexes(len(inputs))
		for j := 0; j < len(inputs); j++ {
			neuralNetwork.FeedForward(inputs[indexesArray[j]])
			neuralNetwork.FeedBack(targets[indexesArray[j]])
			// Sum the error to the current error (final epoch only)
			if i == iterations-1 {
				currentError += neuralNetwork.CalculateError(targets[indexesArray[j]])
			}
		}
		// Increment the progress bar
		bar.Increment()
	}
	bar.Finish()

	arrangedError := fmt.Sprintf("%.5f", currentError/float64(len(inputs)))
	// NOTE(review): the variable is named red but renders green.
	red := color.FgGreen.Render
	fmt.Printf("The error rate is %s.", red(arrangedError))
} | neuralnet/neuralnet.go | 0.644337 | 0.430746 | neuralnet.go | starcoder
package main
import (
"bufio"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
)
const (
	// Up is the north direction in the grid.
	Up = iota
	// Right is the east direction in the grid.
	Right
	// Down is the south direction in the grid.
	Down
	// Left is the west direction in the grid.
	Left
)

// Step represent a path component.
type Step struct {
	Direction uint8 // Direction is either Up, Right, Down or Left.
	Count     int64 // Count is the step's number of port.
}

// Path represent a circuit wire connection description.
type Path []Step

// Point represent a position in the grid.
// y grows Up, x grows Right, the zero value is the central port.
type Point struct {
	x, y int64
}

// Segment are straight connections between two points.
// xmin/xmax and ymin/ymax are the precomputed bounding box, and steps
// the Manhattan length of the segment (see NewSegment).
type Segment struct {
	From, To   Point
	xmin, xmax int64
	ymin, ymax int64
	steps      int64
}

// Wire represent a wire connected into the grid from the central port.
type Wire []*Segment

// CentralPort returns the point in the grid where from where all wire
// connections begins.
func CentralPort() Point {
	// By convention, the central port is the Point zero value.
	return Point{x: 0, y: 0}
}

// Add returns p + other (component-wise vector addition).
func (p Point) Add(other Point) Point {
	return Point{x: p.x + other.x, y: p.y + other.y}
}

// Distance compute and returns the Manhattan distance between two points.
func (p Point) Distance(other Point) int64 {
	return abs(p.x-other.x) + abs(p.y-other.y)
}
// NewSegment creates a Segment between from and to, precomputing its
// axis-aligned bounding box and its Manhattan step length.
func NewSegment(from, to Point) *Segment {
	seg := &Segment{From: from, To: to, steps: from.Distance(to)}
	seg.xmin, seg.xmax = from.x, to.x
	if seg.xmin > seg.xmax {
		seg.xmin, seg.xmax = seg.xmax, seg.xmin
	}
	seg.ymin, seg.ymax = from.y, to.y
	if seg.ymin > seg.ymax {
		seg.ymin, seg.ymax = seg.ymax, seg.ymin
	}
	return seg
}
// IntersectWith check if the other Segment and seg share a point. If they do,
// the intersection point and true is returned. Otherwise the Point zero value
// and false is returned.
// NOTE(review): the two cases appear to test a vertical segment
// crossing a horizontal one (and vice versa) via bounding boxes; this
// assumes axis-aligned segments, and collinear overlapping segments do
// not report an intersection — confirm acceptable for this input.
func (seg *Segment) IntersectWith(other *Segment) (Point, bool) {
	switch {
	case seg.xmin <= other.xmin && seg.xmax >= other.xmin &&
		seg.ymin <= other.ymax && seg.ymin >= other.ymin:
		return Point{x: other.xmin, y: seg.ymin}, true
	case other.xmin <= seg.xmin && other.xmax >= seg.xmin &&
		other.ymin <= seg.ymax && other.ymin >= seg.ymin:
		return Point{x: seg.xmin, y: other.ymin}, true
	}
	return Point{}, false
}
// NewWire places a given wire path into the grid and returns the
// resulting Wire: one Segment per step, chained from the central port.
func NewWire(path Path) Wire {
	var wire Wire
	pos := CentralPort()
	for _, step := range path {
		next := pos
		switch step.Direction {
		case Up:
			next.y += step.Count
		case Right:
			next.x += step.Count
		case Down:
			next.y -= step.Count
		case Left:
			next.x -= step.Count
		}
		wire = append(wire, NewSegment(pos, next))
		pos = next
	}
	return wire
}
// Connect link a couple of wire on the grid. It returns the the Manhattan
// distance from the central port to the closest intersection (md) and the
// fewest combined steps the wires must take to reach an intersection (ms).
// Both results are -1 when the wires never intersect (other than at the
// central port, which is excluded).
func Connect(a, b Wire) (md, ms int64) {
	md = -1
	ms = -1
	cp := CentralPort()
	// astep/bstep accumulate the wire length walked before the current
	// segment, so steps-to-intersection is prefix + distance within the
	// segment.
	var astep int64 = 0
	for _, aseg := range a {
		var bstep int64 = 0
		for _, bseg := range b {
			// we omit the intersection at the central port, hence p != cp.
			if p, ok := aseg.IntersectWith(bseg); ok && p != cp {
				// min distance
				d := cp.Distance(p)
				if md == -1 || d < md {
					md = d
				}
				// min combined step
				s := astep + aseg.From.Distance(p) +
					bstep + bseg.From.Distance(p)
				if ms == -1 || s < ms {
					ms = s
				}
			}
			bstep += bseg.steps
		}
		astep += aseg.steps
	}
	return
}
// main compute and display the Manhattan distance from the central port to the
// closest intersection of the wires description given on stdin, along
// with the fewest combined steps to reach an intersection.
// Exactly two wire paths are expected (one per line).
func main() {
	paths, err := Parse(os.Stdin)
	if err != nil {
		log.Fatalf("input error: %s\n", err)
	}
	fst := NewWire(paths[0])
	snd := NewWire(paths[1])
	md, ms := Connect(fst, snd)
	fmt.Printf("The Manhattan distance fron the central port to the closest intersection is %v,\n", md)
	fmt.Printf("and the fewest combined steps the wires must take to reach an intersection is %v.\n", ms)
}
// Parse reads wire path descriptions, one per line, each a comma
// separated list of steps. It returns the parsed paths and any read or
// parsing error encountered.
func Parse(r io.Reader) ([]Path, error) {
	var paths []Path
	sc := bufio.NewScanner(r)
	for sc.Scan() {
		var path Path
		for _, token := range strings.Split(sc.Text(), ",") {
			step, err := parseStep(token)
			if err != nil {
				return nil, err
			}
			path = append(path, step)
		}
		paths = append(paths, path)
	}
	if err := sc.Err(); err != nil {
		return nil, err
	}
	return paths, nil
}
// parseStep is a parsing helper for Parse. It parses a single step
// such as "U12" (direction letter followed by a port count) and
// returns it with any parsing error encountered.
func parseStep(s string) (Step, error) {
	var step Step
	// the smallest valid step is a letter plus one digit, e.g. U1
	if len(s) < 2 {
		return step, fmt.Errorf("step too short: %s", s)
	}
	switch s[0] {
	case 'U':
		step.Direction = Up
	case 'R':
		step.Direction = Right
	case 'D':
		step.Direction = Down
	case 'L':
		step.Direction = Left
	default:
		return step, fmt.Errorf("unrecognized direction: %c", s[0])
	}
	count, err := strconv.ParseUint(s[1:], 10, 63) // 63 bits so the value fits in int64
	if err != nil {
		return step, err
	}
	step.Count = int64(count)
	return step, nil
}
// abs computes and returns the absolute value of n.
// Branch-free form: y = n>>63 is all-ones when n is negative and zero
// otherwise, so (n ^ y) - y negates n exactly when n < 0.
func abs(n int64) int64 {
	y := n >> 63
	return (n ^ y) - y
} | day03/main.go | 0.698946 | 0.529385 | main.go | starcoder
package onshape
import (
"encoding/json"
)
// BTGeometryFilter130AllOf struct for BTGeometryFilter130AllOf
type BTGeometryFilter130AllOf struct {
BtType *string `json:"btType,omitempty"`
GeometryType *string `json:"geometryType,omitempty"`
}
// NewBTGeometryFilter130AllOf instantiates a new BTGeometryFilter130AllOf object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTGeometryFilter130AllOf() *BTGeometryFilter130AllOf {
this := BTGeometryFilter130AllOf{}
return &this
}
// NewBTGeometryFilter130AllOfWithDefaults instantiates a new BTGeometryFilter130AllOf object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTGeometryFilter130AllOfWithDefaults() *BTGeometryFilter130AllOf {
this := BTGeometryFilter130AllOf{}
return &this
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTGeometryFilter130AllOf) GetBtType() string {
if o == nil || o.BtType == nil {
var ret string
return ret
}
return *o.BtType
}
// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTGeometryFilter130AllOf) GetBtTypeOk() (*string, bool) {
if o == nil || o.BtType == nil {
return nil, false
}
return o.BtType, true
}
// HasBtType returns a boolean if a field has been set.
func (o *BTGeometryFilter130AllOf) HasBtType() bool {
if o != nil && o.BtType != nil {
return true
}
return false
}
// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTGeometryFilter130AllOf) SetBtType(v string) {
o.BtType = &v
}
// GetGeometryType returns the GeometryType field value if set, zero value otherwise.
func (o *BTGeometryFilter130AllOf) GetGeometryType() string {
	if o == nil || o.GeometryType == nil {
		return ""
	}
	return *o.GeometryType
}

// GetGeometryTypeOk returns a tuple with the GeometryType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTGeometryFilter130AllOf) GetGeometryTypeOk() (*string, bool) {
	if o != nil && o.GeometryType != nil {
		return o.GeometryType, true
	}
	return nil, false
}

// HasGeometryType returns a boolean if a field has been set.
func (o *BTGeometryFilter130AllOf) HasGeometryType() bool {
	return o != nil && o.GeometryType != nil
}

// SetGeometryType gets a reference to the given string and assigns it to the GeometryType field.
func (o *BTGeometryFilter130AllOf) SetGeometryType(v string) {
	o.GeometryType = &v
}
// MarshalJSON serializes only the fields that have been set; nil pointer
// fields are omitted from the resulting JSON object entirely.
func (o BTGeometryFilter130AllOf) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.GeometryType != nil {
		toSerialize["geometryType"] = o.GeometryType
	}
	return json.Marshal(toSerialize)
}
// NullableBTGeometryFilter130AllOf wraps a BTGeometryFilter130AllOf pointer
// together with an explicit "set" flag, so an unset value can be told apart
// from an explicitly-set nil (which marshals to JSON null).
type NullableBTGeometryFilter130AllOf struct {
	value *BTGeometryFilter130AllOf
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableBTGeometryFilter130AllOf) Get() *BTGeometryFilter130AllOf {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableBTGeometryFilter130AllOf) Set(val *BTGeometryFilter130AllOf) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set (or unmarshalling) has been called.
func (v NullableBTGeometryFilter130AllOf) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableBTGeometryFilter130AllOf) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTGeometryFilter130AllOf returns a wrapper already marked as set.
func NewNullableBTGeometryFilter130AllOf(val *BTGeometryFilter130AllOf) *NullableBTGeometryFilter130AllOf {
	return &NullableBTGeometryFilter130AllOf{value: val, isSet: true}
}
// MarshalJSON emits the wrapped value (null when the value is nil).
func (v NullableBTGeometryFilter130AllOf) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableBTGeometryFilter130AllOf) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | onshape/model_bt_geometry_filter_130_all_of.go | 0.769254 | 0.469034 | model_bt_geometry_filter_130_all_of.go | starcoder |
package spry
import (
"github.com/tanema/amore/gfx"
)
// Atlas is the structure to serialize a spritesheet. You can retrieve and draw
// single frames from the atlas
type Atlas struct {
	sheet *gfx.Image // source spritesheet image
	Frames []*gfx.Quad // one quad per frame, in row-major order from the top-left
}
// NewAtlas will build and return a new Atlas. frameWidth and frameHeight are to define
// the size of each frame on the spritesheet. left and top define the offset from
// the top left corner if there is padding around the outside of the spritesheet.
// border defines the amount of pixels inbetween frames.
func NewAtlas(sheet *gfx.Image, frameWidth, frameHeight, left, top, border int32) *Atlas {
	sheetWidth, sheetHeight := sheet.Width, sheet.Height
	// Integer division truncates just like the previous float round-trip did
	// for non-negative dimensions, without the float conversions.
	framesHorizontal := sheetWidth / frameWidth
	framesVertical := sheetHeight / frameHeight
	atlas := &Atlas{
		sheet:  sheet,
		Frames: []*gfx.Quad{},
	}
	// Frames are appended row by row, so frame index = y*framesHorizontal + x.
	// NOTE(review): the frame count ignores left/top/border, so sheets with
	// padding may yield partially out-of-bounds quads on the last row/column —
	// this matches the original behavior.
	for y := int32(0); y < framesVertical; y++ {
		for x := int32(0); x < framesHorizontal; x++ {
			atlas.Frames = append(atlas.Frames, gfx.NewQuad(
				left+x*frameWidth+x*border,
				top+y*frameHeight+y*border,
				frameWidth, frameHeight,
				sheetWidth, sheetHeight,
			))
		}
	}
	return atlas
}
// NewAnimation will create and return a new animation using this atlas as reference.
// frames defines the frame indexes in the atlas. duration defines how long the animation
// should play for. looping will define id the animation should loop when it gets
// to the end.
func (atlas *Atlas) NewAnimation(frames []int, duration float32, looping bool) *Animation {
	return &Animation{
		atlas:         atlas,
		frames:        frames,
		duration:      duration,
		// Each frame gets an equal share of the total duration.
		frameDuration: duration / float32(len(frames)),
		looping:       looping,
		playing:       true, // animations start in the playing state
	}
}
// GetFrame will return a single frame for drawing, wrapped in a static
// (single-frame, non-playing) Animation.
func (atlas *Atlas) GetFrame(index int) *Animation {
	return &Animation{
		atlas:  atlas,
		frames: []int{index},
	}
}
// Draw will draw the frame at the index supplied. the provided arguments fit the
// amore draw interface. index is not bounds-checked; out-of-range values panic.
func (atlas *Atlas) Draw(index int, args ...float32) {
	gfx.Drawq(atlas.sheet, atlas.Frames[index], args...)
} | atlas.go | 0.761716 | 0.507873 | atlas.go | starcoder |
package canvas
import (
"fmt"
"image"
"image/draw"
"log"
"golang.org/x/image/colornames"
"gonum.org/v1/gonum/mat"
)
// Axes represents a Primitive with Figure as its parent.
type Axes struct {
	primitive
	Parent *Figure // owning figure; Axes registers itself as one of its children
}
// newAxes creates a new Axes linked to a parent Figure.
// dims must be exactly four values: origin x, origin y, width, height
// (in the parent's normalized coordinates); otherwise an error is returned.
func newAxes(parent *Figure, dims ...float64) (*Axes, error) {
	var o, s [2]float64
	switch l := len(dims); l {
	case 4:
		o = [2]float64{dims[0], dims[1]}
		s = [2]float64{dims[2], dims[3]}
	default:
		return &Axes{}, fmt.Errorf("Dimensions not valid")
	}
	var ax Axes
	ax.Parent = parent
	ax.Origin = o
	ax.Size = s
	// Affine transform (homogeneous 3x3): scale by the axes size and
	// translate to the axes origin.
	Tc := mat.NewDense(3, 3, []float64{
		s[0], 0, o[0],
		0, s[1], o[1],
		0, 0, 1,
	})
	// The axes' transform chain is the parent's chain followed by its own.
	ax.T = append(ax.T, parent.T...)
	ax.T = append(ax.T, Tc)
	ax.FillColor = colornames.White
	parent.children = append(parent.children, &ax)
	return &ax, nil
}
// minSlice returns the smallest value in s.
// It panics when s is empty, since no minimum exists.
func minSlice(s []float64) float64 {
	if len(s) <= 0 {
		// BUG FIX: the message previously said "max(s)".
		log.Panic("min(s) on an empty slice")
	}
	var m = s[0]
	for _, v := range s {
		if v < m {
			m = v
		}
	}
	return m
}
// maxSlice returns the largest value in s.
// It panics when s is empty, since no maximum exists.
func maxSlice(s []float64) float64 {
	if len(s) <= 0 {
		log.Panic("max(s) on an empty slice")
	}
	m := s[0]
	for _, v := range s[1:] {
		if m < v {
			m = v
		}
	}
	return m
}
// BarPlot creates a Bar chart inside Axes with X labels and Y values.
// X may be nil (no labels); when non-nil it must have the same length as Y.
func (ax *Axes) BarPlot(X []string, Y []float64) error {
	if X != nil {
		if len(X) != len(Y) {
			return fmt.Errorf(
				"Dimensions mismatch (X[%v] != Y[%v])",
				len(X), len(Y))
		}
	}
	// Scale bars so the tallest reaches 90% of the axes height.
	maxY := maxSlice(Y) / 0.9
	n := float64(len(Y))
	var padding = 0.1
	// Bar and gap widths chosen so n bars plus (n-1) half-width gaps fill
	// the horizontal span left after padding on both sides.
	barW := (2.0 - 4.0*padding) / (3*n - 1)
	spaceW := barW / 2.0
	for i := range Y {
		bar, err := newBar(ax,
			padding+barW/2.0+float64(i)*(barW+spaceW),
			0,
			barW,
			Y[i]/maxY)
		if err != nil {
			return err
		}
		bar.XAlign = CenterAlign
	}
	axX, _ := newAxis(ax, BottomAxis)
	if X != nil {
		axX.Labels(X, padding+spaceW)
	}
	// Y axis labels run from 0 to the (unscaled) data maximum in 5 steps.
	maxY = maxSlice(Y)
	labelsY := []string{}
	if Y != nil {
		step := (maxY) / float64(4)
		for i := 0; i < 5; i++ {
			labelsY = append(labelsY, fmt.Sprintf("%.2f", step*float64(i)))
		}
	}
	axY, err := newAxis(ax, LeftAxis)
	if err != nil {
		return err
	}
	axY.Labels(labelsY, 0.1)
	return nil
}
// vmap linearly maps value from the source range [fmin, fmax]
// onto the target range [tmin, tmax].
func vmap(value, fmin, fmax, tmin, tmax float64) float64 {
	ratio := (value - fmin) / (fmax - fmin)
	return ratio*(tmax-tmin) + tmin
}
// ScatterPlot creates a Scatter chart inside Axes with X and Y values.
// X may be nil; when non-nil it must have the same length as Y.
// Axes (with labels) are drawn on all four sides.
func (ax *Axes) ScatterPlot(X, Y []float64) error {
	if X != nil {
		if len(X) != len(Y) {
			return fmt.Errorf(
				"Dimensions mismatch (X[%v] != Y[%v])",
				len(X), len(Y))
		}
	}
	// Y values are scaled so the maximum sits at 90% of the axes height;
	// X values are mapped into [padding, 1-padding].
	maxY := maxSlice(Y) / 0.9
	maxX := maxSlice(X)
	labels := []string{}
	labelsY := []string{}
	var padding = 0.1
	for i := range Y {
		_, err := NewScatterPoint(ax, vmap(X[i], 0, maxX, padding, 1-padding), Y[i]/maxY)
		if err != nil {
			return err
		}
	}
	if X != nil {
		min := minSlice(X)
		step := (maxX - min) / float64(len(X))
		for i := range X {
			labels = append(labels, fmt.Sprintf("%.2f", min+step*float64(i)))
		}
	}
	// Bottom and top X axes share the same labels.
	axX, err := newAxis(ax, BottomAxis)
	if err != nil {
		return err
	}
	axX.Labels(labels, padding)
	axX2, err := newAxis(ax, TopAxis)
	if err != nil {
		return err
	}
	axX2.Labels(labels, padding)
	// Y labels span [min, max] of the (unscaled) data in 5 steps.
	maxY = maxSlice(Y)
	if Y != nil {
		min := minSlice(Y)
		step := (maxY - min) / float64(4)
		for i := 0; i < 5; i++ {
			labelsY = append(labelsY, fmt.Sprintf("%.2f", min+step*float64(i)))
		}
	}
	// Left and right Y axes share the same labels.
	axY, err := newAxis(ax, LeftAxis)
	if err != nil {
		return err
	}
	axY.Labels(labelsY, 0.1)
	axY2, err := newAxis(ax, RightAxis)
	if err != nil {
		return err
	}
	axY2.Labels(labelsY, 0)
	return nil
}
// border draws a rectangular frame of width |w| around r onto dst, filling
// it with pixels from src. A positive w draws the frame just inside r; a
// negative w draws it just outside r.
func border(dst draw.Image, r image.Rectangle, w int, src image.Image,
	sp image.Point, op draw.Op) {
	// inside r
	if w > 0 {
		// top
		draw.Draw(dst, image.Rect(r.Min.X, r.Min.Y, r.Max.X, r.Min.Y+w), src, sp, op)
		// left
		draw.Draw(dst, image.Rect(r.Min.X, r.Min.Y+w, r.Min.X+w, r.Max.Y-w),
			src, sp.Add(image.Pt(0, w)), op)
		// right
		draw.Draw(dst, image.Rect(r.Max.X-w, r.Min.Y+w, r.Max.X, r.Max.Y-w),
			src, sp.Add(image.Pt(r.Dx()-w, w)), op)
		// bottom
		draw.Draw(dst, image.Rect(r.Min.X, r.Max.Y-w, r.Max.X, r.Max.Y),
			src, sp.Add(image.Pt(0, r.Dy()-w)), op)
		return
	}
	// outside r;
	w = -w
	// top
	draw.Draw(dst, image.Rect(r.Min.X-w, r.Min.Y-w, r.Max.X+w, r.Min.Y),
		src, sp.Add(image.Pt(-w, -w)), op)
	// left
	draw.Draw(dst, image.Rect(r.Min.X-w, r.Min.Y, r.Min.X, r.Max.Y), src,
		sp.Add(image.Pt(-w, 0)), op)
	// right
	draw.Draw(dst, image.Rect(r.Max.X, r.Min.Y, r.Max.X+w, r.Max.Y), src,
		sp.Add(image.Pt(r.Dx(), 0)), op)
	// bottom
	draw.Draw(dst, image.Rect(r.Min.X-w, r.Max.Y, r.Max.X+w, r.Max.Y+w),
		src, sp.Add(image.Pt(-w, 0)), op)
}
// Render draws the Axes' border on top of drawing its contents.
// The border is 2px wide, black, and drawn just outside the axes bounds.
func (ax *Axes) Render(dst draw.Image) {
	ax.primitive.Render(dst)
	border(dst, ax.Bounds(), -2, &image.Uniform{colornames.Black}, image.ZP, draw.Src)
} | canvas/axes.go | 0.713132 | 0.447038 | axes.go | starcoder |
package lc3
import (
"fmt"
"sync"
"time"
)
// Register indexes into Machine.reg.
const (
	// R_R0 represents the index of the general purpose register 0.
	R_R0 uint16 = iota
	// R_R1 represents the index of the general purpose register 1.
	R_R1
	// R_R2 represents the index of the general purpose register 2.
	R_R2
	// R_R3 represents the index of the general purpose register 3.
	R_R3
	// R_R4 represents the index of the general purpose register 4.
	R_R4
	// R_R5 represents the index of the general purpose register 5.
	R_R5
	// R_R6 represents the index of the general purpose register 6.
	R_R6
	// R_R7 represents the index of the general purpose register 7.
	R_R7
	// R_PC represents the index of the program counter register.
	R_PC
	// R_COND represents the index of the condition flags register.
	R_COND
	// R_COUNT represents the number of registers.
	R_COUNT
)
// Opcodes occupy the top 4 bits of each instruction word (see Machine.Step).
const (
	// OP_BR represents the branching operation.
	OP_BR uint16 = iota
	// OP_ADD represents the add operation.
	OP_ADD
	// OP_LD represents the load operation.
	OP_LD
	// OP_ST represents the store operation.
	OP_ST
	// OP_JSR represents the jump register operation.
	OP_JSR
	// OP_AND represents the bitwise AND operation.
	OP_AND
	// OP_LDR represents the load register operation.
	OP_LDR
	// OP_STR represents the store register operation.
	OP_STR
	// OP_RTI represents a supervisor operation. Not implemented by Step.
	OP_RTI
	// OP_NOT represents the bitwise NOT operation.
	OP_NOT
	// OP_LDI represents the load indirect operation.
	OP_LDI
	// OP_STI represents the store indirect operation.
	OP_STI
	// OP_JMP represents the jump operation.
	OP_JMP
	// OP_RES represents the reserve operation. Not implemented by Step.
	OP_RES
	// OP_LEA represents the load effective address operation.
	OP_LEA
	// OP_TRAP represents the execute trap operation.
	OP_TRAP
)
// Condition flags stored in R_COND after most register writes.
const (
	// FL_POS represents a positive value in the R_COND register.
	FL_POS uint16 = 1 << iota
	// FL_ZRO represents a zero value in the R_COND register.
	FL_ZRO
	// FL_NEG represents a negative value in the R_COND register.
	FL_NEG
)
// Memory-mapped keyboard registers. Bit 15 of KBSR is set by processInput
// when a pending character has been placed into KBDR.
const (
	// MR_KBSR represents the memory address of the keyboard status.
	MR_KBSR uint16 = 0xFE00
	// MR_KBDR represents the memory address of the keyboard data.
	MR_KBDR uint16 = 0xFE02
)
// Trap vector numbers. Note: Step implements GETC, OUT, PUTS and HALT;
// IN and PUTSP currently return an error.
const (
	// TRAP_GETC represents the TRAP action of reading a char from the keyboard (not echoed).
	TRAP_GETC uint16 = 0x20 + iota
	// TRAP_OUT represents the TRAP action of outputing a character.
	TRAP_OUT
	// TRAP_PUTS represents the TRAP action of outputing a string of characters.
	TRAP_PUTS
	// TRAP_IN represents the TRAP action of getting a char from the keyboard (echoed).
	TRAP_IN
	// TRAP_PUTSP represents the TRAP action of outputing a byte string.
	TRAP_PUTSP
	// TRAP_HALT represents the TRAP action that stops the program loop.
	TRAP_HALT
)
// MachineState represents the states of the LC-3 Virtual Machine.
type MachineState uint8
const (
	// MS_STOPPED represents the state of a stopped machine (zero value).
	MS_STOPPED MachineState = iota
	// MS_RUNNING represents the state of a machine processing instructions.
	MS_RUNNING
)
// Machine implements an LC-3 Virtual Machine.
type Machine struct {
mem [65536]uint16
reg [R_COUNT]uint16
state MachineState
sig chan struct{}
stdin chan rune
stdout chan rune
inputBuffer []rune
}
// NewMachine returns a new instance of the LC-3 Virtual Machine.
func NewMachine() *Machine {
	return new(Machine)
}
// LoadMemory loads the provided data into memory, replacing all of it.
func (m *Machine) LoadMemory(mem [65536]uint16) {
	m.mem = mem
}
// ReadMemory returns the memory value for the provided address.
func (m *Machine) ReadMemory(addr uint16) uint16 {
	return m.mem[addr]
}
// LoadRegisters loads the provided data into the registers, replacing all of them.
func (m *Machine) LoadRegisters(reg [R_COUNT]uint16) {
	m.reg = reg
}
// ReadRegister returns the register value for the provided address.
func (m *Machine) ReadRegister(addr uint16) uint16 {
	return m.reg[addr]
}
// SetStdin sets the standard input stream of the VM.
func (m *Machine) SetStdin(in chan rune) {
	m.stdin = in
}
// SetStdout sets the standard output stream of the VM.
func (m *Machine) SetStdout(out chan rune) {
	m.stdout = out
}
// State returns the state of the machine.
func (m *Machine) State() MachineState {
	return m.state
}
// Reset brings the machine back to it's inital PC (0x3000) and clears the
// condition register. Memory and general-purpose registers are untouched.
func (m *Machine) Reset() {
	m.reg[R_PC] = 0x3000
	m.reg[R_COND] = 0
}
// Start starts the instruction loop and blocks until sig is closed (which
// Stop does). Two goroutines run concurrently: one drains stdin into
// inputBuffer, the other repeatedly forwards buffered input to the
// memory-mapped keyboard registers and executes one instruction.
//
// NOTE(review): inputBuffer is appended to by the reader goroutine and
// read/reassigned by the executor goroutine without any synchronization —
// this is a data race; confirm and guard with a mutex or channel handoff.
// NOTE(review): the clockSpeed parameter is currently unused.
func (m *Machine) Start(sig chan struct{}, clockSpeed time.Duration) {
	m.sig = sig
	m.state = MS_RUNNING
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-m.sig:
				return
			case in := <-m.stdin:
				m.inputBuffer = append(m.inputBuffer, in)
			}
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-m.sig:
				return
			default:
				m.processInput()
				m.Step()
			}
		}
	}()
	wg.Wait()
}
// Stop stops the instruction loop by closing the signal channel.
// NOTE(review): calling Stop twice would close an already-closed channel
// and panic — confirm callers only stop once (HALT calls this internally).
func (m *Machine) Stop() {
	m.state = MS_STOPPED
	close(m.sig)
}
// Step fetches the instruction addressed by R_PC, executes it, and writes
// the updated program counter back into R_PC. It returns an error for
// opcodes or trap vectors that are not implemented.
func (m *Machine) Step() error {
	// Offsets in the LC-3 ISA are relative to the incremented PC.
	pc := m.reg[R_PC] + 1
	instr := m.mem[m.reg[R_PC]]
	// The opcode lives in the top 4 bits of the instruction word.
	switch instr >> 12 {
	case OP_ADD:
		dr := (instr >> 9) & 0x7
		sr1 := (instr >> 6) & 0x7
		immFlag := (instr >> 5) & 0x1
		if immFlag == 1 {
			imm5 := signExtend(instr&0x1F, 5)
			m.reg[dr] = m.reg[sr1] + imm5
		} else {
			sr2 := instr & 0x7
			m.reg[dr] = m.reg[sr1] + m.reg[sr2]
		}
		m.updateFlags(dr)
	case OP_AND:
		dr := (instr >> 9) & 0x7
		sr1 := (instr >> 6) & 0x7
		immFlag := (instr >> 5) & 0x1
		if immFlag == 1 {
			m.reg[dr] = m.reg[sr1] & signExtend(instr&0x1F, 5)
		} else {
			sr2 := instr & 0x7
			m.reg[dr] = m.reg[sr1] & m.reg[sr2]
		}
		m.updateFlags(dr)
	case OP_NOT:
		dr := (instr >> 9) & 0x7
		sr := (instr >> 6) & 0x7
		m.reg[dr] = ^m.reg[sr]
		m.updateFlags(dr)
	case OP_BR:
		// Branch when any of the instruction's n/z/p bits matches R_COND.
		nzp := (instr >> 9) & 0x7
		pcOffset := signExtend(instr&0x1FF, 9)
		if nzp&m.reg[R_COND] != 0 {
			pc += pcOffset
		}
	case OP_JMP:
		baseR := (instr >> 6) & 0x7
		pc = m.reg[baseR]
	case OP_JSR:
		m.reg[R_R7] = pc // save return address
		pcOffsetFlag := (instr >> 11) & 0x1
		if pcOffsetFlag == 1 {
			pc += signExtend(instr&0x7FF, 11)
		} else {
			pc = m.reg[(instr>>6)&0x7]
		}
	case OP_LD:
		dr := (instr >> 9) & 0x7
		// BUG FIX: PCoffset9 is a 9-bit signed field; it was previously
		// masked with 0xFF (8 bits) and never sign-extended, unlike the
		// identical field in OP_ST/OP_LEA.
		m.reg[dr] = m.mem[pc+signExtend(instr&0x1FF, 9)]
		m.updateFlags(dr)
	case OP_LDI:
		dr := (instr >> 9) & 0x7
		// BUG FIX: same 9-bit signed PCoffset9 as OP_LD (was instr&0xFF).
		m.reg[dr] = m.mem[m.mem[pc+signExtend(instr&0x1FF, 9)]]
		m.updateFlags(dr)
	case OP_LDR:
		dr := (instr >> 9) & 0x7
		baseR := (instr >> 6) & 0x7
		m.reg[dr] = m.mem[m.reg[baseR]+signExtend(instr&0x3F, 6)]
		m.updateFlags(dr)
	case OP_LEA:
		dr := (instr >> 9) & 0x7
		m.reg[dr] = pc + signExtend(instr&0x1FF, 9)
		m.updateFlags(dr)
	case OP_ST:
		sr := (instr >> 9) & 0x7
		m.mem[pc+signExtend(instr&0x1FF, 9)] = m.reg[sr]
	case OP_STI:
		sr := (instr >> 9) & 0x7
		m.mem[m.mem[pc+signExtend(instr&0x1FF, 9)]] = m.reg[sr]
	case OP_STR:
		sr := (instr >> 9) & 0x7
		baseR := (instr >> 6) & 0x7
		m.mem[m.reg[baseR]+signExtend(instr&0x3F, 6)] = m.reg[sr]
	case OP_TRAP:
		// The trap vector is the low byte of the instruction.
		switch instr & 0xFF {
		case TRAP_GETC:
			// Busy-wait for a buffered character, bailing out on shutdown.
		loop:
			for {
				select {
				case <-m.sig:
					return nil
				default:
					if len(m.inputBuffer) > 0 {
						break loop
					}
				}
			}
			m.reg[R_R0], m.inputBuffer = uint16(m.inputBuffer[0]), m.inputBuffer[1:]
		case TRAP_OUT:
			m.stdout <- rune(m.reg[R_R0])
		case TRAP_PUTS:
			// Emit NUL-terminated string starting at the address in R0.
			addr := m.reg[R_R0]
			var i uint16
			for {
				r := rune(m.mem[addr+i] & 0xFFFF)
				if r == rune(0) {
					break
				}
				m.stdout <- r
				i++
			}
		case TRAP_HALT:
			m.Stop()
		default:
			// TRAP_IN and TRAP_PUTSP are not implemented yet.
			return fmt.Errorf("trap code '0x%04x' is not implemented", instr)
		}
	default:
		// OP_RTI and OP_RES are not implemented.
		return fmt.Errorf("op code '0x%x' is not implemented", instr)
	}
	m.reg[R_PC] = pc
	return nil
}
// processInput forwards the oldest buffered character to the memory-mapped
// keyboard registers: when KBSR's ready bit (bit 15) is clear and input is
// buffered, it sets the ready bit and places the character into KBDR.
// NOTE(review): the forwarded character is not removed from inputBuffer, and
// nothing in this file ever clears bit 15 of KBSR again — confirm whether a
// program reading KBDR is expected to clear it.
func (m *Machine) processInput() {
	kbsrVal := m.mem[MR_KBSR]
	kbsrReady := ((kbsrVal & 0x8000) == 0)
	if kbsrReady && len(m.inputBuffer) > 0 {
		m.mem[MR_KBSR] = kbsrVal | 0x8000
		m.mem[MR_KBDR] = uint16(m.inputBuffer[0])
	}
}
// signExtend widens the low bitCount bits of x to a signed 16-bit value
// by replicating the sign bit into the high bits.
func signExtend(x uint16, bitCount int) uint16 {
	signBit := (x >> (bitCount - 1)) & 1
	if signBit == 1 {
		return x | (0xFFFF << bitCount)
	}
	return x
}
// updateFlags sets R_COND to exactly one of FL_ZRO/FL_NEG/FL_POS based on
// the value of register r (bit 15 set means negative in two's complement).
// The uint16() conversions are no-ops since the FL_* constants are uint16.
func (m *Machine) updateFlags(r uint16) {
	if m.reg[r] == 0 {
		m.reg[R_COND] = uint16(FL_ZRO)
	} else if (m.reg[r] >> 15) == 1 {
		m.reg[R_COND] = uint16(FL_NEG)
	} else {
		m.reg[R_COND] = uint16(FL_POS)
	}
} | pkg/lc3/lc3.go | 0.614047 | 0.472805 | lc3.go | starcoder |
package util
import (
"fmt"
"regexp"
"strings"
)
const (
	// Gb is the number of bytes in one gibibyte.
	Gb = 1024 * 1024 * 1024
	// Tb is the number of bytes in one tebibyte.
	Tb = 1024 * Gb
)

// RoundBytesToGb converts bytes to gibibytes, rounding up to the nearest Gb.
func RoundBytesToGb(bytes int64) int64 {
	rounded := bytes + Gb - 1
	return rounded / Gb
}

// BytesToGb converts bytes to gibibytes, truncating any remainder.
func BytesToGb(bytes int64) int64 {
	return bytes / Gb
}

// GbToBytes converts gibibytes to bytes.
func GbToBytes(gbs int64) int64 {
	return gbs * Gb
}
// Min returns the smaller of a and b.
func Min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}

// Max returns the larger of a and b.
func Max(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}
// ConvertLabelsStringToMap converts the labels from string to map
// example: "key1=value1,key2=value2" gets converted into {"key1": "value1", "key2": "value2"}
//
// Keys must be 1-63 characters, start with a lowercase letter, and contain
// only lowercase letters, digits, '_' and '-'. Values may be empty and allow
// up to 63 of the same characters. At most 64 labels are accepted.
func ConvertLabelsStringToMap(labels string) (map[string]string, error) {
	const labelsDelimiter = ","
	const labelsKeyValueDelimiter = "="

	labelsMap := make(map[string]string)
	if labels == "" {
		return labelsMap, nil
	}

	// The patterns are constant, so MustCompile cannot panic here; it also
	// avoids silently discarding the error that Compile used to return.
	regexKey := regexp.MustCompile(`^\p{Ll}[\p{Ll}0-9_-]{0,62}$`)
	checkLabelKeyFn := func(key string) error {
		if !regexKey.MatchString(key) {
			// BUG FIX: this message previously said "label value".
			return fmt.Errorf("label key %q is invalid (should start with lowercase letter / lowercase letter, digit, _ and - chars are allowed / 1-63 characters", key)
		}
		return nil
	}

	regexValue := regexp.MustCompile(`^[\p{Ll}0-9_-]{0,63}$`)
	checkLabelValueFn := func(value string) error {
		if !regexValue.MatchString(value) {
			return fmt.Errorf("label value %q is invalid (lowercase letter, digit, _ and - chars are allowed / 0-63 characters", value)
		}
		return nil
	}

	for _, pair := range strings.Split(labels, labelsDelimiter) {
		// Each pair must contain exactly one "=" separator.
		keyValue := strings.Split(pair, labelsKeyValueDelimiter)
		if len(keyValue) != 2 {
			return nil, fmt.Errorf("labels %q are invalid, correct format: 'key1=value1,key2=value2'", labels)
		}
		key := strings.TrimSpace(keyValue[0])
		if err := checkLabelKeyFn(key); err != nil {
			return nil, err
		}
		value := strings.TrimSpace(keyValue[1])
		if err := checkLabelValueFn(value); err != nil {
			return nil, err
		}
		labelsMap[key] = value
	}

	const maxNumberOfLabels = 64
	if len(labelsMap) > maxNumberOfLabels {
		return nil, fmt.Errorf("more than %d labels is not allowed, given: %d", maxNumberOfLabels, len(labelsMap))
	}
	return labelsMap, nil
}
package parser
import (
"regexp"
"github.com/tsaikd/KDGoLib/errutil"
)
// CheckValueOption for changing CheckValueAPIType behavior.
// Options are matched by their concrete type in CheckValueAPIType.
type CheckValueOption interface{}
// CheckValueOptionAllowIntegerToBeNumber allow type integer to be type number,
// e.g. APIType need a integer, but value is a number
// default: false
type CheckValueOptionAllowIntegerToBeNumber bool
// CheckValueOptionAllowArrayToBeNull allow array type to be null,
// default: false
type CheckValueOptionAllowArrayToBeNull bool
// CheckValueOptionAllowRequiredPropertyToBeEmpty allow required property to be empty value, but still should be existed
// only check the following types: TypeString, TypeArray, TypeObject, TypeBinary
// default: false
type CheckValueOptionAllowRequiredPropertyToBeEmpty bool
// CheckValueAPIType check value is valid for apiType.
// Behavior is tuned through the CheckValueOption values; unrecognized
// options are ignored and each recognized option defaults to false.
func CheckValueAPIType(apiType APIType, value Value, options ...CheckValueOption) (err error) {
	var (
		allowIntegerToBeNumber         = CheckValueOptionAllowIntegerToBeNumber(false)
		allowArrayToBeNull             = CheckValueOptionAllowArrayToBeNull(false)
		allowRequiredPropertyToBeEmpty = CheckValueOptionAllowRequiredPropertyToBeEmpty(false)
	)
	for _, option := range options {
		switch opt := option.(type) {
		case CheckValueOptionAllowIntegerToBeNumber:
			allowIntegerToBeNumber = opt
		case CheckValueOptionAllowArrayToBeNull:
			allowArrayToBeNull = opt
		case CheckValueOptionAllowRequiredPropertyToBeEmpty:
			allowRequiredPropertyToBeEmpty = opt
		}
	}
	return checkValueAPIType(
		apiType,
		value,
		allowIntegerToBeNumber,
		allowArrayToBeNull,
		allowRequiredPropertyToBeEmpty,
	)
}
// checkValueAPIType recursively validates value against apiType.
// Empty values are always accepted; array types validate each element with
// the element type; scalar native types compare type tags (with optional
// integer/number coercion); everything else is treated as an object and each
// declared property is checked for presence and type.
func checkValueAPIType(
	apiType APIType,
	value Value,
	allowIntegerToBeNumber CheckValueOptionAllowIntegerToBeNumber,
	allowArrayToBeNull CheckValueOptionAllowArrayToBeNull,
	allowRequiredPropertyToBeEmpty CheckValueOptionAllowRequiredPropertyToBeEmpty,
) (err error) {
	if value.IsEmpty() {
		// no need to check if value is empty
		return
	}
	if apiType.IsArray {
		if value.Type != TypeArray {
			// null may stand in for an array only when explicitly allowed
			if !allowArrayToBeNull || value.Type != TypeNull {
				return ErrorPropertyTypeMismatch2.New(nil, apiType.Type, value.Type)
			}
		}
		// validate each element against the non-array element type
		elemType := apiType
		elemType.IsArray = false
		for i, elemValue := range value.Array {
			if err = checkValueAPIType(
				elemType,
				*elemValue,
				allowIntegerToBeNumber,
				allowArrayToBeNull,
				allowRequiredPropertyToBeEmpty,
			); err != nil {
				// re-wrap element mismatches with the offending index
				switch errutil.FactoryOf(err) {
				case ErrorPropertyTypeMismatch2:
					return ErrorArrayElementTypeMismatch3.New(nil, i, elemType.Type, elemValue.Type)
				}
				return
			}
		}
		return
	}
	switch apiType.NativeType {
	case TypeBoolean, TypeString:
		if apiType.NativeType == value.Type {
			return nil
		}
		return ErrorPropertyTypeMismatch2.New(nil, apiType.Type, value.Type)
	case TypeInteger:
		if apiType.NativeType == value.Type {
			return nil
		}
		// optionally accept a number with no fractional part as an integer
		if allowIntegerToBeNumber {
			switch value.Type {
			case TypeNumber:
				if value.Number == float64(int64(value.Number)) {
					return nil
				}
			}
		}
		return ErrorPropertyTypeMismatch2.New(nil, apiType.Type, value.Type)
	case TypeNumber:
		if apiType.NativeType == value.Type {
			return nil
		}
		// optionally accept an integer where a number is expected
		if allowIntegerToBeNumber {
			switch value.Type {
			case TypeInteger:
				return nil
			}
		}
		return ErrorPropertyTypeMismatch2.New(nil, apiType.Type, value.Type)
	case TypeFile:
		// no type check for file type
		return nil
	default:
		if isInlineAPIType(apiType) {
			// no type check if declared by JSON
			return nil
		}
		switch value.Type {
		case TypeObject, TypeNull:
		default:
			return ErrorPropertyTypeMismatch2.New(nil, apiType.Type, value.Type)
		}
		// validate each declared property: presence first, then its value
		for _, property := range apiType.Properties.Slice() {
			if err = checkPropertyRequired(
				*property,
				value,
				allowArrayToBeNull,
				allowRequiredPropertyToBeEmpty,
				apiType,
			); err != nil {
				return err
			}
			if err = checkPropertyValue(
				*property,
				value,
				allowIntegerToBeNumber,
				allowArrayToBeNull,
				allowRequiredPropertyToBeEmpty,
			); err != nil {
				return err
			}
		}
	}
	return nil
}
// checkPropertyRequired verifies that a required property is present in
// parent (and, unless allowRequiredPropertyToBeEmpty is set, non-zero for
// string/array/object/binary values). Non-required properties always pass,
// as do required array properties when allowArrayToBeNull is set.
// It panics when parent is neither an object nor null, which the caller
// has already guaranteed.
func checkPropertyRequired(
	property Property,
	parent Value,
	allowArrayToBeNull CheckValueOptionAllowArrayToBeNull,
	allowRequiredPropertyToBeEmpty CheckValueOptionAllowRequiredPropertyToBeEmpty,
	apiType APIType, // only used for error message
) (err error) {
	if !property.Required {
		return nil
	}
	if property.IsArray && bool(allowArrayToBeNull) {
		return nil
	}
	switch parent.Type {
	case TypeNull:
		// a null parent cannot contain any required property
		return ErrorRequiredProperty2.New(nil, property.Name, apiType.Type)
	case TypeObject:
		value := parent.Map[property.Name]
		if value == nil {
			return ErrorRequiredProperty2.New(nil, property.Name, apiType.Type)
		}
		if !bool(allowRequiredPropertyToBeEmpty) {
			switch value.Type {
			case TypeString, TypeArray, TypeObject, TypeBinary:
				if value.IsZero() {
					return ErrorRequiredProperty2.New(nil, property.Name, apiType.Type)
				}
			}
		}
		return nil
	default:
		panic("check property required with wrong parent type: " + parent.Type)
	}
}
// checkPropertyValue recursively validates the value stored under
// property.Name in parent against the property's type, translating generic
// mismatch errors into property-specific ones. Missing values, and zero
// values of non-required properties, are accepted without recursion.
func checkPropertyValue(
	property Property,
	parent Value,
	allowIntegerToBeNumber CheckValueOptionAllowIntegerToBeNumber,
	allowArrayToBeNull CheckValueOptionAllowArrayToBeNull,
	allowRequiredPropertyToBeEmpty CheckValueOptionAllowRequiredPropertyToBeEmpty,
) (err error) {
	value := parent.Map[property.Name]
	if value == nil {
		return nil
	}
	// no need to check recursive if property is not required
	if !property.Required && value.IsZero() {
		return nil
	}
	if err = checkValueAPIType(
		property.APIType,
		*value,
		allowIntegerToBeNumber,
		allowArrayToBeNull,
		allowRequiredPropertyToBeEmpty,
	); err != nil {
		// attach the property name to errors raised by the nested check
		switch errutil.FactoryOf(err) {
		case ErrorPropertyTypeMismatch2:
			return ErrorPropertyTypeMismatch3.New(nil, property.Name, property.Type, value.Type)
		case ErrorArrayElementTypeMismatch3:
			return ErrorPropertyTypeMismatch1.New(err, property.Name)
		}
		return err
	}
	return nil
}
// isInlineAPIType reports whether apiType's type name is NOT a plain
// identifier (optionally suffixed with "[]"), i.e. it was declared inline.
// NOTE(review): the regexp is recompiled on every call; it could be hoisted
// to a package-level MustCompile variable.
func isInlineAPIType(apiType APIType) bool {
	regValidType := regexp.MustCompile(`^[\w]+(\[\])?$`)
	return !regValidType.MatchString(apiType.Type)
} | parser/check.go | 0.599954 | 0.439807 | check.go | starcoder |
package fputil
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/cbrand/go-filterparams/definition"
)
// DataType validates which filters apply to a column and converts raw
// filter values into their typed representation.
type DataType interface {
	// IsFilterAllowed reports whether the given filter kind may be used.
	IsFilterAllowed(filter *definition.Filter) bool
	// Parse converts value; the bool reports whether conversion succeeded.
	Parse(value interface{}) (interface{}, bool)
}
// Lowercased string spellings accepted by boolDataType.Parse.
var (
	trueStrings = map[string]bool{
		"true": true,
		"yes":  true,
		"y":    true,
	}
	falseStrings = map[string]bool{
		"false": true,
		"no":    true,
		"n":     true,
	}
)
type boolDataType struct {
}

// BoolDataType returns a DataType that handles boolean values.
func BoolDataType() DataType {
	return &boolDataType{}
}

// IsFilterAllowed permits only equality and membership filters for booleans.
func (dt boolDataType) IsFilterAllowed(filter *definition.Filter) bool {
	switch filter.Identification {
	case definition.FilterEq.Identification,
		definition.FilterIn.Identification:
		return true
	}
	return false
}

// Parse accepts bool values directly and the case-insensitive string
// spellings listed in trueStrings/falseStrings.
func (dt boolDataType) Parse(value interface{}) (interface{}, bool) {
	switch v := value.(type) {
	case bool:
		return v, true
	case string:
		lowered := strings.ToLower(v)
		if trueStrings[lowered] {
			return true, true
		}
		if falseStrings[lowered] {
			return false, true
		}
	}
	return nil, false
}
type uintDataType struct {
	bits int // bit size passed to strconv.ParseUint for string input
}

// UintDataType returns a DataType for unsigned integers of the given bit size.
func UintDataType(bits int) DataType {
	return &uintDataType{
		bits: bits,
	}
}

// IsFilterAllowed permits equality, ordering and membership filters.
func (dt uintDataType) IsFilterAllowed(filter *definition.Filter) bool {
	switch filter.Identification {
	case definition.FilterEq.Identification,
		definition.FilterGt.Identification,
		definition.FilterGte.Identification,
		definition.FilterLt.Identification,
		definition.FilterLte.Identification,
		definition.FilterIn.Identification:
		return true
	}
	return false
}

// Parse normalizes any unsigned integer width (or a decimal string) to uint64.
func (dt uintDataType) Parse(value interface{}) (interface{}, bool) {
	switch v := value.(type) {
	case uint64:
		return v, true
	case uint:
		return uint64(v), true
	case uint8:
		return uint64(v), true
	case uint16:
		return uint64(v), true
	case uint32:
		return uint64(v), true
	case string:
		n, err := strconv.ParseUint(v, 10, dt.bits)
		if err == nil {
			return n, true
		}
	}
	return nil, false
}
type intDataType struct {
	bits int // bit size passed to strconv.ParseInt for string input
}

// IntDataType returns a DataType for signed integers of the given bit size.
func IntDataType(bits int) DataType {
	return &intDataType{
		bits: bits,
	}
}

// IsFilterAllowed permits equality, ordering and membership filters.
func (dt intDataType) IsFilterAllowed(filter *definition.Filter) bool {
	switch filter.Identification {
	case definition.FilterEq.Identification,
		definition.FilterGt.Identification,
		definition.FilterGte.Identification,
		definition.FilterLt.Identification,
		definition.FilterLte.Identification,
		definition.FilterIn.Identification:
		return true
	}
	return false
}

// Parse normalizes any signed integer width (or a decimal string) to int64.
func (dt intDataType) Parse(value interface{}) (interface{}, bool) {
	switch v := value.(type) {
	case int64:
		return v, true
	case int:
		return int64(v), true
	case int8:
		return int64(v), true
	case int16:
		return int64(v), true
	case int32:
		return int64(v), true
	case string:
		n, err := strconv.ParseInt(v, 10, dt.bits)
		if err == nil {
			return n, true
		}
	}
	return nil, false
}
type floatDataType struct {
	bits int // bit size passed to strconv.ParseFloat for string input
}

// FloatDataType returns a DataType for floating point numbers of the given bit size.
func FloatDataType(bits int) DataType {
	return &floatDataType{
		bits: bits,
	}
}

// IsFilterAllowed permits equality, ordering and membership filters.
func (dt floatDataType) IsFilterAllowed(filter *definition.Filter) bool {
	switch filter.Identification {
	case definition.FilterEq.Identification,
		definition.FilterGt.Identification,
		definition.FilterGte.Identification,
		definition.FilterLt.Identification,
		definition.FilterLte.Identification,
		definition.FilterIn.Identification:
		return true
	}
	return false
}

// Parse normalizes float32/float64 (or a numeric string) to float64.
func (dt floatDataType) Parse(value interface{}) (interface{}, bool) {
	switch v := value.(type) {
	case float64:
		return v, true
	case float32:
		return float64(v), true
	case string:
		n, err := strconv.ParseFloat(v, dt.bits)
		if err == nil {
			return n, true
		}
	}
	return nil, false
}
type stringDataType struct {
}
// StringDataType returns a DataType that handles string values.
func StringDataType() DataType {
	return &stringDataType{}
}
// IsFilterAllowed permits equality, like/ilike and membership filters.
func (dt stringDataType) IsFilterAllowed(filter *definition.Filter) bool {
	return filter.Identification == definition.FilterEq.Identification ||
		filter.Identification == definition.FilterLike.Identification ||
		filter.Identification == definition.FilterILike.Identification ||
		filter.Identification == definition.FilterIn.Identification
}
// Parse returns string input unchanged. For any other type it returns a
// best-effort fmt representation but reports false — callers relying on
// the bool see non-strings as a failed parse.
func (dt stringDataType) Parse(value interface{}) (interface{}, bool) {
	switch v := value.(type) {
	case string:
		return v, true
	}
	return fmt.Sprintf("%v", value), false
}
type timeDataType struct {
	inFormat string // reference layout used to parse incoming strings
	outFormat string // optional layout used to re-format the parsed time
}
// TimeDataType returns a DataType that parses timestamps with inFormat.
// When outFormat is non-empty the parsed value is returned as a formatted
// string; otherwise it is returned as a time.Time.
func TimeDataType(inFormat string, outFormat string) DataType {
	return &timeDataType{
		inFormat: inFormat,
		outFormat: outFormat,
	}
}
// IsFilterAllowed permits equality, ordering and membership filters.
func (dt timeDataType) IsFilterAllowed(filter *definition.Filter) bool {
	return filter.Identification == definition.FilterEq.Identification ||
		filter.Identification == definition.FilterGt.Identification ||
		filter.Identification == definition.FilterGte.Identification ||
		filter.Identification == definition.FilterLt.Identification ||
		filter.Identification == definition.FilterLte.Identification ||
		filter.Identification == definition.FilterIn.Identification
}
// Parse accepts only strings matching inFormat. Note the return type
// depends on outFormat: string when set, time.Time when empty.
func (dt timeDataType) Parse(value interface{}) (interface{}, bool) {
	switch v := value.(type) {
	case string:
		if t, err := time.Parse(dt.inFormat, v); err == nil {
			if dt.outFormat != "" {
				return t.Format(dt.outFormat), true
			}
			return t, true
		}
	}
	return nil, false
}
// nilableDataType decorates another DataType so that nil (or the literal
// string "nil") parses successfully to nil.
type nilableDataType struct {
	dataType DataType
}
// NilableDataType wraps dataType to additionally accept nil values.
func NilableDataType(dataType DataType) DataType {
	return nilableDataType{
		dataType: dataType,
	}
}
// IsFilterAllowed delegates to the wrapped DataType.
func (dt nilableDataType) IsFilterAllowed(filter *definition.Filter) bool {
	return dt.dataType.IsFilterAllowed(filter)
}
// Parse maps "nil"/nil to (nil, true) and delegates everything else.
func (dt nilableDataType) Parse(value interface{}) (interface{}, bool) {
	switch value {
	case "nil", nil:
		return nil, true
	}
	return dt.dataType.Parse(value)
} | datatype.go | 0.614394 | 0.420302 | datatype.go | starcoder |
package core
import (
"bytes"
"errors"
"log"
"math"
"math/rand"
"strconv"
"strings"
)
// DataPartitioner is responsible to partition a dataset and, upon estimating,
// the basic partitioning scheme, dynamically partition new datasets.
type DataPartitioner interface {
	// Construct estimates the partitioning of the provided tuples (offline)
	Construct([]DatasetTuple) error
	// Partition executes partitioning to new datasets, returning one tuple
	// group per partition
	Partition([]DatasetTuple) ([][]DatasetTuple, error)
	// Configure provides the necessary configuration option to DataPartitioner
	Configure(map[string]string)
	// Options returns a list of options the DataPartitioner accepts with a
	// description
	Options() map[string]string
	// Serialize converts a DataPartitioner object to a stream of bytes
	Serialize() []byte
	// Deserialize converts a stream of bytes to a DataPartitioner object
	Deserialize([]byte)
}
// DataPartitionerType represents the type of the DataPartitioner struct
type DataPartitionerType uint8
const (
	// DataPartitionerKDTree utilizes a kd-tree for partitioning
	DataPartitionerKDTree DataPartitionerType = iota + 1
	// DataPartitionerKMeans utilizes kmeans for partitioning.
	// NOTE(review): iota is 1 here, so iota+2 yields the value 3, skipping 2 —
	// likely unintended, but changing it would break previously serialized
	// partitioners (Deserialize compares against these values).
	DataPartitionerKMeans DataPartitionerType = iota + 2
)
// KMeansMaxIteration caps the number of k-means iterations.
const KMeansMaxIteration = 10000
// NewDataPartitioner is the factory method for the creation of a new
// DataPartitioner object. The returned partitioner is configured with conf;
// nil is returned for an unrecognized dpType.
func NewDataPartitioner(dpType DataPartitionerType, conf map[string]string) DataPartitioner {
	var obj DataPartitioner
	if dpType == DataPartitionerKDTree {
		log.Println("Constructing KDTreePartitioner")
		obj = new(KDTreePartitioner)
	} else if dpType == DataPartitionerKMeans {
		log.Println("Constructing KMeansPartitioner")
		// BUG FIX: a stray "obj = new(KDTreePartitioner)" that was
		// immediately overwritten here has been removed.
		obj = new(KMeansPartitioner)
	} else {
		return nil
	}
	obj.Configure(conf)
	return obj
}
// DeserializePartitioner instantiates a new partitioner from its serialized
// form. The first 4 bytes carry the DataPartitionerType. Returns nil when the
// type byte is not recognized (previously this caused a nil dereference).
func DeserializePartitioner(b []byte) DataPartitioner {
	buff := bytes.NewBuffer(b)
	tempInt := make([]byte, 4)
	buff.Read(tempInt)
	t := uint8(getIntBytes(tempInt))
	var res DataPartitioner
	switch t {
	case uint8(DataPartitionerKMeans):
		res = new(KMeansPartitioner)
	case uint8(DataPartitionerKDTree):
		res = new(KDTreePartitioner)
	default:
		// BUG FIX: an unknown type previously fell through to
		// res.Deserialize(b) with res == nil and panicked.
		log.Println("unknown partitioner type", t)
		return nil
	}
	res.Deserialize(b)
	return res
}
// KMeansPartitioner applies the k-means clustering algorithm to a given dataset
// and using the calculated centroids, it partitions newly provided datasets
// according to their distance from them
type KMeansPartitioner struct {
	// k of k-means: the number of clusters/partitions to produce
	k int
	// the centroids of the clusters, estimated by Construct
	centroids []DatasetTuple
	// the weights of the columns - used for distance normalization
	weights []float64
}
// Options returns the configuration options of the KMeansPartitioner
func (p *KMeansPartitioner) Options() map[string]string {
	options := map[string]string{
		"partitions": "the number of partitions to use (k)",
		"weights": "the weights of the columns to utilize for the comparison" +
			"(default is to 1/(max - min) for each column)",
	}
	return options
}
// Configure provides the necessary configuration options to the
// KMeansPartitioner struct. Supported keys: "partitions" (positive int) and
// "weights" (comma separated floats). Invalid values fall back to defaults.
func (p *KMeansPartitioner) Configure(conf map[string]string) {
	// BUG FIX: default k first — previously a malformed "partitions" value
	// left k at its zero value (0), producing no centroids at all.
	p.k = 1
	if val, ok := conf["partitions"]; ok {
		v, err := strconv.ParseInt(val, 10, 32)
		if err != nil || v < 1 {
			log.Println("invalid partitions value, using default (1):", val)
		} else {
			p.k = int(v)
			log.Println("Setting k value", p.k)
		}
	} else {
		log.Println("Setting default k value (1)")
	}
	if val, ok := conf["weights"]; ok {
		arr := strings.Split(val, ",")
		weights := make([]float64, len(arr))
		valid := true
		for i := range arr {
			v, err := strconv.ParseFloat(arr[i], 64)
			if err != nil {
				log.Println(err)
				valid = false
				break
			}
			weights[i] = v
		}
		if valid {
			p.weights = weights
			log.Println("Setting weights", p.weights)
		} else {
			// BUG FIX: do not keep a half-filled slice or log success when
			// parsing failed.
			p.weights = nil
		}
	}
}
// initializeCentroids picks a very first position of the centroids by sampling
// k distinct tuples at random.
// BUG FIX: when k exceeds the number of tuples, k is clamped — previously
// perm[i] went out of range and panicked.
// FIXME: can consider kmeans++
func (p *KMeansPartitioner) initializeCentroids(tuples []DatasetTuple) {
	if p.k > len(tuples) {
		log.Println("fewer tuples than clusters - reducing k to", len(tuples))
		p.k = len(tuples)
	}
	perm := rand.Perm(len(tuples))
	p.centroids = make([]DatasetTuple, p.k)
	for i := 0; i < p.k; i++ {
		p.centroids[i] = tuples[perm[i]]
	}
}
// estimateWeights derives per-column normalization weights from the value
// range of each column: weight = 1/span^2 (zero when the column is constant).
func (p *KMeansPartitioner) estimateWeights(tuples []DatasetTuple) {
	if len(tuples) == 0 {
		return
	}
	dims := len(tuples[0].Data)
	maxValues := make([]float64, dims)
	minValues := make([]float64, dims)
	for i := 0; i < dims; i++ {
		maxValues[i] = math.Inf(-1)
		minValues[i] = math.Inf(1)
	}
	for _, t := range tuples {
		for j, v := range t.Data {
			if v > maxValues[j] {
				maxValues[j] = v
			}
			if v < minValues[j] {
				minValues[j] = v
			}
		}
	}
	p.weights = make([]float64, dims)
	for i := 0; i < dims; i++ {
		span := maxValues[i] - minValues[i]
		if span == 0 {
			p.weights[i] = 0.0
		} else {
			p.weights[i] = 1.0 / (span * span)
		}
	}
}
// assignTuplesToCentroids partitions the tuples according to their
// weighted distance from the calculated centroids.
func (p *KMeansPartitioner) assignTuplesToCentroids(tuples []DatasetTuple) [][]DatasetTuple {
	groups := make([][]DatasetTuple, len(p.centroids))
	for _, t := range tuples {
		closestIdx, closestDst := 0, p.distance(t, p.centroids[0])
		// BUG FIX: the distance was computed twice per centroid (once for
		// currentDst, once in the comparison); also skip the redundant
		// re-check of centroid 0.
		for j := 1; j < len(p.centroids); j++ {
			currentDst := p.distance(t, p.centroids[j])
			if currentDst < closestDst {
				closestIdx, closestDst = j, currentDst
			}
		}
		groups[closestIdx] = append(groups[closestIdx], t)
	}
	return groups
}
// estimateCentroids computes the mean tuple of each cluster. Empty clusters
// contribute no centroid, so the result may hold fewer centroids than
// clusters.
func (p *KMeansPartitioner) estimateCentroids(clusters [][]DatasetTuple) []DatasetTuple {
	var centroids []DatasetTuple
	for _, cluster := range clusters {
		if len(cluster) == 0 {
			continue // no tuples, no centroid
		}
		sum := make([]float64, len(cluster[0].Data))
		for _, t := range cluster {
			for i, v := range t.Data {
				sum[i] += v
			}
		}
		n := float64(len(cluster))
		for i := range sum {
			sum[i] /= n
		}
		centroids = append(centroids, DatasetTuple{sum})
	}
	return centroids
}
// distance returns the weighted euclidean distance between two tuples.
// Columns beyond the weights slice are ignored; nil tuples yield NaN.
func (p *KMeansPartitioner) distance(a, b DatasetTuple) float64 {
	if a.Data == nil || b.Data == nil {
		log.Println("Nil tuple")
		return math.NaN()
	}
	var sum float64
	for i := range a.Data {
		d := a.Data[i] - b.Data[i]
		if i < len(p.weights) {
			sum += p.weights[i] * d * d
		}
	}
	return math.Sqrt(sum)
}
// centroidsDelta returns the difference between the new and old centroids,
// measured as the sum over a of the distance to the closest member of b.
// Returns NaN when the two sets differ in size.
func (p *KMeansPartitioner) centroidsDelta(a, b []DatasetTuple) float64 {
	if len(a) != len(b) {
		// BUG FIX: the message said "Nil tuple", which described a different
		// failure; report the actual condition.
		log.Println("centroid count mismatch:", len(a), "vs", len(b))
		return math.NaN()
	}
	sum := 0.0
	for i := range a {
		minDst := math.Inf(1)
		for j := range b {
			dst := p.distance(a[i], b[j])
			if dst < minDst {
				minDst = dst
			}
		}
		sum += minDst
	}
	return sum
}
// Construct runs the k-means algorithm and estimates the centroids of the
// clusters (in order to be later used for partitioning). Iterates until the
// centroids stop moving or KMeansMaxIteration is reached.
func (p *KMeansPartitioner) Construct(tuples []DatasetTuple) error {
	if len(tuples) == 0 {
		return errors.New("No tuples provided")
	}
	if p.weights == nil { // need to set weights
		p.estimateWeights(tuples)
	}
	p.initializeCentroids(tuples)
	delta := math.Inf(1)
	for i := 0; i < KMeansMaxIteration && delta > 0; i++ {
		clusters := p.assignTuplesToCentroids(tuples)
		newCentroids := p.estimateCentroids(clusters)
		delta = p.centroidsDelta(p.centroids, newCentroids)
		p.centroids = newCentroids
	}
	// BUG FIX: estimateCentroids drops empty clusters, so fewer than k
	// centroids may remain; keep k in sync because Serialize/Deserialize
	// assume len(p.centroids) == p.k.
	p.k = len(p.centroids)
	return nil
}
// Partition receives a set of tuples as input and returns a number of clusters
func (p *KMeansPartitioner) Partition(tuples []DatasetTuple) ([][]DatasetTuple, error) {
	switch {
	case len(tuples) == 0:
		return nil, errors.New("no tuples to partition")
	case len(p.centroids) == 0:
		return nil, errors.New("centroids not estimated")
	case len(tuples[0].Data) != len(p.centroids[0].Data):
		return nil, errors.New("wrong data dimensionality")
	}
	return p.assignTuplesToCentroids(tuples), nil
}
// Serialize converts the partitioner to a byte stream. Layout: partitioner
// type (int32), k (int32), number of weights (int32), the weights (float64
// each), then all centroid coordinates (float64 each).
// NOTE(review): Deserialize expects exactly k centroids of len(weights)
// coordinates each; confirm len(p.centroids) == p.k always holds here,
// since estimateCentroids can drop empty clusters.
func (p *KMeansPartitioner) Serialize() []byte {
	buffer := new(bytes.Buffer)
	buffer.Write(getBytesInt(int(DataPartitionerKMeans)))
	buffer.Write(getBytesInt(p.k))
	buffer.Write(getBytesInt(len(p.weights)))
	for i := range p.weights {
		buffer.Write(getBytesFloat(p.weights[i]))
	}
	for i := range p.centroids {
		for j := range p.centroids[i].Data {
			buffer.Write(getBytesFloat(p.centroids[i].Data[j]))
		}
	}
	return buffer.Bytes()
}
// Deserialize reconstructs the partitioner from the byte layout written by
// Serialize: type, k, weight count, weights, then k centroids whose
// dimensionality equals the number of weights.
// NOTE(review): buff.Read errors are ignored throughout, so a truncated
// input yields zeroed fields silently — confirm this is acceptable.
func (p *KMeansPartitioner) Deserialize(b []byte) {
	buff := bytes.NewBuffer(b)
	bytesInt := make([]byte, 4)
	bytesFloat := make([]byte, 8)
	buff.Read(bytesInt) // consume kmeans
	buff.Read(bytesInt)
	p.k = getIntBytes(bytesInt)
	buff.Read(bytesInt)
	tupleDimensionality := getIntBytes(bytesInt)
	p.weights = make([]float64, tupleDimensionality)
	for i := range p.weights {
		buff.Read(bytesFloat)
		p.weights[i] = getFloatBytes(bytesFloat)
	}
	p.centroids = make([]DatasetTuple, p.k)
	for i := 0; i < p.k; i++ {
		p.centroids[i] = *new(DatasetTuple)
		p.centroids[i].Data = make([]float64, tupleDimensionality)
		for j := 0; j < tupleDimensionality; j++ {
			buff.Read(bytesFloat)
			p.centroids[i].Data[j] = getFloatBytes(bytesFloat)
		}
	}
}
// KDTreePartitioner generates a kd-tree on the selected columns
// and applies the partitioning to new datasets
type KDTreePartitioner struct {
	// the number of partitions to create
	partitions int
	// the dataset columns to consider for the partitioning
	columns []int
	// kdtree is the tree structure stored as an array heap
	// (children of node i live at 2i+1 and 2i+2; nil marks a pruned subtree)
	kdtree []*treeNode
}
// Options returns a list of options
func (p *KDTreePartitioner) Options() map[string]string {
	opts := make(map[string]string, 2)
	opts["partitions"] = "the number of partitions"
	opts["columns"] = "comma separated list of column ids to use (default is all)"
	return opts
}
// Configure provides the necessary configuration params. Supported keys:
// "partitions" (positive int, default 32) and "columns" (comma separated
// column ids, or "all").
func (p *KDTreePartitioner) Configure(conf map[string]string) {
	// BUG FIX: default first — previously a malformed "partitions" value
	// left the field at its zero value (0 partitions).
	p.partitions = 32
	if val, ok := conf["partitions"]; ok {
		v, err := strconv.ParseInt(val, 10, 32)
		if err != nil || v < 1 {
			log.Println("invalid partitions value, using default:", val)
		} else {
			p.partitions = int(v)
		}
	}
	log.Println("Set partitions", p.partitions)
	if val, ok := conf["columns"]; ok && val != "all" {
		arr := strings.Split(val, ",")
		p.columns = make([]int, len(arr))
		for i := range arr {
			v, err := strconv.ParseInt(arr[i], 10, 32)
			if err != nil {
				log.Println(err)
				p.columns = nil
				break
			} else {
				p.columns[i] = int(v)
			}
		}
		log.Println("Set columns", p.columns)
	}
}
// treeNode represents a node of the kd-tree: tuples with Data[dim] <= value
// go to the left subtree, the rest to the right.
type treeNode struct {
	dim   int
	value float64
}
// partition splits tuples around the median of the given column. Tuples equal
// to the median alternate between the two halves to keep the split balanced.
// Returns the left half, the right half and the median value.
func (p *KDTreePartitioner) partition(column int, tuples []DatasetTuple) ([]DatasetTuple, []DatasetTuple, float64) {
	var left, right []DatasetTuple
	values := make([]float64, len(tuples))
	for i := range tuples {
		values[i] = tuples[i].Data[column]
	}
	goesLeft := true
	median := Percentile(values, 50)
	for i := range tuples {
		if tuples[i].Data[column] < median {
			left = append(left, tuples[i])
		} else if tuples[i].Data[column] > median {
			right = append(right, tuples[i])
		} else { // equality
			if goesLeft {
				left = append(left, tuples[i])
			} else {
				// BUG FIX: was append(left, ...), so "right" silently
				// aliased/duplicated the left slice.
				right = append(right, tuples[i])
			}
			goesLeft = !goesLeft
		}
	}
	return left, right, median
}
// Construct builds the kd-tree over the configured columns (all columns when
// none are set). The tree is stored as an array heap of partitions-1 split
// nodes; columns are cycled per level. Subtrees that cannot be split further
// (one side empty) are pruned by storing nil children.
func (p *KDTreePartitioner) Construct(tuples []DatasetTuple) error {
	if tuples == nil || len(tuples) == 0 {
		return errors.New("tuples not provided")
	}
	if p.columns == nil {
		p.columns = make([]int, len(tuples[0].Data))
		for i := range p.columns {
			p.columns[i] = i
		}
	}
	p.kdtree = make([]*treeNode, p.partitions-1)
	// createKDTree recursively fills heap slot treeIdx with the median split
	// of tuples on column colIdx (mod the configured columns).
	var createKDTree func(int, int, []DatasetTuple)
	createKDTree = func(colIdx, treeIdx int, tuples []DatasetTuple) {
		if treeIdx >= len(p.kdtree) {
			return
		}
		if len(tuples) == 0 || tuples == nil {
			p.kdtree[treeIdx] = nil
			return
		}
		l, r, median := p.partition(p.columns[colIdx%len(p.columns)], tuples)
		p.kdtree[treeIdx] = &treeNode{dim: p.columns[colIdx%len(p.columns)], value: median}
		if len(l) > 0 && len(r) > 0 {
			createKDTree(colIdx+1, 2*treeIdx+1, l)
			createKDTree(colIdx+1, 2*treeIdx+2, r)
		} else {
			// Degenerate split: mark both children as pruned.
			createKDTree(colIdx+1, 2*treeIdx+1, nil)
			createKDTree(colIdx+1, 2*treeIdx+2, nil)
		}
		return
	}
	createKDTree(0, 0, tuples)
	return nil
}
// Partition applies the previously constructed kd-tree in order to partition
// the given dataset. Each tuple walks the tree, collecting one bit per level;
// the resulting bit string is the cluster id.
func (p *KDTreePartitioner) Partition(tuples []DatasetTuple) ([][]DatasetTuple, error) {
	if len(tuples) == 0 {
		return nil, errors.New("no tuples to partition")
	}
	if p.kdtree == nil {
		return nil, errors.New("kdtree not estimated")
	}
	// BUG FIX: tree height is log2(partitions); math.Log is the natural log,
	// which underestimated the height for every partition count > 2.
	maxHeight := int(math.Log2(float64(p.partitions)))
	clusters := make([][]DatasetTuple, p.partitions)
	for _, t := range tuples {
		id, idx, depth := 0, 0, 0
		for idx < len(p.kdtree) {
			cur := p.kdtree[idx]
			if cur == nil {
				// BUG FIX: pruned subtrees store nil nodes; stop descending
				// instead of dereferencing nil.
				break
			}
			depth++
			if t.Data[cur.dim] <= cur.value {
				id = id << 1
				idx = 2*idx + 1
			} else {
				id = (id << 1) | 1
				idx = 2*idx + 2
			}
		}
		// Pad short paths so all ids align to the leaf level (previously only
		// a single padding bit was added regardless of the missing depth).
		for depth < maxHeight {
			id = id << 1
			depth++
		}
		clusters[id] = append(clusters[id], t)
	}
	return clusters, nil
}
// Serialize returns a byte array with the serialized object: partitioner type
// (int32), partitions (int32), column count and columns (int32 each), node
// count, then (dim int32, value float64) per node.
// NOTE(review): p.kdtree entries can be nil for pruned subtrees (see
// Construct), which would panic here — confirm trees are always full when
// serialized.
func (p *KDTreePartitioner) Serialize() []byte {
	buf := new(bytes.Buffer)
	buf.Write(getBytesInt(int(DataPartitionerKDTree)))
	buf.Write(getBytesInt(p.partitions))
	buf.Write(getBytesInt(len(p.columns)))
	for i := range p.columns {
		buf.Write(getBytesInt(p.columns[i]))
	}
	buf.Write(getBytesInt(len(p.kdtree)))
	for i := range p.kdtree {
		buf.Write(getBytesInt(p.kdtree[i].dim))
		buf.Write(getBytesFloat(p.kdtree[i].value))
	}
	return buf.Bytes()
}
// Deserialize parses a byte array and instantiates a new kdtree part. object,
// mirroring the layout written by Serialize.
// NOTE(review): Read errors are ignored, so truncated input silently yields
// zeroed nodes — confirm this is acceptable.
func (p *KDTreePartitioner) Deserialize(b []byte) {
	buff := bytes.NewBuffer(b)
	tempInt, tempFloat := make([]byte, 4), make([]byte, 8)
	buff.Read(tempInt) // consume partitioner type
	buff.Read(tempInt)
	p.partitions = getIntBytes(tempInt)
	buff.Read(tempInt)
	cols := getIntBytes(tempInt)
	p.columns = make([]int, cols)
	for i := range p.columns {
		buff.Read(tempInt)
		p.columns[i] = getIntBytes(tempInt)
	}
	buff.Read(tempInt)
	count := getIntBytes(tempInt)
	p.kdtree = make([]*treeNode, count)
	for i := range p.kdtree {
		buff.Read(tempInt)
		buff.Read(tempFloat)
		p.kdtree[i] = &treeNode{dim: getIntBytes(tempInt), value: getFloatBytes(tempFloat)}
	}
}
package main
import (
"fmt"
"time"
"github.com/go-kit/kit/metrics"
"github.com/mre/edgecast"
)
/*
 * instrumentingMiddleware wraps a given EdgecastInterface and creates metrics for its invoked functions.
 * The following metrics are created per function:
 *  - requestCount: incremented on every invocation of that function
 *  - requestLatency: time in seconds that function took from invocation to return
 *  - requestLatencyDistribution: histogram distribution of all invocations so far including phi-quantiles, total, sum
 */
type instrumentingMiddleware struct {
	requestCount metrics.Counter // positive/incrementing only value
	requestLatencyDistribution metrics.Histogram // bucket sampling
	requestLatency metrics.Gauge // positive and negative counting value
	next EdgecastInterface // the wrapped service the metrics describe
}
// Bandwidth delegates to the wrapped service and records request count and
// latency metrics, labelled by method name and error presence.
func (mw instrumentingMiddleware) Bandwidth(platform int) (bandwidthData *edgecast.BandwidthData, err error) {
	defer func(begin time.Time) {
		// BUG FIX: time.Since was evaluated twice, so the histogram and the
		// gauge recorded slightly different values for the same request.
		elapsed := time.Since(begin).Seconds()
		lvs := []string{"method", "Bandwidth", "error", fmt.Sprint(err != nil)}
		mw.requestCount.With(lvs...).Add(1)
		mw.requestLatencyDistribution.With(lvs...).Observe(elapsed)
		mw.requestLatency.With(lvs...).Set(elapsed)
	}(time.Now())
	bandwidthData, err = mw.next.Bandwidth(platform) // hand request to logged service
	return
}
// Connections delegates to the wrapped service and records request count and
// latency metrics, labelled by method name and error presence.
func (mw instrumentingMiddleware) Connections(platform int) (connectionData *edgecast.ConnectionData, err error) {
	defer func(begin time.Time) {
		// BUG FIX: measure the latency once so histogram and gauge agree.
		elapsed := time.Since(begin).Seconds()
		lvs := []string{"method", "Connections", "error", fmt.Sprint(err != nil)}
		mw.requestCount.With(lvs...).Add(1)
		mw.requestLatencyDistribution.With(lvs...).Observe(elapsed)
		mw.requestLatency.With(lvs...).Set(elapsed)
	}(time.Now())
	connectionData, err = mw.next.Connections(platform) // hand request to logged service
	return
}
// CacheStatus delegates to the wrapped service and records request count and
// latency metrics, labelled by method name and error presence.
func (mw instrumentingMiddleware) CacheStatus(platform int) (cacheStatusData *edgecast.CacheStatusData, err error) {
	defer func(begin time.Time) {
		// BUG FIX: measure the latency once so histogram and gauge agree.
		elapsed := time.Since(begin).Seconds()
		lvs := []string{"method", "CacheStatus", "error", fmt.Sprint(err != nil)}
		mw.requestCount.With(lvs...).Add(1)
		mw.requestLatencyDistribution.With(lvs...).Observe(elapsed)
		mw.requestLatency.With(lvs...).Set(elapsed)
	}(time.Now())
	cacheStatusData, err = mw.next.CacheStatus(platform) // hand request to logged service
	return
}
func (mw instrumentingMiddleware) StatusCodes(platform int) (statusCodeData *edgecast.StatusCodeData, err error) {
defer func(begin time.Time) {
lvs := []string{"method", "StatusCodes", "error", fmt.Sprint(err != nil)}
mw.requestCount.With(lvs...).Add(1)
mw.requestLatencyDistribution.With(lvs...).Observe(time.Since(begin).Seconds())
mw.requestLatency.With(lvs...).Set(time.Since(begin).Seconds())
}(time.Now())
statusCodeData, err = mw.next.StatusCodes(platform) // hand request to logged service
return
} | instrumenting.go | 0.672224 | 0.402011 | instrumenting.go | starcoder |
package darkfeed
// pow10 maps index i to 10^(i-POW10ZEROIDX), covering 10^-6 .. 10^9.
var pow10 = []float64{0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000}
// pow10f is the float32 counterpart of pow10.
var pow10f = []float32{0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000}
// pow10u32 maps index i to 10^i for unsigned integer rescaling.
var pow10u32 = []uint32{1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000}
const POW10ZEROIDX = 6 //offset of 10^0 for float pow10 arrays
// Price is a fixed-point price representation.
type Price struct {
	Price uint32 // raw price in fractional units of 10^Precision
	Precision int8 // quoted precision as a power of ten (e.g. -2 for cents)
	TickSize uint8 // minimum tick as a multiple of precision units
	Currency uint8 // currency code
}
// roundToTickSize rounds the raw price p (in precision units) to a multiple
// of the tick size tk.
// NOTE(review): for even tick sizes the cutoff equals tk, so the remainder
// can never reach it and the price is always rounded DOWN; only odd tick
// sizes round half-and-above up. Confirm this asymmetry is intended.
func roundToTickSize(p uint32, tk uint8) uint32 {
	tick := uint32(tk)
	cutoff := tick
	if tk%2 != 0 {
		cutoff = tick/2 + 1
	}
	rem := p % tick
	if rem >= cutoff {
		return p + tick - rem
	}
	return p - rem
}
// PriceFromFloat64 loads a Price from a double precision float.
//   price: the price in whole units of currency with fractional units
//          (e.g. in USD 132.28 represents 132 dollars and 28 cents)
//   precision: the quoted precision as a power of 10 (-2 for cents)
//   ticksize: the minimum quoted tick as a multiple of precision units
//   currency: the currency code
func PriceFromFloat64(price float64, precision int8, ticksize uint8, currency uint8) Price {
	raw := uint32(price * pow10[POW10ZEROIDX-precision])
	if raw%uint32(ticksize) != 0 {
		raw = roundToTickSize(raw, ticksize)
	}
	return Price{
		Price:     raw,
		Precision: precision,
		TickSize:  ticksize,
		Currency:  currency,
	}
}
// PriceFromUInt32 loads a Price from a raw unsigned integer expressed in
// multiples of the quoted fractional unit (e.g. cents when precision is -2).
//   precision: the quoted precision as a power of 10
//   ticksize: the minimum quoted tick as a multiple of precision units
//   currency: the currency code
func PriceFromUInt32(price uint32, precision int8, ticksize uint8, currency uint8) Price {
	raw := price
	if raw%uint32(ticksize) != 0 {
		raw = roundToTickSize(raw, ticksize)
	}
	return Price{
		Price:     raw,
		Precision: precision,
		TickSize:  ticksize,
		Currency:  currency,
	}
}
// SetPrecision resamples the price at the given precision and tick size.
//   precision: the desired precision (power of ten)
//   ticksize: the new desired minimum tick size
// Moving to a finer precision scales the raw value up; moving to a coarser
// one rounds to the nearest coarse unit.
// NOTE(review): scaling up multiplies the raw uint32 and can overflow for
// large prices or precision jumps — confirm callers bound the inputs.
func (p *Price) SetPrecision(precision int8, ticksize uint8) {
	if precision == p.Precision {
		return
	} else if precision < p.Precision {
		// Finer precision: multiply the raw value by 10^(old - new).
		p.Price *= pow10u32[p.Precision-precision]
	} else {
		mf := pow10u32[precision-p.Precision]
		p.Price += mf / 2 //necessary for proper rounding when reducing precision
		p.Price /= mf
	}
	p.TickSize = ticksize
	p.Precision = precision
	if (ticksize != 1) && (p.Price%uint32(p.TickSize) != 0) {
		p.Price = roundToTickSize(p.Price, p.TickSize)
	}
}
// AsFloat64 returns the price in whole currency units as a float64.
func (p *Price) AsFloat64() float64 {
	return float64(p.Price) * pow10[POW10ZEROIDX+p.Precision]
}
// AsFloat32 returns the price in whole currency units as a float32.
func (p *Price) AsFloat32() float32 {
	return float32(p.Price) * pow10f[POW10ZEROIDX+p.Precision]
}
// AsUInt32 returns the raw fixed-point price (fractional units).
func (p *Price) AsUInt32() uint32 {
	return p.Price
}
// AsInt returns the raw fixed-point price (fractional units) as an int.
func (p *Price) AsInt() int {
	return int(p.Price)
}
/// Checks if price is less than some reference price
func (p *Price) LessThan(x Price) bool {
//check that currencies are the same
if p.Currency != x.Currency {
return false
} else if p.Precision == x.Precision {
return p.Price < x.Price
} else if p.Precision > x.Precision {
var upsampled uint32 = p.Price * pow10u32[p.Precision-x.Precision]
return upsampled < x.Price
} else {
var upsampled uint32 = x.Price * pow10u32[x.Precision-p.Precision]
return p.Price < upsampled
}
}
/// Checks if price is less than or equal to some reference price
func (p *Price) LessThanEq(x Price) bool {
//check that currencies are the same
if p.Currency != x.Currency {
return false
} else if p.Precision == x.Precision {
return p.Price <= x.Price
} else if p.Precision > x.Precision {
var upsampled uint32 = p.Price * pow10u32[p.Precision-x.Precision]
return upsampled <= x.Price
} else {
var upsampled uint32 = x.Price * pow10u32[x.Precision-p.Precision]
return p.Price <= upsampled
}
}
/// Checks if price is greater than some reference price
func (p *Price) GreaterThan(x Price) bool {
//check that currencies are the same
if p.Currency != x.Currency {
return false
} else if p.Precision == x.Precision {
return p.Price > x.Price
} else if p.Precision > x.Precision {
var upsampled uint32 = p.Price * pow10u32[p.Precision-x.Precision]
return upsampled > x.Price
} else {
var upsampled uint32 = x.Price * pow10u32[x.Precision-p.Precision]
return p.Price > upsampled
}
}
/// Checks if price is greater or equal to some reference price
func (p *Price) GreaterThanEq(x Price) bool {
//check that currencies are the same
if p.Currency != x.Currency {
return false
} else if p.Precision == x.Precision {
return p.Price >= x.Price
} else if p.Precision > x.Precision {
var upsampled uint32 = p.Price * pow10u32[p.Precision-x.Precision]
return upsampled >= x.Price
} else {
var upsampled uint32 = x.Price * pow10u32[x.Precision-p.Precision]
return p.Price >= upsampled
}
}
/// Checks if price is equal to some reference price
func (p *Price) Equals(x Price) bool {
//check that currencies are the same
if p.Currency != x.Currency {
return false
} else if p.Precision == x.Precision {
return p.Price == x.Price
} else if p.Precision > x.Precision {
var upsampled uint32 = p.Price * pow10u32[p.Precision-x.Precision]
return upsampled == x.Price
} else {
var upsampled uint32 = x.Price * pow10u32[x.Precision-p.Precision]
return p.Price == upsampled
}
} | go/price.go | 0.80456 | 0.46563 | price.go | starcoder |
package processors
import (
"fmt"
)
const (
	// DataFormatRAW is a DataFormat of type RAW
	// raw generic data
	DataFormatRAW DataFormat = iota + 1
	// DataFormatFILESYSTEMREF is a DataFormat of type FILESYSTEM_REF
	// a reference to a file or folder accessible to the processor
	DataFormatFILESYSTEMREF
)
// _DataFormatName concatenates all DataFormat names; _DataFormatMap slices
// into it per enum value ("RAW" = [0:3], "FILESYSTEM_REF" = [3:17]).
const _DataFormatName = "RAWFILESYSTEM_REF"
var _DataFormatMap = map[DataFormat]string{
	1: _DataFormatName[0:3],
	2: _DataFormatName[3:17],
}
// String implements the Stringer interface.
func (x DataFormat) String() string {
	str, ok := _DataFormatMap[x]
	if !ok {
		return fmt.Sprintf("DataFormat(%d)", x)
	}
	return str
}
// _DataFormatValue is the reverse lookup from name to DataFormat.
var _DataFormatValue = map[string]DataFormat{
	_DataFormatName[0:3]: 1,
	_DataFormatName[3:17]: 2,
}
// ParseDataFormat attempts to convert a string to a DataFormat
func ParseDataFormat(name string) (DataFormat, error) {
	x, ok := _DataFormatValue[name]
	if !ok {
		return DataFormat(0), fmt.Errorf("%s is not a valid DataFormat", name)
	}
	return x, nil
}
// MarshalText implements the text marshaller method
func (x *DataFormat) MarshalText() ([]byte, error) {
	name := x.String()
	return []byte(name), nil
}
// UnmarshalText implements the text unmarshaller method
func (x *DataFormat) UnmarshalText(text []byte) error {
	parsed, err := ParseDataFormat(string(text))
	if err != nil {
		return err
	}
	*x = parsed
	return nil
}
const (
	// DataTypeINPUT is a DataType of type INPUT
	DataTypeINPUT DataType = iota + 1
	// DataTypeOUTPUT is a DataType of type OUTPUT
	DataTypeOUTPUT
)
// _DataTypeName concatenates all DataType names; _DataTypeMap slices into it
// per enum value ("INPUT" = [0:5], "OUTPUT" = [5:11]).
const _DataTypeName = "INPUTOUTPUT"
var _DataTypeMap = map[DataType]string{
	1: _DataTypeName[0:5],
	2: _DataTypeName[5:11],
}
// String implements the Stringer interface.
func (x DataType) String() string {
	str, ok := _DataTypeMap[x]
	if !ok {
		return fmt.Sprintf("DataType(%d)", x)
	}
	return str
}
// _DataTypeValue is the reverse lookup from name to DataType.
var _DataTypeValue = map[string]DataType{
	_DataTypeName[0:5]: 1,
	_DataTypeName[5:11]: 2,
}
// ParseDataType attempts to convert a string to a DataType
func ParseDataType(name string) (DataType, error) {
	x, ok := _DataTypeValue[name]
	if !ok {
		return DataType(0), fmt.Errorf("%s is not a valid DataType", name)
	}
	return x, nil
}
// MarshalText implements the text marshaller method
func (x *DataType) MarshalText() ([]byte, error) {
	name := x.String()
	return []byte(name), nil
}
// UnmarshalText implements the text unmarshaller method
func (x *DataType) UnmarshalText(text []byte) error {
name := string(text)
tmp, err := ParseDataType(name)
if err != nil {
return err
}
*x = tmp
return nil
} | api/processors/data_type_enum.go | 0.548432 | 0.46132 | data_type_enum.go | starcoder |
package stepMapper
import (
"fmt"
"github.com/Anthony-Jhoiro/WorkyGO/configParser"
"github.com/Anthony-Jhoiro/WorkyGO/workflow"
"strings"
)
// ParseWorkflowSteps maps the parsed workflow configuration into step
// definitions and wires them into an executable workflow graph.
func ParseWorkflowSteps(parsedWorkflow configParser.ParsedWorkflow) (*workflow.Workflow, error) {
	stepDefs, err := mapMultipleSteps(parsedWorkflow.Steps)
	if err != nil {
		return nil, fmt.Errorf("fail to map steps : %v", err)
	}
	return buildWorkflow(stepDefs)
}
// buildWorkflow wires the step definitions into a dependency graph: every
// definition becomes a pending Step, a synthetic empty step is prepended as
// the single entry point, and steps without dependencies are attached to it.
// Returns an error when a declared dependency does not match any step label.
func buildWorkflow(stepsDefinitions []workflow.StepDefinition) (*workflow.Workflow, error) {
	steps := make([]*workflow.Step, 0, len(stepsDefinitions)+1)
	// stepMapper resolves dependency labels to their Step.
	stepMapper := map[string]*workflow.Step{}
	for _, stepDefinition := range stepsDefinitions {
		step := &workflow.Step{
			StepDefinition: stepDefinition,
			Status: workflow.StepPending,
			NextSteps: make([]*workflow.Step, 0, len(stepsDefinitions)),
			PreviousSteps: make([]*workflow.Step, 0, len(stepDefinition.GetDependencies())),
		}
		steps = append(steps, step)
		stepMapper[step.GetLabel()] = step
	}
	// Synthetic no-op root so the workflow has exactly one entry point.
	initialStepDef := MakeEmptyStep()
	initialStep := &workflow.Step{
		StepDefinition: initialStepDef,
		Status: workflow.StepPending,
		NextSteps: make([]*workflow.Step, 0, len(stepsDefinitions)),
		PreviousSteps: make([]*workflow.Step, 0),
	}
	for _, step := range steps {
		deps := step.GetDependencies()
		if len(deps) == 0 {
			// No dependencies: run right after the synthetic root.
			step.AddRequirement(initialStep)
		} else {
			for _, dep := range deps {
				// Normalize to lowercase/underscores to match labels.
				// NOTE(review): assumes GetLabel() applies the same
				// normalization — confirm.
				labelDep := strings.ToLower(strings.ReplaceAll(dep, " ", "_"))
				if val, ok := stepMapper[labelDep]; ok {
					step.AddRequirement(val)
				} else {
					return nil, fmt.Errorf("step %s does not exists", labelDep)
				}
			}
		}
	}
	steps = append(steps, initialStep)
	wf := workflow.NewWorkflow(initialStep, steps)
	return wf, nil
}
// mapMultipleSteps converts each raw step template into a StepDefinition,
// failing fast with the index of the first invalid step.
func mapMultipleSteps(stepTemplates []interface{}) ([]workflow.StepDefinition, error) {
	result := make([]workflow.StepDefinition, 0, len(stepTemplates))
	for idx, tpl := range stepTemplates {
		def, err := mapStep(tpl)
		if err != nil {
			return nil, fmt.Errorf("step %d is invalid : %v", idx, err)
		}
		result = append(result, def)
	}
	return result, nil
}
func mapStep(tpl interface{}) (workflow.StepDefinition, error) {
var step workflow.StepDefinition
step, err := MapDockerStep(tpl)
if err == nil {
return step, nil
}
step, err = MapWorkflowStep(tpl)
if err == nil {
return step, nil
}
return nil, fmt.Errorf("invalid step")
} | stepMapper/stepMapper.go | 0.52074 | 0.452234 | stepMapper.go | starcoder |
package alpha
import (
	"fmt"
	"io"
	"math"
	"math/rand"
	"reflect"
	"sort"
	"time"

	errors "golang.org/x/xerrors"
	yaml "gopkg.in/yaml.v2"
)
// Constant field keys. Each field has a specific expected type.
// Constant field keys. Each field has a specific expected type.
const (
	fieldDistribution = "distribution" // Type: Distribution (TODO: Rename this to Distribution?)
	fieldChoices = "choices" // Type: Slice
	fieldBounds = "bounds" // Type: Slice with len=2
)
// searchSpace is the YAML-decoded form of a parameter search space: an
// optional RNG seed plus a map of parameter name to raw distribution spec.
type searchSpace struct {
	Seed *int64 `yaml:"seed,omitempty"`
	Parameters map[string]interface{} `yaml:"parameters"`
}
// Distribution describes a distribution of values and how they should be sampled.
type Distribution string

const (
	// Choice selects a single choice with uniform probability from a list.
	Choice Distribution = "choice"
	// Integer produces uniformly distributed integers in the range of [bounds[0], bounds[1]).
	Integer Distribution = "uniform-int"
	// LogUniform produces logarithmically distributed floats in the range of [bounds[0], bounds[1]).
	LogUniform Distribution = "log-uniform"
	// Uniform produces uniformly distributed floats in the range of [bounds[0], bounds[1]).
	Uniform Distribution = "uniform"
)
// A ParameterSpace samples a set of named distributions.
type ParameterSpace struct {
	seed int64 // RNG seed, recorded for reproducibility
	rand *rand.Rand // shared source used by all parameter samples
	params map[string]distribution // parameter name -> parsed distribution
}
// decodeParameterSpace strictly decodes a YAML search-space spec from r and
// parses every parameter into its sampling distribution. When no seed is
// given, the current unix time is used.
func decodeParameterSpace(r io.Reader) (*ParameterSpace, error) {
	var spec searchSpace
	dec := yaml.NewDecoder(r)
	dec.SetStrict(true)
	if err := dec.Decode(&spec); err != nil {
		return nil, err
	}
	ps := &ParameterSpace{}
	if spec.Seed != nil {
		ps.seed = *spec.Seed
	} else {
		ps.seed = time.Now().Unix()
	}
	ps.rand = rand.New(rand.NewSource(ps.seed))
	ps.params = make(map[string]distribution, len(spec.Parameters))
	for key, raw := range spec.Parameters {
		dist, err := parseDistribution(raw)
		if err != nil {
			return nil, errors.Errorf("parameter %q: %w", key, err)
		}
		ps.params[key] = dist
	}
	return ps, nil
}
// parseDistribution turns a raw decoded parameter spec into a distribution.
// Non-map values are treated as fixed values; maps must carry a string
// "distribution" field naming one of the supported distributions.
func parseDistribution(param interface{}) (distribution, error) {
	if reflect.TypeOf(param).Kind() != reflect.Map {
		// All distributions are expressed as a map, so this must be a fixed value.
		return fixedValue{param}, nil
	}
	fields := param.(map[interface{}]interface{})
	d, ok := fields[fieldDistribution]
	if !ok {
		return nil, errors.Errorf("no sampling distribution provided")
	}
	// BUG FIX: d.(string) was an unchecked assertion and panicked when the
	// "distribution" field was not a string.
	name, ok := d.(string)
	if !ok {
		return nil, errors.Errorf("sampling distribution must be a string, got %T", d)
	}
	switch dist := Distribution(name); dist {
	case Choice:
		choices, err := getChoices(fields)
		if err != nil {
			return nil, err
		}
		return newChooseOne(choices)
	case Integer:
		min, max, err := getBoundsInt(fields)
		if err != nil {
			return nil, err
		}
		return newUniformInt(min, max)
	case LogUniform:
		min, max, err := getBoundsFloat(fields)
		if err != nil {
			return nil, err
		}
		return newLogFloat(min, max)
	case Uniform:
		min, max, err := getBoundsFloat(fields)
		if err != nil {
			return nil, err
		}
		return newUniformFloat(min, max)
	default:
		return nil, errors.Errorf("sampling distribution %q is not supported", dist)
	}
}
// getChoices extracts the "choices" field as a slice, rejecting missing,
// nil or non-slice values.
func getChoices(fields map[interface{}]interface{}) ([]interface{}, error) {
	choices, ok := fields[fieldChoices]
	// BUG FIX: a present-but-nil value made reflect.TypeOf return nil and
	// the Kind() call panic.
	if !ok || choices == nil || reflect.TypeOf(choices).Kind() != reflect.Slice {
		return nil, errors.Errorf("must specify %q as a list", fieldChoices)
	}
	return choices.([]interface{}), nil
}
// getBoundsInt extracts the "bounds" field as a [min, max] pair of int64.
func getBoundsInt(fields map[interface{}]interface{}) (min, max int64, err error) {
	field, ok := fields[fieldBounds]
	value := reflect.ValueOf(field)
	// BUG FIX: guard IsValid — reflect.ValueOf(nil).Type() panics when the
	// field is present but nil.
	if !ok || !value.IsValid() || value.Type().Kind() != reflect.Slice || value.Len() != 2 {
		return 0, 0, errors.Errorf("must specify %q as a list of 2 elements", fieldBounds)
	}
	min, err = getInt(value.Index(0))
	if err != nil {
		return 0, 0, errors.Errorf("%s[0]: %w", fieldBounds, err)
	}
	max, err = getInt(value.Index(1))
	if err != nil {
		return 0, 0, errors.Errorf("%s[1]: %w", fieldBounds, err)
	}
	return min, max, nil
}
// getBoundsFloat extracts the "bounds" field as a [min, max] pair of float64.
func getBoundsFloat(fields map[interface{}]interface{}) (min, max float64, err error) {
	field, ok := fields[fieldBounds]
	value := reflect.ValueOf(field)
	// BUG FIX: guard IsValid — reflect.ValueOf(nil).Type() panics when the
	// field is present but nil.
	if !ok || !value.IsValid() || value.Type().Kind() != reflect.Slice || value.Len() != 2 {
		return 0, 0, errors.Errorf("must specify %q as a list of 2 elements", fieldBounds)
	}
	min, err = getFloat(value.Index(0))
	if err != nil {
		return 0, 0, errors.Errorf("%s[0]: %w", fieldBounds, err)
	}
	max, err = getFloat(value.Index(1))
	if err != nil {
		return 0, 0, errors.Errorf("%s[1]: %w", fieldBounds, err)
	}
	return min, max, nil
}
// getInt extracts an int64 from a reflect.Value of any integer kind,
// unwrapping interface values. Unsigned values that overflow int64 are
// rejected.
func getInt(value reflect.Value) (int64, error) {
	switch value.Type().Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return value.Int(), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		i := int64(value.Uint())
		if i < 0 {
			return 0, errors.New("value is too large")
		}
		return i, nil
	case reflect.Interface:
		return getInt(reflect.ValueOf(value.Interface()))
	default:
		// BUG FIX: the kind was printed to stdout as leftover debugging;
		// include it in the error instead.
		return 0, errors.Errorf("value is not an integer (kind %s): %s",
			fmt.Sprint(value.Type().Kind()), value.String())
	}
}
// getFloat extracts a float64 from a reflect.Value holding any numeric kind,
// unwrapping interface values first.
func getFloat(value reflect.Value) (float64, error) {
	switch value.Type().Kind() {
	case reflect.Interface:
		// Unwrap the interface and retry with the concrete value.
		return getFloat(reflect.ValueOf(value.Interface()))
	case reflect.Float32, reflect.Float64:
		return value.Float(), nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(value.Int()), nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return float64(value.Uint()), nil
	}
	return 0, errors.Errorf("value is not a number: %s", value.String())
}
// Sample returns a collection of sampled values from a search space.
// Parameters are visited in sorted key order so that a fixed seed always
// produces identical results (map iteration order is randomized in Go).
func (ps *ParameterSpace) Sample() map[string]interface{} {
	keys := make([]string, 0, len(ps.params))
	for key := range ps.params {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	result := make(map[string]interface{}, len(ps.params))
	for _, key := range keys {
		result[key] = ps.params[key].Sample(ps.rand)
	}
	return result
}
// distribution is the sampling contract every parsed parameter satisfies:
// draw one value using the supplied random source.
type distribution interface {
	Sample(r *rand.Rand) interface{}
}
type fixedValue struct {
value interface{}
}
func (d fixedValue) Sample(r *rand.Rand) interface{} {
return d.value
}
type uniformInt struct {
min, max int64
}
func newUniformInt(min, max int64) (uniformInt, error) {
if min >= max {
return uniformInt{}, errors.New("min must be less than max")
}
return uniformInt{min, max}, nil
}
func (d uniformInt) Sample(r *rand.Rand) interface{} {
return r.Int63n(d.max-d.min) + d.min
}
type uniformFloat struct {
min, max float64
}
func newUniformFloat(min, max float64) (uniformFloat, error) {
if min >= max {
return uniformFloat{}, errors.New("min must be less than max")
}
return uniformFloat{min, max}, nil
}
func (d uniformFloat) Sample(r *rand.Rand) interface{} {
return r.Float64()*(d.max-d.min) + d.min
}
type logFloat struct {
min, max float64
}
func newLogFloat(min, max float64) (logFloat, error) {
if min <= 0 || max <= 0 {
return logFloat{}, errors.New("min and max must be positive")
}
if min >= max {
return logFloat{}, errors.New("min must be less than max")
}
return logFloat{math.Log(min), math.Log(max)}, nil
}
func (d logFloat) Sample(r *rand.Rand) interface{} {
return math.Exp(r.Float64()*(d.max-d.min) + d.min)
}
type chooseOne struct {
choices []interface{}
}
func newChooseOne(choices []interface{}) (chooseOne, error) {
if len(choices) == 0 {
return chooseOne{}, errors.New("at least one choice must be provided")
}
return chooseOne{choices}, nil
}
func (d chooseOne) Sample(r *rand.Rand) interface{} {
return d.choices[r.Intn(len(d.choices))]
} | cmd/beaker/alpha/parameter_space.go | 0.705075 | 0.444083 | parameter_space.go | starcoder |
package graph
import (
"sort"
)
// DirectedEdge is a weighted edge from vertex X to vertex Y.
type DirectedEdge struct {
	X string
	Y string
	Weight int
}

// DirectedEdges implements sort.Interface, ordering edges by ascending weight.
type DirectedEdges []DirectedEdge

// Len reports the number of edges.
func (d DirectedEdges) Len() int {
	return len(d)
}

// Swap exchanges the edges at positions x and y.
func (d DirectedEdges) Swap(x, y int) {
	d[x], d[y] = d[y], d[x]
}

// Less orders edges by weight.
func (d DirectedEdges) Less(x, y int) bool {
	return d[x].Weight < d[y].Weight
}

// Config controls graph construction; when Undirected is true, every added
// edge is mirrored in both directions.
type Config struct {
	Undirected bool
}

// Graph is an adjacency-map based weighted graph that also maintains
// cached, sorted views of its vertices, neighbor lists, and outgoing edges.
// (Fixed: the "vetices" field was misspelled; renamed to "vertices".)
type Graph struct {
	connections map[string]map[string]int
	undirected bool
	vertices []string
	neighbors map[string][]string
	edges map[string][]DirectedEdge
}

// New returns an empty graph configured by config.
func New(config Config) Graph {
	g := Graph{}
	g.undirected = config.Undirected
	g.connections = make(map[string]map[string]int)
	g.vertices = []string{}
	g.neighbors = make(map[string][]string)
	g.edges = make(map[string][]DirectedEdge)
	return g
}

// addVertex registers a vertex if it is not already present and refreshes
// the sorted vertex cache.
func (g *Graph) addVertex(vertex string) {
	if g.connections[vertex] == nil {
		g.connections[vertex] = make(map[string]int)
		g.setVertices()
	}
}

// AddEdge inserts (or overwrites) the weighted edge x -> y, creating the
// endpoints as needed; on an undirected graph the reverse edge is added too.
func (g *Graph) AddEdge(x string, y string, weight int) {
	if g.connections[x] == nil {
		g.addVertex(x)
	}
	if g.connections[y] == nil {
		g.addVertex(y)
	}
	g.connections[x][y] = weight
	g.setNeighbors(x)
	g.setEdges(x)
	if g.undirected {
		g.connections[y][x] = weight
		g.setNeighbors(y)
		g.setEdges(y)
	}
}

// setVertices rebuilds the sorted vertex cache from the adjacency map.
func (g *Graph) setVertices() {
	keys := make([]string, 0, len(g.connections))
	for k := range g.connections {
		keys = append(keys, k)
	}
	/*
		When iterating over a map with a range loop,
		the iteration order is not specified and
		is not guaranteed to be the same from one iteration to the next.
	*/
	sort.Strings(keys)
	g.vertices = keys
}

// setNeighbors rebuilds the sorted neighbor cache for a vertex.
func (g *Graph) setNeighbors(vertex string) {
	keys := make([]string, 0, len(g.connections[vertex]))
	for k := range g.connections[vertex] {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	g.neighbors[vertex] = keys
}

// setEdges rebuilds the outgoing-edge cache for a vertex, ordered by the
// sorted neighbor list.
func (g *Graph) setEdges(vertex string) {
	neighbors := g.Neighbors(vertex)
	edges := make([]DirectedEdge, 0, len(neighbors))
	for _, neighbor := range neighbors {
		edges = append(edges, DirectedEdge{
			X: vertex,
			Y: neighbor,
			Weight: g.GetEdgeWeight(vertex, neighbor),
		})
	}
	g.edges[vertex] = edges
}

// Connections exposes the raw adjacency map.
func (g *Graph) Connections() map[string]map[string]int {
	return g.connections
}

// Vertices returns the sorted list of vertices.
func (g *Graph) Vertices() []string {
	return g.vertices
}

// Neighbors returns the sorted neighbors of vertex.
func (g *Graph) Neighbors(vertex string) []string {
	return g.neighbors[vertex]
}

// GetEdgeWeight returns the weight of edge x -> y (zero if absent).
func (g *Graph) GetEdgeWeight(x string, y string) int {
	return g.connections[x][y]
}

// Edges returns the cached outgoing edges of vertex.
func (g *Graph) Edges(vertex string) []DirectedEdge {
	return g.edges[vertex]
}
package chunk
import (
"errors"
"github.com/grafana/metrictank/mdata/chunk/tsz"
)
// Sentinel errors returned by NewIterGen when chunk bytes fail validation.
var (
	errUnknownChunkFormat = errors.New("unrecognized chunk format")
	errUnknownSpanCode = errors.New("corrupt data, chunk span code is not known")
	errShort = errors.New("chunk is too short")
)

//go:generate msgp

// IterGen holds everything needed to construct a chunk iterator on demand:
// the chunk start time, an interval hint, and the raw encoded chunk bytes.
type IterGen struct {
	T0 uint32
	IntervalHint uint32 // a hint wrt expected alignment of points. useful to recover delta overflows in tsz.Series4h, not used for other formats
	B []byte
}
// NewIterGen creates an IterGen and performs crude validation of the data
// note: it's ok for intervalHint to be 0 or 1 to mean unknown.
// it just means that series4h corruptions can't be remediated in single-point-per-chunk scenarios
func NewIterGen(t0, intervalHint uint32, b []byte) (IterGen, error) {
	// Guard before reading the format byte: previously an empty slice
	// caused an index-out-of-range panic on b[0].
	if len(b) == 0 {
		return IterGen{}, errShort
	}
	switch Format(b[0]) {
	case FormatStandardGoTsz:
		if len(b) == 1 {
			return IterGen{}, errShort
		}
	case FormatStandardGoTszWithSpan, FormatGoTszLongWithSpan:
		if len(b) <= 2 {
			return IterGen{}, errShort
		}
		if int(b[1]) >= len(ChunkSpans) {
			return IterGen{}, errUnknownSpanCode
		}
	default:
		return IterGen{}, errUnknownChunkFormat
	}
	return IterGen{t0, intervalHint, b}, nil
}
// Format returns the chunk format encoded in the first byte of the data.
func (ig IterGen) Format() Format {
	return Format(ig.B[0])
}
// Get returns a fresh iterator over the chunk's encoded points.
// The tsz iterators mutate the stream as they decode it, so each call
// hands the iterator its own copy of the payload bytes.
func (ig *IterGen) Get() (tsz.Iter, error) {
	clone := func(src []byte) []byte {
		cp := make([]byte, len(src))
		copy(cp, src)
		return cp
	}
	switch ig.Format() {
	case FormatStandardGoTsz:
		// Payload starts after the 1-byte format header.
		return tsz.NewIterator4h(clone(ig.B[1:]), ig.IntervalHint)
	case FormatStandardGoTszWithSpan:
		// Payload starts after the format and span-code bytes.
		return tsz.NewIterator4h(clone(ig.B[2:]), ig.IntervalHint)
	case FormatGoTszLongWithSpan:
		return tsz.NewIteratorLong(ig.T0, clone(ig.B[2:]))
	}
	return nil, errUnknownChunkFormat
}
// Span returns the chunk's time span in seconds, or 0 for the legacy
// format that does not encode a span.
func (ig *IterGen) Span() uint32 {
	if ig.Format() == FormatStandardGoTsz {
		return 0 // we don't know what the span is. sorry.
	}
	// already validated at IterGen creation time
	return ChunkSpans[SpanCode(ig.B[1])]
}
// Size returns the size in bytes of the encoded chunk data.
func (ig *IterGen) Size() uint64 {
	return uint64(len(ig.B))
}

// EndTs returns the end of the itergen (exclusive), i.e. the next chunk's T0.
// Note: for the legacy format Span() is 0, so EndTs equals T0.
func (ig IterGen) EndTs() uint32 {
	return ig.T0 + ig.Span()
}
//msgp:ignore IterGensAsc
type IterGensAsc []IterGen
func (a IterGensAsc) Len() int { return len(a) }
func (a IterGensAsc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a IterGensAsc) Less(i, j int) bool { return a[i].T0 < a[j].T0 } | vendor/github.com/grafana/metrictank/mdata/chunk/itergen.go | 0.572006 | 0.455017 | itergen.go | starcoder |
package set1
import "encoding/hex"
/**
* Cryptopal Set 1
* Challenge 1 - Convert hex to base64
* https://cryptopals.com/sets/1/challenges/1
*/
// base64Alphabet maps each 6-bit value (0-63) directly to its base64
// character, replacing the per-lookup map of the original implementation.
const base64Alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

// BytesToBase64 converts a byte slice to its base64 representation.
// It consumes the input 3 bytes (24 bits) at a time, emitting 4 output
// characters per group, and pads a trailing partial group with '='.
func BytesToBase64(bytes []byte) []byte {
	conversion := make([]byte, 0, (len(bytes)+2)/3*4)
	i := 0
	// Full 3-byte groups: pack 24 bits and split into four 6-bit indices.
	for ; i+3 <= len(bytes); i += 3 {
		n := uint32(bytes[i])<<16 | uint32(bytes[i+1])<<8 | uint32(bytes[i+2])
		conversion = append(conversion,
			base64Alphabet[n>>18&63],
			base64Alphabet[n>>12&63],
			base64Alphabet[n>>6&63],
			base64Alphabet[n&63],
		)
	}
	// Trailing partial group: missing bytes contribute zero bits, and each
	// fully-absent 6-bit group becomes a '=' padding character.
	switch len(bytes) - i {
	case 1:
		n := uint32(bytes[i]) << 16
		conversion = append(conversion, base64Alphabet[n>>18&63], base64Alphabet[n>>12&63], '=', '=')
	case 2:
		n := uint32(bytes[i])<<16 | uint32(bytes[i+1])<<8
		conversion = append(conversion, base64Alphabet[n>>18&63], base64Alphabet[n>>12&63], base64Alphabet[n>>6&63], '=')
	}
	return conversion
}

// HexToBase64 convert a binary input represented as a hex string to a base64 representation.
// Fixed: the hex decode error is no longer silently ignored; invalid hex
// input now returns nil instead of encoding a partial decode.
func HexToBase64(hexString string) []byte {
	bytes, err := hex.DecodeString(hexString)
	if err != nil {
		return nil
	}
	return BytesToBase64(bytes)
}
package ndarray
import (
"math/rand"
"github.com/shopspring/decimal"
)
// NewNdArray builds an Array from a flat value slice and an optional
// row/column dimension list (rcd, interpreted by dimsToShape). It returns
// nil when len(values) does not match rows*cols.
// Note: the values slice is adopted directly, not copied.
func NewNdArray(values []float64, rcd ...int) *Array {
	rows, cols := dimsToShape(rcd...)
	if len(values) != rows*cols {
		return nil // Invalid dimensions provided
	}
	a := Zeros(rcd...)
	a.Data = values
	a.Length = len(a.Data)
	return a
}

// NewRowVector builds a 1 x length row vector from values.
func NewRowVector(values []float64, length int) *Array {
	return NewNdArray(values, 1, length)
}
// HStack concatenates a and b horizontally (column-wise). Both inputs must
// have the same number of rows; otherwise nil is returned.
func HStack(a, b *Array) *Array {
	aRows, aCols := a.Dims()
	bRows, bCols := b.Dims()
	if aRows != bRows {
		return nil
	}
	out := Zeros(aRows, aCols+bCols)
	for row := 0; row < aRows; row++ {
		// Copy the row from a, then append b's row after a's columns.
		for col := 0; col < aCols; col++ {
			out.Set(row, col, a.At(row, col))
		}
		for col := 0; col < bCols; col++ {
			out.Set(row, col+aCols, b.At(row, col))
		}
	}
	return out
}
// VStack concatenates a and b vertically (row-wise). Both inputs must have
// the same number of columns; otherwise nil is returned.
func VStack(a, b *Array) *Array {
	r, c := a.Dims()
	// Fixed: b's dimensions were previously read from a (br, bc := a.Dims()),
	// which broke stacking whenever the matrices differed in size.
	br, bc := b.Dims()
	if c != bc {
		return nil
	}
	m := Zeros(r+br, c)
	for i := 0; i < r; i++ {
		for j := 0; j < c; j++ {
			m.Set(i, j, a.At(i, j))
		}
	}
	for i := 0; i < br; i++ {
		for j := 0; j < c; j++ {
			m.Set(i+r, j, b.At(i, j))
		}
	}
	return m
}
// Empty returns an Array with no data and an empty shape.
func Empty() *Array {
	return &Array{dims: NewShape()}
}

// Scalar wraps a single float64 in a 1-element Array.
func Scalar(value float64) *Array {
	s := Zeros(1)
	s.Data[0] = value
	return s
}

// VectorN builds a column-shaped Array from the given values
// (len(values) x 1 per dimsToShape). The values slice is adopted, not copied.
func VectorN(values ...float64) *Array {
	v := Zeros(len(values))
	v.Data = values
	return v
}
// Zeros allocates an Array of the given dimensions (see dimsToShape) with
// all elements set to 0.
func Zeros(dims ...int) *Array {
	rows, cols := dimsToShape(dims...)
	a := &Array{
		Data: make([]float64, rows*cols),
		dims: NewShape(dims...),
		Length: rows*cols,
	}
	return a
}
// Arrange returns the values start, start+step, ... up to and including
// stop (the computed length is (stop-start)/step + 1). Decimal arithmetic
// is used for the stepping to avoid accumulating float rounding error.
func Arrange(start, stop, step float64) *Array {
	length := int((stop-start)/step) + 1
	arr := Zeros(length)
	curr := decimal.NewFromFloat(start)
	stride := decimal.NewFromFloat(step)
	for i := range arr.Data {
		arr.Data[i], _ = curr.Float64()
		curr = curr.Add(stride)
	}
	return arr
}
// ZerosLike returns a zero-filled Array with the same dimensions as arr.
func ZerosLike(arr *Array) *Array {
	return Zeros(arr.Dims())
}

// Full returns an Array of the given dimensions with every element set to
// fill_value.
func Full(fill_value float64, dims ...int) *Array {
	a := Zeros(dims...)
	for i := range a.Data {
		a.Data[i] = fill_value
	}
	return a
}
// Ones returns an Array of the given dimensions with every element set to 1.
func Ones(dims ...int) *Array {
	return Full(1, dims...)
}

// Random returns an Array filled with uniform pseudo-random values in
// [0, 1), drawn from the package-global math/rand source.
func Random(dims ...int) *Array {
	a := Zeros(dims...)
	for i := range a.Data {
		a.Data[i] = rand.Float64()
	}
	return a
}
// Identity returns an Array of the given dimensions with 1.0 on the main
// diagonal and 0 elsewhere.
func Identity(dims ...int) *Array {
	m := Zeros(dims...)
	rows, cols := m.Dims()
	// Only the diagonal entries need writing; Zeros handled the rest.
	for d := 0; d < rows && d < cols; d++ {
		m.Set(d, d, 1.0)
	}
	return m
}
// Copy returns a deep copy of arr's data in a new Array of the same shape.
func Copy(arr *Array) *Array {
	cpy := ZerosLike(arr)
	for i, v := range arr.Data {
		cpy.Data[i] = v
	}
	cpy.Length = arr.Length
	return cpy
}

// Assign copies rhs's data into lhs's existing backing slice (up to the
// shorter of the two lengths, per the builtin copy semantics).
func Assign(lhs, rhs *Array) {
	copy(lhs.Data, rhs.Data)
}
// dimsToShape interprets a variadic dimension list: no args -> 1x1,
// one arg -> Nx1 (column shape), two args -> rows x cols. Any other
// argument count falls back to 1x1.
func dimsToShape(dims ...int) (int, int) {
	switch len(dims) {
	case 1:
		return dims[0], 1
	case 2:
		return dims[0], dims[1]
	default:
		return 1, 1
	}
}
package interpreter
import (
"math"
"github.com/Nv7-Github/bsharp/ir"
"github.com/Nv7-Github/bsharp/tokens"
"github.com/Nv7-Github/bsharp/types"
)
// evalMathNode evaluates both operands of a binary arithmetic node and
// dispatches to mathOp based on the *left* operand's basic type.
// NOTE(review): this assumes the right operand evaluates to the same Go
// type as the left (presumably guaranteed by upstream type checking);
// integer division by zero will panic per Go semantics.
func (i *Interpreter) evalMathNode(pos *tokens.Pos, node *ir.MathNode) (*Value, error) {
	left, err := i.evalNode(node.Lhs)
	if err != nil {
		return nil, err
	}
	right, err := i.evalNode(node.Rhs)
	if err != nil {
		return nil, err
	}
	switch node.Lhs.Type().BasicType() {
	case types.INT:
		v := mathOp(left.Value.(int), right.Value.(int), node.Op)
		if v == nil {
			return nil, pos.Error("unknown math operation: %s", node.Op)
		}
		return NewValue(types.INT, *v), nil
	case types.FLOAT:
		v := mathOp(left.Value.(float64), right.Value.(float64), node.Op)
		if v == nil {
			return nil, pos.Error("unknown math operation: %s", node.Op)
		}
		return NewValue(types.FLOAT, *v), nil
	default:
		return nil, pos.Error("cannot perform operations on type %s", left.Type.String())
	}
}
// mathOpValue constrains mathOp to the numeric types the interpreter
// performs arithmetic on.
type mathOpValue interface {
	int | float64
}

// mathOp applies the binary arithmetic operation op to lhs and rhs and
// returns a pointer to the result, or nil for an unknown operation.
// Pow always routes through math.Pow, truncating back to int for integer
// operands; integer division by zero panics per Go semantics.
func mathOp[T mathOpValue](lhs T, rhs T, op ir.MathOperation) *T {
	var out T
	switch op {
	case ir.MathOperationAdd:
		out = lhs + rhs
	case ir.MathOperationSub:
		out = lhs - rhs
	case ir.MathOperationMul:
		out = lhs * rhs
	case ir.MathOperationDiv:
		out = lhs / rhs
	case ir.MathOperationPow:
		// Simplified: the previous type switch here had two byte-identical
		// arms for int and float64, so the branching was dead code.
		out = T(math.Pow(float64(lhs), float64(rhs)))
	case ir.MathOperationMod:
		switch any(lhs).(type) {
		case int:
			out = T(int(lhs) % int(rhs))
		case float64:
			out = T(math.Mod(float64(lhs), float64(rhs)))
		}
	default:
		return nil
	}
	return &out
}
// evalCompareNode evaluates both operands of a comparison node and
// dispatches to compOp based on the *left* operand's basic type, producing
// a BOOL value.
// NOTE(review): as with evalMathNode, the right operand is assumed to hold
// the same Go type as the left.
func (i *Interpreter) evalCompareNode(pos *tokens.Pos, node *ir.CompareNode) (*Value, error) {
	left, err := i.evalNode(node.Lhs)
	if err != nil {
		return nil, err
	}
	right, err := i.evalNode(node.Rhs)
	if err != nil {
		return nil, err
	}
	switch node.Lhs.Type().BasicType() {
	case types.INT:
		v := compOp(left.Value.(int), right.Value.(int), node.Op)
		if v == nil {
			return nil, pos.Error("unknown compare operation: %s", node.Op)
		}
		return NewValue(types.BOOL, *v), nil
	case types.BYTE:
		v := compOp(left.Value.(byte), right.Value.(byte), node.Op)
		if v == nil {
			return nil, pos.Error("unknown compare operation: %s", node.Op)
		}
		return NewValue(types.BOOL, *v), nil
	case types.FLOAT:
		v := compOp(left.Value.(float64), right.Value.(float64), node.Op)
		if v == nil {
			return nil, pos.Error("unknown compare operation: %s", node.Op)
		}
		return NewValue(types.BOOL, *v), nil
	case types.STRING:
		v := compOp(left.Value.(string), right.Value.(string), node.Op)
		if v == nil {
			return nil, pos.Error("unknown compare operation: %s", node.Op)
		}
		return NewValue(types.BOOL, *v), nil
	default:
		return nil, pos.Error("cannot perform comparison on type %s", left.Type.String())
	}
}
// compOpValue constrains compOp to the ordered types the interpreter can
// compare (strings compare lexicographically per Go).
type compOpValue interface {
	int | float64 | string | byte
}

// compOp applies the comparison op to lhs and rhs and returns a pointer to
// the boolean result, or nil for an unknown operation.
func compOp[T compOpValue](lhs T, rhs T, op ir.CompareOperation) *bool {
	var out bool
	switch op {
	case ir.CompareOperationEqual:
		out = lhs == rhs
	case ir.CompareOperationNotEqual:
		out = lhs != rhs
	case ir.CompareOperationGreater:
		out = lhs > rhs
	case ir.CompareOperationGreaterEqual:
		out = lhs >= rhs
	case ir.CompareOperationLess:
		out = lhs < rhs
	case ir.CompareOperationLessEqual:
		out = lhs <= rhs
	default:
		return nil
	}
	return &out
}
func (i *Interpreter) evalLogicalOp(pos *tokens.Pos, node *ir.LogicalOpNode) (*Value, error) {
val, err := i.evalNode(node.Val)
if err != nil {
return nil, err
}
switch node.Op {
case ir.LogicalOpAnd:
if val.Value.(bool) {
right, err := i.evalNode(node.Rhs)
if err != nil {
return nil, err
}
return NewValue(types.BOOL, right.Value.(bool)), nil
}
return NewValue(types.BOOL, val.Value.(bool)), nil
case ir.LogicalOpOr:
if !val.Value.(bool) {
right, err := i.evalNode(node.Rhs)
if err != nil {
return nil, err
}
return NewValue(types.BOOL, right.Value.(bool)), nil
}
return NewValue(types.BOOL, val.Value.(bool)), nil
case ir.LogicalOpNot:
return NewValue(types.BOOL, !val.Value.(bool)), nil
default:
return nil, pos.Error("unknown logical operation: %d", node.Op)
}
} | backends/interpreter/ops.go | 0.5144 | 0.426083 | ops.go | starcoder |
package keras
import (
"math"
"math/rand"
"time"
)
//Layer interface given these 5 functions which every layer must have.
type Layer interface {
Call() []float64
GetWeights() Matrix
GetBiases() Vector
Name() string
TrainableParameters() int
}
// DenseLayer defines a fully connected layer.
type DenseLayer struct {
	units int
	inputs, outputs []float64
	weights Weights
	biases Biases
	trainable bool
	name string
	kernelRegularizer func([]float64) []float64
	biasRegularizer func([]float64) []float64
	Activation func(float64) float64
	KernelInit func(float64) float64
	BiasInit func(float64) float64
}

// Conv2DLayer defines a 2D convolutional layer.
// (The original comment was copy-pasted from DenseLayer.)
type Conv2DLayer struct {
	Filters int
	Inputs, Outputs []float64
	Weights Weights
	Biases Biases
	trainable bool
	name string
	kernelRegularizer func([]float64) []float64
	biasRegularizer func([]float64) []float64
	Activation func(float64) float64
	KernelInit func(float64) float64
	BiasInit func(float64) float64
}
// Dense constructs a fully connected layer with the given unit count,
// inputs, and activation. Weights are units x len(inputs), He-uniform
// initialized; biases are zero-initialized.
func Dense(units int, inputs []float64, activation func(float64) float64) DenseLayer {
	weights := WeightInit(units, len(inputs), HeUniform)
	biases := BiasInit(units, ZeroInitializer)
	return DenseLayer{units: units,
		inputs: inputs,
		Activation: activation,
		weights: weights,
		biases: biases,
	}
}
// Conv2D initializes a Conv2DLayer with numFilter filters and an x-by-y
// kernel built by the supplied padding strategy (e.g. Valid or
// DefaultPadding). Biases are zero-initialized.
// Note: the activation is hard-coded to ReLU here.
func Conv2D(numFilter int, x int, y int, inputs []float64, padding func(x, y int) Weights) Conv2DLayer {
	weights := padding(x, y)
	biases := BiasInit(numFilter, ZeroInitializer)
	return Conv2DLayer{Filters: numFilter,
		Inputs: inputs,
		Activation: ReLU,
		Weights: weights,
		Biases: biases,
	}
}
// Valid is a padding strategy producing an x-by-y He-uniform kernel
// (i.e. no padding).
func Valid(x, y int) Weights {
	weights := WeightInit(x, y, HeUniform)
	return weights
}

// DefaultPadding embeds a random x-by-y kernel in a square max(x,y) x
// max(x,y) zero matrix, then applies He-uniform initialization.
// NOTE(review): the return value of padding.MapFunc(HeUniform) is
// discarded — whether MapFunc mutates in place is defined elsewhere;
// verify this is intentional.
func DefaultPadding(x, y int) Weights {
	w := RandomMatrix(x, y)
	padding := Zeros(Max(x, y), Max(x, y))
	var i, j int
	for i = 0; i < RowNum(w); i++ {
		for j = 0; j < ColNum(w); j++ {
			padding.Matrix[i][j] = w.Matrix[i][j] + padding.Matrix[i][j]
		}
	}
	padding.MapFunc(HeUniform)
	return Weights{Kernels: padding, KernelInit: HeUniform}
}
// Max returns the larger of two ints.
func Max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
// Call computes the layer output: inputs applied to the kernels, plus
// biases, passed through the activation function.
func (cd Conv2DLayer) Call() []float64 {
	vec := NewVector(cd.Inputs).ApplyMatrix(cd.Weights.Kernels).Add(cd.Biases.bs)
	return vec.Map(cd.Activation).Slice()
}

// Name returns the layer's identifier.
func (cd Conv2DLayer) Name() string {
	return cd.name
}

// GetWeights returns the layer's kernel matrix.
func (cd Conv2DLayer) GetWeights() Matrix {
	return cd.Weights.Kernels
}

// GetBiases returns the layer's bias vector.
func (cd Conv2DLayer) GetBiases() Vector {
	return cd.Biases.bs
}

// TrainableParameters returns the count of trainable parameters
// (kernel elements plus bias elements).
func (cd Conv2DLayer) TrainableParameters() int {
	return NumberOfElements(cd.Weights.Kernels) + cd.Biases.bs.NumberOfElements()
}
/*
tf.keras.layers.Conv2D(
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
groups=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
)*/
// Weights bundles the kernel matrix with the initializer that produced it.
type Weights struct {
	Kernels Matrix
	KernelInit func(float64) float64
}

// Biases bundles the bias vector with the initializer that produced it.
type Biases struct {
	bs Vector
	BiasInit func(float64) float64
}

// Shape is a list of dimension sizes.
type Shape []float64
// WeightInit builds an a-by-b weight matrix by mapping kernelInit over a
// random matrix. Already wired in by the layer constructors.
func WeightInit(a, b int, kernelInit func(float64) float64) Weights {
	w := RandomMatrix(a, b).MapFunc(kernelInit)
	return Weights{Kernels: w, KernelInit: kernelInit}
}

// BiasInit builds a length-a bias vector by mapping biasInit over a random
// vector. Already wired in by the layer constructors.
func BiasInit(a int, biasInit func(float64) float64) Biases {
	bs := RandomVector(a).Map(biasInit)
	return Biases{bs: bs, BiasInit: biasInit}
}
// Call computes the dense layer output: inputs applied to the weights,
// plus biases, passed through the activation function.
func (d DenseLayer) Call() []float64 {
	vec := NewVector(d.inputs).ApplyMatrix(d.weights.Kernels).Add(d.biases.bs)
	return vec.Map(d.Activation).Slice()
}

// Name returns the layer's identifier.
func (d DenseLayer) Name() string {
	return d.name
}

// GetWeights returns the layer's kernel matrix.
func (d DenseLayer) GetWeights() Matrix {
	return d.weights.Kernels
}

// GetBiases returns the layer's bias vector.
func (d DenseLayer) GetBiases() Vector {
	return d.biases.bs
}

// TrainableParameters returns the count of trainable parameters
// (kernel elements plus bias elements).
func (d DenseLayer) TrainableParameters() int {
	return NumberOfElements(d.weights.Kernels) + d.biases.bs.NumberOfElements()
}

// SetWeights manually overrides the kernel matrix.
func (d *DenseLayer) SetWeights(Kernels Matrix) {
	d.weights.Kernels = Kernels
}

// SetBiases manually overrides the bias vector.
func (d *DenseLayer) SetBiases(bs Vector) {
	d.biases.bs = bs
}
// InputLayer layer, much like the keras one.
type InputLayer struct {
	Inputs, outputs []float64
	Weights Weights
	Biases Biases
	Trainable bool
	Name string
}

// Input constructs an InputLayer with len(inputs) x 1 He-uniform weights
// and zero-initialized biases of length len(inputs).
func Input(inputs []float64) InputLayer {
	weights := WeightInit(len(inputs), 1, HeUniform)
	biases := BiasInit(len(inputs), ZeroInitializer)
	return InputLayer{
		Inputs: inputs,
		Weights: weights,
		Biases: biases,
	}
}

// Call computes the layer output (inputs applied to weights plus biases),
// caches it in outputs, and returns it.
func (i *InputLayer) Call() []float64 {
	vec := NewVector(i.Inputs).ApplyMatrix(i.Weights.Kernels).Add(i.Biases.bs)
	i.outputs = vec.Slice()
	return vec.Slice()
}
// BatchNormLayer normalizes its inputs with the batch mean and variance,
// then applies the learned scale (alpha) and shift (beta):
// out = alpha * (x - mean) / sqrt(variance + epsilon) + beta.
type BatchNormLayer struct {
	inputs, outputs []float64
	beta, epsilon, alpha float64
	trainable bool
	name string
}

// BatchNorm constructs a BatchNormLayer over inputs.
// Note: alpha, beta and epsilon are left at their zero values by this
// constructor, so Call yields beta (i.e. 0) for every element.
func BatchNorm(inputs []float64) BatchNormLayer {
	return BatchNormLayer{inputs: inputs}
}

// Call normalizes the inputs and returns (and caches) the outputs.
// Fixed: the output slice was previously created with make([]float64, n)
// and then appended to, producing 2n elements with n leading zeros.
func (bn *BatchNormLayer) Call() []float64 {
	outputs := make([]float64, 0, len(bn.inputs))
	variance := Variance(bn.inputs)
	mean := meanValue(bn.inputs)
	for _, x := range bn.inputs {
		newX := (x - mean) / math.Sqrt(variance+bn.epsilon)
		outputs = append(outputs, bn.alpha*newX+bn.beta)
	}
	bn.outputs = outputs
	return outputs
}

// Variance returns the population variance of fls.
// Note: an empty slice yields NaN (0/0).
func Variance(fls []float64) float64 {
	var sum float64
	for _, f := range fls {
		sum += math.Pow(f-meanValue(fls), 2)
	}
	return sum / float64(len(fls))
}

// meanValue returns the arithmetic mean of fls (NaN for an empty slice).
func meanValue(fls []float64) float64 {
	mean := sum(fls) / float64(len(fls))
	return mean
}

// sum returns the total of all values.
func sum(values []float64) float64 {
	var total float64
	for _, v := range values {
		total += v
	}
	return total
}
// DropoutLayer zeroes a fraction (rate) of its inputs.
type DropoutLayer struct {
	inputs []float64
	rate float64
}

// Dropout constructs a DropoutLayer with the given drop probability.
func Dropout(inputs []float64, rate float64) DropoutLayer {
	return DropoutLayer{inputs: inputs, rate: rate}
}

// Call zeroes each input independently with probability rate and returns
// the (mutated in place) slice.
//
// Fixed: the previous implementation computed len(inputs) % int(rate*n),
// which panicked with an integer division by zero whenever rate*n < 1
// (e.g. rate 0), and otherwise zeroed a fixed prefix of the inputs rather
// than a random subset.
func (dr *DropoutLayer) Call() []float64 {
	for i := range dr.inputs {
		if rand.Float64() < dr.rate {
			dr.inputs[i] = 0
		}
	}
	return dr.inputs
}
//SoftmaxLayer layer
type SoftmaxLayer struct {
inputs, outputs []float64
classes int
}
// Softmax returns the softmax layer based on values.
func Softmax(inputs []float64, classes int) SoftmaxLayer {
return SoftmaxLayer{inputs: inputs, classes: classes}
}
// Call of the softmax
func (s *SoftmaxLayer) Call() []float64 {
sum := 0.0
preds := make([]float64, len(s.inputs))
for i, n := range s.inputs {
preds[i] -= math.Exp(n - findMax(s.inputs))
sum += preds[i]
}
for k := range preds {
preds[k] /= sum
}
outputs := preds[:s.classes]
s.outputs = outputs
return outputs
}
func findMax(fls []float64) float64 {
max := -10000.0
for _, k := range fls {
if k > max {
max = k
}
}
return max
}
// FlattenLayer holds a matrix flattened into a 1-D slice at construction.
type FlattenLayer struct {
	inputs, outputs []float64
	name string
	trainable bool
}

// Call returns the outputs computed when the layer was built; the inputs
// field is unused by this method.
func (f *FlattenLayer) Call() []float64 {
	return f.outputs
}

// Flatten builds a FlattenLayer whose outputs are m flattened via ToArray.
func Flatten(m Matrix) FlattenLayer {
	return FlattenLayer{outputs: ToArray(m)}
}
// seeded seeds the package-global rand source exactly once at package
// init. Fixed: HeUniform previously called rand.Seed(time.Now().UnixNano())
// on every invocation, which degrades randomness for rapid successive
// calls and reseeds the shared global source as a side effect.
var seeded = func() bool {
	rand.Seed(time.Now().UnixNano())
	return true
}()

// HeUniform stands for He Initialization (glorot_uniform style) for
// kernel initialization: a uniform sample from [x-0.4, x+0.4].
func HeUniform(x float64) float64 {
	down, upper := x-0.4, x+0.4
	return down + rand.Float64()*(upper-down)
}

// ZeroInitializer returns the zeros initializer for the bias initialization.
func ZeroInitializer(x float64) float64 {
	return 0
}

// OnesInitializer returns the ones initializer for the bias initialization.
func OnesInitializer(x float64) float64 {
	return 1
}
package validation
import (
"fmt"
"net"
"net/url"
"reflect"
"time"
"github.com/System-Glitch/goyave/v3/helper"
"github.com/google/uuid"
)
// createArray creates an addressable, empty slice (with the given
// capacity) of the Go type matching the validation dataType, wrapped in a
// reflect.Value. For an unrecognized dataType the zero reflect.Value is
// returned (there is no default case).
func createArray(dataType string, length int) reflect.Value {
	var arr reflect.Value
	switch dataType {
	case "string":
		newArray := make([]string, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "numeric":
		newArray := make([]float64, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "integer":
		newArray := make([]int, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "timezone":
		newArray := make([]*time.Location, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "ip", "ipv4", "ipv6":
		newArray := make([]net.IP, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "json":
		newArray := make([]interface{}, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "url":
		newArray := make([]*url.URL, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "uuid":
		newArray := make([]uuid.UUID, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "bool":
		newArray := make([]bool, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	case "date":
		newArray := make([]time.Time, 0, length)
		arr = reflect.ValueOf(&newArray).Elem()
	}
	return arr
}
// validateArray validates that value is an array, optionally validating
// and converting every element with the type rule named in parameters[0]
// (remaining parameters are forwarded to that rule). On success the field
// in form is replaced by a typed slice built via createArray.
// It panics (by design, as a programmer error) when parameters[0] is
// "array" or names a rule that is not a converting type rule.
func validateArray(field string, value interface{}, parameters []string, form map[string]interface{}) bool {
	if GetFieldType(value) == "array" {

		if len(parameters) == 0 {
			return true
		}

		if parameters[0] == "array" {
			panic("Cannot use array type for array validation. Use \">array\" instead")
		}

		if !validationRules[parameters[0]].IsType {
			panic(fmt.Sprintf("Rule %s is not converting, cannot use it for array validation", parameters[0]))
		}

		list := reflect.ValueOf(value)
		length := list.Len()
		arr := createArray(parameters[0], length)

		params := parameters[1:]
		for i := 0; i < length; i++ {
			val := list.Index(i).Interface()
			// Validate each element in isolation; the rule may convert the
			// value in place inside tmpData.
			tmpData := map[string]interface{}{field: val}
			if !validationRules[parameters[0]].Function(field, val, params, tmpData) {
				return false
			}
			arr.Set(reflect.Append(arr, reflect.ValueOf(tmpData[field])))
		}

		form[field] = arr.Interface()
		return true
	}

	return false
}
// validateDistinct passes when value is an array with no duplicate
// elements. Non-array values fail.
// NOTE(review): this is O(n^2) via helper.Contains on the growing "found"
// slice; fine for typical request-sized arrays.
func validateDistinct(field string, value interface{}, parameters []string, form map[string]interface{}) bool {
	if GetFieldType(value) != "array" {
		return false // Can't validate if not an array
	}

	found := []interface{}{}
	list := reflect.ValueOf(value)
	for i := 0; i < list.Len(); i++ {
		v := list.Index(i).Interface()
		if helper.Contains(found, v) {
			return false
		}
		found = append(found, v)
	}

	return true
}
// checkInNumeric reports whether value, converted to float64, equals any
// of the string parameters that itself parses as a float64 (so only
// values of a comparable numeric form match).
// NOTE(review): the conversion error for value is discarded; a
// non-convertible value compares as its zero conversion result.
func checkInNumeric(parameters []string, value interface{}) bool {
	for _, v := range parameters {
		floatVal, _ := helper.ToFloat64(value)
		other, err := helper.ToFloat64(v)
		if err == nil && floatVal == other { // Compare only values of the same type
			return true
		}
	}
	return false
}
// validateIn passes when a numeric or string value is among the rule
// parameters. Arrays and files always fail.
func validateIn(field string, value interface{}, parameters []string, form map[string]interface{}) bool {
	switch GetFieldType(value) {
	case "numeric":
		return checkInNumeric(parameters, value)
	case "string":
		return helper.Contains(parameters, value)
	}
	// Don't check arrays and files
	return false
}

// validateNotIn passes when a numeric or string value is NOT among the
// rule parameters. Arrays and files always fail.
// NOTE(review): the string branch uses helper.ContainsStr while validateIn
// uses helper.Contains — confirm the two helpers agree on semantics.
func validateNotIn(field string, value interface{}, parameters []string, form map[string]interface{}) bool {
	switch GetFieldType(value) {
	case "numeric":
		return !checkInNumeric(parameters, value)
	case "string":
		return !helper.ContainsStr(parameters, value.(string))
	}
	// Don't check arrays and files
	return false
}
func validateInArray(field string, value interface{}, parameters []string, form map[string]interface{}) bool {
other, exists := form[parameters[0]]
if exists && GetFieldType(other) == "array" {
return helper.Contains(other, value)
}
return false
}
func validateNotInArray(field string, value interface{}, parameters []string, form map[string]interface{}) bool {
other, exists := form[parameters[0]]
if exists && GetFieldType(other) == "array" {
return !helper.Contains(other, value)
}
return false
} | validation/arrays.go | 0.554229 | 0.403508 | arrays.go | starcoder |
package model3d
import (
"math"
"github.com/heustis/tsp-solver-go/model"
)
// BuildPerimiter attempts to produce the smallest convex perimeter that can encompass all the vertices in the supplied array.
// The current 3D algorithm is not guaranteed to produce the smallest convex perimeter, unlike the 2D algorithm.
// This returns both the edges comprising the convex perimeter and the set of unattached (interior) vertices.
// This will panic if any of the vertices in the array are not of type Vertex3D.
// (Note: "Perimiter" is a misspelling of "Perimeter" preserved because the
// name is exported API, asserted against model.PerimeterBuilder below.)
func BuildPerimiter(verticesArg []model.CircuitVertex) ([]model.CircuitEdge, map[model.CircuitVertex]bool) {
	numVertices := len(verticesArg)
	midpoint := NewVertex3D(0, 0, 0)
	unattachedVertices := make(map[model.CircuitVertex]bool)
	distanceToMidpoint := make(map[model.CircuitVertex]float64)
	exteriorClosestEdges := make(map[model.CircuitVertex]*model.DistanceToEdge)
	circuitEdges := make([]model.CircuitEdge, 0, numVertices)

	// Accumulate the centroid while registering every vertex as unattached.
	for _, v := range verticesArg {
		v3d := v.(*Vertex3D)
		unattachedVertices[v] = true
		midpoint.X += v3d.X / float64(numVertices)
		midpoint.Y += v3d.Y / float64(numVertices)
		midpoint.Z += v3d.Z / float64(numVertices)
	}

	// 1. Find point farthest from midpoint
	// Restricts problem-space to a sphere around the midpoint, with radius equal to the distance to the point.
	farthestFromMid := model.FindFarthestPoint(midpoint, verticesArg)
	delete(unattachedVertices, farthestFromMid)
	distanceToMidpoint[farthestFromMid] = farthestFromMid.DistanceTo(midpoint)

	// 2. Find point farthest from point in step 1.
	// Restricts problem-space to intersection of step 1 sphere,
	// and a sphere centered on the point from step 1 with a radius equal to the distance between the points found in step 1 and 2.
	farthestFromFarthest := model.FindFarthestPoint(farthestFromMid, verticesArg)
	delete(unattachedVertices, farthestFromFarthest)
	distanceToMidpoint[farthestFromFarthest] = farthestFromFarthest.DistanceTo(midpoint)

	// 3. Created edges 1 -> 2 and 2 -> 1
	circuitEdges = append(circuitEdges, farthestFromMid.EdgeTo(farthestFromFarthest))
	circuitEdges = append(circuitEdges, farthestFromFarthest.EdgeTo(farthestFromMid))

	// 4. Initialize the closestEdges map which will be used to find the exterior point farthest from its closest edge.
	//    For the third point only, we can simplify this since both edges are the same (but flipped).
	//    When the third point is inserted it will determine whether our vertices are ordered clockwise or counter-clockwise.
	//    For the 3D version of this algorithm we will not deliberately select CW vs CCW.
	for vertex := range unattachedVertices {
		v3d := vertex.(*Vertex3D)
		e3d := circuitEdges[0].(*Edge3D)
		exteriorClosestEdges[vertex] = &model.DistanceToEdge{
			Edge:     e3d,
			Distance: v3d.DistanceToEdge(e3d),
			Vertex:   v3d,
		}
		distanceToMidpoint[v3d] = v3d.DistanceTo(midpoint)
	}

	// 5. Find the exterior point farthest from its closest edge.
	//    Split the closest edge by adding the point to it, and consequently to the perimeter.
	//    Check all remaining exterior points to see if they are now interior points, and update the model as appropriate.
	//    Repeat until all points are interior or perimeter points.
	//    Complexity: This step in O(N^2) because it iterates once per vertex in the concave perimeter (N iterations) and for each of those iterations it:
	//    1. looks at each exterior point to find farthest from its closest point (O(N)); and then
	//    2. updates each exterior point that had the split edge as its closest edge (O(N)).
	for len(exteriorClosestEdges) > 0 {
		farthestFromClosestEdge := &model.DistanceToEdge{
			Distance: 0.0,
		}
		for _, closest := range exteriorClosestEdges {
			if closest.Distance > farthestFromClosestEdge.Distance {
				farthestFromClosestEdge = closest
			}
		}

		circuitEdges, _ = model.SplitEdge(circuitEdges, farthestFromClosestEdge.Edge, farthestFromClosestEdge.Vertex)
		delete(unattachedVertices, farthestFromClosestEdge.Vertex)
		delete(exteriorClosestEdges, farthestFromClosestEdge.Vertex)

		for v := range unattachedVertices {
			// If the vertex was previously an exterior point and the edge closest to it was split, it could now be an interior point.
			if closest, wasExterior := exteriorClosestEdges[v]; wasExterior {
				// Update the closest edge to this point. For 3 dimensions, need to check all edges.
				v3d := v.(*Vertex3D)
				newClosest := &model.DistanceToEdge{
					Vertex:   v3d,
					Distance: math.MaxFloat64,
				}
				for _, edge := range circuitEdges {
					e3d := edge.(*Edge3D)
					dist := v3d.DistanceToEdge(e3d)
					if dist < newClosest.Distance {
						newClosest.Distance = dist
						newClosest.Edge = e3d
					}
				}

				// If the vertex is now interior, stop tracking its closest edge (until the convex perimeter is fully constructed) and add it to the interior edge list.
				// Otherwise, it is still exterior, so update its closest edge.
				if newClosest.Edge != closest.Edge && isInterior(v, newClosest.Edge, midpoint, distanceToMidpoint[v]) {
					delete(exteriorClosestEdges, v)
				} else {
					exteriorClosestEdges[v] = newClosest
				}
			}
		}
	}

	return circuitEdges, unattachedVertices
}
// isInterior reports whether vertex v should be treated as an interior
// point relative to its closest perimeter edge: v counts as interior when
// its projection onto that edge lies farther from the midpoint than v
// itself does (distanceToMidpoint is v's precomputed distance to midpoint).
func isInterior(v model.CircuitVertex, closestEdge model.CircuitEdge, midpoint *Vertex3D, distanceToMidpoint float64) bool {
	vertex := v.(*Vertex3D)
	edge := closestEdge.(*Edge3D)
	return vertex.ProjectToEdge(edge).DistanceTo(midpoint) > distanceToMidpoint
}
// Compile-time assertion that BuildPerimiter satisfies model.PerimeterBuilder.
var _ model.PerimeterBuilder = BuildPerimiter
http://arxiv.org/pdf/1505.00062.pdf
*/
package mpchash
import (
"math"
"sort"
)
// Multi selects buckets with a multi-probe consistent hash
type Multi struct {
	buckets []string
	seeds   [2]uint64
	hashf   func(b []byte, s uint64) uint64 // seeded hash function shared by probes and buckets
	k       int                             // number of probes generated per key
	bmap    map[uint64]string               // bucket hash (seed 0) -> bucket name
	// We store sorted slices of hashes by bit prefix
	bhashes     [][]uint64
	prefixmask  uint64 // mask selecting the top prefix bits of a hash
	prefixshift uint64 // shift turning a masked hash into a bhashes index
}
// New returns a new multi-probe hasher. The hash function h is used with the
// two seeds to generate k different probes per key.
func New(buckets []string, h func(b []byte, s uint64) uint64, seeds [2]uint64, k int) *Multi {
	m := &Multi{
		buckets: make([]string, len(buckets)),
		hashf:   h,
		seeds:   seeds,
		bmap:    make(map[uint64]string, len(buckets)),
		k:       k,
	}
	copy(m.buckets, buckets)

	// Size the prefix table so each slot holds roughly six bucket hashes.
	const desiredCollisionRate = 6
	psize := ilog2(len(buckets) / desiredCollisionRate)
	m.prefixshift = 64 - psize
	m.prefixmask = ((1 << psize) - 1) << m.prefixshift
	m.bhashes = make([][]uint64, 1<<psize)

	// Bucket hashes are computed with seed 0 (not the probe seeds).
	for _, bucket := range m.buckets {
		bh := m.hashf([]byte(bucket), 0)
		slot := (bh & m.prefixmask) >> m.prefixshift
		m.bhashes[slot] = append(m.bhashes[slot], bh)
		m.bmap[bh] = bucket
	}
	for _, hs := range m.bhashes {
		sort.Sort(uint64Slice(hs))
	}
	return m
}
// Hash returns the bucket for a given key
func (m *Multi) Hash(key string) string {
	bkey := []byte(key)
	// Track the bucket hash with the smallest clockwise distance from any probe.
	minDistance := uint64(math.MaxUint64)
	var minhash uint64
	// Double hashing: probe i is h1 + i*h2.
	h1 := m.hashf(bkey, m.seeds[0])
	h2 := m.hashf(bkey, m.seeds[1])
	for i := 0; i < m.k; i++ {
		hash := h1 + uint64(i)*h2
		prefix := (hash & m.prefixmask) >> m.prefixshift
		var node uint64
	FOUND:
		for {
			// Successor search: within the prefix slot, bucket hashes are
			// sorted, so the first one greater than the probe is its successor.
			uints := m.bhashes[prefix]
			for _, v := range uints {
				if hash < v {
					node = v
					break FOUND
				}
			}
			// No successor in this slot; advance to the next prefix slot.
			prefix++
			if prefix == uint64(len(m.bhashes)) {
				prefix = 0
				// wrapped -- take the first node hash we can find
				for uints = nil; uints == nil; prefix++ {
					uints = m.bhashes[prefix]
				}
				node = uints[0]
				break FOUND
			}
		}
		// Unsigned subtraction yields the correct circular distance even in
		// the wrapped case (node < hash).
		distance := node - hash
		if distance < minDistance {
			minDistance = distance
			minhash = node
		}
	}
	return m.bmap[minhash]
}
// uint64Slice implements sort.Interface for []uint64; it is used by New to
// keep each prefix slot's bucket hashes sorted.
type uint64Slice []uint64

func (p uint64Slice) Len() int           { return len(p) }
func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// ilog2 returns the number of significant bits in v, i.e. floor(log2(v))+1
// for v > 0 and 0 for v == 0.
func ilog2(v int) uint64 {
	bits := uint64(0)
	for v != 0 {
		bits++
		v >>= 1
	}
	return bits
}
package astmodel
import "github.com/pkg/errors"
// PropertyInjector is a utility for injecting property definitions into resources and objects
type PropertyInjector struct {
	// visitor performs the actual injection; it dispatches on the visited
	// type (object vs resource) and receives the property to inject as its
	// context argument.
	visitor TypeVisitor
}
// NewPropertyInjector creates a new property injector for modifying resources and objects
func NewPropertyInjector() *PropertyInjector {
	injector := &PropertyInjector{}
	injector.visitor = TypeVisitorBuilder{
		VisitObjectType:   injector.injectPropertyIntoObject,
		VisitResourceType: injector.injectPropertyIntoResource,
	}.Build()
	return injector
}
// Inject returns a copy of the passed type definition with the passed
// property injected into it, wrapping any visitor error with context.
func (pi *PropertyInjector) Inject(def TypeDefinition, prop *PropertyDefinition) (TypeDefinition, error) {
	visited, err := pi.visitor.VisitDefinition(def, prop)
	if err != nil {
		return TypeDefinition{}, errors.Wrapf(err, "failed to inject property %q into %q", prop.PropertyName(), def.Name())
	}
	return visited, nil
}
// injectPropertyIntoObject adds the property passed as context to the
// provided object type, failing if a property with that name already exists.
func (pi *PropertyInjector) injectPropertyIntoObject(
	_ *TypeVisitor, ot *ObjectType, ctx interface{}) (Type, error) {
	prop := ctx.(*PropertyDefinition)
	if _, exists := ot.Property(prop.PropertyName()); exists {
		return nil, errors.Errorf("already has property named %q", prop.PropertyName())
	}
	return ot.WithProperty(prop), nil
}
// injectPropertyIntoResource takes the property provided as a context and includes it on the provided resource type
func (pi *PropertyInjector) injectPropertyIntoResource(
_ *TypeVisitor, rt *ResourceType, ctx interface{}) (Type, error) {
prop := ctx.(*PropertyDefinition)
// Ensure that we don't already have a property with the same name
if _, ok := rt.Property(prop.PropertyName()); ok {
return nil, errors.Errorf("already has property named %q", prop.PropertyName())
}
return rt.WithProperty(prop), nil
} | v2/tools/generator/internal/astmodel/property_injector.go | 0.621081 | 0.411288 | property_injector.go | starcoder |
package luar
import (
"reflect"
"github.com/yuin/gopher-lua"
)
// LState is a wrapper for gopher-lua's LState. It should be used when you
// wish to have a function/method with the standard "func(*lua.LState) int"
// signature.
type LState struct {
	*lua.LState
}
// Cached reflection types used to recognize "bypass" function signatures
// and special return values. Initialized directly instead of via init(),
// which expresses the same one-time setup without an init function.
var (
	refTypeLStatePtr      = reflect.TypeOf(&LState{})
	refTypeLuaLValueSlice = reflect.TypeOf([]lua.LValue{})
	refTypeLuaLValue      = reflect.TypeOf((*lua.LValue)(nil)).Elem()
	refTypeInt            = reflect.TypeOf(int(0))
)
func funcIsBypass(t reflect.Type) bool {
if t.NumIn() == 1 && t.NumOut() == 1 && t.In(0) == refTypeLStatePtr && t.Out(0) == refTypeInt {
return true
}
if t.NumIn() == 2 && t.NumOut() == 1 && t.In(1) == refTypeLStatePtr && t.Out(0) == refTypeInt {
return true
}
return false
}
// funcEvaluate calls the Go function fn with arguments converted from the
// lua stack and pushes its results back, returning the number of lua return
// values. Functions matching a bypass signature (see funcIsBypass) are
// instead handed the wrapped *LState and manage the stack themselves.
func funcEvaluate(L *lua.LState, fn reflect.Value) int {
	fnType := fn.Type()
	if funcIsBypass(fnType) {
		luarState := LState{L}
		args := make([]reflect.Value, 0, 2)
		if fnType.NumIn() == 2 {
			// Method form: convert the first lua argument to the receiver
			// type and remove it from the stack before delegating.
			receiverHint := fnType.In(0)
			receiver := lValueToReflect(L.Get(1), receiverHint)
			if receiver.Type() != receiverHint {
				L.RaiseError("incorrect receiver type")
			}
			args = append(args, receiver)
			L.Remove(1)
		}
		args = append(args, reflect.ValueOf(&luarState))
		return fn.Call(args)[0].Interface().(int)
	}
	// Validate the argument count against the Go signature (variadic
	// functions accept expected-1 or more arguments).
	top := L.GetTop()
	expected := fnType.NumIn()
	variadic := fnType.IsVariadic()
	if !variadic && top != expected {
		L.RaiseError("invalid number of function argument (%d expected, got %d)", expected, top)
	}
	if variadic && top < expected-1 {
		L.RaiseError("invalid number of function argument (%d or more expected, got %d)", expected-1, top)
	}
	args := make([]reflect.Value, top)
	for i := 0; i < L.GetTop(); i++ {
		var hint reflect.Type
		if variadic && i >= expected-1 {
			// Arguments in the variadic tail convert to the element type.
			hint = fnType.In(expected - 1).Elem()
		} else {
			hint = fnType.In(i)
		}
		args[i] = lValueToReflect(L.Get(i+1), hint)
	}
	ret := fn.Call(args)
	// A single []lua.LValue result is pushed element-by-element unchanged.
	if len(ret) == 1 && ret[0].Type() == refTypeLuaLValueSlice {
		values := ret[0].Interface().([]lua.LValue)
		for _, value := range values {
			L.Push(value)
		}
		return len(values)
	}
	// Otherwise wrap each Go return value for lua.
	for _, val := range ret {
		L.Push(New(L, val.Interface()))
	}
	return len(ret)
}
func funcWrapper(L *lua.LState, fn reflect.Value) *lua.LFunction {
wrapper := func(L *lua.LState) int {
return funcEvaluate(L, fn)
}
return L.NewFunction(wrapper)
} | Godeps/_workspace/src/github.com/layeh/gopher-luar/func.go | 0.543106 | 0.406597 | func.go | starcoder |
package trie
import (
"bytes"
"fmt"
)
// New returns a new, empty trie tree.
func New() *Trie {
	return &Trie{root: &Node{sons: map[byte]*Node{}}}
}

// Node is a single node in the trie. data is the byte on the edge leading
// to this node, and isEnd marks the end of an inserted word.
type Node struct {
	data  byte
	isEnd bool
	sons  map[byte]*Node
}

// Trie is a byte-wise prefix tree supporting insertion, lookup, and
// prefix completion.
type Trie struct {
	root *Node
}

// Insert adds a word to the trie. Inserting the empty string is a no-op.
func (t *Trie) Insert(word string) {
	if len(word) == 0 {
		return
	}
	n := t.root
	for i := 0; i < len(word); i++ {
		child, ok := n.sons[word[i]]
		if !ok {
			child = &Node{data: word[i], sons: map[byte]*Node{}}
			n.sons[word[i]] = child
		}
		n = child
	}
	n.isEnd = true
}

// Find reports whether word was inserted into the trie and returns the node
// reached by walking word (nil when no such prefix exists). The node is
// returned even when word is only a prefix of inserted words.
func (t *Trie) Find(word string) (bool, *Node) {
	if len(word) == 0 {
		return false, nil
	}
	n := t.root
	for i := 0; i < len(word); i++ {
		n = n.sons[word[i]]
		if n == nil {
			return false, nil
		}
	}
	return n.isEnd, n
}

// Completion returns all inserted words that strictly extend word
// (word itself is not included). Order is unspecified because children
// are stored in maps.
func (t *Trie) Completion(word string) []string {
	words := []string{}
	_, n := t.Find(word)
	if n == nil {
		return words
	}
	return t.completion(n, word, words)
}

// completion recursively appends all words below n to words; preWord is the
// word spelled by the path from the root to n.
func (t *Trie) completion(n *Node, preWord string, words []string) []string {
	for _, child := range n.sons {
		childWord := preWord + string(child.data)
		if child.isEnd {
			words = append(words, childWord)
		}
		words = t.completion(child, childWord, words)
	}
	return words
}

// Completion2 behaves like Completion but uses a callback-based traversal
// instead of threading slices through the recursion.
func (t *Trie) Completion2(word string) []string {
	words := []string{}
	_, n := t.Find(word)
	if n == nil {
		return words
	}
	wordHash := map[*Node]string{n: word}
	collect := func(parent, child *Node) {
		wordHash[child] = wordHash[parent] + string(child.data)
		if child.isEnd {
			words = append(words, wordHash[child])
		}
	}
	t.completion2(n, collect)
	return words
}

// completion2 walks the subtree below n in pre-order, invoking do on each
// (parent, child) pair.
func (t *Trie) completion2(n *Node, do func(*Node, *Node)) {
	for _, child := range n.sons {
		do(n, child)
		t.completion2(child, do)
	}
}

// preOrder visits every node below n in pre-order, invoking do on each.
func (t *Trie) preOrder(n *Node, do func(*Node)) {
	for _, child := range n.sons {
		do(child)
		t.preOrder(child, do)
	}
}

// String renders the trie's node bytes in pre-order for debugging.
func (t *Trie) String() string {
	var buffer bytes.Buffer
	buffer.WriteString("Trie Tree:")
	buffer.WriteString("data = [")
	t.preOrder(t.root, func(n *Node) {
		buffer.WriteString(fmt.Sprintf("%q, ", n.data))
	})
	buffer.WriteString("]")
	return buffer.String()
}
package layout
import (
"image"
"gioui.org/op"
)
// Stack lays out child elements on top of each other,
// according to an alignment direction. Stacked children are measured
// first; Expanded children then receive the resulting maximum size as
// their minimum constraint (see Layout).
type Stack struct {
	// Alignment is the direction to align children
	// smaller than the available space.
	Alignment Direction
}
// StackChild represents a child for a Stack layout.
type StackChild struct {
	expanded bool   // true for children created with Expanded
	widget   Widget // the child widget to lay out
	// Scratch space used by Layout to carry each child's recorded draw
	// operations and dimensions from the measuring pass to the drawing pass.
	call op.CallOp
	dims Dimensions
}
// Stacked returns a Stack child that is laid out with no minimum
// constraints and the maximum constraints passed to Stack.Layout.
func Stacked(w Widget) StackChild {
	return StackChild{widget: w}
}
// Expanded returns a Stack child with the minimum constraints set
// to the largest Stacked child. The maximum constraints are set to
// the same as passed to Stack.Layout.
func Expanded(w Widget) StackChild {
	return StackChild{expanded: true, widget: w}
}
// Layout a stack of children. The position of the children are
// determined by the specified order, but Stacked children are laid out
// before Expanded children.
func (s Stack) Layout(gtx Context, children ...StackChild) Dimensions {
	var maxSZ image.Point
	// First lay out Stacked children.
	cgtx := gtx
	cgtx.Constraints.Min = image.Point{}
	for i, w := range children {
		if w.expanded {
			continue
		}
		// Record the child's ops so it can be replayed later at its
		// alignment-dependent offset.
		macro := op.Record(gtx.Ops)
		dims := w.widget(cgtx)
		call := macro.Stop()
		if w := dims.Size.X; w > maxSZ.X {
			maxSZ.X = w
		}
		if h := dims.Size.Y; h > maxSZ.Y {
			maxSZ.Y = h
		}
		children[i].call = call
		children[i].dims = dims
	}
	// Then lay out Expanded children.
	for i, w := range children {
		if !w.expanded {
			continue
		}
		macro := op.Record(gtx.Ops)
		// Expanded children must be at least as large as the largest
		// Stacked child measured so far.
		cgtx.Constraints.Min = maxSZ
		dims := w.widget(cgtx)
		call := macro.Stop()
		if w := dims.Size.X; w > maxSZ.X {
			maxSZ.X = w
		}
		if h := dims.Size.Y; h > maxSZ.Y {
			maxSZ.Y = h
		}
		children[i].call = call
		children[i].dims = dims
	}
	maxSZ = gtx.Constraints.Constrain(maxSZ)
	var baseline int
	// Draw every child at the offset dictated by the alignment.
	for _, ch := range children {
		sz := ch.dims.Size
		var p image.Point
		switch s.Alignment {
		case N, S, Center:
			p.X = (maxSZ.X - sz.X) / 2
		case NE, SE, E:
			p.X = maxSZ.X - sz.X
		}
		switch s.Alignment {
		case W, Center, E:
			p.Y = (maxSZ.Y - sz.Y) / 2
		case SW, S, SE:
			p.Y = maxSZ.Y - sz.Y
		}
		trans := op.Offset(FPt(p)).Push(gtx.Ops)
		ch.call.Add(gtx.Ops)
		trans.Pop()
		// The first child reporting a baseline determines the stack's
		// baseline, translated into stack coordinates.
		if baseline == 0 {
			if b := ch.dims.Baseline; b != 0 {
				baseline = b + maxSZ.Y - sz.Y - p.Y
			}
		}
	}
	return Dimensions{
		Size:     maxSZ,
		Baseline: baseline,
	}
}
package main
import (
"github.com/ethereum/go-ethereum/p2p/enode"
)
// TreeNode is a node in a binary radix tree over enode IDs: either a
// LeafNode holding a single enode record, or a PairNode with up to two
// children.
type TreeNode interface {
	// Depth returns the number of ID bits fixed on the path to this node.
	Depth() uint
	// ID returns the node's ID (prefix-clipped for interior nodes).
	ID() enode.ID
	// Score returns the node's score (for pairs, the sum of child scores).
	Score() float64
	// SubtreeSize returns the number of leaves in this subtree.
	SubtreeSize() uint
	// Add a node to the tree, return updated tree root, and ok == true if the node didn't already exist
	Add(n *enode.Node) (updated TreeNode, ok bool)
	// Search for closest leaf nodes (log distance) and append to out,
	// maximum to the capacity of the out slice
	Search(target enode.ID, out []TreeNode) []TreeNode
	// Weakest finds the content with the weakest score at given tree depth
	Weakest(depth uint) TreeNode
}
// LeafNode is a terminal tree node holding a single enode record.
type LeafNode struct {
	depth uint    // number of ID bits fixed on the path to this leaf
	score float64 // score assigned to this leaf's record
	self  *enode.Node
}
// bitCheck reports whether bit number bitIndex of id is set. Bytes are
// indexed by bitIndex/8 and bits within a byte are numbered LSB-first,
// matching the low-bit masking convention used by clip.
func bitCheck(id enode.ID, bitIndex uint) bool {
	// The shift must be reduced modulo 8: shifting a byte-typed 1 by the
	// raw bitIndex yields 0 for any bitIndex >= 8 (Go shifts past the
	// operand width produce 0), which made the previous check always
	// false below depth 8.
	return id[bitIndex>>3]&(1<<(bitIndex&7)) != 0
}
// clip zeroes out the bits of id at positions >= depth (LSB-first within
// each byte), returning the clipped copy. A depth at or beyond the full ID
// width leaves id unchanged.
func clip(id enode.ID, depth uint) enode.ID {
	i := depth >> 3
	if i >= uint(len(id)) {
		// depth covers the entire ID; nothing to clear.
		return id
	}
	// Keep only the low (depth % 8) bits of the boundary byte...
	id[i] &= (1 << (byte(depth) & 7)) - 1
	// ...and zero every byte after it. Starting at i+1 preserves the
	// boundary byte just masked (the loop previously started at i, which
	// immediately clobbered it).
	for j := i + 1; j < uint(len(id)); j++ {
		id[j] = 0
	}
	return id
}
// Depth returns the number of ID bits fixed on the path to this leaf.
func (leaf *LeafNode) Depth() uint {
	return leaf.depth
}

// ID returns the full node ID of the enode stored in this leaf.
func (leaf *LeafNode) ID() enode.ID {
	return leaf.self.ID()
}

// Score returns the score assigned to this leaf.
func (leaf *LeafNode) Score() float64 {
	return leaf.score
}

// SubtreeSize returns 1: a leaf is a subtree of exactly one node.
func (*LeafNode) SubtreeSize() uint {
	return 1
}
// Add splits this leaf into a PairNode containing both this leaf's record
// and other. When other has the same ID as this leaf, the tree is left
// unchanged and ok is false.
func (leaf *LeafNode) Add(other *enode.Node) (updated TreeNode, ok bool) {
	if other.ID() == leaf.ID() {
		return leaf, false
	}
	branch := &PairNode{depth: leaf.depth, id: clip(leaf.ID(), leaf.depth)}
	branch.Add(leaf.self)
	branch.Add(other)
	return branch, true
}
// Search appends this leaf to out unless out has already reached capacity.
func (leaf *LeafNode) Search(target enode.ID, out []TreeNode) []TreeNode {
	if len(out) < cap(out) {
		out = append(out, leaf)
	}
	return out
}

// Weakest returns the leaf itself; there are no children to descend into.
func (leaf *LeafNode) Weakest(depth uint) TreeNode {
	return leaf
}
// PairNode is an interior node covering the ID prefix of length depth held
// in id. score caches the sum of the children's scores and subtreeSize
// counts the leaves beneath this node (both refreshed by Add).
type PairNode struct {
	depth       uint
	score       float64
	subtreeSize uint
	// Bits after depth index are zeroed
	id enode.ID
	// left and right are never nil at the same time
	// May be nil (pair node as extension node)
	left TreeNode
	// May be nil (pair node as extension node)
	right TreeNode
}
// Depth returns the number of ID bits this pair's prefix fixes.
func (pair *PairNode) Depth() uint {
	return pair.depth
}

// ID returns the pair's prefix ID (bits beyond depth are zero).
func (pair *PairNode) ID() enode.ID {
	return pair.id
}

// Score returns the cached sum of the children's scores.
func (pair *PairNode) Score() float64 {
	return pair.score
}

// SubtreeSize returns the number of leaves beneath this pair.
func (pair *PairNode) SubtreeSize() uint {
	return pair.subtreeSize
}
// Add inserts n into the subtree rooted at this pair, choosing the child by
// bit number depth of n's ID. It returns the pair itself and ok == true when
// n was not already present; on success the subtree size and cached score
// are refreshed.
// NOTE(review): the first comparison checks a clipped prefix ID against a
// full node ID — a node whose ID equals the prefix with all trailing bits
// zero would be rejected here; confirm that is intended.
func (pair *PairNode) Add(n *enode.Node) (updated TreeNode, ok bool) {
	if pair.ID() == n.ID() {
		return pair, false
	}
	if bitCheck(n.ID(), pair.depth) {
		// Bit set: the node belongs in the right subtree.
		if pair.right != nil {
			pair.right, ok = pair.right.Add(n)
		} else {
			pair.right = &LeafNode{
				depth: pair.depth + 1,
				score: 0,
				self:  n,
			}
			ok = true
		}
	} else {
		// Bit clear: the node belongs in the left subtree.
		if pair.left != nil {
			pair.left, ok = pair.left.Add(n)
		} else {
			pair.left = &LeafNode{
				depth: pair.depth + 1,
				score: 0,
				self:  n,
			}
			ok = true
		}
	}
	if ok {
		// A node was added somewhere below: refresh the cached aggregates.
		pair.subtreeSize += 1
		pair.score = 0
		if pair.left != nil {
			pair.score += pair.left.Score()
		}
		if pair.right != nil {
			pair.score += pair.right.Score()
		}
	}
	return pair, ok
}
// Search descends toward target, visiting the child on target's side of the
// prefix bit first and the sibling only while out still has spare capacity.
func (pair *PairNode) Search(target enode.ID, out []TreeNode) []TreeNode {
	if len(out) == cap(out) {
		return out
	}
	switch {
	case pair.left == nil:
		return pair.right.Search(target, out)
	case pair.right == nil:
		return pair.left.Search(target, out)
	}
	primary, secondary := pair.left, pair.right
	if bitCheck(target, pair.depth) {
		primary, secondary = secondary, primary
	}
	out = primary.Search(target, out)
	if len(out) < cap(out) {
		out = secondary.Search(target, out)
	}
	return out
}
func (pair *PairNode) Weakest(depth uint) TreeNode {
if depth > pair.depth {
if pair.left == nil || (pair.right.Score() > pair.left.Score()) {
return pair.right.Weakest(depth)
}
return pair.left.Weakest(depth)
}
return pair
} | tree.go | 0.680242 | 0.482002 | tree.go | starcoder |
package sets
import "sort"
// Set is an unordered collection of unique strings, backed by a map with
// zero-byte struct{} values. Mutating methods return the receiver so calls
// can be chained.
type Set map[string]struct{}

// New returns a new empty Set.
func New() Set {
	return make(Set)
}

// NewWithLength returns an empty Set presized for l elements.
func NewWithLength(l int) Set {
	return make(Set, l)
}

// NewWith creates a new Set containing the given items.
func NewWith(items ...string) Set {
	return NewWithLength(len(items)).InsertAll(items...)
}

// Insert adds the item to the set.
func (s Set) Insert(item string) Set {
	s[item] = struct{}{}
	return s
}

// InsertAll adds items to the set.
func (s Set) InsertAll(items ...string) Set {
	for _, item := range items {
		s[item] = struct{}{}
	}
	return s
}

// Delete removes the item from the set.
func (s Set) Delete(item string) Set {
	delete(s, item)
	return s
}

// DeleteAll removes items from the set.
func (s Set) DeleteAll(items ...string) Set {
	for _, item := range items {
		delete(s, item)
	}
	return s
}

// Merge adds every element of s2 into s (mutating s, unlike Union).
// For example:
// s = {a1, a2, a3}
// s2 = {a3, a4, a5}
// s.Merge(s2) = {a1, a2, a3, a4, a5}
func (s Set) Merge(s2 Set) Set {
	for item := range s2 {
		s[item] = struct{}{}
	}
	return s
}

// Copy returns a new set with the same elements as this set.
func (s Set) Copy() Set {
	// Presize the result to avoid rehashing while copying.
	result := NewWithLength(len(s))
	for key := range s {
		result.Insert(key)
	}
	return result
}

// Union returns a new set of objects that are in s or s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s.Union(s2) = s2.Union(s) = {a1, a2, a3, a4, a5}
func (s Set) Union(s2 Set) Set {
	// Presize for the worst case (disjoint sets).
	result := NewWithLength(len(s) + len(s2))
	for key := range s {
		result.Insert(key)
	}
	for key := range s2 {
		result.Insert(key)
	}
	return result
}

// Difference returns a set of objects that are in s but not in s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s.Difference(s2) = {a3}
// s2.Difference(s) = {a4, a5}
func (s Set) Difference(s2 Set) Set {
	result := New()
	for key := range s {
		if !s2.Contains(key) {
			result.Insert(key)
		}
	}
	return result
}

// Intersection returns a set of objects that are common between s and s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s.Intersection(s2) = {a1, a2}
func (s Set) Intersection(s2 Set) Set {
	// Iterate over the smaller set; membership tests go to the larger one.
	small, large := s, s2
	if len(small) > len(large) {
		small, large = large, small
	}
	result := New()
	for key := range small {
		if large.Contains(key) {
			result.Insert(key)
		}
	}
	return result
}

// SupersetOf returns true if s contains all elements of s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a3, a4, a5}
// s.SupersetOf(s2) = false
// s2.SupersetOf(s) = true
func (s Set) SupersetOf(s2 Set) bool {
	return s2.Difference(s).IsEmpty()
}

// UnsortedList returns the slice with contents in random order.
func (s Set) UnsortedList() []string {
	res := make([]string, 0, s.Len())
	for key := range s {
		res = append(res, key)
	}
	return res
}

// SortedList returns the slice with contents sorted.
func (s Set) SortedList() []string {
	res := s.UnsortedList()
	sort.Strings(res)
	return res
}

// Contains returns whether the given item is in the set.
func (s Set) Contains(item string) bool {
	_, ok := s[item]
	return ok
}

// Equals checks whether the given set is equal to the current set.
func (s Set) Equals(other Set) bool {
	if s.Len() != other.Len() {
		return false
	}
	for key := range s {
		if !other.Contains(key) {
			return false
		}
	}
	return true
}

// Len returns the number of elements in this Set.
func (s Set) Len() int {
	return len(s)
}

// IsEmpty indicates whether the set is the empty set.
func (s Set) IsEmpty() bool {
	return len(s) == 0
}

// Diff takes a pair of Sets, and returns the elements that occur only on the
// left and right set respectively (order within each slice is unspecified).
func (s Set) Diff(other Set) (left []string, right []string) {
	for k := range s {
		if _, f := other[k]; !f {
			left = append(left, k)
		}
	}
	for k := range other {
		if _, f := s[k]; !f {
			right = append(right, k)
		}
	}
	return
}
package backoff
import (
"math"
"time"
)
// Algorithm defines a function that calculates a time.Duration based on
// the given retry attempt number. The implementations in this package
// treat attempt as zero-based: attempt 0 yields the initial/base delay.
type Algorithm func(attempt uint) time.Duration
// Constant creates an Algorithm that yields the same duration for
// every attempt.
func Constant(duration time.Duration) Algorithm {
	return func(_ uint) time.Duration {
		return duration
	}
}
// Incremental creates an Algorithm that starts at initial and grows by
// increment on each successive attempt.
func Incremental(initial, increment time.Duration) Algorithm {
	return func(attempt uint) time.Duration {
		return initial + increment*time.Duration(attempt)
	}
}
// Linear creates an Algorithm whose delay grows linearly: factor
// multiplied by the attempt number.
func Linear(factor time.Duration) Algorithm {
	return func(attempt uint) time.Duration {
		return factor * time.Duration(attempt)
	}
}
// Exponential creates an Algorithm whose delay is factor scaled by base
// raised to the attempt number.
func Exponential(factor time.Duration, base float64) Algorithm {
	return func(attempt uint) time.Duration {
		scale := time.Duration(math.Pow(base, float64(attempt)))
		return factor * scale
	}
}
// BinaryExponential creates an Algorithm that multiplies the factor
// duration by 2 raised to the attempt number (2^attempt).
func BinaryExponential(factor time.Duration) Algorithm {
	return func(attempt uint) time.Duration {
		return factor * time.Duration(math.Pow(2, float64(attempt)))
	}
}
// Fibonacci creates an Algorithm whose delay for attempt n is factor
// multiplied by the n-th Fibonacci number.
func Fibonacci(factor time.Duration) Algorithm {
	return func(attempt uint) time.Duration {
		return time.Duration(fibonacciNumber(attempt)) * factor
	}
}
// fibonacciNumber returns the n-th Fibonacci number (0, 1, 1, 2, 3, 5, ...)
// computed iteratively.
func fibonacciNumber(n uint) uint {
	if n == 0 {
		return 0
	}
	prev, cur := uint(0), uint(1)
	for i := uint(1); i < n; i++ {
		prev, cur = cur, prev+cur
	}
	return cur
}
package match
// MatchArr scores every entry of data against pattern using matchWeight and
// returns the entries with a positive score, best match first (descending
// weight). The limit parameter is accepted for backward compatibility but
// is currently unused.
//
// Internally a min-heap keyed by weight is built (with a zero-weight
// sentinel at index 0) and then heap-sorted. The previous sift-down never
// handled the case weight[left] > weight[right] — its right-child branch
// was unreachable — so the heap property could be violated and the output
// mis-ordered; the sink below handles both children correctly.
func MatchArr(pattern string, data []string, limit ...int) []string {
	weights := []float64{0}
	res := []string{""}
	for _, content := range data {
		w := matchWeight(pattern, content)
		if w <= 0 {
			continue
		}
		weights = append(weights, w)
		res = append(res, content)
		// Sift the new entry up until the min-heap property holds.
		for j := len(weights) - 1; j > 1; {
			parent := j / 2
			if weights[parent] <= weights[j] {
				break
			}
			weights[j], weights[parent] = weights[parent], weights[j]
			res[j], res[parent] = res[parent], res[j]
			j = parent
		}
	}
	// Heapsort: repeatedly move the minimum to the end of the shrinking
	// heap, leaving res[1:] ordered by descending weight.
	for end := len(weights) - 1; end > 1; end-- {
		weights[1], weights[end] = weights[end], weights[1]
		res[1], res[end] = res[end], res[1]
		j := 1
		for {
			child := j * 2
			if child >= end {
				break
			}
			// Pick the smaller of the two children.
			if child+1 < end && weights[child+1] < weights[child] {
				child++
			}
			if weights[j] <= weights[child] {
				break
			}
			weights[j], weights[child] = weights[child], weights[j]
			res[j], res[child] = res[child], res[j]
			j = child
		}
	}
	return res[1:]
}
// matchWeight scores how well content fuzzy-matches pattern by scanning
// content left to right for pattern's bytes in order (e.g. "ab" matches
// "axxxxb"). Each matched byte contributes a position-dependent weight.
// Returns 0 when pattern's bytes cannot all be found in order.
func matchWeight(pattern string, content string) (weight float64) {
	np := len(pattern)
	nc := len(content)
	i, j := 0, 0
	for i < np && j < nc {
		if pattern[i] == content[j] {
			weight += float64(nc-j) / float64(nc-i) / float64(nc)
			i++
		}
		j++
	}
	if i < np {
		// Not every pattern byte was matched.
		return 0
	}
	return weight
}
package types
import (
"fmt"
"math"
"time"
"github.com/pingcap/errors"
)
// AddUint64 adds uint64 a and b if no overflow, else returns error.
func AddUint64(a uint64, b uint64) (uint64, error) {
	// a + b overflows exactly when b exceeds the headroom above a.
	if math.MaxUint64-a < b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a + b, nil
}

// AddInt64 adds int64 a and b if no overflow, otherwise returns error.
func AddInt64(a int64, b int64) (int64, error) {
	// Overflow only possible when both operands share a sign: positive sum
	// exceeding MaxInt64 or negative sum below MinInt64.
	if (a > 0 && b > 0 && math.MaxInt64-a < b) ||
		(a < 0 && b < 0 && math.MinInt64-a > b) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a + b, nil
}

// AddDuration adds time.Duration a and b if no overflow, otherwise returns error.
// Same bounds check as AddInt64 since time.Duration is an int64.
func AddDuration(a time.Duration, b time.Duration) (time.Duration, error) {
	if (a > 0 && b > 0 && math.MaxInt64-a < b) ||
		(a < 0 && b < 0 && math.MinInt64-a > b) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", int64(a), int64(b)))
	}
	return a + b, nil
}

// SubDuration subtracts time.Duration a with b and returns time.Duration if no overflow error.
func SubDuration(a time.Duration, b time.Duration) (time.Duration, error) {
	// Overflow when subtracting opposite signs pushes past the limits; the
	// a == 0, b == MinInt64 case is special because -MinInt64 is not
	// representable.
	if (a > 0 && b < 0 && math.MaxInt64-a < -b) ||
		(a < 0 && b > 0 && math.MinInt64-a > -b) ||
		(a == 0 && b == math.MinInt64) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a - b, nil
}
// AddInteger adds uint64 a and int64 b and returns uint64 if no overflow error.
func AddInteger(a uint64, b int64) (uint64, error) {
	if b >= 0 {
		return AddUint64(a, uint64(b))
	}
	// b < 0: uint64(-b) yields the correct magnitude even for
	// b == math.MinInt64 (the int64 negation wraps, but the uint64
	// conversion still produces 2^63).
	if uint64(-b) > a {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a - uint64(-b), nil
}

// SubUint64 subtracts uint64 a with b and returns uint64 if no overflow error.
func SubUint64(a uint64, b uint64) (uint64, error) {
	// Unsigned subtraction underflows exactly when a < b.
	if a < b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a - b, nil
}

// SubInt64 subtracts int64 a with b and returns int64 if no overflow error.
func SubInt64(a int64, b int64) (int64, error) {
	// Mirrors SubDuration: opposite-sign subtraction can overflow, and
	// 0 - MinInt64 is unrepresentable.
	if (a > 0 && b < 0 && math.MaxInt64-a < -b) ||
		(a < 0 && b > 0 && math.MinInt64-a > -b) ||
		(a == 0 && b == math.MinInt64) {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a - b, nil
}

// SubUintWithInt subtracts uint64 a with int64 b and returns uint64 if no overflow error.
func SubUintWithInt(a uint64, b int64) (uint64, error) {
	// a - (-|b|) == a + |b|; otherwise delegate to the unsigned check.
	if b < 0 {
		return AddUint64(a, uint64(-b))
	}
	return SubUint64(a, uint64(b))
}

// SubIntWithUint subtracts int64 a with uint64 b and returns uint64 if no overflow error.
func SubIntWithUint(a int64, b uint64) (uint64, error) {
	// A negative a or any b > a makes the unsigned result unrepresentable.
	if a < 0 || uint64(a) < b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return uint64(a) - b, nil
}
// MulUint64 multiplies uint64 a and b and returns uint64 if no overflow error.
func MulUint64(a uint64, b uint64) (uint64, error) {
	// a*b overflows exactly when a exceeds MaxUint64/b (b > 0).
	if b > 0 && a > math.MaxUint64/b {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a * b, nil
}

// MulInt64 multiplies int64 a and b and returns int64 if no overflow error.
// It works on unsigned magnitudes and reapplies the sign at the end, which
// lets it accept the asymmetric int64 range (MinInt64 has no positive
// counterpart).
func MulInt64(a int64, b int64) (int64, error) {
	if a == 0 || b == 0 {
		return 0, nil
	}
	var (
		res      uint64
		err      error
		negative = false
	)
	// Note: uint64(-x) yields the correct magnitude even for
	// x == math.MinInt64, since the wrapped negation converts to 2^63.
	if a > 0 && b > 0 {
		res, err = MulUint64(uint64(a), uint64(b))
	} else if a < 0 && b < 0 {
		res, err = MulUint64(uint64(-a), uint64(-b))
	} else if a < 0 && b > 0 {
		negative = true
		res, err = MulUint64(uint64(-a), uint64(b))
	} else {
		negative = true
		res, err = MulUint64(uint64(a), uint64(-b))
	}
	if err != nil {
		return 0, errors.Trace(err)
	}
	if negative {
		// negative result
		// A magnitude of exactly 2^63 is representable as MinInt64;
		// -int64(res) then wraps back to MinInt64, which is correct.
		if res > math.MaxInt64+1 {
			return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
		}
		return -int64(res), nil
	}
	// positive result
	if res > math.MaxInt64 {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	return int64(res), nil
}

// MulInteger multiplies uint64 a and int64 b, and returns uint64 if no overflow error.
func MulInteger(a uint64, b int64) (uint64, error) {
	if a == 0 || b == 0 {
		return 0, nil
	}
	// A nonzero product with negative b cannot be represented unsigned.
	if b < 0 {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	return MulUint64(a, uint64(b))
}
// DivInt64 divides int64 a with b, returns int64 if no overflow error.
// It just checks overflow, if b is zero, a "divide by zero" panic throws.
func DivInt64(a int64, b int64) (int64, error) {
	// MinInt64 / -1 is the single overflowing int64 division.
	if a == math.MinInt64 && b == -1 {
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	return a / b, nil
}

// DivUintWithInt divides uint64 a with int64 b, returns uint64 if no overflow error.
// It just checks overflow, if b is zero, a "divide by zero" panic throws.
func DivUintWithInt(a uint64, b int64) (uint64, error) {
	if b < 0 {
		// With |b| <= a the quotient magnitude is >= 1 and negative,
		// which an unsigned result cannot represent; otherwise the
		// truncated quotient is 0.
		if a != 0 && uint64(-b) <= a {
			return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
		}
		return 0, nil
	}
	return a / uint64(b), nil
}

// DivIntWithUint divides int64 a with uint64 b, returns uint64 if no overflow error.
// It just checks overflow, if b is zero, a "divide by zero" panic throws.
func DivIntWithUint(a int64, b uint64) (uint64, error) {
	if a < 0 {
		// A negative quotient of magnitude >= 1 is unrepresentable;
		// when |a| < b the truncated quotient is 0.
		if uint64(-a) >= b {
			return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
		}
		return 0, nil
	}
	return uint64(a) / b, nil
}
package trees
import (
"errors"
)
// BinarySearchTree defines a binary search tree
type BinarySearchTree struct {
	BinaryTree
	// size is the number of keys stored in the tree.
	// NOTE(review): size is not touched by the unexported insert/delete
	// helpers here — presumably maintained by the exported mutators
	// (e.g. Put); confirm against the full file.
	size int
}
// insert places key/value into the subtree rooted at node, returning true
// when a new node was created and false when an existing key's value was
// overwritten.
func (b *BinarySearchTree) insert(node *BinaryTreeNode, key, value interface{}) bool {
	if b.Comparator(node.Key, key) > 0 {
		if node.Left != nil {
			return b.insert(node.Left, key, value)
		}
		node.Left = &BinaryTreeNode{Key: key, Value: value}
		return true
	}
	if b.Comparator(node.Key, key) < 0 {
		if node.Right != nil {
			return b.insert(node.Right, key, value)
		}
		node.Right = &BinaryTreeNode{Key: key, Value: value}
		return true
	}
	// Equal keys: update the stored value in place.
	node.Value = value
	return false
}
// search walks the subtree rooted at node iteratively and returns the value
// stored under key, or nil when the key is absent.
func (b *BinarySearchTree) search(node *BinaryTreeNode, key interface{}) (value interface{}) {
	for node != nil {
		if b.Comparator(node.Key, key) == 0 {
			return node.Value
		}
		if b.Comparator(node.Key, key) > 0 {
			node = node.Left
		} else {
			node = node.Right
		}
	}
	return nil
}
// inorderTraverse returns the keys of the subtree rooted at node in
// in-order (sorted) order.
func (b *BinarySearchTree) inorderTraverse(node *BinaryTreeNode) []interface{} {
	if node == nil {
		return []interface{}{}
	}
	keys := b.inorderTraverse(node.Left)
	keys = append(keys, node.Key)
	return append(keys, b.inorderTraverse(node.Right)...)
}
func (b *BinarySearchTree) findMaxNodeWithParent(node *BinaryTreeNode, parent *BinaryTreeNode) (*BinaryTreeNode, *BinaryTreeNode) {
for node.Right != nil {
parent = node
node = node.Right
}
return node, parent
}
// delete recursively searches for key below node and unlinks the matching
// node by rewiring its parent's child pointer. It returns an error when the
// key is absent.
// NOTE(review): when the deleted node is the tree root with fewer than two
// children, only the caller-supplied parent (a sentinel in Delete) is
// rewired — the caller must read the new root back from that sentinel.
func (b *BinarySearchTree) delete(node *BinaryTreeNode, parent *BinaryTreeNode, key interface{}) error {
	if node == nil {
		return errors.New("could not find node")
	}
	compareRes := b.Comparator(node.Key, key)
	// Found the node, let's check how to remove it.
	if compareRes == 0 {
		// If this node is a leaf, we simply remove it.
		if node.Left == nil && node.Right == nil {
			if parent.Left == node {
				parent.Left = nil
			} else {
				parent.Right = nil
			}
		} else if node.Right == nil {
			// If this node has left child only, we point the parent to this child.
			if parent.Left == node {
				parent.Left = node.Left
			} else {
				parent.Right = node.Left
			}
		} else if node.Left == nil {
			// If this node has right child only, we point the parent to this child.
			if parent.Left == node {
				parent.Left = node.Right
			} else {
				parent.Right = node.Right
			}
		} else {
			// If this node has both children, we find the right most node in the left sub stree and replace it with node.
			// (The in-order predecessor's key/value are copied in, then the
			// predecessor is spliced out of its own parent.)
			maxNode, maxNodeParent := b.findMaxNodeWithParent(node.Left, node)
			node.Key = maxNode.Key
			node.Value = maxNode.Value
			if maxNodeParent.Left == maxNode {
				maxNodeParent.Left = maxNode.Left
			} else {
				maxNodeParent.Right = maxNode.Left
			}
		}
		return nil
	}
	if compareRes > 0 {
		return b.delete(node.Left, node, key)
	}
	return b.delete(node.Right, node, key)
}
// GetSize reports how many nodes the tree currently holds.
func (b *BinarySearchTree) GetSize() int {
	return b.size
}
// Search returns the value stored under key, or nil when the key is absent.
func (b *BinarySearchTree) Search(key interface{}) (value interface{}) {
	return b.search(b.Root, key)
}
// Put inserts a key/value pair into the tree; when the key already exists
// its value is overwritten and the size is unchanged.
func (b *BinarySearchTree) Put(key, value interface{}) {
	if b.Root == nil {
		// Empty tree: the new pair becomes the root.
		b.Root = &BinaryTreeNode{Key: key, Value: value}
		b.size++
		return
	}
	if inserted := b.insert(b.Root, key, value); inserted {
		b.size++
	}
}
// Clear drops every node, resetting the tree to empty.
func (b *BinarySearchTree) Clear() {
	b.Root = nil
	b.size = 0
}
// Delete removes the node whose key equals data from the binary search
// tree. It returns an error when no node with that key exists.
func (b *BinarySearchTree) Delete(data interface{}) error {
	// Use a sentinel parent so deleting the root is handled uniformly:
	// delete() rewires sentinel.Right when the root itself is removed
	// (the sentinel's Left is nil, so the "else" branch fires), and we
	// copy that possibly-changed pointer back into b.Root afterwards.
	// Previously the sentinel was discarded, so deleting a root with
	// fewer than two children left b.Root pointing at the detached node.
	sentinel := &BinaryTreeNode{Right: b.Root}
	err := b.delete(b.Root, sentinel, data)
	if err == nil {
		b.Root = sentinel.Right
		b.size--
	}
	return err
}
// ToSortedSlice traverse the tree and store the data into a sorted slice
// (ascending key order).
func (b *BinarySearchTree) ToSortedSlice() []interface{} {
	return b.inorderTraverse(b.Root)
}
// ConvertToDoubleLinkedList converts the BST to A Double Linked List.
// The nodes' Left/Right pointers are reused as prev/next links, so the
// tree structure is consumed in the process.
func (b *BinarySearchTree) ConvertToDoubleLinkedList() (head *BinaryTreeNode, tail *BinaryTreeNode) {
	if b.Root == nil {
		return nil, nil
	}
	b.convertToDoubleLinkedList(b.Root, &head, &tail)
	return
}
// convertToDoubleLinkedList performs an in-order walk, appending each node
// to the growing list whose ends are tracked through head/tail pointers.
func (b *BinarySearchTree) convertToDoubleLinkedList(node *BinaryTreeNode, head **BinaryTreeNode, tail **BinaryTreeNode) {
	if node.Left != nil {
		b.convertToDoubleLinkedList(node.Left, head, tail)
	}
	if (*tail) == nil {
		// First node visited becomes both head and tail.
		(*tail) = node
		(*head) = node
	} else {
		(*tail).Right = node
		node.Left = (*tail)
		(*tail) = node
	}
	if node.Right != nil {
		b.convertToDoubleLinkedList(node.Right, head, tail)
	}
}
// ConvertFromDoubleLinkedList converts a double linked list (as produced by
// ConvertToDoubleLinkedList) of the given length back into a balanced
// binary search tree rooted at b.Root.
func (b *BinarySearchTree) ConvertFromDoubleLinkedList(head *BinaryTreeNode, length int) {
	b.Root = b.convertFromDoubleLinkedList(&head, length)
}
// convertFromDoubleLinkedList rebuilds a height-balanced subtree of the
// given length in place, advancing *head as list nodes are consumed.
func (b *BinarySearchTree) convertFromDoubleLinkedList(head **BinaryTreeNode, length int) *BinaryTreeNode {
	// This means we reach the most left
	if length == 0 {
		return nil
	}
	// Otherwise, we get the root for the left subtree.
	left := b.convertFromDoubleLinkedList(head, length/2)
	root := *head
	root.Left = left
	*head = (*head).Right
	root.Right = b.convertFromDoubleLinkedList(head, length-length/2-1)
	return root
} | datastructure/trees/binary_search_tree.go | 0.777596 | 0.434161 | binary_search_tree.go | starcoder
package contextionary
import (
"fmt"
annoy "github.com/creativesoftwarefdn/weaviate/contextionary/annoyindex"
"sort"
)
// MemoryIndex is an in-memory nearest-neighbour index over a fixed
// vocabulary of words, backed by an Annoy approximate-KNN index. The words
// slice is sorted at build time and the position of a word doubles as its
// Annoy item id (see MemoryIndexBuilder.Build).
type MemoryIndex struct {
	dimensions int              // length of every stored vector
	words      []string         // sorted vocabulary; index == Annoy item id
	knn        annoy.AnnoyIndex // underlying approximate-KNN index
}
// Return the number of items that is stored in the index.
func (mi *MemoryIndex) GetNumberOfItems() int {
	return len(mi.words)
}
// Returns the length of the used vectors.
func (mi *MemoryIndex) GetVectorLength() int {
	return mi.dimensions
}
// WordToItemIndex looks up a word and returns its item index, or -1 when
// the word is not in the vocabulary.
// The words slice is sorted at build time (MemoryIndexBuilder.Build sorts
// before filling it), so a binary search is used — which is what the
// original comment promised while the code did a linear scan.
func (mi *MemoryIndex) WordToItemIndex(word string) ItemIndex {
	idx := sort.SearchStrings(mi.words, word)
	if idx < len(mi.words) && mi.words[idx] == word {
		return ItemIndex(idx)
	}
	return -1
}
// ItemIndexToWord returns the word associated with the given item index.
// It errors when the index lies outside [0, len(words)).
func (mi *MemoryIndex) ItemIndexToWord(item ItemIndex) (string, error) {
	// Fixed off-by-one: the original accepted item == len(mi.words),
	// one past the last valid index, which would panic on the access.
	if item < 0 || int(item) >= len(mi.words) {
		return "", fmt.Errorf("Index out of bounds")
	}
	return mi.words[item], nil
}
// GetVectorForItemIndex returns the stored vector for the given item index.
// It errors when the index lies outside [0, len(words)).
func (mi *MemoryIndex) GetVectorForItemIndex(item ItemIndex) (*Vector, error) {
	// Fixed off-by-one: the original accepted item == len(mi.words),
	// one past the last valid index.
	if item < 0 || int(item) >= len(mi.words) {
		return nil, fmt.Errorf("Index out of bounds")
	}
	var floats []float32
	mi.knn.GetItem(int(item), &floats)
	return &Vector{floats}, nil
}
// GetDistance computes the distance between two items.
// It errors when either index lies outside [0, len(words)).
func (mi MemoryIndex) GetDistance(a ItemIndex, b ItemIndex) (float32, error) {
	// Fixed off-by-one: the original accepted an index equal to
	// len(mi.words), one past the last valid item.
	if a < 0 || b < 0 || int(a) >= len(mi.words) || int(b) >= len(mi.words) {
		return 0, fmt.Errorf("Index out of bounds")
	}
	return mi.knn.GetDistance(int(a), int(b)), nil
}
// GetNnsByItem returns the indices of the n nearest neighbours of item
// (examining k trees), along with the distances between item and each
// neighbour. It errors when item lies outside [0, len(words)).
func (mi *MemoryIndex) GetNnsByItem(item ItemIndex, n int, k int) ([]ItemIndex, []float32, error) {
	// Fixed off-by-one: the original accepted item == len(mi.words),
	// one past the last valid index.
	if item < 0 || int(item) >= len(mi.words) {
		return nil, nil, fmt.Errorf("Index out of bounds")
	}
	var items []int
	var distances []float32
	mi.knn.GetNnsByItem(int(item), n, k, &items, &distances)
	indices := make([]ItemIndex, len(items))
	for i, x := range items {
		indices[i] = ItemIndex(x)
	}
	return indices, distances, nil
}
// GetNnsByVector returns the indices of the n nearest neighbours of the
// given query vector (examining k trees), along with their distances.
// The query vector must match the index's dimensionality exactly.
func (mi *MemoryIndex) GetNnsByVector(vector Vector, n int, k int) ([]ItemIndex, []float32, error) {
	if len(vector.vector) != mi.dimensions {
		return nil, nil, fmt.Errorf("Wrong vector length provided")
	}
	var items []int
	var distances []float32
	mi.knn.GetNnsByVector(vector.vector, n, k, &items, &distances)
	// Convert the raw Annoy ids to the exported ItemIndex type.
	indices := make([]ItemIndex, len(items))
	for i, id := range items {
		indices[i] = ItemIndex(id)
	}
	return indices, distances, nil
}
// The rest of this file concerns itself with building the Memory Index.
// This is done from the MemoryIndexBuilder struct.
type MemoryIndexBuilder struct {
	dimensions   int       // dimensionality every added vector must have
	word_vectors mib_pairs // accumulated (word, vector) pairs; sorted at Build time
}
type mib_pair struct {
	word   string
	vector Vector
}
// Define custom type, and implement functions required for sort.Sort.
type mib_pairs []mib_pair
func (a mib_pairs) Len() int           { return len(a) }
func (a mib_pairs) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a mib_pairs) Less(i, j int) bool { return a[i].word < a[j].word }
// Construct a new builder.
func InMemoryBuilder(dimensions int) *MemoryIndexBuilder {
	mib := MemoryIndexBuilder{
		dimensions:   dimensions,
		word_vectors: make([]mib_pair, 0),
	}
	return &mib
}
// Add a word and it's vector to the builder.
func (mib *MemoryIndexBuilder) AddWord(word string, vector Vector) {
	wv := mib_pair{word: word, vector: vector}
	mib.word_vectors = append(mib.word_vectors, wv)
}
// Build an efficient lookup index from the builder, using the given number
// of Annoy trees.
func (mib *MemoryIndexBuilder) Build(trees int) *MemoryIndex {
	mi := MemoryIndex{
		dimensions: mib.dimensions,
		words:      make([]string, 0),
		knn:        annoy.NewAnnoyIndexEuclidean(mib.dimensions),
	}
	// First sort the words; this way we can do binary search on the words.
	sort.Sort(mib.word_vectors)
	// Then fill up the data in the MemoryIndex
	// (slice position therefore doubles as the Annoy item id).
	for i, pair := range mib.word_vectors {
		mi.words = append(mi.words, pair.word)
		mi.knn.AddItem(i, pair.vector.vector)
	}
	// And instruct Annoy to build it's index
	mi.knn.Build(trees)
	return &mi
} | contextionary/memory_index.go | 0.777933 | 0.520253 | memory_index.go | starcoder
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// PropertyToEvaluate
// Auto-generated Kiota model pairing a property name with the value it is
// evaluated against.
type PropertyToEvaluate struct {
	// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
	additionalData map[string]interface{}
	// Provides the property name.
	propertyName *string
	// Provides the property value.
	propertyValue *string
}
// NewPropertyToEvaluate instantiates a new propertyToEvaluate and sets the default values.
func NewPropertyToEvaluate()(*PropertyToEvaluate) {
	m := &PropertyToEvaluate{
	}
	m.SetAdditionalData(make(map[string]interface{}));
	return m
}
// CreatePropertyToEvaluateFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreatePropertyToEvaluateFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
	return NewPropertyToEvaluate(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
// Returns nil on a nil receiver (generated nil-safety guard).
func (m *PropertyToEvaluate) GetAdditionalData()(map[string]interface{}) {
	if m == nil {
		return nil
	} else {
		return m.additionalData
	}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *PropertyToEvaluate) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
	res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
	// Each closure parses one JSON property and assigns it to the model;
	// a nil parsed value leaves the field untouched.
	res["propertyName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetPropertyName(val)
		}
		return nil
	}
	res["propertyValue"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
		val, err := n.GetStringValue()
		if err != nil {
			return err
		}
		if val != nil {
			m.SetPropertyValue(val)
		}
		return nil
	}
	return res
}
// GetPropertyName gets the propertyName property value. Provides the property name.
// A nil receiver yields nil (generated nil-safety guard).
func (m *PropertyToEvaluate) GetPropertyName()(*string) {
	if m == nil {
		return nil
	}
	return m.propertyName
}
// GetPropertyValue gets the propertyValue property value. Provides the property value.
// A nil receiver yields nil (generated nil-safety guard).
func (m *PropertyToEvaluate) GetPropertyValue()(*string) {
	if m == nil {
		return nil
	}
	return m.propertyValue
}
// Serialize serializes information the current object
func (m *PropertyToEvaluate) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
	{
		err := writer.WriteStringValue("propertyName", m.GetPropertyName())
		if err != nil {
			return err
		}
	}
	{
		err := writer.WriteStringValue("propertyValue", m.GetPropertyValue())
		if err != nil {
			return err
		}
	}
	// Unknown fields captured at parse time are round-tripped last.
	{
		err := writer.WriteAdditionalData(m.GetAdditionalData())
		if err != nil {
			return err
		}
	}
	return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PropertyToEvaluate) SetAdditionalData(value map[string]interface{})() {
	if m != nil {
		m.additionalData = value
	}
}
// SetPropertyName sets the propertyName property value. Provides the property name.
func (m *PropertyToEvaluate) SetPropertyName(value *string)() {
	if m != nil {
		m.propertyName = value
	}
}
// SetPropertyValue sets the propertyValue property value. Provides the property value.
func (m *PropertyToEvaluate) SetPropertyValue(value *string)() {
	if m != nil {
		m.propertyValue = value
	}
} | models/property_to_evaluate.go | 0.68721 | 0.403126 | property_to_evaluate.go | starcoder
package gockle
import (
"github.com/gocql/gocql"
"github.com/maraino/go-mock"
)
// ColumnApplied is the name of a special column that has a bool that indicates
// whether a conditional statement was applied.
const ColumnApplied = "[applied]"
// Batch is an ordered collection of CQL queries.
type Batch interface {
	// Add adds the query for statement and arguments.
	Add(statement string, arguments ...interface{})
	// Exec executes the queries in the order they were added.
	Exec() error
	// ExecTx executes the queries in the order they were added. It returns a slice
	// of maps from columns to values, the maps corresponding to all the conditional
	// queries, and ordered in the same relative order. The special column
	// ColumnApplied has a bool that indicates whether the conditional statement was
	// applied. If a conditional statement was not applied, the current values for
	// the columns are put into the map.
	ExecTx() ([]map[string]interface{}, error)
}
// Compile-time checks that both the mock and the real implementation
// satisfy the Batch interface.
var (
	_ Batch = &BatchMock{}
	_ Batch = batch{}
)
// BatchKind is the kind of Batch. The choice of kind mostly affects performance.
type BatchKind byte
// Kinds of batches.
const (
	// BatchLogged queries are atomic. Queries are only isolated within a single
	// partition.
	BatchLogged BatchKind = 0
	// BatchUnlogged queries are not atomic. Atomic queries spanning multiple partitions cost performance.
	BatchUnlogged BatchKind = 1
	// BatchCounter queries update counters and are not idempotent.
	BatchCounter BatchKind = 2
)
// BatchMock is a mock Batch. See github.com/maraino/go-mock.
type BatchMock struct {
	mock.Mock
}
// Add implements Batch.
func (m *BatchMock) Add(statement string, arguments ...interface{}) {
	m.Called(statement, arguments)
}
// Exec implements Batch.
func (m *BatchMock) Exec() error {
	return m.Called().Error(0)
}
// ExecTx implements Batch.
func (m *BatchMock) ExecTx() ([]map[string]interface{}, error) {
	var r = m.Called()
	return r.Get(0).([]map[string]interface{}), r.Error(1)
}
// batch is the production Batch implementation backed by a gocql session.
type batch struct {
	b *gocql.Batch
	s *gocql.Session
}
// Add appends the statement and its bind arguments to the batch.
func (b batch) Add(statement string, arguments ...interface{}) {
	b.b.Query(statement, arguments...)
}
// Exec executes the batch without collecting conditional results.
func (b batch) Exec() error {
	return b.s.ExecuteBatch(b.b)
}
// ExecTx executes the batch as a conditional (CAS) batch. The first
// returned map carries the ColumnApplied flag (plus current values when the
// statement was not applied); any further maps come from the iterator's
// remaining conditional rows.
func (b batch) ExecTx() ([]map[string]interface{}, error) {
	var m = map[string]interface{}{}
	var a, i, err = b.s.MapExecuteBatchCAS(b.b, m)
	if err != nil {
		return nil, err
	}
	s, err := i.SliceMap()
	if err != nil {
		return nil, err
	}
	if err := i.Close(); err != nil {
		return nil, err
	}
	m[ColumnApplied] = a
	// Prepend the CAS result map so callers see it first.
	s = append([]map[string]interface{}{m}, s...)
	return s, nil
} | batch.go | 0.775945 | 0.461684 | batch.go | starcoder
package aura
import (
"fmt"
"time"
"github.com/rcrowley/go-metrics"
)
// Histogram measures the statistical distribution of values in a stream of data.
// In addition to minimum, maximum, mean, etc.
// it also measures median, 75th, 90th, 95th, 98th, 99th, and 99.9th percentiles.
type Histogram interface {
	Collector
	// Observe records one sample value.
	Observe(int64)
}
// HistogramOpts selects which summary statistics and percentiles a
// histogram reports when collected.
type HistogramOpts struct {
	HVTypes []HistogramVType
	Percentiles []float64
}
var (
	// defaultSample is an exponentially-decaying reservoir (size 1028,
	// alpha 0.015), the go-metrics default.
	defaultSample = metrics.NewExpDecaySample(1028, 0.015)
	DefaultHistogramOpts = &HistogramOpts{
		HVTypes: []HistogramVType{HistogramVTMin, HistogramVTMax, HistogramVTMean},
		Percentiles: nil,
	}
)
// HistogramVType names one summary statistic of a histogram.
type HistogramVType string
const (
	HistogramVTMin HistogramVType = "min"
	HistogramVTMax HistogramVType = "max"
	HistogramVTMean HistogramVType = "mean"
	HistogramVTCount HistogramVType = "count"
	HistogramVTStdDev HistogramVType = "stdDev"
	HistogramVTSum HistogramVType = "sum"
	HistogramVTVariance HistogramVType = "variance"
)
// histogram is a single labelled histogram instance.
type histogram struct {
	*Desc
	opts *HistogramOpts
	self metrics.Histogram // underlying go-metrics histogram
	labels map[string]string
	interval time.Duration
}
// HistogramVec is a collection of histograms sharing one Desc but
// distinguished by their label values.
type HistogramVec struct {
	*Desc
	opts *HistogramOpts
	histograms map[string]*histogram // keyed by serialized label pairs
	interval time.Duration
}
// switchValues returns the requested summary statistic of the underlying
// go-metrics histogram. Count, Min, Max and Sum are int64; Mean, StdDev and
// Variance are float64. An unknown statistic yields nil.
func (h *histogram) switchValues(v HistogramVType) interface{} {
	switch v {
	case HistogramVTMin:
		// Fixed: this case previously returned h.self.Mean(), so the
		// "min" metric actually reported the mean.
		return h.self.Min()
	case HistogramVTMax:
		return h.self.Max()
	case HistogramVTMean:
		return h.self.Mean()
	case HistogramVTCount:
		return h.self.Count()
	case HistogramVTSum:
		return h.self.Sum()
	case HistogramVTStdDev:
		return h.self.StdDev()
	case HistogramVTVariance:
		return h.self.Variance()
	}
	return nil
}
// popMetricWithHVT builds one gauge Metric carrying the named summary
// statistic of this histogram, as "<fqName>.<statistic>".
func (h *histogram) popMetricWithHVT(desc *Desc, hvt HistogramVType) Metric {
	metricName := fmt.Sprintf("%s.%s", desc.fqName, hvt)
	return Metric{
		Metric:    metricName,
		Endpoint:  h.labels["endpoint"],
		Step:      desc.step,
		Value:     h.switchValues(hvt),
		Type:      GaugeValue,
		Labels:    h.labels,
		Timestamp: time.Now().Unix(),
	}
}
// popMetricWithPer builds one gauge Metric carrying the given percentile of
// this histogram, as "<fqName>.<percentile>" (two decimal places).
func (h *histogram) popMetricWithPer(desc *Desc, per float64) Metric {
	metricName := fmt.Sprintf("%s.%.2f", desc.fqName, per)
	return Metric{
		Metric:    metricName,
		Endpoint:  h.labels["endpoint"],
		Step:      desc.step,
		Value:     h.self.Percentile(per),
		Type:      GaugeValue,
		Labels:    h.labels,
		Timestamp: time.Now().Unix(),
	}
}
// Observe records one sample into the underlying go-metrics histogram.
func (h *histogram) Observe(i int64) {
	h.self.Update(i)
}
// Interval implements aura.Collector.
func (h *histogram) Interval() time.Duration {
	return h.interval
}
// Describe implements aura.Collector.
func (h *histogram) Describe(ch chan<- *Desc) {
	ch <- h.Desc
}
// Collect implements aura.Collector. It emits one Metric per configured
// summary statistic and one per configured percentile.
func (h *histogram) Collect(ch chan<- Metric) {
	for _, hvt := range h.opts.HVTypes {
		ch <- h.popMetricWithHVT(h.Desc, hvt)
	}
	for _, per := range h.opts.Percentiles {
		ch <- h.popMetricWithPer(h.Desc, per)
	}
}
// WithLabelValues returns the histogram for the given label values, which
// must be supplied in the same order as the Desc's label keys. It panics
// when the number of values does not match the number of keys.
func (hv *HistogramVec) WithLabelValues(lvs ...string) Histogram {
	if len(hv.Desc.labelKeys) != len(lvs) {
		// Fixed message typo: "but go %d" -> "but got %d".
		panic(fmt.Sprintf("histogram(%s): expected %d label values but got %d",
			hv.Desc.fqName, len(hv.Desc.labelKeys), len(lvs)),
		)
	}
	return hv.searchHistogram(lvs...)
}
// With returns the histogram for the given label map. It panics when the
// map contains a label key that was not declared for this vector.
func (hv *HistogramVec) With(labels map[string]string) Histogram {
	for key := range labels {
		if !hv.Desc.IsKeyIn(key) {
			panic(fmt.Sprintf("histogram(%s): expected label key: %s, but it dosen't exists", hv.Desc.fqName, key))
		}
	}
	// Order the values by the declared key order before dispatching.
	values := make([]string, 0, len(hv.Desc.labelKeys))
	for _, key := range hv.Desc.labelKeys {
		values = append(values, labels[key])
	}
	return hv.searchHistogram(values...)
}
// searchHistogram returns the cached histogram for the given label values,
// creating and caching one on first use. Improvements over the original:
// a single comma-ok map lookup instead of a lookup-then-index pair, and the
// label map is only built when a new entry is actually created.
// NOTE(review): access to hv.histograms is not synchronized; confirm the
// vector is only ever used from a single goroutine.
func (hv *HistogramVec) searchHistogram(lvs ...string) Histogram {
	lbp := makeLabelPairs(hv.Desc.fqName, hv.Desc.labelKeys, lvs)
	h, ok := hv.histograms[lbp]
	if !ok {
		h = &histogram{
			self:   metrics.NewHistogram(defaultSample),
			labels: makeLabelMap(hv.Desc.labelKeys, lvs),
			opts:   hv.opts,
		}
		hv.histograms[lbp] = h
	}
	return h
}
// Describe implements aura.Collector for the vector.
func (hv *HistogramVec) Describe(ch chan<- *Desc) {
	ch <- hv.Desc
}
// Interval implements aura.Collector for the vector.
func (hv *HistogramVec) Interval() time.Duration {
	return hv.interval
}
// Collect emits the configured statistics and percentiles of every child
// histogram; all children share the vector's Desc.
func (hv *HistogramVec) Collect(ch chan<- Metric) {
	for _, v := range hv.histograms {
		for _, hvt := range v.opts.HVTypes {
			ch <- v.popMetricWithHVT(hv.Desc, hvt)
		}
		for _, per := range v.opts.Percentiles {
			ch <- v.popMetricWithPer(hv.Desc, per)
		}
	}
}
// NewHistogram creates an unlabelled histogram. A nil opts falls back to
// DefaultHistogramOpts.
func NewHistogram(fqName, help string, step uint32, interval time.Duration, opts *HistogramOpts) Histogram {
	if opts == nil {
		opts = DefaultHistogramOpts
	}
	return &histogram{
		Desc: NewDesc(fqName, help, step, nil),
		self: metrics.NewHistogram(defaultSample),
		labels: map[string]string{},
		interval: interval,
		opts: opts,
	}
}
// NewHistogramVec creates a labelled histogram vector. A nil opts falls
// back to DefaultHistogramOpts.
func NewHistogramVec(fqName, help string, step uint32, interval time.Duration, labelKeys []string, opts *HistogramOpts) *HistogramVec {
	if opts == nil {
		opts = DefaultHistogramOpts
	}
	return &HistogramVec{
		Desc: NewDesc(fqName, help, step, labelKeys),
		histograms: map[string]*histogram{},
		interval: interval,
		opts: opts,
	}
} | histogram.go | 0.782413 | 0.543409 | histogram.go | starcoder
package bloom
import (
"math"
"github.com/andy2046/bitmap"
)
type (
	// scalableBloomFilter grows by stacking plain bloom filters: when the
	// newest filter passes its fill ratio, a fresh one with a tighter
	// false-positive target is appended (see Add), keeping the compound
	// false-positive rate bounded.
	scalableBloomFilter struct {
		filterz []*bloomFilterBit // bloom filters list
		count uint64 // number of elements in the bloom filter
		n uint64 // estimated number of elements
		p float64 // target False Positive rate
		r float64 // optimal tightening ratio
		fillRatio float64 // fill ratio
	}
)
const (
	rDefault float64 = 0.8
	fillRatio float64 = 0.5
)
// NewS creates scalable bloom filter based on the provided fpRate.
// fpRate is the target False Positive probability.
// The initial capacity estimate is fixed at 10000 elements.
func NewS(fpRate float64) Bloom {
	return NewSGuess(10000, fpRate, rDefault)
}
// NewSGuess estimates m/k based on the provided n/p then creates scalable bloom filter.
// n is the estimated number of elements in the bloom filter.
// p is the false positive probability.
// r is the optimal tightening ratio.
func NewSGuess(n uint64, p, r float64) Bloom {
	m, k := Guess(n, p)
	// adjustM presumably rounds m up to a power of two so modulo can be a
	// mask (see the "x % 2^i" comment below) — confirm in its definition.
	mm, exponent := adjustM(m)
	sBF := scalableBloomFilter{
		filterz: make([]*bloomFilterBit, 0, 1),
		r: r,
		fillRatio: fillRatio,
		p: p,
		n: n,
	}
	sBF.filterz = append(sBF.filterz, &bloomFilterBit{
		bitmap: bitmap.New(mm),
		m: mm - 1, // x % 2^i = x & (2^i - 1)
		k: k,
		shift: 64 - exponent,
	})
	return &sBF
}
// Add inserts entry into the newest stacked filter, first appending a new
// filter stage when the current one has reached its fill ratio.
func (bf *scalableBloomFilter) Add(entry []byte) {
	idx := len(bf.filterz) - 1
	if bf.filterz[idx].estimatedFillRatio() >= bf.fillRatio {
		// Each new stage tightens the false-positive target by r^stages,
		// keeping the compound rate near the configured p.
		fp := bf.p * math.Pow(bf.r, float64(len(bf.filterz)))
		m, k := Guess(bf.n, fp)
		mm, exponent := adjustM(m)
		bf.filterz = append(bf.filterz, &bloomFilterBit{
			bitmap: bitmap.New(mm),
			m: mm - 1, // x % 2^i = x & (2^i - 1)
			k: k,
			shift: 64 - exponent,
		})
		idx++
	}
	bf.filterz[idx].Add(entry)
	bf.count++
}
// AddString adds the byte form of entry.
func (bf *scalableBloomFilter) AddString(entry string) {
	bf.Add([]byte(entry))
}
// Exist reports whether entry may be present: true when any stacked filter
// matches (possibly a false positive); false means definitely absent.
func (bf *scalableBloomFilter) Exist(entry []byte) bool {
	for _, f := range bf.filterz {
		if f.Exist(entry) {
			return true
		}
	}
	return false
}
// ExistString is Exist for a string entry.
func (bf *scalableBloomFilter) ExistString(entry string) bool {
	return bf.Exist([]byte(entry))
}
// FalsePositive returns the compound false-positive probability across all
// stacked filters: 1 - prod(1 - p_i).
func (bf *scalableBloomFilter) FalsePositive() float64 {
	rez := 1.0
	for _, f := range bf.filterz {
		rez *= (1.0 - f.FalsePositive())
	}
	return 1.0 - rez
}
// GuessFalsePositive estimates the compound false-positive probability as
// if n elements were stored in each stage.
func (bf *scalableBloomFilter) GuessFalsePositive(n uint64) float64 {
	rez := 1.0
	for _, f := range bf.filterz {
		rez *= (1.0 - f.GuessFalsePositive(n))
	}
	return 1.0 - rez
}
// M returns the total number of bits across all stacked filters.
func (bf *scalableBloomFilter) M() uint64 {
	m := uint64(0)
	for _, f := range bf.filterz {
		m += f.M()
	}
	return m
}
// K returns the hash-function count of the first (base) filter stage.
func (bf *scalableBloomFilter) K() uint64 {
	return bf.filterz[0].K()
}
// N returns the number of elements added so far.
func (bf *scalableBloomFilter) N() uint64 {
	return bf.count
}
// Clear resets the filter to a single fresh base stage sized from the
// original n/p estimates, and zeroes the element count.
func (bf *scalableBloomFilter) Clear() {
	// Nil out old stages so their bitmaps become collectable.
	for i := range bf.filterz {
		bf.filterz[i] = nil
	}
	bf.filterz = make([]*bloomFilterBit, 0, 1)
	m, k := Guess(bf.n, bf.p)
	mm, exponent := adjustM(m)
	bf.filterz = append(bf.filterz, &bloomFilterBit{
		bitmap: bitmap.New(mm),
		m: mm - 1, // x % 2^i = x & (2^i - 1)
		k: k,
		shift: 64 - exponent,
	})
	bf.count = 0
} | pkg/bloom/bloomscale.go | 0.733643 | 0.594051 | bloomscale.go | starcoder
package main
import (
"time"
)
// Period - Hold from and to timestamps for a given period
type Period struct {
	from int // inclusive start, Unix seconds
	to   int // inclusive end, Unix seconds
}
// String returns the period's bounds as two human-readable UTC strings.
// Note: it returns two values, so it does not satisfy fmt.Stringer.
func (p *Period) String() (string, string) {
	return time.Unix(int64(p.from), 0).UTC().String(), time.Unix(int64(p.to), 0).UTC().String()
}
// GetLastMonthPeriod returns the Period covering the full calendar month
// before the one containing now, in UTC: from the 1st at 00:00:00 through
// the last day at 23:59:59, both inclusive.
// (Cleaned up: removed commented-out dead code and the unused lastMonth
// computation left over in the original.)
func GetLastMonthPeriod(now time.Time) Period {
	// Step back one day from the first of the current month to land on the
	// last day of the previous month; that fixes both year and month, even
	// across a January -> December boundary.
	firstOfTheMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)
	lastDayOfLastMonth := firstOfTheMonth.AddDate(0, 0, -1)
	fromTime := time.Date(lastDayOfLastMonth.Year(), lastDayOfLastMonth.Month(), 1, 0, 0, 0, 0, time.UTC)
	toTime := time.Date(lastDayOfLastMonth.Year(), lastDayOfLastMonth.Month(), lastDayOfLastMonth.Day(), 23, 59, 59, 0, time.UTC)
	return Period{
		from: int(fromTime.Unix()),
		to:   int(toTime.Unix()),
	}
}
// GetTodayPeriod returns the Period spanning the calendar day of now in
// UTC, from 00:00:00 through 23:59:59 inclusive.
func GetTodayPeriod(now time.Time) Period {
	y, m, d := now.Year(), now.Month(), now.Day()
	start := time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
	end := time.Date(y, m, d, 23, 59, 59, 0, time.UTC)
	return Period{from: int(start.Unix()), to: int(end.Unix())}
}
// GetThisYearPeriod returns the Period spanning the whole calendar year of
// now in UTC (Jan 1 00:00:00 through Dec 31 23:59:59).
func GetThisYearPeriod(now time.Time) Period {
	start := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(now.Year(), 12, 31, 23, 59, 59, 0, time.UTC)
	return Period{from: int(start.Unix()), to: int(end.Unix())}
}
// GetLastYearPeriod returns the Period spanning the calendar year before
// now's, in UTC.
func GetLastYearPeriod(now time.Time) Period {
	year := now.Year() - 1
	start := time.Date(year, 1, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(year, 12, 31, 23, 59, 59, 0, time.UTC)
	return Period{from: int(start.Unix()), to: int(end.Unix())}
}
// GetPeriodByName - Get Period by name ("LastMonth", "ThisYear",
// "LastYear" or "Today"; any other name falls back to "Today").
// now is a Unix timestamp in seconds; pass 0 to use the current time.
func GetPeriodByName(name string, now int64) Period {
	// Initialize "now" in case it was not provided
	if now == 0 {
		now = time.Now().UTC().Unix()
	}
	// NOTE(review): time.Unix yields a Time in the local zone, and the
	// Get*Period helpers read Year/Month/Day from it directly, so results
	// can shift near midnight on non-UTC hosts. Presumably .UTC() was
	// intended here — confirm with callers.
	date := time.Unix(now, 0)
	switch name {
	case "LastMonth":
		return GetLastMonthPeriod(date)
	case "ThisYear":
		return GetThisYearPeriod(date)
	case "LastYear":
		return GetLastYearPeriod(date)
	case "Today":
		return GetTodayPeriod(date)
	default:
		return GetTodayPeriod(date)
	}
} | period.go | 0.604983 | 0.601155 | period.go | starcoder
package blockhash
import (
"fmt"
"image"
"image/color"
"math"
"math/big"
"sort"
"strconv"
"github.com/dsoprea/go-logging"
)
// Blockhash computes a perceptual "blockhash" digest of an image, caching
// the hex result after the first computation (see process/Hexdigest).
type Blockhash struct {
	image    image.Image
	hashbits int
	// toColor and hasAlpha are never assigned in this file —
	// NOTE(review): confirm whether they are dead fields.
	toColor      *color.Model
	hasAlpha     bool
	hexdigest    string // cached result; empty until process() has run
	isOpaqueable bool   // true when image implements Opaque()
}
// opaqueableModel automatically fulfilled by existing Go types.
type opaqueableModel interface {
	Opaque() bool
}
// NewBlockhash returns a Blockhash that computes a perceptual hash of img
// using a hashbits x hashbits block grid.
//
// hashbits must be a multiple of four so the resulting bit string maps
// cleanly onto hex digits; otherwise this function panics.
func NewBlockhash(img image.Image, hashbits int) *Blockhash {
	// Renamed the parameter from "image" to "img": the old name shadowed
	// the standard-library image package inside this function.
	// Only alpha-aware image types can report opaqueness explicitly.
	_, isOpaqueable := img.(opaqueableModel)
	// If the bits aren't aligned, the digest won't make sense as a hex string.
	if (hashbits % 4) != 0 {
		log.Panicf("Bits must be a multiple of four: (%d)", hashbits)
	}
	return &Blockhash{
		image:        img,
		hashbits:     hashbits,
		isOpaqueable: isOpaqueable,
	}
}
// totalValue returns the sum of the R, G and B channels of p (0..765),
// converting to the RGBA model first when needed. For alpha-aware images,
// a fully transparent pixel counts as white (765).
func (bh *Blockhash) totalValue(p color.Color) (value uint32) {
	defer func() {
		if state := recover(); state != nil {
			log.Panic(state.(error))
		}
	}()
	// The RGBA() will return the alpha-multiplied values but the fields will
	// still be in their premultiplied state.
	if bh.image.ColorModel() != color.RGBAModel {
		p = color.RGBAModel.Convert(p)
	}
	c2 := p.(color.RGBA)
	if bh.isOpaqueable == true && c2.A == 0 {
		return 765
	}
	return uint32(c2.R) + uint32(c2.G) + uint32(c2.B)
}
// totalValueAt is totalValue for the pixel at (x, y) of the source image.
func (bh *Blockhash) totalValueAt(x, y int) (value uint32) {
	defer func() {
		if state := recover(); state != nil {
			log.Panic(state.(error))
		}
	}()
	p := bh.image.At(x, y)
	return bh.totalValue(p)
}
// median returns the median of data without modifying it (a copy is sorted).
func (bh *Blockhash) median(data []float64) float64 {
	defer func() {
		if state := recover(); state != nil {
			log.Panic(state.(error))
		}
	}()
	copied := make([]float64, len(data))
	copy(copied, data)
	sort.Float64s(copied)
	len_ := len(copied)
	if len(copied)%2 == 0 {
		// Even count: average the two middle values.
		v := (copied[len_/2-1] + copied[len_/2]) / 2.0
		return v
	} else {
		v := copied[len_/2]
		return v
	}
}
// bitsToHex packs the 0/1 slice into a zero-padded, fixed-width hex string
// (hashbits^2 bits -> hashbits^2/4 hex digits). It panics on any element
// that is not 0 or 1.
func (bh *Blockhash) bitsToHex(bitString []int) string {
	defer func() {
		if state := recover(); state != nil {
			log.Panic(state.(error))
		}
	}()
	s := make([]byte, len(bitString))
	for i, d := range bitString {
		if d == 0 {
			s[i] = '0'
		} else if d == 1 {
			s[i] = '1'
		} else {
			log.Panicf("invalid bit value (%d) at offset (%d)", d, i)
		}
	}
	b := new(big.Int)
	b.SetString(string(s), 2)
	// Pad to the full digest width so leading zero bits are preserved.
	width := int(math.Pow(float64(bh.hashbits), 2.0) / 4.0)
	encoded := fmt.Sprintf("%0"+strconv.Itoa(width)+"x", b)
	return encoded
}
// translateBlocksToBits thresholds block brightness totals into bits. The
// blocks are split into four equal bands; within each band a block becomes
// 1 when its total exceeds (or, for bright bands, nearly equals) that
// band's median.
func (bh *Blockhash) translateBlocksToBits(blocksInline []float64, pixelsPerBlock float64) (results []int) {
	defer func() {
		if state := recover(); state != nil {
			log.Panic(state.(error))
		}
	}()
	blocks := make([]int, len(blocksInline))
	// Half of the maximum possible total for one block (roughly 256*3 per pixel).
	halfBlockValue := pixelsPerBlock * 256.0 * 3.0 / 2.0
	bandsize := int(math.Floor(float64(len(blocksInline)) / 4.0))
	for i := 0; i < 4; i++ {
		m := bh.median(blocksInline[i*bandsize : (i+1)*bandsize])
		for j := i * bandsize; j < (i+1)*bandsize; j++ {
			v := blocksInline[j]
			// TODO(dustin): Use epsilon.
			if v > m || (math.Abs(v-m) < 1 && m > halfBlockValue) {
				blocks[j] = 1
			} else {
				blocks[j] = 0
			}
		}
	}
	return blocks
}
// size returns the pixel dimensions of the source image.
// Fixed: use Dx/Dy instead of Bounds().Max — an image's bounds are not
// guaranteed to start at the origin (e.g. image.SubImage), in which case
// Max.X/Max.Y overstate the dimensions.
// NOTE(review): the rest of this file still indexes pixels from (0, 0); for
// non-zero-origin images At() would also need the Min offset — confirm.
func (bh *Blockhash) size() (width int, height int) {
	r := bh.image.Bounds()
	return r.Dx(), r.Dy()
}
// getBlocks accumulates per-block brightness totals over a hashbits x
// hashbits grid and returns them flattened row-major. When the image size
// does not divide evenly by hashbits, boundary pixels are split between
// the two adjacent blocks with fractional weights.
func (bh *Blockhash) getBlocks() []float64 {
	width, height := bh.size()
	isEvenX := (width % bh.hashbits) == 0
	isEvenY := (height % bh.hashbits) == 0
	blocks := make([][]float64, bh.hashbits)
	for i := 0; i < bh.hashbits; i++ {
		blocks[i] = make([]float64, bh.hashbits)
	}
	blockWidth := float64(width) / float64(bh.hashbits)
	blockHeight := float64(height) / float64(bh.hashbits)
	for y := 0; y < height; y++ {
		var weightTop, weightBottom, weightLeft, weightRight float64
		var blockTop, blockBottom, blockLeft, blockRight int
		if isEvenY {
			blockTop = int(math.Floor(float64(y) / blockHeight))
			blockBottom = blockTop
			weightTop = 1.0
			weightBottom = 0.0
		} else {
			// Fractional row: split this pixel row between the two block
			// rows it straddles.
			yMod := math.Mod((float64(y) + 1.0), blockHeight)
			yInt, yFrac := math.Modf(yMod)
			weightTop = (1.0 - yFrac)
			weightBottom = yFrac
			// y_int will be 0 on bottom/right borders and on block boundaries
			if yInt > 0.0 || (y+1) == height {
				blockTop = int(math.Floor(float64(y) / blockHeight))
				blockBottom = blockTop
			} else {
				blockTop = int(math.Floor(float64(y) / blockHeight))
				blockBottom = int(math.Ceil(float64(y) / blockHeight))
			}
		}
		for x := 0; x < width; x++ {
			value := bh.totalValueAt(x, y)
			if isEvenX {
				blockRight = int(math.Floor(float64(x) / blockWidth))
				blockLeft = blockRight
				weightLeft = 1.0
				weightRight = 0.0
			} else {
				xMod := math.Mod((float64(x) + 1.0), blockWidth)
				xInt, xFrac := math.Modf(xMod)
				weightLeft = (1.0 - xFrac)
				weightRight = (xFrac)
				if xInt > 0.0 || (x+1) == width {
					blockRight = int(math.Floor(float64(x) / blockWidth))
					blockLeft = blockRight
				} else {
					blockLeft = int(math.Floor(float64(x) / blockWidth))
					blockRight = int(math.Ceil(float64(x) / blockWidth))
				}
			}
			// Distribute the pixel's brightness over up to four blocks.
			blocks[blockTop][blockLeft] += float64(value) * weightTop * weightLeft
			blocks[blockTop][blockRight] += float64(value) * weightTop * weightRight
			blocks[blockBottom][blockLeft] += float64(value) * weightBottom * weightLeft
			blocks[blockBottom][blockRight] += float64(value) * weightBottom * weightRight
		}
	}
	// Flatten the grid row-major for the thresholding step.
	blocksInline := make([]float64, bh.hashbits*bh.hashbits)
	i := 0
	for y := 0; y < bh.hashbits; y++ {
		for x := 0; x < bh.hashbits; x++ {
			blocksInline[i] = blocks[y][x]
			i++
		}
	}
	return blocksInline
}
// process computes and caches the digest; it is a no-op once a digest has
// been computed. Panics inside the pipeline are converted to errors.
func (bh *Blockhash) process() (err error) {
	defer func() {
		if state := recover(); state != nil {
			err = log.Wrap(state.(error))
		}
	}()
	if bh.hexdigest != "" {
		return nil
	}
	blocks := bh.getBlocks()
	width, height := bh.size()
	blockWidth := float64(width) / float64(bh.hashbits)
	blockHeight := float64(height) / float64(bh.hashbits)
	digest := bh.translateBlocksToBits(blocks, blockWidth*blockHeight)
	bh.hexdigest = bh.bitsToHex(digest)
	return nil
}
// Hexdigest returns the blockhash digest as a hex string, computing it on
// first use. It panics when processing fails.
func (bh *Blockhash) Hexdigest() string {
	defer func() {
		if state := recover(); state != nil {
			err := log.Wrap(state.(error))
			log.PanicIf(err)
		}
	}()
	err := bh.process()
	log.PanicIf(err)
	return bh.hexdigest
} | blockhash.go | 0.563018 | 0.412944 | blockhash.go | starcoder
package mapqueryparam
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
// EncodeValues takes a input struct and encodes the content into the form of a set of query parameters.
// Input must be a pointer to a struct. Same as Encode.
func EncodeValues(v interface{}) (url.Values, error) {
	return Encode(v)
}
// Encode takes a input struct and encodes the content into the form of a set of query parameters.
// Input must be a pointer to a struct. Same as EncodeValues.
// A nil input or nil pointer yields an empty map; a non-struct yields an
// error. Unexported and empty-valued fields are skipped.
func Encode(v interface{}) (map[string][]string, error) {
	if v == nil {
		return map[string][]string{}, nil
	}
	res := make(map[string][]string)
	val := reflect.ValueOf(v)
	// Unwrap any chain of pointers; a nil pointer encodes as empty.
	for val.Kind() == reflect.Ptr {
		if val.IsNil() {
			return map[string][]string{}, nil
		}
		val = val.Elem()
	}
	if val.Kind() != reflect.Struct {
		return nil, errors.New("unable to encode non-struct")
	}
	t := val.Type()
	for i := 0; i < val.NumField(); i++ {
		fTyp := t.Field(i)
		// A non-empty PkgPath marks an unexported field.
		isUnexported := fTyp.PkgPath != ""
		if isUnexported {
			continue
		}
		fVal := val.Field(i)
		if isEmptyValue(fVal) {
			continue
		}
		d, err := encodeField(fVal)
		if err != nil {
			return nil, err
		}
		if len(d) == 0 {
			continue
		}
		fieldTag := getFieldTag(fTyp)
		res[fieldTag] = d
	}
	return res, nil
}
// getFieldTag resolves the parameter name used for a struct field. The first
// non-empty segment of the MQP tag wins; failing that, the first non-empty
// json tag segment that is not "omitempty"; failing both, the field's own
// name.
func getFieldTag(t reflect.StructField) string {
	if tag := t.Tag.Get(mapQueryParameterTagName); tag != "" {
		for _, part := range strings.Split(tag, ",") {
			if part != "" {
				return part
			}
		}
	}
	if tag := t.Tag.Get("json"); tag != "" {
		for _, part := range strings.Split(tag, ",") {
			// Skip the "omitempty" option so it is never mistaken for a name.
			if part != "" && !strings.EqualFold(part, "omitempty") {
				return part
			}
		}
	}
	return t.Name
}
// encodeField encodes one struct field as a list of parameter strings.
// Arrays and slices contribute one string per element; pointers and
// interfaces are unwrapped; every other kind becomes a single string via
// encodeValue.
func encodeField(v reflect.Value) ([]string, error) {
	switch v.Kind() {
	case reflect.Array, reflect.Slice:
		out := make([]string, v.Len())
		for i := range out {
			elem, err := encodeValue(v.Index(i))
			if err != nil {
				return nil, err
			}
			out[i] = elem
		}
		return out, nil
	case reflect.Interface, reflect.Ptr:
		// Encode whatever the pointer/interface holds.
		return encodeField(v.Elem())
	}
	s, err := encodeValue(v)
	if err != nil {
		return nil, err
	}
	return []string{s}, nil
}
// encodeValue encodes a single value as a string. Base types are formatted using `strconv`. Maps and structs are
// encoded as json objects using standard json marshaling. Channels and functions are skipped, as they're not supported.
func encodeValue(v reflect.Value) (string, error) {
switch v.Kind() {
case reflect.String:
return v.String(), nil
case reflect.Bool:
return strconv.FormatBool(v.Bool()), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(v.Int(), 10), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(v.Uint(), 10), nil
case reflect.Float32:
return strconv.FormatFloat(v.Float(), 'f', -1, 32), nil
case reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64), nil
case reflect.Complex64:
return strconv.FormatComplex(v.Complex(), 'f', -1, 64), nil
case reflect.Complex128:
return strconv.FormatComplex(v.Complex(), 'f', -1, 128), nil
case reflect.Map, reflect.Struct:
i := v.Interface()
switch t := i.(type) {
case time.Time:
return t.Format(time.RFC3339Nano), nil
default:
b, err := json.Marshal(i)
return string(b), err
}
case reflect.Interface, reflect.Ptr:
return encodeValue(v.Elem())
case reflect.Chan, reflect.Func:
return "", nil
default:
return "", fmt.Errorf("unsupported field kind: %s", v.Kind().String())
}
}
// isEmptyValue validated whether a value is empty/zero/nil. Used to determine if a field should be omitted from the
// encoded result.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Chan, reflect.Func:
return true
case reflect.Struct:
i := v.Interface()
switch t := i.(type) {
case time.Time:
return t.IsZero()
}
}
return false
} | encode.go | 0.713731 | 0.430686 | encode.go | starcoder |
package ast
import (
"fmt"
"github.com/golang/glog"
)
// Visitor is the interface for AST traversal with Walk. VisitBefore is
// invoked for each node encountered; if the Visitor it returns is non-nil,
// Walk descends into the node's children with that Visitor and then calls
// VisitAfter on the node. Returning a nil Visitor from VisitBefore signals
// that the Visitor has already handled the node's children, and VisitAfter
// is not called. Both methods return the (possibly replaced) node.
type Visitor interface {
	VisitBefore(n Node) (Visitor, Node)
	VisitAfter(n Node) Node
}
// walknodelist walks every node of list with v and returns the rewritten
// nodes in the same order.
func walknodelist(v Visitor, list []Node) []Node {
	result := make([]Node, len(list))
	for i, node := range list {
		result[i] = Walk(v, node)
	}
	return result
}
// Walk traverses (walks) an AST node with the provided Visitor v, recursing
// into each node kind's children and replacing them with the nodes the
// Visitor returns. It returns the (possibly replaced) root node, and panics
// on an unknown node type — which indicates a missing case below.
func Walk(v Visitor, node Node) Node {
	glog.V(2).Infof("About to VisitBefore node at %s", node.Pos())
	// Returning nil from VisitBefore signals to Walk that the Visitor has
	// handled the children of this node. VisitAfter will not be called.
	if v, node = v.VisitBefore(node); v == nil {
		return node
	}
	// Recurse into the children of each node kind, rewriting them in place.
	switch n := node.(type) {
	case *StmtList:
		n.Children = walknodelist(v, n.Children)
	case *ExprList:
		n.Children = walknodelist(v, n.Children)
	case *CondStmt:
		// Cond and Else are optional; Truth is always present.
		if n.Cond != nil {
			n.Cond = Walk(v, n.Cond)
		}
		n.Truth = Walk(v, n.Truth)
		if n.Else != nil {
			n.Else = Walk(v, n.Else)
		}
	case *BuiltinExpr:
		if n.Args != nil {
			n.Args = Walk(v, n.Args)
		}
	case *BinaryExpr:
		n.Lhs = Walk(v, n.Lhs)
		n.Rhs = Walk(v, n.Rhs)
	case *UnaryExpr:
		n.Expr = Walk(v, n.Expr)
	case *IndexedExpr:
		n.Index = Walk(v, n.Index)
		n.Lhs = Walk(v, n.Lhs)
	case *DecoDecl:
		n.Block = Walk(v, n.Block)
	case *DecoStmt:
		n.Block = Walk(v, n.Block)
	case *ConvExpr:
		n.N = Walk(v, n.N)
	case *PatternExpr:
		n.Expr = Walk(v, n.Expr)
	case *PatternFragment:
		n.Expr = Walk(v, n.Expr)
	case *IdTerm, *CaprefTerm, *VarDecl, *StringLit, *IntLit, *FloatLit, *PatternLit, *NextStmt, *OtherwiseStmt, *DelStmt, *StopStmt:
		// These nodes are terminals, thus have no children to walk.
	default:
		panic(fmt.Sprintf("Walk: unexpected node type %T: %v", n, n))
	}
	glog.V(2).Infof("About to VisitAfter node at %s", node.Pos())
	node = v.VisitAfter(node)
	return node
}
package gopie
import (
"math"
)
// createLabels builds the label (text plus callout line) elements for a
// chart's values, dispatching on the number of values: none, a single
// full-circle slice, or the general multi-slice case.
func createLabels(chart PieChart, pieRect rectangle) []label {
	switch len(chart.Values) {
	case 0:
		return createEmptyLabels()
	case 1:
		return createSingleLabel(chart, pieRect)
	}
	return createMultipleLabels(chart, pieRect)
}
// createEmptyLabels returns an empty (non-nil) label slice for charts with
// no values.
func createEmptyLabels() []label {
	return []label{}
}
// createMultipleLabels builds one label per chart value, placing each label
// line at the angular midpoint of its slice (values are treated as
// fractions of the chart's total).
func createMultipleLabels(chart PieChart, pieRect rectangle) []label {
	total := chart.calculateTotalValue()
	innerRadius := calculateLabelLineInnerRadius(chart, pieRect)
	outerRadius := calculateLabelLineOuterRadius(chart, pieRect)
	textRadius := calculateTextRadius(chart, pieRect)

	labels := make([]label, len(chart.Values))
	runningSum := float64(0)
	for i, value := range chart.Values {
		// Midpoint angle of this slice, as a fraction of a full turn.
		angle := ((runningSum + value.Value/2) / total) * twoPi
		labels[i] = label{
			Text: createText(i, textRadius, angle, chart),
			Line: createLine(i, innerRadius, outerRadius, angle, chart),
		}
		runningSum += value.Value
	}
	return labels
}
// createSingleLabel builds the one label for a chart with a single value,
// placing its line at a fixed angle of pi/2 since the lone slice covers the
// whole circle.
func createSingleLabel(chart PieChart, pieRect rectangle) []label {
	innerRadius := calculateLabelLineInnerRadius(chart, pieRect)
	outerRadius := calculateLabelLineOuterRadius(chart, pieRect)
	textRadius := calculateTextRadius(chart, pieRect)
	const angle = math.Pi / 2
	return []label{{
		Text: createText(0, textRadius, angle, chart),
		Line: createLine(0, innerRadius, outerRadius, angle, chart),
	}}
}
// createText builds the text element for the value at index, anchored at
// textRadius along the given angle from the chart center and styled with
// the chart's font settings.
func createText(index int, textRadius, angle float64, chart PieChart) text {
	cx, cy := chart.getCenter()
	x, y := toDecartTranslate(angle, textRadius, cx, cy)
	return text{
		Text:       chart.Values[index].Label,
		FontAnchor: calculateLabelTextAnchor(angle),
		FontFamily: chart.getFontFamily(),
		FontSize:   chart.getFontSize(),
		X:          x,
		Y:          y,
	}
}
// calculateLabelTextAnchor picks the SVG text-anchor for a label at the
// given angle (radians). With the angle reduced modulo a full turn: exactly
// 0 or pi anchors "middle", anything past pi anchors "end", and the rest
// anchors "start".
func calculateLabelTextAnchor(angle float64) string {
	switch m := math.Mod(angle, 2*math.Pi); {
	case m == 0, m == math.Pi:
		return "middle"
	case m > math.Pi:
		return "end"
	}
	return "start"
}
// createLine builds the callout line for the value at index, running from
// innerRadius to outerRadius along the given angle from the chart center.
func createLine(index int, innerRadius, outerRadius, angle float64, chart PieChart) line {
	cx, cy := chart.getCenter()
	x1, y1 := toDecartTranslate(angle, innerRadius, cx, cy)
	x2, y2 := toDecartTranslate(angle, outerRadius, cx, cy)
	return line{
		Style: createLabelLineStyle(chart, index),
		X1:    x1,
		Y1:    y1,
		X2:    x2,
		Y2:    y2,
	}
}
// calculateLabelLineInnerRadius returns where a label line starts: the
// rect's incircle radius pulled in by the chart's stroke width.
func calculateLabelLineInnerRadius(c PieChart, r rectangle) float64 {
	return r.calculateIncircleRadius() - c.getStrokeWidth()
}
// calculateLabelLineOuterRadius returns where a label line ends: its start
// radius plus the chart's configured full line length.
func calculateLabelLineOuterRadius(c PieChart, r rectangle) float64 {
	return calculateLabelLineInnerRadius(c, r) + c.getLabelLineFullLength()
}
func calculateTextRadius(c PieChart, r rectangle) float64 {
labelLineOuterRadius := calculateLabelLineOuterRadius(c, r)
return labelLineOuterRadius + c.getLabelPadding()
} | labels.go | 0.809953 | 0.402275 | labels.go | starcoder |
package vec2
import (
"fmt"
"math"
)
// Rect is an axis-aligned rectangle defined by its Min and Max corner
// vectors. Methods such as Area, Contains and Intersects assume the corners
// are normalized (each Min component not exceeding the corresponding Max
// component), as guaranteed when constructed via NewRect.
type Rect struct {
	Min T
	Max T
}
// NewRect creates the normalized Rect spanned by the two points a and b:
// Min receives the componentwise minimum and Max the componentwise maximum.
func NewRect(a, b *T) (rect Rect) {
	rect = Rect{
		Min: Min(a, b),
		Max: Max(a, b),
	}
	return rect
}
// ParseRect parses a Rect from a string of four space-separated numbers in
// the order Min x, Min y, Max x, Max y (the inverse of String()). The
// parsed corners are used as-is and not re-normalized.
func ParseRect(s string) (r Rect, err error) {
	_, err = fmt.Sscan(s, &r.Min[0], &r.Min[1], &r.Max[0], &r.Max[1])
	return r, err
}
// Width returns the rectangle's extent along the x axis. Previously this
// computed Min[0]-Max[0], which is non-positive for a normalized rectangle
// and inconsistent with Area(); it now returns Max[0]-Min[0].
func (rect *Rect) Width() float64 {
	return rect.Max[0] - rect.Min[0]
}
// Height returns the rectangle's extent along the y axis. Previously this
// computed Min[1]-Max[1], which is non-positive for a normalized rectangle
// and inconsistent with Area(); it now returns Max[1]-Min[1].
func (rect *Rect) Height() float64 {
	return rect.Max[1] - rect.Min[1]
}
// Size returns the larger of the rectangle's width and height.
func (rect *Rect) Size() float64 {
	return math.Max(rect.Width(), rect.Height())
}
// Slice returns the rectangle's corner components as a freshly allocated
// slice in the order Min x, Min y, Max x, Max y.
func (rect *Rect) Slice() []float64 {
	return []float64{
		rect.Min[0], rect.Min[1],
		rect.Max[0], rect.Max[1],
	}
}
// Array returns a pointer to a freshly allocated [4]float64 holding the
// corner components in the order Min x, Min y, Max x, Max y.
func (rect *Rect) Array() *[4]float64 {
	arr := [4]float64{
		rect.Min[0], rect.Min[1],
		rect.Max[0], rect.Max[1],
	}
	return &arr
}
// String formats the Rect as the Min and Max vectors' own string forms
// separated by a space. See also ParseRect().
func (rect *Rect) String() string {
	return rect.Min.String() + " " + rect.Max.String()
}
// ContainsPoint reports whether point p lies within the rectangle,
// boundaries included.
func (rect *Rect) ContainsPoint(p *T) bool {
	if p[0] < rect.Min[0] || p[0] > rect.Max[0] {
		return false
	}
	return p[1] >= rect.Min[1] && p[1] <= rect.Max[1]
}
// Contains reports whether other lies entirely within the rectangle,
// boundaries included.
func (rect *Rect) Contains(other *Rect) bool {
	if other.Min[0] < rect.Min[0] || other.Min[1] < rect.Min[1] {
		return false
	}
	return other.Max[0] <= rect.Max[0] && other.Max[1] <= rect.Max[1]
}
// Area returns the rectangle's area, width times height, computed directly
// from the corner components.
func (rect *Rect) Area() float64 {
	w := rect.Max[0] - rect.Min[0]
	h := rect.Max[1] - rect.Min[1]
	return w * h
}
// Intersects reports whether the two rectangles overlap, touching edges
// included. This fixes an index typo in the y-axis test, which previously
// compared other.Max[1] against rect.Min[0] instead of rect.Min[1].
func (rect *Rect) Intersects(other *Rect) bool {
	return other.Max[0] >= rect.Min[0] &&
		other.Min[0] <= rect.Max[0] &&
		other.Max[1] >= rect.Min[1] &&
		other.Min[1] <= rect.Max[1]
}
// Join enlarges this rectangle in place so that it also contains the given
// rectangle, taking the componentwise min/max of the two corner pairs.
func (rect *Rect) Join(other *Rect) {
	rect.Min = Min(&rect.Min, &other.Min)
	rect.Max = Max(&rect.Max, &other.Max)
}
// Extend grows the rectangle in place just enough to contain point p.
func (rect *Rect) Extend(p *T) {
	rect.Min = Min(&rect.Min, p)
	rect.Max = Max(&rect.Max, p)
}
// Joined returns the minimal rectangle containing both a and b, taking the
// componentwise min/max of the two corner pairs. Neither input is modified.
func Joined(a, b *Rect) (rect Rect) {
	rect.Min = Min(&a.Min, &b.Min)
	rect.Max = Max(&a.Max, &b.Max)
	return rect
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.