code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package main
import (
"fmt"
"math"
"os"
)
// Tunable constants for the landing controller.
const (
	// MarsGravity is the vertical acceleration added to vSpeed each turn
	// (presumably m/s^2 per one-second turn - TODO confirm game rules).
	MarsGravity = -3.711
	// MaxHSpeed and MaxVSpeed are the speed magnitudes (m/s, per the
	// telemetry comments in gatherInput) above which the controller
	// switches to braking; see Land and landingPhase.
	MaxHSpeed = 20
	MaxVSpeed = 40
)
// main installs a panic handler (so failures are reported on stderr instead
// of crashing silently - stdout is reserved for engine commands) and starts
// the landing control loop.
func main() {
	defer func() {
		if r := recover(); r != nil {
			d("ERROR: %v", r)
		}
	}()
	var craft lander
	craft.Land()
}
// d writes a printf-style debug message to stderr, followed by a newline.
// Diagnostics must go to stderr because the game reads engine commands from
// stdout. Uses a single Fprintf instead of Fprintln(Sprintf(...)), which
// built an intermediate string for no benefit.
func d(format string, a ...interface{}) {
	fmt.Fprintf(os.Stderr, format+"\n", a...)
}
// lander holds the craft's current telemetry plus the precomputed
// description of the target landing site.
type lander struct {
	// Read only - gathered from env.
	pos                                   point   // current position, refreshed each turn by gatherInput
	hSpeed, vSpeed, fuel, rotation, power int     // raw telemetry values from the game (see gatherInput)
	surface                               []point // surface polyline vertices, in input order
	landingCenterPoint                    *point  // midpoint of the flat landing segment; nil until discovered
	landingSiteTolerance                  int     // width (x extent) of the flat landing segment
	landingSurfaceEndID                   int     // index in surface of the landing segment's end vertex
}
// Land using PID controller approach. In every iteration check the estimated landing and adjust.
// It first reads the surface description from stdin, then loops forever:
// read telemetry, simulate the free-fall impact point, and emit a
// rotation/throttle command for the turn.
func (l *lander) Land() {
	l.discoverSurfaceAndLandingSite()
	d("Landing center: %s, tolerance: %d", l.landingCenterPoint.print(), l.landingSiteTolerance)
	// Adjusting loop.
	for {
		l.gatherInput()
		where, isLandingArea, when, eVSpeed, eHSpeed := l.estimateSurfaceReachable()
		d("Estimated landing: %s | ok? %v | epochs: %d, eV: %f, eH %f",
			where.print(), isLandingArea, when, eVSpeed, eHSpeed)
		// TODO: Calculate obstacles, use bezier.
		angleToAdjust, distance := l.angleAndDistanceToTarget(where)
		d("Angle to adjust: %f | distance %f", angleToAdjust, distance)
		// Adjusting phase.
		throttle := 4
		// NOTE(review): this branch assigns the same value (4) as the
		// default above, so it is a no-op. Compare landingPhase, where the
		// analogous logic switches between 0 and 4 - confirm the intended
		// throttle policy here.
		if angleToAdjust > 5 {
			throttle = 4
		}
		// We are free-falling to Landing Area, cool - but we need to brake ):
		if isLandingArea && when < 20 && (math.Abs(float64(eHSpeed)) >= MaxHSpeed || math.Abs(float64(eVSpeed)) >= MaxVSpeed) {
			brakingVec := newPoint(-l.hSpeed, -l.vSpeed)
			d("braking: %s", brakingVec.print())
			angleToAdjust = l.pos.Sub(where).Angle(brakingVec)
			throttle = 4
			// NOTE(review): another no-op reassignment (4 -> 4);
			// landingPhase sets throttle to 0 in this case instead.
			if angleToAdjust > 15 {
				throttle = 4
			}
		}
		l.engineSettings(int(angleToAdjust), throttle)
	}
}
// landingPhase steers the craft during final descent towards landingTarget.
// It loops forever: each turn it either brakes (when either speed component
// exceeds the safe landing limit) or points towards the target, emits the
// engine settings, then reads the next turn's telemetry.
//
// NOTE: not currently called by Land.
func (l *lander) landingPhase(landingTarget point) {
	for {
		throttle := 0
		angleToAdjust := float64(0)
		// We are free-falling to Landing Area, cool - but we need to brake ):
		if math.Abs(float64(l.hSpeed)) >= MaxHSpeed || math.Abs(float64(l.vSpeed)) >= MaxVSpeed {
			brakingVec := newPoint(-l.hSpeed, -l.vSpeed)
			d("braking: %s", brakingVec.print())
			angleToAdjust = l.pos.Sub(landingTarget).Angle(brakingVec)
			throttle = 4
			if angleToAdjust > 15 {
				throttle = 0
			}
		} else {
			// Assign with '=' so the outer angleToAdjust is updated: the
			// original ':=' declared a new variable shadowing it, leaving
			// the engine command below always at angle 0 on this path.
			var distance float64
			angleToAdjust, distance = l.angleAndDistanceToTarget(landingTarget)
			d("Angle to adjust: %f | distance %f", angleToAdjust, distance)
			if angleToAdjust > 5 {
				throttle = 4
			}
		}
		l.engineSettings(int(angleToAdjust), throttle)
		l.gatherInput()
	}
}
// rotationAndPowerToAdjustAngle is an unfinished stub: it is apparently meant
// to derive rotation/power settings from a desired correction angle, but it
// currently does nothing.
func (l *lander) rotationAndPowerToAdjustAngle(angleToAdjust float64) {
	//a := float64(l.power) * math.Cos(float64(l.rotation))
	//l.rotation
}
// angleAndDistanceToTarget computes a heading correction (in degrees) between
// the direction towards the landing centre and the direction towards
// currentTarget, along with the straight-line distance to the landing centre.
// The argument order of Angle flips depending on which side of the target the
// landing centre lies.
func (l *lander) angleAndDistanceToTarget(currentTarget point) (angle float64, distance float64) {
	toLandingCenter := l.landingCenterPoint.Sub(l.pos)
	toTarget := currentTarget.Sub(l.pos)
	if l.landingCenterPoint.x < currentTarget.x {
		angle = toLandingCenter.Angle(toTarget)
	} else {
		d("TOO FAR")
		angle = toTarget.Angle(toLandingCenter)
	}
	distance = toLandingCenter.Norm()
	return angle, distance
}
// discoverSurfaceAndLandingSite reads the surface description from stdin and
// locates the landing site: the first pair of consecutive vertices sharing
// the same Y (a flat segment). It records the segment's midpoint, width and
// the index of its end vertex.
func (l *lander) discoverSurfaceAndLandingSite() {
	// surfaceN: the number of points used to draw the surface of Mars.
	var surfaceN int
	fmt.Scan(&surfaceN)
	var surface []point
	lastY := -1
	for i := 0; i < surfaceN; i++ {
		// landX: X coordinate of a surface point. (0 to 6999)
		// landY: Y coordinate of a surface point. By linking all the points together in a sequential fashion,
		// you form the surface of Mars.
		var landX, landY int
		fmt.Scan(&landX, &landY)
		surface = append(surface, newPoint(landX, landY))
		// Only the first flat segment found is recorded as the landing site.
		if l.landingCenterPoint == nil && lastY == landY {
			l.landingSiteTolerance = landX - int(surface[i-1].x)
			p := newPoint((l.landingSiteTolerance/2)+int(surface[i-1].x), lastY)
			l.landingCenterPoint = &p
			l.landingSurfaceEndID = i
		}
		lastY = landY
	}
	l.surface = surface
}
// gatherInput reads one turn of telemetry from stdin into the lander.
// Position is converted to a point; the remaining values stay as ints.
func (l *lander) gatherInput() {
	// hSpeed: the horizontal speed (in m/s), can be negative.
	// vSpeed: the vertical speed (in m/s), can be negative.
	// fuel: the quantity of remaining fuel in liters.
	// rotation: the rotation angle in degrees (-90 to 90).
	// power: the thrust power (0 to 4).
	var X, Y int
	fmt.Scan(&X, &Y, &l.hSpeed, &l.vSpeed, &l.fuel, &l.rotation, &l.power)
	l.pos = newPoint(X, Y)
}
// engineSettings clamps and emits this turn's rotation/throttle command on
// stdout. rotate is the desired rotation angle in degrees [-90, 90]
// (MINUS = RIGHT); power is the desired thrust power [0, 4].
func (l *lander) engineSettings(rotationSetting, throttleSetting int) {
	// Clamp rotation into the documented [-90, 90] range.
	if rotationSetting > 90 {
		rotationSetting = 90
	} else if rotationSetting < -90 {
		rotationSetting = -90
	}
	// Clamp throttle into the documented [0, 4] range as well, mirroring the
	// rotation validation (it was previously emitted unchecked).
	if throttleSetting > 4 {
		throttleSetting = 4
	} else if throttleSetting < 0 {
		throttleSetting = 0
	}
	fmt.Printf("%d %d\n", rotationSetting, throttleSetting)
}
// Max returns the larger of a and b. It delegates to the standard library's
// math.Max instead of a hand-rolled comparison. Note math.Max propagates NaN
// (the previous version returned b when a was NaN).
func Max(a, b float64) float64 {
	return math.Max(a, b)
}
// Assuming no throttle.
// estimateSurfaceReachable simulates a ballistic free-fall from the current
// state, one game turn per iteration, until the craft's safety circle touches
// the surface. It returns the impact point, whether the hit segment is the
// landing site, the number of turns until impact, and the speeds at impact.
func (l *lander) estimateSurfaceReachable() (where point, isLandingArea bool, when int, eVSpeed float64, eHSpeed float64) {
	epoch := 0
	tmpVSpeed := float64(l.vSpeed)
	tmpHSpeed := float64(l.hSpeed)
	pos := l.pos
	for {
		// Movement.
		pos.x += tmpHSpeed
		pos.y += tmpVSpeed
		safeZone := newCollCircle(pos, tmpVSpeed, tmpHSpeed)
		if collisionPt, surfaceEndID, is := safeZone.isCollidingWithSurface(l.surface); is {
			return collisionPt, (surfaceEndID == l.landingSurfaceEndID), epoch, tmpVSpeed, tmpHSpeed
		}
		// Next speeds. Horizontal speed stays constant (no thrust is
		// assumed); only gravity acts on the vertical component.
		epoch++
		tmpVSpeed += MarsGravity
		// It is stupid, but codingame calculates on ints, so we need to adjust based on that.
		tmpVSpeed = float64(int(tmpVSpeed))
		// NOTE(review): if the simulated trajectory never intersects the
		// surface polyline, this loop does not terminate.
	}
}
// point is a 2D vector, used for both positions and directions.
type point struct {
	x, y float64
}

// newPoint builds a point from integer coordinates.
func newPoint(x, y int) point {
	return point{x: float64(x), y: float64(y)}
}

// Dot returns the standard dot product of v and ov.
func (v point) Dot(ov point) float64 { return v.x*ov.x + v.y*ov.y }

// Norm2 returns the squared length of v.
func (v point) Norm2() float64 { return v.Dot(v) }

// Norm returns the Euclidean length of v.
func (v point) Norm() float64 { return math.Sqrt(v.Dot(v)) }

// Normalize returns a unit-length vector pointing the same way as v.
// The zero vector is returned unchanged.
func (v point) Normalize() point {
	if v == (point{}) {
		return v
	}
	return v.Mul(1 / v.Norm())
}

// Mul scales v by m.
func (v point) Mul(m float64) point { return point{x: m * v.x, y: m * v.y} }

// Sub returns the vector difference v - ov.
func (v point) Sub(ov point) point { return point{x: v.x - ov.x, y: v.y - ov.y} }

// Angle returns, in degrees, the math.Atan2 heading of the vector from v to
// ov - i.e. the direction of ov-v, not the angle between the two vectors.
func (v point) Angle(ov point) float64 {
	diff := ov.Sub(v)
	return math.Atan2(diff.y, diff.x) * (180 / math.Pi)
}

// Distance returns the Euclidean distance between v and ov.
func (v point) Distance(ov point) float64 { return v.Sub(ov).Norm() }

// print renders v as "[x, y]" for debug logging.
func (v point) print() string {
	return fmt.Sprintf("[%f, %f]", v.x, v.y)
}
// collisionCircle is a coarse bounding circle around the lander used for
// surface collision checks.
type collisionCircle struct {
	center point
	r      float64
}

// newCollCircle builds a collision circle centred on pos whose radius is the
// larger magnitude of the two speed components - roughly how far the lander
// can travel in one turn. Uses the stdlib math.Max directly rather than the
// hand-rolled Max helper.
func newCollCircle(pos point, vSpeed, hSpeed float64) collisionCircle {
	return collisionCircle{
		center: pos,
		r:      math.Max(math.Abs(vSpeed), math.Abs(hSpeed)),
	}
}
// isCollidingWithSurface reports whether the circle intersects any segment of
// the surface polyline. On a hit it returns the closest point on the hit
// segment and the index (in surface) of that segment's end vertex.
func (co collisionCircle) isCollidingWithSurface(surface []point) (point, int, bool) {
	for i := range surface[1:] {
		start := surface[i]
		end := surface[i+1]
		if end.x < (co.center.x-co.r) && start.x < (co.center.x-co.r) {
			// Too far left to be a collision.
			continue
		}
		if end.x > (co.center.x+co.r) && start.x > (co.center.x+co.r) {
			// Too far right to be a collision.
			continue
		}
		// Project the circle centre onto the segment to find the closest point.
		startToEndVec := end.Sub(start)
		startToCenterVec := co.center.Sub(start)
		segLen2 := startToEndVec.Norm2()
		t := 0.0
		if segLen2 > 0 { // guard duplicate vertices: avoid 0/0 -> NaN
			t = startToCenterVec.Dot(startToEndVec) / segLen2
		}
		// Clamp the projection parameter to [0, 1]: the unclamped value
		// projected onto the infinite line through the segment, which could
		// report a "collision" with a point beyond the segment's endpoints.
		if t < 0 {
			t = 0
		} else if t > 1 {
			t = 1
		}
		closestPt := point{
			x: start.x + startToEndVec.x*t,
			y: start.y + startToEndVec.y*t,
		}
		if co.center.Distance(closestPt) > co.r {
			continue
		}
		return closestPt, i + 1, true
	}
	return point{}, 0, false
}
// Bezier evaluation.
// evalBezierXUsingHornerMethod evaluates the x coordinate of a Bezier curve
// at parameter t via Horner's scheme over the Bernstein basis, keeping a
// running power of t and a running binomial coefficient.
func evalBezierXUsingHornerMethod(t float64, controlPoints []point) float64 {
	degree := len(controlPoints) - 1
	oneMinusT := float64(1 - t)
	binom := float64(1) // running binomial coefficient C(degree, i)
	tPow := float64(1)  // running power t^i
	acc := controlPoints[0].x * oneMinusT
	for i := 1; i < degree; i++ {
		tPow *= t
		binom *= float64(degree-i+1) / float64(i)
		acc = (acc + tPow*binom*controlPoints[i].x) * oneMinusT
	}
	return acc + tPow*t*controlPoints[degree].x
}
func evalBezierYUsingHornerMethod(t float64, controlPoints []point) float64 {
n := len(controlPoints) - 1
u := float64(1 - t)
bc := float64(1)
tn := float64(1)
tmp := controlPoints[0].y * u
for i := 1; i < n; i++ {
tn *= t
bc *= float64(n-i+1) / float64(i)
tmp = (tmp + tn*bc*controlPoints[i].y) * u
}
return (tmp + tn*t*controlPoints[n].y)
} | very_hard/Mars_Lander_Ep_3/mars.go | 0.577257 | 0.513485 | mars.go | starcoder |
package cache
import (
"time"
)
// Cache is a representation of any cache store that has keys and values.
type Cache interface {
	// Purge is used to completely clear the cache.
	Purge()
	// Add adds the given key and value to the store without an expiry.
	Add(key, value interface{})
	// AddWithDefaultExpires adds the given key and value to the store with the default expiry.
	AddWithDefaultExpires(key, value interface{})
	// AddWithExpiresInSecs adds the given key and value to the cache with the given expiry.
	// NOTE(review): whether expireAtSecs is an absolute Unix time or a
	// relative TTL in seconds is implementation-defined - confirm against
	// the concrete implementations.
	AddWithExpiresInSecs(key, value interface{}, expireAtSecs int64)
	// Get returns the value stored in the cache for a key, or nil if no value is present. The ok result indicates whether value was found in the cache.
	Get(key interface{}) (value interface{}, ok bool)
	// GetOrAdd returns the existing value for the key if present. Otherwise, it stores and returns the given value. The loaded result is true if the value was loaded, false if stored.
	// This API intentionally deviates from the Add-only variants above for simplicity. We should simplify the entire API in the future.
	GetOrAdd(key, value interface{}, ttl time.Duration) (actual interface{}, loaded bool)
	// Remove deletes the value for a key.
	Remove(key interface{})
	// RemoveByPrefix deletes all keys containing the given prefix string.
	RemoveByPrefix(prefix string)
	// Keys returns a slice of the keys in the cache.
	Keys() []interface{}
	// Len returns the number of items in the cache.
	Len() int
	// Name identifies this cache instance among others in the system.
	Name() string
	// GetInvalidateClusterEvent returns the cluster event configured when this cache was created.
	GetInvalidateClusterEvent() string
}
// Provider defines how to create new caches.
type Provider interface {
	// Connect opens a new connection to the cache using specific provider parameters.
	Connect()
	// NewCache creates a new cache with given size.
	NewCache(size int) Cache
	// NewCacheWithParams creates a new cache with the given parameters.
	// NOTE(review): defaultExpiry presumably uses the same units as
	// AddWithExpiresInSecs - confirm.
	NewCacheWithParams(size int, name string, defaultExpiry int64, invalidateClusterEvent string) Cache
	// Close releases any resources used by the cache provider.
	Close()
}
package snomed
import (
"sync"
)
// NaiveCache is a fairly naive in-memory cache used for development.
// It is designed to store arbitrary SNOMED-CT entities such as Concepts,
// Descriptions and Relationships. To make it easier to use, there are
// convenience methods that do the type-casting to put and get objects of
// these types; they will simply wrap a better future implementation when one
// is available. Backed by a sync.Map, so it is safe for concurrent use.
// TODO: use an even better cache behind-the-scenes, perhaps with persistence
// to the filesystem?
type NaiveCache struct {
	cache sync.Map
}

// NewCache creates a new, empty cache.
func NewCache() *NaiveCache {
	return &NaiveCache{}
}

// Clear removes all entries from the cache.
// It deletes keys in place rather than overwriting the field: the previous
// `nc.cache = *new(sync.Map)` copied a struct containing a sync.Mutex (a
// copylocks violation flagged by `go vet`) and raced with concurrent users
// of the old map value.
func (nc *NaiveCache) Clear() {
	nc.cache.Range(func(key, _ interface{}) bool {
		nc.cache.Delete(key)
		return true
	})
}
// Get fetches a generic object from the cache using the specified identifier
// returning the object and a boolean indicating success (or not)
func (nc *NaiveCache) Get(id int) (interface{}, bool) {
	return nc.cache.Load(id)
}

// GetOrElse fetches a generic object from the cache or performs the callback
// specified, caching the result. Errors from the callback are returned and
// not cached.
// NOTE(review): the lookup and the later Put are not atomic, so two
// concurrent callers that both miss may both invoke f; the last Put wins.
func (nc *NaiveCache) GetOrElse(id int, f func(int) (interface{}, error)) (interface{}, error) {
	value, success := nc.Get(id)
	if success {
		return value, nil
	}
	value, err := f(id)
	if err != nil {
		return nil, err
	}
	nc.Put(id, value)
	return value, nil
}
// Put stores a generic object in the cache under the given identifier.
func (nc *NaiveCache) Put(id int, value interface{}) {
	nc.cache.Store(id, value)
}

// PutConcept stores a concept in the cache.
func (nc *NaiveCache) PutConcept(conceptID int, concept *Concept) {
	nc.Put(conceptID, concept)
}

// GetConcept fetches a concept from the cache, reporting whether a concept
// was stored under that identifier.
func (nc *NaiveCache) GetConcept(conceptID int) (*Concept, bool) {
	v, ok := nc.Get(conceptID)
	if !ok {
		return nil, false
	}
	concept, ok := v.(*Concept)
	return concept, ok
}

// GetConceptOrElse fetches a concept from the cache or runs the supplied
// callback, caching its result.
func (nc *NaiveCache) GetConceptOrElse(conceptID int, f func(conceptID int) (interface{}, error)) (*Concept, error) {
	res, err := nc.GetOrElse(conceptID, f)
	if err != nil {
		return nil, err
	}
	return res.(*Concept), nil
}

// PutDescription stores a description in the cache.
func (nc *NaiveCache) PutDescription(descriptionID int, description *Description) {
	nc.Put(descriptionID, description)
}

// GetDescription fetches a description from the cache, reporting whether a
// description was stored under that identifier.
func (nc *NaiveCache) GetDescription(descriptionID int) (*Description, bool) {
	v, ok := nc.Get(descriptionID)
	if !ok {
		return nil, false
	}
	description, ok := v.(*Description)
	return description, ok
}

// GetDescriptionOrElse fetches a description from the cache or runs the
// supplied callback, caching its result.
func (nc *NaiveCache) GetDescriptionOrElse(descriptionID int, f func(descriptionID int) (interface{}, error)) (*Description, error) {
	res, err := nc.GetOrElse(descriptionID, f)
	if err != nil {
		return nil, err
	}
	return res.(*Description), nil
}
// PutRelationship stores a relationship in the cache
func (nc *NaiveCache) PutRelationship(descriptionID int, description *Description) {
nc.Put(descriptionID, description)
}
// GetRelationship fetches a relationship from the cache
func (nc *NaiveCache) GetRelationship(relationshipID int) (*Relationship, bool) {
value, success := nc.Get(relationshipID)
if !success {
return nil, false
}
relationship, success := value.(*Relationship)
return relationship, true
}
// GetRelationshipOrElse fetches a relationship from the cache or performs the callback specified, caching the result
func (nc *NaiveCache) GetRelationshipOrElse(relationshipID int, f func(relationshipID int) (interface{}, error)) (*Relationship, error) {
v, err := nc.GetOrElse(relationshipID, f)
if err != nil {
return nil, err
}
return v.(*Relationship), nil
} | snomed/cache.go | 0.527803 | 0.419886 | cache.go | starcoder |
package validation
import (
"fmt"
"reflect"
"time"
)
// Predefined validation errors used by the Min/Max rules. Each message is a
// template whose {{.threshold}} parameter is filled in via SetParams when
// Validate fails.
var (
	// ErrMinGreaterEqualThanRequired is the error that returns when a value is less than a specified threshold.
	ErrMinGreaterEqualThanRequired = NewError("validation_min_greater_equal_than_required", "must be no less than {{.threshold}}")
	// ErrMaxLessEqualThanRequired is the error that returns when a value is greater than a specified threshold.
	ErrMaxLessEqualThanRequired = NewError("validation_max_less_equal_than_required", "must be no greater than {{.threshold}}")
	// ErrMinGreaterThanRequired is the error that returns when a value is less than or equal to a specified threshold.
	ErrMinGreaterThanRequired = NewError("validation_min_greater_than_required", "must be greater than {{.threshold}}")
	// ErrMaxLessThanRequired is the error that returns when a value is greater than or equal to a specified threshold.
	ErrMaxLessThanRequired = NewError("validation_max_less_than_required", "must be less than {{.threshold}}")
)
// ThresholdRule is a validation rule that checks if a value satisfies the specified threshold requirement.
type ThresholdRule struct {
	threshold interface{} // boundary value; its reflect.Kind selects the comparison family in Validate
	operator  int         // one of the comparison constants below
	err       Error       // error reported when the comparison fails
}

// Comparison operators used by ThresholdRule.operator.
const (
	greaterThan = iota
	greaterEqualThan
	lessThan
	lessEqualThan
)
// Min returns a validation rule that checks if a value is greater or equal than the specified value.
// By calling Exclusive, the rule will check if the value is strictly greater than the specified value.
// Note that the value being checked and the threshold value must be of the same type.
// Only int, uint, float and time.Time types are supported.
// An empty value is considered valid. Please use the Required rule to make sure a value is not empty.
func Min(min interface{}) ThresholdRule {
	rule := ThresholdRule{err: ErrMinGreaterEqualThanRequired}
	rule.threshold = min
	rule.operator = greaterEqualThan
	return rule
}

// Max returns a validation rule that checks if a value is less or equal than the specified value.
// By calling Exclusive, the rule will check if the value is strictly less than the specified value.
// Note that the value being checked and the threshold value must be of the same type.
// Only int, uint, float and time.Time types are supported.
// An empty value is considered valid. Please use the Required rule to make sure a value is not empty.
func Max(max interface{}) ThresholdRule {
	rule := ThresholdRule{err: ErrMaxLessEqualThanRequired}
	rule.threshold = max
	rule.operator = lessEqualThan
	return rule
}
// Exclusive sets the comparison to exclude the boundary value, switching the
// rule's operator and error to the strict variant. Operators that are
// already strict are left untouched.
func (r ThresholdRule) Exclusive() ThresholdRule {
	switch r.operator {
	case greaterEqualThan:
		r.operator = greaterThan
		r.err = ErrMinGreaterThanRequired
	case lessEqualThan:
		r.operator = lessThan
		r.err = ErrMaxLessThanRequired
	}
	return r
}
// Validate checks if the given value is valid or not.
// The value is dereferenced first; nil or empty values pass (pair with the
// Required rule to forbid them). The threshold's reflect.Kind selects the
// comparison family, and the candidate value is converted to that family
// before comparing; a failed conversion is returned as an error.
func (r ThresholdRule) Validate(value interface{}) error {
	value, isNil := Indirect(value)
	if isNil || IsEmpty(value) {
		return nil
	}
	rv := reflect.ValueOf(r.threshold)
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		v, err := ToInt(value)
		if err != nil {
			return err
		}
		if r.compareInt(rv.Int(), v) {
			return nil
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		v, err := ToUint(value)
		if err != nil {
			return err
		}
		if r.compareUint(rv.Uint(), v) {
			return nil
		}
	case reflect.Float32, reflect.Float64:
		v, err := ToFloat(value)
		if err != nil {
			return err
		}
		if r.compareFloat(rv.Float(), v) {
			return nil
		}
	case reflect.Struct:
		// The only struct threshold supported is time.Time.
		t, ok := r.threshold.(time.Time)
		if !ok {
			return fmt.Errorf("type not supported: %v", rv.Type())
		}
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("cannot convert %v to time.Time", reflect.TypeOf(value))
		}
		// A zero time is treated as empty and therefore valid.
		if v.IsZero() || r.compareTime(t, v) {
			return nil
		}
	default:
		return fmt.Errorf("type not supported: %v", rv.Type())
	}
	// Comparison failed: report the threshold through the rule's error.
	return r.err.SetParams(map[string]interface{}{"threshold": r.threshold})
}
// Error sets the error message for the rule.
// The receiver is a value, so a modified copy is returned.
func (r ThresholdRule) Error(message string) ThresholdRule {
	r.err = r.err.SetMessage(message)
	return r
}

// ErrorObject sets the error struct for the rule.
// The receiver is a value, so a modified copy is returned.
func (r ThresholdRule) ErrorObject(err Error) ThresholdRule {
	r.err = err
	return r
}
func (r ThresholdRule) compareInt(threshold, value int64) bool {
switch r.operator {
case greaterThan:
return value > threshold
case greaterEqualThan:
return value >= threshold
case lessThan:
return value < threshold
default:
return value <= threshold
}
}
func (r ThresholdRule) compareUint(threshold, value uint64) bool {
switch r.operator {
case greaterThan:
return value > threshold
case greaterEqualThan:
return value >= threshold
case lessThan:
return value < threshold
default:
return value <= threshold
}
}
func (r ThresholdRule) compareFloat(threshold, value float64) bool {
switch r.operator {
case greaterThan:
return value > threshold
case greaterEqualThan:
return value >= threshold
case lessThan:
return value < threshold
default:
return value <= threshold
}
}
func (r ThresholdRule) compareTime(threshold, value time.Time) bool {
switch r.operator {
case greaterThan:
return value.After(threshold)
case greaterEqualThan:
return value.After(threshold) || value.Equal(threshold)
case lessThan:
return value.Before(threshold)
default:
return value.Before(threshold) || value.Equal(threshold)
}
} | vendor/github.com/go-ozzo/ozzo-validation/v4/minmax.go | 0.814643 | 0.520131 | minmax.go | starcoder |
package xyml
import (
"encoding/base64"
"strconv"
"time"
"gopkg.in/yaml.v3"
)
// scalarNode builds a scalar YAML node with the given tag and rendered value.
func scalarNode(tag, value string) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: tag, Value: value}
}

// NewBinaryNode returns a new binary typed YAML node with the given content.
func NewBinaryNode(v []byte) *yaml.Node {
	return scalarNode(TagBinary, base64.StdEncoding.EncodeToString(v))
}

// NewBoolNode returns a new boolean typed YAML node with the given value.
func NewBoolNode(v bool) *yaml.Node {
	return scalarNode(TagBool, strconv.FormatBool(v))
}

// NewFloatNode returns a new float typed YAML node with the given value,
// rendered with the given strconv format byte and precision.
func NewFloatNode(val float64, rep byte, prec int) *yaml.Node {
	return scalarNode(TagFloat, strconv.FormatFloat(val, rep, prec, 64))
}

// NewIntNode returns a new int typed YAML node with the given value, rendered
// in the given base.
func NewIntNode(val int64, base int) *yaml.Node {
	return scalarNode(TagInt, strconv.FormatInt(val, base))
}

// NewNullNode returns a new null typed YAML node.
func NewNullNode() *yaml.Node {
	return scalarNode(TagNil, "")
}

// NewStringNode returns a new string typed YAML node with the given value.
func NewStringNode(v string) *yaml.Node {
	return scalarNode(TagString, v)
}

// NewTimestampNode returns a new timestamp typed YAML node with the given
// value, formatted as RFC 3339 with nanoseconds.
func NewTimestampNode(v time.Time) *yaml.Node {
	return scalarNode(TagTimestamp, v.Format(time.RFC3339Nano))
}
// NewMapNode returns a new mapping typed YAML node presized to the given size.
func NewMapNode(size int) *yaml.Node {
return &yaml.Node{
Kind: yaml.MappingNode,
Tag: TagMap,
Content: make([]*yaml.Node, 0, size*2),
}
}
// NewOrderedMapNode returns a new ordered map typed YAML node presized to the
// given size.
func NewOrderedMapNode(size int) *yaml.Node {
return &yaml.Node{
Kind: yaml.SequenceNode,
Tag: TagOrderedMap,
Content: make([]*yaml.Node, 0, size*2),
}
}
// NewPairsNode returns a new pairs typed YAML node presized to the given size.
func NewPairsNode(size int) *yaml.Node {
return &yaml.Node{
Kind: yaml.SequenceNode,
Tag: TagPairs,
Content: make([]*yaml.Node, 0, size),
}
}
// NewSetNode returns a new set typed YAML node presized to the given size.
func NewSetNode(size int) *yaml.Node {
return &yaml.Node{
Kind: yaml.SequenceNode,
Tag: TagSet,
Content: make([]*yaml.Node, 0, size),
}
}
// NewSequenceNode returns a new sequence typed YAML node presized to the given size.
func NewSequenceNode(size int) *yaml.Node {
return &yaml.Node{
Kind: yaml.SequenceNode,
Tag: TagSequence,
Content: make([]*yaml.Node, 0, size),
}
} | v1/pkg/xyml/make.go | 0.814348 | 0.597197 | make.go | starcoder |
package function
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql"
)
// Explode is a function that generates a row for each value of its child.
// It is a placeholder expression node (see Generate, its non-placeholder
// counterpart); Eval must never be reached.
type Explode struct {
	Child sql.Expression
}

// Compile-time check that *Explode satisfies sql.FunctionExpression.
var _ sql.FunctionExpression = (*Explode)(nil)

// NewExplode creates a new Explode function.
// ctx is unused here; it is accepted to match the expression factory
// signature used by WithChildren.
func NewExplode(ctx *sql.Context, child sql.Expression) sql.Expression {
	return &Explode{child}
}

// FunctionName implements sql.FunctionExpression
func (e *Explode) FunctionName() string {
	return "explode"
}

// Resolved implements the sql.Expression interface.
func (e *Explode) Resolved() bool { return e.Child.Resolved() }

// Children implements the sql.Expression interface.
func (e *Explode) Children() []sql.Expression { return []sql.Expression{e.Child} }

// IsNullable implements the sql.Expression interface.
func (e *Explode) IsNullable() bool { return e.Child.IsNullable() }

// Type implements the sql.Expression interface. It reports the underlying
// (element) type of the child's type, since each generated row holds one
// element of the child collection.
func (e *Explode) Type() sql.Type {
	return sql.UnderlyingType(e.Child.Type())
}

// Eval implements the sql.Expression interface. Explode is only a
// placeholder, so evaluating it is a bug.
func (e *Explode) Eval(*sql.Context, sql.Row) (interface{}, error) {
	panic("eval method of Explode is only a placeholder")
}

// String renders the expression for plan output.
func (e *Explode) String() string {
	return fmt.Sprintf("EXPLODE(%s)", e.Child)
}

// WithChildren implements the Expression interface.
func (e *Explode) WithChildren(ctx *sql.Context, children ...sql.Expression) (sql.Expression, error) {
	if len(children) != 1 {
		return nil, sql.ErrInvalidChildrenNumber.New(e, len(children), 1)
	}
	return NewExplode(ctx, children[0]), nil
}
// Generate is a function that generates a row for each value of its child.
// This is the non-placeholder counterpart of Explode.
type Generate struct {
	Child sql.Expression
}

// NewGenerate creates a new Generate function.
// ctx is unused here; it is accepted to match the expression factory
// signature used by WithChildren.
func NewGenerate(ctx *sql.Context, child sql.Expression) sql.Expression {
	return &Generate{child}
}

// Resolved implements the sql.Expression interface.
func (e *Generate) Resolved() bool { return e.Child.Resolved() }

// Children implements the sql.Expression interface.
func (e *Generate) Children() []sql.Expression { return []sql.Expression{e.Child} }

// IsNullable implements the sql.Expression interface.
func (e *Generate) IsNullable() bool { return e.Child.IsNullable() }

// Type implements the sql.Expression interface. Unlike Explode, it returns
// the child's type unchanged.
func (e *Generate) Type() sql.Type {
	return e.Child.Type()
}

// Eval implements the sql.Expression interface by delegating to the child.
func (e *Generate) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	return e.Child.Eval(ctx, row)
}

// String renders the expression for plan output.
// NOTE(review): this prints "EXPLODE(...)" - presumably intentional so plans
// display identically to the Explode placeholder, but confirm it is not a
// copy-paste of Explode.String.
func (e *Generate) String() string {
	return fmt.Sprintf("EXPLODE(%s)", e.Child)
}

// WithChildren implements the Expression interface.
func (e *Generate) WithChildren(ctx *sql.Context, children ...sql.Expression) (sql.Expression, error) {
	if len(children) != 1 {
		return nil, sql.ErrInvalidChildrenNumber.New(e, len(children), 1)
	}
	return NewGenerate(ctx, children[0]), nil
}
package onshape
import (
"encoding/json"
)
// BTBodyTypeFilter112AllOf struct for BTBodyTypeFilter112AllOf.
// Fields are pointers so "unset" can be distinguished from the zero value;
// unset fields are omitted from JSON via the omitempty tags.
type BTBodyTypeFilter112AllOf struct {
	BodyType *string `json:"bodyType,omitempty"`
	BtType *string `json:"btType,omitempty"`
}

// NewBTBodyTypeFilter112AllOf instantiates a new BTBodyTypeFilter112AllOf object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTBodyTypeFilter112AllOf() *BTBodyTypeFilter112AllOf {
	this := BTBodyTypeFilter112AllOf{}
	return &this
}

// NewBTBodyTypeFilter112AllOfWithDefaults instantiates a new BTBodyTypeFilter112AllOf object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTBodyTypeFilter112AllOfWithDefaults() *BTBodyTypeFilter112AllOf {
	this := BTBodyTypeFilter112AllOf{}
	return &this
}
// GetBodyType returns the BodyType field value if set, and the zero value
// otherwise.
func (o *BTBodyTypeFilter112AllOf) GetBodyType() string {
	if o == nil || o.BodyType == nil {
		return ""
	}
	return *o.BodyType
}

// GetBodyTypeOk returns a pointer to the BodyType field if set (nil
// otherwise) plus a boolean reporting whether the field has been set.
func (o *BTBodyTypeFilter112AllOf) GetBodyTypeOk() (*string, bool) {
	if o == nil || o.BodyType == nil {
		return nil, false
	}
	return o.BodyType, true
}

// HasBodyType reports whether the BodyType field has been set.
func (o *BTBodyTypeFilter112AllOf) HasBodyType() bool {
	return o != nil && o.BodyType != nil
}

// SetBodyType stores a copy of the given string in the BodyType field.
func (o *BTBodyTypeFilter112AllOf) SetBodyType(v string) {
	o.BodyType = &v
}

// GetBtType returns the BtType field value if set, and the zero value
// otherwise.
func (o *BTBodyTypeFilter112AllOf) GetBtType() string {
	if o == nil || o.BtType == nil {
		return ""
	}
	return *o.BtType
}

// GetBtTypeOk returns a pointer to the BtType field if set (nil otherwise)
// plus a boolean reporting whether the field has been set.
func (o *BTBodyTypeFilter112AllOf) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType reports whether the BtType field has been set.
func (o *BTBodyTypeFilter112AllOf) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType stores a copy of the given string in the BtType field.
func (o *BTBodyTypeFilter112AllOf) SetBtType(v string) {
	o.BtType = &v
}
// MarshalJSON serializes only the fields that have been set, matching the
// omitempty JSON tags on the struct.
func (o BTBodyTypeFilter112AllOf) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.BodyType != nil {
		toSerialize["bodyType"] = o.BodyType
	}
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	return json.Marshal(toSerialize)
}
type NullableBTBodyTypeFilter112AllOf struct {
value *BTBodyTypeFilter112AllOf
isSet bool
}
func (v NullableBTBodyTypeFilter112AllOf) Get() *BTBodyTypeFilter112AllOf {
return v.value
}
func (v *NullableBTBodyTypeFilter112AllOf) Set(val *BTBodyTypeFilter112AllOf) {
v.value = val
v.isSet = true
}
func (v NullableBTBodyTypeFilter112AllOf) IsSet() bool {
return v.isSet
}
func (v *NullableBTBodyTypeFilter112AllOf) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBTBodyTypeFilter112AllOf(val *BTBodyTypeFilter112AllOf) *NullableBTBodyTypeFilter112AllOf {
return &NullableBTBodyTypeFilter112AllOf{value: val, isSet: true}
}
func (v NullableBTBodyTypeFilter112AllOf) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBTBodyTypeFilter112AllOf) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_bt_body_type_filter_112_all_of.go | 0.680348 | 0.521471 | model_bt_body_type_filter_112_all_of.go | starcoder |
package rbtree
import (
"constraints"
"github.com/modern-dev/gtl/utility"
)
// Node colours for the red-black invariants.
const (
	red color = iota
	black
)

type (
	// RBTree is a red-black binary search tree ordered by cmpInst.
	RBTree[T comparable] struct {
		root    *nodeHandle[T]
		nilNode *nodeHandle[T] // shared sentinel leaf; created black by the constructors
		cmpInst utility.Compare[T]
		size    int  // number of stored elements
		dupl    bool // whether duplicate values may be inserted
	}
	// nodeHandle is a single tree node; leaves point at the tree's sentinel
	// rather than nil.
	nodeHandle[T any] struct {
		col    color
		left   *nodeHandle[T]
		right  *nodeHandle[T]
		parent *nodeHandle[T]
		value  T
	}
	// color is a node colour: red or black.
	color uint8
)
// NewRBTree creates an empty tree ordered by the standard less-than
// comparator for the element type.
func NewRBTree[T constraints.Ordered](allowDuplicates bool) *RBTree[T] {
	return NewRBTreeWithComparator[T](&utility.Less[T]{}, allowDuplicates)
}

// NewRBTreeWithComparator creates an empty tree with provided comparator for items.
func NewRBTreeWithComparator[T comparable](comparator utility.Compare[T], allowDuplicates bool) *RBTree[T] {
	sentinel := &nodeHandle[T]{col: black}
	return &RBTree[T]{
		root:    sentinel,
		nilNode: sentinel,
		cmpInst: comparator,
		dupl:    allowDuplicates,
	}
}
// Insert adds value into the tree.
// Complexity O(log n), where n is the number of elements in the tree.
func (rbt *RBTree[T]) Insert(value T) {
	// New nodes start red with sentinel children; insertFixup restores the
	// red-black invariants afterwards.
	newNode := &nodeHandle[T]{
		col:    red,
		left:   rbt.nilNode,
		right:  rbt.nilNode,
		parent: rbt.nilNode,
		value:  value,
	}
	parentNode := rbt.findInsertNode(newNode)
	newNode.parent = parentNode
	rbt.addChild(parentNode, newNode)
	rbt.size++
	rbt.insertFixup(newNode)
}
// Find tries to find the value in the tree.
// Returns 2 values.
// First value is an item if it was found, otherwise zero value for type parameter.
// Second value is bool indicating whether an item was found.
// Complexity O(log n), where n is the number of elements in the tree.
func (rbt *RBTree[T]) Find(value T) (T, bool) {
	// On a miss searchFromNode returns the sentinel, whose value is the
	// zero value for T.
	node, found := rbt.searchFromNode(rbt.root, value)
	return node.value, found
}

// Max returns max item in the tree according to the comparator.
// Complexity O(log n), where n is the number of elements in tree.
// NOTE(review): behaviour on an empty tree depends on maximum() (not visible
// here); presumably it yields the sentinel, i.e. the zero value - confirm.
func (rbt *RBTree[T]) Max() T {
	node := rbt.maximum(rbt.root)
	return node.value
}

// Min returns min item in the tree according to the comparator.
// Complexity O(log n), where n is the number of elements in the tree.
// NOTE(review): same empty-tree caveat as Max.
func (rbt *RBTree[T]) Min() T {
	node := rbt.minimum(rbt.root)
	return node.value
}
// Erase deletes the item from the tree. Has no effect if the item was not in the tree.
// Complexity O(log n), where n is the number of elements in the tree.
func (rbt *RBTree[T]) Erase(value T) {
node, found := rbt.searchFromNode(rbt.root, value)
if !found {
return
}
if color, nodeToFix := rbt.deleteNode(node); color == black {
rbt.deleteFixup(nodeToFix)
}
rbt.size--
}
// Size return the number of elements in the tree.
// Complexity O(1).
func (rbt *RBTree[T]) Size() int {
return rbt.size
}
func (rbt *RBTree[T]) Empty() bool {
return rbt.Size() == 0
}
// searchFromNode walks the subtree rooted at node looking for value.
// It returns the first matching node and true, or the sentinel and
// false when value is not present.
func (rbt *RBTree[T]) searchFromNode(node *nodeHandle[T], value T) (*nodeHandle[T], bool) {
	it := node
	for it != rbt.nilNode {
		// Report a match for any tree. The previous code only matched when
		// duplicates were disallowed (value == it.value && !rbt.dupl), which
		// made Find/Erase always miss on a tree built with
		// allowDuplicates=true: the search kept descending past every equal
		// node and fell off the tree.
		if value == it.value {
			return it, true
		}
		if rbt.cmpInst.Cmp(value, it.value) {
			it = it.left
		} else {
			it = it.right
		}
	}
	return rbt.nilNode, false
}
// findInsertNode locates the node that will become newNode's parent,
// descending left on "less" and right otherwise. It returns the
// sentinel when the tree is empty.
func (rbt *RBTree[T]) findInsertNode(newNode *nodeHandle[T]) *nodeHandle[T] {
	parent := rbt.nilNode
	for cur := rbt.root; cur != rbt.nilNode; {
		parent = cur
		if rbt.cmpInst.Cmp(newNode.value, cur.value) {
			cur = cur.left
		} else {
			cur = cur.right
		}
	}
	return parent
}
// addChild hangs child off node on the side dictated by the comparator,
// or makes child the new root when node is the sentinel.
func (rbt *RBTree[T]) addChild(node, child *nodeHandle[T]) {
	switch {
	case node == rbt.nilNode:
		rbt.root = child
	case rbt.cmpInst.Cmp(child.value, node.value):
		node.left = child
	default:
		node.right = child
	}
}
// insertFixup restores the red-black invariants after inserting the red
// node z (textbook red-black insert fixup). The only possible violation
// is a red node with a red parent; it is pushed up the tree by
// recolouring or resolved with at most two rotations.
func (rbt *RBTree[T]) insertFixup(z *nodeHandle[T]) {
	for z.parent.col == red {
		if z.parent == z.parent.parent.left {
			// y is z's uncle (grandparent's right child).
			y := z.parent.parent.right
			if y.col == red {
				// Case 1: red uncle — recolour and move the violation up.
				z.parent.col = black
				y.col = black
				z.parent.parent.col = red
				z = z.parent.parent
			} else {
				if z == z.parent.right {
					// Case 2: black uncle, z is an inner child — rotate into case 3.
					z = z.parent
					rbt.leftRotate(z)
				}
				// Case 3: black uncle, z is an outer child — recolour and rotate.
				z.parent.col = black
				z.parent.parent.col = red
				rbt.rightRotate(z.parent.parent)
			}
		} else {
			// Mirror image of the cases above with left/right swapped.
			y := z.parent.parent.left
			if y.col == red {
				z.parent.col = black
				y.col = black
				z.parent.parent.col = red
				z = z.parent.parent
			} else {
				if z == z.parent.left {
					z = z.parent
					rbt.rightRotate(z)
				}
				z.parent.col = black
				z.parent.parent.col = red
				rbt.leftRotate(z.parent.parent)
			}
		}
	}
	// The root must always be black.
	rbt.root.col = black
}
// deleteNode unlinks z from the tree (standard BST delete with sentinel
// bookkeeping). It returns the colour of the node that was physically
// removed and the node that took its place; when that colour is black
// the caller must run deleteFixup on the returned node.
func (rbt *RBTree[T]) deleteNode(z *nodeHandle[T]) (color, *nodeHandle[T]) {
	var x *nodeHandle[T]
	y := z
	originalColor := y.col
	// At most one child: splice z out directly.
	if z.left == rbt.nilNode {
		x = z.right
		rbt.transplant(z, z.right)
		return originalColor, x
	}
	if z.right == rbt.nilNode {
		x = z.left
		rbt.transplant(z, z.left)
		return originalColor, x
	}
	// Two children: replace z by its in-order successor y; the node that
	// physically leaves its position is y, so track y's colour.
	y = rbt.minimum(z.right)
	originalColor = y.col
	x = y.right
	if y.parent == z {
		// x may be the sentinel; record its parent so deleteFixup can climb.
		x.parent = y
	} else {
		rbt.transplant(y, y.right)
		y.right = z.right
		y.right.parent = y
	}
	rbt.transplant(z, y)
	y.left = z.left
	y.left.parent = y
	y.col = z.col
	return originalColor, x
}
// deleteFixup restores the red-black invariants after a black node was
// removed (textbook red-black delete fixup). x carries an extra "black"
// that is pushed up or absorbed; the loop ends once x is red or the root.
func (rbt *RBTree[T]) deleteFixup(x *nodeHandle[T]) {
	for x != rbt.root && x.col == black {
		if x == x.parent.left {
			// w is x's sibling.
			w := x.parent.right
			if w.col == red {
				// Case 1: red sibling — rotate so the sibling becomes black.
				w.col = black
				x.parent.col = red
				rbt.leftRotate(x.parent)
				w = x.parent.right
			}
			if w.left.col == black && w.right.col == black {
				// Case 2: sibling with two black children — recolour, move up.
				w.col = red
				x = x.parent
			} else {
				if w.right.col == black {
					// Case 3: rotate so the sibling's far child is red.
					w.left.col = black
					w.col = red
					rbt.rightRotate(w)
					w = x.parent.right
				}
				// Case 4: absorb the extra black and terminate.
				w.col = x.parent.col
				x.parent.col = black
				w.right.col = black
				rbt.leftRotate(x.parent)
				x = rbt.root
			}
		} else {
			// Mirror image of the cases above with left/right swapped.
			w := x.parent.left
			if w.col == red {
				w.col = black
				x.parent.col = red
				rbt.rightRotate(x.parent)
				w = x.parent.left
			}
			if w.left.col == black && w.right.col == black {
				w.col = red
				x = x.parent
			} else {
				if w.left.col == black {
					w.right.col = black
					w.col = red
					rbt.leftRotate(w)
					w = x.parent.left
				}
				w.col = x.parent.col
				x.parent.col = black
				w.left.col = black
				rbt.rightRotate(x.parent)
				x = rbt.root
			}
		}
	}
	x.col = black
}
// transplant replaces the subtree rooted at a with the subtree rooted
// at b in a's parent. b's parent link is updated unconditionally (b may
// be the sentinel, whose parent field deleteFixup relies on).
func (rbt *RBTree[T]) transplant(a, b *nodeHandle[T]) {
	parent := a.parent
	switch {
	case parent == rbt.nilNode:
		rbt.root = b
	case a == parent.left:
		parent.left = b
	default:
		parent.right = b
	}
	b.parent = parent
}
// minimum returns the leftmost node of the subtree rooted at node.
func (rbt *RBTree[T]) minimum(node *nodeHandle[T]) *nodeHandle[T] {
	cur := node
	for cur.left != rbt.nilNode {
		cur = cur.left
	}
	return cur
}

// maximum returns the rightmost node of the subtree rooted at node.
func (rbt *RBTree[T]) maximum(node *nodeHandle[T]) *nodeHandle[T] {
	cur := node
	for cur.right != rbt.nilNode {
		cur = cur.right
	}
	return cur
}
// leftRotate rotates the subtree rooted at x to the left: x's right
// child y becomes the subtree root, x becomes y's left child, and y's
// former left subtree becomes x's right subtree. Statement order
// matters — links are rewired child-first.
func (rbt *RBTree[T]) leftRotate(x *nodeHandle[T]) {
	y := x.right
	// Move y's left subtree under x.
	x.right = y.left
	if y.left != rbt.nilNode {
		y.left.parent = x
	}
	// Attach y to x's former parent (or make it the root).
	y.parent = x.parent
	if x.parent == rbt.nilNode {
		rbt.root = y
	} else if x == x.parent.left {
		x.parent.left = y
	} else {
		x.parent.right = y
	}
	// Put x below y.
	y.left = x
	x.parent = y
}
func (rbt *RBTree[T]) rightRotate(x *nodeHandle[T]) {
y := x.left
x.left = y.right
if y.right != rbt.nilNode {
y.right.parent = x
}
y.parent = x.parent
if x.parent == rbt.nilNode {
rbt.root = y
} else if x == x.parent.right {
x.parent.right = y
} else {
x.parent.left = y
}
y.right = x
x.parent = y
} | containers/rbtree/redblacktree.go | 0.795062 | 0.59749 | redblacktree.go | starcoder |
package model
import (
"github.com/hashicorp/hcl/v2"
"github.com/pulumi/pulumi/sdk/go/common/util/contract"
"github.com/zclconf/go-cty/cty"
)
// Traversable represents an entity that can be traversed by an HCL2 traverser.
type Traversable interface {
// Traverse attempts to traverse the receiver using the given traverser.
Traverse(t hcl.Traverser) (Traversable, hcl.Diagnostics)
}
// TypedTraversable is a Traversable that has an associated type.
type TypedTraversable interface {
Traversable
Type() Type
}
// GetTraversableType returns the type of the given Traversable:
// - If the Traversable is a TypedTraversable, this returns t.Type()
// - If the Traversable is a Type, this returns t
// - Otherwise, this returns DynamicType
func GetTraversableType(t Traversable) Type {
	if typed, ok := t.(TypedTraversable); ok {
		return typed.Type()
	}
	if typ, ok := t.(Type); ok {
		return typ
	}
	return DynamicType
}
// GetTraverserKey extracts the value and type of the key associated with the given traverser.
// Attribute traversals (x.name) yield the attribute name as a string;
// index traversals (x[key]) yield the index value and its mapped type.
// Any other traverser kind is a programming error.
func GetTraverserKey(t hcl.Traverser) (cty.Value, Type) {
	switch t := t.(type) {
	case hcl.TraverseAttr:
		return cty.StringVal(t.Name), StringType
	case hcl.TraverseIndex:
		// Keys carrying the model's type capsule have no concrete value;
		// unwrap the encapsulated *Type instead.
		if t.Key.Type().Equals(typeCapsule) {
			return cty.DynamicVal, *(t.Key.EncapsulatedValue().(*Type))
		}
		return t.Key, ctyTypeToType(t.Key.Type(), false)
	default:
		contract.Failf("unexpected traverser of type %T (%v)", t, t.SourceRange())
		return cty.DynamicVal, DynamicType
	}
}
// bindTraversalParts computes the type for each element of the given traversal.
func (b *expressionBinder) bindTraversalParts(receiver Traversable,
traversal hcl.Traversal) ([]Traversable, hcl.Diagnostics) {
parts := make([]Traversable, len(traversal)+1)
parts[0] = receiver
var diagnostics hcl.Diagnostics
for i, part := range traversal {
nextReceiver, partDiags := parts[i].Traverse(part)
parts[i+1], diagnostics = nextReceiver, append(diagnostics, partDiags...)
}
switch parts[len(parts)-1].(type) {
case TypedTraversable, Type:
// OK
default:
// TODO(pdg): improve this diagnostic
diagnostics = append(diagnostics, undefinedVariable("", traversal.SourceRange()))
}
return parts, diagnostics
} | pkg/codegen/hcl2/model/traversable.go | 0.735452 | 0.431464 | traversable.go | starcoder |
package formats
import "errors"
// Z80 holds the state decoded from a .z80 snapshot: CPU registers, ULA
// (border) state, the 48K RAM image and assorted header flag bits.
type Z80 struct {
	cpu CpuState
	ula UlaState
	mem [48 * 1024]byte // RAM image covering addresses 0x4000-0xFFFF
	samRom bool // SamRom flag from header byte 12 (rejected by this decoder)
	issue2_emulation bool // Issue 2 keyboard emulation flag (rejected by this decoder)
	doubleInterruptFrequency bool
	videoSynchronization byte // 0..3
	joystick byte // 0..3
}
const (
_Z80_V1_HEADER_SIZE = 30
_Z80_V2_HEADER_SIZE = 30 + 2 + 23
_Z80_V3_HEADER_SIZE = 30 + 2 + 54
_Z80_V3X_HEADER_SIZE = 30 + 2 + 55
)
// DecodeZ80 decodes a [Z80 snapshot] from binary data.
// The on-disk version is detected from the header: v1 stores a non-zero
// PC at bytes 6-7, while v2/v3 set PC to zero and append an extended
// header whose length distinguishes v2 from v3.
func (data SnapshotData) DecodeZ80() (*Z80, error) {
	if len(data) < _Z80_V1_HEADER_SIZE {
		return nil, errors.New("invalid Z80 snapshot")
	}
	PC := uint16(data[6]) | (uint16(data[7]) << 8)
	if PC != 0 {
		// Z80 version 1.xx
		return data.decodeZ80_v1()
	}
	// PC == 0: version 2 or 3, identified by the extended header length.
	if len(data) < _Z80_V2_HEADER_SIZE {
		return nil, errors.New("invalid Z80 snapshot")
	}
	extendedHeaderLength := uint16(data[30]) | (uint16(data[31]) << 8)
	switch _Z80_V1_HEADER_SIZE + 2 + extendedHeaderLength {
	case _Z80_V2_HEADER_SIZE:
		// Z80 version 2.01
		return data.decodeZ80_v2()
	case _Z80_V3_HEADER_SIZE, _Z80_V3X_HEADER_SIZE:
		// Z80 version 3.0x
		return data.decodeZ80_v3()
	}
	return nil, errors.New("invalid Z80 snapshot, or unsupported Z80 snapshot version")
}
// readHeader_v1 parses the 30-byte version 1 header shared by all Z80
// snapshot versions, filling in CPU registers, border colour and misc
// flag bits. Snapshots this decoder does not support (SamRom, Issue 2
// emulation) are rejected with an error.
func (data SnapshotData) readHeader_v1(s *Z80) error {
	// Byte 12 equal to 255 is normalised to 1 (historic format quirk).
	data12 := data[12]
	if data12 == 255 {
		data12 = 1
	}
	// Main register set.
	s.cpu.A = data[0]
	s.cpu.F = data[1]
	s.cpu.C = data[2]
	s.cpu.B = data[3]
	s.cpu.L = data[4]
	s.cpu.H = data[5]
	s.cpu.PC = uint16(data[6]) | (uint16(data[7]) << 8)
	s.cpu.SP = uint16(data[8]) | (uint16(data[9]) << 8)
	s.cpu.I = data[10]
	// R register: low 7 bits from byte 11, bit 7 from bit 0 of byte 12.
	s.cpu.R = (data[11] & 0x7f) | ((data12 & 0x01) << 7)
	s.ula.Border = (data12 >> 1) & 0x07
	s.samRom = ((data12 & 0x10) != 0)
	// Alternate register set and index registers.
	s.cpu.E = data[13]
	s.cpu.D = data[14]
	s.cpu.C_ = data[15]
	s.cpu.B_ = data[16]
	s.cpu.E_ = data[17]
	s.cpu.D_ = data[18]
	s.cpu.L_ = data[19]
	s.cpu.H_ = data[20]
	s.cpu.A_ = data[21]
	s.cpu.F_ = data[22]
	s.cpu.IY = uint16(data[23]) | (uint16(data[24]) << 8)
	s.cpu.IX = uint16(data[25]) | (uint16(data[26]) << 8)
	// Interrupt flip-flops are normalised to exactly 0 or 1.
	if data[27] != 0 {
		s.cpu.IFF1 = 1
	} else {
		s.cpu.IFF1 = 0
	}
	if data[28] != 0 {
		s.cpu.IFF2 = 1
	} else {
		s.cpu.IFF2 = 0
	}
	// Interrupt mode: only 0, 1 and 2 are valid Z80 modes.
	switch IM := (data[29] & 0x03); IM {
	case 0, 1, 2:
		s.cpu.IM = IM
	default:
		return errors.New("invalid interrupt mode")
	}
	// Remaining flag bits of byte 29.
	s.issue2_emulation = ((data[29] & 0x04) != 0)
	s.doubleInterruptFrequency = ((data[29] & 0x08) != 0)
	s.videoSynchronization = ((data[29] >> 4) & 0x03)
	s.joystick = ((data[29] >> 6) & 0x03)
	if s.samRom {
		return errors.New("unsupported feature: SamRom")
	}
	if s.issue2_emulation {
		return errors.New("unsupported feature: Issue 2 emulation")
	}
	return nil
}
// decodeZ80_v1 decodes a version 1.xx snapshot: the 30-byte header
// followed by a 48K memory image, optionally RLE-compressed (bit 5 of
// header byte 12) and terminated by the 00 ED ED 00 end marker.
func (data SnapshotData) decodeZ80_v1() (*Z80, error) {
	var s Z80
	if err := data.readHeader_v1(&s); err != nil {
		return nil, err
	}
	var compressed bool
	{
		// Byte 12 equal to 255 is normalised to 1 (same quirk as the header).
		data12 := data[12]
		if data12 == 255 {
			data12 = 1
		}
		compressed = ((data12 & 0x20) != 0)
	}
	var mem []byte
	if compressed {
		// A compressed image must end with the 00 ED ED 00 marker. Guard the
		// length explicitly: the original code sliced data[30:len-4] and
		// would panic on a truncated (30..33 byte) compressed snapshot.
		n := len(data)
		if n < _Z80_V1_HEADER_SIZE+4 ||
			!(data[n-4] == 0x00 && data[n-3] == 0xED && data[n-2] == 0xED && data[n-1] == 0x00) {
			return nil, errors.New("invalid Z80 snapshot: no end-marker")
		}
		mem = z80_decompress(data[_Z80_V1_HEADER_SIZE : n-4])
	} else {
		mem = data[_Z80_V1_HEADER_SIZE:]
	}
	// The decoded image must be exactly 48K (addresses 0x4000-0xFFFF).
	if len(mem) != 48*1024 {
		return nil, errors.New("invalid Z80 snapshot")
	}
	copy(s.mem[:], mem)
	return &s, nil
}
func (data SnapshotData) decodeZ80_v2() (*Z80, error) {
var s Z80
var err error
err = data.readHeader_v1(&s)
if err != nil {
return nil, err
}
if len(data) < _Z80_V2_HEADER_SIZE {
return nil, errors.New("invalid Z80 snapshot")
}
extendedHeaderLength := uint16(data[30]) | (uint16(data[31]) << 8)
if extendedHeaderLength != 23 {
return nil, errors.New("invalid Z80 snapshot")
}
s.cpu.PC = uint16(data[32]) | (uint16(data[33]) << 8)
hw_mode := data[34]
switch hw_mode {
case 0:
// 48k
case 1:
// 48k + If.1
default:
return nil, errors.New("read Z80 snapshot version 2.01: unsupported hardware mode")
}
// data[35]: no meaning in 48k mode
// data[36]: no meaning in 48k mode
var modifyHardware bool = ((data[37] >> 7) != 0)
if modifyHardware {
return nil, errors.New("read Z80 snapshot version 2.01: unsupported hardware mode")
}
// rest of data[37]: ignored
// data[38]: ignored
// data[39..54]: ignored
// Memory blocks
{
i := int(_Z80_V1_HEADER_SIZE + 2 + extendedHeaderLength)
err = z80_loadMemBlocks(&s, data[i:])
if err != nil {
return nil, err
}
}
return &s, nil
}
func (data SnapshotData) decodeZ80_v3() (*Z80, error) {
var s Z80
var err error
err = data.readHeader_v1(&s)
if err != nil {
return nil, err
}
if len(data) < _Z80_V3X_HEADER_SIZE {
return nil, errors.New("invalid Z80 snapshot")
}
extendedHeaderLength := uint16(data[30]) | (uint16(data[31]) << 8)
if !((extendedHeaderLength == 54) || (extendedHeaderLength == 55)) {
return nil, errors.New("invalid Z80 snapshot")
}
s.cpu.PC = uint16(data[32]) | (uint16(data[33]) << 8)
hw_mode := data[34]
switch hw_mode {
case 0:
// 48k
case 1:
// 48k + If.1
default:
return nil, errors.New("read Z80 snapshot version 3.0x: unsupported hardware mode")
}
// data[35]: no meaning in 48k mode
// data[36]: no meaning in 48k mode
var modifyHardware bool = ((data[37] >> 7) != 0)
if modifyHardware {
return nil, errors.New("read Z80 snapshot version 3.0x: unsupported hardware mode")
}
// rest of data[37]: ignored
// data[38]: ignored
// data[39..54]: ignored
tstate_low := uint(data[55]) | (uint(data[56]) << 8)
tstate_hi := uint(data[57] & 0x03)
const T4 = TStatesPerFrame / 4
s.cpu.Tstate = ((tstate_hi-3)%4)*T4 + (T4 - (tstate_low % T4) - 1)
// data[58]: always ignored
MGT_rom_paged := (data[59] == 0xff)
multiface_rom_paged := (data[60] == 0xff)
rom0_writable := (data[61] == 0xff)
rom1_writable := (data[62] == 0xff)
// data[63..72]: ignored
// data[73..82]: ignored
// data[83]: ignored
// data[84]: ignored
// data[85]: ignored
if extendedHeaderLength == 55 {
// data[86]: ignored
}
if MGT_rom_paged {
return nil, errors.New("read Z80 snapshot version 3.0x: unsupported feature: MGT ROM paging")
}
if multiface_rom_paged {
return nil, errors.New("read Z80 snapshot version 3.0x: unsupported feature: Multiface ROM paging")
}
if rom0_writable {
return nil, errors.New("read Z80 snapshot version 3.0x: unsupported feature: RAM 0..8191")
}
if rom1_writable {
return nil, errors.New("read Z80 snapshot version 3.0x: unsupported feature: RAM 8192..16383")
}
// Memory blocks
{
i := int(_Z80_V1_HEADER_SIZE + 2 + extendedHeaderLength)
err = z80_loadMemBlocks(&s, data[i:])
if err != nil {
return nil, err
}
}
return &s, nil
}
// z80_loadMemBlocks parses the v2/v3 memory-block list: each block is a
// 3-byte header (little-endian data length, page number) followed by
// the page data; a length of 0xFFFF marks an uncompressed 16K page.
// Exactly the three 48K RAM pages (8, 4, 5) must be present.
func z80_loadMemBlocks(s *Z80, data []byte) error {
	pages := make(map[byte]([]byte))
	i := 0
	for i+3 <= len(data) {
		length := int(data[i+0]) | (int(data[i+1]) << 8)
		page := data[i+2]
		i += 3
		compressed := true
		if length == 0xFFFF {
			compressed = false
			length = 0x4000
		}
		if i+length > len(data) {
			return errors.New("invalid Z80 snapshot")
		}
		block := data[i:(i + length)]
		if compressed {
			block = z80_decompress(block)
		}
		pages[page] = block
		i += length
	}
	// Trailing garbage after the last block is an error.
	if i != len(data) {
		return errors.New("invalid Z80 snapshot")
	}
	if len(pages) != 3 {
		return errors.New("invalid Z80 snapshot")
	}
	for page, pageData := range pages {
		// Map the 48K page numbers to their Spectrum addresses.
		var addr int
		switch page {
		case 8:
			addr = 0x4000
		case 4:
			addr = 0x8000
		case 5:
			addr = 0xc000
		default:
			return errors.New("invalid Z80 snapshot")
		}
		// Every page must decompress to exactly 16K.
		if len(pageData) != 0x4000 {
			return errors.New("invalid Z80 snapshot")
		}
		// s.mem starts at address 0x4000.
		copy(s.mem[addr-0x4000:], pageData)
	}
	return nil
}
// z80_decompress expands the Z80 snapshot RLE encoding in a single
// pass: the four-byte sequence ED ED nn vv stands for nn repetitions of
// byte vv; everything else (including a lone ED, or an ED ED pair that
// begins within the last three bytes) is copied through verbatim.
func z80_decompress(in []byte) []byte {
	out := make([]byte, 0, len(in))
	for i := 0; i < len(in); {
		if i+4 <= len(in) && in[i] == 0xED && in[i+1] == 0xED {
			count, value := in[i+2], in[i+3]
			for n := byte(0); n < count; n++ {
				out = append(out, value)
			}
			i += 4
			continue
		}
		out = append(out, in[i])
		i++
	}
	return out
}
// CpuState returns the CPU register state decoded from the snapshot.
func (s *Z80) CpuState() CpuState {
	return s.cpu
}

// UlaState returns the ULA state (border colour) decoded from the snapshot.
func (s *Z80) UlaState() UlaState {
	return s.ula
}
func (s *Z80) Memory() *[48 * 1024]byte {
return &s.mem
} | formats/Z80.go | 0.590189 | 0.475118 | Z80.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// ReferencedObject
type ReferencedObject struct {
// Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additionalData map[string]interface{}
// Name of the referenced object. Must match one of the objects in the directory definition.
referencedObjectName *string
// Currently not supported. Name of the property in the referenced object, the value for which is used as the reference.
referencedProperty *string
}
// NewReferencedObject instantiates a new referencedObject and sets the default values.
func NewReferencedObject()(*ReferencedObject) {
m := &ReferencedObject{
}
m.SetAdditionalData(make(map[string]interface{}));
return m
}
// CreateReferencedObjectFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateReferencedObjectFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
return NewReferencedObject(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ReferencedObject) GetAdditionalData()(map[string]interface{}) {
	// Guard against a nil receiver, as the generated callers may pass one.
	if m == nil {
		return nil
	}
	return m.additionalData
}
// GetFieldDeserializers the deserialization information for the current model
func (m *ReferencedObject) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
res["referencedObjectName"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetReferencedObjectName(val)
}
return nil
}
res["referencedProperty"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetReferencedProperty(val)
}
return nil
}
return res
}
// GetReferencedObjectName gets the referencedObjectName property value. Name of the referenced object. Must match one of the objects in the directory definition.
func (m *ReferencedObject) GetReferencedObjectName()(*string) {
if m == nil {
return nil
} else {
return m.referencedObjectName
}
}
// GetReferencedProperty gets the referencedProperty property value. Currently not supported. Name of the property in the referenced object, the value for which is used as the reference.
func (m *ReferencedObject) GetReferencedProperty()(*string) {
if m == nil {
return nil
} else {
return m.referencedProperty
}
}
// Serialize serializes information the current object
func (m *ReferencedObject) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
{
err := writer.WriteStringValue("referencedObjectName", m.GetReferencedObjectName())
if err != nil {
return err
}
}
{
err := writer.WriteStringValue("referencedProperty", m.GetReferencedProperty())
if err != nil {
return err
}
}
{
err := writer.WriteAdditionalData(m.GetAdditionalData())
if err != nil {
return err
}
}
return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *ReferencedObject) SetAdditionalData(value map[string]interface{})() {
	// Setting on a nil receiver is a silent no-op, matching the generated style.
	if m == nil {
		return
	}
	m.additionalData = value
}
// SetReferencedObjectName sets the referencedObjectName property value. Name of the referenced object. Must match one of the objects in the directory definition.
func (m *ReferencedObject) SetReferencedObjectName(value *string)() {
if m != nil {
m.referencedObjectName = value
}
}
// SetReferencedProperty sets the referencedProperty property value. Currently not supported. Name of the property in the referenced object, the value for which is used as the reference.
func (m *ReferencedObject) SetReferencedProperty(value *string)() {
if m != nil {
m.referencedProperty = value
}
} | models/referenced_object.go | 0.694303 | 0.407098 | referenced_object.go | starcoder |
package graph
import (
i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization"
)
// WorkbookChartDataLabels
type WorkbookChartDataLabels struct {
Entity
// Represents the format of chart data labels, which includes fill and font formatting. Read-only.
format *WorkbookChartDataLabelFormat;
// DataLabelPosition value that represents the position of the data label. The possible values are: None, Center, InsideEnd, InsideBase, OutsideEnd, Left, Right, Top, Bottom, BestFit, Callout.
position *string;
// String representing the separator used for the data labels on a chart.
separator *string;
// Boolean value representing if the data label bubble size is visible or not.
showBubbleSize *bool;
// Boolean value representing if the data label category name is visible or not.
showCategoryName *bool;
// Boolean value representing if the data label legend key is visible or not.
showLegendKey *bool;
// Boolean value representing if the data label percentage is visible or not.
showPercentage *bool;
// Boolean value representing if the data label series name is visible or not.
showSeriesName *bool;
// Boolean value representing if the data label value is visible or not.
showValue *bool;
}
// NewWorkbookChartDataLabels instantiates a new workbookChartDataLabels and sets the default values.
func NewWorkbookChartDataLabels()(*WorkbookChartDataLabels) {
m := &WorkbookChartDataLabels{
Entity: *NewEntity(),
}
return m
}
// GetFormat gets the format property value. Represents the format of chart data labels, which includes fill and font formatting. Read-only.
func (m *WorkbookChartDataLabels) GetFormat()(*WorkbookChartDataLabelFormat) {
if m == nil {
return nil
} else {
return m.format
}
}
// GetPosition gets the position property value. DataLabelPosition value that represents the position of the data label. The possible values are: None, Center, InsideEnd, InsideBase, OutsideEnd, Left, Right, Top, Bottom, BestFit, Callout.
func (m *WorkbookChartDataLabels) GetPosition()(*string) {
if m == nil {
return nil
} else {
return m.position
}
}
// GetSeparator gets the separator property value. String representing the separator used for the data labels on a chart.
func (m *WorkbookChartDataLabels) GetSeparator()(*string) {
if m == nil {
return nil
} else {
return m.separator
}
}
// GetShowBubbleSize gets the showBubbleSize property value. Boolean value representing if the data label bubble size is visible or not.
func (m *WorkbookChartDataLabels) GetShowBubbleSize()(*bool) {
if m == nil {
return nil
} else {
return m.showBubbleSize
}
}
// GetShowCategoryName gets the showCategoryName property value. Boolean value representing if the data label category name is visible or not.
func (m *WorkbookChartDataLabels) GetShowCategoryName()(*bool) {
if m == nil {
return nil
} else {
return m.showCategoryName
}
}
// GetShowLegendKey gets the showLegendKey property value. Boolean value representing if the data label legend key is visible or not.
func (m *WorkbookChartDataLabels) GetShowLegendKey()(*bool) {
if m == nil {
return nil
} else {
return m.showLegendKey
}
}
// GetShowPercentage gets the showPercentage property value. Boolean value representing if the data label percentage is visible or not.
func (m *WorkbookChartDataLabels) GetShowPercentage()(*bool) {
if m == nil {
return nil
} else {
return m.showPercentage
}
}
// GetShowSeriesName gets the showSeriesName property value. Boolean value representing if the data label series name is visible or not.
func (m *WorkbookChartDataLabels) GetShowSeriesName()(*bool) {
if m == nil {
return nil
} else {
return m.showSeriesName
}
}
// GetShowValue gets the showValue property value. Boolean value representing if the data label value is visible or not.
func (m *WorkbookChartDataLabels) GetShowValue()(*bool) {
if m == nil {
return nil
} else {
return m.showValue
}
}
// GetFieldDeserializers the deserialization information for the current model
func (m *WorkbookChartDataLabels) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) {
res := m.Entity.GetFieldDeserializers()
res["format"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewWorkbookChartDataLabelFormat() })
if err != nil {
return err
}
if val != nil {
m.SetFormat(val.(*WorkbookChartDataLabelFormat))
}
return nil
}
res["position"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetPosition(val)
}
return nil
}
res["separator"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetStringValue()
if err != nil {
return err
}
if val != nil {
m.SetSeparator(val)
}
return nil
}
res["showBubbleSize"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetShowBubbleSize(val)
}
return nil
}
res["showCategoryName"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetShowCategoryName(val)
}
return nil
}
res["showLegendKey"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetShowLegendKey(val)
}
return nil
}
res["showPercentage"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetShowPercentage(val)
}
return nil
}
res["showSeriesName"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetShowSeriesName(val)
}
return nil
}
res["showValue"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error {
val, err := n.GetBoolValue()
if err != nil {
return err
}
if val != nil {
m.SetShowValue(val)
}
return nil
}
return res
}
func (m *WorkbookChartDataLabels) IsNil()(bool) {
return m == nil
}
// Serialize serializes information the current object
func (m *WorkbookChartDataLabels) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) {
err := m.Entity.Serialize(writer)
if err != nil {
return err
}
{
err = writer.WriteObjectValue("format", m.GetFormat())
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("position", m.GetPosition())
if err != nil {
return err
}
}
{
err = writer.WriteStringValue("separator", m.GetSeparator())
if err != nil {
return err
}
}
{
err = writer.WriteBoolValue("showBubbleSize", m.GetShowBubbleSize())
if err != nil {
return err
}
}
{
err = writer.WriteBoolValue("showCategoryName", m.GetShowCategoryName())
if err != nil {
return err
}
}
{
err = writer.WriteBoolValue("showLegendKey", m.GetShowLegendKey())
if err != nil {
return err
}
}
{
err = writer.WriteBoolValue("showPercentage", m.GetShowPercentage())
if err != nil {
return err
}
}
{
err = writer.WriteBoolValue("showSeriesName", m.GetShowSeriesName())
if err != nil {
return err
}
}
{
err = writer.WriteBoolValue("showValue", m.GetShowValue())
if err != nil {
return err
}
}
return nil
}
// SetFormat sets the format property value. Represents the format of chart data labels, which includes fill and font formatting. Read-only.
func (m *WorkbookChartDataLabels) SetFormat(value *WorkbookChartDataLabelFormat)() {
if m != nil {
m.format = value
}
}
// SetPosition sets the position property value. DataLabelPosition value that represents the position of the data label. The possible values are: None, Center, InsideEnd, InsideBase, OutsideEnd, Left, Right, Top, Bottom, BestFit, Callout.
func (m *WorkbookChartDataLabels) SetPosition(value *string)() {
if m != nil {
m.position = value
}
}
// SetSeparator sets the separator property value. String representing the separator used for the data labels on a chart.
func (m *WorkbookChartDataLabels) SetSeparator(value *string)() {
if m != nil {
m.separator = value
}
}
// SetShowBubbleSize sets the showBubbleSize property value. Boolean value representing if the data label bubble size is visible or not.
func (m *WorkbookChartDataLabels) SetShowBubbleSize(value *bool)() {
if m != nil {
m.showBubbleSize = value
}
}
// SetShowCategoryName sets the showCategoryName property value. Boolean value representing if the data label category name is visible or not.
func (m *WorkbookChartDataLabels) SetShowCategoryName(value *bool)() {
if m != nil {
m.showCategoryName = value
}
}
// SetShowLegendKey sets the showLegendKey property value. Boolean value representing if the data label legend key is visible or not.
func (m *WorkbookChartDataLabels) SetShowLegendKey(value *bool)() {
if m != nil {
m.showLegendKey = value
}
}
// SetShowPercentage sets the showPercentage property value. Boolean value representing if the data label percentage is visible or not.
func (m *WorkbookChartDataLabels) SetShowPercentage(value *bool)() {
if m != nil {
m.showPercentage = value
}
}
// SetShowSeriesName sets the showSeriesName property value. Boolean value representing if the data label series name is visible or not.
func (m *WorkbookChartDataLabels) SetShowSeriesName(value *bool)() {
if m != nil {
m.showSeriesName = value
}
}
// SetShowValue sets the showValue property value. Boolean value representing if the data label value is visible or not.
func (m *WorkbookChartDataLabels) SetShowValue(value *bool)() {
if m != nil {
m.showValue = value
}
} | models/microsoft/graph/workbook_chart_data_labels.go | 0.694095 | 0.531696 | workbook_chart_data_labels.go | starcoder |
// Command custommetric creates a custom metric and writes TimeSeries value
// to it. It writes a GAUGE measurement, which is a measure of value at a
// specific point in time. This means the startTime and endTime of the interval
// are the same. To make it easier to see the output, a random value is written.
// When reading the TimeSeries back, a window of the last 5 minutes is used.
package main
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"os"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/monitoring/v3"
)
// createCustomMetric creates a custom metric descriptor named metricName
// of type metricType under projectResource: a GAUGE of INT64 values with
// a single free-form "environment" label. The API response is logged.
func createCustomMetric(s *monitoring.Service, projectResource string, metricType string, metricName string) error {
	ld := monitoring.LabelDescriptor{Key: "environment", ValueType: "STRING", Description: "An arbitrary measurement"}
	md := monitoring.MetricDescriptor{
		Name: metricName,
		Type: metricType,
		Labels: []*monitoring.LabelDescriptor{&ld},
		MetricKind: "GAUGE",
		ValueType: "INT64",
		Unit: "items",
		Description: "An arbitrary measurement",
		DisplayName: "Custom Metric",
	}
	resp, err := s.Projects.MetricDescriptors.Create(projectResource, &md).Do()
	if err != nil {
		return fmt.Errorf("Could not create custom metric: %v", err)
	}
	// Log the raw API response for inspection.
	log.Printf("createCustomMetric: %s\n", formatResource(resp))
	return nil
}
// getCustomMetric reads the custom metric created.
func getCustomMetric(s *monitoring.Service, projectResource string, metricType string, metricName string) (*monitoring.ListMetricDescriptorsResponse, error) {
resp, err := s.Projects.MetricDescriptors.List(projectResource).
Filter(fmt.Sprintf("metric.type=\"%s\"", metricType)).Do()
if err != nil {
return nil, fmt.Errorf("Could not get custom metric: %v", err)
}
log.Printf("getCustomMetric: %s\n", formatResource(resp))
return resp, nil
}
// writeTimeSeriesValue writes a value for the custom metric created
func writeTimeSeriesValue(s *monitoring.Service, projectResource string, metricType string, metricName string) error {
now := time.Now().UTC().Format(time.RFC3339Nano)
timeseries := monitoring.TimeSeries{
Metric: &monitoring.Metric{
Type: metricType,
Labels: map[string]string{
"environment": "STAGING",
},
},
Resource: &monitoring.MonitoredResource{
Labels: map[string]string{
"instance_id": "test-instance",
"zone": "us-central1-f",
},
Type: "gce_instance",
},
MetricKind: "GAUGE",
ValueType: "INT64",
Points: []*monitoring.Point{
{
Interval: &monitoring.TimeInterval{
StartTime: now,
EndTime: now,
},
Value: &monitoring.TypedValue{
Int64Value: rand.Int63n(10),
},
},
},
}
createTimeseriesRequest := monitoring.CreateTimeSeriesRequest{
TimeSeries: []*monitoring.TimeSeries{×eries},
}
log.Printf("writeTimeseriesRequest: %s\n", formatResource(createTimeseriesRequest))
_, err := s.Projects.TimeSeries.Create(projectResource, &createTimeseriesRequest).Do()
if err != nil {
return fmt.Errorf("Could not write time series value, %v ", err)
}
return nil
}
// readTimeSeriesValue reads the TimeSeries for the value specified by
// metricName in a time window covering the last 5 minutes.
func readTimeSeriesValue(s *monitoring.Service, projectResource string, metricType string, metricName string) error {
	windowStart := time.Now().UTC().Add(time.Minute * -5)
	windowEnd := time.Now().UTC()
	call := s.Projects.TimeSeries.List(projectResource).
		Filter(fmt.Sprintf("metric.type=\"%s\"", metricType)).
		IntervalStartTime(windowStart.Format(time.RFC3339Nano)).
		IntervalEndTime(windowEnd.Format(time.RFC3339Nano))
	resp, err := call.Do()
	if err != nil {
		return fmt.Errorf("Could not read time series value, %v ", err)
	}
	log.Printf("readTimeseriesValue: %s\n", formatResource(resp))
	return nil
}
// main creates a custom Cloud Monitoring metric for the project named on the
// command line, waits until the metric descriptor becomes readable, writes
// one random GAUGE point to it, and reads back the last 5 minutes.
func main() {
	client, err := google.DefaultClient(
		oauth2.NoContext,
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/monitoring",
		"https://www.googleapis.com/auth/monitoring.read",
		"https://www.googleapis.com/auth/monitoring.write",
	)
	if err != nil {
		log.Fatal(err)
	}
	if len(os.Args) < 2 {
		fmt.Println("Usage: auth.go <project_id>")
		return
	}
	projectResource := "projects/" + os.Args[1]
	svc, err := monitoring.New(client)
	if err != nil {
		log.Fatal(err)
	}
	metricType := "custom.googleapis.com/custom_measurement"
	metricName := projectResource + "/metricDescriptors/" + metricType
	// Create the metric.
	if err := createCustomMetric(svc, projectResource, metricType, metricName); err != nil {
		log.Fatal(err)
	}
	// Wait until the new metric can be read back; descriptor creation is
	// eventually consistent.
	var resp *monitoring.ListMetricDescriptorsResponse
	for {
		var err error
		resp, err = getCustomMetric(svc, projectResource, metricType, metricName)
		if err != nil {
			log.Fatal(err)
		}
		if resp != nil && resp.MetricDescriptors != nil {
			break
		}
		// FIX: sleep only while the descriptor is still missing; the old
		// loop also slept one extra period after it became visible.
		time.Sleep(2 * time.Second)
	}
	rand.Seed(time.Now().UTC().UnixNano())
	// write a TimeSeries value for that metric
	if err := writeTimeSeriesValue(svc, projectResource, metricType, metricName); err != nil {
		log.Fatal(err)
	}
	time.Sleep(2 * time.Second)
	// Read the TimeSeries for the last 5 minutes for that metric.
	// FIX: the returned error was previously discarded.
	if err := readTimeSeriesValue(svc, projectResource, metricType, metricName); err != nil {
		log.Fatal(err)
	}
}
// formatResource renders an API response object as indented JSON for
// logging. (The previous doc comment misnamed the function "printResource";
// it returns bytes and prints nothing itself.) It panics only for values
// that cannot be JSON-marshaled.
func formatResource(resource interface{}) []byte {
	b, err := json.MarshalIndent(resource, "", "    ")
	if err != nil {
		panic(err)
	}
	return b
}
package linear
import (
"fmt"
"math"
)
const (
	// sgn_mask_float isolates the IEEE-754 sign bit of the values compared
	// in equalsWithULP. Those values come from math.Float64bits, so the mask
	// must select bit 63 (the float64 sign bit); the previous value,
	// 0x80000000, tested an ordinary mantissa bit instead, so the
	// opposite-sign detection was effectively random.
	sgn_mask_float = 0x8000000000000000
)
var (
	// The two IEEE-754 representations of zero. Their bit patterns anchor
	// the ULP-distance computation in equalsWithULP for operands of
	// opposite sign.
	positive_zero_float64 float64 = 0.0
	negative_zero_float64 float64 = -positive_zero_float64
	positive_zero_float64_bits = math.Float64bits(positive_zero_float64)
	negative_zero_float64_bits = math.Float64bits(negative_zero_float64)
)
// _isSymmetric reports whether matrix is symmetric to within the given
// relative tolerance. When raiseException is true, a non-square or
// asymmetric matrix triggers a panic instead of a false return.
func _isSymmetric(matrix RealMatrix, relativeTolerance float64, raiseException bool) bool {
	n := matrix.RowDimension()
	if n != matrix.ColumnDimension() {
		if !raiseException {
			return false
		}
		panic(fmt.Sprintf("non square (%dx%d) matrix", n, matrix.ColumnDimension()))
	}
	// Compare each strictly-upper entry against its mirror image.
	for row := 0; row < n; row++ {
		for col := row + 1; col < n; col++ {
			upper := matrix.At(row, col)
			lower := matrix.At(col, row)
			limit := math.Max(math.Abs(upper), math.Abs(lower)) * relativeTolerance
			if math.Abs(upper-lower) > limit {
				if !raiseException {
					return false
				}
				panic(fmt.Sprintf("non symmetric matrix: the difference between entries at (%v,%v) and (%v,%v) is larger than %v", row, col, col, row, relativeTolerance))
			}
		}
	}
	return true
}
// isSymmetric reports whether matrix is symmetric within eps. Unlike the
// underscored variant it never panics on failure.
func isSymmetric(matrix RealMatrix, eps float64) bool {
	const raisePanic = false
	return _isSymmetric(matrix, eps, raisePanic)
}
// compareTo orders x and y with tolerance eps: 0 when they are equal within
// eps, -1 when x < y, and +1 otherwise.
func compareTo(x, y, eps float64) int {
	switch {
	case equalsWithError(x, y, eps):
		return 0
	case x < y:
		return -1
	default:
		return 1
	}
}
// equals reports whether x and y differ by at most one unit in the last
// place.
func equals(x, y float64) bool {
	const oneUlp = 1
	return equalsWithULP(x, y, oneUlp)
}
// equalsWithError reports whether x and y agree to within eps, or to within
// one ULP when eps is tighter than the local floating-point spacing.
func equalsWithError(x, y, eps float64) bool {
	if equalsWithULP(x, y, 1) {
		return true
	}
	return math.Abs(y-x) <= eps
}
func equalsWithULP(x, y float64, maxUlps int) bool {
xInt := math.Float64bits(x)
yInt := math.Float64bits(y)
var isEqual bool
if ((xInt ^ yInt) & sgn_mask_float) == 0 {
// number have same sign, there is no risk of overflow
isEqual = uint64(math.Abs(float64(xInt-yInt))) <= uint64(maxUlps)
} else {
// number have opposite signs, take care of overflow
var deltaPlus, deltaMinus uint64
if xInt < yInt {
deltaPlus = yInt - positive_zero_float64_bits
deltaMinus = xInt - negative_zero_float64_bits
} else {
deltaPlus = xInt - positive_zero_float64_bits
deltaMinus = yInt - negative_zero_float64_bits
}
if deltaPlus > uint64(maxUlps) {
isEqual = false
} else {
isEqual = deltaMinus <= (uint64(maxUlps) - deltaPlus)
}
}
return isEqual && !math.IsNaN(x) && !math.IsNaN(y)
} | utils.go | 0.710327 | 0.519704 | utils.go | starcoder |
package viz
import (
"image/color"
"github.com/anki/goverdrive/phys"
"github.com/anki/goverdrive/robo/track"
)
const (
	// Supported GameShape types
	// Note that a thick line works as a rectangle. Boo-yeah!
	shapeLine = 0 // straight segment from (x1,y1) to (x2,y2)
	shapeCirc = 1 // circle centered at (x1,y1); (x2,y2) lies one radius away
	numShapes = 2 // count of shape kinds above
)
// GameShape defines a flexible container for specifying primitive shapes used
// by the game. The meaning of some fields depends on specific shape.
// - Shape coordinates can be absolute or relative to a particular vehicle
// - Shape coordinates can be in Track or Cartesian coordinate space
// For shapeCirc, (x1,y1) is the center and (x2,y2) is a point one radius
// away from it (see the circle constructors below).
type GameShape struct {
	vehId int // >= 0 means relative to that vehicle
	shape uint // eg ShapeLine
	isCartes bool // coordinate space: true => cartesian; false => track
	x1 phys.Meters // X or Dofs of point 1
	y1 phys.Meters // Y or Cofs of point 1
	x2 phys.Meters // X or Dofs of point 2 (may be unused, depending on Shape)
	y2 phys.Meters // Y or Cofs of point 2 (may be unused, depending on Shape)
	color color.Color // draw color
	thickness phys.Meters // line thickness (0 => filled)
}
// NewCartesGameLine builds a line segment between Cartesian points p1 and
// p2. Pass vehId >= 0 to anchor the shape to that vehicle; thickness 0
// means the shape is drawn filled.
func NewCartesGameLine(vehId int, p1, p2 phys.Point, color color.Color, thickness phys.Meters) *GameShape {
	gs := GameShape{vehId: vehId, shape: shapeLine, isCartes: true, color: color, thickness: thickness}
	gs.x1, gs.y1 = p1.X, p1.Y
	gs.x2, gs.y2 = p2.X, p2.Y
	return &gs
}
// NewTrackGameLine builds a line segment between track-space points tp1 and
// tp2 (distance/cross offsets). Pass vehId >= 0 to anchor the shape to that
// vehicle; thickness 0 means filled.
func NewTrackGameLine(vehId int, tp1, tp2 track.Point, color color.Color, thickness phys.Meters) *GameShape {
	gs := GameShape{vehId: vehId, shape: shapeLine, isCartes: false, color: color, thickness: thickness}
	gs.x1, gs.y1 = tp1.Dofs, tp1.Cofs
	gs.x2, gs.y2 = tp2.Dofs, tp2.Cofs
	return &gs
}
// NewCartesGameCirc builds a circle of radius rad centered at the Cartesian
// point ctr. The second coordinate pair stores a point offset from the
// center by rad along X, which encodes the radius.
func NewCartesGameCirc(vehId int, ctr phys.Point, rad phys.Meters, color color.Color, thickness phys.Meters) *GameShape {
	gs := GameShape{vehId: vehId, shape: shapeCirc, isCartes: true, color: color, thickness: thickness}
	gs.x1, gs.y1 = ctr.X, ctr.Y
	gs.x2, gs.y2 = ctr.X+rad, ctr.Y
	return &gs
}
// NewTrackGameCirc builds a circle of radius rad centered at the track-space
// point ctr. The second coordinate pair stores a point offset from the
// center by rad along the distance axis, which encodes the radius.
func NewTrackGameCirc(vehId int, ctr track.Point, rad phys.Meters, color color.Color, thickness phys.Meters) *GameShape {
	gs := GameShape{vehId: vehId, shape: shapeCirc, isCartes: false, color: color, thickness: thickness}
	gs.x1, gs.y1 = ctr.Dofs, ctr.Cofs
	gs.x2, gs.y2 = ctr.Dofs+rad, ctr.Cofs
	return &gs
}
func (gs GameShape) VehId() int {
return gs.vehId
}
func (gs GameShape) IsCartesian() bool {
return gs.isCartes
}
func (gs GameShape) Color() color.Color {
return gs.color
}
func (gs GameShape) Thickness() phys.Meters {
return gs.thickness
} | goverdrive/viz/gameshape.go | 0.752468 | 0.439988 | gameshape.go | starcoder |
package systems
import (
"github.com/emctague/go-loopy/ecs"
"log"
)
// Transform is a component which represents the position of some entity.
type Transform struct {
	X float64 // horizontal position component
	Y float64 // vertical position component
	Rotation float64 // orientation; units are not established in this file
	Width float64 // bounding width; not read by TransformSystem itself
	Height float64 // bounding height; not read by TransformSystem itself
	ParentID uint64 // This transform will follow all the same movements as its parent. Set to 0 for 'no parent'.
}
// TransformEvent represents a change in the position of an entity.
// This may then fire transform events for any transforms which use the given entity as a parent.
type TransformEvent struct {
	EntityID uint64 // The entity to transform.
	OffsetX float64 // X movement (or absolute X when Absolute is true)
	OffsetY float64 // Y movement (or absolute Y when Absolute is true)
	Absolute bool // True if offsets are actually absolute screen coordinates.
}
// SetTransformParentEvent changes which entity a transform is parented to.
// This does not change the current position of the entity.
type SetTransformParentEvent struct {
	EntityID uint64 // The entity whose parent should be changed.
	ParentID uint64 // The new parent for the entity.
}
// eTransform and eTransformParent are TransformSystem's internal component
// bundles: the former wraps an entity's Transform, the latter records one
// child entry in the parent->children index.
type eTransform struct{ *Transform }
type eTransformParent struct{ EntityID uint64 }
// TransformSystem keeps track of the transformation of entities and parenting of entity positions to those of other
// entities.
//
// It subscribes to the ECS event stream and services events on a dedicated
// goroutine: entity add/remove bookkeeping, parent changes, and position
// updates, the last of which cascade to any child transforms.
func TransformSystem(e *ecs.ECS) {
	events := e.Subscribe()
	// entities maps entity ID -> its Transform; parents maps a parent
	// entity ID -> the child entities that follow it.
	entities := make(map[uint64]eTransform)
	parents := make(map[uint64][]eTransformParent)
	go func() {
		for ev := range events {
			switch event := ev.Event.(type) {
			case ecs.EntityAddedEvent:
				// UnpackEntity appears to register the entity's Transform in
				// `entities`, returning nil when the entity has none.
				addedSet := ecs.UnpackEntity(event, &entities)
				if addedSet == nil {
					break
				}
				addedCSet := addedSet.(*eTransform)
				if addedCSet.ParentID != 0 {
					// Re-apply a pre-set parent through setParent so the
					// parent->children index gets built. The field is
					// cleared first because setParent is a no-op when the
					// old and new parent IDs match.
					tempParentID := addedCSet.ParentID
					addedCSet.ParentID = 0
					setParent(&entities, &parents, event.ID, tempParentID)
				}
			case ecs.EntityRemovedEvent:
				// Change the parent to no-parent so that the entity is removed from any child lists.
				if entity, ok := entities[event.ID]; ok && entity.ParentID != 0 {
					setParent(&entities, &parents, event.ID, 0)
				}
				// Remove from parent list if appropriate
				if _, ok := parents[event.ID]; ok {
					delete(parents, event.ID)
				}
				// NOTE(review): children of the removed entity still carry
				// its ID in their ParentID fields — confirm stale parent
				// references are tolerated elsewhere.
				ecs.RemoveEntity(event.ID, &entities)
			case SetTransformParentEvent:
				setParent(&entities, &parents, event.EntityID, event.ParentID)
			case TransformEvent:
				transformedEntity, ok := entities[event.EntityID]
				if !ok {
					log.Fatal("Transform event on entity without a transform!")
				}
				// Turn absolute values into relative ones.
				if event.Absolute {
					event.OffsetX = event.OffsetX - transformedEntity.X
					event.OffsetY = event.OffsetY - transformedEntity.Y
				}
				// Change the position
				transformedEntity.X += event.OffsetX
				transformedEntity.Y += event.OffsetY
				// Propagate transformation to children.
				children, ok := parents[event.EntityID]
				if ok {
					for _, child := range children {
						ev.Next <- TransformEvent{child.EntityID, event.OffsetX, event.OffsetY, false}
					}
				}
			}
			// Signal the ECS that this subscriber is done with the event.
			ev.Wg.Done()
		}
	}()
}
// Change the parent of the given entity to the given parent entity, updating the appropriate structures.
func setParent(entities *map[uint64]eTransform, parents *map[uint64][]eTransformParent, childID uint64, newParentID uint64) {
comSet, ok := (*entities)[childID]
if !ok {
log.Fatal("Cannot set parent on nonexistent component")
}
// Exit if we're trying to set the same exact parent ID
if comSet.Transform.ParentID == newParentID {
return
}
// Remove an entry from the old parent's list if it isn't no parent (0)
if comSet.Transform.ParentID != 0 {
oldParentList, ok := (*parents)[comSet.Transform.ParentID]
if !ok {
log.Fatal("Entity's old parent is invalid!?")
}
if len(oldParentList) == 0 {
delete(*parents, comSet.Transform.ParentID)
} else {
// Remove the item from the list
for i, oldChild := range oldParentList {
if oldChild.EntityID == childID {
oldParentList[i] = oldParentList[len(oldParentList)-1]
(*parents)[comSet.Transform.ParentID] = oldParentList[:len(oldParentList)-1]
break
}
}
}
}
comSet.Transform.ParentID = newParentID
if newParentID == 0 {
return
}
// Add an entry to the new parent's list if it isn't no parent (0)
_, ok = (*parents)[newParentID]
if !ok {
(*parents)[newParentID] = []eTransformParent{}
}
(*parents)[newParentID] = append((*parents)[newParentID], eTransformParent{childID})
} | systems/transform.go | 0.620047 | 0.526099 | transform.go | starcoder |
package pgdialect
import (
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"time"
"unicode/utf8"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/schema"
)
// Cached reflect.Types used to recognize common element types when building
// Postgres array literals (see arrayAppender and arrayElemAppender).
var (
	driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
	stringType = reflect.TypeOf((*string)(nil)).Elem()
	sliceStringType = reflect.TypeOf([]string(nil))
	intType = reflect.TypeOf((*int)(nil)).Elem()
	sliceIntType = reflect.TypeOf([]int(nil))
	int64Type = reflect.TypeOf((*int64)(nil)).Elem()
	sliceInt64Type = reflect.TypeOf([]int64(nil))
	float64Type = reflect.TypeOf((*float64)(nil)).Elem()
	sliceFloat64Type = reflect.TypeOf([]float64(nil))
)
// customAppender returns an appender for unsigned integer kinds, which are
// rendered through their signed bit patterns; all other kinds yield nil so
// the caller falls back to default handling.
func customAppender(typ reflect.Type) schema.AppenderFunc {
	switch typ.Kind() {
	case reflect.Uint32:
		return appendUint32ValueAsInt
	case reflect.Uint, reflect.Uint64:
		return appendUint64ValueAsInt
	default:
		return nil
	}
}
func appendTime(b []byte, tm time.Time) []byte {
b = append(b, '\'')
b = tm.UTC().AppendFormat(b, "2006-01-02 15:04:05.999999-07:00")
b = append(b, '\'')
return b
}
// appendUint32ValueAsInt renders a uint32 through its int32 bit pattern,
// matching how the value round-trips via a signed Postgres integer column.
func appendUint32ValueAsInt(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return strconv.AppendInt(b, int64(int32(v.Uint())), 10)
}
// appendUint64ValueAsInt renders a uint64 through its int64 bit pattern.
func appendUint64ValueAsInt(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return strconv.AppendInt(b, int64(v.Uint()), 10)
}
//------------------------------------------------------------------------------
// arrayAppend renders one already-unwrapped Go value as a Postgres array
// element. Unsupported dynamic types produce an error placeholder in the
// output instead of panicking.
func arrayAppend(fmter schema.Formatter, b []byte, v interface{}) []byte {
	switch elem := v.(type) {
	case int64:
		return strconv.AppendInt(b, elem, 10)
	case float64:
		return dialect.AppendFloat64(b, elem)
	case bool:
		return dialect.AppendBool(b, elem)
	case []byte:
		return arrayAppendBytes(b, elem)
	case string:
		return arrayAppendString(b, elem)
	case time.Time:
		return appendTime(b, elem)
	default:
		err := fmt.Errorf("pgdialect: can't append %T", v)
		return dialect.AppendError(b, err)
	}
}
// arrayElemAppender picks the append function for one element type of a
// Postgres array: driver.Valuer takes precedence, then strings and byte
// slices, and finally the generic schema appender with the unsigned-int
// customization.
func arrayElemAppender(typ reflect.Type) schema.AppenderFunc {
	if typ.Implements(driverValuerType) {
		return arrayAppendDriverValue
	}
	if typ.Kind() == reflect.String {
		return arrayAppendStringValue
	}
	if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
		return arrayAppendBytesValue
	}
	return schema.Appender(typ, customAppender)
}
// arrayAppendStringValue adapts arrayAppendString to the reflect-based
// schema.AppenderFunc signature.
func arrayAppendStringValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return arrayAppendString(b, v.String())
}
// arrayAppendBytesValue does the same for byte-slice values.
func arrayAppendBytesValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return arrayAppendBytes(b, v.Bytes())
}
// arrayAppendDriverValue unwraps a driver.Valuer and appends the produced
// value; an error from Value() is embedded into the output buffer.
func arrayAppendDriverValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	iface, err := v.Interface().(driver.Valuer).Value()
	if err != nil {
		return dialect.AppendError(b, err)
	}
	return arrayAppend(fmter, b, iface)
}
//------------------------------------------------------------------------------
// arrayAppender builds an AppenderFunc that renders a Go slice/array (or a
// pointer to one) as a quoted Postgres array literal '{...}'. It returns
// nil for unsupported kinds and panics when no element appender exists.
func arrayAppender(typ reflect.Type) schema.AppenderFunc {
	kind := typ.Kind()
	switch kind {
	case reflect.Ptr:
		// Pointers are unwrapped by wrapping the inner appender with a nil
		// check; a nil result falls through to the generic path below.
		if fn := arrayAppender(typ.Elem()); fn != nil {
			return schema.PtrAppender(fn)
		}
	case reflect.Slice, reflect.Array:
		// ok:
	default:
		return nil
	}
	elemType := typ.Elem()
	if kind == reflect.Slice {
		// Fast paths for the most common slice element types.
		switch elemType {
		case stringType:
			return appendStringSliceValue
		case intType:
			return appendIntSliceValue
		case int64Type:
			return appendInt64SliceValue
		case float64Type:
			return appendFloat64SliceValue
		}
	}
	appendElem := arrayElemAppender(elemType)
	if appendElem == nil {
		panic(fmt.Errorf("pgdialect: %s is not supported", typ))
	}
	// Generic element-by-element renderer.
	return func(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
		kind := v.Kind()
		switch kind {
		case reflect.Ptr, reflect.Slice:
			// Nil pointers and nil slices render as SQL NULL.
			if v.IsNil() {
				return dialect.AppendNull(b)
			}
		}
		if kind == reflect.Ptr {
			v = v.Elem()
		}
		b = append(b, '\'')
		b = append(b, '{')
		for i := 0; i < v.Len(); i++ {
			elem := v.Index(i)
			b = appendElem(fmter, b, elem)
			b = append(b, ',')
		}
		if v.Len() > 0 {
			b[len(b)-1] = '}' // Replace trailing comma.
		} else {
			b = append(b, '}')
		}
		b = append(b, '\'')
		return b
	}
}
// appendStringSliceValue converts the reflect value to []string and
// delegates to appendStringSlice.
func appendStringSliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return appendStringSlice(b, v.Convert(sliceStringType).Interface().([]string))
}

// appendStringSlice renders ss as a quoted Postgres text array literal; a
// nil slice becomes NULL.
func appendStringSlice(b []byte, ss []string) []byte {
	if ss == nil {
		return dialect.AppendNull(b)
	}
	b = append(b, '\'', '{')
	for _, s := range ss {
		b = arrayAppendString(b, s)
		b = append(b, ',')
	}
	if len(ss) == 0 {
		b = append(b, '}')
	} else {
		b[len(b)-1] = '}' // overwrite the trailing comma
	}
	return append(b, '\'')
}
// appendIntSliceValue converts the reflect value to []int and delegates to
// appendIntSlice.
func appendIntSliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return appendIntSlice(b, v.Convert(sliceIntType).Interface().([]int))
}

// appendIntSlice renders ints as a quoted Postgres integer array literal; a
// nil slice becomes NULL.
func appendIntSlice(b []byte, ints []int) []byte {
	if ints == nil {
		return dialect.AppendNull(b)
	}
	b = append(b, '\'', '{')
	for _, n := range ints {
		b = strconv.AppendInt(b, int64(n), 10)
		b = append(b, ',')
	}
	if len(ints) == 0 {
		b = append(b, '}')
	} else {
		b[len(b)-1] = '}' // overwrite the trailing comma
	}
	return append(b, '\'')
}
// appendInt64SliceValue converts the reflect value to []int64 and delegates
// to appendInt64Slice.
func appendInt64SliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return appendInt64Slice(b, v.Convert(sliceInt64Type).Interface().([]int64))
}

// appendInt64Slice renders ints as a quoted Postgres bigint array literal; a
// nil slice becomes NULL.
func appendInt64Slice(b []byte, ints []int64) []byte {
	if ints == nil {
		return dialect.AppendNull(b)
	}
	b = append(b, '\'', '{')
	for _, n := range ints {
		b = strconv.AppendInt(b, n, 10)
		b = append(b, ',')
	}
	if len(ints) == 0 {
		b = append(b, '}')
	} else {
		b[len(b)-1] = '}' // overwrite the trailing comma
	}
	return append(b, '\'')
}
// appendFloat64SliceValue converts the reflect value to []float64 and
// delegates to appendFloat64Slice.
func appendFloat64SliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return appendFloat64Slice(b, v.Convert(sliceFloat64Type).Interface().([]float64))
}

// appendFloat64Slice renders floats as a quoted Postgres float array
// literal; a nil slice becomes NULL.
func appendFloat64Slice(b []byte, floats []float64) []byte {
	if floats == nil {
		return dialect.AppendNull(b)
	}
	b = append(b, '\'', '{')
	for _, f := range floats {
		b = dialect.AppendFloat64(b, f)
		b = append(b, ',')
	}
	if len(floats) == 0 {
		b = append(b, '}')
	} else {
		b[len(b)-1] = '}' // overwrite the trailing comma
	}
	return append(b, '\'')
}
//------------------------------------------------------------------------------
// arrayAppendBytes hex-encodes bs as a Postgres bytea array element of the
// form "\\xDEADBEEF"; a nil slice becomes NULL.
func arrayAppendBytes(b []byte, bs []byte) []byte {
	if bs == nil {
		return dialect.AppendNull(b)
	}
	b = append(b, `"\\x`...)
	hexStart := len(b)
	b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
	hex.Encode(b[hexStart:], bs)
	return append(b, '"')
}
// arrayAppendString appends s as a double-quoted Postgres array element:
// double quotes and backslashes are backslash-escaped, single quotes are
// tripled (for the enclosing single-quoted SQL literal), NUL runes are
// dropped, and all other runes are copied through as UTF-8.
func arrayAppendString(b []byte, s string) []byte {
	b = append(b, '"')
	for _, r := range s {
		switch {
		case r == 0:
			// Postgres text cannot hold NUL; drop it.
		case r == '\'':
			b = append(b, "'''"...)
		case r == '"':
			b = append(b, '\\', '"')
		case r == '\\':
			b = append(b, '\\', '\\')
		case r < utf8.RuneSelf:
			b = append(b, byte(r))
		default:
			var buf [utf8.UTFMax]byte
			n := utf8.EncodeRune(buf[:], r)
			b = append(b, buf[:n]...)
		}
	}
	return append(b, '"')
}
package ec
import (
"encoding/hex"
"fmt"
"math/big"
)
// p is a prime number of secp256k1: the field modulus. All curve coordinate
// arithmetic below is reduced mod p.
// http://www.secg.org/sec2-v2.pdf
var p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
// Point is a coordinate of elliptic curve.
type Point struct {
X *big.Int
Y *big.Int
}
// Infinite returns whether it is at infinity or not.
func (point *Point) Infinite() bool {
if point.X == nil || point.Y == nil {
return true
}
return false
}
// Clone returns a copy of Point.
func (point *Point) Clone() *Point {
clone := &Point{}
if point.Infinite() {
return nil
}
clone.X = new(big.Int).SetBytes(point.X.Bytes())
clone.Y = new(big.Int).SetBytes(point.Y.Bytes())
return clone
}
// Compressed returns the SEC1 compressed encoding of the point: a parity
// prefix (0x02 for even Y, 0x03 for odd Y) followed by X mod p left-padded
// to the field size. The point at infinity encodes as nil.
func (point *Point) Compressed() []byte {
	if point.Infinite() {
		return nil
	}
	fieldSize := len(p.Bytes())
	body := new(big.Int).Mod(point.X, p).Bytes()
	for len(body) != fieldSize {
		body = append([]byte{0x00}, body...)
	}
	prefix := byte(0x02)
	if point.Y.Bit(0) == 1 {
		prefix = 0x03
	}
	return append([]byte{prefix}, body...)
}
// Decode returns a Point from the bytes.
// Accepted encodings: 0x04 || X || Y (uncompressed) and 0x02/0x03 || X
// (compressed, with the prefix giving the parity of Y).
func Decode(bs []byte) (*Point, error) {
	size := len(p.Bytes())
	if len(bs) == 1+2*size {
		// Uncompressed form: split the payload into X and Y.
		// NOTE(review): the coordinates are not validated against the curve
		// equation here — confirm callers tolerate off-curve input.
		if bs[0] != 0x04 {
			return nil, fmt.Errorf("invalid format : %x", bs)
		}
		point := &Point{}
		point.X = new(big.Int).SetBytes(bs[1 : size+1])
		point.Y = new(big.Int).SetBytes(bs[size+1:])
		return point, nil
	}
	if len(bs) != 1+size {
		return nil, fmt.Errorf("invalid length : %x", bs)
	}
	if bs[0] != 0x02 && bs[0] != 0x03 {
		return nil, fmt.Errorf("invalid format : %x", bs)
	}
	point := &Point{}
	point.X = new(big.Int).SetBytes(bs[1:])
	// Recover Y as a modular square root of x^3 + 7:
	// (x^3 + 7)^((p + 1) / 4)
	// This exponentiation shortcut is valid for primes p ≡ 3 (mod 4), which
	// holds for secp256k1's field modulus.
	point.Y = new(big.Int).Exp(
		new(big.Int).Add(new(big.Int).Exp(point.X, big.NewInt(3), p), big.NewInt(7)),
		new(big.Int).Div(new(big.Int).Add(p, big.NewInt(1)), big.NewInt(4)),
		p)
	// Flip to the other square root when the parity disagrees with the prefix.
	if (bs[0] != 0x02 && point.Y.Bit(0) == 0) || (bs[0] != 0x03 && point.Y.Bit(0) == 1) {
		point.Y.Sub(p, point.Y)
	}
	return point, nil
}
// DecodeString parses a hex-encoded SEC1 point via Decode.
func DecodeString(hexstring string) (*Point, error) {
	raw, err := hex.DecodeString(hexstring)
	if err != nil {
		return nil, err
	}
	return Decode(raw)
}
// G is the base point of secp256k1, decoded from its compressed SEC1 form.
var G, _ = DecodeString("0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798")
// n is the order of G.
var n, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
// Add returns P + Q using the affine group law.
//
// BUG FIX: the identity check previously compared Q.Y with itself
// (Q.Y.Cmp(Q.Y) != 0), which is always false, so adding a point to its
// negation never returned the point at infinity and instead fell through to
// the chord formula, whose denominator (xP - xQ) is zero there, producing a
// bogus result. The comparison is now P.Y vs Q.Y.
func Add(P, Q *Point) *Point {
	// Infinity is the group identity.
	if P.Infinite() {
		return Q.Clone()
	}
	if Q.Infinite() {
		return P.Clone()
	}
	// Same X, different Y: Q is -P, so the sum is the point at infinity.
	if P.X.Cmp(Q.X) == 0 && P.Y.Cmp(Q.Y) != 0 {
		return &Point{}
	}
	var s *big.Int
	if P.X.Cmp(Q.X) == 0 && P.Y.Cmp(Q.Y) == 0 {
		// Doubling: tangent slope (3*xP^2) / (2*yP) mod p, with the
		// division done via Fermat inversion a^(p-2) mod p.
		s = new(big.Int).Mod(
			new(big.Int).Mul(
				new(big.Int).Mul(new(big.Int).Mul(big.NewInt(3), P.X), P.X),
				new(big.Int).Exp(
					new(big.Int).Mul(big.NewInt(2), P.Y),
					new(big.Int).Sub(p, big.NewInt(2)),
					p)),
			p)
	} else {
		// Chord slope: (yP - yQ) / (xP - xQ) mod p, same Fermat inversion.
		s = new(big.Int).Mod(
			new(big.Int).Mul(
				new(big.Int).Sub(P.Y, Q.Y),
				new(big.Int).Exp(
					new(big.Int).Sub(P.X, Q.X),
					new(big.Int).Sub(p, big.NewInt(2)),
					p)),
			p)
	}
	R := &Point{}
	// xR = s*s - (xP + xQ) mod p
	R.X = new(big.Int).Mod(new(big.Int).Sub(new(big.Int).Mul(s, s), new(big.Int).Add(P.X, Q.X)), p)
	// yR = s*(xP - xR) - yP mod p
	R.Y = new(big.Int).Mod(new(big.Int).Sub(new(big.Int).Mul(s, new(big.Int).Sub(P.X, R.X)), P.Y), p)
	return R
}
// Mul is the multiple of Point.
func Mul(x *big.Int, P *Point) *Point {
R := &Point{}
for i := 0; i < x.BitLen(); i++ {
if x.Bit(i) == 1 {
R = Add(R, P)
}
P = Add(P, P)
}
return R
} | ec/ec.go | 0.728265 | 0.500793 | ec.go | starcoder |
package chart
import "fmt"
// Interface Assertions.
var (
	_ Series = (*ContinuousSeries)(nil)
	_ FirstValuesProvider = (*ContinuousSeries)(nil)
	_ LastValuesProvider = (*ContinuousSeries)(nil)
)
// ContinuousSeries represents a line on a chart.
type ContinuousSeries struct {
	Name string
	Style Style
	YAxis YAxisType // which Y axis the series draws on (see GetYAxis)
	XValueFormatter ValueFormatter // optional; FloatValueFormatter is substituted when nil
	YValueFormatter ValueFormatter // optional; FloatValueFormatter is substituted when nil
	XValues []float64 // parallel to YValues; Validate enforces equal lengths
	YValues []float64
}
// GetName returns the name of the time series.
func (cs ContinuousSeries) GetName() string { return cs.Name }

// GetStyle returns the line style.
func (cs ContinuousSeries) GetStyle() Style { return cs.Style }

// Len returns the number of elements in the series, as measured by the X
// values.
func (cs ContinuousSeries) Len() int { return len(cs.XValues) }
// GetValues gets the x,y values at a given index. The index must be within
// [0, Len()).
func (cs ContinuousSeries) GetValues(index int) (x, y float64) {
	x, y = cs.XValues[index], cs.YValues[index]
	return
}

// GetFirstValues gets the first x,y values.
// NOTE(review): panics on an empty series, like the accessors above.
func (cs ContinuousSeries) GetFirstValues() (x, y float64) {
	x, y = cs.XValues[0], cs.YValues[0]
	return
}

// GetLastValues gets the last x,y values.
func (cs ContinuousSeries) GetLastValues() (x, y float64) {
	x, y = cs.XValues[len(cs.XValues)-1], cs.YValues[len(cs.YValues)-1]
	return
}
// GetValueFormatters returns the series' value formatters, substituting
// FloatValueFormatter for any axis without an explicit one.
func (cs ContinuousSeries) GetValueFormatters() (x, y ValueFormatter) {
	x, y = cs.XValueFormatter, cs.YValueFormatter
	if x == nil {
		x = FloatValueFormatter
	}
	if y == nil {
		y = FloatValueFormatter
	}
	return
}

// GetYAxis returns which YAxis the series draws on.
func (cs ContinuousSeries) GetYAxis() YAxisType { return cs.YAxis }
// Render draws the series onto r within canvasBox, merging the series style
// with the chart defaults before delegating to the line-series drawer.
func (cs ContinuousSeries) Render(r Renderer, canvasBox Box, xrange, yrange Range, defaults Style) {
	merged := cs.Style.InheritFrom(defaults)
	Draw.LineSeries(r, canvasBox, xrange, yrange, merged, cs)
}
// Validate validates the series.
func (cs ContinuousSeries) Validate() error {
if len(cs.XValues) == 0 {
return fmt.Errorf("continuous series; must have xvalues set")
}
if len(cs.YValues) == 0 {
return fmt.Errorf("continuous series; must have yvalues set")
}
if len(cs.XValues) != len(cs.YValues) {
return fmt.Errorf("continuous series; must have same length xvalues as yvalues")
}
return nil
} | continuous_series.go | 0.831827 | 0.554953 | continuous_series.go | starcoder |
package dataframe
// ColumnHeader helps you manipulate column names. The zero value is an
// empty, usable header.
type ColumnHeader struct {
	columns map[string]bool // set of column names; nil means empty
}

// Columns creates a ColumnHeader from a list of column names; duplicates
// collapse into a single entry.
func Columns(names ...string) ColumnHeader {
	set := make(map[string]bool, len(names))
	for _, name := range names {
		set[name] = true
	}
	return ColumnHeader{set}
}

// Num returns the number of columns in the ColumnHeader.
func (h ColumnHeader) Num() int {
	return len(h.columns)
}

// NameSet returns the set of columns in the header for read-only access.
// This is faster than NameList.
func (h ColumnHeader) NameSet() map[string]bool {
	if h.columns == nil {
		return make(map[string]bool)
	}
	return h.columns
}

// NameList returns the list of columns in the header, in unspecified order.
// Altering the returned slice won't alter the ColumnHeader. A header with no
// columns yields nil.
func (h ColumnHeader) NameList() []string {
	if h.columns == nil {
		return nil
	}
	names := make([]string, 0, len(h.columns))
	for col := range h.columns {
		names = append(names, col)
	}
	return names
}

// ExceptHeader removes every column that appears in any of the other
// ColumnHeaders. (FIX: the doc comment previously misnamed this method
// "Except".) It returns a shallow copy of itself, not an entirely new
// ColumnHeader.
func (h ColumnHeader) ExceptHeader(others ...ColumnHeader) ColumnHeader {
	if h.columns == nil {
		return h
	}
	for _, other := range others {
		for col := range other.columns {
			delete(h.columns, col)
		}
	}
	return h
}

// Except removes all the columns given as arguments.
// It returns a shallow copy of itself, not an entirely new ColumnHeader.
func (h ColumnHeader) Except(columns ...string) ColumnHeader {
	if h.columns == nil {
		return h
	}
	for _, col := range columns {
		delete(h.columns, col)
	}
	return h
}

// And adds all the columns from the given other ColumnHeaders.
// It returns a shallow copy of itself, not an entirely new ColumnHeader.
func (h ColumnHeader) And(others ...ColumnHeader) ColumnHeader {
	if h.columns == nil {
		h.columns = make(map[string]bool)
	}
	for _, other := range others {
		for col := range other.columns {
			h.columns[col] = true
		}
	}
	return h
}

// Copy returns a deep copy of the ColumnHeader. (FIX: removed a redundant
// nil re-check that could never fire after the early return above it.)
func (h ColumnHeader) Copy() ColumnHeader {
	if h.columns == nil {
		return h
	}
	dup := make(map[string]bool, len(h.columns))
	for col := range h.columns {
		dup[col] = true
	}
	return ColumnHeader{dup}
}
// IntHeader returns a ColumnHeader with all the integer column names.
// Altering the returned ColumnHeader has no effect on the underlying RawData.
func (data *RawData) IntHeader() ColumnHeader {
	if len(data.ints) == 0 {
		return ColumnHeader{}
	}
	result := make(map[string]bool)
	for col := range data.ints {
		result[col] = true
	}
	return ColumnHeader{result}
}
// ObjectHeader returns a ColumnHeader with all the object column names,
// including that of string columns.
// Altering the returned ColumnHeader has no effect on the underlying RawData.
func (data *RawData) ObjectHeader() ColumnHeader {
	if len(data.objects) == 0 {
		return ColumnHeader{}
	}
	result := make(map[string]bool)
	for col := range data.objects {
		result[col] = true
	}
	return ColumnHeader{result}
}
// FloatHeader returns a ColumnHeader with all the float column names.
// Altering the returned ColumnHeader has no effect on the underlying RawData.
func (data *RawData) FloatHeader() ColumnHeader {
	if len(data.floats) == 0 {
		return ColumnHeader{}
	}
	result := make(map[string]bool)
	for col := range data.floats {
		result[col] = true
	}
	return ColumnHeader{result}
}
// BoolHeader returns a ColumnHeader with all the boolean column names.
// Altering the returned ColumnHeader has no effect on the underlying RawData.
func (data *RawData) BoolHeader() ColumnHeader {
	if len(data.bools) == 0 {
		return ColumnHeader{}
	}
	result := make(map[string]bool)
	for col := range data.bools {
		result[col] = true
	}
	return ColumnHeader{result}
}
// StringHeader returns a ColumnHeader with all the string column names.
// (The previous comment misnamed this method "BoolHeader".)
// Altering the returned ColumnHeader has no effect on the underlying RawData.
func (data *RawData) StringHeader() ColumnHeader {
	// we do a copy for consistency with other Getters
	return data.stringHeader.Copy()
}
// Header returns a ColumnHeader with all the column names.
// Altering the returned ColumnHeader has no effect on the underlying RawData.
func (data *RawData) Header() ColumnHeader {
	return data.IntHeader().And(data.BoolHeader(), data.ObjectHeader(), data.FloatHeader())
}
func (h *ColumnHeader) add(cols ...string) {
if h.columns == nil {
h.columns = make(map[string]bool)
}
for _, col := range cols {
h.columns[col] = true
}
}
func (h ColumnHeader) remove(cols ...string) {
if h.columns == nil {
return
}
for _, col := range cols {
delete(h.columns, col)
}
}
func (h ColumnHeader) contains(col string) bool {
if h.columns == nil {
return false
}
return h.columns[col]
}
func (h ColumnHeader) get() map[string]bool {
if h.columns == nil {
return make(map[string]bool)
}
return h.columns
} | dataframe/column_header.go | 0.875694 | 0.511839 | column_header.go | starcoder |
package datadog
import (
"encoding/json"
"time"
)
// UsageLambdaHour Number of lambda functions and sum of the invocations of all lambda functions for each hour for a given organization.
type UsageLambdaHour struct {
// Contains the number of different functions for each region and AWS account.
FuncCount *int64 `json:"func_count,omitempty"`
// The hour for the usage.
Hour *time.Time `json:"hour,omitempty"`
// Contains the sum of invocations of all functions.
InvocationsSum *int64 `json:"invocations_sum,omitempty"`
// UnparsedObject contains the raw value of the object if there was an error when deserializing into the struct
UnparsedObject map[string]interface{} `json:-`
}
// NewUsageLambdaHour instantiates a new UsageLambdaHour object.
// This constructor will assign default values to properties that have it
// defined, and makes sure properties required by the API are set; the set
// of arguments will change when the set of required properties changes.
func NewUsageLambdaHour() *UsageLambdaHour {
	return &UsageLambdaHour{}
}

// NewUsageLambdaHourWithDefaults instantiates a new UsageLambdaHour object.
// This constructor will only assign default values to properties that have
// it defined, but it doesn't guarantee that properties required by the API
// are set.
func NewUsageLambdaHourWithDefaults() *UsageLambdaHour {
	return &UsageLambdaHour{}
}
// GetFuncCount returns the FuncCount field value if set, zero value otherwise.
func (o *UsageLambdaHour) GetFuncCount() int64 {
if o == nil || o.FuncCount == nil {
var ret int64
return ret
}
return *o.FuncCount
}
// GetFuncCountOk returns a tuple with the FuncCount field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageLambdaHour) GetFuncCountOk() (*int64, bool) {
	if o == nil {
		return nil, false
	}
	if o.FuncCount == nil {
		return nil, false
	}
	return o.FuncCount, true
}
// HasFuncCount returns a boolean if a field has been set.
func (o *UsageLambdaHour) HasFuncCount() bool {
	return o != nil && o.FuncCount != nil
}
// SetFuncCount gets a reference to the given int64 and assigns it to the FuncCount field.
func (o *UsageLambdaHour) SetFuncCount(v int64) {
	// v is already the caller's copy, so its address can be stored safely.
	val := v
	o.FuncCount = &val
}
// GetHour returns the Hour field value if set, zero value otherwise.
func (o *UsageLambdaHour) GetHour() time.Time {
	if o == nil || o.Hour == nil {
		// Unset (or nil receiver): return the zero time.Time.
		var ret time.Time
		return ret
	}
	return *o.Hour
}

// GetHourOk returns a tuple with the Hour field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageLambdaHour) GetHourOk() (*time.Time, bool) {
	if o == nil || o.Hour == nil {
		return nil, false
	}
	return o.Hour, true
}

// HasHour returns a boolean if a field has been set.
func (o *UsageLambdaHour) HasHour() bool {
	if o != nil && o.Hour != nil {
		return true
	}
	return false
}

// SetHour gets a reference to the given time.Time and assigns it to the Hour field.
func (o *UsageLambdaHour) SetHour(v time.Time) {
	// v is the caller's copy, so taking its address is safe.
	o.Hour = &v
}
// GetInvocationsSum returns the InvocationsSum field value if set, zero value otherwise.
func (o *UsageLambdaHour) GetInvocationsSum() int64 {
	if o == nil || o.InvocationsSum == nil {
		// Unset (or nil receiver): return the zero int64.
		var ret int64
		return ret
	}
	return *o.InvocationsSum
}

// GetInvocationsSumOk returns a tuple with the InvocationsSum field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UsageLambdaHour) GetInvocationsSumOk() (*int64, bool) {
	if o == nil || o.InvocationsSum == nil {
		return nil, false
	}
	return o.InvocationsSum, true
}

// HasInvocationsSum returns a boolean if a field has been set.
func (o *UsageLambdaHour) HasInvocationsSum() bool {
	if o != nil && o.InvocationsSum != nil {
		return true
	}
	return false
}

// SetInvocationsSum gets a reference to the given int64 and assigns it to the InvocationsSum field.
func (o *UsageLambdaHour) SetInvocationsSum(v int64) {
	// v is the caller's copy, so taking its address is safe.
	o.InvocationsSum = &v
}
// MarshalJSON serializes the raw UnparsedObject when one was captured during
// deserialization; otherwise it emits only the fields that have been set.
func (o UsageLambdaHour) MarshalJSON() ([]byte, error) {
	if o.UnparsedObject != nil {
		return json.Marshal(o.UnparsedObject)
	}
	out := map[string]interface{}{}
	if o.FuncCount != nil {
		out["func_count"] = o.FuncCount
	}
	if o.Hour != nil {
		out["hour"] = o.Hour
	}
	if o.InvocationsSum != nil {
		out["invocations_sum"] = o.InvocationsSum
	}
	return json.Marshal(out)
}
// UnmarshalJSON deserializes the payload into the typed fields. If the typed
// decode fails, the raw JSON object is stored in UnparsedObject instead and
// no error is returned (only a second, untyped decode failure is reported).
func (o *UsageLambdaHour) UnmarshalJSON(bytes []byte) (err error) {
	raw := map[string]interface{}{}
	all := struct {
		FuncCount      *int64     `json:"func_count,omitempty"`
		Hour           *time.Time `json:"hour,omitempty"`
		InvocationsSum *int64     `json:"invocations_sum,omitempty"`
	}{}
	err = json.Unmarshal(bytes, &all)
	if err != nil {
		// Fall back to an untyped decode so the original payload is preserved.
		err = json.Unmarshal(bytes, &raw)
		if err != nil {
			return err
		}
		o.UnparsedObject = raw
		return nil
	}
	o.FuncCount = all.FuncCount
	o.Hour = all.Hour
	o.InvocationsSum = all.InvocationsSum
	return nil
}
// NullableUsageLambdaHour wraps a UsageLambdaHour pointer together with an
// explicit "set" flag, so that JSON null can be distinguished from absent.
type NullableUsageLambdaHour struct {
	value *UsageLambdaHour
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableUsageLambdaHour) Get() *UsageLambdaHour {
	return v.value
}

// Set stores val and marks the wrapper as set (val may be nil).
func (v *NullableUsageLambdaHour) Set(val *UsageLambdaHour) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set has been called, even with a nil value.
func (v NullableUsageLambdaHour) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableUsageLambdaHour) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableUsageLambdaHour returns a wrapper already marked as set.
func NewNullableUsageLambdaHour(val *UsageLambdaHour) *NullableUsageLambdaHour {
	return &NullableUsageLambdaHour{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value; a nil value encodes as JSON null.
func (v NullableUsageLambdaHour) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableUsageLambdaHour) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | api/v1/datadog/model_usage_lambda_hour.go | 0.738009 | 0.430626 | model_usage_lambda_hour.go | starcoder |
package sshutil
// Variables used to hold template data.
const (
TypeKey = "Type"
KeyIDKey = "KeyID"
PrincipalsKey = "Principals"
ExtensionsKey = "Extensions"
CriticalOptionsKey = "CriticalOptions"
TokenKey = "Token"
InsecureKey = "Insecure"
UserKey = "User"
CertificateRequestKey = "CR"
)
// TemplateError represents an error in a template produced by the fail
// function.
type TemplateError struct {
	// Message is the text supplied to the template's `fail "message"` call.
	Message string
}

// Error implements the error interface and returns the error string when a
// template executes the `fail "message"` function.
func (e *TemplateError) Error() string {
	return e.Message
}
// TemplateData is an alias for map[string]interface{}. It represents the data
// passed to the templates.
type TemplateData map[string]interface{}
// CreateTemplateData returns a TemplateData with the given certificate type,
// key id, principals, and the default extensions for that type.
func CreateTemplateData(ct CertType, keyID string, principals []string) TemplateData {
	data := TemplateData{}
	data[TypeKey] = ct.String()
	data[KeyIDKey] = keyID
	data[PrincipalsKey] = principals
	data[ExtensionsKey] = DefaultExtensions(ct)
	return data
}
// DefaultExtensions returns the default extensions set in an SSH certificate.
// Only user certificates carry defaults; any other type yields nil.
func DefaultExtensions(ct CertType) map[string]interface{} {
	if ct != UserCert {
		return nil
	}
	return map[string]interface{}{
		"permit-X11-forwarding":   "",
		"permit-agent-forwarding": "",
		"permit-port-forwarding":  "",
		"permit-pty":              "",
		"permit-user-rc":          "",
	}
}
// NewTemplateData creates a new, empty map for templates data.
func NewTemplateData() TemplateData {
	return make(TemplateData)
}
// AddExtension adds one extension to the templates data, creating the
// extensions map on first use (or replacing a value of the wrong type).
func (t TemplateData) AddExtension(key, value string) {
	m, ok := t[ExtensionsKey].(map[string]interface{})
	if !ok {
		m = map[string]interface{}{}
		t[ExtensionsKey] = m
	}
	m[key] = value
}
// AddCriticalOption adds one critical option to the templates data, creating
// the critical-options map on first use (or replacing a wrong-typed value).
func (t TemplateData) AddCriticalOption(key, value string) {
	m, ok := t[CriticalOptionsKey].(map[string]interface{})
	if !ok {
		m = map[string]interface{}{}
		t[CriticalOptionsKey] = m
	}
	m[key] = value
}
// Set sets a key-value pair in the template data.
func (t TemplateData) Set(key string, v interface{}) {
	t[key] = v
}

// SetInsecure sets a key-value pair in the insecure template data.
func (t TemplateData) SetInsecure(key string, v interface{}) {
	if m, ok := t[InsecureKey].(TemplateData); ok {
		m[key] = v
	} else {
		// First insecure value (or wrong-typed entry): start a fresh sub-map.
		t[InsecureKey] = TemplateData{key: v}
	}
}

// SetType sets the certificate type in the template data.
func (t TemplateData) SetType(typ CertType) {
	t.Set(TypeKey, typ.String())
}

// SetKeyID sets the certificate key id in the template data.
func (t TemplateData) SetKeyID(id string) {
	t.Set(KeyIDKey, id)
}

// SetPrincipals sets the certificate principals in the template data.
func (t TemplateData) SetPrincipals(p []string) {
	t.Set(PrincipalsKey, p)
}

// SetExtensions sets the certificate extensions in the template data.
func (t TemplateData) SetExtensions(e map[string]interface{}) {
	t.Set(ExtensionsKey, e)
}

// SetCriticalOptions sets the certificate critical options in the template
// data.
func (t TemplateData) SetCriticalOptions(o map[string]interface{}) {
	t.Set(CriticalOptionsKey, o)
}

// SetToken sets the given token in the template data.
func (t TemplateData) SetToken(v interface{}) {
	t.Set(TokenKey, v)
}

// SetUserData sets the given user provided object in the insecure template
// data.
func (t TemplateData) SetUserData(v interface{}) {
	t.SetInsecure(UserKey, v)
}

// SetCertificateRequest sets the simulated ssh certificate request in the
// insecure template data.
func (t TemplateData) SetCertificateRequest(cr CertificateRequest) {
	t.SetInsecure(CertificateRequestKey, cr)
}
// DefaultTemplate is the default template for an SSH certificate.
const DefaultTemplate = `{
"type": "{{ .Type }}",
"keyId": "{{ .KeyID }}",
"principals": {{ toJson .Principals }},
"extensions": {{ toJson .Extensions }},
"criticalOptions": {{ toJson .CriticalOptions }}
}`
// DefaultAdminTemplate is the template used by an admin user in a OIDC
// provisioner.
const DefaultAdminTemplate = `{
"type": "{{ .Insecure.CR.Type }}",
"keyId": "{{ .Insecure.CR.KeyID }}",
"principals": {{ toJson .Insecure.CR.Principals }}
{{- if eq .Insecure.CR.Type "user" }}
, "extensions": {{ toJson .Extensions }},
"criticalOptions": {{ toJson .CriticalOptions }}
{{- end }}
}`
// DefaultIIDTemplate is the default template for IID provisioners. By default
// certificate type will be set always to host, key id to the instance id.
// Principals will be only enforced by the provisioner if disableCustomSANs is
// set to true.
const DefaultIIDTemplate = `{
"type": "{{ .Type }}",
"keyId": "{{ .KeyID }}",
{{- if .Insecure.CR.Principals }}
"principals": {{ toJson .Insecure.CR.Principals }},
{{- else }}
"principals": {{ toJson .Principals }},
{{- end }}
"extensions": {{ toJson .Extensions }}
}`
// CertificateRequestTemplate is the template used for provisioners that accepts
// any certificate request. The provisioner must validate that type, keyId and
// principals are passed in the request.
const CertificateRequestTemplate = `{
"type": "{{ .Insecure.CR.Type }}",
"keyId": "{{ .Insecure.CR.KeyID }}",
"principals": {{ toJson .Insecure.CR.Principals }}
{{- if eq .Insecure.CR.Type "user" }}
, "extensions": {
"permit-X11-forwarding": "",
"permit-agent-forwarding": "",
"permit-port-forwarding": "",
"permit-pty": "",
"permit-user-rc": ""
}
{{- end }}
}` | vendor/go.step.sm/crypto/sshutil/templates.go | 0.702938 | 0.426322 | templates.go | starcoder |
package asm
import (
"fmt"
"github.com/llir/ll/ast"
"github.com/llir/llvm/ir"
"github.com/llir/llvm/ir/types"
"github.com/pkg/errors"
)
// === [ Create IR ] ===========================================================
// newExtractElementInst returns a new IR extractelement instruction (without
// body but with type) based on the given AST extractelement instruction.
//
// The result type is the element type of the operand's vector type. A
// non-vector operand type indicates an internal invariant violation and
// panics rather than returning an error.
func (fgen *funcGen) newExtractElementInst(ident ir.LocalIdent, old *ast.ExtractElementInst) (*ir.InstExtractElement, error) {
	xType, err := fgen.gen.irType(old.X().Typ())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	xt, ok := xType.(*types.VectorType)
	if !ok {
		panic(fmt.Errorf("invalid vector type; expected *types.VectorType, got %T", xType))
	}
	return &ir.InstExtractElement{LocalIdent: ident, Typ: xt.ElemType}, nil
}

// newInsertElementInst returns a new IR insertelement instruction (without body
// but with type) based on the given AST insertelement instruction.
//
// The result type is the operand's vector type itself (inserting does not
// change the vector shape). Panics on a non-vector operand type, as above.
func (fgen *funcGen) newInsertElementInst(ident ir.LocalIdent, old *ast.InsertElementInst) (*ir.InstInsertElement, error) {
	xType, err := fgen.gen.irType(old.X().Typ())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	xt, ok := xType.(*types.VectorType)
	if !ok {
		panic(fmt.Errorf("invalid vector type; expected *types.VectorType, got %T", xType))
	}
	return &ir.InstInsertElement{LocalIdent: ident, Typ: xt}, nil
}
// newShuffleVectorInst returns a new IR shufflevector instruction (without body
// but with type) based on the given AST shufflevector instruction.
//
// The result is a vector with the mask's length and the X operand's element
// type. Panics if either the operand or the mask is not of vector type.
func (fgen *funcGen) newShuffleVectorInst(ident ir.LocalIdent, old *ast.ShuffleVectorInst) (*ir.InstShuffleVector, error) {
	xType, err := fgen.gen.irType(old.X().Typ())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	xt, ok := xType.(*types.VectorType)
	if !ok {
		panic(fmt.Errorf("invalid vector type; expected *types.VectorType, got %T", xType))
	}
	maskType, err := fgen.gen.irType(old.Mask().Typ())
	if err != nil {
		return nil, errors.WithStack(err)
	}
	mt, ok := maskType.(*types.VectorType)
	if !ok {
		panic(fmt.Errorf("invalid vector type; expected *types.VectorType, got %T", maskType))
	}
	typ := types.NewVector(mt.Len, xt.ElemType)
	return &ir.InstShuffleVector{LocalIdent: ident, Typ: typ}, nil
}
// === [ Translate AST to IR ] =================================================
// --- [ extractelement ] ------------------------------------------------------
// irExtractElementInst translates the given AST extractelement instruction into
// an equivalent IR instruction, filling in the operands and metadata of the
// pre-created instruction produced by newExtractElementInst.
func (fgen *funcGen) irExtractElementInst(new ir.Instruction, old *ast.ExtractElementInst) error {
	inst, ok := new.(*ir.InstExtractElement)
	if !ok {
		// Mismatched pre-created instruction is an internal invariant violation.
		panic(fmt.Errorf("invalid IR instruction for AST instruction; expected *ir.InstExtractElement, got %T", new))
	}
	// Vector.
	x, err := fgen.irTypeValue(old.X())
	if err != nil {
		return errors.WithStack(err)
	}
	inst.X = x
	// Element index.
	index, err := fgen.irTypeValue(old.Index())
	if err != nil {
		return errors.WithStack(err)
	}
	inst.Index = index
	// (optional) Metadata.
	md, err := fgen.gen.irMetadataAttachments(old.Metadata())
	if err != nil {
		return errors.WithStack(err)
	}
	inst.Metadata = md
	return nil
}
// --- [ insertelement ] -------------------------------------------------------
// irInsertElementInst translates the given AST insertelement instruction into
// an equivalent IR instruction.
func (fgen *funcGen) irInsertElementInst(new ir.Instruction, old *ast.InsertElementInst) error {
inst, ok := new.(*ir.InstInsertElement)
if !ok {
panic(fmt.Errorf("invalid IR instruction for AST instruction; expected *ir.InstInsertElement, got %T", new))
}
// Vector.
x, err := fgen.irTypeValue(old.X())
if err != nil {
return errors.WithStack(err)
}
inst.X = x
// Element to insert.
elem, err := fgen.irTypeValue(old.Elem())
if err != nil {
return errors.WithStack(err)
}
inst.Elem = elem
// Element index.
index, err := fgen.irTypeValue(old.Index())
if err != nil {
return errors.WithStack(err)
}
inst.Index = index
// (optional) Metadata.
md, err := fgen.gen.irMetadataAttachments(old.Metadata())
if err != nil {
return errors.WithStack(err)
}
inst.Metadata = md
return nil
}
// --- [ shufflevector ] -------------------------------------------------------
// irShuffleVectorInst translates the given AST shufflevector instruction into
// an equivalent IR instruction.
func (fgen *funcGen) irShuffleVectorInst(new ir.Instruction, old *ast.ShuffleVectorInst) error {
inst, ok := new.(*ir.InstShuffleVector)
if !ok {
panic(fmt.Errorf("invalid IR instruction for AST instruction; expected *ir.InstShuffleVector, got %T", new))
}
// X vector.
x, err := fgen.irTypeValue(old.X())
if err != nil {
return errors.WithStack(err)
}
inst.X = x
// Y vector.
y, err := fgen.irTypeValue(old.Y())
if err != nil {
return errors.WithStack(err)
}
inst.Y = y
// Shuffle mask.
mask, err := fgen.irTypeValue(old.Mask())
if err != nil {
return errors.WithStack(err)
}
inst.Mask = mask
// (optional) Metadata.
md, err := fgen.gen.irMetadataAttachments(old.Metadata())
if err != nil {
return errors.WithStack(err)
}
inst.Metadata = md
return nil
} | asm/inst_vector.go | 0.578567 | 0.413714 | inst_vector.go | starcoder |
package mpa
// A reservoirReader is an intermediate buffer for the main data stream in Layer
// III. Before the decoder starts reading the main data for a frame, it makes
// sure all main data between the byte pointed by main_data_begin and the last
// byte of the frame being decoded is in the reservoir. Thus, the required
// buffer size is the maximum frame size, minus the header size, minus the
// minimum length of the side information, plus the maximum value of
// main_data_begin.
type reservoirReader struct {
	stream *bitReader
	// buffer size spelled out as max frame - header - side info + main_data_begin
	// (see the type comment above).
	buffer [1441 - 4 - 17 + 511]byte
	head int // index of the first unread byte in the buffer
	limit int // index of the first byte not in the buffer
	current byte // byte being read, left-shifted by the # of bits read so far
	bits int // number of unread bits in 'current' (0 <= bits < 8)
}
// setSize moves the last up to n bytes in the reservoir to the beginning of the
// buffer and discards all preceding data. It also sets the current read
// position to the beginning of the buffer, which may result in the main data
// stream essentially being rewound.
//
// Returns MalformedStream if fewer than n bytes are buffered.
func (rd *reservoirReader) setSize(n int) error {
	rd.bits = 0
	rd.head = 0
	if n > rd.limit {
		return MalformedStream("not enough main data")
	}
	// Slide the newest n bytes to the front; overlapping copy is safe.
	copy(rd.buffer[0:], rd.buffer[rd.limit-n:rd.limit])
	rd.limit = n
	return nil
}
// load moves the next n bytes from the input stream to the reservoir,
// advancing limit by however many bytes were actually read.
func (rd *reservoirReader) load(n int) error {
	read, err := rd.stream.readBytes(rd.buffer[rd.limit : rd.limit+n])
	rd.limit += read
	return err
}
// loadUntilSyncword moves bytes from the input stream to the reservoir
// one-by-one until a syncword is found. On buffer overflow, the buffer is
// used as a ring and later rotated back into linear order; overflow is
// reported as MalformedStream (unless a read error occurred first).
func (rd *reservoirReader) loadUntilSyncword() error {
	var err error
	var x int
	overflow := false
	for !rd.stream.syncword3() {
		if x, err = rd.stream.readByte(); err != nil {
			break
		}
		if rd.limit == len(rd.buffer) {
			// Even if the data overflows the buffer, we keep reading until a
			// syncword is found. First, the decoder stays in sync this way.
			// Second, this ensures that we have the most recent data in the
			// reservoir, so chances are higher it will be possible to decode
			// the next frame.
			rd.limit, overflow = 0, true
		}
		rd.buffer[rd.limit] = byte(x)
		rd.limit++
	}
	if overflow {
		// Rotate the ring layout back into linear order: the oldest byte
		// (currently at rd.limit) moves to index 0.
		tmp := rd.buffer
		n := copy(rd.buffer[0:], tmp[rd.limit:])
		copy(rd.buffer[n:], tmp[:rd.limit])
		rd.head, rd.limit = 0, len(rd.buffer)
		if err == nil {
			err = MalformedStream("reservoir overflow")
		}
	}
	return err
}
// readBits reads the next n bits from the reservoir and returns them as an
// integer (most significant bit first). n <= 0 yields 0 without consuming
// anything; running out of buffered bytes yields MalformedStream.
func (rd *reservoirReader) readBits(n int) (int, error) {
	if n <= 0 {
		return 0, nil
	}
	retval, bits := 0, rd.bits
	for {
		if bits == 0 {
			// Refill 'current' with the next buffered byte.
			if rd.head == rd.limit {
				return 0, MalformedStream("reservoir overread")
			}
			bits, rd.current = 8, rd.buffer[rd.head]
			rd.head++
		}
		if n <= bits {
			// The remaining bits of 'current' satisfy the request; take the
			// top n bits and persist the updated bit count.
			retval <<= uint(n)
			retval |= int(rd.current >> uint(8-n))
			rd.current <<= uint(n)
			bits -= n
			rd.bits = bits
			return retval, nil
		} else {
			// Consume all remaining bits of 'current' and continue with the
			// next byte.
			retval <<= uint(bits)
			retval |= int(rd.current >> uint(8-bits))
			n -= bits
			bits = 0
		}
	}
}
// readCode reads one Huffman coded value from the reservoir using the specified
// code tree.
func (rd *reservoirReader) readCode(tree huffmanTree) (int, error) {
n, bits, current := uint32(0), rd.bits, rd.current
for tree[n] != 0 {
if bits == 0 {
if rd.head == rd.limit {
rd.bits = 0
return 0, MalformedStream("not enough Huffman data")
}
bits, current = 8, rd.buffer[rd.head]
rd.head++
}
n = tree[n+uint32(current>>7)]
current <<= 1
bits--
}
rd.bits, rd.current = bits, current
return int(tree[n+1]), nil
} | vendor/github.com/korandiz/mpa/reservoirreader.go | 0.628977 | 0.611643 | reservoirreader.go | starcoder |
package promql
import (
"math"
)
// Function represents a function of the expression language and is
// used by function nodes.
type Function struct {
	// Name is the function's identifier as written in expressions.
	Name string
	// ArgTypes lists the expected type of each declared argument, in order.
	ArgTypes []ValueType
	// Variadic is 0 for fixed arity; the table below uses 1 and -1 for
	// functions with optional or repeated trailing arguments — presumably
	// "optional trailing args" vs "unlimited"; confirm against the parser's
	// arity check.
	Variadic int
	// ReturnType is the type of the value the function evaluates to.
	ReturnType ValueType
}
// calcTrendValue computes the trend (slope-like) component at index i for
// double exponential smoothing. For i == 0 the seed trend b is returned
// unchanged; otherwise the new trend is a tf-weighted blend of the delta
// between the smoothed values (s1 - s0) and the previous trend b.
//
// Note: sf (the smoothing factor) is accepted for signature compatibility
// with the caller but is not used in this computation.
func calcTrendValue(i int, sf, tf, s0, s1, b float64) float64 {
	if i == 0 {
		return b
	}
	return tf*(s1-s0) + (1-tf)*b
}
// linearRegression performs a least-square linear regression analysis on the
// provided SamplePairs. It returns the slope, and the intercept value at the
// provided time.
func linearRegression(samples []Point, interceptTime int64) (slope, intercept float64) {
	var n, sumX, sumY, sumXY, sumX2 float64
	for _, p := range samples {
		// Shift time to the intercept and scale milliseconds to seconds.
		x := float64(p.T-interceptTime) / 1e3
		n++
		sumY += p.V
		sumX += x
		sumXY += x * p.V
		sumX2 += x * x
	}
	// Standard least-squares: slope = cov(X,Y)/var(X).
	covXY := sumXY - sumX*sumY/n
	varX := sumX2 - sumX*sumX/n
	slope = covXY / varX
	intercept = sumY/n - slope*sumX/n
	return slope, intercept
}
var functions = map[string]*Function{
"abs": {
Name: "abs",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"absent": {
Name: "absent",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"avg_over_time": {
Name: "avg_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"ceil": {
Name: "ceil",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"changes": {
Name: "changes",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"clamp_max": {
Name: "clamp_max",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
ReturnType: ValueTypeVector,
},
"clamp_min": {
Name: "clamp_min",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
ReturnType: ValueTypeVector,
},
"count_over_time": {
Name: "count_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"days_in_month": {
Name: "days_in_month",
ArgTypes: []ValueType{ValueTypeVector},
Variadic: 1,
ReturnType: ValueTypeVector,
},
"day_of_month": {
Name: "day_of_month",
ArgTypes: []ValueType{ValueTypeVector},
Variadic: 1,
ReturnType: ValueTypeVector,
},
"day_of_week": {
Name: "day_of_week",
ArgTypes: []ValueType{ValueTypeVector},
Variadic: 1,
ReturnType: ValueTypeVector,
},
"delta": {
Name: "delta",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"deriv": {
Name: "deriv",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"exp": {
Name: "exp",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"floor": {
Name: "floor",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"histogram_quantile": {
Name: "histogram_quantile",
ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector},
ReturnType: ValueTypeVector,
},
"holt_winters": {
Name: "holt_winters",
ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
ReturnType: ValueTypeVector,
},
"hour": {
Name: "hour",
ArgTypes: []ValueType{ValueTypeVector},
Variadic: 1,
ReturnType: ValueTypeVector,
},
"idelta": {
Name: "idelta",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"increase": {
Name: "increase",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"irate": {
Name: "irate",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"label_replace": {
Name: "label_replace",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString, ValueTypeString},
ReturnType: ValueTypeVector,
},
"label_join": {
Name: "label_join",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString},
Variadic: -1,
ReturnType: ValueTypeVector,
},
"ln": {
Name: "ln",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"log10": {
Name: "log10",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"log2": {
Name: "log2",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"max_over_time": {
Name: "max_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"min_over_time": {
Name: "min_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"minute": {
Name: "minute",
ArgTypes: []ValueType{ValueTypeVector},
Variadic: 1,
ReturnType: ValueTypeVector,
},
"month": {
Name: "month",
ArgTypes: []ValueType{ValueTypeVector},
Variadic: 1,
ReturnType: ValueTypeVector,
},
"predict_linear": {
Name: "predict_linear",
ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar},
ReturnType: ValueTypeVector,
},
"quantile_over_time": {
Name: "quantile_over_time",
ArgTypes: []ValueType{ValueTypeScalar, ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"rate": {
Name: "rate",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"resets": {
Name: "resets",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"round": {
Name: "round",
ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar},
Variadic: 1,
ReturnType: ValueTypeVector,
},
"scalar": {
Name: "scalar",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeScalar,
},
"sort": {
Name: "sort",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"sort_desc": {
Name: "sort_desc",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"sqrt": {
Name: "sqrt",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"stddev_over_time": {
Name: "stddev_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"stdvar_over_time": {
Name: "stdvar_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"sum_over_time": {
Name: "sum_over_time",
ArgTypes: []ValueType{ValueTypeMatrix},
ReturnType: ValueTypeVector,
},
"time": {
Name: "time",
ArgTypes: []ValueType{},
ReturnType: ValueTypeScalar,
},
"timestamp": {
Name: "timestamp",
ArgTypes: []ValueType{ValueTypeVector},
ReturnType: ValueTypeVector,
},
"vector": {
Name: "vector",
ArgTypes: []ValueType{ValueTypeScalar},
ReturnType: ValueTypeVector,
},
"year": {
Name: "year",
ArgTypes: []ValueType{ValueTypeVector},
Variadic: 1,
ReturnType: ValueTypeVector,
},
}
// getFunction returns a predefined Function object for the given name, and
// whether the name is known.
func getFunction(name string) (*Function, bool) {
	if fn, ok := functions[name]; ok {
		return fn, true
	}
	return nil, false
}
// vectorByValueHeap orders a Vector ascending by sample value; its method set
// matches container/heap's heap.Interface (Len/Less/Swap/Push/Pop).
type vectorByValueHeap Vector

func (s vectorByValueHeap) Len() int {
	return len(s)
}

func (s vectorByValueHeap) Less(i, j int) bool {
	// NaN compares "less" than everything, so NaN samples group first.
	if math.IsNaN(s[i].V) {
		return true
	}
	return s[i].V < s[j].V
}

func (s vectorByValueHeap) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Push appends the *Sample handed over by container/heap.
func (s *vectorByValueHeap) Push(x interface{}) {
	*s = append(*s, *(x.(*Sample)))
}

// Pop removes and returns the last element, as container/heap requires.
func (s *vectorByValueHeap) Pop() interface{} {
	old := *s
	n := len(old)
	el := old[n-1]
	*s = old[0 : n-1]
	return el
}
// vectorByReverseValueHeap orders a Vector descending by sample value
// (NaN still sorts first); it mirrors vectorByValueHeap.
type vectorByReverseValueHeap Vector

func (s vectorByReverseValueHeap) Len() int {
	return len(s)
}

func (s vectorByReverseValueHeap) Less(i, j int) bool {
	// NaN compares "less" than everything, so NaN samples group first.
	if math.IsNaN(s[i].V) {
		return true
	}
	return s[i].V > s[j].V
}

func (s vectorByReverseValueHeap) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Push appends the *Sample handed over by container/heap.
func (s *vectorByReverseValueHeap) Push(x interface{}) {
	*s = append(*s, *(x.(*Sample)))
}
func (s *vectorByReverseValueHeap) Pop() interface{} {
old := *s
n := len(old)
el := old[n-1]
*s = old[0 : n-1]
return el
} | vendor/github.com/influxdata/promql/v2/functions.go | 0.747155 | 0.781247 | functions.go | starcoder |
package mercator
import (
"math"
)
const (
tileSize = 256.0
initialResolution = 2 * math.Pi * 6378137 / tileSize
originShift = 2 * math.Pi * 6378137 / 2
)
// round rounds a to the nearest integer, halves away from zero.
// Delegates to math.Round instead of the hand-rolled Ceil(a-0.5)/Floor(a+0.5),
// which additionally fixes the classic floating-point edge case where adding
// 0.5 to values just below .5 (e.g. 0.49999999999999994) rounded the wrong way.
func round(a float64) float64 {
	return math.Round(a)
}
// Resolution calculates the resolution (meters/pixel) for given zoom level (measured at Equator)
func Resolution(zoom int) float64 {
	scale := math.Pow(2, float64(zoom))
	return initialResolution / scale
}
// Zoom gives the zoom level for given resolution (measured at Equator)
func Zoom(resolution float64) int {
zoom := round(math.Log(initialResolution/resolution) / math.Log(2))
return int(zoom)
}
// LatLonToMeters converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913.
//
// Fix: the original had lat and lon swapped — x was derived from lat and the
// Mercator log/tan projection was applied to lon. In Spherical Mercator x must
// come from longitude (linear) and the projection must be applied to latitude
// (it diverges at ±90°, which only makes sense for latitude). The swap also
// broke round-tripping: MetersToLatLon(LatLonToMeters(lat, lon)) returned
// (lon, lat).
func LatLonToMeters(lat, lon float64) (float64, float64) {
	x := lon * originShift / 180
	y := math.Log(math.Tan((90+lat)*math.Pi/360)) / (math.Pi / 180)
	y = y * originShift / 180
	return x, y
}
// MetersToLatLon converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum
func MetersToLatLon(x, y float64) (float64, float64) {
	lon := x / originShift * 180
	lat := y / originShift * 180
	// Invert the Mercator projection along the latitude axis.
	lat = 180 / math.Pi * (2*math.Atan(math.Exp(lat*math.Pi/180)) - math.Pi/2)
	return lat, lon
}
// PixelsToMeters converts pixel coordinates in given zoom level of pyramid to EPSG:900913
func PixelsToMeters(px, py float64, zoom int) (float64, float64) {
	res := Resolution(zoom)
	return px*res - originShift, py*res - originShift
}
// MetersToPixels converts EPSG:900913 to pixel coordinates in given zoom level
func MetersToPixels(x, y float64, zoom int) (float64, float64) {
	res := Resolution(zoom)
	return (x + originShift) / res, (y + originShift) / res
}
// LatLonToPixels converts given lat/lon in WGS84 Datum to pixel coordinates in given zoom level
func LatLonToPixels(lat, lon float64, zoom int) (float64, float64) {
	x, y := LatLonToMeters(lat, lon)
	return MetersToPixels(x, y, zoom)
}

// PixelsToLatLon converts pixel coordinates in given zoom level to lat/lon in WGS84 Datum
func PixelsToLatLon(px, py float64, zoom int) (float64, float64) {
	x, y := PixelsToMeters(px, py, zoom)
	return MetersToLatLon(x, y)
}

// PixelsToTile returns a tile covering region in given pixel coordinates
// (tiles are tileSize pixels on a side).
func PixelsToTile(px, py float64) (int, int) {
	tileX := int(math.Floor(px / tileSize))
	tileY := int(math.Floor(py / tileSize))
	return tileX, tileY
}

// MetersToTile returns tile for given mercator coordinates
func MetersToTile(x, y float64, zoom int) (int, int) {
	px, py := MetersToPixels(x, y, zoom)
	return PixelsToTile(px, py)
}
// LatLonToTile returns tile for given lat/lon coordinates
func LatLonToTile(lat, lon float64, zoom int) (int, int) {
px, py := LatLonToPixels(lat, lon, zoom)
return PixelsToTile(px, py)
} | mercator.go | 0.890853 | 0.587085 | mercator.go | starcoder |
package utils
import "fmt"
/*
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment.
*/
type FreqDist struct {
	// Samples maps each observed outcome to the number of times it occurred.
	Samples map[string]int
}

// NewFreqDist wraps the given outcome counts in a FreqDist. The map is
// retained (not copied), so the caller shares ownership.
func NewFreqDist(samples map[string]int) *FreqDist {
	return &FreqDist{samples}
}
// N returns the total number of sample outcomes that have been recorded by this FreqDist.
func (f *FreqDist) N() float64 {
	var total float64
	for _, count := range f.Samples {
		total += float64(count)
	}
	return total
}
// B returns the total number of sample values (or "bins") that have counts
// greater than zero. Bins with zero counts are simply absent from the Samples
// map, so this is the map's length.
func (f *FreqDist) B() int {
	return len(f.Samples)
}
// hapaxes returns all samples that occur exactly once (hapax legomena).
func (f *FreqDist) hapaxes() []string {
	result := make([]string, 0, f.B())
	for sample, count := range f.Samples {
		if count == 1 {
			result = append(result, sample)
		}
	}
	return result
}
// rToNr returns the map from r to Nr, the number of samples with frequency r.
// When bins is non-zero, Nr(0) is set to bins - B(), i.e. the number of
// possible bins that were never observed; otherwise Nr(0) is 0.
func (f *FreqDist) rToNr(bins int) map[int]int {
	tmpRToNr := map[int]int{}
	for _, value := range f.Samples {
		tmpRToNr[value] += 1
	}
	if bins == 0 {
		tmpRToNr[0] = 0
	} else {
		tmpRToNr[0] = bins - f.B()
	}
	return tmpRToNr
}
// cumulativeFrequencies returns the running (cumulative) totals of the counts
// for the specified Samples, in the given order; samples not present in the
// distribution contribute 0.
//
// Fix: despite its name and original doc comment ("cumulative frequencies"),
// the previous implementation returned the raw per-sample counts without
// accumulating, and pre-sized the result by len(f.Samples) instead of
// len(Samples). Callers relying on the old raw-count behavior should use
// f.Samples[s] directly.
func (f *FreqDist) cumulativeFrequencies(Samples []string) []int {
	cf := make([]int, 0, len(Samples))
	total := 0
	for _, sample := range Samples {
		total += f.Samples[sample]
		cf = append(cf, total)
	}
	return cf
}
// freq returns the frequency of the given sample: its count divided by N(),
// the total number of recorded outcomes. The result is always a real number
// in [0, 1]; an empty distribution yields 0 (avoiding division by zero).
func (f *FreqDist) freq(sample string) float64 {
	total := f.N()
	if total == 0 {
		return 0
	}
	return float64(f.Samples[sample]) / total
}
// maxFreq holds the running best (sample, count) pair while scanning for the
// most frequent sample in max().
type maxFreq struct {
	Key string
	Val int
}
/*
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more Samples have the same
number of outcomes, return one of them; which sample is
returned is undefined.
*/
func (f *FreqDist) max() (string, error) {
if len(f.Samples) == 0 {
return "", fmt.Errorf("No Samples loaded, please add samples before getting max")
}
max := maxFreq{}
for key, val := range f.Samples {
if val > max.Val {
max.Key = key
max.Val = val
}
}
return max.Key, nil
} | utils/frequency_dist.go | 0.88397 | 0.72227 | frequency_dist.go | starcoder |
package main
type PlayerState struct {
score int
position int
moving string // Can be "no", "up" or "down"
}
// BallState holds the ball's position on the arena and its direction of
// travel along each axis.
type BallState struct {
	position struct {
		x int
		y int
	}
	// direction: true means the coordinate increases each tick (see GameTick).
	direction struct {
		x bool
		y bool
	}
}
// GameState is the full state of a match: both players, the ball, and
// whether the match is still running.
type GameState struct {
	inProgress bool
	player1    PlayerState
	player2    PlayerState
	ball       BallState
}
// GenerateNewBallState resets the ball after every point scored:
//  1. it randomly picks a player to move towards,
//  2. it randomly picks between upward or downward momentum,
//  3. it randomly picks a spot along the Y axis (height) to start from,
//  4. it starts just behind the visible center line relative to the
//     direction of the player it is moving towards.
func (state *GameState) GenerateNewBallState() {
	ball := BallState{}
	ball.direction.x = GetRandomBool()
	ball.direction.y = GetRandomBool()
	offset := (ballSize - 1) / 2
	if ball.direction.x {
		ball.position.x = arenaWidth/2 - offset
	} else {
		ball.position.x = arenaWidth/2 + offset
	}
	ball.position.y = GetRandomNumberInRange(ballCollisionBounds[0][1], ballCollisionBounds[1][1])
	state.ball = ball
}
// movePaddle returns the paddle position after applying one tick of the given
// movement command ("up"/"down"), refusing moves that would leave
// paddleMovementBounds. Any other command leaves the position unchanged.
func movePaddle(position int, moving string) int {
	step := 2 * velocityMultiplier
	switch moving {
	case "up":
		if position+step < paddleMovementBounds[1] {
			return position + step
		}
	case "down":
		if position-step > paddleMovementBounds[0] {
			return position - step
		}
	}
	return position
}

// GameTick advances the game by one frame: it moves both paddles, moves the
// ball (bouncing off the top/bottom bounds), resolves scoring or deflection
// at the left/right bounds, and finally broadcasts the new state if the game
// is still in progress.
func (state *GameState) GameTick() {
	// The two paddles follow identical movement rules.
	state.player1.position = movePaddle(state.player1.position, state.player1.moving)
	state.player2.position = movePaddle(state.player2.position, state.player2.moving)

	// Vertical movement: reflect off the top and bottom collision bounds.
	if state.ball.direction.y {
		state.ball.position.y += 1 * velocityMultiplier
		if state.ball.position.y > ballCollisionBounds[1][1] {
			state.ball.position.y -= 2 * velocityMultiplier
			state.ball.direction.y = false
		}
	} else {
		state.ball.position.y -= 1 * velocityMultiplier
		if state.ball.position.y < ballCollisionBounds[0][1] {
			state.ball.position.y += 2 * velocityMultiplier
			state.ball.direction.y = true
		}
	}

	// Horizontal movement: at either side bound, the defending player either
	// deflects the ball back or concedes a point (possibly ending the game).
	if state.ball.direction.x {
		state.ball.position.x += 1 * velocityMultiplier
		if state.ball.position.x > ballCollisionBounds[1][0] {
			// Check if player 2 lost a point or managed to deflect the ball
			if state.CouldNotDeflect(state.player2.position) {
				state.player1.score++
				if state.player1.score >= scoreLimit {
					endGame("1")
				} else {
					state.GenerateNewBallState()
				}
			} else {
				state.ball.position.x -= 2 * velocityMultiplier
				state.ball.direction.x = false
			}
		}
	} else {
		state.ball.position.x -= 1 * velocityMultiplier
		if state.ball.position.x < ballCollisionBounds[0][0] {
			// Check if player 1 lost a point or managed to deflect the ball
			if state.CouldNotDeflect(state.player1.position) {
				state.player2.score++
				if state.player2.score >= scoreLimit {
					endGame("2")
				} else {
					state.GenerateNewBallState()
				}
			} else {
				state.ball.position.x += 2 * velocityMultiplier
				state.ball.direction.x = true
			}
		}
	}

	if state.inProgress {
		announceState()
	}
}
/* When the ball approaches its X axis boundary, we consider that the ball was not deflected and the player lost the point if
* 1. The upper bound of the ball is below the lower bound of their paddle, OR
* 2. The lower bound of the ball is above the upper bound of their paddle
*/
func (state *GameState) CouldNotDeflect(paddlePosition int) bool {
if ((state.ball.position.y + ((ballSize - 1) / 2)) < (paddlePosition - ((paddleHeight - 1) / 2))) || ((state.ball.position.y - ((ballSize - 1) / 2)) > (paddlePosition + ((paddleHeight - 1) / 2))) {
return true
}
return false
} | backend/state.go | 0.602062 | 0.422147 | state.go | starcoder |
package internal
type TrapezoidSet map[*Trapezoid]struct{}
// ConvertToMonotones uses a query graph to split a set of polygons into
// monotone polygons: it trapezoidalizes the input, splits trapezoids along
// diagonals, then walks each chain of inside trapezoids top-to-bottom to
// collect the two monotone chains of a polygon.
func ConvertToMonotones(list PolygonList) PolygonList {
	// TODO: QueryGraph should natively support adding all of the polygons at once
	graph := &QueryGraph{}
	for _, polygon := range list {
		graph.AddPolygon(polygon)
	}
	// Keep only the trapezoids that lie inside the polygons.
	trapezoids := make(TrapezoidSet)
	for trapezoid := range graph.IterateTrapezoids() {
		// Skip trapezoids that aren't inside
		if !trapezoid.IsInside() {
			continue
		}
		trapezoids[trapezoid] = struct{}{}
	}
	// This step will turn all trapezoids that should have diagonals (trapezoids
	// who have two non-adjacent points on their boundary) into two trapezoids.
	// This has destroyed the query graph, but not the neighbor graph; the
	// neighbor graph is valid over _inside_ trapezoids, which is all we care
	// about. We also can no longer trust the output of IsInside(), because some
	// trapezoids have been split with segments that do not obey its winding rule.
	// We will use the trapezoid set instead to determine if a trapezoid is
	// inside.
	splitTrapezoidsOnDiagonals(trapezoids)
	var result PolygonList
	for trapezoid := range trapezoids {
		// Scan to the top trapezoid in the monotone. It will always be degenerate
		// on top, and therefore have zero neighbors
		for {
			aboveNeighbor := trapezoid.TrapezoidsAbove.AnyNeighbor()
			if aboveNeighbor == nil {
				break
			}
			if _, ok := trapezoids[aboveNeighbor]; !ok {
				break
			}
			trapezoid = aboveNeighbor
		}
		// The top point is on both chains. We arbitrarily put it on the left
		leftChain := []*Point{trapezoid.Top}
		var rightChain []*Point
		// Traverse the trapezoid chain, collecting the points on the trapezoid's boundary
		for {
			bottom := trapezoid.Bottom
			leftBottom := trapezoid.Left.Bottom()
			rightBottom := trapezoid.Right.Bottom()
			if bottom == leftBottom && bottom == rightBottom {
				// We converged, so just put it on the left chain and break
				leftChain = append(leftChain, bottom)
				delete(trapezoids, trapezoid)
				break
			}
			// Figure out which chain we're on
			if bottom == leftBottom {
				leftChain = append(leftChain, bottom)
			} else if bottom == rightBottom {
				rightChain = append(rightChain, bottom)
			} else {
				fatalf("bottom point was not on either chain")
			}
			// Deleting during range is safe in Go; this skips iterating it later.
			delete(trapezoids, trapezoid)
			belowNeighbor := trapezoid.TrapezoidsBelow.AnyNeighbor()
			if belowNeighbor == nil {
				break
			}
			if _, ok := trapezoids[belowNeighbor]; !ok {
				break
			}
			trapezoid = belowNeighbor
		}
		// Now concatenate all the points from the right chain onto the left in reverse order
		points := leftChain
		for i := len(rightChain) - 1; i >= 0; i-- {
			points = append(points, rightChain[i])
		}
		if len(points) < 3 {
			fatalf("polygon is degenerate: %#v", points)
		}
		// Add the polygon to the result
		result = append(result, Polygon{points})
	}
	return result
}
// splitTrapezoidsOnDiagonals splits every trapezoid that has a diagonal (its
// top and bottom points are not both endpoints of one side) into two
// trapezoids, updating the neighbor relationships. Note that this invalidates
// the query graph, and it breaks the validity of IsInside(), so we cannot use
// either of those after this has been used.
func splitTrapezoidsOnDiagonals(trapezoids TrapezoidSet) {
	// BUG FIX: the original inserted the two halves into the map while
	// ranging over it; whether a range visits entries added during iteration
	// is unspecified in Go, making the traversal nondeterministic. Snapshot
	// the keys first so each original trapezoid is processed exactly once
	// (the halves of a split presumably need no further splitting — the
	// original relied on them being skipped or unvisited; TODO confirm).
	pending := make([]*Trapezoid, 0, len(trapezoids))
	for trapezoid := range trapezoids {
		pending = append(pending, trapezoid)
	}
	for _, trapezoid := range pending {
		top := trapezoid.Top
		bottom := trapezoid.Bottom
		leftTop := trapezoid.Left.Top()
		leftBottom := trapezoid.Left.Bottom()
		rightTop := trapezoid.Right.Top()
		rightBottom := trapezoid.Right.Bottom()
		// Skip if the top and bottom are one of the trapezoid's sides. There's no diagonal in that case
		if top == leftTop && bottom == leftBottom {
			continue
		} else if top == rightTop && bottom == rightBottom {
			continue
		}
		// Split the trapezoid into two trapezoids along the top-bottom diagonal
		segment := &Segment{top, bottom}
		leftTrapezoid, rightTrapezoid := trapezoid.SplitBySegment(segment)
		// Replace the old trapezoid with its two halves
		delete(trapezoids, trapezoid)
		trapezoids[leftTrapezoid] = struct{}{}
		trapezoids[rightTrapezoid] = struct{}{}
	}
}
func dbgDrawTrapezoids(trapezoids TrapezoidSet, scale float64) {
var list PolygonList
// Convert the trapezoids into polygons
for trapezoid := range trapezoids {
var points []*Point
topY := trapezoid.Top.Y
bottomY := trapezoid.Bottom.Y
if trapezoid.Left.IsHorizontal() || trapezoid.Right.IsHorizontal() {
// The trapezoid is degenerate, so just draw a line
points = []*Point{trapezoid.Top, trapezoid.Bottom}
} else {
leftTopX := trapezoid.Left.SolveForX(topY)
leftBottomX := trapezoid.Left.SolveForX(bottomY)
rightTopX := trapezoid.Right.SolveForX(topY)
rightBottomX := trapezoid.Right.SolveForX(bottomY)
points = append(points, &Point{leftTopX, topY})
points = append(points, &Point{leftBottomX, bottomY})
points = append(points, &Point{rightBottomX, bottomY})
points = append(points, &Point{rightTopX, topY})
}
list = append(list, Polygon{points})
}
list.dbgDraw(scale)
} | internal/split_monotones.go | 0.550849 | 0.706912 | split_monotones.go | starcoder |
Minimum Phase Bandwidth Limited Steps
See:
https://www.experimentalscene.com/articles/minbleps.php
https://www.cs.cmu.edu/~eli/papers/icmc01-hardsync.pdf
*/
//-----------------------------------------------------------------------------
package core
import (
"math"
"math/cmplx"
)
//-----------------------------------------------------------------------------
// Sinc computes the normalized sinc function sin(Pi*x)/(Pi*x), with
// Sinc(0) = 1.
func Sinc(x float64) float64 {
	if x == 0 {
		return 1
	}
	px := x * Pi
	return math.Sin(px) / px
}
// BlackmanWindow returns a Blackman window with n elements.
func BlackmanWindow(n int) []float64 {
	w := make([]float64, n)
	if n == 1 {
		w[0] = 1
		return w
	}
	m := float64(n - 1)
	for i := range w {
		theta := Tau * float64(i) / m
		// Classic Blackman coefficients: 0.42, 0.5, 0.08.
		w[i] = 0.42 - (0.5 * math.Cos(theta)) + (0.08 * math.Cos(2*theta))
	}
	return w
}
// RealCepstrum returns the real cepstrum of a real signal: the inverse DFT
// of the log-magnitude spectrum, with only the real part kept.
func RealCepstrum(signal []float64) []float64 {
	spectrum := DFT(toComplex128(signal))
	// Replace each bin by the log of its magnitude.
	for i, c := range spectrum {
		spectrum[i] = complex(math.Log(cmplx.Abs(c)), 0)
	}
	// Back to the time domain, keeping the real part only.
	return toFloat64(InverseDFT(spectrum))
}
// MinimumPhase returns the minimum phase reconstruction of a signal from its
// real cepstrum: the cepstrum is windowed (causal part doubled), exponentiated
// in the frequency domain, and transformed back to the time domain.
func MinimumPhase(realCepstrum []float64) []float64 {
	n := len(realCepstrum)
	nd2 := n / 2
	realTime := make([]float64, n)
	if (n % 2) == 1 {
		// Odd length: keep index 0, double indices 1..nd2-1, zero the rest.
		// NOTE(review): textbook cepstral windowing doubles 1..(n-1)/2 for
		// odd n, which would include index nd2; this matches the reference
		// MinBLEP implementation but looks off by one — TODO confirm.
		realTime[0] = realCepstrum[0]
		for i := 1; i < nd2; i++ {
			realTime[i] = 2 * realCepstrum[i]
		}
		for i := nd2; i < n; i++ {
			realTime[i] = 0
		}
	} else {
		// Even length: keep indices 0 and nd2 unchanged, double 1..nd2-1,
		// zero the rest.
		realTime[0] = realCepstrum[0]
		for i := 1; i < nd2; i++ {
			realTime[i] = 2 * realCepstrum[i]
		}
		realTime[nd2] = realCepstrum[nd2]
		for i := nd2 + 1; i < n; i++ {
			realTime[i] = 0
		}
	}
	// Exponentiate in the frequency domain and return to time.
	freq := DFT(toComplex128(realTime))
	for i := range freq {
		freq[i] = cmplx.Exp(freq[i])
	}
	time := InverseDFT(freq)
	return toFloat64(time)
}
// GenerateMinBLEP returns a minimum phase bandwidth limited step.
func GenerateMinBLEP(zeroCrossings, overSampling int) []float64 {
n := (2 * zeroCrossings * overSampling) + 1
// generate sinc
sinc := make([]float64, n)
k := 1.0 / float64(overSampling)
for i := 0; i < n; i++ {
sinc[i] = Sinc(k*float64(i) - float64(zeroCrossings))
}
// window the sinc
window := BlackmanWindow(n)
for i := 0; i < n; i++ {
sinc[i] *= window[i]
}
// minimum phase reconstruction
realCepstrum := RealCepstrum(sinc)
minPhase := MinimumPhase(realCepstrum)
// integrate into minBLEP
minBLEP := make([]float64, n)
sum := 0.0
for i := 0; i < n; i++ {
sum += minPhase[i]
minBLEP[i] = sum
}
// Normalize
scale := 1.0 / minBLEP[n-1]
for i := 0; i < n; i++ {
minBLEP[i] *= scale
}
return minBLEP
}
//-----------------------------------------------------------------------------
package geometry
import (
"math"
"github.com/go-gl/mathgl/mgl32"
)
// ArcSegment is a circular arc: the portion of the circle at center with the
// given radius spanning from angleStart to angleEnd. Intersects normalizes
// candidate angles into [0, 2*pi), so the bounds are presumably radians in
// that range — TODO confirm with callers.
type ArcSegment struct {
	center     mgl32.Vec2
	radius     float32
	angleStart float32
	angleEnd   float32
}
// solveQuadraticReals returns the real roots (0 to 2 of them) of
// a*x^2 + b*x + c = 0, assuming a != 0. When two roots exist, the
// "+sqrt" root is listed first.
func solveQuadraticReals(a, b, c float32) []float32 {
	disc := b*b - 4*a*c
	switch {
	case disc < 0:
		return []float32{}
	case disc == 0:
		return []float32{-b / (2 * a)}
	}
	root := float32(math.Sqrt(float64(disc)))
	return []float32{
		(-b + root) / (2 * a),
		(-b - root) / (2 * a)}
}
// findVectorCircleIntersection solves for where the vector's line meets the
// arc's full circle. It returns the solutions as multiples of the vector's
// direction (0 to 2 of them); callers must still check that each candidate
// point lies on the arc's angular span.
// See the associated wxMaxima file for the mathematical basis for this.
func findVectorCircleIntersection(seg ArcSegment, vector Vector) []float32 {
	xv := vector.direction.X()
	yv := vector.direction.Y()
	xp := vector.point.X()
	yp := vector.point.Y()
	xo := seg.center.X()
	yo := seg.center.Y()
	// Quadratic coefficients of |point + t*direction - center|^2 = radius^2.
	a := xv*xv + yv*yv
	b := 2 * (xp*xv + yp*yv - (xv*xo + yv*yo))
	c := xp*xp + xo*xo + yp*yp + yo*yo - (2*(xp*xo+yp*yo) + seg.radius*seg.radius)
	// NOTE(review): an earlier TODO here warned that only one root was
	// returned, but both quadratic roots are in fact returned below; the
	// note appears stale — confirm and drop.
	s := solveQuadraticReals(a, b, c)
	return s
}
// Intersects reports whether the vector hits this arc. It returns the
// closest intersection point along the vector's positive direction and the
// outward unit normal of the circle at that point.
func (seg ArcSegment) Intersects(vector Vector) (bool, mgl32.Vec2, mgl32.Vec2) {
	distances := findVectorCircleIntersection(seg, vector)
	doesIntersect := false
	foundIntersectionPoint := mgl32.Vec2{0, 0}
	intersectionNormal := mgl32.Vec2{0, 0}
	closest := float32(math.MaxFloat32)
	for _, d := range distances {
		// Only consider hits in front of the vector that are nearer than
		// anything found so far.
		if d <= 0 || d >= closest {
			continue
		}
		candidate := mgl32.Vec2{
			vector.point.X() + vector.direction.X()*d,
			vector.point.Y() + vector.direction.Y()*d}
		// Verify the candidate lies on the arc's angular span.
		dx := candidate.X() - seg.center.X()
		dy := candidate.Y() - seg.center.Y()
		angle := float32(math.Atan2(float64(dy), float64(dx)))
		if angle < 0 {
			angle += 2 * math.Pi
		}
		if angle >= seg.angleStart && angle <= seg.angleEnd {
			doesIntersect = true
			foundIntersectionPoint = candidate
			closest = d
			// The outward radial direction at angle theta is (cos, sin).
			// BUG FIX: the original returned (sin, cos), i.e. the normal
			// mirrored across y = x.
			intersectionNormal = mgl32.Vec2{
				float32(math.Cos(float64(angle))),
				float32(math.Sin(float64(angle)))}
		}
	}
	return doesIntersect, foundIntersectionPoint, intersectionNormal
}
func NewArcSegment(center mgl32.Vec2, radius, angleStart, angleEnd float32) ArcSegment {
return ArcSegment{center: center, radius: radius, angleStart: angleStart, angleEnd: angleEnd}
} | voxelli/geometry/arcSegment.go | 0.865863 | 0.684149 | arcSegment.go | starcoder |
package codegen
import "strings"
// goFuncValue represents a call to a Go built-in function (len, make,
// append, ...) together with its arguments.
type goFuncValue struct {
	name string
	args []Value
}
// Len creates a new function call of the Go built-in function `len()`.
func Len(val Value) *goFuncValue {
	return newGoFunc("len", val)
}
// MakeSlice creates a new function call of the Go built-in function `make()`
// for an empty slice (a slice with count 0).
func MakeSlice(sliceType *TypeDecl) *goFuncValue {
	return MakeSliceWithCount(sliceType, 0)
}
// MakeSliceWithCount creates a new function call of the Go built-in function
// `make()` for a slice with count.
// Note: it mutates sliceType in place by marking it as an array type.
func MakeSliceWithCount(sliceType *TypeDecl, count int) *goFuncValue {
	sliceType.Array()
	typeString := Identifier(sliceType.name.String())
	return newGoFunc("make", typeString, Int(count))
}
// MakeMap creates a new function call of the Go built-in function `make()`
// for an empty map.
func MakeMap(mapType *MapTypeDecl) *goFuncValue {
	typeString := Identifier(mapType.String())
	return newGoFunc("make", typeString)
}
// MakeMapWithCount creates a new function call of the Go built-in function
// `make()` for a map with an initial size hint of count.
func MakeMapWithCount(mapType *MapTypeDecl, count int) *goFuncValue {
	typeString := Identifier(mapType.String())
	return newGoFunc("make", typeString, Int(count))
}
// Append creates a new function call of the built-in function `append`,
// with sliceValue as the first argument followed by elementValues.
func Append(sliceValue Value, elementValues ...Value) *goFuncValue {
	args := append([]Value{sliceValue}, elementValues...)
	return newGoFunc("append", args...)
}
// Equals compares the value of the go function call for equality.
func (g *goFuncValue) Equals(val Value) *comparisonValue {
	return newEquals(g, val, cmpType_Equals)
}
// NotEquals compares the value of the go function call for inequality.
func (g *goFuncValue) NotEquals(val Value) *comparisonValue {
	return newEquals(g, val, cmpType_NotEquals)
}
// newGoFunc builds a goFuncValue for the named built-in with its arguments.
func newGoFunc(name string, args ...Value) *goFuncValue {
	return &goFuncValue{
		name: name,
		args: args,
	}
}
// writeValue renders the call as `name(arg1, arg2, ...)`.
func (g *goFuncValue) writeValue(sb *strings.Builder) {
	writeF(sb, "%s(", g.name)
	writeValues(sb, g.args)
	sb.WriteByte(')')
}
// isPointer reports whether the value is a pointer; built-in function calls
// never are.
func (g *goFuncValue) isPointer() bool {
	return false
}
package internal
import (
"errors"
"time"
)
// TimeOfDay represents the time of day.
type TimeOfDay struct {
	hour, minute, second int
	// d is the offset from midnight as a Duration.
	d time.Duration
}
// shortForm is the HH:MM:SS reference layout used by ParseTimeOfDay.
const shortForm = "15:04:05"

// errParseTime is returned when a time string does not match shortForm.
var errParseTime = errors.New("Time must be in the format HH:MM:SS")
// NewTimeOfDay returns a newly initialized TimeOfDay.
func NewTimeOfDay(hour, minute, second int) TimeOfDay {
	total := time.Duration(hour)*time.Hour +
		time.Duration(minute)*time.Minute +
		time.Duration(second)*time.Second
	return TimeOfDay{hour: hour, minute: minute, second: second, d: total}
}
// ParseTimeOfDay parses a TimeOfDay from a string in the format HH:MM:SS.
func ParseTimeOfDay(str string) (TimeOfDay, error) {
	parsed, err := time.Parse(shortForm, str)
	if err != nil {
		// Hide the underlying parse detail behind a stable sentinel.
		return TimeOfDay{}, errParseTime
	}
	return NewTimeOfDay(parsed.Clock()), nil
}
// TimeRange represents a time band in a given time zone. When startDay and
// endDay are nil the band recurs daily; otherwise it recurs weekly (see
// IsInRange).
type TimeRange struct {
	startTime, endTime TimeOfDay
	startDay, endDay   *time.Weekday
	loc                *time.Location
}
// NewUTCTimeRange returns a daily time range in UTC.
func NewUTCTimeRange(start, end TimeOfDay) *TimeRange {
	return NewTimeRangeInLocation(start, end, time.UTC)
}
// NewTimeRangeInLocation returns a daily time range in a given location.
// It panics when loc is nil, mirroring the standard time package.
func NewTimeRangeInLocation(start, end TimeOfDay, loc *time.Location) *TimeRange {
	if loc == nil {
		panic("time: missing Location in call to NewTimeRangeInLocation")
	}
	return &TimeRange{startTime: start, endTime: end, loc: loc}
}
// NewUTCWeekRange returns a weekly TimeRange in UTC.
func NewUTCWeekRange(startTime, endTime TimeOfDay, startDay, endDay time.Weekday) *TimeRange {
	return NewWeekRangeInLocation(startTime, endTime, startDay, endDay, time.UTC)
}
// NewWeekRangeInLocation returns a weekly time range in a given location.
func NewWeekRangeInLocation(startTime, endTime TimeOfDay, startDay, endDay time.Weekday, loc *time.Location) *TimeRange {
	r := NewTimeRangeInLocation(startTime, endTime, loc)
	// The parameters are addressable locals, so taking their addresses is safe.
	r.startDay = &startDay
	r.endDay = &endDay
	return r
}
// isInTimeRange reports whether the clock time of t (in the range's
// location) falls inside the daily window. A window whose start is not
// before its end wraps around midnight.
func (r *TimeRange) isInTimeRange(t time.Time) bool {
	ts := NewTimeOfDay(t.In(r.loc).Clock()).d
	if r.startTime.d < r.endTime.d {
		return r.startTime.d <= ts && ts <= r.endTime.d
	}
	// Wrapped (overnight) window: inside unless strictly between end and start.
	return !(r.endTime.d < ts && ts < r.startTime.d)
}
// isInWeekRange reports whether t (in the range's location) falls inside the
// weekly window from (startDay, startTime) to (endDay, endTime).
func (r *TimeRange) isInWeekRange(t time.Time) bool {
	t = t.In(r.loc)
	day := t.Weekday()
	if *r.startDay == *r.endDay {
		if day == *r.startDay {
			return r.isInTimeRange(t)
		}
		// On other days: a window that wraps past midnight (start >= end)
		// covers the rest of the week; a same-day window does not.
		if r.startTime.d < r.endTime.d {
			return false
		}
		return true
	}
	switch {
	case *r.startDay < *r.endDay:
		// Window contained within one week: day must lie between the bounds.
		if day < *r.startDay || *r.endDay < day {
			return false
		}
	default:
		// Window wraps around the end of the week.
		if *r.endDay < day && day < *r.startDay {
			return false
		}
	}
	timeOfDay := NewTimeOfDay(t.Clock())
	// On the boundary days, the clock time must also be inside the window.
	if day == *r.startDay {
		return timeOfDay.d >= r.startTime.d
	}
	if day == *r.endDay {
		return timeOfDay.d <= r.endTime.d
	}
	return true
}
// IsInRange returns true if time t is within the time range. A nil range
// matches every time.
func (r *TimeRange) IsInRange(t time.Time) bool {
	if r == nil {
		return true
	}
	if r.startDay == nil {
		return r.isInTimeRange(t)
	}
	return r.isInWeekRange(t)
}
// IsInSameRange determines if two points in time fall within the same
// occurrence of the (daily or weekly recurring) time range. A nil range
// trivially contains everything.
func (r *TimeRange) IsInSameRange(t1, t2 time.Time) bool {
	if r == nil {
		return true
	}
	if !(r.IsInRange(t1) && r.IsInRange(t2)) {
		return false
	}
	// Order the two instants so t1 is the earlier one.
	if t2.Before(t1) {
		t1, t2 = t2, t1
	}
	t1 = t1.In(r.loc)
	t1Time := NewTimeOfDay(t1.Clock())
	// dayOffset counts how many days after t1's date this occurrence ends.
	dayOffset := 0
	if r.endDay == nil {
		// Daily range: an overnight window that has already started today
		// ends tomorrow.
		if r.startTime.d >= r.endTime.d && t1Time.d >= r.startTime.d {
			dayOffset = 1
		}
	} else {
		// Weekly range: count days until the end weekday, rolling into next
		// week when the end day (or the end time today) has already passed.
		switch {
		case *r.endDay < t1.Weekday():
			dayOffset = 7 + int(*(r.endDay)-t1.Weekday())
		case t1.Weekday() == *r.endDay:
			if r.endTime.d <= t1Time.d {
				dayOffset = 7
			}
		default:
			dayOffset = int(*(r.endDay) - t1.Weekday())
		}
	}
	// Both instants share the occurrence iff t2 precedes its end instant.
	sessionEnd := time.Date(t1.Year(), t1.Month(), t1.Day(), r.endTime.hour, r.endTime.minute, r.endTime.second, 0, r.loc)
	sessionEnd = sessionEnd.AddDate(0, 0, dayOffset)
	return t2.Before(sessionEnd)
}
package constraints
import (
"github.com/zimmski/tavor/token"
)
// Optional implements a constraint and optional token which references another token which can be de(activated)
type Optional struct {
token token.Token
value bool
reducing bool
reducingOriginalValue bool
}
// NewOptional returns a new instance of a Optional token referencing the given token and setting the initial state to deactivated
func NewOptional(tok token.Token) *Optional {
return &Optional{
token: tok,
value: false,
}
}
// Token interface methods
// Clone returns a copy of the token and all its children
func (c *Optional) Clone() token.Token {
return &Optional{
token: c.token.Clone(),
value: c.value,
}
}
// Parse tries to parse the token beginning from the current position in the parser data.
// If the parsing is successful the error argument is nil and the next current position after the token is returned.
func (c *Optional) Parse(pars *token.InternalParser, cur int) (int, []error) {
nex, errs := c.token.Parse(pars, cur)
if len(errs) == 0 {
c.value = false
return nex, nil
}
c.value = true
return cur, nil
}
func (c *Optional) permutation(i uint) {
c.value = i == 0
}
// Permutation sets a specific permutation for this token
func (c *Optional) Permutation(i uint) error {
permutations := c.Permutations()
if i < 1 || i > permutations {
return &token.PermutationError{
Type: token.PermutationErrorIndexOutOfBound,
}
}
c.permutation(i - 1)
return nil
}
// Permutations returns the number of permutations for this token
func (c *Optional) Permutations() uint {
return 2
}
// PermutationsAll returns the number of all possible permutations for this token including its children
func (c *Optional) PermutationsAll() uint {
return 1 + c.token.PermutationsAll()
}
func (c *Optional) String() string {
if c.value {
return ""
}
return c.token.String()
}
// ForwardToken interface methods
// Get returns the current referenced token
func (c *Optional) Get() token.Token {
if c.value {
return nil
}
return c.token
}
// InternalGet returns the current referenced internal token
func (c *Optional) InternalGet() token.Token {
return c.token
}
// InternalLogicalRemove removes the referenced internal token and returns the replacement for the current token or nil if the current token should be removed.
func (c *Optional) InternalLogicalRemove(tok token.Token) token.Token {
if c.token == tok {
return nil
}
return c
}
// InternalReplace replaces an old with a new internal token if it is referenced by this token. The error return argument is not nil, if the replacement is not suitable.
func (c *Optional) InternalReplace(oldToken, newToken token.Token) error {
if c.token == oldToken {
c.token = newToken
}
return nil
}
// OptionalToken interface methods
// IsOptional checks dynamically if this token is in the current state optional
func (c *Optional) IsOptional() bool { return true }
// Activate activates this token
func (c *Optional) Activate() { c.value = false }
// Deactivate deactivates this token
func (c *Optional) Deactivate() { c.value = true }
// ReduceToken interface methods
// Reduce sets a specific reduction for this token
func (c *Optional) Reduce(i uint) error {
reduces := c.Permutations()
if reduces == 0 || i < 1 || i > reduces {
return &token.ReduceError{
Type: token.ReduceErrorIndexOutOfBound,
}
}
if !c.reducing {
c.reducing = true
c.reducingOriginalValue = c.value
}
c.permutation(i - 1)
return nil
}
// Reduces returns the number of reductions for this token
func (c *Optional) Reduces() uint {
if c.reducing || !c.value {
return 2
}
return 0
} | token/constraints/optional.go | 0.900666 | 0.534552 | optional.go | starcoder |
package stats
import (
"math"
"math/rand"
"strings"
"time"
)
// NormPpfRvs generates size random variates by inverse-transform sampling:
// uniform draws are pushed through the normal Point Percentile Function.
// For more information please visit: https://demonstrations.wolfram.com/TheMethodOfInverseTransforms/
func NormPpfRvs(loc float64, scale float64, size int) []float64 {
	// Preserve the original's nil result for non-positive sizes.
	if size <= 0 {
		return nil
	}
	// Preallocate instead of growing the slice one append at a time.
	variates := make([]float64, 0, size)
	for i := 0; i < size; i++ {
		variates = append(variates, NormPpf(rand.Float64(), loc, scale))
	}
	return variates
}
// NormBoxMullerRvs generates size random variates using the Box–Muller
// transform, which converts each pair of uniform draws into two independent
// normal draws.
// For more information please visit: http://mathworld.wolfram.com/Box-MullerTransformation.html
func NormBoxMullerRvs(loc float64, scale float64, size int) []float64 {
	// Preserve the original's nil result for non-positive sizes.
	if size <= 0 {
		return nil
	}
	variates := make([]float64, 0, size)
	pairs := (size + 1) / 2 // ceil(size/2): each iteration yields up to two draws
	for i := 0; i < pairs; i++ {
		// u1 and u2 are uniformly distributed random numbers in [0, 1).
		u1 := rand.Float64()
		u2 := rand.Float64()
		// r and theta are the polar coordinates of the Box–Muller pair.
		r := math.Sqrt(-2 * math.Log(u1))
		theta := 2 * math.Pi * u2
		variates = append(variates, loc+scale*r*math.Cos(theta))
		// Drop the second draw of the final pair when size is odd.
		if len(variates) < size {
			variates = append(variates, loc+scale*r*math.Sin(theta))
		}
	}
	return variates
}
// NormPdf is the probability density function of the normal distribution
// with mean loc and standard deviation scale.
func NormPdf(x float64, loc float64, scale float64) float64 {
	// Use Exp and direct squaring instead of math.Pow, which is both slower
	// and slightly less accurate for these special cases.
	z := (x - loc) / scale
	return math.Exp(-z*z/2) / (scale * math.Sqrt(2*math.Pi))
}
// NormLogPdf is the log of the probability density function, computed in
// closed form: -z^2/2 - log(scale*sqrt(2*pi)). Unlike log(pdf), this stays
// finite far in the tails where the density itself underflows to zero.
func NormLogPdf(x float64, loc float64, scale float64) float64 {
	z := (x - loc) / scale
	return -z*z/2 - math.Log(scale*math.Sqrt(2*math.Pi))
}
// NormCdf is the cumulative distribution function.
func NormCdf(x float64, loc float64, scale float64) float64 {
	z := (x - loc) / (scale * math.Sqrt(2))
	return 0.5 * (1 + math.Erf(z))
}
// NormLogCdf is the log of the cumulative distribution function.
func NormLogCdf(x float64, loc float64, scale float64) float64 {
	cdf := 0.5 * (1 + math.Erf((x-loc)/(scale*math.Sqrt(2))))
	return math.Log(cdf)
}
// NormSf is the survival function (1 - cdf). It is computed with Erfc rather
// than 1 - cdf, which keeps precision in the upper tail where the naive form
// cancels to exactly zero — this is the accuracy advantage the original
// comment promised but did not deliver.
func NormSf(x float64, loc float64, scale float64) float64 {
	return 0.5 * math.Erfc((x-loc)/(scale*math.Sqrt(2)))
}
// NormLogSf is the log of the survival function, computed via Erfc so the
// upper tail stays finite far beyond where log(1 - cdf) degrades to -Inf.
func NormLogSf(x float64, loc float64, scale float64) float64 {
	return math.Log(0.5 * math.Erfc((x-loc)/(scale*math.Sqrt(2))))
}
// NormPpf is the point percentile function (quantile, i.e. inverse CDF).
// This is based on Acklam's inverse normal CDF
// algorithm: http://home.online.no/~pjacklam/notes/invnorm/ (no longer visible).
// For more information please visit: https://stackedboxes.org/2017/05/01/acklams-normal-quantile-function/
func NormPpf(p float64, loc float64, scale float64) (x float64) {
	// Rational-approximation coefficients: a/b for the central region,
	// c/d for the two tails; plow/phigh are the region boundaries.
	const (
		a1 = -3.969683028665376e+01
		a2 = 2.209460984245205e+02
		a3 = -2.759285104469687e+02
		a4 = 1.383577518672690e+02
		a5 = -3.066479806614716e+01
		a6 = 2.506628277459239e+00
		b1 = -5.447609879822406e+01
		b2 = 1.615858368580409e+02
		b3 = -1.556989798598866e+02
		b4 = 6.680131188771972e+01
		b5 = -1.328068155288572e+01
		c1 = -7.784894002430293e-03
		c2 = -3.223964580411365e-01
		c3 = -2.400758277161838e+00
		c4 = -2.549732539343734e+00
		c5 = 4.374664141464968e+00
		c6 = 2.938163982698783e+00
		d1 = 7.784695709041462e-03
		d2 = 3.224671290700398e-01
		d3 = 2.445134137142996e+00
		d4 = 3.754408661907416e+00
		plow = 0.02425
		phigh = 1 - plow
	)
	// Out-of-domain probabilities give NaN; the endpoints give +/-Inf.
	if p < 0 || p > 1 {
		return math.NaN()
	} else if p == 0 {
		return -math.Inf(0)
	} else if p == 1 {
		return math.Inf(0)
	}
	if p < plow {
		// Lower tail.
		q := math.Sqrt(-2 * math.Log(p))
		x = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
	} else if phigh < p {
		// Upper tail (mirror image of the lower tail).
		q := math.Sqrt(-2 * math.Log(1-p))
		x = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /
			((((d1*q+d2)*q+d3)*q+d4)*q + 1)
	} else {
		// Central region.
		q := p - 0.5
		r := q * q
		x = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q /
			(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1)
	}
	// One step of Halley's method refines the approximation to near machine
	// precision before scaling to the requested loc/scale.
	e := 0.5*math.Erfc(-x/math.Sqrt2) - p
	u := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2)
	x = x - u/(1+x*u/2)
	return x*scale + loc
}
// NormIsf is the inverse survival function (inverse of sf).
func NormIsf(p float64, loc float64, scale float64) float64 {
	x := -NormPpf(p, loc, scale)
	// Normalize negative zero to positive zero.
	if x == 0 {
		return 0
	}
	return x
}
// NormMoment approximates the non-central (raw) moment of order n, via the
// binomial expansion of E[(loc + scale*Z)^n] using the even central moments
// of the standard normal Z.
// For more information please visit: https://math.stackexchange.com/questions/1945448/methods-for-finding-raw-moments-of-the-normal-distribution
func NormMoment(n int, loc float64, scale float64) float64 {
	toReturn := 0.0
	for i := 0; i < n+1; i++ {
		// Only even powers of the zero-mean part contribute: odd central
		// moments of the normal distribution are zero.
		if (n-i)%2 == 0 {
			// (n-i)! / (2^((n-i)/2) * ((n-i)/2)!) is the (n-i)-th central
			// moment of the standard normal (the double factorial (n-i-1)!!).
			toReturn += float64(Ncr(n, i)) * (math.Pow(loc, float64(i))) * (math.Pow(scale, float64(n-i))) *
				(float64(factorial(n-i)) / ((math.Pow(2.0, float64((n-i)/2))) *
					float64(factorial((n-i)/2))))
		}
	}
	return toReturn
}
// NormStats returns the requested moments of the distribution, in the fixed
// order mean ('m'), variance ('v'), skew ('s'), kurtosis ('k'). The moments
// string may contain any subset of "mvsk".
func NormStats(loc float64, scale float64, moments string) []float64 {
	var out []float64
	if strings.ContainsRune(moments, 'm') {
		out = append(out, loc)
	}
	if strings.ContainsRune(moments, 'v') {
		out = append(out, math.Pow(scale, 2))
	}
	// Skew and (excess) kurtosis of a normal distribution are both zero.
	if strings.ContainsRune(moments, 's') {
		out = append(out, 0.0)
	}
	if strings.ContainsRune(moments, 'k') {
		out = append(out, 0.0)
	}
	return out
}
// NormEntropy is the differential entropy of the distribution:
// log(scale * sqrt(2*pi*e)). It depends only on scale.
func NormEntropy(loc float64, scale float64) float64 {
	return math.Log(scale * math.Sqrt(2*math.Pi*math.E))
}
// NormFit returns the maximum likelihood estimators for the normal
// distribution fitted to data: the sample mean followed by the population
// (divide-by-n) standard deviation.
func NormFit(data []float64) [2]float64 {
	n := float64(len(data))
	var sum float64
	for _, v := range data {
		sum += v
	}
	mean := sum / n
	var sumSq float64
	for _, v := range data {
		sumSq += math.Pow(v-mean, 2)
	}
	return [2]float64{mean, math.Sqrt(sumSq / n)}
}
// NormMedian is the median of the distribution; for a normal distribution it
// equals the mean loc.
func NormMedian(loc float64, scale float64) float64 {
	return loc
}
// NormMean is the mean/expected value of the distribution, loc.
func NormMean(loc float64, scale float64) float64 {
	return loc
}
// NormVar is the variance of the distribution, scale squared.
func NormVar(loc float64, scale float64) float64 {
	// Direct multiplication instead of math.Pow(scale, 2).
	return scale * scale
}
// NormStd is the standard deviation of the distribution, scale.
func NormStd(loc float64, scale float64) float64 {
	return scale
}
// NormInterval returns the endpoints of the symmetric range that contains
// alpha percent of the distribution's probability mass.
func NormInterval(alpha float64, loc float64, scale float64) [2]float64 {
	lower := NormPpf((1.0-alpha)/2, loc, scale)
	upper := NormPpf((1.0+alpha)/2, loc, scale)
	return [2]float64{lower, upper}
}
// factorial computes x! iteratively. Inputs <= 0 yield 1 (the recursive
// original never terminated on negative input).
func factorial(x int) int {
	result := 1
	for i := 2; i <= x; i++ {
		result *= i
	}
	return result
}
// Ncr computes the binomial coefficient "n choose r" (Manasse's algorithm).
func Ncr(n, r int) int {
	if n <= 1 || r == 0 || n == r {
		return 1
	}
	// C(n, r) == C(n, n-r): work with the smaller of the two.
	if complement := n - r; complement < r {
		r = complement
	}
	if r == 1 {
		return n
	}
	// Build the product incrementally; each partial result is an integer.
	result := n - r + 1
	for i, j := result+1, 2; j <= r; i, j = i+1, j+1 {
		result = result * i / j
	}
	return result
}
package operator
import (
"bytes"
"github.com/matrixorigin/matrixone/pkg/container/nulls"
"github.com/matrixorigin/matrixone/pkg/container/types"
"github.com/matrixorigin/matrixone/pkg/container/vector"
"github.com/matrixorigin/matrixone/pkg/encoding"
"github.com/matrixorigin/matrixone/pkg/vm/process"
)
// retType is the result type shared by every comparison operator in
// this file: a boolean vector.
var retType = types.T_bool.ToType()
// allocateBoolVector allocates a boolean vector of the given length from
// the process allocator and exposes its raw byte buffer as a []bool
// column trimmed to exactly length elements.
func allocateBoolVector(length int64, proc *process.Process) (*vector.Vector, error) {
	vec, err := proc.AllocVector(retType, length)
	if err != nil {
		return nil, err
	}
	// Reinterpret the raw data as bools, then slice down to the requested
	// length (the underlying allocation may be larger).
	vec.Col = encoding.DecodeBoolSlice(vec.Data)
	vec.Col = vec.Col.([]bool)[:length]
	return vec, nil
}
// FillNullPos resets the boolean result to false at every row marked
// null in the vector's null bitmap, so null positions never expose a
// stale true value from the freshly allocated buffer.
func FillNullPos(vec *vector.Vector) {
	if nulls.Any(vec.Nsp) {
		rows := vec.Nsp.Np.ToArray()
		cols := vec.Col.([]bool)
		for _, row := range rows {
			cols[row] = false
		}
	}
}
// ScalarNeNotScalar evaluates scalar != column for fixed-size types.
// col1 holds the single value of the scalar vector sv; col2 is the full
// column of the non-scalar vector nsv. The null set of nsv is copied to
// the result, and null positions are then forced to false.
func ScalarNeNotScalar[T NormalType](sv, nsv *vector.Vector, col1, col2 []T, proc *process.Process) (*vector.Vector, error) {
	length := int64(vector.Length(nsv))
	vec, err := allocateBoolVector(length, proc)
	if err != nil {
		return nil, err
	}
	vcols := vec.Col.([]bool)
	value := col1[0]
	for i := range vcols {
		vcols[i] = value != col2[i]
	}
	nulls.Or(nsv.Nsp, nil, vec.Nsp)
	FillNullPos(vec)
	return vec, nil
}
// isBytesNe reports whether two byte slices differ.
// bytes.Equal already treats slices of different lengths as unequal, so
// the previous explicit length check was redundant.
func isBytesNe(b1, b2 []byte) bool {
	return !bytes.Equal(b1, b2)
}
// ScalarStringNeNotScalar evaluates scalar != column for string/bytes
// values. str is the single value of the scalar vector sv; col holds the
// non-scalar column of nsv. The null set of nsv is copied to the result,
// and null positions are then forced to false.
func ScalarStringNeNotScalar(sv, nsv *vector.Vector, str []byte, col *types.Bytes, proc *process.Process) (*vector.Vector, error) {
	var i int64
	length := int64(vector.Length(nsv))
	vec, err := allocateBoolVector(length, proc)
	if err != nil {
		return nil, err
	}
	vcols := vec.Col.([]bool)
	for i = 0; i < length; i++ {
		vcols[i] = isBytesNe(str, col.Get(i))
	}
	nulls.Or(nsv.Nsp, nil, vec.Nsp)
	FillNullPos(vec)
	return vec, nil
}
// NeGeneral implements the != operator for fixed-size types. It handles
// all four scalar/non-scalar combinations of the two input vectors and
// delegates scalar-null inputs to HandleWithNullCol.
func NeGeneral[T NormalType](vs []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
	v1, v2 := vs[0], vs[1]
	col1, col2 := vector.MustTCols[T](v1), vector.MustTCols[T](v2)
	if v1.IsScalarNull() || v2.IsScalarNull() {
		return HandleWithNullCol(vs, proc)
	}
	c1, c2 := v1.IsScalar(), v2.IsScalar()
	switch {
	case c1 && c2:
		// Both scalar: the result is a single boolean scalar.
		vec := proc.AllocScalarVector(retType)
		vec.Col = make([]bool, 1)
		vec.Col.([]bool)[0] = col1[0] != col2[0]
		return vec, nil
	case c1 && !c2:
		return ScalarNeNotScalar[T](v1, v2, col1, col2, proc)
	case !c1 && c2:
		// != is symmetric, so the operands can simply be swapped.
		return ScalarNeNotScalar[T](v2, v1, col2, col1, proc)
	}
	// case !c1 && !c2: element-wise comparison of two full columns.
	length := int64(vector.Length(v1))
	vec, err := allocateBoolVector(length, proc)
	if err != nil {
		return nil, err
	}
	vcols := vec.Col.([]bool)
	for i := range vcols {
		vcols[i] = col1[i] != col2[i]
	}
	nulls.Or(v1.Nsp, v2.Nsp, vec.Nsp)
	FillNullPos(vec)
	return vec, nil
}
// NeString implements the != operator for string/bytes columns. It
// mirrors NeGeneral: all four scalar/non-scalar combinations are
// handled, and scalar-null inputs are delegated to HandleWithNullCol.
func NeString(vs []*vector.Vector, proc *process.Process) (*vector.Vector, error) {
	v1, v2 := vs[0], vs[1]
	col1, col2 := vector.MustBytesCols(v1), vector.MustBytesCols(v2)
	if v1.IsScalarNull() || v2.IsScalarNull() {
		return HandleWithNullCol(vs, proc)
	}
	c1, c2 := v1.IsScalar(), v2.IsScalar()
	switch {
	case c1 && c2:
		// Both scalar: the result is a single boolean scalar.
		vec := proc.AllocScalarVector(retType)
		vec.Col = make([]bool, 1)
		vec.Col.([]bool)[0] = isBytesNe(col1.Get(0), col2.Get(0))
		return vec, nil
	case c1 && !c2:
		return ScalarStringNeNotScalar(v1, v2, col1.Get(0), col2, proc)
	case !c1 && c2:
		// != is symmetric, so the operands can simply be swapped.
		return ScalarStringNeNotScalar(v2, v1, col2.Get(0), col1, proc)
	}
	// case !c1 && !c2: element-wise comparison of two full columns.
	length := int64(vector.Length(v1))
	vec, err := allocateBoolVector(length, proc)
	if err != nil {
		return nil, err
	}
	vcols := vec.Col.([]bool)
	for i := range vcols {
		j := int64(i)
		vcols[i] = isBytesNe(col1.Get(j), col2.Get(j))
	}
	nulls.Or(v1.Nsp, v2.Nsp, vec.Nsp)
	FillNullPos(vec)
	return vec, nil
}
package fil
import "github.com/rannoch/cldr"
// calendar holds the CLDR date/time formatting data for the Filipino
// (fil) locale: date/time/datetime patterns plus localized month, day
// and AM/PM names in their abbreviated, narrow, short and wide forms.
var calendar = cldr.Calendar{
	Formats: cldr.CalendarFormats{
		Date: cldr.CalendarDateFormat{Full: "EEEE, MMMM d, y", Long: "MMMM d, y", Medium: "MMM d, y", Short: "M/d/yy"},
		Time: cldr.CalendarDateFormat{Full: "h:mm:ss a zzzz", Long: "h:mm:ss a z", Medium: "h:mm:ss a", Short: "h:mm a"},
		DateTime: cldr.CalendarDateFormat{Full: "{1} 'nang' {0}", Long: "{1} 'nang' {0}", Medium: "{1}, {0}", Short: "{1}, {0}"},
	},
	FormatNames: cldr.CalendarFormatNames{
		Months: cldr.CalendarMonthFormatNames{
			Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "Ene", Feb: "Peb", Mar: "Mar", Apr: "Abr", May: "May", Jun: "Hun", Jul: "Hul", Aug: "Ago", Sep: "Set", Oct: "Okt", Nov: "Nob", Dec: "Dis"},
			Narrow: cldr.CalendarMonthFormatNameValue{Jan: "E", Feb: "P", Mar: "M", Apr: "A", May: "M", Jun: "H", Jul: "H", Aug: "A", Sep: "S", Oct: "O", Nov: "N", Dec: "D"},
			Short: cldr.CalendarMonthFormatNameValue{},
			Wide: cldr.CalendarMonthFormatNameValue{Jan: "Enero", Feb: "Pebrero", Mar: "Marso", Apr: "Abril", May: "Mayo", Jun: "Hunyo", Jul: "Hulyo", Aug: "Agosto", Sep: "Setyembre", Oct: "Oktubre", Nov: "Nobyembre", Dec: "Disyembre"},
		},
		Days: cldr.CalendarDayFormatNames{
			Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "Lin", Mon: "Lun", Tue: "Mar", Wed: "Miy", Thu: "Huw", Fri: "Biy", Sat: "Sab"},
			Narrow: cldr.CalendarDayFormatNameValue{Sun: "L", Mon: "L", Tue: "M", Wed: "M", Thu: "H", Fri: "B", Sat: "S"},
			Short: cldr.CalendarDayFormatNameValue{Sun: "Li", Mon: "Lu", Tue: "Ma", Wed: "Mi", Thu: "Hu", Fri: "Bi", Sat: "Sa"},
			Wide: cldr.CalendarDayFormatNameValue{Sun: "Linggo", Mon: "Lunes", Tue: "Martes", Wed: "Miyerkules", Thu: "Huwebes", Fri: "Biyernes", Sat: "Sabado"},
		},
		Periods: cldr.CalendarPeriodFormatNames{
			Abbreviated: cldr.CalendarPeriodFormatNameValue{AM: "AM", PM: "PM"},
			Narrow: cldr.CalendarPeriodFormatNameValue{AM: "a", PM: "p"},
			Short: cldr.CalendarPeriodFormatNameValue{},
			Wide: cldr.CalendarPeriodFormatNameValue{AM: "AM", PM: "PM"},
		},
	},
}
package gabi
import (
"crypto/rand"
"math/big"
)
// CLSignature is a data structure for holding a Camenisch-Lysyanskaya
// signature (A, e, v), optionally carrying the keyshare commitment
// R_0^{keysharesecret} needed during verification.
// NOTE(review): the JSON tags are inconsistent — A has no tag and
// KeyshareP keeps its exported casing while e and v are lowercased.
// Confirm whether existing serialized data depends on this before
// normalizing.
type CLSignature struct {
	A *big.Int
	E *big.Int `json:"e"`
	V *big.Int `json:"v"`
	KeyshareP *big.Int `json:"KeyshareP"` // R_0^{keysharesecret}, necessary for verification
}
// signMessageBlockAndCommitment signs a message block (ms) together with
// a commitment (U) using the Camenisch-Lysyanskaya signature scheme as
// used in the IdeMix system. It chooses a random v and a random prime e,
// computes Q = inv(S^v * R * U) * Z, and returns A = Q^(e^-1 mod p'q').
// (The previous comment named the exported wrapper SignMessageBlock.)
func signMessageBlockAndCommitment(sk *PrivateKey, pk *PublicKey, U *big.Int, ms []*big.Int, Rs []*big.Int) (*CLSignature, error) {
	// R = prod Rs[i]^ms[i] mod N.
	R := representToBases(Rs, ms, pk.N, pk.Params.Lm)
	// v is drawn from [2^(Lv-1), 2^(Lv-1) + 2^(Lv-1)).
	vTilde, _ := RandomBigInt(pk.Params.Lv - 1)
	twoLv := new(big.Int).Lsh(bigONE, pk.Params.Lv-1)
	v := new(big.Int).Add(twoLv, vTilde)
	// Q = inv( S^v * R * U) * Z
	numerator := new(big.Int).Exp(pk.S, v, pk.N)
	numerator.Mul(numerator, R).Mul(numerator, U).Mod(numerator, pk.N)
	invNumerator, _ := modInverse(numerator, pk.N)
	Q := new(big.Int).Mul(pk.Z, invNumerator)
	Q.Mod(Q, pk.N)
	e, err := randomPrimeInRange(rand.Reader, pk.Params.Le-1, pk.Params.LePrime-1)
	if err != nil {
		return nil, err
	}
	// A = Q^d where d = e^-1 modulo the group order p'q'.
	order := new(big.Int).Mul(sk.PPrime, sk.QPrime)
	d, _ := modInverse(e, order)
	A := new(big.Int).Exp(Q, d, pk.N)
	// TODO: this is probably open to side channel attacks, maybe use a
	// safe (raw) RSA signature?
	return &CLSignature{A: A, E: e, V: v}, nil
}
// SignMessageBlock signs a message block (ms) using the
// Camenisch-Lysyanskaya signature scheme as used in the IdeMix system.
// It is signMessageBlockAndCommitment with a trivial commitment U = 1
// and the public key's own bases.
func SignMessageBlock(sk *PrivateKey, pk *PublicKey, ms []*big.Int) (*CLSignature, error) {
	return signMessageBlockAndCommitment(sk, pk, big.NewInt(1), ms, pk.R)
}
// Verify checks whether the signature is correct for the given public
// key and messages: it validates that e lies in the required range and
// that A^e * R * S^v equals Z (mod N).
func (s *CLSignature) Verify(pk *PublicKey, ms []*big.Int) bool {
	// First check that e is in the range [2^{l_e - 1}, 2^{l_e - 1} + 2^{l_e_prime - 1}]
	start := new(big.Int).Lsh(bigONE, pk.Params.Le-1)
	end := new(big.Int).Lsh(bigONE, pk.Params.LePrime-1)
	end.Add(end, start)
	if s.E.Cmp(start) < 0 || s.E.Cmp(end) > 0 {
		return false
	}
	// Q = A^e * R * S^v
	Ae := new(big.Int).Exp(s.A, s.E, pk.N)
	R := representToBases(pk.R, ms, pk.N, pk.Params.Lm)
	// Fold in the keyshare commitment when present.
	if s.KeyshareP != nil {
		R.Mul(R, s.KeyshareP)
	}
	Sv := modPow(pk.S, s.V, pk.N)
	Q := new(big.Int).Mul(Ae, R)
	Q.Mul(Q, Sv).Mod(Q, pk.N)
	// Signature verifies if Q == Z
	return pk.Z.Cmp(Q) == 0
}
// Randomize returns an unlinkable re-randomization of the signature:
// A' = A * S^r and v' = v - e*r for a random r, leaving e unchanged.
// The randomized triple verifies against the same public key.
func (s *CLSignature) Randomize(pk *PublicKey) *CLSignature {
	r, _ := RandomBigInt(pk.Params.LRA)
	APrime := new(big.Int).Mul(s.A, new(big.Int).Exp(pk.S, r, pk.N))
	APrime.Mod(APrime, pk.N)
	t := new(big.Int).Mul(s.E, r)
	VPrime := new(big.Int).Sub(s.V, t)
	return &CLSignature{A: APrime, E: new(big.Int).Set(s.E), V: VPrime}
}
package geoutil
import (
"errors"
"github.com/hiendv/geojson/pkg/util"
"github.com/paulmach/orb"
"github.com/paulmach/orb/geojson"
)
// GeoJSON geometry type names this package can rewind.
const (
	geometryMultiPolygon = "MultiPolygon"
	geometryPolygon = "Polygon"
)
// RewindFeatureCollection rewinds every feature of a GeoJSON feature
// collection, stopping at the first failure.
// The second parameter is the direction of winding. True means clockwise.
func RewindFeatureCollection(fc *geojson.FeatureCollection, outer bool) error {
	if fc == nil {
		return errors.New("invalid feature collection")
	}
	for _, f := range fc.Features {
		if err := RewindFeature(f, outer); err != nil {
			return err
		}
	}
	return nil
}
// RewindFeature rewinds the geometry of a GeoJSON feature.
// The second parameter is the direction of winding. True means clockwise.
func RewindFeature(f *geojson.Feature, outer bool) error {
	if f == nil {
		return errors.New("invalid feature")
	}
	// Return the delegate's error directly instead of the previous
	// check-then-return-nil detour.
	return RewindGeometry(f.Geometry, outer)
}
// RewindGeometry rewinds a GeoJSON geometry. Only Polygon and
// MultiPolygon geometries are supported.
// The second parameter is the direction of winding. True means clockwise.
func RewindGeometry(g orb.Geometry, outer bool) error {
	if g == nil {
		return errors.New("invalid geometry")
	}
	switch g.GeoJSONType() {
	case geometryPolygon:
		polygon, ok := g.(orb.Polygon)
		if !ok {
			return errors.New("invalid Polygon")
		}
		RewindRings(polygon, outer)
		return nil
	case geometryMultiPolygon:
		multi, ok := g.(orb.MultiPolygon)
		if !ok {
			return errors.New("invalid MultiPolygon")
		}
		for _, polygon := range multi {
			RewindRings(polygon, outer)
		}
		return nil
	default:
		return errors.New("geometry type not supported")
	}
}
// RewindRings rewinds a polygon's rings in place and returns the slice.
// The exterior ring (index 0) winds in the requested direction; interior
// rings (holes) wind the opposite way.
// The second parameter is the direction of winding. True means clockwise.
func RewindRings(rings []orb.Ring, outer bool) []orb.Ring {
	if len(rings) == 0 {
		return rings
	}
	RewindRing(rings[0], outer)
	for _, hole := range rings[1:] {
		RewindRing(hole, !outer)
	}
	return rings
}
// RewindRing rewinds a GeoJSON ring.
// The second parameter is the direction of winding. True means clockwise.
func RewindRing(ring orb.Ring, cw bool) {
// Shoelace formula: https://mathworld.wolfram.com/PolygonArea.html
var area float64 = 0
len := len(ring)
for i, j := 0, len-1; i < len; i, j = i+1, i+1 {
area += (ring[i][0] - ring[j][0]) * (ring[j][1] + ring[i][1])
}
if area >= 0 != cw {
util.ReverseAny(ring)
}
} | pkg/geoutil/rewind.go | 0.774583 | 0.421492 | rewind.go | starcoder |
package continuous
import (
"github.com/jtejido/ggsl/specfunc"
"github.com/jtejido/linear"
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
smath "github.com/jtejido/stats/math"
"math"
"math/rand"
)
// Gamma is the gamma distribution, parameterized by shape (α) and
// rate (β).
// https://en.wikipedia.org/wiki/Gamma_distribution
type Gamma struct {
	baseContinuousWithSource
	shape, rate float64 // α, β
	natural linear.RealVector // cached natural parameters, set by ToExponential
}
// NewGamma constructs a Gamma(shape, rate) using the default random
// source.
func NewGamma(shape, rate float64) (*Gamma, error) {
	return NewGammaWithSource(shape, rate, nil)
}
// NewGammaWithSource constructs a Gamma(shape, rate) that draws
// randomness from src (nil means the package-global source).
// Both parameters must be strictly positive.
func NewGammaWithSource(shape, rate float64, src rand.Source) (*Gamma, error) {
	if shape <= 0 || rate <= 0 {
		return nil, err.Invalid()
	}
	r := new(Gamma)
	r.shape = shape
	r.rate = rate
	r.src = src
	return r, nil
}
// String renders the distribution name, parameter limits and support.
func (g *Gamma) String() string {
	return "Gamma: Parameters - " + g.Parameters().String() + ", Support(x) - " + g.Support().String()
}
// Parameters returns the valid open ranges of the parameters:
// k ∈ (0,∞)
// θ ∈ (0,∞)
func (g *Gamma) Parameters() stats.Limits {
	return stats.Limits{
		"k": stats.Interval{0, math.Inf(1), true, true},
		"θ": stats.Interval{0, math.Inf(1), true, true},
	}
}
// Support returns the open support of the density: x ∈ (0,∞).
func (g *Gamma) Support() stats.Interval {
	return stats.Interval{0, math.Inf(1), true, true}
}
// Probability returns the PDF at x,
// f(x) = β^α / Γ(α) · x^(α-1) · e^(-βx),
// and 0 outside the support.
func (g *Gamma) Probability(x float64) float64 {
	if g.Support().IsWithinInterval(x) {
		return (math.Pow(g.rate, g.shape) / specfunc.Gamma(g.shape)) * math.Pow(x, g.shape-1) * math.Exp(-g.rate*x)
	}
	return 0
}
// Distribution returns the CDF at x — the regularized lower incomplete
// gamma function P(α, βx) — and 0 outside the support.
func (g *Gamma) Distribution(x float64) float64 {
	if g.Support().IsWithinInterval(x) {
		return specfunc.Gamma_inc_P(g.shape, g.rate*x)
	}
	return 0
}
// Inverse returns the quantile function (inverse CDF) at probability p,
// clamping p <= 0 to 0 and p >= 1 to +Inf.
func (g *Gamma) Inverse(p float64) float64 {
	if p <= 0 {
		return 0
	}
	if p >= 1 {
		return math.Inf(1)
	}
	return smath.InverseRegularizedLowerIncompleteGamma(g.shape, p) / g.rate
}
// Entropy returns the differential entropy:
// α - ln β + ln Γ(α) + (1-α)ψ(α).
func (g *Gamma) Entropy() float64 {
	return g.shape - math.Log(g.rate) + specfunc.Lngamma(g.shape) + (1-g.shape)*specfunc.Psi(g.shape)
}
// ExKurtosis returns the excess kurtosis, 6/α.
func (g *Gamma) ExKurtosis() float64 {
	return 6 / g.shape
}
// Skewness returns the skewness, 2/√α.
func (g *Gamma) Skewness() float64 {
	return 2 / math.Sqrt(g.shape)
}
// Mean returns the expected value, α/β.
func (g *Gamma) Mean() float64 {
	return g.shape / g.rate
}
// Median returns the closed-form approximation μ(3α-0.8)/(3α+0.2);
// the gamma median has no exact closed form.
func (g *Gamma) Median() float64 {
	μ := g.Mean()
	k3 := 3 * g.shape
	return μ * ((k3 - 0.8) / (k3 + 0.2))
}
// Mode returns (α-1)/β for α >= 1.
// NOTE(review): for 0 < α < 1 the distribution's mode is at 0, but this
// implementation reports NaN — confirm that is intended.
func (g *Gamma) Mode() float64 {
	if g.shape < 1 {
		return math.NaN()
	}
	return (g.shape - 1) / g.rate
}
// Variance returns α/β².
func (g *Gamma) Variance() float64 {
	return g.shape / (g.rate * g.rate)
}
// Rand draws one sample using the Marsaglia-Tsang rejection method.
// For shape >= 1 it samples directly with d = α - 1/3.
// For shape < 1 it samples from Gamma(α+1) (d = α + 1 - 1/3) and scales
// the result by U^(1/α) — the standard boosting identity — where U is a
// uniform(0,1) draw taken from the configured source when one is set.
func (g *Gamma) Rand() float64 {
	var d, c float64
	if g.shape < 1 {
		d = g.shape + 1.0 - 1.0/3.0
		c = (1.0 / 3.0) / math.Sqrt(d)
		var v float64
		if g.src != nil {
			u := rand.New(g.src)
			v = u.Float64()
		} else {
			v = rand.Float64()
		}
		return (marsaglia(g.src, d, c) * math.Pow(v, 1.0/g.shape)) / g.rate
	} else {
		d = g.shape - 1.0/3.0
		c = (1.0 / 3.0) / math.Sqrt(d)
		return marsaglia(g.src, d, c) / g.rate
	}
}
// marsaglia runs the Marsaglia-Tsang rejection loop with precomputed
// constants d and c, drawing normal and uniform variates from src when
// one is provided.
// Two fixes over the previous version: the standard-normal sampler is
// built once instead of on every inner-loop iteration, and the uniform
// draw now honors src (it previously always used the global source,
// breaking determinism for seeded sources).
func marsaglia(src rand.Source, d, c float64) float64 {
	n, _ := NewNormalWithSource(0, 1, src)
	uniform := rand.Float64
	if src != nil {
		uniform = rand.New(src).Float64
	}
	for {
		var x, v float64
		// Resample until t = 1 + c*x is positive so v = t^3 > 0.
		for {
			x = n.Rand()
			t := 1.0 + c*x
			v = t * t * t
			if v > 0 {
				break
			}
		}
		u := uniform()
		x2 := x * x
		// Cheap squeeze test first, then the exact acceptance test.
		if u < 1-0.0331*x2*x2 {
			return d * v
		}
		if math.Log(u) < 0.5*x2+d*(1.0-v+math.Log(v)) {
			return d * v
		}
	}
}
// ToExponential caches the natural-parameter vector (α-1, -β) of the
// distribution's exponential-family form in g.natural.
func (g *Gamma) ToExponential() {
	vec, _ := linear.NewArrayRealVectorFromSlice([]float64{g.shape - 1, -g.rate})
	g.natural = vec
	// vec2, _ := linear.NewSizedArrayRealVector(2)
	// vec2.SetEntry(0, specfunc.Psi(g.shape)+math.Log(g.scale))
	// vec2.SetEntry(1, g.shape*g.scale)
	// g.Moment = vec2
}
// SufficientStatistics returns the gamma family's sufficient-statistic
// vector (ln x, x) for a single observation x.
func (g *Gamma) SufficientStatistics(x float64) linear.RealVector {
	vec, _ := linear.NewArrayRealVectorFromSlice([]float64{math.Log(x), x})
	return vec
}
// D50 illuminant conversion functions
package white
// D50_A functions
func D50_A_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.1573713, 0.0872411, -0.1268788},
{0.1199410, 0.9219445, -0.0455568},
{-0.0200278, 0.0303599, 0.4178345}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_A_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0530074, 0.1867709, -0.1255459},
{0.0205155, 0.9836342, -0.0041391},
{0.0000000, 0.0000000, 0.4312236}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_A_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.1392628, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 0.4312236}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_B functions
func D50_B_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0153440, 0.0093873, 0.0028087},
{0.0147849, 0.9856776, 0.0000806},
{0.0017354, -0.0036522, 1.0351412}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_B_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0048705, 0.0171595, 0.0056280},
{0.0018849, 0.9984968, -0.0003808},
{0.0000000, 0.0000000, 1.0327432}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_B_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0274834, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 1.0327432}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_C functions
func D50_C_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9648789, -0.0164148, 0.0809482},
{-0.0160520, 0.9941478, 0.0258478},
{0.0172576, -0.0297025, 1.4485796}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_C_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9871374, -0.0453262, 0.0899753},
{-0.0049788, 1.0039731, 0.0010028},
{0.0000000, 0.0000000, 1.4327505}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_C_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0171330, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 1.4327505}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_D55 functions
func D50_D55_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9820801, -0.0094156, 0.0233811},
{-0.0118389, 1.0049382, 0.0078491},
{0.0044511, -0.0073485, 1.1203775}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_D55_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9937887, -0.0218861, 0.0248120},
{-0.0024040, 1.0019180, 0.0004847},
{0.0000000, 0.0000000, 1.1166733}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_D55_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9923254, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 1.1166733}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_D65 functions
func D50_D65_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9555766, -0.0230393, 0.0631636},
{-0.0282895, 1.0099416, 0.0210077},
{0.0122982, -0.0204830, 1.3299098}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_D65_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9845002, -0.0546158, 0.0676324},
{-0.0059992, 1.0047864, 0.0012095},
{0.0000000, 0.0000000, 1.3194581}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_D65_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9857398, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 1.3194581}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_D75 functions
func D50_D75_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9369777, -0.0323563, 0.0952771},
{-0.0389795, 1.0115975, 0.0314918},
{0.0188243, -0.0315280, 1.5023535}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_D75_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9778997, -0.0778744, 0.1026211},
{-0.0085539, 1.0068249, 0.0017244},
{0.0000000, 0.0000000, 1.4861429}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_D75_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9849619, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 1.4861429}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_E functions
func D50_E_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0025535, 0.0036238, 0.0359837},
{0.0096914, 0.9819125, 0.0105947},
{0.0089181, -0.0160789, 1.2208770}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_E_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0001136, 0.0003968, 0.0427451},
{0.0000436, 0.9999663, -0.0000100},
{0.0000000, 0.0000000, 1.2118128}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_E_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0371077, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 1.2118128}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_F2 functions
func D50_F2_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0395725, 0.0215350, -0.0388405},
{0.0287482, 0.9834389, -0.0135220},
{-0.0067213, 0.0106555, 0.8116180}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_F2_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0134653, 0.0474454, -0.0397341},
{0.0052115, 0.9958424, -0.0010512},
{0.0000000, 0.0000000, 0.8166770}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_F2_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0286657, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 0.8166770}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_F7 functions
func D50_F7_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9557124, -0.0229756, 0.0628550},
{-0.0282266, 1.0099623, 0.0209090},
{0.0122325, -0.0203701, 1.3282016}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_F7_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9845499, -0.0544408, 0.0672896},
{-0.0059799, 1.0047711, 0.0012056},
{0.0000000, 0.0000000, 1.3178100}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_F7_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{0.9856775, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 1.3178100}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
// D50_F11 functions
func D50_F11_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0562303, 0.0310026, -0.0482555},
{0.0422615, 0.9734013, -0.0171480},
{-0.0078657, 0.0121201, 0.7743049}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_F11_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0189974, 0.0669372, -0.0482967},
{0.0073526, 0.9941346, -0.0014833},
{0.0000000, 0.0000000, 0.7798015}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
}
func D50_F11_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0470847, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 0.7798015}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
} | f64/white/d50.go | 0.5083 | 0.578597 | d50.go | starcoder |
package golgi
import (
"github.com/pkg/errors"
G "gorgonia.org/gorgonia"
"gorgonia.org/tensor"
)
// Compile-time check that *layerNorm satisfies Layer.
var (
	_ Layer = (*layerNorm)(nil)
)
// layerNorm performs layer normalization as per https://arxiv.org/abs/1607.06450
// The input's last axis is normalized to zero mean and unit variance,
// then fed through the embedded fully-connected layer.
type layerNorm struct {
	FC
	epsNode *G.Node // constant graph node holding eps; created in Init
	eps float64 // numerical-stability term added to the variance before sqrt
	flops int // FLOP estimate cached by Init when computeFLOPs is set
	computeFLOPs bool // whether Init should populate flops
}
// There is no Model() method. When Model() is called, it simply calls the FC's Model()
// There is no Type() method
// There is no Shape() method
// NewLayerNorm creates a layer-normalization layer from the given
// construction options. It does not initialize the layer — weights are
// created lazily by Init (called from Fwd).
// It panics if an option fails or returns a non-layerNorm Layer.
func NewLayerNorm(opts ...ConsOpt) Layer {
	l := &layerNorm{
		eps: 1e-5, // default variance perturbation
	}
	for _, opt := range opts {
		var o Layer
		var err error
		if o, err = opt(l); err != nil {
			panic(err)
		}
		l = o.(*layerNorm) // panics if not layernorm
	}
	// misc settings that have to be reset in case an option changed them:
	// layer norm always runs batched, with no activation and with a bias.
	l.batched = true
	l.act = nil
	l.nobias = false
	return l
}
// ConsLayerNorm is a construction function for a layer normalization
// layer that also initializes it against the given input, which must be
// a vector or a matrix. `in` has to be at least a *gorgonia.Node.
func ConsLayerNorm(in G.Input, opts ...ConsOpt) (retVal Layer, err error) {
	x := in.Node()
	inshape := x.Shape()
	if inshape.Dims() > 2 || inshape.Dims() == 0 {
		return nil, errors.Errorf("Expected shape is either a vector or a matrix")
	}
	// construct
	l := &layerNorm{
		eps: 1e-5, // default variance perturbation
	}
	for _, opt := range opts {
		var o Layer
		var ok bool
		if o, err = opt(l); err != nil {
			return nil, err
		}
		if l, ok = o.(*layerNorm); !ok {
			return nil, errors.Errorf("Construction Option returned a non layerNorm. Got %T instead", l)
		}
	}
	// misc settings that have to be reset in case an option changed them:
	// layer norm always runs batched, with no activation and with a bias.
	l.batched = true
	l.act = nil
	l.nobias = false
	if err = l.Init(x); err != nil {
		return nil, err
	}
	return l, nil
}
// Fwd builds the forward pass: it normalizes the last axis of the input
// to (x-μ)/sqrt(σ²+eps) and then applies the embedded FC layer.
// The layer is lazily initialized on first use.
func (l *layerNorm) Fwd(a G.Input) G.Result {
	if err := G.CheckOne(a); err != nil {
		return G.Err(errors.Wrap(err, "Fwd of layer norm failed."))
	}
	x := a.Node()
	xshp := x.Shape()
	last := xshp.Dims() - 1 // normalization axis
	// lazy initialization
	if !l.IsInitialized() {
		if err := l.Init(x); err != nil {
			return G.Err(errors.Wrapf(err, "Lazy initialization of *layerNorm %v", l.name))
		}
	}
	var err error
	var μ, xmμ, σ2, sd, newX *G.Node
	// μ: per-row mean over the last axis, kept broadcastable.
	if μ, err = G.KeepDims(x, false, func(x *G.Node) (*G.Node, error) { return G.Mean(x, last) }); err != nil {
		return G.Err(errors.Wrapf(err, "Unable to find mean of %dth dimension of %v", last, x))
	}
	// xmu: x-μ
	if xmμ, err = G.BroadcastSub(x, μ, nil, []byte{byte(last)}); err != nil {
		return G.Err(errors.Wrapf(err, "Unable to perform (x-μ). Shapes - x: %v, μ: %v. Broadcast on right axis: %v", x.Shape(), μ.Shape(), last))
	}
	// σ2: ((x-μ)^2)/N
	if σ2, err = G.Square(xmμ); err != nil {
		return G.Err(errors.Wrap(err, "Unable to perform (x-μ)^2"))
	}
	if σ2, err = G.KeepDims(σ2, false, func(x *G.Node) (*G.Node, error) { return G.Mean(x, last) }); err != nil {
		return G.Err(errors.Wrap(err, "Unable to calculate Mean Squared Variance"))
	}
	// purturb the variance before sqrting it
	if sd, err = G.Add(σ2, l.epsNode); err != nil {
		return G.Err(errors.Wrap(err, "Unable to purturb the variance"))
	}
	if sd, err = G.Sqrt(sd); err != nil {
		return G.Err(errors.Wrap(err, "Unable to sqrt the variance"))
	}
	// now we have a new x: (x-μ)/σ
	if newX, err = G.BroadcastHadamardDiv(xmμ, sd, nil, []byte{byte(last)}); err != nil {
		return G.Err(errors.Wrapf(err, "Unable to do (x-μ)/σ. Shapes - xmμ: %v, sd: %v. Broadcast on right axis: %v", xmμ.Shape(), sd.Shape(), last))
	}
	// the rest is straightforwards FC
	return l.FC.Fwd(newX)
}
// MakeLayerNorm constructs a layer-normalization layer like
// NewLayerNorm, but additionally marks the layer as initialized when
// the options supplied a weight or bias node, skipping later lazy Init.
// It panics if an option fails or returns a non-layerNorm Layer.
func MakeLayerNorm(opts ...ConsOpt) Layer {
	l := &layerNorm{
		eps: 1e-5, // default variance perturbation
	}
	for _, opt := range opts {
		var o Layer
		var err error
		if o, err = opt(l); err != nil {
			panic(err)
		}
		l = o.(*layerNorm) // panics if not layernorm
	}
	// misc settings that have to be reset in case an option changed them:
	// layer norm always runs batched, with no activation and with a bias.
	l.batched = true
	l.act = nil
	l.nobias = false
	if l.FC.w != nil || l.FC.b != nil {
		l.FC.initialized = true
	}
	return l
}
// Init creates the layer's graph nodes against the first input: the eps
// constant (typed to match the input's dtype), the weight matrix
// (initialized to ones) and the bias row (initialized to zeros).
// Only Float32 and Float64 inputs are supported. A vector input is
// treated as a 1-row matrix for shape purposes.
func (l *layerNorm) Init(xs ...*G.Node) (err error) {
	x := xs[0]
	// prep
	g := x.Graph()
	of := x.Dtype()
	X := x
	if x.IsVec() {
		X, err = G.Reshape(x, tensor.Shape{1, x.Shape()[0]})
		if err != nil {
			return errors.Wrapf(err, "While initializing layerNorm")
		}
	}
	xshp := X.Shape()
	switch of {
	case tensor.Float32:
		l.epsNode = G.NewConstant(float32(l.eps))
	case tensor.Float64:
		l.epsNode = G.NewConstant(l.eps)
	default:
		return errors.New("Layer Norm only supports Float32 or Float64")
	}
	l.w = G.NewMatrix(g, of, G.WithShape(xshp[1], l.size), G.WithInit(G.Ones()), G.WithName(l.name+"_W"))
	l.b = G.NewMatrix(g, of, G.WithShape(1, l.size), G.WithInit(G.Zeroes()), G.WithName(l.name+"_B"))
	l.initialized = true
	if l.computeFLOPs {
		l.flops = l.doComputeFLOPs(X.Shape())
	}
	return nil
}
// SetComputeFLOPs toggles FLOP accounting on this layer and on the
// embedded FC layer; the estimate itself is produced during Init.
// It always returns nil.
func (l *layerNorm) SetComputeFLOPs(toCompute bool) error {
	l.computeFLOPs = toCompute
	l.FC.computeFLOPs = toCompute
	return nil
}
// doComputeFLOPs estimates the FLOPs of one forward pass over an input
// of the given shape: each normalization step costs one op per element
// on top of the previous total, plus the embedded FC layer's cost.
func (l *layerNorm) doComputeFLOPs(input tensor.Shape) int {
	mean := input.TotalSize() // per-element subtraction for x-μ
	meanSq := mean * 2 // plus squaring (x-μ)^2
	variance := meanSq + mean // plus the mean-of-squares reduction
	variancePerturbed := variance + mean // plus adding eps
	sqrt := variancePerturbed + mean // plus the square root
	div := sqrt + mean // plus the final division by σ
	fc := l.FC.doComputeFLOPs(input)
	return div + fc
}
package graphite
import (
"fmt"
"strconv"
)
// Metric is an interface to be able to create new metric types easily.
// Each metric must have some methods to be able to be used by the Aggregator.
type Metric interface {
	// Update receives a generic value through interface{} to update its
	// internal value. The concrete type each implementation expects is
	// documented on the implementation; a mismatch panics via type assertion.
	Update(interface{})
	// Clear is used to reset the metric to the initial value.
	Clear()
	// Calculate is used to perform the necessary operations to retrieve the final value
	// that will be sent to graphite, standarised as a string.
	Calculate() string
}
// MetricSum is a running-total metric whose value accumulates over time.
type MetricSum struct {
	Sum int64
}

// Update adds the received amount (which must be an int) to the total.
func (m *MetricSum) Update(value interface{}) {
	amount := value.(int)
	m.Sum += int64(amount)
}

// Clear resets the running total to zero.
func (m *MetricSum) Clear() {
	m.Sum = 0
}

// Calculate renders the current total as a base-10 string.
func (m *MetricSum) Calculate() string {
	return strconv.FormatInt(m.Sum, 10)
}
// MetricAverage accumulates values and reports their arithmetic mean.
type MetricAverage struct {
	Sum int64
	Count int64
}

// Update folds another sample (which must be an int) into the running
// sum and count, so each Update changes the result of Calculate.
func (metric *MetricAverage) Update(value interface{}) {
	metric.Sum += int64(value.(int))
	metric.Count++
}

// Clear discards all accumulated samples.
func (metric *MetricAverage) Clear() {
	metric.Sum = 0
	metric.Count = 0
}

// Calculate returns the mean of the received samples formatted with six
// decimal places, or "0" when no samples have been received.
// The guard checks Count rather than Sum: the previous `Sum > 0` test
// wrongly reported "0" whenever the total was zero or negative even
// though samples had been recorded.
func (metric *MetricAverage) Calculate() string {
	if metric.Count > 0 {
		return fmt.Sprintf("%.6f", float64(metric.Sum)/float64(metric.Count))
	}
	return "0"
}
// MetricActive reports a boolean up/down status as "1" or "0".
type MetricActive struct {
	State bool
}

// Update sets the active/inactive status through a boolean.
func (metric *MetricActive) Update(value interface{}) {
	metric.State = value.(bool)
}

// Clear resets the status to inactive.
func (metric *MetricActive) Clear() {
	metric.State = false
}

// Calculate returns "1" when active and "0" otherwise.
// A plain conditional replaces the map literal the previous version
// allocated on every call.
func (metric *MetricActive) Calculate() string {
	if metric.State {
		return "1"
	}
	return "0"
}
package chromath
// RGBTransformer allows transforms from a user-defined RGB color space
// to XYZ with optional scaling, linearization and chromatic adaptation.
type RGBTransformer struct {
	ws *RGBSpace // the RGB working space converted from
	compander Compander // gamma/linearization function (may be nil)
	transform, transformInv Matrix // combined forward and inverse RGB↔XYZ matrices
	inScaler Scaler // optional input scaling (may be nil)
	outScale float64 // scale factor folded into transform (0 means none)
	spaceTm, adaptTm Matrix // primaries matrix and adaptation matrix, kept separately
}
// NewRGBTransformer initializes a transform operator from an RGB space to XYZ.
// ws specifies the parameters (primaries, working illuminant, and linearization function) of the working RGB space.
// adaptation specifies an optional adaptation method to transform into the target illuminant.
// targetIlluminant specifies the target illuminant of the XYZ conversion. It is ignored if adaptation is nil.
// The default is CIE D50 (the ICC Profile Connection illuminant).
// scaler is an optional scaling function applied to input RGB values.
// outScale provides a scaling factor into XYZ space, typical values are 1 and 100; 0 disables scaling.
// compander is an optional specification for the gamma function to linearize RGB points. This argument overrides
// any compander specified by ws.
func NewRGBTransformer(ws *RGBSpace, adaptation *Adaptation, targetIlluminant *IlluminantRef,
	scaler Scaler, outScale float64,
	compander Compander) *RGBTransformer {
	t := &RGBTransformer{}
	t.ws = ws
	// An explicit compander wins over the one configured on the space.
	if compander != nil {
		t.compander = compander
	} else if ws.Compander != nil {
		t.compander = ws.Compander.Init(ws)
	}
	t.inScaler = scaler
	// Base RGB->XYZ matrix from the space's primaries and illuminant.
	t.spaceTm = ws.XyYPrimary.RGBTransform(ws.IlluminantRef.XYZ)
	x := t.spaceTm
	if adaptation != nil {
		if targetIlluminant == nil {
			targetIlluminant = &IlluminantRefD50
		}
		// Only adapt when source and target illuminants actually differ.
		if targetIlluminant.XYZ != ws.IlluminantRef.XYZ {
			t.adaptTm = adaptation.Transform(ws.IlluminantRef.XYZ, targetIlluminant.XYZ)
			x = t.adaptTm.Mul3(x)
		}
	}
	// Fold the output scale into the combined matrix.
	if outScale != 0 {
		t.outScale = outScale
		x = x.Mul3(Matrix{outScale, 0, 0, 0, outScale, 0, 0, 0, outScale})
	}
	t.transform = x
	t.transformInv = x.Inv()
	return t
}
// Linearize applies the transformer's input scaling and gamma
// linearization (each only when configured) to the given RGB point.
func (t *RGBTransformer) Linearize(rgb RGB) RGB {
	point := Point(rgb)
	if t.inScaler != nil {
		point = t.inScaler.Scale(point)
	}
	if t.compander != nil {
		point = t.compander.Linearize(point)
	}
	return RGB(point)
}
// Convert transforms an RGB color point to XYZ using the receiver's
// configured scaling, linearization, and combined transform matrix.
func (t *RGBTransformer) Convert(rgb RGB) XYZ {
	point := Point(rgb)
	if t.inScaler != nil {
		point = t.inScaler.Scale(point)
	}
	if t.compander != nil {
		point = t.compander.Linearize(point)
	}
	return XYZ(t.transform.Mul3x1(point))
}
// Invert converts an XYZ point to RGB based on the parameters of the receiver.
// It runs the inverse pipeline of Convert: inverse matrix, then gamma
// companding, then inverse input scaling.
func (t *RGBTransformer) Invert(xyz XYZ) RGB {
	p := Point(xyz)
	p = t.transformInv.Mul3x1(p)
	if compander := t.compander; compander != nil {
		p = compander.Compand(p)
	}
	if scaler := t.inScaler; scaler != nil {
		p = scaler.ScaleInv(p)
	}
	return RGB(p)
}
// AdaptTM returns the computed adaptation matrix for the transformer.
func (t *RGBTransformer) AdaptTM() Matrix {
	return t.adaptTm
}

// SpaceTM returns the computed linear RGB⇒XYZ matrix for the transformer.
func (t *RGBTransformer) SpaceTM() Matrix {
	return t.spaceTm
}

// TM returns the computed complete transformation matrix for the transformer.
// This matrix includes adaptation, space transform, and output scaling.
func (t *RGBTransformer) TM() Matrix {
	return t.transform
} | vendor/github.com/jkl1337/go-chromath/rgb.go | 0.91323 | 0.547646 | rgb.go | starcoder
package matcher
import (
"fmt"
"log"
"math"
)
// A Pair is the basic thing that makes up a match.
type Pair struct {
	a string
	b string
}

// NewPair creates a new pairing between two items.
func NewPair(a, b string) Pair {
	return Pair{a: a, b: b}
}

// Eql reports whether two pairs contain the same two elements,
// regardless of element order.
//
// Bug fix: the previous test `(a.a==b.a || a.a==b.b) && (a.b==b.a || a.b==b.b)`
// was not symmetric — Pair{"x","x"}.Eql(Pair{"x","y"}) was true while the
// reverse comparison was false. Compare as an exact or swapped match instead.
func (a Pair) Eql(b Pair) bool {
	return (a.a == b.a && a.b == b.b) || (a.a == b.b && a.b == b.a)
}
// matches is a list of partner identifiers.
type matches []string

// match is one side of the pairing table: the partners of a single item and
// whether that item is an "A" (left-hand) element.
type match struct {
	m   matches
	isA bool
}

// newMatch allocates a match list of length l with the given side flag.
func newMatch(l int, is bool) *match {
	return &match{m: make(matches, l), isA: is}
}

// copy returns a deep copy of the match (its partner list is cloned).
func (m *match) copy() *match {
	m2 := newMatch(m.len(), m.isA)
	copy(m2.m, m.m)
	return m2
}

// len returns the number of partners.
func (m *match) len() int {
	return len(m.m)
}

// append adds a partner identifier.
func (m *match) append(b string) {
	m.m = append(m.m, b)
}

// delete removes the first occurrence of b and reports whether it was found.
func (m *match) delete(b string) bool {
	i := m.m.IndexOf(b)
	if i >= 0 {
		m.m = append(m.m[:i], m.m[i+1:]...)
		return true
	}
	return false
}

// String formats the partner list.
func (m match) String() string {
	return fmt.Sprintf("%v", m.m)
}

// IndexOf returns the index of t in m, or -1 if absent.
func (m matches) IndexOf(t string) int {
	for i, v := range m {
		if t == v {
			return i
		}
	}
	return -1
}

// MatchSet represents a collection of Pairs more or less.
type MatchSet struct {
	pairs map[string]*match
}

// String lists only the "A"-side entries. Map iteration order is random, so
// the ordering of the output is not deterministic.
func (m MatchSet) String() (s string) {
	for k, v := range m.pairs {
		if v.isA {
			s += fmt.Sprintf("%s(%t):%v ", k, v.isA, v.m)
		}
	}
	return s
}

// Copy returns a new deep copy of the MatchSet.
func (m *MatchSet) Copy() (n MatchSet) {
	n = NewMatchSet()
	for k, v := range m.pairs {
		n.pairs[k] = v.copy()
	}
	return
}

// NewMatchSet creates a new empty MatchSet collection.
func NewMatchSet() MatchSet {
	return MatchSet{make(map[string]*match)}
}
// AddPair adds a new pair of matched items to the collection. p.a is
// recorded as the "A" side and p.b as the "B" side; duplicate pairs are
// not de-duplicated.
func (m *MatchSet) AddPair(p Pair) {
	if _, ok := m.pairs[p.a]; !ok {
		m.pairs[p.a] = newMatch(0, true)
	}
	(m.pairs[p.a]).append(p.b)
	(m.pairs[p.a]).isA = true
	if _, ok := m.pairs[p.b]; !ok {
		m.pairs[p.b] = newMatch(0, false)
	}
	(m.pairs[p.b]).append(p.a)
}

// RemovePair takes a pair of matched items out of the collection if it is there.
// Both directions of the pairing are removed.
func (m *MatchSet) RemovePair(p Pair) {
	m.delete(p.a, p.b)
	m.delete(p.b, p.a)
}
// Purge takes a single ID, then removes any and all Pairs that contain at
// least half of it.
//
// The original implementation deferred each RemovePair call so that the
// slice being ranged over was not mutated mid-loop. Copying the partner
// list first is equivalent (same multiset of removals, hence the same final
// state) and runs the removals immediately instead of at function return.
func (m *MatchSet) Purge(i string) {
	v, ok := m.pairs[i]
	if !ok {
		return
	}
	partners := make(matches, v.len())
	copy(partners, v.m)
	for _, b := range partners {
		m.RemovePair(NewPair(i, b))
	}
}
// delete removes b from a's partner list, dropping a's entry entirely once
// its partner list becomes empty.
func (m *MatchSet) delete(a, b string) {
	if v, ok := m.pairs[a]; ok {
		if v.delete(b) {
			if v.len() <= 0 {
				delete(m.pairs, a)
			}
		}
	}
}

// NumPairs returns the number of total pairs/matches in this collection.
// Every pair is stored on both sides, hence the division by two.
func (m *MatchSet) NumPairs() (l int) {
	for _, v := range m.pairs {
		l += v.len()
	}
	return l / 2
}

// fewestPairs returns the identifier with the fewest partners, or "" when
// the set is empty. Ties are broken arbitrarily (map iteration order).
func (m *MatchSet) fewestPairs() (t string) {
	min := math.MaxInt32
	for k, v := range m.pairs {
		if v.len() < min {
			t = k
			min = v.len()
		}
	}
	return
}

// mostPairsOf returns the member of t with the most partners, or "" when t
// is empty.
func (m *MatchSet) mostPairsOf(t matches) (r string) {
	max := 0
	for _, p := range t {
		if m.pairs[p].len() > max {
			max = m.pairs[p].len()
			r = p
		}
	}
	return
}

// minMaxPair pairs the most constrained identifier (fewest partners) with
// its busiest partner (most partners).
func (m *MatchSet) minMaxPair() (p Pair) {
	p.a = m.fewestPairs()
	if "" != p.a {
		p.b = m.mostPairsOf(m.pairs[p.a].m)
	}
	return p
}
// QuantityOptimized returns an optimized matchset containing only a single
// pair per item. It attempts to get the largest number of possible pairs
// without duplicating any single item. The optional allowed argument bounds
// the recursion depth (default 1): each level greedily pairs the most
// constrained item with its busiest partner, then recurses on what remains.
func (m *MatchSet) QuantityOptimized(allowed ...int) (n MatchSet) {
	if len(allowed) <= 0 {
		allowed = []int{1}
	}
	n = NewMatchSet()
	if 0 == m.NumPairs() || allowed[0] <= 0 {
		return
	}
	c := m.Copy()
	nextSet := m.Copy()
	for p := c.minMaxPair(); "" != p.a && "" != p.b; p = c.minMaxPair() {
		if m.pairs[p.a].isA && m.pairs[p.b].isA {
			log.Fatal("Both cannot be A")
		} else if m.pairs[p.b].isA {
			// Keep the "A"-side identifier in p.a (idiomatic swap replaces
			// the original temp-variable shuffle).
			p.a, p.b = p.b, p.a
		}
		// Neither side may be paired again at this level.
		c.Purge(p.a)
		c.Purge(p.b)
		nextSet.Purge(p.b)
		n.AddPair(p)
	}
	n.Add(nextSet.QuantityOptimized(allowed[0] - 1))
	return
}
// Add merges every "A"-side pairing of b into m.
func (m *MatchSet) Add(b MatchSet) {
	for k, v := range b.pairs {
		if v.isA {
			for _, p := range v.m {
				m.AddPair(NewPair(k, p))
			}
		}
	}
}

// MatchesFor returns a copy of the slice of matches for the given identifier.
// An unknown identifier yields a nil slice.
func (m *MatchSet) MatchesFor(t string) (r matches) {
	if v, ok := m.pairs[t]; ok {
		r = make(matches, v.len())
		copy(r, v.m)
		return r
	}
	return r
} | matcher/matches.go | 0.696887 | 0.444203 | matches.go | starcoder
package dsa
// BSTNode is a single node of a binary search tree of ints.
type BSTNode struct {
	Elem  int
	left  *BSTNode
	right *BSTNode
}

// BinarySearchTree is an unbalanced binary search tree holding distinct ints.
type BinarySearchTree struct {
	root *BSTNode
}

// replace re-links parent so that oldNode's slot is occupied by newNode.
// A nil parent means oldNode was the root.
func (bst *BinarySearchTree) replace(oldNode, newNode, parent *BSTNode) {
	if parent == nil {
		bst.root = newNode
		return
	}
	if parent.left == oldNode {
		parent.left = newNode
	} else {
		parent.right = newNode
	}
}

// Insert adds x to the tree; duplicates are ignored.
func (bst *BinarySearchTree) Insert(x int) {
	if bst.root == nil {
		bst.root = &BSTNode{Elem: x}
		return
	}
	for t := bst.root; t != nil; {
		if x == t.Elem {
			return // x already present, nothing to do
		}
		if x > t.Elem {
			if t.right == nil {
				t.right = &BSTNode{Elem: x}
				return
			}
			t = t.right
		} else {
			if t.left == nil {
				t.left = &BSTNode{Elem: x}
				return
			}
			t = t.left
		}
	}
}

// Delete removes x from the tree if present.
func (bst *BinarySearchTree) Delete(x int) {
	var parent *BSTNode
	for t := bst.root; t != nil; {
		if x == t.Elem {
			if t.left != nil && t.right != nil {
				// Two children: copy the in-order predecessor's value (max
				// of the left subtree) into t, then unlink the predecessor.
				//
				// Bug fix: the previous flag-based walk did not advance
				// parent on the first step, so when the predecessor was
				// exactly t.left.right the stale parent (t itself) was
				// re-linked, silently detaching t's right subtree.
				parent = t
				n := t.left
				for ; n.right != nil; n = n.right {
					parent = n
				}
				t.Elem = n.Elem
				t = n
			}
			// t now has at most one child; splice it out.
			if t.left != nil {
				bst.replace(t, t.left, parent)
			} else {
				bst.replace(t, t.right, parent)
			}
			return
		} else if x > t.Elem {
			parent = t
			t = t.right
		} else {
			parent = t
			t = t.left
		}
	}
}

// Find returns the node holding x, or nil if absent.
func (bst *BinarySearchTree) Find(x int) *BSTNode {
	for t := bst.root; t != nil; {
		if x == t.Elem {
			return t
		} else if x > t.Elem {
			t = t.right
		} else {
			t = t.left
		}
	}
	return nil
}

// FindMax returns the maximum node of the subtree rooted at node.
// NOTE(review): node must be non-nil (a nil node panics), as in the original.
func (bst *BinarySearchTree) FindMax(node *BSTNode) *BSTNode {
	t := node
	for ; t.right != nil; t = t.right {
	}
	return t
}

// FindMin returns the minimum node of the subtree rooted at node.
// NOTE(review): node must be non-nil (a nil node panics), as in the original.
func (bst *BinarySearchTree) FindMin(node *BSTNode) *BSTNode {
	t := node
	for ; t.left != nil; t = t.left {
	}
	return t
}

// InOrder appends the subtree's elements to result in ascending order.
func (bst *BinarySearchTree) InOrder(node *BSTNode, result *[]int) {
	if node != nil {
		bst.InOrder(node.left, result)
		*result = append(*result, node.Elem)
		bst.InOrder(node.right, result)
	}
}
package fbe
import (
	"encoding/binary"
	"math"
)
// Buffer is a Fast Binary Encoding buffer based on a dynamic byte array.
type Buffer struct {
	// Bytes memory buffer; its length is the buffer capacity.
	data []byte
	// Used size of the memory buffer (<= len(data)).
	size int
	// Current read/write offset within the buffer.
	offset int
}
// NewEmptyBuffer creates an empty buffer.
func NewEmptyBuffer() *Buffer {
	return &Buffer{data: make([]byte, 0)}
}

// NewCapacityBuffer creates an empty buffer with the given capacity.
func NewCapacityBuffer(capacity int) *Buffer {
	return &Buffer{data: make([]byte, capacity)}
}

// NewAttached creates a buffer attached to the given bytes memory buffer
// (offset 0, size len(buffer)).
func NewAttached(buffer []byte) *Buffer {
	result := NewEmptyBuffer()
	result.Attach(buffer)
	return result
}

// NewAttachedBytes creates a buffer attached to a bytes memory buffer with
// an explicit offset and size.
func NewAttachedBytes(buffer []byte, offset int, size int) *Buffer {
	result := NewEmptyBuffer()
	result.AttachBytes(buffer, offset, size)
	return result
}

// NewAttachedBuffer creates a buffer sharing another buffer's data and size.
func NewAttachedBuffer(buffer *Buffer) *Buffer {
	result := NewEmptyBuffer()
	result.AttachBuffer(buffer)
	return result
}
// Empty reports whether the buffer holds no usable data.
func (b *Buffer) Empty() bool { return (len(b.data) == 0) || (b.size <= 0) }

// Data returns the underlying bytes memory buffer.
func (b *Buffer) Data() []byte { return b.data }

// Capacity returns the length of the underlying byte array.
func (b *Buffer) Capacity() int { return len(b.data) }

// Size returns the used size of the memory buffer.
func (b *Buffer) Size() int { return b.size }

// Offset returns the current buffer offset.
func (b *Buffer) Offset() int { return b.offset }

// AttachNew attaches an empty memory buffer, resetting size and offset.
func (b *Buffer) AttachNew() {
	b.data = make([]byte, 0)
	b.size = 0
	b.offset = 0
}

// AttachCapacity attaches an empty memory buffer with the given capacity.
func (b *Buffer) AttachCapacity(capacity int) {
	b.data = make([]byte, capacity)
	b.size = 0
	b.offset = 0
}
// Attach attaches the given bytes memory buffer (offset 0, size len(buffer)).
func (b *Buffer) Attach(buffer []byte) {
	b.AttachBytes(buffer, 0, len(buffer))
}

// AttachBytes attaches the given bytes memory buffer with offset and size.
// NOTE(review): size <= 0 panics, so attaching an empty slice (including
// Attach/AttachBuffer of an empty buffer) panics — confirm this is intended.
func (b *Buffer) AttachBytes(buffer []byte, offset int, size int) {
	if len(buffer) < size {
		panic("invalid buffer")
	}
	if size <= 0 {
		panic("invalid size")
	}
	if offset > size {
		panic("invalid offset")
	}
	b.data = buffer
	b.size = size
	b.offset = offset
}

// AttachBuffer attaches another buffer's data and size (offset 0).
func (b *Buffer) AttachBuffer(buffer *Buffer) {
	b.AttachBytes(buffer.data, 0, buffer.size)
}
// Allocate grows the write buffer by size bytes and returns the offset of
// the newly allocated memory block.
func (b *Buffer) Allocate(size int) int {
	if size < 0 {
		panic("invalid allocation size")
	}
	offset := b.size
	total := b.size + size
	// Fast path: the existing capacity already covers the request.
	if total <= len(b.data) {
		b.size = total
		return offset
	}
	// Grow geometrically (at least double) to amortize reallocations.
	capacity := 2 * len(b.data)
	if capacity < total {
		capacity = total
	}
	grown := make([]byte, capacity)
	copy(grown, b.data[:b.size])
	b.data = grown
	b.size = total
	return offset
}
// Remove deletes size bytes at offset from the current write buffer,
// shifting the tail down and keeping the read/write offset consistent.
func (b *Buffer) Remove(offset int, size int) {
	if (offset + size) > len(b.data) {
		panic("invalid offset & size")
	}
	copy(b.data[offset:], b.data[offset+size:])
	b.size -= size
	if b.offset >= (offset + size) {
		// Offset lies entirely after the removed span: slide it down.
		b.offset -= size
	} else if b.offset >= offset {
		// Offset lies inside the removed span: clamp to its start. (The
		// original `b.offset -= b.offset - offset` was the same assignment
		// written obscurely.) Then keep it within the shrunken size.
		b.offset = offset
		if b.offset > b.size {
			b.offset = b.size
		}
	}
}
// Reserve ensures the current write buffer has at least the given capacity,
// growing geometrically (at least doubling) when reallocation is needed.
func (b *Buffer) Reserve(capacity int) {
	if capacity < 0 {
		panic("invalid reserve capacity")
	}
	if capacity > len(b.data) {
		length := 2 * len(b.data)
		if length < capacity {
			length = capacity
		}
		data := make([]byte, length)
		copy(data, b.data[:b.size])
		b.data = data
	}
}
// Resize resizes the current write buffer, growing capacity if needed and
// clamping the offset to the new size.
func (b *Buffer) Resize(size int) {
	b.Reserve(size)
	b.size = size
	if b.offset > b.size {
		b.offset = b.size
	}
}

// Reset clears the current write buffer and its offset; capacity is kept.
func (b *Buffer) Reset() {
	b.size = 0
	b.offset = 0
}

// Shift moves the current write buffer offset forward.
func (b *Buffer) Shift(offset int) {
	b.offset += offset
}

// Unshift moves the current write buffer offset backward.
func (b *Buffer) Unshift(offset int) {
	b.offset -= offset
}
// Buffer I/O methods. Multi-byte integers use little-endian byte order.

// ReadBool reads a bool from the buffer; any non-zero byte is true.
func ReadBool(buffer []byte, offset int) bool {
	return buffer[offset] != 0
}

// ReadByte reads a single byte from the buffer.
func ReadByte(buffer []byte, offset int) byte {
	return buffer[offset]
}

// ReadChar reads a single byte character from the buffer.
func ReadChar(buffer []byte, offset int) rune {
	return rune(ReadUInt8(buffer, offset))
}

// ReadWChar reads a four bytes character from the buffer.
func ReadWChar(buffer []byte, offset int) rune {
	return rune(ReadUInt32(buffer, offset))
}

// ReadInt8 reads a signed 8-bits integer from the buffer.
func ReadInt8(buffer []byte, offset int) int8 {
	return int8(buffer[offset])
}

// ReadUInt8 reads an unsigned 8-bits integer from the buffer.
func ReadUInt8(buffer []byte, offset int) uint8 {
	return uint8(buffer[offset])
}

// ReadInt16 reads a little-endian signed 16-bits integer from the buffer.
func ReadInt16(buffer []byte, offset int) int16 {
	return (int16(buffer[offset + 0]) << 0) | (int16(buffer[offset + 1]) << 8)
}

// ReadUInt16 reads a little-endian unsigned 16-bits integer from the buffer.
func ReadUInt16(buffer []byte, offset int) uint16 {
	return (uint16(buffer[offset + 0]) << 0) | (uint16(buffer[offset + 1]) << 8)
}
// ReadInt32 reads a little-endian signed 32-bits integer from the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func ReadInt32(buffer []byte, offset int) int32 {
	return int32(binary.LittleEndian.Uint32(buffer[offset:]))
}
// ReadUInt32 reads a little-endian unsigned 32-bits integer from the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func ReadUInt32(buffer []byte, offset int) uint32 {
	return binary.LittleEndian.Uint32(buffer[offset:])
}
// ReadInt64 reads a little-endian signed 64-bits integer from the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func ReadInt64(buffer []byte, offset int) int64 {
	return int64(binary.LittleEndian.Uint64(buffer[offset:]))
}
// ReadUInt64 reads a little-endian unsigned 64-bits integer from the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func ReadUInt64(buffer []byte, offset int) uint64 {
	return binary.LittleEndian.Uint64(buffer[offset:])
}
// ReadFloat reads a little-endian IEEE-754 float32 from the buffer.
func ReadFloat(buffer []byte, offset int) float32 {
	bits := ReadUInt32(buffer, offset)
	return math.Float32frombits(bits)
}

// ReadDouble reads a little-endian IEEE-754 float64 from the buffer.
func ReadDouble(buffer []byte, offset int) float64 {
	bits := ReadUInt64(buffer, offset)
	return math.Float64frombits(bits)
}

// ReadBytes returns size bytes at offset. Note: this is a subslice that
// aliases the buffer's backing array, not a copy.
func ReadBytes(buffer []byte, offset int, size int) []byte {
	return buffer[offset:offset + size]
}

// ReadString decodes size bytes at offset as a string (this copies the bytes).
func ReadString(buffer []byte, offset int, size int) string {
	return string(buffer[offset:offset + size])
}

// ReadTimestamp reads a 64-bit little-endian nanosecond count and converts
// it to a Timestamp.
func ReadTimestamp(buffer []byte, offset int) Timestamp {
	nanoseconds := ReadUInt64(buffer, offset)
	return TimestampFromNanoseconds(nanoseconds)
}

// ReadUUID reads 16 bytes at offset and converts them to a UUID.
func ReadUUID(buffer []byte, offset int) UUID {
	bytes := ReadBytes(buffer, offset, 16)
	return UUIDFromBytes(bytes)
}
// WriteBool writes a bool into the buffer as a single byte: 1 for true,
// 0 for false.
func WriteBool(buffer []byte, offset int, value bool) {
	encoded := byte(0)
	if value {
		encoded = 1
	}
	buffer[offset] = encoded
}
// WriteByte writes a single byte into the buffer.
func WriteByte(buffer []byte, offset int, value byte) {
	buffer[offset] = value
}

// WriteChar writes a single byte character into the buffer.
func WriteChar(buffer []byte, offset int, value rune) {
	WriteUInt8(buffer, offset, uint8(value))
}

// WriteWChar writes a four bytes character into the buffer (little-endian).
func WriteWChar(buffer []byte, offset int, value rune) {
	WriteUInt32(buffer, offset, uint32(value))
}

// WriteInt8 writes a signed 8-bits integer into the buffer.
func WriteInt8(buffer []byte, offset int, value int8) {
	buffer[offset] = byte(value)
}

// WriteUInt8 writes an unsigned 8-bits integer into the buffer.
func WriteUInt8(buffer []byte, offset int, value uint8) {
	buffer[offset] = byte(value)
}

// WriteInt16 writes a little-endian signed 16-bits integer into the buffer.
func WriteInt16(buffer []byte, offset int, value int16) {
	buffer[offset + 0] = byte(value >> 0)
	buffer[offset + 1] = byte(value >> 8)
}

// WriteUInt16 writes a little-endian unsigned 16-bits integer into the buffer.
func WriteUInt16(buffer []byte, offset int, value uint16) {
	buffer[offset + 0] = byte(value >> 0)
	buffer[offset + 1] = byte(value >> 8)
}
// WriteInt32 writes a little-endian signed 32-bits integer into the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func WriteInt32(buffer []byte, offset int, value int32) {
	binary.LittleEndian.PutUint32(buffer[offset:], uint32(value))
}
// WriteUInt32 writes a little-endian unsigned 32-bits integer into the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func WriteUInt32(buffer []byte, offset int, value uint32) {
	binary.LittleEndian.PutUint32(buffer[offset:], value)
}
// WriteInt64 writes a little-endian signed 64-bits integer into the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func WriteInt64(buffer []byte, offset int, value int64) {
	binary.LittleEndian.PutUint64(buffer[offset:], uint64(value))
}
// WriteUInt64 writes a little-endian unsigned 64-bits integer into the buffer.
// Uses the standard library's encoding/binary instead of hand-rolled shifts;
// semantics are identical, including the panic on a too-short buffer.
func WriteUInt64(buffer []byte, offset int, value uint64) {
	binary.LittleEndian.PutUint64(buffer[offset:], value)
}
// WriteFloat writes a float32 into the buffer as little-endian IEEE-754 bits.
func WriteFloat(buffer []byte, offset int, value float32) {
	WriteUInt32(buffer, offset, math.Float32bits(value))
}

// WriteDouble writes a float64 into the buffer as little-endian IEEE-754 bits.
func WriteDouble(buffer []byte, offset int, value float64) {
	WriteUInt64(buffer, offset, math.Float64bits(value))
}

// WriteBytes copies value into the buffer at offset; panics if it does not fit.
func WriteBytes(buffer []byte, offset int, value []byte) {
	copy(buffer[offset:offset + len(value)], value)
}
// WriteSlice copies valueSize bytes of value, starting at valueOffset, into
// the buffer at offset.
//
// Bug fix: the destination slice was bounded by len(value) instead of
// valueSize, which panicked whenever offset+len(value) exceeded the buffer
// even though only valueSize bytes were being written.
func WriteSlice(buffer []byte, offset int, value []byte, valueOffset int, valueSize int) {
	copy(buffer[offset:offset+valueSize], value[valueOffset:valueOffset+valueSize])
}
// WriteCount fills valueCount bytes of the buffer, starting at offset,
// with the byte value.
func WriteCount(buffer []byte, offset int, value byte, valueCount int) {
	for i := offset; i < offset+valueCount; i++ {
		buffer[i] = value
	}
}
// WriteString writes the UTF-8 bytes of value into the buffer at offset.
func WriteString(buffer []byte, offset int, value string) {
	WriteBytes(buffer, offset, []byte(value))
}

// WriteTimestamp writes a timestamp as a 64-bit little-endian nanosecond count.
func WriteTimestamp(buffer []byte, offset int, value Timestamp) {
	nanoseconds := uint64(value.UnixNano())
	WriteUInt64(buffer, offset, nanoseconds)
}

// WriteUUID writes the binary form of a UUID into the buffer.
// The MarshalBinary error is ignored; presumably it cannot fail for a
// valid UUID — TODO confirm against the UUID implementation.
func WriteUUID(buffer []byte, offset int, value UUID) {
	bytes, _ := value.MarshalBinary()
	WriteBytes(buffer, offset, bytes)
} | projects/Go/proto/fbe/Buffer.go | 0.857112 | 0.599749 | Buffer.go | starcoder
package main
var schemas = `
{
"API": {
"createAsset": {
"description": "Create an asset. One argument, a JSON encoded event. AssetID is required with zero or more writable properties. Establishes an initial asset state.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "A set of fields that constitute the writable fields in an asset's state. AssetID is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"assetType": {
"description": "Type of asset",
"type": "string"
},
"assetData": {
"description": "AssetData",
"properties": {},
"type": "object"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "createAsset function",
"enum": [
"createAsset"
],
"type": "string"
}
},
"type": "object"
},
"deleteAllAssets": {
"description": "Delete the state of all assets. No arguments are accepted. For each managed asset, the state and history are erased, and the asset is removed if necessary from recent states.",
"properties": {
"args": {
"description": "accepts no arguments",
"items": {},
"maxItems": 0,
"minItems": 0,
"type": "array"
},
"function": {
"description": "deleteAllAssets function",
"enum": [
"deleteAllAssets"
],
"type": "string"
}
},
"type": "object"
},
"deleteAsset": {
"description": "Delete an asset, its history, and any recent state activity. Argument is a JSON encoded string containing only an assetID.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "An object containing only an assetID for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "deleteAsset function",
"enum": [
"deleteAsset"
],
"type": "string"
}
},
"type": "object"
},
"deletePropertiesFromAsset": {
"description": "Delete one or more properties from an asset. Argument is a JSON encoded string containing an AssetID and an array of qualified property names. An example would be {'assetID':'A1',['event.common.carrier', 'event.customer.temperature']} and the result of that invoke would be the removal of the carrier field and the temperature field with a recalculation of the alert and compliance status.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "Requested assetID with a list or qualified property names.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"qualPropsToDelete": {
"items": {
"description": "The qualified name of a property. E.g. 'event.common.carrier', 'event.custom.temperature', etc.",
"type": "string"
},
"type": "array"
}
},
"required": [
"assetID",
"qualPropsToDelete"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "deletePropertiesFromAsset function",
"enum": [
"deletePropertiesFromAsset"
],
"type": "string"
}
},
"type": "object"
},
"init": {
"description": "Initializes the contract when started, either by deployment or by peer restart.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "event sent to init on deployment",
"properties": {
"nickname": {
"default": "TRADELANE",
"description": "The nickname of the current contract",
"type": "string"
},
"version": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "init function",
"enum": [
"init"
],
"type": "string"
}
},
"type": "object"
},
"readAllAssets": {
"description": "Returns the state of all assets as an array of JSON encoded strings. Accepts no arguments. For each managed asset, the state is read from the ledger and added to the returned array. Array is sorted by assetID.",
"properties": {
"args": {
"description": "accepts no arguments",
"items": {},
"maxItems": 0,
"minItems": 0,
"type": "array"
},
"function": {
"description": "readAllAssets function",
"enum": [
"readAllAssets"
],
"type": "string"
},
"result": {
"description": "an array of states, often for different assets",
"items": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"inCompliance": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "RFC3339nanos formatted timestamp.",
"type": "string"
}
},
"type": "object"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"readAsset": {
"description": "Returns the state an asset. Argument is a JSON encoded string. AssetID is the only accepted property.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "An object containing only an assetID for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "readAsset function",
"enum": [
"readAsset"
],
"type": "string"
},
"result": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"inCompliance": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "RFC3339nanos formatted timestamp.",
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"readAssetHistory": {
"description": "Requests a specified number of history states for an assets. Returns an array of states sorted with the most recent first. AssetID is required and count is optional. A missing count, a count of zero, or too large a count returns all existing history states.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "Requested assetID with item count.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"count": {
"type": "integer"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "readAssetHistory function",
"enum": [
"readAssetHistory"
],
"type": "string"
},
"result": {
"description": "an array of states for one asset sorted by timestamp with the most recent entry first",
"items": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"inCompliance": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "RFC3339nanos formatted timestamp.",
"type": "string"
}
},
"type": "object"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"readRecentStates": {
"description": "Returns the state of recently updated assets as an array of objects sorted with the most recently updated asset first. Each asset appears exactly once up to a maxmum of 20 in this version of the contract.",
"properties": {
"args": {
"description": "accepts no arguments",
"items": {},
"maxItems": 0,
"minItems": 0,
"type": "array"
},
"function": {
"description": "readRecentStates function",
"enum": [
"readRecentStates"
],
"type": "string"
},
"result": {
"description": "an array of states for one asset sorted by timestamp with the most recent entry first",
"items": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"inCompliance": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "RFC3339nanos formatted timestamp.",
"type": "string"
}
},
"type": "object"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"setCreateOnUpdate": {
"description": "Allow updateAsset to redirect to createAsset when assetID does not exist.",
"properties": {
"args": {
"description": "True for redirect allowed, false for error on asset does not exist.",
"items": {
"setCreateOnUpdate": {
"type": "boolean"
}
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "setCreateOnUpdate function",
"enum": [
"setCreateOnUpdate"
],
"type": "string"
}
},
"type": "object"
},
"setLoggingLevel": {
"description": "Sets the logging level in the contract.",
"properties": {
"args": {
"description": "logging levels indicate what you see",
"items": {
"logLevel": {
"enum": [
"CRITICAL",
"ERROR",
"WARNING",
"NOTICE",
"INFO",
"DEBUG"
],
"type": "string"
}
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "setLoggingLevel function",
"enum": [
"setLoggingLevel"
],
"type": "string"
}
},
"type": "object"
},
"updateAsset": {
"description": "Update the state of an asset. The one argument is a JSON encoded event. AssetID is required along with one or more writable properties. Establishes the next asset state. ",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "A set of fields that constitute the writable fields in an asset's state. AssetID is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "RFC3339nanos formatted timestamp.",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "updateAsset function",
"enum": [
"updateAsset"
],
"type": "string"
}
},
"type": "object"
}
},
"objectModelSchemas": {
"assetIDKey": {
"description": "An object containing only an assetID for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"assetIDandCount": {
"description": "Requested assetID with item count.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"count": {
"type": "integer"
}
},
"required": [
"assetID"
],
"type": "object"
},
"event": {
"description": "A set of fields that constitute the writable fields in an asset's state. AssetID is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "RFC3339nanos formatted timestamp.",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"initEvent": {
"description": "event sent to init on deployment",
"properties": {
"nickname": {
"default": "TRADELANE",
"description": "The nickname of the current contract",
"type": "string"
},
"version": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"state": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"alerts": {
"description": "Active means that the alert is in force in this state. Raised means that the alert became active as the result of the event that generated this state. Cleared means that the alert became inactive as the result of the event that generated this state.",
"properties": {
"active": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"cleared": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
},
"raised": {
"items": {
"description": "Alerts are triggered or cleared by rules that are run against incoming events. This contract considers any active alert to created a state of non-compliance.",
"enum": [
"OVERTTEMP"
],
"type": "string"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"carrier": {
"description": "transport entity currently in possession of asset",
"type": "string"
},
"extension": {
"description": "Application-managed state. Opaque to contract.",
"properties": {},
"type": "object"
},
"inCompliance": {
"description": "A contract-specific indication that this asset is compliant.",
"type": "boolean"
},
"lastEvent": {
"description": "function and string parameter that created this state object",
"properties": {
"args": {
"items": {
"description": "parameters to the function, usually args[0] is populated with a JSON encoded event object",
"type": "string"
},
"type": "array"
},
"function": {
"description": "function that created this state object",
"type": "string"
},
"redirectedFromFunction": {
"description": "function that originally received the event",
"type": "string"
}
},
"type": "object"
},
"location": {
"description": "A geographical coordinate",
"properties": {
"latitude": {
"type": "number"
},
"longitude": {
"type": "number"
}
},
"type": "object"
},
"temperature": {
"description": "Temperature of the asset in CELSIUS.",
"type": "number"
},
"timestamp": {
"description": "RFC3339nanos formatted timestamp.",
"type": "string"
}
},
"type": "object"
}
}
}` | contracts/industry/building/building_sensor_hyperledger/schemas.go | 0.849441 | 0.600452 | schemas.go | starcoder |
package configifytest
import (
"time"
"github.com/robsignorelli/configify"
"github.com/stretchr/testify/suite"
)
// SourceSuite is a testify suite that adds helpers for asserting whether or
// not the configify source under test holds a specific value for a given key.
type SourceSuite struct {
	suite.Suite
	// Source is the configify source that all Expect* helpers read from;
	// assign it before running the suite.
	Source configify.Source
}
// checkOK verifies the "found" flag returned by a source lookup against the
// caller's expectation, reporting a descriptive failure when they disagree.
func (suite SourceSuite) checkOK(key string, expectedOK bool, ok bool) bool {
	if expectedOK {
		return suite.True(ok, "Value for '%s' was not found", key)
	}
	return suite.False(ok, "Value for '%s' should not exist", key)
}
// ExpectString reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectString(key string, expected string, expectedOK bool) bool {
	value, found := suite.Source.String(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectStringSlice reads the config value from the suite's source, asserting
// both that it exists (or not) as expected and that its elements match
// (order-insensitive).
func (suite SourceSuite) ExpectStringSlice(key string, expected []string, expectedOK bool) bool {
	value, found := suite.Source.StringSlice(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.ElementsMatch(expected, value)
}
// ExpectInt reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectInt(key string, expected int, expectedOK bool) bool {
	value, found := suite.Source.Int(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectInt8 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectInt8(key string, expected int8, expectedOK bool) bool {
	value, found := suite.Source.Int8(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectInt16 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectInt16(key string, expected int16, expectedOK bool) bool {
	value, found := suite.Source.Int16(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectInt32 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectInt32(key string, expected int32, expectedOK bool) bool {
	value, found := suite.Source.Int32(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectInt64 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectInt64(key string, expected int64, expectedOK bool) bool {
	value, found := suite.Source.Int64(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectUint reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectUint(key string, expected uint, expectedOK bool) bool {
	value, found := suite.Source.Uint(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectUint8 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectUint8(key string, expected uint8, expectedOK bool) bool {
	value, found := suite.Source.Uint8(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectUint16 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectUint16(key string, expected uint16, expectedOK bool) bool {
	value, found := suite.Source.Uint16(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectUint32 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectUint32(key string, expected uint32, expectedOK bool) bool {
	value, found := suite.Source.Uint32(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectUint64 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectUint64(key string, expected uint64, expectedOK bool) bool {
	value, found := suite.Source.Uint64(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectFloat32 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectFloat32(key string, expected float32, expectedOK bool) bool {
	value, found := suite.Source.Float32(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectFloat64 reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectFloat64(key string, expected float64, expectedOK bool) bool {
	value, found := suite.Source.Float64(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectBool reads the config value from the suite's source, asserting both
// that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectBool(key string, expected bool, expectedOK bool) bool {
	value, found := suite.Source.Bool(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectDuration reads the config value from the suite's source, asserting
// both that it exists (or not) as expected and that its value matches.
func (suite SourceSuite) ExpectDuration(key string, expected time.Duration, expectedOK bool) bool {
	value, found := suite.Source.Duration(key)
	if !suite.checkOK(key, expectedOK, found) {
		return false
	}
	return suite.Equal(expected, value)
}
// ExpectTime reads the config value on the suite's source and asserts that exists (or not)
// as you expect and that the value for the config is what you expect.
func (suite SourceSuite) ExpectTime(key string, expected time.Time, expectedOK bool) bool {
output, ok := suite.Source.Time(key)
return suite.checkOK(key, expectedOK, ok) && suite.Equal(expected, output)
} | configifytest/source_suite.go | 0.694406 | 0.626767 | source_suite.go | starcoder |
package brotli
/* Lookup table to map the previous two bytes to a context id.
There are four different context modeling modes defined here:
contextLSB6: context id is the least significant 6 bits of the last byte,
contextMSB6: context id is the most significant 6 bits of the last byte,
contextUTF8: second-order context model tuned for UTF8-encoded text,
contextSigned: second-order context model tuned for signed integers.
If |p1| and |p2| are the previous two bytes, and |mode| is current context
mode, we calculate the context as:
context = ContextLut(mode)[p1] | ContextLut(mode)[p2 + 256].
For contextUTF8 mode, if the previous two bytes are ASCII characters
(i.e. < 128), this will be equivalent to
context = 4 * context1(p1) + context2(p2),
where context1 is based on the previous byte in the following way:
0 : non-ASCII control
1 : \t, \n, \r
2 : space
3 : other punctuation
4 : " '
5 : %
6 : ( < [ {
7 : ) > ] }
8 : , ; :
9 : .
10 : =
11 : number
12 : upper-case vowel
13 : upper-case consonant
14 : lower-case vowel
15 : lower-case consonant
and context2 is based on the second last byte:
0 : control, space
1 : punctuation
2 : upper-case letter, number
3 : lower-case letter
If the last byte is ASCII, and the second last byte is not (in a valid UTF8
stream it will be a continuation byte, value between 128 and 191), the
context is the same as if the second last byte was an ASCII control or space.
If the last byte is a UTF8 lead byte (value >= 192), then the next byte will
be a continuation byte and the context id is 2 or 3 depending on the LSB of
the last byte and to a lesser extent on the second last byte if it is ASCII.
If the last byte is a UTF8 continuation byte, the second last byte can be:
- continuation byte: the next byte is probably ASCII or lead byte (assuming
4-byte UTF8 characters are rare) and the context id is 0 or 1.
- lead byte (192 - 207): next byte is ASCII or lead byte, context is 0 or 1
- lead byte (208 - 255): next byte is continuation byte, context is 2 or 3
The possible value combinations of the previous two bytes, the range of
context ids and the type of the next byte is summarized in the table below:
|--------\-----------------------------------------------------------------|
| \ Last byte |
| Second \---------------------------------------------------------------|
| last byte \ ASCII | cont. byte | lead byte |
| \ (0-127) | (128-191) | (192-) |
|=============|===================|=====================|==================|
| ASCII | next: ASCII/lead | not valid | next: cont. |
| (0-127) | context: 4 - 63 | | context: 2 - 3 |
|-------------|-------------------|---------------------|------------------|
| cont. byte | next: ASCII/lead | next: ASCII/lead | next: cont. |
| (128-191) | context: 4 - 63 | context: 0 - 1 | context: 2 - 3 |
|-------------|-------------------|---------------------|------------------|
| lead byte | not valid | next: ASCII/lead | not valid |
| (192-207) | | context: 0 - 1 | |
|-------------|-------------------|---------------------|------------------|
| lead byte | not valid | next: cont. | not valid |
| (208-) | | context: 2 - 3 | |
|-------------|-------------------|---------------------|------------------|
*/
// Context modeling modes for the literal context lookup; see the package-level
// comment above for a full description of each mode.
const (
	contextLSB6   = 0 // context id = low 6 bits of the previous byte
	contextMSB6   = 1 // context id = high 6 bits of the previous byte
	contextUTF8   = 2 // second-order model tuned for UTF8-encoded text
	contextSigned = 3 // second-order model tuned for signed integers
)
/* Common context lookup table for all context modes. */
var kContextLookup = [2048]byte{
/* CONTEXT_LSB6, last byte. */
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
/* CONTEXT_LSB6, second last byte, */
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
/* CONTEXT_MSB6, last byte. */
0,
0,
0,
0,
1,
1,
1,
1,
2,
2,
2,
2,
3,
3,
3,
3,
4,
4,
4,
4,
5,
5,
5,
5,
6,
6,
6,
6,
7,
7,
7,
7,
8,
8,
8,
8,
9,
9,
9,
9,
10,
10,
10,
10,
11,
11,
11,
11,
12,
12,
12,
12,
13,
13,
13,
13,
14,
14,
14,
14,
15,
15,
15,
15,
16,
16,
16,
16,
17,
17,
17,
17,
18,
18,
18,
18,
19,
19,
19,
19,
20,
20,
20,
20,
21,
21,
21,
21,
22,
22,
22,
22,
23,
23,
23,
23,
24,
24,
24,
24,
25,
25,
25,
25,
26,
26,
26,
26,
27,
27,
27,
27,
28,
28,
28,
28,
29,
29,
29,
29,
30,
30,
30,
30,
31,
31,
31,
31,
32,
32,
32,
32,
33,
33,
33,
33,
34,
34,
34,
34,
35,
35,
35,
35,
36,
36,
36,
36,
37,
37,
37,
37,
38,
38,
38,
38,
39,
39,
39,
39,
40,
40,
40,
40,
41,
41,
41,
41,
42,
42,
42,
42,
43,
43,
43,
43,
44,
44,
44,
44,
45,
45,
45,
45,
46,
46,
46,
46,
47,
47,
47,
47,
48,
48,
48,
48,
49,
49,
49,
49,
50,
50,
50,
50,
51,
51,
51,
51,
52,
52,
52,
52,
53,
53,
53,
53,
54,
54,
54,
54,
55,
55,
55,
55,
56,
56,
56,
56,
57,
57,
57,
57,
58,
58,
58,
58,
59,
59,
59,
59,
60,
60,
60,
60,
61,
61,
61,
61,
62,
62,
62,
62,
63,
63,
63,
63,
/* CONTEXT_MSB6, second last byte, */
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
/* CONTEXT_UTF8, last byte. */
/* ASCII range. */
0,
0,
0,
0,
0,
0,
0,
0,
0,
4,
4,
0,
0,
4,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
8,
12,
16,
12,
12,
20,
12,
16,
24,
28,
12,
12,
32,
12,
36,
12,
44,
44,
44,
44,
44,
44,
44,
44,
44,
44,
32,
32,
24,
40,
28,
12,
12,
48,
52,
52,
52,
48,
52,
52,
52,
48,
52,
52,
52,
52,
52,
48,
52,
52,
52,
52,
52,
48,
52,
52,
52,
52,
52,
24,
12,
28,
12,
12,
12,
56,
60,
60,
60,
56,
60,
60,
60,
56,
60,
60,
60,
60,
60,
56,
60,
60,
60,
60,
60,
56,
60,
60,
60,
60,
60,
24,
12,
28,
12,
0,
/* UTF8 continuation byte range. */
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
/* UTF8 lead byte range. */
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
2,
3,
/* CONTEXT_UTF8 second last byte. */
/* ASCII range. */
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
1,
1,
1,
1,
1,
1,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
1,
1,
1,
1,
0,
/* UTF8 continuation byte range. */
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
/* UTF8 lead byte range. */
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
/* CONTEXT_SIGNED, last byte, same as the above values shifted by 3 bits. */
0,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
8,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
16,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
24,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
32,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
40,
48,
48,
48,
48,
48,
48,
48,
48,
48,
48,
48,
48,
48,
48,
48,
56,
/* CONTEXT_SIGNED, second last byte. */
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
5,
6,
6,
6,
6,
6,
6,
6,
6,
6,
6,
6,
6,
6,
6,
6,
7,
}
type contextLUT []byte
// getContextLUT returns the lookup slice for the given context mode. Each
// mode occupies a 512-byte (2 * 256 entry) region of kContextLookup.
func getContextLUT(mode int) contextLUT {
	return kContextLookup[mode*512:]
}
func getContext(p1 byte, p2 byte, lut contextLUT) byte {
return lut[p1] | lut[256+int(p2)]
} | vendor/github.com/andybalholm/brotli/context.go | 0.536556 | 0.585753 | context.go | starcoder |
package util
import (
"fmt"
"sort"
)
// LessIntSlice reports whether the int slice l orders before r under
// lexicographic comparison (shorter slice wins ties on the shared prefix).
func LessIntSlice(l, r []int) bool {
	left := Ints(l)
	return left.Less(r)
}
// SortIntSlice sorts intSlices by delegating to IntsArray.Sort (defined
// elsewhere in this package).
// NOTE(review): unlike SortStrSlice below (which sorts each inner slice in
// place), this appears to act on the outer array — confirm the asymmetry is
// intentional.
func SortIntSlice(intSlices [][]int) {
	IntsArray(intSlices).Sort()
}
// Int wraps int to provide a three-way Compare helper.
type Int int

// Compare returns -1 when in is less than with, 1 when greater, and 0 when
// equal (standard three-way comparison contract).
func (in Int) Compare(with int) int {
	switch {
	case int(in) < with:
		return -1
	case int(in) > with:
		// Bug fix: this branch previously returned -1, making Compare report
		// "less" for greater values and breaking Ints.Less/Ints.Compare.
		return 1
	}
	return 0
}
// SortStrSlice sorts each inner string slice in place (ascending).
func SortStrSlice(strslices [][]string) {
	for i := range strslices {
		sort.Strings(strslices[i])
	}
}
// Ints is a convenience wrapper around []int providing sort/compare helpers.
type Ints []int

// IntsFromRange returns the integers in the half-open interval [from, to).
// An empty (or inverted) range yields an empty slice.
func IntsFromRange(from, to int) Ints {
	s := make([]int, 0, to-from)
	for ; from < to; from++ {
		s = append(s, from)
	}
	// Bug fix: removed a leftover debug fmt.Println(from, to, s) that printed
	// to stdout on every call.
	return s
}
// Len returns the number of elements (part of the sort.Interface contract).
func (is Ints) Len() int {
	return len(is)
}
// Copy returns a new Ints holding the same elements (shallow copy).
func (is Ints) Copy() Ints {
	return append(Ints(nil), is...)
}
// Append appends vals and returns the (possibly reallocated) slice.
func (is Ints) Append(vals ...int) Ints {
	return append(is, vals...)
}
// RemoveEnd drops the final n elements (panics if n exceeds the length).
func (is Ints) RemoveEnd(n int) Ints {
	return is[:len(is)-n]
}
// Pop drops the final element (panics on an empty slice).
func (is Ints) Pop() Ints {
	return is[:len(is)-1]
}
// Sort sorts ascending in place and returns the receiver for chaining.
func (is Ints) Sort() Ints {
	sort.Ints([]int(is))
	return is
}
// ToItems converts the slice into an Items collection, appending each element
// as a (value, index) pair.
// NOTE(review): Items/NewItems are defined elsewhere; Append is assumed to
// mutate the collection in place (its return value, if any, is discarded).
func (is Ints) ToItems() Items {
	items := NewItems(is.Len())
	is.ForEach(func(idx, val int) {
		items.Append(val, idx)
	})
	return items
}
// Slice exposes the receiver as a plain []int (same backing array).
func (is Ints) Slice() []int {
	return []int(is)
}
// ForEach invokes fn with every index/value pair in order.
func (is Ints) ForEach(fn func(idx, val int)) {
	for i := range is {
		fn(i, is[i])
	}
}
// ComputeEach replaces each element with fn(index, value) in place and
// returns the receiver for chaining.
func (is Ints) ComputeEach(fn func(idx, val int) int) Ints {
	for i := range is {
		is[i] = fn(i, is[i])
	}
	return is
}
// ForEachBreakable invokes fn for each element in order, stopping at and
// returning the first non-nil error; nil when fn succeeds for every element.
func (is Ints) ForEachBreakable(fn func(idx, val int) error) error {
	for i, v := range is {
		if err := fn(i, v); err != nil {
			return err
		}
	}
	return nil
}
// Swap exchanges the elements at i and j (part of sort.Interface).
func (is Ints) Swap(i, j int) {
	is[i], is[j] = is[j], is[i]
}
// Equal reports whether is and with have the same length and the same
// elements in the same order.
func (is Ints) Equal(with []int) bool {
	return is.Compare(with) == 0
}
// ItemEqualWithPre reports whether the element at i equals its predecessor
// (always false for i == 0).
func (is Ints) ItemEqualWithPre(i int) bool {
	return i > 0 && is[i] == is[i-1]
}
// ReversLoopUntil walks is backwards starting at index from, calling fn for
// each element until fn returns false (or index 0 is passed), and returns
// the last visited index and value.
// NOTE(review): the name is missing an "e" ("Revers" -> "Reverse"); it is
// exported, so renaming it would break callers.
func (is Ints) ReversLoopUntil(from int, fn func(idxR, val int) bool) (idx, val int) {
	for idx = from; idx < len(is) && idx >= 0; idx-- {
		val = is[idx]
		if !fn(idx, val) {
			return
		}
	}
	return
}
// LoopUntil walks is forwards starting at index from, calling fn for each
// element until fn returns false (or the end is reached), and returns the
// last visited index and value.
func (is Ints) LoopUntil(from int, fn func(idx, val int) bool) (idx, val int) {
	for idx = from; idx < len(is) && idx >= 0; idx++ {
		val = is[idx]
		if !fn(idx, val) {
			return
		}
	}
	return
}
// ElementMatch reports whether is and with contain the same set of values
// (order-insensitive; duplicate counts are not compared).
func (is Ints) ElementMatch(with []int) bool {
	// BUG FIX: the second check previously was Ints(with).ContainsAll(with),
	// i.e. "with contains itself", which is trivially always true. It must
	// verify the reverse direction against is.
	return is.ContainsAll(with) && Ints(with).ContainsAll(is)
}
// ContainsAll reports whether every value in with occurs somewhere in is.
func (is Ints) ContainsAll(with []int) bool {
	x := is.ValIdxMap()
	for _, v := range with {
		if _, ok := x[v]; !ok {
			return false
		}
	}
	return true
}
// ValIdxMap returns a value->index map; for duplicate values the index of
// the last occurrence wins.
func (is Ints) ValIdxMap() map[int]int {
	x := map[int]int{}
	for idx, v := range is {
		x[v] = idx
	}
	return x
}
// Less reports whether is sorts lexicographically before with.
func (is Ints) Less(with []int) bool {
	return is.Compare(with) < 0
}
// Compare compares is and with lexicographically, returning -1, 0 or 1.
// When one slice is a prefix of the other, the shorter slice orders first.
func (is Ints) Compare(with []int) int {
	l1, l2 := len(is), len(with)
	l := MinInt(l1, l2)
	for idx := 0; idx < l; idx++ {
		if v := Int(is[idx]).Compare(with[idx]); v != 0 {
			return v
		}
	}
	return Int(l1).Compare(l2)
}
// Sum returns the sum of all elements (0 for an empty slice).
func (is Ints) Sum() int {
	x := 0
	for _, v := range is {
		x += v
	}
	return x
}
// IntsArray is a [][]int with sorting and comparison helpers.
type IntsArray [][]int
// EmptyCopy returns a new empty IntsArray with the same capacity as ia.
func (ia IntsArray) EmptyCopy() IntsArray {
	return make([][]int, 0, len(ia))
}
// Copy returns a shallow copy of ia: the outer slice is new, but the inner
// []int slices are shared with the original.
func (ia IntsArray) Copy() IntsArray {
	return append([][]int(nil), ia...)
}
// Append returns ia with x appended; callers must use the returned slice.
func (ia IntsArray) Append(x ...[]int) IntsArray {
	return append(ia, x...)
}
// RemoveEnd returns ia with the last n inner slices sliced off.
func (ia IntsArray) RemoveEnd(n int) IntsArray {
	return ia[:len(ia)-n]
}
// ForEach invokes fn for every inner slice in order.
func (ia IntsArray) ForEach(fn func(idx int, s Ints)) {
	for idx, val := range ia {
		fn(idx, val)
	}
}
// ComputeEach replaces every inner slice with fn(index, slice), mutating ia
// in place, and returns it for chaining.
func (ia IntsArray) ComputeEach(fn func(i int, x Ints) Ints) IntsArray {
	for idx, val := range ia {
		ia[idx] = fn(idx, val)
	}
	return ia
}
// Equal reports whether ia and with contain the same inner slices, ignoring
// the ordering of both the outer and inner slices.
// Side effect: both ia and with are sorted in place (the receiver already
// was in the previous implementation).
func (ia IntsArray) Equal(with IntsArray) bool {
	ia.Sort()
	// BUG FIX: previously only the receiver was sorted, so a logically equal
	// but differently ordered `with` compared as unequal.
	with.Sort()
	if len(ia) != len(with) {
		return false
	}
	for idx, s := range ia {
		if !Ints(s).Equal(with[idx]) {
			return false
		}
	}
	return true
}
// SortItems sorts each inner slice in place (the outer order is untouched)
// and returns ia for chaining.
func (ia IntsArray) SortItems() IntsArray {
	ia.ForEach(func(i int, x Ints) {
		x.Sort()
	})
	return ia
}
// Sort sorts each inner slice, then orders the outer slice
// lexicographically, all in place, and returns ia for chaining.
func (ia IntsArray) Sort() IntsArray {
	ia.SortItems()
	sort.Slice(ia, func(i, j int) bool {
		return Ints(ia[i]).Less(ia[j])
	})
	return ia
} | util/intslice.go | 0.529507 | 0.446012 | intslice.go | starcoder
package curl
import (
"math"
"github.com/iotaledger/iota.go/consts"
)
// rotationOffset is the per-round rotation of the Curl state, in trits.
const rotationOffset = 364
// stateRotations stores the chunk offset and the bit shift of the state after each round.
// Since the modulo operations are rather costly, they are pre-computed.
var stateRotations [NumRounds]struct {
	offset, shift uint
}
// init pre-computes, for each round, how far the accumulated rotation has
// progressed, split into a whole-chunk offset and an in-chunk bit shift.
func init() {
	var rotation uint = rotationOffset
	for r := 0; r < NumRounds; r++ {
		// the state is organized as chunks of 243 trits each
		stateRotations[r].offset = rotation / consts.HashTrinarySize
		stateRotations[r].shift = rotation % consts.HashTrinarySize
		rotation = (rotation * rotationOffset) % StateSize // the rotation offset is applied every round
	}
}
// transform performs the Curl transformation.
// According to the specification, one Curl round performs the following transformation:
// for i ← 1 to 729
// x ← S[1]
// S ← rot(S)
// y ← S[1]
// N[i] ← g(x,y)
// S ← N
// Each element of the state S is combined with its rotated counterpart using the S-box g.
// This is equivalent to rotating just once and applying the S-box on the entire state:
// N ← rot(S)
// S ← g(S,N)
// The only difference then is, that the trits are at the wrong position. Successive trits are now an opposite rotation
// apart. This rotation offset adds up over the rounds and needs to be reverted in the end.
//
// p and n hold the positive/negative bit-encoding of the state: three
// 243-trit chunks, each stored in four uint64 words of a uint256.
func transform(p, n *[3]uint256) {
	for r := 0; r < NumRounds; r++ {
		p2, n2 := rotateState(p, n, stateRotations[r].offset, stateRotations[r].shift)
		// unrolled S-box computation on each uint64 of the current state
		p[0][0], n[0][0] = batchBox(p[0][0], n[0][0], p2[0][0], n2[0][0])
		p[0][1], n[0][1] = batchBox(p[0][1], n[0][1], p2[0][1], n2[0][1])
		p[0][2], n[0][2] = batchBox(p[0][2], n[0][2], p2[0][2], n2[0][2])
		p[0][3], n[0][3] = batchBox(p[0][3], n[0][3], p2[0][3], n2[0][3])
		p[1][0], n[1][0] = batchBox(p[1][0], n[1][0], p2[1][0], n2[1][0])
		p[1][1], n[1][1] = batchBox(p[1][1], n[1][1], p2[1][1], n2[1][1])
		p[1][2], n[1][2] = batchBox(p[1][2], n[1][2], p2[1][2], n2[1][2])
		p[1][3], n[1][3] = batchBox(p[1][3], n[1][3], p2[1][3], n2[1][3])
		p[2][0], n[2][0] = batchBox(p[2][0], n[2][0], p2[2][0], n2[2][0])
		p[2][1], n[2][1] = batchBox(p[2][1], n[2][1], p2[2][1], n2[2][1])
		p[2][2], n[2][2] = batchBox(p[2][2], n[2][2], p2[2][2], n2[2][2])
		p[2][3], n[2][3] = batchBox(p[2][3], n[2][3], p2[2][3], n2[2][3])
		// only the first 243 bits of each uint256 are used
		p[0].norm243()
		p[1].norm243()
		p[2].norm243()
		n[0].norm243()
		n[1].norm243()
		n[2].norm243()
	}
	// successive trits are now 364⁸¹ mod 729 = 244 positions apart and need to be reordered
	reorder(p, n)
}
// rotateState rotates the Curl state by offset * 243 + s.
// It performs a left rotation of the state elements towards lower indices.
// Each output chunk i combines the high bits of chunk (i+offset)%3 with the
// low bits of chunk (i+1+offset)%3; the (3+offset)%3 below is the same as
// (0+offset)%3, written so the "+1 chunk" pattern stays uniform.
func rotateState(p, n *[3]uint256, offset, s uint) (p2, n2 [3]uint256) {
	// rotate the positive part
	p2[0].shrInto(&p[(0+offset)%3], s).shlInto(&p[(1+offset)%3], 243-s)
	p2[1].shrInto(&p[(1+offset)%3], s).shlInto(&p[(2+offset)%3], 243-s)
	p2[2].shrInto(&p[(2+offset)%3], s).shlInto(&p[(3+offset)%3], 243-s)
	// rotate the negative part
	n2[0].shrInto(&n[(0+offset)%3], s).shlInto(&n[(1+offset)%3], 243-s)
	n2[1].shrInto(&n[(1+offset)%3], s).shlInto(&n[(2+offset)%3], 243-s)
	n2[2].shrInto(&n[(2+offset)%3], s).shlInto(&n[(3+offset)%3], 243-s)
	return p2, n2
}
// batchBox applies the Curl S-box to 64 trits in parallel. Each trit is
// encoded as a (positive bit, negative bit) pair; x and y are the two input
// trit vectors and the returned pair encodes the 64 result trits.
func batchBox(xP, xN, yP, yN uint64) (uint64, uint64) {
	mixed := xN ^ yP
	pOut := mixed &^ xP
	nOut := ^mixed &^ (xP ^ yN)
	return pOut, nOut
}
// reorder arranges the state so that the trit at index (244 * k) % 729 becomes the trit at index k.
// Since the state is organized as 3 chunks of 243 trits each, the 1st output trit lies at index (0,0), 2nd at (1,1),
// 3rd at (2,2), 4th at (0,3), 5th at (1,4)...
// Thus, in order to rearrange the 1st chunk, copy trits 3*k from the 1st chunk, trits 3*k+1 from the 2nd chunk and
// trits 3*k+2 from the 3rd chunk.
func reorder(p, n *[3]uint256) {
	const (
		m0 = 0x9249249249249249 // every 3rd bit set, bit at index 0 set
		m1 = m0 << 1 & math.MaxUint64 // every 3rd bit set, bit at index 1 set
		m2 = m0 << 2 & math.MaxUint64 // every 3rd bit set, bit at index 2 set
	)
	var p2, n2 [3]uint256
	for i := uint(0); i < 3; i++ { // the uint hints to the compiler that mod 3 will never be negative
		// A chunk word w starts at bit 64*w and 64 mod 3 = 1, so the mask
		// phase selecting a given residue class rotates m0, m2, m1, m0
		// across the four words.
		p2[i][0] = p[i][0]&m0 | p[(1+i)%3][0]&m1 | p[(2+i)%3][0]&m2
		p2[i][1] = p[i][1]&m2 | p[(1+i)%3][1]&m0 | p[(2+i)%3][1]&m1
		p2[i][2] = p[i][2]&m1 | p[(1+i)%3][2]&m2 | p[(2+i)%3][2]&m0
		p2[i][3] = p[i][3]&m0 | p[(1+i)%3][3]&m1 | p[(2+i)%3][3]&m2
		n2[i][0] = n[i][0]&m0 | n[(1+i)%3][0]&m1 | n[(2+i)%3][0]&m2
		n2[i][1] = n[i][1]&m2 | n[(1+i)%3][1]&m0 | n[(2+i)%3][1]&m1
		n2[i][2] = n[i][2]&m1 | n[(1+i)%3][2]&m2 | n[(2+i)%3][2]&m0
		n2[i][3] = n[i][3]&m0 | n[(1+i)%3][3]&m1 | n[(2+i)%3][3]&m2
	}
	*p, *n = p2, n2
} | curl/transform.go | 0.726329 | 0.622287 | transform.go | starcoder
package imageoutput
import "math"
// MappedCoordinate stores the journey of an individual coordinate.
type MappedCoordinate struct {
	// Source pixel in the input image.
	inputImageX int
	inputImageY int
	// Position within the pattern viewport.
	patternViewportX float64
	patternViewportY float64
	// Position after the transformation step.
	transformedX float64
	transformedY float64
	// True once MarkAsSatisfyingFilter has been called.
	satisfiedFilter bool
	// Optional extra coordinate recorded via StoreMappedCoordinate.
	hasMappedCoordinates bool
	mappedCoordinateX float64
	mappedCoordinateY float64
	// Destination pixel in the output image.
	outputImageX int
	outputImageY int
}
// NewMappedCoordinateUsingInputImageCoordinates returns a new mapped coordinate at the given inputImageX and inputImageY location.
// All other fields start at their zero values.
func NewMappedCoordinateUsingInputImageCoordinates(inputImageX, inputImageY int) *MappedCoordinate {
	return &MappedCoordinate{
		inputImageX: inputImageX,
		inputImageY: inputImageY,
	}
}
// NewMappedCoordinateUsingOutputImageCoordinates returns a new mapped coordinate at the given outputImageX and outputImageY location.
// All other fields start at their zero values.
func NewMappedCoordinateUsingOutputImageCoordinates(outputImageX, outputImageY int) *MappedCoordinate {
	return &MappedCoordinate{
		outputImageX: outputImageX,
		outputImageY: outputImageY,
	}
}
// NewMappedCoordinateUsingTransformedCoordinates returns a new mapped coordinate at the given transformedX and transformedY location.
// The coordinate starts unfiltered; call MarkAsSatisfyingFilter once it
// passes the filter.
func NewMappedCoordinateUsingTransformedCoordinates(transformedX, transformedY float64) *MappedCoordinate {
	// satisfiedFilter is intentionally left at its zero value (false);
	// spelling it out explicitly was redundant.
	return &MappedCoordinate{
		transformedX: transformedX,
		transformedY: transformedY,
	}
}
// OutputImageX returns the x pixel coordinate in the output image.
func (m *MappedCoordinate) OutputImageX() int {
	return m.outputImageX
}
// OutputImageY returns the y pixel coordinate in the output image.
func (m *MappedCoordinate) OutputImageY() int {
	return m.outputImageY
}
// InputImageX returns the x pixel coordinate in the input image.
func (m *MappedCoordinate) InputImageX() int {
	return m.inputImageX
}
// InputImageY returns the y pixel coordinate in the input image.
func (m *MappedCoordinate) InputImageY() int {
	return m.inputImageY
}
// TransformedX returns the x coordinate after the transformation step.
func (m *MappedCoordinate) TransformedX() float64 {
	return m.transformedX
}
// TransformedY returns the y coordinate after the transformation step.
func (m *MappedCoordinate) TransformedY() float64 {
	return m.transformedY
}
// UpdateTransformedCoordinates overwrites the transformed x/y position.
func (m *MappedCoordinate) UpdateTransformedCoordinates(x, y float64) {
	m.transformedX = x
	m.transformedY = y
}
// PatternViewportX returns the x coordinate within the pattern viewport.
func (m *MappedCoordinate) PatternViewportX() float64 {
	return m.patternViewportX
}
// PatternViewportY returns the y coordinate within the pattern viewport.
func (m *MappedCoordinate) PatternViewportY() float64 {
	return m.patternViewportY
}
// UpdatePatternViewportCoordinates overwrites the pattern-viewport x/y
// position.
func (m *MappedCoordinate) UpdatePatternViewportCoordinates(x, y float64) {
	m.patternViewportX = x
	m.patternViewportY = y
}
// CanBeCompared reports whether both transformed coordinates are finite,
// comparable numbers — i.e. neither is ±Inf nor NaN.
func (m *MappedCoordinate) CanBeCompared() bool {
	x, y := m.TransformedX(), m.TransformedY()
	if math.IsInf(x, 0) || math.IsInf(y, 0) {
		return false
	}
	return !math.IsNaN(x) && !math.IsNaN(y)
}
// MarkAsSatisfyingFilter marks this coordinate as satisfying the filter.
// No method in this type clears the flag again.
func (m *MappedCoordinate) MarkAsSatisfyingFilter() {
	m.satisfiedFilter = true
}
// SatisfiesFilter reports whether MarkAsSatisfyingFilter has been called.
func (m *MappedCoordinate) SatisfiesFilter() bool {
	return m.satisfiedFilter
}
// HasMappedCoordinate reports whether StoreMappedCoordinate has recorded an
// additional mapped coordinate on this coordinate.
func (m *MappedCoordinate) HasMappedCoordinate() bool {
	return m.hasMappedCoordinates
}
// StoreMappedCoordinate records an additional mapped (x, y) coordinate and
// flags its presence for HasMappedCoordinate.
func (m *MappedCoordinate) StoreMappedCoordinate(x, y float64) {
	m.mappedCoordinateX = x
	m.mappedCoordinateY = y
	m.hasMappedCoordinates = true
}
// MappedCoordinate returns the stored mapped (x, y) pair; only meaningful
// when HasMappedCoordinate reports true.
func (m *MappedCoordinate) MappedCoordinate() (float64, float64) {
	return m.mappedCoordinateX, m.mappedCoordinateY
} | entities/imageoutput/mappedcoordinate.go | 0.943686 | 0.588712 | mappedcoordinate.go | starcoder
package bits
import (
"fmt"
)
// A new Gate is defined by a truth table, or by a choicetable
type (
Bit int // bit, as integer
Bits []Bit // input bits, or output bits
// Multiple possible outputs, with equal probability
Choices []Bits
// A logic gate with multiple inputs and one output
OneToManyGate func(Bits) Bit
// A logic gate (like "not")
OneToOneGate func(Bit) Bit
// A ManyToManyGate can have several inputs and outputs
ManyToManyGate func(Bits) Bits
// A simple probgate with one input and several possible outputs
// Example prob table: {"0 -> 1 1 | 0 0"}, would return Choices{Outputs{1, 1}, Outputs{0, 0}} if given 0
ProbGate func(Bit) Choices
// A probgate with many inputs and several possible outputs
// Example prob table: {"0 1 1 0 1 -> 1 1 | 0 0"}, would return Choices{Outputs{1, 1}, Outputs{0, 0}} if given 0
MultiProbGate func(Bits) Choices
)
// A gate must have a Process function, that can take Input or Inputs, and return Output or Outputs
type Gater interface {
Process(interface{}) interface{}
}
// OneToManyGate can act as a gate (Gater)
// many to one
func (lg *OneToManyGate) Process(inputOrInputs interface{}) (outputOrOutputs interface{}) {
switch v := inputOrInputs.(type) {
case Bit:
return (*lg)(Bits{v})
case Bits:
return (*lg)(v)
default:
panic(fmt.Sprintf("Invalid input, value: %v, type: %T", inputOrInputs, inputOrInputs))
}
}
// OneToOneGate can act as a gate (Gater)
// one to one
func (ug *OneToOneGate) Process(inputOrInputs interface{}) (outputOrOutputs interface{}) {
switch v := inputOrInputs.(type) {
case Bit:
return (*ug)(v)
case Bits:
if len(v) == 1 {
return (*ug)(v[0])
}
panic("A OneToOneGate can only take one input")
default:
panic(fmt.Sprintf("Invalid input, value: %v, type: %T", inputOrInputs, inputOrInputs))
}
}
// Multi can act as a gate (Gater)
// many to many
func (mg *ManyToManyGate) Process(inputOrInputs interface{}) (outputOrOutputs interface{}) {
switch v := inputOrInputs.(type) {
case Bit:
return (*mg)(Bits{v})
case Bits:
return (*mg)(v)
default:
panic(fmt.Sprintf("Invalid input, value: %v, type: %T", inputOrInputs, inputOrInputs))
}
}
// Choice Gate can act as a gate (Gater)
// one bit to many possible choices
func (scg *ProbGate) Process(inputOrInputs interface{}) (outputOrOutputs interface{}) {
switch v := inputOrInputs.(type) {
case Bit:
return (*scg)(v)
case Bits:
if len(v) == 1 {
return (*scg)(v[0])
}
panic("A ProbGate can only take one input")
default:
panic(fmt.Sprintf("Invalid input, value: %v, type: %T", inputOrInputs, inputOrInputs))
}
}
// Multi Choice Gate can act as a gate (Gater)
// many bits to many possible choices
func (cg *MultiProbGate) Process(inputOrInputs interface{}) (outputOrOutputs interface{}) {
switch v := inputOrInputs.(type) {
case Bit:
return (*cg)(Bits{v})
case Bits:
return (*cg)(v)
default:
panic(fmt.Sprintf("Invalid input, value: %v, type: %T", inputOrInputs, inputOrInputs))
}
}
// Create a new logic gate, like "and" or "xor", with inputs and one output
func NewOneToManyGate(tt *TruthTable) *OneToManyGate {
g := tt.Gate()
return &g
}
// Create a new probability gate, a ProbGate, where one input can give many alternative outputs
func NewProbGate(pt *ProbTable) *ProbGate {
g := pt.Gate()
return &g
}
// TruthTable can act as a gate (Gater), many to one
func (tt *TruthTable) Process(inputs Bits) Bit {
lg := NewOneToManyGate(tt)
if output, ok := lg.Process(inputs).(Bit); ok {
return output
}
panic("Invalid return value from TruthTable Process")
}
// ProbTable can act as a gate (Gater), one to many different choices
func (pt *ProbTable) Process(input Bit) Choices {
pg := NewProbGate(pt)
if output, ok := pg.Process(input).(Choices); ok {
return output
}
panic("Invalid return value from ProbTable Process")
} | gate.go | 0.691706 | 0.503235 | gate.go | starcoder |
package sweetiebot
import (
"fmt"
"strconv"
"strings"
"github.com/bwmarrin/discordgo"
)
// PollModule groups the poll-related chat commands.
type PollModule struct {
}
// Name returns the module's display name.
func (w *PollModule) Name() string {
	return "Polls"
}
// Register is a no-op: this module needs no per-guild setup.
func (w *PollModule) Register(info *GuildInfo) {}
// Commands lists the chat commands provided by this module.
func (w *PollModule) Commands() []Command {
	return []Command{
		&PollCommand{},
		&CreatePollCommand{},
		&DeletePollCommand{},
		&VoteCommand{},
		&ResultsCommand{},
		&AddOptionCommand{},
	}
}
// Description returns the module description (currently empty).
func (w *PollModule) Description() string { return "" }
// PollCommand lists active polls or shows a single poll's options.
type PollCommand struct {
}
// Name returns the command keyword.
func (c *PollCommand) Name() string {
	return "Poll"
}
// Process handles "!poll". With no arguments it lists all active polls for
// the guild; given a poll name (case-insensitive) it prints the poll's
// description and its numbered options.
// NOTE(review): the bool return appears to flag over-length output for the
// caller — confirm against the Command interface contract.
func (c *PollCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
	gID := SBatoi(info.Guild.ID)
	if len(args) < 1 {
		polls := sb.db.GetPolls(gID)
		str := make([]string, 0, len(polls)+1)
		str = append(str, "All active polls:")
		for _, v := range polls {
			str = append(str, v.name)
		}
		return strings.Join(str, "\n"), len(str) > 5, nil
	}
	// indices[0] marks where the first argument begins in the raw message.
	arg := strings.ToLower(msg.Content[indices[0]:])
	id, desc := sb.db.GetPoll(arg, gID)
	if id == 0 {
		return "```That poll doesn't exist!```", false, nil
	}
	options := sb.db.GetOptions(id)
	str := make([]string, 0, len(options)+2)
	str = append(str, desc)
	for _, v := range options {
		str = append(str, fmt.Sprintf("%v. %s", v.index, v.option))
	}
	return strings.Join(str, "\n"), len(str) > 11, nil
}
// Usage describes the command and its parameters for the help system.
func (c *PollCommand) Usage(info *GuildInfo) *CommandUsage {
	return &CommandUsage{
		Desc: "Displays currently active polls or possible options for a given poll.",
		Params: []CommandUsageParam{
			CommandUsageParam{Name: "poll", Desc: "Name of a specific poll to display.", Optional: true},
		},
	}
}
// UsageShort returns the one-line summary used in command listings.
func (c *PollCommand) UsageShort() string { return "Displays poll description and options." }
// CreatePollCommand creates a new poll with a description and options.
type CreatePollCommand struct {
}
// Name returns the command keyword.
func (c *CreatePollCommand) Name() string {
	return "CreatePoll"
}
// Process handles "!createpoll". args[0] is the poll name (stored
// lowercased), args[1] the description, and each remaining argument becomes
// a numbered option, so at least three arguments are required.
func (c *CreatePollCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
	if len(args) < 3 {
		return "```You must provide a name, a description, and one or more options to create the poll. Example: !createpoll pollname \"Description With Space\" \"Option 1\" \"Option 2\"```", false, nil
	}
	gID := SBatoi(info.Guild.ID)
	name := strings.ToLower(args[0])
	err := sb.db.AddPoll(name, args[1], gID)
	if err != nil {
		return "```Error creating poll, make sure you haven't used this name already.```", false, nil
	}
	poll, _ := sb.db.GetPoll(name, gID)
	if poll == 0 {
		return "```Error: Orphaned poll!```", false, nil
	}
	for k, v := range args[2:] {
		// Option indices are 1-based.
		err = sb.db.AddOption(poll, uint64(k+1), v)
		if err != nil {
			// NOTE(review): a failure here leaves the poll created with only
			// the options added so far; there is no rollback.
			return fmt.Sprintf("```Error adding option %v:%s. Did you try to add the same option twice? Each option must be unique!```", k+1, v), false, nil
		}
	}
	return fmt.Sprintf("```Successfully created %s poll.```", name), false, nil
}
// Usage describes the command and its parameters for the help system.
func (c *CreatePollCommand) Usage(info *GuildInfo) *CommandUsage {
	return &CommandUsage{
		Desc: "Creates a new poll with the given name, description, and options. All arguments MUST use quotes if they have spaces. \n\nExample usage: `!createpoll pollname \"Description With Space\" \"Option 1\" NoSpaceOption`",
		Params: []CommandUsageParam{
			CommandUsageParam{Name: "name", Desc: "Name of the new poll. It's suggested to not use spaces because this makes things difficult for other commands. ", Optional: false},
			CommandUsageParam{Name: "description", Desc: "Poll description that appears when displaying it.", Optional: false},
			// BUG FIX: this description was a copy-paste of the "name"
			// parameter's text; it now actually describes the options.
			CommandUsageParam{Name: "options", Desc: "One or more options that users can vote for, each quoted if it contains spaces.", Optional: true, Variadic: true},
		},
	}
}
// UsageShort returns the one-line summary used in command listings.
func (c *CreatePollCommand) UsageShort() string { return "Creates a poll." }
// DeletePollCommand removes an existing poll by name.
type DeletePollCommand struct {
}
// Name returns the command keyword.
func (c *DeletePollCommand) Name() string {
	return "DeletePoll"
}
// Process handles "!deletepoll". The entire remaining message text is taken
// as the poll name; the poll must exist before it is removed.
// NOTE(review): unlike PollCommand/VoteCommand, the name is NOT lowercased
// before lookup — confirm whether GetPoll is case-insensitive.
func (c *DeletePollCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
	if len(args) < 1 {
		return "```You have to give me a poll name to delete!```", false, nil
	}
	arg := msg.Content[indices[0]:]
	gID := SBatoi(info.Guild.ID)
	id, _ := sb.db.GetPoll(arg, gID)
	if id == 0 {
		return "```That poll doesn't exist!```", false, nil
	}
	err := sb.db.RemovePoll(arg, gID)
	if err != nil {
		return "```Error removing poll.```", false, nil
	}
	return fmt.Sprintf("```Successfully removed %s.```", arg), false, nil
}
// Usage describes the command and its parameters for the help system.
func (c *DeletePollCommand) Usage(info *GuildInfo) *CommandUsage {
	return &CommandUsage{
		Desc: "Removes the poll with the given poll name.",
		Params: []CommandUsageParam{
			CommandUsageParam{Name: "poll", Desc: "Name of the poll to delete.", Optional: false},
		},
	}
}
// UsageShort returns the one-line summary used in command listings.
func (c *DeletePollCommand) UsageShort() string { return "Deletes a poll." }
// VoteCommand records (or changes) a user's vote in a poll.
type VoteCommand struct {
}
// Name returns the command keyword.
func (c *VoteCommand) Name() string {
	return "Vote"
}
// Process handles "!vote". args[0] names the poll (lowercased); args[1] is
// either the numeric option index or the exact option text. Per the Usage
// text, voting again replaces the user's earlier vote.
func (c *VoteCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
	gID := SBatoi(info.Guild.ID)
	if len(args) < 2 {
		// Be helpful: suggest the most recently created poll if any exist.
		polls := sb.db.GetPolls(gID)
		lastpoll := ""
		if len(polls) > 0 {
			lastpoll = fmt.Sprintf(" The most recent poll is \"%s\".", polls[0].name)
		}
		return fmt.Sprintf("```You have to provide both a poll name and the option you want to vote for!%s Use !poll without any arguments to list all active polls.```", lastpoll), false, nil
	}
	name := strings.ToLower(args[0])
	id, _ := sb.db.GetPoll(name, gID)
	if id == 0 {
		return "```That poll doesn't exist! Use !poll with no arguments to list all active polls.```", false, nil
	}
	option, err := strconv.ParseUint(args[1], 10, 64)
	if err != nil {
		// Not a number: fall back to matching the option by its exact text
		// (everything after the poll name in the raw message).
		opt := sb.db.GetOption(id, msg.Content[indices[1]:])
		if opt == nil {
			return fmt.Sprintf("```That's not one of the poll options! You have to either type in the exact name of the option you want, or provide the numeric index. Use \"!poll %s\" to list the available options.```", name), false, nil
		}
		option = *opt
	} else if !sb.db.CheckOption(id, option) {
		return fmt.Sprintf("```That's not a valid option index! Use \"!poll %s\" to get all available options for this poll.```", name), false, nil
	}
	err = sb.db.AddVote(SBatoi(msg.Author.ID), id, option)
	if err != nil {
		return "```Error adding vote.```", false, nil
	}
	return "```Voted! Use !results to check the results.```", false, nil
}
// Usage describes the command and its parameters for the help system.
func (c *VoteCommand) Usage(info *GuildInfo) *CommandUsage {
	return &CommandUsage{
		Desc: "Adds your vote to a given poll. If you have already voted in the poll, it changes your vote instead.",
		Params: []CommandUsageParam{
			CommandUsageParam{Name: "poll", Desc: "Name of the poll you want to vote in.", Optional: false},
			CommandUsageParam{Name: "option", Desc: "The numeric index of the option you want to vote for, or the precise text of the option instead.", Optional: false},
		},
	}
}
// UsageShort returns the one-line summary used in command listings.
func (c *VoteCommand) UsageShort() string { return "Votes in a poll." }
// ResultsCommand renders the current vote tallies of a poll.
type ResultsCommand struct {
}
// Name returns the command keyword.
func (c *ResultsCommand) Name() string {
	return "Results"
}
// Process handles "!results". It renders the named poll's vote tallies as a
// horizontal bar chart, one row per option, with bars scaled down to at most
// 10 cells when the leading option has more than 10 votes.
func (c *ResultsCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
	gID := SBatoi(info.Guild.ID)
	if len(args) < 1 {
		return "```You have to give me a valid poll name! Use \"!poll\" to list active polls.```", false, nil
	}
	arg := strings.ToLower(msg.Content[indices[0]:])
	id, desc := sb.db.GetPoll(arg, gID)
	if id == 0 {
		return "```That poll doesn't exist! Use \"!poll\" to list active polls.```", false, nil
	}
	results := sb.db.GetResults(id)
	options := sb.db.GetOptions(id)
	// Find the highest vote count so bars can be normalized against it.
	max := uint64(0)
	for _, v := range results {
		if v.count > max {
			max = v.count
		}
	}
	str := make([]string, 0, len(results)+2)
	str = append(str, desc)
	k := 0 // results are sparse; k walks them in lockstep with options
	var count uint64
	for _, v := range options {
		count = 0
		if k < len(results) && v.index == results[k].index {
			count = results[k].count
			k++
		}
		// Scale the bar to a maximum width of 10 cells, but always show at
		// least one filled cell for a non-zero count.
		normalized := count
		if max > 10 {
			normalized = uint64(float32(count) * (10.0 / float32(max)))
		}
		if count > 0 && normalized < 1 {
			normalized = 1
		}
		// Idiom: build the bar with strings.Repeat instead of a rune-by-rune
		// loop; normalized is always in [0, 10] here.
		graph := strings.Repeat("\u2588", int(normalized)) + strings.Repeat("\u2591", 10-int(normalized))
		// Pad single-digit indices so rows line up when there are 10+ options.
		buf := ""
		if v.index < 10 && len(options) > 9 {
			buf = "_"
		}
		str = append(str, fmt.Sprintf("`%s%v. `%s %s (%v votes)", buf, v.index, graph, v.option, count))
	}
	return strings.Join(str, "\n"), len(str) > 11, nil
}
// Usage describes the command and its parameters for the help system.
func (c *ResultsCommand) Usage(info *GuildInfo) *CommandUsage {
	return &CommandUsage{
		Desc: "Displays the results of the given poll, if it exists.",
		Params: []CommandUsageParam{
			CommandUsageParam{Name: "poll", Desc: "Name of the poll to view.", Optional: false},
		},
	}
}
// UsageShort returns the one-line summary used in command listings.
func (c *ResultsCommand) UsageShort() string { return "Displays results of a poll." }
// AddOptionCommand appends a new option to an existing poll.
type AddOptionCommand struct {
}
// Name returns the command keyword.
func (c *AddOptionCommand) Name() string {
	return "AddOption"
}
// Process handles "!addoption". args[0] names the poll; everything after it
// in the raw message is appended as a new option.
func (c *AddOptionCommand) Process(args []string, msg *discordgo.Message, indices []int, info *GuildInfo) (string, bool, *discordgo.MessageEmbed) {
	if len(args) < 1 {
		return "```You have to give me a poll name to add an option to!```", false, nil
	}
	if len(args) < 2 {
		return "```You have to give me an option to add!```", false, nil
	}
	gID := SBatoi(info.Guild.ID)
	id, _ := sb.db.GetPoll(args[0], gID)
	if id == 0 {
		return "```That poll doesn't exist!```", false, nil
	}
	// indices[1] marks where the option text begins in the raw message.
	arg := msg.Content[indices[1]:]
	err := sb.db.AppendOption(id, arg)
	if err != nil {
		return "```Error appending option, make sure no other option has this value!```", false, nil
	}
	return fmt.Sprintf("```Successfully added %s to %s.```", arg, args[0]), false, nil
}
// Usage describes the command and its parameters for the help system.
func (c *AddOptionCommand) Usage(info *GuildInfo) *CommandUsage {
	return &CommandUsage{
		Desc: "Appends an option to a poll.",
		Params: []CommandUsageParam{
			CommandUsageParam{Name: "poll", Desc: "Name of the poll to modify.", Optional: false},
			CommandUsageParam{Name: "option", Desc: "The option to append to the end of the poll.", Optional: false},
		},
	}
}
// UsageShort returns the one-line summary used in command listings.
func (c *AddOptionCommand) UsageShort() string { return "Appends an option to a poll." } | sweetiebot/poll_command.go | 0.708313 | 0.420659 | poll_command.go | starcoder
package main
import (
"fmt"
"github.com/Ullaakut/aoc19/pkg/aocutils"
"github.com/fatih/color"
)
// Cell is a single grid cell: the rune drawn there and the ID of the wire
// that produced it.
type Cell struct {
	wireID int
	r rune
}
// Grid is a sparse 2D map of cells plus optional per-rune color formatters
// used when printing.
type Grid struct {
	g map[aocutils.Vector2D]Cell
	formats map[rune]func(string, ...interface{}) string
}
// NewGrid returns an empty Grid that renders runes with the given per-rune
// format functions (formats may be nil for plain output).
func NewGrid(formats map[rune]func(string, ...interface{}) string) Grid {
	return Grid{
		g: make(map[aocutils.Vector2D]Cell),
		formats: formats,
	}
}
// Cell returns the rune stored at pos, or '.' when the position is empty.
func (g Grid) Cell(pos aocutils.Vector2D) rune {
	if cell, ok := g.g[pos]; ok {
		return cell.r
	}
	return '.'
}
// DisplaySquare prints the rectangle bounded by [xMin, xMax] x [yMin, yMax],
// padded by one cell on the min sides and two cells on the max sides.
func (g Grid) DisplaySquare(xMax, xMin, yMax, yMin int) {
	negativePadding := 1
	positivePadding := 2 // fixed local-variable typo: was "positivePaddidng"
	for y := yMin - negativePadding; y < yMax+positivePadding; y++ {
		for x := xMin - negativePadding; x < xMax+positivePadding; x++ {
			g.PrintCell(x, y)
		}
		fmt.Println()
	}
}
// PrintCell prints a cell using custom formatting if any is specified.
// Falls back to plain white output when no formatter is registered for the
// cell's rune.
func (g Grid) PrintCell(x, y int) {
	cell := g.Cell(aocutils.NewVector2D(x, y))
	format := color.WhiteString
	if g.formats != nil {
		f, exists := g.formats[cell]
		if exists {
			format = f
		}
	}
	fmt.Print(format("%c", cell))
}
// FindClosest finds the occurrence of otherRune nearest (by Manhattan
// distance) to the cell holding originRune and returns that distance.
// Returns 0 when otherRune does not appear on the grid (previous behavior).
func (g Grid) FindClosest(originRune, otherRune rune) int {
	var (
		originPos aocutils.Vector2D
		otherPos []aocutils.Vector2D
	)
	for pos, cell := range g.g {
		// This is the origin
		if cell.r == originRune {
			originPos = pos
			continue
		}
		// List the positions of all of the runes that match.
		if cell.r == otherRune {
			otherPos = append(otherPos, pos)
		}
	}
	// Track the minimum explicitly. BUG FIX: the previous version used
	// closest == 0 as an "unset" sentinel, so a legitimate distance of 0 was
	// treated as missing and could be overwritten by a larger distance.
	closest := -1
	for _, pos := range otherPos {
		d := originPos.ManhattanDistance(pos)
		if closest < 0 || d < closest {
			closest = d
		}
	}
	if closest < 0 {
		closest = 0
	}
	return closest
} | Day03/Part1/grid.go | 0.763836 | 0.501526 | grid.go | starcoder
package utils
/**
* These utilities format output in a variety of ways.
*
* The goal is to have multiple methods of output so that it's easy to script the CLI
* in whatever way a user wants. Currently we just implement JSON output, but we plan
* to implement a subset of the JSONPath spec so that we can implement:
* - output just a single value (e.g. ID) from an object
* - output a list of objects as a table with the columns specified by the user
*
* In order to make life easier for callers, they pass us the CLI context and we examine
* the arguments in here. Note that the arguments are global arguments (they occur before
* the subcommand) because they apply uniformly to all subcommands.
*/
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"github.com/vmware/photon-controller-cli/Godeps/_workspace/src/github.com/codegangsta/cli"
)
// Called by main to validate the output arguments
// Currently it only validates the --output argument. Eventually when we support
// json path, it will validate that as well.
func ValidateArgs(c *cli.Context) error {
	// --output already implies non-interactive mode, so combining it with
	// the explicit flag is contradictory.
	if c.GlobalBool("non-interactive") == true && c.GlobalString("output") != "" {
		return fmt.Errorf("--non-interactive and --output are mutually exclusive")
	}
	if c.GlobalString("output") != "" && c.GlobalString("output") != "json" {
		return fmt.Errorf("output type must be 'json'")
	}
	return nil
}
// Tells the caller if we should assume the user wants non-interactive mode
// interactive mode means two things:
// 1. The user may be prompted for input parameters that are not provided
// 2. The user gets fancy, human-readable output
// The user can disable this with non-interactive mode
// It's assumed that if the user wants custom output (e.g. JSON), they may be
// using this in a script, so it should be non-interactive
func IsNonInteractive(c *cli.Context) bool {
	return c.GlobalString("output") != "" || c.GlobalIsSet("non-interactive")
}
// Tells the caller if the user has requested custom formatting
// (i.e. the global --output flag was given a value).
func NeedsFormatting(c *cli.Context) bool {
	return c.GlobalString("output") != ""
}
// Outputs the given object (image, list of images, VM, etc...) as specified by the user
// Currently we only support JSON output, but we will support more formats later.
// Unknown output types produce an error message on w instead of a panic.
func FormatObject(o interface{}, w io.Writer, c *cli.Context) {
	outputType := c.GlobalString("output")
	switch outputType {
	case "json":
		formatObjectJson(o, w)
	default:
		fmt.Fprintf(w, "Unknown output type: '%s'", outputType)
	}
}
// Just like FormatObject, but if the incoming object is nil, it prints
// it as an empty list, not "null". This makes for nicer JSON output for
// list commands, which return lists of objects
func FormatObjects(o interface{}, w io.Writer, c *cli.Context) {
	value := reflect.ValueOf(o)
	kind := value.Kind()
	if (kind == reflect.Array || kind == reflect.Slice) && value.Len() == 0 {
		// new([0]int) marshals to "[]", giving empty lists instead of "null".
		FormatObject(new([0]int), w, c)
	} else {
		FormatObject(o, w, c)
	}
}
// Output an object as pretty-printed JSON (single-space indent) followed by
// a newline. Errors are reported to w rather than returned, matching the
// other formatters in this file. (Also fixes the "Ouptut" typo above.)
func formatObjectJson(o interface{}, w io.Writer) {
	jsonBytes, err := json.Marshal(o)
	if err != nil {
		fmt.Fprintf(w, "Cannot convert output to JSON: %s", err)
		return
	}
	var prettyJSON bytes.Buffer
	err = json.Indent(&prettyJSON, jsonBytes, "", " ")
	if err != nil {
		fmt.Fprintf(w, "Cannot format JSON output: %s", err)
		return
	}
	// Idiom: Buffer.String() instead of string(prettyJSON.Bytes()).
	fmt.Fprintf(w, "%s\n", prettyJSON.String())
} | photon/utils/formatting.go | 0.718594 | 0.469946 | formatting.go | starcoder
package coinharness
import (
"fmt"
"github.com/jfixby/coin"
"github.com/jfixby/pin"
"reflect"
"testing"
"time"
)
// JoinType is an enum representing a particular type of "node join". A node
// join is a synchronization tool used to wait until a subset of nodes have a
// consistent state with respect to an attribute.
// The zero value is Blocks.
type JoinType uint8
const (
	// Blocks is a JoinType which waits until all nodes share the same
	// block height.
	Blocks JoinType = iota
	// Mempools is a JoinType which blocks until all nodes have identical
	// mempool.
	Mempools
)
// JoinNodes is a synchronization tool used to block until all passed nodes are
// fully synced with respect to an attribute. This function will block for a
// period of time, finally returning once all nodes are synced according to the
// passed JoinType. This function be used to to ensure all active test
// harnesses are at a consistent state before proceeding to an assertion or
// check within rpc tests.
// An unrecognized joinType is silently treated as a no-op (returns nil).
func JoinNodes(command interface{}, nodes []*Harness, joinType JoinType) error {
	switch joinType {
	case Blocks:
		return syncBlocks(nodes)
	case Mempools:
		return syncMempools(command, nodes)
	}
	return nil
}
// syncMempools blocks until all nodes have identical mempools.
// It polls every 100ms, restarting from the first node's mempool on any
// mismatch (via the goto), and has no timeout: it blocks forever if the
// nodes never converge.
func syncMempools(command interface{}, nodes []*Harness) error {
	poolsMatch := false
	for !poolsMatch {
	retry:
		firstPool, err := nodes[0].NodeRPCClient().GetRawMempool(command)
		if err != nil {
			return err
		}
		// If all nodes have an identical mempool with respect to the
		// first node, then we're done. Otherwise, drop back to the top
		// of the loop and retry after a short wait period.
		for _, node := range nodes[1:] {
			nodePool, err := node.NodeRPCClient().GetRawMempool(command)
			if err != nil {
				return err
			}
			eq := reflect.DeepEqual(firstPool, nodePool)
			//eq := firstPool.EqualsTo(nodePool)
			if !eq {
				time.Sleep(time.Millisecond * 100)
				goto retry
			}
		}
		poolsMatch = true
	}
	return nil
}
// syncBlocks blocks until every node reports the same block height, polling
// every 100ms until the heights converge.
func syncBlocks(nodes []*Harness) error {
	for {
		heights := make(map[int64]struct{})
		matched := true
		for _, node := range nodes {
			height, err := node.NodeRPCClient().GetBlockCount()
			if err != nil {
				return err
			}
			heights[height] = struct{}{}
			if len(heights) > 1 {
				// Two distinct heights seen: wait and start over.
				time.Sleep(time.Millisecond * 100)
				matched = false
				break
			}
		}
		if matched {
			return nil
		}
	}
}
// ConnectNode establishes a new peer-to-peer connection between the "from"
// harness and the "to" harness. The connection made is flagged as persistent,
// therefore in the case of disconnects, "from" will attempt to reestablish a
// connection to the "to" harness. An error is returned if the new connection
// does not appear in "from"'s peer list within five one-second poll attempts.
func ConnectNode(from *Harness, to *Harness, command interface{}) error {
	peerInfo, err := from.NodeRPCClient().GetPeerInfo()
	if err != nil {
		return err
	}
	// Record the current peer count so the new connection can be detected.
	numPeers := len(peerInfo)
	targetAddr := to.P2PAddress()
	args := &AddNodeArguments{
		TargetAddr: targetAddr,
		//rpcclient.ANAdd,
		Command: command,
	}
	if err := from.NodeRPCClient().AddNode(args); err != nil {
		return err
	}
	// Block until a new connection has been established.
	for attempts := 5; attempts > 0; attempts-- {
		peerInfo, err = from.NodeRPCClient().GetPeerInfo()
		if err != nil {
			return err
		}
		if len(peerInfo) > numPeers {
			return nil
		}
		pin.Sleep(1000)
	}
	// Fixed typo in the error message ("connet" -> "connect").
	return fmt.Errorf("failed to connect node")
}
// AssertConnectedTo fails the test if nodeA does not list nodeB's P2P
// address among its peers.
func AssertConnectedTo(t *testing.T, nodeA *Harness, nodeB *Harness) {
	nodeAPeers, err := nodeA.NodeRPCClient().GetPeerInfo()
	if err != nil {
		// Include the underlying error so the failure is diagnosable
		// (it was previously discarded).
		t.Fatalf("unable to get nodeA's peer info: %v", err)
	}
	nodeAddr := nodeB.P2PAddress()
	addrFound := false
	for _, peerInfo := range nodeAPeers {
		if peerInfo.Addr == nodeAddr {
			addrFound = true
			break
		}
	}
	if !addrFound {
		t.Fatal("nodeA not connected to nodeB")
	}
}
// GenerateTestChain mines numToGenerate blocks on the given node so the
// chain gains the desired number of mature coinbase outputs.
func GenerateTestChain(numToGenerate int64, node RPCClient) error {
	fmt.Printf("Generating %v blocks...\n", numToGenerate)
	if _, err := node.Generate(uint32(numToGenerate)); err != nil {
		return err
	}
	fmt.Println("Block generation complete.")
	return nil
}
// GenSpend sends amt from one of the harness wallet's mature coinbase
// outputs to a fresh address of the given account and returns the hash of
// the created transaction. Any failure aborts the test via t.Fatalf.
func GenSpend(
	t *testing.T,
	r *Harness,
	account string,
	amt coin.Amount,
	PkScriptVersion uint16,
	PayToAddrScript func(Address) ([]byte, error),
	TxSerializeSize func(*MessageTx) int,
) Hash {
	pin.AssertNotEmpty("account", account)
	// Grab a fresh address from the wallet.
	addr, err := r.Wallet.NewAddress(account)
	if err != nil {
		t.Fatalf("unable to get new address: %v", err)
	}
	// Next, send amt to this address, spending from one of our
	// mature coinbase outputs.
	addrScript, err := PayToAddrScript(addr)
	if err != nil {
		t.Fatalf("unable to generate pkscript to addr: %v", err)
	}
	output := &TxOut{
		Value: amt,
		PkScript: addrScript,
		Version: PkScriptVersion, //wire.DefaultPkScriptVersion
	}
	arg := &CreateTransactionArgs{
		Outputs: []*TxOut{output},
		FeeRate: coin.FromFloat(10), // flat fee rate of 10; presumably units-per-kB - TODO confirm
		PayToAddrScript: PayToAddrScript,
		TxSerializeSize: TxSerializeSize,
		Account: account,
	}
	txid, err := CreateTransaction(r.Wallet, arg)
	if err != nil {
		t.Fatalf("coinbase spend failed: %v", err)
	}
	return txid.TxHash
}
// AssertTxMined fails the test unless the block identified by blockHash
// contains txid as its second transaction (index 1, right after the
// coinbase).
func AssertTxMined(t *testing.T, r *Harness, txid Hash, blockHash Hash) {
	block, err := r.NodeRPCClient().GetBlock(blockHash)
	if err != nil {
		t.Fatalf("unable to get block: %v", err)
	}
	numBlockTxns := len(block.Transactions)
	if numBlockTxns < 2 {
		t.Fatalf("crafted transaction wasn't mined, block should have "+
			"at least %v transactions instead has %v", 2, numBlockTxns)
	}
	// The crafted transaction is expected immediately after the coinbase.
	txHash1 := block.Transactions[1].TxHash()
	if txHash1 != txid {
		t.Fatalf("txid's don't match, %v vs %v", txHash1, txid)
	}
} | helpers.go | 0.66454 | 0.426381 | helpers.go | starcoder |
package minimalisp
// Expression is the interface all types of expressions must fulfil. Each
// implementation dispatches to the matching visit* method of the given
// visitor (classic visitor pattern).
type Expression interface {
	Accept(visitor visitor) (interface{}, error)
}
// visitor is the interface which an interpreter (or any other expression
// walker) has to fulfil; there is one visit method per expression type.
type visitor interface {
	visitLiteralExpr(literalExpr *LiteralExpr) (interface{}, error)
	visitDefvarExpr(defvarExpr *DefvarExpr) (interface{}, error)
	visitVarExpr(varExpr *VarExpr) (interface{}, error)
	visitIfExpr(ifExpr *IfExpr) (interface{}, error)
	visitDefunExpr(defunExpr *DefunExpr) (interface{}, error)
	visitFuncCallExpr(funcCallExpr *FuncCallExpr) (interface{}, error)
	visitListExpr(listExpr *ListExpr) (interface{}, error)
	visitLetExpr(letExpr *LetExpr) (interface{}, error)
	visitLambdaExpr(lambdaExpr *LambdaExpr) (interface{}, error)
}
// LiteralExpr is a literal such as a string or a number.
type LiteralExpr struct {
	Value interface{} // the literal's runtime value
}
// Accept visits the literal expression.
func (e *LiteralExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitLiteralExpr(e)
}
// DefvarExpr is a definition of a variable.
type DefvarExpr struct {
	Name Token // name of the variable being defined
	Initializer Expression // expression producing the variable's value
}
// Accept visits the defvar expression.
func (e *DefvarExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitDefvarExpr(e)
}
// VarExpr is a reference to a variable.
type VarExpr struct {
	Name Token // name of the referenced variable
}
// Accept visits the var expression.
func (e *VarExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitVarExpr(e)
}
// IfExpr is an if expression :).
type IfExpr struct {
	Condition Expression // condition deciding which branch applies
	ThenBranch Expression // branch for a true condition
	ElseBranch Expression // branch for a false condition
}
// Accept visits the if expression.
func (e *IfExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitIfExpr(e)
}
// DefunExpr is a definition of a function.
type DefunExpr struct {
	Name Token // function name
	Params []Token // formal parameter names
	Body Expression // function body
}
// Accept visits the function definition.
func (e *DefunExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitDefunExpr(e)
}
// FuncCallExpr is a function call.
type FuncCallExpr struct {
	Name Token // name of the called function
	Arguments []Expression // actual arguments, in call order
}
// Accept visits the function call.
func (e *FuncCallExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitFuncCallExpr(e)
}
// ListExpr represents a list collection.
type ListExpr struct {
	Elements []Expression // element expressions, in order
}
// Accept visits the list expression.
func (e *ListExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitListExpr(e)
}
// LetExpr is a let expression to define local variables.
type LetExpr struct {
	Names []Token // local variable names
	Values []Expression // initializers, aligned by index with Names
	Body Expression // body evaluated with the locals bound
}
// Accept visits the let expression.
func (e *LetExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitLetExpr(e)
}
// LambdaExpr is a lambda expression to define anonymous functions.
type LambdaExpr struct {
	Params []Token // formal parameter names
	Body Expression // function body
}
// Accept visits the lambda expression.
func (e *LambdaExpr) Accept(visitor visitor) (interface{}, error) {
	return visitor.visitLambdaExpr(e)
} | ast.go | 0.71423 | 0.42185 | ast.go | starcoder |
package rui
import (
"fmt"
"strings"
)
const (
// Radius is the SizeUnit view property that determines the corners rounding radius
// of an element's outer border edge.
Radius = "radius"
// RadiusX is the SizeUnit view property that determines the x-axis corners elliptic rounding
// radius of an element's outer border edge.
RadiusX = "radius-x"
// RadiusY is the SizeUnit view property that determines the y-axis corners elliptic rounding
// radius of an element's outer border edge.
RadiusY = "radius-y"
// RadiusTopLeft is the SizeUnit view property that determines the top-left corner rounding radius
// of an element's outer border edge.
RadiusTopLeft = "radius-top-left"
// RadiusTopLeftX is the SizeUnit view property that determines the x-axis top-left corner elliptic
// rounding radius of an element's outer border edge.
RadiusTopLeftX = "radius-top-left-x"
// RadiusTopLeftY is the SizeUnit view property that determines the y-axis top-left corner elliptic
// rounding radius of an element's outer border edge.
RadiusTopLeftY = "radius-top-left-y"
// RadiusTopRight is the SizeUnit view property that determines the top-right corner rounding radius
// of an element's outer border edge.
RadiusTopRight = "radius-top-right"
// RadiusTopRightX is the SizeUnit view property that determines the x-axis top-right corner elliptic
// rounding radius of an element's outer border edge.
RadiusTopRightX = "radius-top-right-x"
// RadiusTopRightY is the SizeUnit view property that determines the y-axis top-right corner elliptic
// rounding radius of an element's outer border edge.
RadiusTopRightY = "radius-top-right-y"
// RadiusBottomLeft is the SizeUnit view property that determines the bottom-left corner rounding radius
// of an element's outer border edge.
RadiusBottomLeft = "radius-bottom-left"
// RadiusBottomLeftX is the SizeUnit view property that determines the x-axis bottom-left corner elliptic
// rounding radius of an element's outer border edge.
RadiusBottomLeftX = "radius-bottom-left-x"
// RadiusBottomLeftY is the SizeUnit view property that determines the y-axis bottom-left corner elliptic
// rounding radius of an element's outer border edge.
RadiusBottomLeftY = "radius-bottom-left-y"
// RadiusBottomRight is the SizeUnit view property that determines the bottom-right corner rounding radius
// of an element's outer border edge.
RadiusBottomRight = "radius-bottom-right"
// RadiusBottomRightX is the SizeUnit view property that determines the x-axis bottom-right corner elliptic
// rounding radius of an element's outer border edge.
RadiusBottomRightX = "radius-bottom-right-x"
// RadiusBottomRightY is the SizeUnit view property that determines the y-axis bottom-right corner elliptic
// rounding radius of an element's outer border edge.
RadiusBottomRightY = "radius-bottom-right-y"
// X is the SizeUnit property of the ShadowProperty that determines the x-axis corners elliptic rounding
// radius of an element's outer border edge.
X = "x"
// Y is the SizeUnit property of the ShadowProperty that determines the y-axis corners elliptic rounding
// radius of an element's outer border edge.
Y = "y"
// TopLeft is the SizeUnit property of the ShadowProperty that determines the top-left corner rounding radius
// of an element's outer border edge.
TopLeft = "top-left"
// TopLeftX is the SizeUnit property of the ShadowProperty that determines the x-axis top-left corner elliptic
// rounding radius of an element's outer border edge.
TopLeftX = "top-left-x"
// TopLeftY is the SizeUnit property of the ShadowProperty that determines the y-axis top-left corner elliptic
// rounding radius of an element's outer border edge.
TopLeftY = "top-left-y"
// TopRight is the SizeUnit property of the ShadowProperty that determines the top-right corner rounding radius
// of an element's outer border edge.
TopRight = "top-right"
// TopRightX is the SizeUnit property of the ShadowProperty that determines the x-axis top-right corner elliptic
// rounding radius of an element's outer border edge.
TopRightX = "top-right-x"
// TopRightY is the SizeUnit property of the ShadowProperty that determines the y-axis top-right corner elliptic
// rounding radius of an element's outer border edge.
TopRightY = "top-right-y"
// BottomLeft is the SizeUnit property of the ShadowProperty that determines the bottom-left corner rounding radius
// of an element's outer border edge.
BottomLeft = "bottom-left"
// BottomLeftX is the SizeUnit property of the ShadowProperty that determines the x-axis bottom-left corner elliptic
// rounding radius of an element's outer border edge.
BottomLeftX = "bottom-left-x"
// BottomLeftY is the SizeUnit property of the ShadowProperty that determines the y-axis bottom-left corner elliptic
// rounding radius of an element's outer border edge.
BottomLeftY = "bottom-left-y"
// BottomRight is the SizeUnit property of the ShadowProperty that determines the bottom-right corner rounding radius
// of an element's outer border edge.
BottomRight = "bottom-right"
// BottomRightX is the SizeUnit property of the ShadowProperty that determines the x-axis bottom-right corner elliptic
// rounding radius of an element's outer border edge.
BottomRightX = "bottom-right-x"
// BottomRightY is the SizeUnit property of the ShadowProperty that determines the y-axis bottom-right corner elliptic
// rounding radius of an element's outer border edge.
BottomRightY = "bottom-right-y"
)
// RadiusProperty is a complex view property describing corner rounding.
// Individual corners and their x/y ellipse axes are addressed via the
// X/Y/TopLeft/TopLeftX/... tags.
type RadiusProperty interface {
	Properties
	ruiStringer
	fmt.Stringer
	// BoxRadius resolves the stored tags against the session into a
	// concrete BoxRadius value.
	BoxRadius(session Session) BoxRadius
}
// radiusPropertyData is the default RadiusProperty implementation backed by
// a plain tag->value property map.
type radiusPropertyData struct {
	propertyList
}
// NewRadiusProperty creates a new RadiusProperty, initialising it from any
// recognised radius tags present in params (unknown tags are ignored).
// Passing nil yields an empty property.
func NewRadiusProperty(params Params) RadiusProperty {
	result := new(radiusPropertyData)
	result.properties = map[string]interface{}{}
	if params == nil {
		return result
	}
	for _, tag := range []string{X, Y, TopLeft, TopRight, BottomLeft, BottomRight, TopLeftX, TopLeftY,
		TopRightX, TopRightY, BottomLeftX, BottomLeftY, BottomRightX, BottomRightY} {
		if value, ok := params[tag]; ok {
			result.Set(tag, value)
		}
	}
	return result
}
// normalizeTag lower-cases tag and strips an optional "radius-" prefix so
// that e.g. "Radius-Top-Left" and "top-left" address the same property.
func (radius *radiusPropertyData) normalizeTag(tag string) string {
	lower := strings.ToLower(tag)
	return strings.TrimPrefix(lower, "radius-")
}
// ruiString serializes the radius property in RUI text format, writing each
// stored tag in a stable canonical order.
func (radius *radiusPropertyData) ruiString(writer ruiWriter) {
	writer.startObject("_")
	for _, tag := range []string{X, Y, TopLeft, TopLeftX, TopLeftY, TopRight, TopRightX, TopRightY,
		BottomLeft, BottomLeftX, BottomLeftY, BottomRight, BottomRightX, BottomRightY} {
		if value, ok := radius.properties[tag]; ok {
			// Write the value under its own tag: the previous code wrote
			// every value under the constant Style key, losing the corner
			// names in the output.
			writer.writeProperty(tag, value)
		}
	}
	writer.endObject()
}
// String returns the RUI text representation of the radius property.
func (radius *radiusPropertyData) String() string {
	writer := newRUIWriter()
	radius.ruiString(writer)
	return writer.finish()
}
// delete removes every given tag from the underlying property map.
func (radius *radiusPropertyData) delete(tags []string) {
	for _, tag := range tags {
		delete(radius.properties, tag)
	}
}
// deleteUnusedTags normalizes the stored tag set after a mutation:
//   - the global X/Y tags are dropped when every corner already has an
//     explicit value (so the global default can never apply);
//   - for each corner, redundant combinations of the combined tag and the
//     -x/-y axis tags are collapsed or expanded to a minimal form.
func (radius *radiusPropertyData) deleteUnusedTags() {
	for _, tag := range []string{X, Y} {
		if _, ok := radius.properties[tag]; ok {
			unused := true
			for _, t := range []string{TopLeft, TopRight, BottomLeft, BottomRight} {
				if _, ok := radius.properties[t+"-"+tag]; !ok {
					if _, ok := radius.properties[t]; !ok {
						// This corner has neither an axis-specific nor a
						// combined value, so it still falls back to the
						// global axis tag - keep it.
						unused = false
						break
					}
				}
			}
			if unused {
				delete(radius.properties, tag)
			}
		}
	}
	// equalValue reports whether two stored values are equal; only string
	// and SizeUnit pairs are comparable, any other combination is unequal.
	equalValue := func(value1, value2 interface{}) bool {
		switch value1 := value1.(type) {
		case string:
			switch value2 := value2.(type) {
			case string:
				return value1 == value2
			}
		case SizeUnit:
			switch value2 := value2.(type) {
			case SizeUnit:
				return value1.Equal(value2)
			}
		}
		return false
	}
	for _, tag := range []string{TopLeft, TopRight, BottomLeft, BottomRight} {
		tagX := tag + "-x"
		tagY := tag + "-y"
		valueX, okX := radius.properties[tagX]
		valueY, okY := radius.properties[tagY]
		if value, ok := radius.properties[tag]; ok {
			if okX && okY {
				// Both axes are explicit - the combined tag is shadowed.
				delete(radius.properties, tag)
			} else if okX && !okY {
				if equalValue(value, valueX) {
					delete(radius.properties, tagX)
				} else {
					// Keep the combined value as the missing y axis.
					radius.properties[tagY] = value
					delete(radius.properties, tag)
				}
			} else if !okX && okY {
				if equalValue(value, valueY) {
					delete(radius.properties, tagY)
				} else {
					// Keep the combined value as the missing x axis.
					radius.properties[tagX] = value
					delete(radius.properties, tag)
				}
			}
		} else if okX && okY && equalValue(valueX, valueY) {
			// Equal axis values collapse into the single corner tag.
			radius.properties[tag] = valueX
			delete(radius.properties, tagX)
			delete(radius.properties, tagY)
		}
	}
}
// Remove deletes the given radius tag (a "radius-" prefix is accepted and
// stripped). Removing the global X/Y axis first routes through
// Set(tag, AutoSize()) so per-corner tags are normalized (combined corner
// values are preserved on the other axis) before the global tag is dropped.
func (radius *radiusPropertyData) Remove(tag string) {
	tag = radius.normalizeTag(tag)
	switch tag {
	case X, Y:
		if _, ok := radius.properties[tag]; ok {
			radius.Set(tag, AutoSize())
			delete(radius.properties, tag)
		}
	case TopLeftX, TopLeftY, TopRightX, TopRightY, BottomLeftX, BottomLeftY, BottomRightX, BottomRightY:
		delete(radius.properties, tag)
	case TopLeft, TopRight, BottomLeft, BottomRight:
		// A corner tag removes the combined value and both axis variants.
		radius.delete([]string{tag, tag + "-x", tag + "-y"})
	default:
		ErrorLogF(`"%s" property is not compatible with the RadiusProperty`, tag)
	}
}
// Set stores a radius tag value (a "radius-" prefix is accepted and
// stripped); nil removes the tag. Setting the global X/Y axis clears the
// now-shadowed per-corner axis tags; a corner value given as "rx/ry" is
// split into its -x and -y components. Returns true on success.
func (radius *radiusPropertyData) Set(tag string, value interface{}) bool {
	if value == nil {
		radius.Remove(tag)
		return true
	}
	tag = radius.normalizeTag(tag)
	switch tag {
	case X:
		if radius.setSizeProperty(tag, value) {
			radius.delete([]string{TopLeftX, TopRightX, BottomLeftX, BottomRightX})
			for _, t := range []string{TopLeft, TopRight, BottomLeft, BottomRight} {
				if val, ok := radius.properties[t]; ok {
					// Preserve the corner's y component before the combined
					// tag is dropped in favour of the new global x.
					if _, ok := radius.properties[t+"-y"]; !ok {
						radius.properties[t+"-y"] = val
					}
					delete(radius.properties, t)
				}
			}
			return true
		}
	case Y:
		if radius.setSizeProperty(tag, value) {
			radius.delete([]string{TopLeftY, TopRightY, BottomLeftY, BottomRightY})
			for _, t := range []string{TopLeft, TopRight, BottomLeft, BottomRight} {
				if val, ok := radius.properties[t]; ok {
					// Preserve the corner's x component, mirroring case X.
					if _, ok := radius.properties[t+"-x"]; !ok {
						radius.properties[t+"-x"] = val
					}
					delete(radius.properties, t)
				}
			}
			return true
		}
	case TopLeftX, TopLeftY, TopRightX, TopRightY, BottomLeftX, BottomLeftY, BottomRightX, BottomRightY:
		if radius.setSizeProperty(tag, value) {
			radius.deleteUnusedTags()
			return true
		}
	case TopLeft, TopRight, BottomLeft, BottomRight:
		switch value := value.(type) {
		case SizeUnit:
			radius.properties[tag] = value
			radius.delete([]string{tag + "-x", tag + "-y"})
			radius.deleteUnusedTags()
			return true
		case string:
			if strings.Contains(value, "/") {
				if values := strings.Split(value, "/"); len(values) == 2 {
					// Fix: use the split halves; value[0]/value[1] indexed
					// single bytes of the original string.
					xOK := radius.Set(tag+"-x", values[0])
					yOK := radius.Set(tag+"-y", values[1])
					return xOK && yOK
				} else {
					notCompatibleType(tag, value)
				}
			} else {
				if radius.setSizeProperty(tag, value) {
					radius.delete([]string{tag + "-x", tag + "-y"})
					radius.deleteUnusedTags()
					return true
				}
			}
		}
	default:
		ErrorLogF(`"%s" property is not compatible with the RadiusProperty`, tag)
	}
	return false
}
// Get returns the stored value for the given radius tag (a "radius-" prefix
// is accepted and stripped). Axis-specific corner tags fall back first to
// the combined corner tag and then to the global "x"/"y" axis tag. Returns
// nil when nothing matches.
func (radius *radiusPropertyData) Get(tag string) interface{} {
	tag = radius.normalizeTag(tag)
	if value, ok := radius.properties[tag]; ok {
		return value
	}
	switch tag {
	case TopLeftX, TopLeftY, TopRightX, TopRightY, BottomLeftX, BottomLeftY, BottomRightX, BottomRightY:
		tagLen := len(tag)
		// e.g. "top-left-x" falls back to the combined "top-left" tag...
		if value, ok := radius.properties[tag[:tagLen-2]]; ok {
			return value
		}
		// ...and finally to the global axis tag "x" or "y".
		if value, ok := radius.properties[tag[tagLen-1:]]; ok {
			return value
		}
	}
	return nil
}
// BoxRadius resolves all stored tags against the session into a concrete
// BoxRadius. Each corner axis is resolved with the precedence:
// corner-axis tag > combined corner tag > global axis tag.
func (radius *radiusPropertyData) BoxRadius(session Session) BoxRadius {
	x, _ := sizeProperty(radius, X, session)
	y, _ := sizeProperty(radius, Y, session)
	// getRadius resolves one corner's (x, y) pair, starting from the
	// global axis defaults.
	getRadius := func(tag string) (SizeUnit, SizeUnit) {
		rx := x
		ry := y
		if r, ok := sizeProperty(radius, tag, session); ok {
			rx = r
			ry = r
		}
		if r, ok := sizeProperty(radius, tag+"-x", session); ok {
			rx = r
		}
		if r, ok := sizeProperty(radius, tag+"-y", session); ok {
			ry = r
		}
		return rx, ry
	}
	var result BoxRadius
	result.TopLeftX, result.TopLeftY = getRadius(TopLeft)
	result.TopRightX, result.TopRightY = getRadius(TopRight)
	result.BottomLeftX, result.BottomLeftY = getRadius(BottomLeft)
	result.BottomRightX, result.BottomRightY = getRadius(BottomRight)
	return result
}
// BoxRadius defines the rounding radii of the corners of an element's outer
// border edge (one x and one y ellipse radius per corner).
type BoxRadius struct {
	TopLeftX, TopLeftY, TopRightX, TopRightY, BottomLeftX, BottomLeftY, BottomRightX, BottomRightY SizeUnit
}
// AllAnglesIsEqual reports whether all four corners share the same x radius
// and the same y radius (the x and y values may still differ from each
// other).
func (radius BoxRadius) AllAnglesIsEqual() bool {
	return radius.TopLeftX.Equal(radius.TopRightX) &&
		radius.TopLeftY.Equal(radius.TopRightY) &&
		radius.TopLeftX.Equal(radius.BottomLeftX) &&
		radius.TopLeftY.Equal(radius.BottomLeftY) &&
		radius.TopLeftX.Equal(radius.BottomRightX) &&
		radius.TopLeftY.Equal(radius.BottomRightY)
}
// String returns the RUI text representation of a BoxRadius. Equal x/y
// pairs collapse to a single per-corner entry; a fully uniform radius
// collapses to a single size value.
func (radius BoxRadius) String() string {
	if radius.AllAnglesIsEqual() {
		if radius.TopLeftX.Equal(radius.TopLeftY) {
			return radius.TopLeftX.String()
		} else {
			return fmt.Sprintf("_{ x = %s, y = %s }", radius.TopLeftX.String(), radius.TopLeftY.String())
		}
	}
	buffer := allocStringBuilder()
	defer freeStringBuilder(buffer)
	buffer.WriteString("_{ ")
	if radius.TopLeftX.Equal(radius.TopLeftY) {
		buffer.WriteString("top-left = ")
		buffer.WriteString(radius.TopLeftX.String())
	} else {
		buffer.WriteString("top-left-x = ")
		buffer.WriteString(radius.TopLeftX.String())
		// Fix: the ", " separator between the x and y entries was missing.
		buffer.WriteString(", top-left-y = ")
		buffer.WriteString(radius.TopLeftY.String())
	}
	if radius.TopRightX.Equal(radius.TopRightY) {
		buffer.WriteString(", top-right = ")
		buffer.WriteString(radius.TopRightX.String())
	} else {
		buffer.WriteString(", top-right-x = ")
		buffer.WriteString(radius.TopRightX.String())
		buffer.WriteString(", top-right-y = ")
		buffer.WriteString(radius.TopRightY.String())
	}
	if radius.BottomLeftX.Equal(radius.BottomLeftY) {
		buffer.WriteString(", bottom-left = ")
		buffer.WriteString(radius.BottomLeftX.String())
	} else {
		buffer.WriteString(", bottom-left-x = ")
		buffer.WriteString(radius.BottomLeftX.String())
		buffer.WriteString(", bottom-left-y = ")
		buffer.WriteString(radius.BottomLeftY.String())
	}
	if radius.BottomRightX.Equal(radius.BottomRightY) {
		buffer.WriteString(", bottom-right = ")
		buffer.WriteString(radius.BottomRightX.String())
	} else {
		buffer.WriteString(", bottom-right-x = ")
		buffer.WriteString(radius.BottomRightX.String())
		buffer.WriteString(", bottom-right-y = ")
		buffer.WriteString(radius.BottomRightY.String())
	}
	buffer.WriteString(" }")
	return buffer.String()
}
// cssValue emits the CSS "border-radius" declaration for the radius into
// builder. Nothing is emitted when every corner is zero or auto (the CSS
// default); the shorthand is shortened where corner/axis values coincide.
func (radius BoxRadius) cssValue(builder cssBuilder) {
	if (radius.TopLeftX.Type == Auto || radius.TopLeftX.Value == 0) &&
		(radius.TopLeftY.Type == Auto || radius.TopLeftY.Value == 0) &&
		(radius.TopRightX.Type == Auto || radius.TopRightX.Value == 0) &&
		(radius.TopRightY.Type == Auto || radius.TopRightY.Value == 0) &&
		(radius.BottomRightX.Type == Auto || radius.BottomRightX.Value == 0) &&
		(radius.BottomRightY.Type == Auto || radius.BottomRightY.Value == 0) &&
		(radius.BottomLeftX.Type == Auto || radius.BottomLeftX.Value == 0) &&
		(radius.BottomLeftY.Type == Auto || radius.BottomLeftY.Value == 0) {
		return
	}
	buffer := allocStringBuilder()
	defer freeStringBuilder(buffer)
	buffer.WriteString(radius.TopLeftX.cssString("0"))
	if radius.AllAnglesIsEqual() {
		// Single x value; append "/ y" only when the ellipse is not round.
		if !radius.TopLeftX.Equal(radius.TopLeftY) {
			buffer.WriteString(" / ")
			buffer.WriteString(radius.TopLeftY.cssString("0"))
		}
	} else {
		// Four x radii in CSS order: top-left, top-right, bottom-right,
		// bottom-left.
		buffer.WriteRune(' ')
		buffer.WriteString(radius.TopRightX.cssString("0"))
		buffer.WriteRune(' ')
		buffer.WriteString(radius.BottomRightX.cssString("0"))
		buffer.WriteRune(' ')
		buffer.WriteString(radius.BottomLeftX.cssString("0"))
		if !radius.TopLeftX.Equal(radius.TopLeftY) ||
			!radius.TopRightX.Equal(radius.TopRightY) ||
			!radius.BottomLeftX.Equal(radius.BottomLeftY) ||
			!radius.BottomRightX.Equal(radius.BottomRightY) {
			// The y radii differ somewhere: emit the "/ y1 y2 y3 y4" part.
			buffer.WriteString(" / ")
			buffer.WriteString(radius.TopLeftY.cssString("0"))
			buffer.WriteRune(' ')
			buffer.WriteString(radius.TopRightY.cssString("0"))
			buffer.WriteRune(' ')
			buffer.WriteString(radius.BottomRightY.cssString("0"))
			buffer.WriteRune(' ')
			buffer.WriteString(radius.BottomLeftY.cssString("0"))
		}
	}
	builder.add("border-radius", buffer.String())
}
// cssString returns the CSS border-radius value as a string (empty when the
// radius is entirely default).
func (radius BoxRadius) cssString() string {
	var builder cssValueBuilder
	radius.cssValue(&builder)
	return builder.finish()
}
// getRadiusProperty returns the "radius" property of style as a
// RadiusProperty, converting SizeUnit, string and BoxRadius representations
// on the fly. A new empty property is returned when nothing usable is
// stored.
func getRadiusProperty(style Properties) RadiusProperty {
	if value := style.Get(Radius); value != nil {
		switch value := value.(type) {
		case RadiusProperty:
			return value
		case BoxRadius:
			result := NewRadiusProperty(nil)
			if value.AllAnglesIsEqual() {
				result.Set(X, value.TopLeftX)
				result.Set(Y, value.TopLeftY)
			} else {
				// Collapse equal x/y pairs into single corner tags.
				if value.TopLeftX.Equal(value.TopLeftY) {
					result.Set(TopLeft, value.TopLeftX)
				} else {
					result.Set(TopLeftX, value.TopLeftX)
					result.Set(TopLeftY, value.TopLeftY)
				}
				if value.TopRightX.Equal(value.TopRightY) {
					result.Set(TopRight, value.TopRightX)
				} else {
					result.Set(TopRightX, value.TopRightX)
					result.Set(TopRightY, value.TopRightY)
				}
				if value.BottomLeftX.Equal(value.BottomLeftY) {
					result.Set(BottomLeft, value.BottomLeftX)
				} else {
					result.Set(BottomLeftX, value.BottomLeftX)
					result.Set(BottomLeftY, value.BottomLeftY)
				}
				if value.BottomRightX.Equal(value.BottomRightY) {
					result.Set(BottomRight, value.BottomRightX)
				} else {
					result.Set(BottomRightX, value.BottomRightX)
					result.Set(BottomRightY, value.BottomRightY)
				}
			}
			return result
		case SizeUnit:
			return NewRadiusProperty(Params{
				X: value,
				Y: value,
			})
		case string:
			return NewRadiusProperty(Params{
				X: value,
				Y: value,
			})
		}
	}
	return NewRadiusProperty(nil)
}
// setRadius stores value as the "radius" property. Accepted representations:
// nil (removes the property), RadiusProperty, SizeUnit, BoxRadius
// (normalized into a RadiusProperty), "x/y" strings (split into the two
// axes), other strings (parsed as a single size) and DataObject (per-tag
// values). Returns true on success.
// NOTE(review): the BoxRadius branch duplicates the conversion in
// getRadiusProperty - a shared helper would avoid drift.
func (properties *propertyList) setRadius(value interface{}) bool {
	if value == nil {
		delete(properties.properties, Radius)
		return true
	}
	switch value := value.(type) {
	case RadiusProperty:
		properties.properties[Radius] = value
		return true
	case SizeUnit:
		properties.properties[Radius] = value
		return true
	case BoxRadius:
		radius := NewRadiusProperty(nil)
		if value.AllAnglesIsEqual() {
			radius.Set(X, value.TopLeftX)
			radius.Set(Y, value.TopLeftY)
		} else {
			// Collapse equal x/y pairs into single corner tags.
			if value.TopLeftX.Equal(value.TopLeftY) {
				radius.Set(TopLeft, value.TopLeftX)
			} else {
				radius.Set(TopLeftX, value.TopLeftX)
				radius.Set(TopLeftY, value.TopLeftY)
			}
			if value.TopRightX.Equal(value.TopRightY) {
				radius.Set(TopRight, value.TopRightX)
			} else {
				radius.Set(TopRightX, value.TopRightX)
				radius.Set(TopRightY, value.TopRightY)
			}
			if value.BottomLeftX.Equal(value.BottomLeftY) {
				radius.Set(BottomLeft, value.BottomLeftX)
			} else {
				radius.Set(BottomLeftX, value.BottomLeftX)
				radius.Set(BottomLeftY, value.BottomLeftY)
			}
			if value.BottomRightX.Equal(value.BottomRightY) {
				radius.Set(BottomRight, value.BottomRightX)
			} else {
				radius.Set(BottomRightX, value.BottomRightX)
				radius.Set(BottomRightY, value.BottomRightY)
			}
		}
		properties.properties[Radius] = radius
		return true
	case string:
		if strings.Contains(value, "/") {
			// "rx/ry" sets the two global axes separately.
			values := strings.Split(value, "/")
			if len(values) == 2 {
				okX := properties.setRadiusElement(RadiusX, values[0])
				okY := properties.setRadiusElement(RadiusY, values[1])
				return okX && okY
			} else {
				notCompatibleType(Radius, value)
			}
		} else {
			return properties.setSizeProperty(Radius, value)
		}
	case DataObject:
		radius := NewRadiusProperty(nil)
		for _, tag := range []string{X, Y, TopLeft, TopRight, BottomLeft, BottomRight, TopLeftX, TopLeftY,
			TopRightX, TopRightY, BottomLeftX, BottomLeftY, BottomRightX, BottomRightY} {
			if value, ok := value.PropertyValue(tag); ok {
				radius.Set(tag, value)
			}
		}
		properties.properties[Radius] = radius
		return true
	default:
		notCompatibleType(Radius, value)
	}
	return false
}
// removeRadiusElement removes a single radius sub-tag; if no tags remain,
// the whole "radius" property is dropped.
func (properties *propertyList) removeRadiusElement(tag string) {
	if value, ok := properties.properties[Radius]; ok && value != nil {
		radius := getRadiusProperty(properties)
		radius.Remove(tag)
		if len(radius.AllTags()) == 0 {
			delete(properties.properties, Radius)
		} else {
			properties.properties[Radius] = radius
		}
	}
}
// setRadiusElement sets a single radius sub-tag (a nil value removes it),
// converting the stored representation to a RadiusProperty if necessary.
// Returns true on success.
func (properties *propertyList) setRadiusElement(tag string, value interface{}) bool {
	if value == nil {
		properties.removeRadiusElement(tag)
		return true
	}
	radius := getRadiusProperty(properties)
	if radius.Set(tag, value) {
		properties.properties[Radius] = radius
		return true
	}
	return false
}
// getRadiusElement returns the value of a single radius sub-tag (e.g.
// RadiusTopLeftX) from style's "radius" property, whatever representation it
// is stored in. For BoxRadius storage, aggregate tags (axis or corner) are
// only returned when the corresponding component values coincide; otherwise
// nil.
func getRadiusElement(style Properties, tag string) interface{} {
	value := style.Get(Radius)
	if value != nil {
		switch value := value.(type) {
		case string:
			return value
		case SizeUnit:
			return value
		case RadiusProperty:
			return value.Get(tag)
		case BoxRadius:
			switch tag {
			case RadiusX:
				if value.TopLeftX.Equal(value.TopRightX) &&
					value.TopLeftX.Equal(value.BottomLeftX) &&
					value.TopLeftX.Equal(value.BottomRightX) {
					return value.TopLeftX
				}
			case RadiusY:
				if value.TopLeftY.Equal(value.TopRightY) &&
					value.TopLeftY.Equal(value.BottomLeftY) &&
					value.TopLeftY.Equal(value.BottomRightY) {
					return value.TopLeftY
				}
			case RadiusTopLeft:
				if value.TopLeftX.Equal(value.TopLeftY) {
					return value.TopLeftY
				}
			case RadiusTopRight:
				if value.TopRightX.Equal(value.TopRightY) {
					return value.TopRightY
				}
			case RadiusBottomLeft:
				if value.BottomLeftX.Equal(value.BottomLeftY) {
					return value.BottomLeftY
				}
			case RadiusBottomRight:
				if value.BottomRightX.Equal(value.BottomRightY) {
					return value.BottomRightY
				}
			case RadiusTopLeftX:
				return value.TopLeftX
			case RadiusTopLeftY:
				return value.TopLeftY
			case RadiusTopRightX:
				return value.TopRightX
			case RadiusTopRightY:
				return value.TopRightY
			case RadiusBottomLeftX:
				return value.BottomLeftX
			case RadiusBottomLeftY:
				return value.BottomLeftY
			case RadiusBottomRightX:
				return value.BottomRightX
			case RadiusBottomRightY:
				return value.BottomRightY
			}
		}
	}
	return nil
}
// getRadius resolves properties' "radius" value into a concrete BoxRadius,
// expanding a single size (or a constant string resolved via the session) to
// all eight corner axes. The zero BoxRadius is returned when the property is
// unset or unparsable.
func getRadius(properties Properties, session Session) BoxRadius {
	if value := properties.Get(Radius); value != nil {
		switch value := value.(type) {
		case BoxRadius:
			return value
		case RadiusProperty:
			return value.BoxRadius(session)
		case SizeUnit:
			return BoxRadius{TopLeftX: value, TopLeftY: value, TopRightX: value, TopRightY: value,
				BottomLeftX: value, BottomLeftY: value, BottomRightX: value, BottomRightY: value}
		case string:
			// Strings may reference session constants, e.g. "@radius".
			if text, ok := session.resolveConstants(value); ok {
				if size, ok := StringToSizeUnit(text); ok {
					return BoxRadius{TopLeftX: size, TopLeftY: size, TopRightX: size, TopRightY: size,
						BottomLeftX: size, BottomLeftY: size, BottomRightX: size, BottomRightY: size}
				}
			}
		}
	}
	return BoxRadius{}
}
package processor
import (
"time"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/response"
"github.com/Jeffail/benthos/v3/lib/types"
)
//------------------------------------------------------------------------------
// init registers the "split" processor constructor and its documentation in
// the global Constructors registry.
func init() {
	Constructors[TypeSplit] = TypeSpec{
		constructor: NewSplit,
		description: `
Breaks message batches (synonymous with multiple part messages) into smaller
batches. The size of the resulting batches are determined either by a discrete
size or, if the field ` + "`byte_size`" + ` is non-zero, then by total size in
bytes (which ever limit is reached first).
If there is a remainder of messages after splitting a batch the remainder is
also sent as a single batch. For example, if your target size was 10, and the
processor received a batch of 95 message parts, the result would be 9 batches of
10 messages followed by a batch of 5 messages.`,
	}
}
//------------------------------------------------------------------------------
// SplitConfig is a configuration struct containing fields for the Split
// processor, which breaks message batches down into batches of a smaller size.
type SplitConfig struct {
	Size int `json:"size" yaml:"size"` // max parts per output batch (0 disables the count limit)
	ByteSize int `json:"byte_size" yaml:"byte_size"` // max total bytes per output batch (0 disables the byte limit)
}
// NewSplitConfig returns a SplitConfig with default values (one part per
// batch, no byte limit).
func NewSplitConfig() SplitConfig {
	return SplitConfig{
		Size: 1,
		ByteSize: 0,
	}
}
//------------------------------------------------------------------------------
// Split is a processor that breaks message batches into smaller batches,
// bounded by a part count and/or a total byte size.
type Split struct {
	log log.Modular
	stats metrics.Type
	size int // max parts per output batch (0 = unlimited)
	byteSize int // max bytes per output batch (0 = unlimited)
	mCount metrics.StatCounter // input batches processed
	mDropped metrics.StatCounter // empty input batches dropped
	mSent metrics.StatCounter // parts sent
	mBatchSent metrics.StatCounter // output batches sent
}
// NewSplit returns a Split processor configured from conf.Split.
func NewSplit(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	return &Split{
		log: log,
		stats: stats,
		size: conf.Split.Size,
		byteSize: conf.Split.ByteSize,
		mCount: stats.GetCounter("count"),
		mDropped: stats.GetCounter("dropped"),
		mSent: stats.GetCounter("sent"),
		mBatchSent: stats.GetCounter("batch.sent"),
	}, nil
}
//------------------------------------------------------------------------------
// ProcessMessage splits the incoming batch into batches of at most s.size
// parts and/or s.byteSize total bytes (whichever limit is hit first; a limit
// of 0 disables that check). Any remainder of parts is emitted as a final,
// smaller batch. An empty input batch is acked and dropped.
func (s *Split) ProcessMessage(msg types.Message) ([]types.Message, types.Response) {
	s.mCount.Incr(1)
	if msg.Len() == 0 {
		s.mDropped.Incr(1)
		return nil, response.NewAck()
	}
	msgs := []types.Message{}
	nextMsg := message.New(nil)
	byteSize := 0
	msg.Iter(func(i int, p types.Part) error {
		// Flush the current batch before this part would exceed a limit.
		if (s.size > 0 && nextMsg.Len() >= s.size) ||
			(s.byteSize > 0 && (byteSize+len(p.Get())) > s.byteSize) {
			if nextMsg.Len() > 0 {
				msgs = append(msgs, nextMsg)
				nextMsg = message.New(nil)
				byteSize = 0
			} else {
				// A single part larger than byte_size is still emitted (in
				// its own batch); only warn about it.
				s.log.Warnf("A single message exceeds the target batch byte size of '%v', actual size: '%v'", s.byteSize, len(p.Get()))
			}
		}
		nextMsg.Append(p)
		byteSize += len(p.Get())
		return nil
	})
	// Flush the remainder batch, if any.
	if nextMsg.Len() > 0 {
		msgs = append(msgs, nextMsg)
	}
	s.mBatchSent.Incr(int64(len(msgs)))
	s.mSent.Incr(int64(msg.Len()))
	return msgs, nil
}
// CloseAsync shuts down the processor and stops processing requests.
// Split holds no background resources, so this is a no-op.
func (s *Split) CloseAsync() {
}
// WaitForClose blocks until the processor has closed down. Split closes
// immediately, so the timeout is never exercised and nil is always returned.
func (s *Split) WaitForClose(timeout time.Duration) error {
	return nil
}
//------------------------------------------------------------------------------
package fixture
import (
"testing"
"strconv"
"reflect"
)
// Test is the signature of a sub-test executed once per fixture iteration.
type Test func(t *testing.T)

// F builds a single fixture value from one parameter row.
type F func(p Param) interface{}

// Ft couples a *testing.T with a fixture builder, its parameter source and
// the expected results, driving table-style sub-tests.
type Ft struct {
	*testing.T
	params  ParamReader   // source of parameter rows fed to the fixture
	fixture F             // builds one fixture value per parameter row
	data    []interface{} // fixture values built so far, one per iteration
	results *Results      // expected results, aligned with data by index
}
// New builds a fixture test helper around t, using f to construct fixture
// values from the rows of p, compared against the expected results in r.
func New(t *testing.T, f F, p ParamReader, r *Results) *Ft {
	ft := &Ft{}
	ft.T = t
	ft.params = p
	ft.fixture = f
	ft.results = r
	return ft
}
// Fixture returns every fixture value generated so far by RunWith.
func (f *Ft) Fixture() []interface{} {
	return f.data
}
// Value returns the fixture value produced for the current (most recent)
// iteration.
func (f *Ft) Value() interface{} {
	last := len(f.data) - 1
	return f.data[last]
}
// Result returns the expected result aligned with the current iteration,
// i.e. the entry of results at the index of the latest fixture value.
func (f *Ft) Result() interface{} {
	idx := len(f.data) - 1
	return f.results.Values()[idx]
}
// RunWith resets the accumulated fixture data, then runs t as a numbered
// sub-test once per parameter row, building the row's fixture value first.
func (f *Ft) RunWith(t Test) {
	f.data = nil
	for i, row := range f.params.Values() {
		f.data = append(f.data, f.fixture(row))
		f.Run(strconv.Itoa(i), t)
	}
}
// assertOp compares the current iteration's result against value using the
// comparison o, reporting a test failure when the comparison is false.
func (f *Ft) assertOp(value interface{}, o func(a interface{}, b interface{}) bool) {
	r := f.Result()
	if !o(r, value) {
		// Use %v: the operands are interface{} and are frequently numeric;
		// %s would render e.g. an int as "%!s(int=5)".
		f.Errorf("Failure: value does not match expected result: [%v] != [%v]", r, value)
	}
}
// AssertStrEq fails the test unless the current result equals value when
// both are treated as strings (panics if either is not a string).
func (f *Ft) AssertStrEq(value string) {
	eq := func(a interface{}, b interface{}) bool {
		return a.(string) == b.(string)
	}
	f.assertOp(value, eq)
}
// AssertGt fails the test unless the current result is strictly greater
// than value, comparing numerically in the result's own type.
func (f *Ft) AssertGt(value interface{}) {
	cmpOp := func(a, b interface{}) bool {
		rType := reflect.TypeOf(a)
		av := reflect.ValueOf(a)
		bv := reflect.ValueOf(b).Convert(rType) // coerce b into a's type
		// Switch on Kind rather than Name so named numeric types are
		// handled, and use Uint for unsigned kinds: Value.Int panics on
		// uint values (and mis-signs large uint64s even if converted).
		switch rType.Kind() {
		case reflect.Float32, reflect.Float64:
			return av.Float() > bv.Float()
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			return av.Uint() > bv.Uint()
		default:
			return av.Int() > bv.Int()
		}
	}
	f.assertOp(value, cmpOp)
}
// AssertGe fails the test unless the current result is greater than or
// equal to value, comparing numerically in the result's own type.
func (f *Ft) AssertGe(value interface{}) {
	cmpOp := func(a, b interface{}) bool {
		rType := reflect.TypeOf(a)
		av := reflect.ValueOf(a)
		bv := reflect.ValueOf(b).Convert(rType) // coerce b into a's type
		// Kind (not Name) handles named numeric types; Uint kinds must not
		// go through Value.Int, which panics on unsigned values.
		switch rType.Kind() {
		case reflect.Float32, reflect.Float64:
			return av.Float() >= bv.Float()
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			return av.Uint() >= bv.Uint()
		default:
			return av.Int() >= bv.Int()
		}
	}
	f.assertOp(value, cmpOp)
}
// AssertLt fails the test unless the current result is strictly less than
// value, comparing numerically in the result's own type.
func (f *Ft) AssertLt(value interface{}) {
	cmpOp := func(a, b interface{}) bool {
		rType := reflect.TypeOf(a)
		av := reflect.ValueOf(a)
		bv := reflect.ValueOf(b).Convert(rType) // coerce b into a's type
		// Kind (not Name) handles named numeric types; Uint kinds must not
		// go through Value.Int, which panics on unsigned values.
		switch rType.Kind() {
		case reflect.Float32, reflect.Float64:
			return av.Float() < bv.Float()
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			return av.Uint() < bv.Uint()
		default:
			return av.Int() < bv.Int()
		}
	}
	f.assertOp(value, cmpOp)
}
// AssertLe fails the test unless the current result is less than or equal
// to value, comparing numerically in the result's own type.
func (f *Ft) AssertLe(value interface{}) {
	cmpOp := func(a, b interface{}) bool {
		rType := reflect.TypeOf(a)
		av := reflect.ValueOf(a)
		bv := reflect.ValueOf(b).Convert(rType) // coerce b into a's type
		// Kind (not Name) handles named numeric types; Uint kinds must not
		// go through Value.Int, which panics on unsigned values.
		switch rType.Kind() {
		case reflect.Float32, reflect.Float64:
			return av.Float() <= bv.Float()
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			return av.Uint() <= bv.Uint()
		default:
			return av.Int() <= bv.Int()
		}
	}
	f.assertOp(value, cmpOp)
}
// Assert compares the fixture value of the current iteration against the
// matching expected result, failing the test on mismatch.
// NOTE(review): when the expected result is a string, the fixture value is
// assumed to be a []byte (the type assertion panics otherwise) — confirm
// all callers uphold this convention.
func (f *Ft) Assert() {
	r := f.results.Values()
	i := len(f.data) - 1
	data := f.data[i]
	res := r[i]
	// Normalise []byte fixtures to string so they compare against string results.
	switch r[i].(type) {
	case string:
		data = string(f.data[i].([]byte))
		res = string(r[i].(string))
	}
	// NOTE(review): interface comparison panics for uncomparable kinds
	// (slices, maps, funcs), and %s misformats non-string values — consider
	// reflect.DeepEqual plus %v here.
	if data != res {
		f.Errorf("Failed value does not match expected result: [%s] != [%s]", f.data[i], r[i])
	}
}
package edges
import (
"image"
"image/color"
"image/draw"
)
/*
 * the four dials / knobs you can play with
 * to control the results from this algorithm
 */
const (
	// WindowSize is the default gradient window side length; SetWindowSize
	// requires odd values >= 3.
	WindowSize = 7
	// Ratio is the default thresholding ratio.
	// NOTE(review): consumed outside this excerpt — presumably the fraction
	// used when selecting the hysteresis threshold; confirm.
	Ratio = 0.80
	// SmoothingFactor is the default ISEF smoothing factor (0..1).
	SmoothingFactor = 0.94
	// ThinningFactor is the default edge-thinning factor (0 = no thinning).
	ThinningFactor = 0
)
// Detector holds the state of one Shen-Castan edge detection run: the
// input image, the intermediate floating point work buffers and the
// tunable parameters.
type Detector struct {
	original          image.Image     // source image, never modified
	gray              *image.Gray     // grayscale copy of original
	graySize, bufSize image.Point     // image size and padded buffer size
	workRect          image.Rectangle // interior region mapping onto the image
	buffer, smoothed  [][]float32     // input intensities and their smoothed version
	bli, res          [][]uint8       // binary Laplacian image and final edge map
	bgColor, fgColor  color.Color     // output colors for non-edge / edge pixels
	doHysteresis      bool            // apply hysteresis thresholding when true
	outlineSize, windowSize, thinningFactor int     // padding, window and thinning settings
	ratio, smoothingFactor                  float32 // threshold ratio and ISEF smoothing factor
}
// i2d allocates a zeroed 2-D byte grid sized to the detector's padded
// buffer dimensions (indexed [x][y]).
func (d *Detector) i2d() [][]uint8 {
	grid := make([][]uint8, d.bufSize.X)
	for i := 0; i < len(grid); i++ {
		grid[i] = make([]uint8, d.bufSize.Y)
	}
	return grid
}
// f2d allocates a zeroed 2-D float32 grid sized to the detector's padded
// buffer dimensions (indexed [x][y]).
func (d *Detector) f2d() [][]float32 {
	grid := make([][]float32, d.bufSize.X)
	for i := 0; i < len(grid); i++ {
		grid[i] = make([]float32, d.bufSize.Y)
	}
	return grid
}
// computeBLI builds the binary Laplacian image from the band-limited
// Laplacian of the input. Per Shen and Castan, the difference between the
// smoothed image and the original approximates the band-limited Laplacian;
// the BLI marks each work-area pixel 1 where that Laplacian is positive
// and 0 otherwise.
func (d *Detector) computeBLI() {
	d.bli = d.i2d()
	r := d.workRect
	for y := r.Min.Y; y < r.Max.Y; y++ {
		for x := r.Min.X; x < r.Max.X; x++ {
			if d.smoothed[x][y] > d.buffer[x][y] {
				d.bli[x][y] = 1
			}
		}
	}
}
// inputGray converts the source image to grayscale, derives the buffer
// dimensions, and loads pixel intensities (the 16-bit channel values
// returned by RGBA) into the padded floating point work buffer.
func (d *Detector) inputGray() {
	bounds := d.original.Bounds()
	d.gray = image.NewGray(bounds)
	d.graySize = bounds.Size()
	draw.Draw(d.gray, bounds, d.original, image.ZP, draw.Src)

	// Padding / work-area geometry depends on graySize, so compute it now.
	d.calcSizes()

	// Copy intensities into the work buffer, offset into the padded region.
	d.buffer = d.f2d()
	src, dst := d.gray.Bounds(), d.workRect
	for y1, y2 := src.Min.Y, dst.Min.Y; y1 < src.Max.Y; y1, y2 = y1+1, y2+1 {
		for x1, x2 := src.Min.X, dst.Min.X; x1 < src.Max.X; x1, x2 = x1+1, x2+1 {
			v, _, _, _ := d.gray.At(x1, y1).RGBA()
			d.buffer[x2][y2] = float32(v)
		}
	}
}
// outputGray renders the final edge map into a grayscale image the size of
// the input, painting detected edge pixels with the foreground color and
// everything else with the background color.
func (d *Detector) outputGray() *image.Gray {
	edges := image.NewGray(d.gray.Bounds())
	dst, src := edges.Bounds(), d.workRect
	for y1, y2 := dst.Min.Y, src.Min.Y; y1 < dst.Max.Y; y1, y2 = y1+1, y2+1 {
		for x1, x2 := dst.Min.X, src.Min.X; x1 < dst.Max.X; x1, x2 = x1+1, x2+1 {
			c := d.bgColor
			if d.res[x2][y2] > 0 {
				c = d.fgColor
			}
			edges.Set(x1, y1, c)
		}
	}
	return edges
}
// calcSizes derives the padded buffer dimensions and the interior working
// rectangle from the grayscale image size and the filter window size. The
// padding (outlineSize) leaves room for the window to overhang the image
// edges on every side.
func (d *Detector) calcSizes() {
	d.outlineSize = (d.windowSize/2 + 1) * 2
	d.bufSize.X = d.graySize.X + (d.outlineSize * 2)
	d.bufSize.Y = d.graySize.Y + (d.outlineSize * 2)
	min := image.Point{X: 0 + d.outlineSize, Y: 0 + d.outlineSize}
	// NOTE(review): Max extends one past (bufSize - outlineSize), and the
	// outlineSize here is double the one SetWindowSize computes — confirm
	// both offsets are intentional.
	max := image.Point{X: (d.bufSize.X - d.outlineSize) + 1, Y: (d.bufSize.Y - d.outlineSize) + 1}
	d.workRect = image.Rectangle{Min: min, Max: max}
}
// ShenCastan runs the full Shen-Castan (ISEF) edge detection pipeline on
// the detector's image and returns the resulting edge map as a grayscale
// image. The stages run in a fixed order, each consuming the previous
// stage's buffers.
func (d *Detector) ShenCastan() *image.Gray {
	d.inputGray()
	// smooth input image using recursively implemented ISEF filter
	d.computeISEF()
	// compute binary Laplacian image from smoothed image
	d.computeBLI()
	// perform edge detection using bli and gradient thresholding
	d.locateZeroCrossings()
	// perform hysteresis to remove false positives
	d.thresholdEdges()
	return d.outputGray()
}
// SetThinningFactor overrides the default edge-thinning factor.
func (d *Detector) SetThinningFactor(factor int) {
	d.thinningFactor = factor
}

// SetSmoothingFactor overrides the default ISEF smoothing factor.
func (d *Detector) SetSmoothingFactor(factor float32) {
	d.smoothingFactor = factor
}

// DoHysteresis enables or disables hysteresis thresholding.
func (d *Detector) DoHysteresis(b bool) {
	d.doHysteresis = b
}

// SetRatio overrides the default thresholding ratio.
func (d *Detector) SetRatio(r float32) {
	d.ratio = r
}

// SetWindowSize sets the gradient window size; even values or values below
// 3 are silently ignored.
// NOTE(review): this computes outlineSize as (size/2)+1 while calcSizes
// uses ((size/2)+1)*2. calcSizes later overwrites the value during
// inputGray, but the inconsistency looks suspicious — confirm intended.
func (d *Detector) SetWindowSize(size int) {
	if size >= 3 && (size%2 != 0) {
		d.windowSize = size
		d.outlineSize = (size / 2) + 1
	}
}

// SetForegroundColor sets the color used to paint edge pixels.
func (d *Detector) SetForegroundColor(c color.Color) {
	d.fgColor = c
}

// SetBackgroundColor sets the color used for non-edge pixels.
func (d *Detector) SetBackgroundColor(c color.Color) {
	d.bgColor = c
}
func NewEdgeDetector(img image.Image) *Detector {
return &Detector{
doHysteresis: true,
thinningFactor: ThinningFactor,
windowSize: WindowSize,
ratio: Ratio,
smoothingFactor: SmoothingFactor,
original: img,
bgColor: image.White.C,
fgColor: image.Black.C,
}
} | edges.go | 0.710327 | 0.438364 | edges.go | starcoder |
package schemes
import "image/color"
// PGAitch is a gradient color scheme from a dismal dark to a bright
// yellow.
var PGAitch []color.Color
func init() {
PGAitch = []color.Color{
color.RGBA{R: 0xff, G: 0xfe, B: 0xa5, A: 0xff},
color.RGBA{R: 0xff, G: 0xfe, B: 0xa4, A: 0xff},
color.RGBA{R: 0xff, G: 0xfd, B: 0xa3, A: 0xff},
color.RGBA{R: 0xff, G: 0xfd, B: 0xa2, A: 0xff},
color.RGBA{R: 0xff, G: 0xfd, B: 0xa1, A: 0xff},
color.RGBA{R: 0xff, G: 0xfc, B: 0xa0, A: 0xff},
color.RGBA{R: 0xff, G: 0xfc, B: 0x9f, A: 0xff},
color.RGBA{R: 0xff, G: 0xfc, B: 0x9d, A: 0xff},
color.RGBA{R: 0xff, G: 0xfb, B: 0x9c, A: 0xff},
color.RGBA{R: 0xff, G: 0xfb, B: 0x9b, A: 0xff},
color.RGBA{R: 0xff, G: 0xfb, B: 0x99, A: 0xff},
color.RGBA{R: 0xff, G: 0xfa, B: 0x98, A: 0xff},
color.RGBA{R: 0xff, G: 0xfa, B: 0x96, A: 0xff},
color.RGBA{R: 0xff, G: 0xfa, B: 0x95, A: 0xff},
color.RGBA{R: 0xff, G: 0xf9, B: 0x94, A: 0xff},
color.RGBA{R: 0xff, G: 0xf9, B: 0x92, A: 0xff},
color.RGBA{R: 0xff, G: 0xf9, B: 0x91, A: 0xff},
color.RGBA{R: 0xff, G: 0xf8, B: 0x8f, A: 0xff},
color.RGBA{R: 0xff, G: 0xf8, B: 0x8d, A: 0xff},
color.RGBA{R: 0xff, G: 0xf8, B: 0x8b, A: 0xff},
color.RGBA{R: 0xff, G: 0xf7, B: 0x8a, A: 0xff},
color.RGBA{R: 0xff, G: 0xf7, B: 0x88, A: 0xff},
color.RGBA{R: 0xff, G: 0xf6, B: 0x86, A: 0xff},
color.RGBA{R: 0xff, G: 0xf6, B: 0x84, A: 0xff},
color.RGBA{R: 0xff, G: 0xf6, B: 0x82, A: 0xff},
color.RGBA{R: 0xff, G: 0xf5, B: 0x81, A: 0xff},
color.RGBA{R: 0xff, G: 0xf5, B: 0x7f, A: 0xff},
color.RGBA{R: 0xff, G: 0xf5, B: 0x7d, A: 0xff},
color.RGBA{R: 0xff, G: 0xf4, B: 0x7b, A: 0xff},
color.RGBA{R: 0xff, G: 0xf4, B: 0x79, A: 0xff},
color.RGBA{R: 0xff, G: 0xf3, B: 0x77, A: 0xff},
color.RGBA{R: 0xff, G: 0xf3, B: 0x75, A: 0xff},
color.RGBA{R: 0xff, G: 0xf2, B: 0x72, A: 0xff},
color.RGBA{R: 0xff, G: 0xf2, B: 0x70, A: 0xff},
color.RGBA{R: 0xff, G: 0xf1, B: 0x6f, A: 0xff},
color.RGBA{R: 0xff, G: 0xf1, B: 0x6d, A: 0xff},
color.RGBA{R: 0xff, G: 0xf0, B: 0x6b, A: 0xff},
color.RGBA{R: 0xff, G: 0xf0, B: 0x69, A: 0xff},
color.RGBA{R: 0xff, G: 0xef, B: 0x66, A: 0xff},
color.RGBA{R: 0xff, G: 0xef, B: 0x64, A: 0xff},
color.RGBA{R: 0xff, G: 0xee, B: 0x63, A: 0xff},
color.RGBA{R: 0xff, G: 0xee, B: 0x61, A: 0xff},
color.RGBA{R: 0xff, G: 0xed, B: 0x5f, A: 0xff},
color.RGBA{R: 0xff, G: 0xed, B: 0x5c, A: 0xff},
color.RGBA{R: 0xff, G: 0xec, B: 0x5a, A: 0xff},
color.RGBA{R: 0xff, G: 0xed, B: 0x59, A: 0xff},
color.RGBA{R: 0xff, G: 0xec, B: 0x57, A: 0xff},
color.RGBA{R: 0xff, G: 0xeb, B: 0x54, A: 0xff},
color.RGBA{R: 0xff, G: 0xeb, B: 0x52, A: 0xff},
color.RGBA{R: 0xff, G: 0xea, B: 0x50, A: 0xff},
color.RGBA{R: 0xff, G: 0xe9, B: 0x4f, A: 0xff},
color.RGBA{R: 0xff, G: 0xe9, B: 0x4d, A: 0xff},
color.RGBA{R: 0xff, G: 0xe8, B: 0x4a, A: 0xff},
color.RGBA{R: 0xff, G: 0xe7, B: 0x48, A: 0xff},
color.RGBA{R: 0xff, G: 0xe6, B: 0x46, A: 0xff},
color.RGBA{R: 0xff, G: 0xe6, B: 0x45, A: 0xff},
color.RGBA{R: 0xff, G: 0xe5, B: 0x43, A: 0xff},
color.RGBA{R: 0xff, G: 0xe4, B: 0x41, A: 0xff},
color.RGBA{R: 0xff, G: 0xe3, B: 0x3f, A: 0xff},
color.RGBA{R: 0xff, G: 0xe2, B: 0x3d, A: 0xff},
color.RGBA{R: 0xff, G: 0xe1, B: 0x3c, A: 0xff},
color.RGBA{R: 0xff, G: 0xe1, B: 0x3a, A: 0xff},
color.RGBA{R: 0xff, G: 0xe0, B: 0x38, A: 0xff},
color.RGBA{R: 0xff, G: 0xdf, B: 0x36, A: 0xff},
color.RGBA{R: 0xff, G: 0xde, B: 0x34, A: 0xff},
color.RGBA{R: 0xff, G: 0xde, B: 0x33, A: 0xff},
color.RGBA{R: 0xff, G: 0xdd, B: 0x31, A: 0xff},
color.RGBA{R: 0xff, G: 0xdc, B: 0x2f, A: 0xff},
color.RGBA{R: 0xff, G: 0xdb, B: 0x2e, A: 0xff},
color.RGBA{R: 0xff, G: 0xda, B: 0x2c, A: 0xff},
color.RGBA{R: 0xff, G: 0xd8, B: 0x2b, A: 0xff},
color.RGBA{R: 0xff, G: 0xd7, B: 0x2a, A: 0xff},
color.RGBA{R: 0xff, G: 0xd6, B: 0x29, A: 0xff},
color.RGBA{R: 0xff, G: 0xd5, B: 0x27, A: 0xff},
color.RGBA{R: 0xff, G: 0xd4, B: 0x27, A: 0xff},
color.RGBA{R: 0xff, G: 0xd3, B: 0x25, A: 0xff},
color.RGBA{R: 0xff, G: 0xd1, B: 0x24, A: 0xff},
color.RGBA{R: 0xff, G: 0xd0, B: 0x22, A: 0xff},
color.RGBA{R: 0xff, G: 0xd0, B: 0x21, A: 0xff},
color.RGBA{R: 0xff, G: 0xce, B: 0x21, A: 0xff},
color.RGBA{R: 0xff, G: 0xcd, B: 0x20, A: 0xff},
color.RGBA{R: 0xff, G: 0xcc, B: 0x1e, A: 0xff},
color.RGBA{R: 0xff, G: 0xca, B: 0x1d, A: 0xff},
color.RGBA{R: 0xff, G: 0xc9, B: 0x1d, A: 0xff},
color.RGBA{R: 0xff, G: 0xc7, B: 0x1c, A: 0xff},
color.RGBA{R: 0xfe, G: 0xc7, B: 0x1c, A: 0xff},
color.RGBA{R: 0xfe, G: 0xc7, B: 0x1b, A: 0xff},
color.RGBA{R: 0xfd, G: 0xc6, B: 0x1b, A: 0xff},
color.RGBA{R: 0xfc, G: 0xc5, B: 0x1b, A: 0xff},
color.RGBA{R: 0xfb, G: 0xc4, B: 0x1b, A: 0xff},
color.RGBA{R: 0xfa, G: 0xc3, B: 0x1a, A: 0xff},
color.RGBA{R: 0xf9, G: 0xc3, B: 0x1a, A: 0xff},
color.RGBA{R: 0xf8, G: 0xc2, B: 0x1a, A: 0xff},
color.RGBA{R: 0xf8, G: 0xc1, B: 0x1a, A: 0xff},
color.RGBA{R: 0xf7, G: 0xc0, B: 0x1a, A: 0xff},
color.RGBA{R: 0xf6, G: 0xc0, B: 0x19, A: 0xff},
color.RGBA{R: 0xf5, G: 0xbf, B: 0x1a, A: 0xff},
color.RGBA{R: 0xf4, G: 0xbe, B: 0x1a, A: 0xff},
color.RGBA{R: 0xf3, G: 0xbd, B: 0x19, A: 0xff},
color.RGBA{R: 0xf1, G: 0xbc, B: 0x19, A: 0xff},
color.RGBA{R: 0xf0, G: 0xbb, B: 0x19, A: 0xff},
color.RGBA{R: 0xef, G: 0xbb, B: 0x19, A: 0xff},
color.RGBA{R: 0xee, G: 0xba, B: 0x19, A: 0xff},
color.RGBA{R: 0xec, G: 0xb9, B: 0x19, A: 0xff},
color.RGBA{R: 0xec, G: 0xb8, B: 0x1a, A: 0xff},
color.RGBA{R: 0xeb, G: 0xb7, B: 0x1a, A: 0xff},
color.RGBA{R: 0xe9, G: 0xb6, B: 0x19, A: 0xff},
color.RGBA{R: 0xe8, G: 0xb5, B: 0x19, A: 0xff},
color.RGBA{R: 0xe6, G: 0xb5, B: 0x1a, A: 0xff},
color.RGBA{R: 0xe5, G: 0xb4, B: 0x1a, A: 0xff},
color.RGBA{R: 0xe4, G: 0xb3, B: 0x19, A: 0xff},
color.RGBA{R: 0xe3, G: 0xb2, B: 0x19, A: 0xff},
color.RGBA{R: 0xe2, G: 0xb1, B: 0x1a, A: 0xff},
color.RGBA{R: 0xe0, G: 0xb0, B: 0x1a, A: 0xff},
color.RGBA{R: 0xde, G: 0xb0, B: 0x19, A: 0xff},
color.RGBA{R: 0xdd, G: 0xaf, B: 0x19, A: 0xff},
color.RGBA{R: 0xdc, G: 0xad, B: 0x1a, A: 0xff},
color.RGBA{R: 0xdb, G: 0xac, B: 0x1a, A: 0xff},
color.RGBA{R: 0xd9, G: 0xab, B: 0x19, A: 0xff},
color.RGBA{R: 0xd7, G: 0xaa, B: 0x19, A: 0xff},
color.RGBA{R: 0xd6, G: 0xaa, B: 0x1a, A: 0xff},
color.RGBA{R: 0xd4, G: 0xa9, B: 0x1a, A: 0xff},
color.RGBA{R: 0xd3, G: 0xa7, B: 0x19, A: 0xff},
color.RGBA{R: 0xd1, G: 0xa6, B: 0x19, A: 0xff},
color.RGBA{R: 0xd0, G: 0xa6, B: 0x1a, A: 0xff},
color.RGBA{R: 0xce, G: 0xa5, B: 0x1a, A: 0xff},
color.RGBA{R: 0xcc, G: 0xa3, B: 0x1a, A: 0xff},
color.RGBA{R: 0xcb, G: 0xa2, B: 0x1a, A: 0xff},
color.RGBA{R: 0xca, G: 0xa1, B: 0x19, A: 0xff},
color.RGBA{R: 0xc8, G: 0xa1, B: 0x1a, A: 0xff},
color.RGBA{R: 0xc6, G: 0x9f, B: 0x1a, A: 0xff},
color.RGBA{R: 0xc5, G: 0x9e, B: 0x1a, A: 0xff},
color.RGBA{R: 0xc3, G: 0x9d, B: 0x1a, A: 0xff},
color.RGBA{R: 0xc1, G: 0x9d, B: 0x1b, A: 0xff},
color.RGBA{R: 0xc0, G: 0x9b, B: 0x1b, A: 0xff},
color.RGBA{R: 0xbe, G: 0x9a, B: 0x1b, A: 0xff},
color.RGBA{R: 0xbd, G: 0x99, B: 0x1b, A: 0xff},
color.RGBA{R: 0xbb, G: 0x98, B: 0x1c, A: 0xff},
color.RGBA{R: 0xba, G: 0x97, B: 0x1c, A: 0xff},
color.RGBA{R: 0xb8, G: 0x96, B: 0x1c, A: 0xff},
color.RGBA{R: 0xb6, G: 0x95, B: 0x1c, A: 0xff},
color.RGBA{R: 0xb5, G: 0x94, B: 0x1d, A: 0xff},
color.RGBA{R: 0xb3, G: 0x93, B: 0x1d, A: 0xff},
color.RGBA{R: 0xb1, G: 0x92, B: 0x1d, A: 0xff},
color.RGBA{R: 0xaf, G: 0x90, B: 0x1d, A: 0xff},
color.RGBA{R: 0xae, G: 0x90, B: 0x1e, A: 0xff},
color.RGBA{R: 0xac, G: 0x8e, B: 0x1e, A: 0xff},
color.RGBA{R: 0xaa, G: 0x8d, B: 0x1e, A: 0xff},
color.RGBA{R: 0xa9, G: 0x8c, B: 0x1e, A: 0xff},
color.RGBA{R: 0xa7, G: 0x8b, B: 0x1f, A: 0xff},
color.RGBA{R: 0xa5, G: 0x8a, B: 0x1f, A: 0xff},
color.RGBA{R: 0xa4, G: 0x89, B: 0x1f, A: 0xff},
color.RGBA{R: 0xa2, G: 0x88, B: 0x1f, A: 0xff},
color.RGBA{R: 0xa1, G: 0x87, B: 0x20, A: 0xff},
color.RGBA{R: 0x9f, G: 0x86, B: 0x20, A: 0xff},
color.RGBA{R: 0x9d, G: 0x85, B: 0x20, A: 0xff},
color.RGBA{R: 0x9a, G: 0x84, B: 0x20, A: 0xff},
color.RGBA{R: 0x99, G: 0x83, B: 0x21, A: 0xff},
color.RGBA{R: 0x97, G: 0x82, B: 0x21, A: 0xff},
color.RGBA{R: 0x96, G: 0x81, B: 0x21, A: 0xff},
color.RGBA{R: 0x94, G: 0x7f, B: 0x21, A: 0xff},
color.RGBA{R: 0x93, G: 0x7f, B: 0x22, A: 0xff},
color.RGBA{R: 0x91, G: 0x7e, B: 0x22, A: 0xff},
color.RGBA{R: 0x8f, G: 0x7c, B: 0x22, A: 0xff},
color.RGBA{R: 0x8d, G: 0x7b, B: 0x22, A: 0xff},
color.RGBA{R: 0x8c, G: 0x7a, B: 0x23, A: 0xff},
color.RGBA{R: 0x8b, G: 0x79, B: 0x23, A: 0xff},
color.RGBA{R: 0x89, G: 0x78, B: 0x23, A: 0xff},
color.RGBA{R: 0x87, G: 0x77, B: 0x23, A: 0xff},
color.RGBA{R: 0x86, G: 0x76, B: 0x24, A: 0xff},
color.RGBA{R: 0x84, G: 0x75, B: 0x24, A: 0xff},
color.RGBA{R: 0x82, G: 0x74, B: 0x24, A: 0xff},
color.RGBA{R: 0x81, G: 0x73, B: 0x24, A: 0xff},
color.RGBA{R: 0x7f, G: 0x71, B: 0x24, A: 0xff},
color.RGBA{R: 0x7e, G: 0x71, B: 0x25, A: 0xff},
color.RGBA{R: 0x7c, G: 0x70, B: 0x25, A: 0xff},
color.RGBA{R: 0x7a, G: 0x6f, B: 0x25, A: 0xff},
color.RGBA{R: 0x79, G: 0x6e, B: 0x25, A: 0xff},
color.RGBA{R: 0x78, G: 0x6d, B: 0x26, A: 0xff},
color.RGBA{R: 0x76, G: 0x6c, B: 0x26, A: 0xff},
color.RGBA{R: 0x74, G: 0x6b, B: 0x26, A: 0xff},
color.RGBA{R: 0x73, G: 0x69, B: 0x26, A: 0xff},
color.RGBA{R: 0x71, G: 0x68, B: 0x26, A: 0xff},
color.RGBA{R: 0x70, G: 0x68, B: 0x27, A: 0xff},
color.RGBA{R: 0x6e, G: 0x67, B: 0x27, A: 0xff},
color.RGBA{R: 0x6c, G: 0x66, B: 0x27, A: 0xff},
color.RGBA{R: 0x6b, G: 0x65, B: 0x27, A: 0xff},
color.RGBA{R: 0x6a, G: 0x64, B: 0x28, A: 0xff},
color.RGBA{R: 0x68, G: 0x63, B: 0x28, A: 0xff},
color.RGBA{R: 0x66, G: 0x62, B: 0x28, A: 0xff},
color.RGBA{R: 0x65, G: 0x60, B: 0x28, A: 0xff},
color.RGBA{R: 0x63, G: 0x60, B: 0x28, A: 0xff},
color.RGBA{R: 0x63, G: 0x60, B: 0x29, A: 0xff},
color.RGBA{R: 0x61, G: 0x5e, B: 0x29, A: 0xff},
color.RGBA{R: 0x60, G: 0x5d, B: 0x29, A: 0xff},
color.RGBA{R: 0x5e, G: 0x5c, B: 0x29, A: 0xff},
color.RGBA{R: 0x5c, G: 0x5b, B: 0x29, A: 0xff},
color.RGBA{R: 0x5c, G: 0x5a, B: 0x2a, A: 0xff},
color.RGBA{R: 0x5a, G: 0x5a, B: 0x2a, A: 0xff},
color.RGBA{R: 0x59, G: 0x59, B: 0x2a, A: 0xff},
color.RGBA{R: 0x57, G: 0x57, B: 0x2a, A: 0xff},
color.RGBA{R: 0x56, G: 0x56, B: 0x2a, A: 0xff},
color.RGBA{R: 0x55, G: 0x56, B: 0x2b, A: 0xff},
color.RGBA{R: 0x54, G: 0x55, B: 0x2b, A: 0xff},
color.RGBA{R: 0x53, G: 0x54, B: 0x2b, A: 0xff},
color.RGBA{R: 0x51, G: 0x53, B: 0x2b, A: 0xff},
color.RGBA{R: 0x50, G: 0x52, B: 0x2b, A: 0xff},
color.RGBA{R: 0x50, G: 0x52, B: 0x2c, A: 0xff},
color.RGBA{R: 0x4e, G: 0x50, B: 0x2c, A: 0xff},
color.RGBA{R: 0x4d, G: 0x50, B: 0x2c, A: 0xff},
color.RGBA{R: 0x4b, G: 0x4f, B: 0x2c, A: 0xff},
color.RGBA{R: 0x4b, G: 0x4e, B: 0x2c, A: 0xff},
color.RGBA{R: 0x4a, G: 0x4e, B: 0x2d, A: 0xff},
color.RGBA{R: 0x49, G: 0x4c, B: 0x2d, A: 0xff},
color.RGBA{R: 0x47, G: 0x4b, B: 0x2d, A: 0xff},
color.RGBA{R: 0x47, G: 0x4b, B: 0x2d, A: 0xff},
color.RGBA{R: 0x46, G: 0x4a, B: 0x2d, A: 0xff},
color.RGBA{R: 0x45, G: 0x4a, B: 0x2e, A: 0xff},
color.RGBA{R: 0x44, G: 0x49, B: 0x2e, A: 0xff},
color.RGBA{R: 0x43, G: 0x48, B: 0x2e, A: 0xff},
color.RGBA{R: 0x42, G: 0x47, B: 0x2e, A: 0xff},
color.RGBA{R: 0x41, G: 0x47, B: 0x2e, A: 0xff},
color.RGBA{R: 0x40, G: 0x45, B: 0x2e, A: 0xff},
color.RGBA{R: 0x40, G: 0x45, B: 0x2f, A: 0xff},
color.RGBA{R: 0x3f, G: 0x44, B: 0x2f, A: 0xff},
color.RGBA{R: 0x3e, G: 0x43, B: 0x2f, A: 0xff},
color.RGBA{R: 0x3d, G: 0x43, B: 0x2f, A: 0xff},
color.RGBA{R: 0x3c, G: 0x42, B: 0x2f, A: 0xff},
color.RGBA{R: 0x3b, G: 0x41, B: 0x2f, A: 0xff},
color.RGBA{R: 0x3b, G: 0x41, B: 0x30, A: 0xff},
color.RGBA{R: 0x3b, G: 0x40, B: 0x30, A: 0xff},
color.RGBA{R: 0x3a, G: 0x3f, B: 0x30, A: 0xff},
color.RGBA{R: 0x39, G: 0x3f, B: 0x30, A: 0xff},
color.RGBA{R: 0x38, G: 0x3e, B: 0x30, A: 0xff},
color.RGBA{R: 0x38, G: 0x3e, B: 0x30, A: 0xff},
color.RGBA{R: 0x37, G: 0x3d, B: 0x30, A: 0xff},
color.RGBA{R: 0x37, G: 0x3d, B: 0x31, A: 0xff},
color.RGBA{R: 0x37, G: 0x3c, B: 0x31, A: 0xff},
color.RGBA{R: 0x37, G: 0x3c, B: 0x31, A: 0xff},
color.RGBA{R: 0x36, G: 0x3b, B: 0x31, A: 0xff},
color.RGBA{R: 0x35, G: 0x3a, B: 0x31, A: 0xff},
color.RGBA{R: 0x35, G: 0x39, B: 0x31, A: 0xff},
color.RGBA{R: 0x34, G: 0x39, B: 0x31, A: 0xff},
color.RGBA{R: 0x34, G: 0x39, B: 0x32, A: 0xff},
color.RGBA{R: 0x34, G: 0x38, B: 0x32, A: 0xff},
color.RGBA{R: 0x34, G: 0x38, B: 0x32, A: 0xff},
color.RGBA{R: 0x34, G: 0x38, B: 0x32, A: 0xff},
color.RGBA{R: 0x34, G: 0x37, B: 0x32, A: 0xff},
color.RGBA{R: 0x33, G: 0x36, B: 0x32, A: 0xff},
color.RGBA{R: 0x33, G: 0x35, B: 0x32, A: 0xff},
color.RGBA{R: 0x33, G: 0x35, B: 0x32, A: 0xff},
color.RGBA{R: 0x33, G: 0x34, B: 0x32, A: 0xff},
color.RGBA{R: 0x33, G: 0x35, B: 0x33, A: 0xff},
color.RGBA{R: 0x33, G: 0x35, B: 0x33, A: 0xff},
color.RGBA{R: 0x33, G: 0x34, B: 0x33, A: 0xff},
color.RGBA{R: 0x33, G: 0x34, B: 0x33, A: 0xff},
}
} | schemes/pgaitch.go | 0.550607 | 0.639229 | pgaitch.go | starcoder |
package main
// Rate Limiting is an important mechanism for controlling resource utilization and maintaining
// quality of service. Go elegantlt supports rate limiting with Goroutines, channels and tickers
import (
"fmt"
"time"
)
// main demonstrates two rate-limiting techniques built from goroutines,
// channels and tickers: a steady ticker-regulated limiter and a bursty
// limiter backed by a buffered channel.
func main() {
	// First we'll look at basic rate limiting.
	// Suppose we want to limit our handling of incoming requests. We'll serve
	// these requests off a channel of the same name.
	requests := make(chan int, 5)
	for i := 1; i <= 5; i++ {
		requests <- i
		fmt.Println("Wrote request", i, "to requests channel")
	}
	close(requests)

	// The limiter channel will receive a value every 500 milliseconds.
	// This is the regulator in our rate limiting scheme.
	limiter := time.Tick(500 * time.Millisecond)

	// By blocking on a receive from the limiter channel before serving each
	// request, we limit ourselves to 1 request every 500 milliseconds.
	for req := range requests {
		<-limiter
		fmt.Println("request", req, time.Now())
	}

	fmt.Printf("\nBurst limiting\n\n")

	// We may want to allow short bursts of requests in our rate limiting
	// scheme while preserving the overall rate limit. We can accomplish this
	// by buffering our limiter channel. This burstyLimiter channel will allow
	// bursts of up to 3 events.
	burstyLimiter := make(chan time.Time, 3)

	// Fill up the channel to represent allowed bursting.
	for i := 0; i < 3; i++ {
		burstyLimiter <- time.Now()
	}

	// Every 200 milliseconds we'll try to add a new value to burstyLimiter,
	// up to its limit of 3.
	go func() {
		for t := range time.Tick(200 * time.Millisecond) {
			burstyLimiter <- t
		}
	}()

	// Now simulate 5 more incoming requests. The first 3 of these will
	// benefit from the burst capability of burstyLimiter.
	const capacity int = 5
	requests = make(chan int, capacity)
	for i := 1; i <= capacity; i++ {
		requests <- i
		// fmt.Println("Wrote request ", i, "to requests")
	}
	close(requests) // close it so the below range doesn't wait forever
	for req := range requests {
		<-burstyLimiter
		fmt.Println("request", req, time.Now())
	}

	// Running the first part we see requests handled once every ~500
	// milliseconds. For the second batch, we serve the first 3 immediately
	// thanks to burstable rate limiting, then the remaining 2 with ~200ms
	// delays each.
}
package tree_node
import (
"fmt"
"os"
)
// TreeSearch returns the node of the BST rooted at root that holds target,
// or nil when no such node exists.
func TreeSearch(root *TreeNode, target int) *TreeNode {
	switch {
	case root == nil || root.item == target: // Base cases: missing or found
		return root
	case target < root.item: // Descend into the left sub-tree
		return TreeSearch(root.left, target)
	default: // Descend into the right sub-tree
		return TreeSearch(root.right, target)
	}
}
// TreeInsert places insertMe at its ordered position in the BST rooted at
// *root, creating a leaf when the slot is empty, and refreshes the cached
// height of every node along the insertion path.
func TreeInsert(root **TreeNode, insertMe int) {
	switch {
	case *root == nil: // Empty slot: create the node here
		*root = NewTreeNode(insertMe, nil, nil)
	case insertMe < (*root).item: // Smaller values go left
		TreeInsert(&(*root).left, insertMe)
	default: // Everything else goes right
		TreeInsert(&(*root).right, insertMe)
	}
	(*root).UpdateHeight() // Height may have changed below us
}
// TreeCopy returns a structurally identical deep copy of the BST rooted at
// root (nil yields nil).
func TreeCopy(root *TreeNode) *TreeNode {
	if root == nil {
		return nil
	}
	left := TreeCopy(root.left)
	right := TreeCopy(root.right)
	return NewTreeNode(root.item, left, right)
}
// TreeClear detaches every node of the BST and leaves *root nil; the
// garbage collector reclaims the nodes once they are unreferenced.
func TreeClear(root **TreeNode) {
	if *root == nil { // Nothing to clear
		return
	}
	TreeClear(&(*root).left)  // Clear left sub-tree first,
	TreeClear(&(*root).right) // then the right sub-tree,
	*root = nil               // finally detach this node
}
// HeightHelper refreshes the cached height of *root, tolerating a nil node
// (useful as a deferred cleanup after deletions that may nil the root).
func HeightHelper(root **TreeNode) {
	if *root == nil {
		return
	}
	(*root).UpdateHeight()
}
// Postcondition: Delete the given target from BST object. If cannot find
// target then return false and do nothing to BST. Otherwise, delete
// the target tree node, and adjust the BST object accordingly.
func TreeErase(root **TreeNode, target int) bool {
	if *root == nil { // Base case
		return false
	}
	// Deferred so the height refresh runs after *root may have been
	// reassigned below; it also fires harmlessly on the not-found paths
	// (HeightHelper tolerates a nil node).
	defer HeightHelper(root) // Update tree height
	if target == (*root).item { // Case 4: Found the target tree node
		if (*root).left == nil { // Case 4.a: Root has no left sub-tree,
			*root = (*root).right // bypass root and connect the right child
		} else { // Case 4.b: Target has a left child: Replace target with left child's
			TreeRemoveMax(&(*root).left, &(*root).item) // rightmost child. Eliminate the
		} // rightmost child from left sub-tree.
		return true
	} else if target < (*root).item { // Recursive step: Search left sub-tree
		return TreeErase(&(*root).left, target)
	}
	return TreeErase(&(*root).right, target) // Recursive step: Search right sub-tree
}
// ReplaceMax copies the maximum (rightmost) value of the subtree rooted at
// *root into *target's item, removes that rightmost node by splicing its
// left child into its place, and refreshes heights along the search path.
func ReplaceMax(root **TreeNode, target **TreeNode) {
	if (*root).right == nil { // Base case: *root holds the maximum
		(*target).item = (*root).item // Replace target with max value
		// BUG FIX: splice in the left child. The previous code assigned
		// (*root).right, which is always nil here, silently discarding the
		// rightmost node's left subtree (TreeRemoveMax does this correctly).
		(*root) = (*root).left
		return
	}
	ReplaceMax(&(*root).right, target) // Recursive step: walk right for max
	(*root).UpdateHeight()             // Update tree height afterward
}
// TreeRemoveMax deletes the rightmost (maximum) node of the subtree rooted
// at *root, writes its value through maxVal, splices the removed node's
// left child into its place, and refreshes heights along the path.
func TreeRemoveMax(root **TreeNode, maxVal *int) {
	if (*root).right == nil { // Base case: this node is the maximum
		*maxVal = (*root).item
		*root = (*root).left // Reconnect its left sub-tree
		return
	}
	TreeRemoveMax(&(*root).right, maxVal) // Keep walking right
	(*root).UpdateHeight()                // Height may have shrunk below us
}
// Postcondition: Insert all values in second BST object to first BST object.
// src is visited in pre-order; each value goes through TreeInsert so the
// destination keeps its BST ordering and per-path height updates.
func TreeAdd(dest **TreeNode, src *TreeNode) {
	if src == nil { // Base case
		return
	}
	TreeInsert(dest, src.item) // Insert current node to first BST
	// Recursive step
	TreeAdd(dest, src.left)  // Insert left sub-tree
	TreeAdd(dest, src.right) // Insert right sub-tree
	(*dest).UpdateHeight()   // Update tree height
}
// FromSortedList builds a height-balanced BST from a sorted slice by
// recursively rooting each subtree at its middle element.
func FromSortedList(arr []int) *TreeNode {
	if len(arr) == 0 { // Base case: empty slice, empty tree
		return nil
	}
	mid := len(arr) / 2
	// Elements before mid form the left subtree, those after it the right.
	return NewTreeNode(arr[mid], FromSortedList(arr[:mid]), FromSortedList(arr[mid+1:]))
}
// TreePrint writes the BST to stdout rotated 90 degrees: right subtree on
// top, each level indented four spaces per unit of depth.
func TreePrint(root *TreeNode, depth int) {
	if root == nil {
		return
	}
	TreePrint(root.right, depth+1)
	fmt.Printf("%*s[%d]\n", 4*depth, "", root.item)
	TreePrint(root.left, depth+1)
}
// =================================
// Traversal
// =================================

// InOrder writes the tree's values in (left, root, right) order to out,
// each in a 4-wide column. It panics on a write error.
func InOrder(root *TreeNode, out *os.File) {
	if root == nil {
		return
	}
	InOrder(root.left, out)
	// fmt.Fprintf writes straight to out, avoiding the intermediate
	// Sprintf string and []byte copy of the previous implementation.
	if _, err := fmt.Fprintf(out, "%4d", root.item); err != nil {
		panic(err)
	}
	InOrder(root.right, out)
}

// PreOrder writes the tree's values in (root, left, right) order to out,
// each in a 4-wide column. It panics on a write error.
func PreOrder(root *TreeNode, out *os.File) {
	if root == nil {
		return
	}
	if _, err := fmt.Fprintf(out, "%4d", root.item); err != nil {
		panic(err)
	}
	PreOrder(root.left, out)
	PreOrder(root.right, out)
}

// PostOrder writes the tree's values in (left, right, root) order to out,
// each in a 4-wide column. It panics on a write error.
func PostOrder(root *TreeNode, out *os.File) {
	if root == nil {
		return
	}
	PostOrder(root.left, out)
	PostOrder(root.right, out)
	if _, err := fmt.Fprintf(out, "%4d", root.item); err != nil {
		panic(err)
	}
}
func InOrderString(root *TreeNode) string {
if root == nil {
return ""
}
return InOrderString(root.left) + fmt.Sprintf("[%d]", root.item) + InOrderString(root.right)
}
func PreOrderString(root *TreeNode) string {
if root == nil {
return ""
}
return fmt.Sprintf("[%d]", root.item) + PreOrderString(root.left) + PreOrderString(root.right)
}
func PostOrderString(root *TreeNode) string {
if root == nil {
return ""
}
return PostOrderString(root.left) + PostOrderString(root.right) + fmt.Sprintf("[%d]", root.item)
} | pkg/tree_node/tree_functions.go | 0.80112 | 0.57687 | tree_functions.go | starcoder |
package pos
import (
"bytes"
"fmt"
)
// Pos records the position of a lexical Item as a stack of indices.
type Pos struct {
	stack []int
}

// New returns a new position pointing before the first symbol.
func New() *Pos {
	p := &Pos{stack: make([]int, 0, 8)}
	return p.Push(0)
}

// From wraps an existing position stack (without copying it) in a Pos.
func From(stack []int) *Pos {
	return &Pos{stack}
}

// Clone returns a deep copy of pos: mutations of the copy never affect the
// original.
func (pos *Pos) Clone() *Pos {
	dup := make([]int, len(pos.stack))
	copy(dup, pos.stack)
	return &Pos{stack: dup}
}

// Equal reports whether both positions hold identical stacks.
func (pos *Pos) Equal(other *Pos) bool {
	if len(pos.stack) != len(other.stack) {
		return false
	}
	for i, v := range pos.stack {
		if v != other.stack[i] {
			return false
		}
	}
	return true
}

// Inc increments the value of pos.Top() and returns pos for chaining.
func (pos *Pos) Inc() *Pos {
	top := len(pos.stack) - 1
	pos.stack[top]++
	return pos
}

// Len returns the number of items on the stack.
func (pos *Pos) Len() int {
	return len(pos.stack)
}

// Peek returns the item at height above the bottom of the stack without
// modifying pos.
func (pos *Pos) Peek(height int) int {
	return pos.stack[height]
}

// Push pushes p onto the stack and returns pos for chaining.
func (pos *Pos) Push(p int) *Pos {
	pos.stack = append(pos.stack, p)
	return pos
}

// Pop removes the top n elements from the stack and returns pos for
// chaining.
func (pos *Pos) Pop(n int) *Pos {
	pos.stack = pos.stack[:len(pos.stack)-n]
	return pos
}

// String renders the stack as "[a,b,c]".
func (pos *Pos) String() string {
	var buf bytes.Buffer
	buf.WriteByte('[')
	for i, p := range pos.stack {
		if i > 0 {
			buf.WriteByte(',')
		}
		fmt.Fprintf(&buf, "%d", p)
	}
	buf.WriteByte(']')
	return buf.String()
}

// Tail returns a copy of pos with its bottom element removed.
func (pos *Pos) Tail() *Pos {
	t := pos.Clone()
	t.stack = t.stack[1:]
	return t
}

// Top returns the value at the top of the stack without changing the stack.
func (pos *Pos) Top() int {
	return pos.Peek(len(pos.stack) - 1)
}
package openapi
// Paths Object
// Holds the relative paths to the individual endpoints and their operations.
// Each path is appended to the URL from the Server Object in order to
// construct the full URL. The Paths MAY be empty, due to ACL constraints.
type Paths struct {
	// A relative path to an individual endpoint. The field name MUST begin with
	// a forward slash (/). The path is appended (no relative URL resolution)
	// to the expanded URL from the Server Object's url field in order to
	// construct the full URL. Path templating is allowed. When matching URLs,
	// concrete (non-templated) paths would be matched before their templated
	// counterparts. Templated paths with the same hierarchy but different
	// templated names MUST NOT exist as they are identical. In case of
	// ambiguous matching, it's up to the tooling to decide which one to use.
	// /{path} *PathItem
	//
	// NOTE(review): the patterned field above is commented out, leaving this
	// struct empty — confirm whether a map-based representation
	// (e.g. map[string]*PathItem) was intended.
	// This object MAY be extended with Specification Extensions.
}
// Path Templating Matching
// Assuming the following paths, the concrete definition, /pets/mine, will be matched first if used:
// /pets/{petId}
// /pets/mine
// The following paths are considered identical and invalid:
// /pets/{petId}
// /pets/{name}
// The following may lead to ambiguous resolution:
// /{entity}/me
// /books/{id}
// Paths Object Example
// {
// "/pets": {
// "get": {
// "description": "Returns all pets from the system that the user has access to",
// "responses": {
// "200": {
// "description": "A list of pets.",
// "content": {
// "application/json": {
// "schema": {
// "type": "array",
// "items": {
// "$ref": "#/components/schemas/pet"
// }
// }
// }
// }
// }
// }
// }
// }
// }
// /pets:
// get:
// description: Returns all pets from the system that the user has access to
// responses:
// '200':
// description: A list of pets.
// content:
// application/json:
// schema:
// type: array
// items:
// $ref: '#/components/schemas/pet'
package nasType
// IntegrityProtectionMaximumDataRate 9.11.4.7
// MaximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink Row, sBit, len = [0, 0], 8 , 8
// MaximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink Row, sBit, len = [1, 1], 8 , 8
// IntegrityProtectionMaximumDataRate is the 2-octet Integrity protection
// maximum data rate IE (3GPP TS 24.501, 9.11.4.7).
// Octet[0] carries the uplink maximum data rate, Octet[1] the downlink rate.
type IntegrityProtectionMaximumDataRate struct {
	Iei   uint8
	Octet [2]uint8
}

// NewIntegrityProtectionMaximumDataRate allocates the IE and stores iei.
// The previous stub body returned nil, which made every accessor call on the
// result panic.
func NewIntegrityProtectionMaximumDataRate(iei uint8) (integrityProtectionMaximumDataRate *IntegrityProtectionMaximumDataRate) {
	integrityProtectionMaximumDataRate = &IntegrityProtectionMaximumDataRate{}
	integrityProtectionMaximumDataRate.SetIei(iei)
	return integrityProtectionMaximumDataRate
}

// GetIei returns the information element identifier.
func (a *IntegrityProtectionMaximumDataRate) GetIei() (iei uint8) {
	return a.Iei
}

// SetIei stores the information element identifier.
func (a *IntegrityProtectionMaximumDataRate) SetIei(iei uint8) {
	a.Iei = iei
}

// GetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink returns
// octet 0, the uplink maximum data rate.
func (a *IntegrityProtectionMaximumDataRate) GetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink() (maximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink uint8) {
	return a.Octet[0]
}

// SetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink stores
// octet 0, the uplink maximum data rate.
func (a *IntegrityProtectionMaximumDataRate) SetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink(maximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink uint8) {
	a.Octet[0] = maximumDataRatePerUEForUserPlaneIntegrityProtectionForUpLink
}

// GetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink returns
// octet 1, the downlink maximum data rate.
func (a *IntegrityProtectionMaximumDataRate) GetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink() (maximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink uint8) {
	return a.Octet[1]
}

// SetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink stores
// octet 1, the downlink maximum data rate.
func (a *IntegrityProtectionMaximumDataRate) SetMaximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink(maximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink uint8) {
	a.Octet[1] = maximumDataRatePerUEForUserPlaneIntegrityProtectionForDownLink
}
package graphReconstruct
import (
"github.com/vertgenlab/gonomics/dna"
"github.com/vertgenlab/gonomics/dnaTwoBit"
"github.com/vertgenlab/gonomics/expandedTree"
"github.com/vertgenlab/gonomics/genomeGraph"
"log"
)
// graphColumn groups, per species, the graph nodes that fall into a single
// alignment column.
type graphColumn struct {
	AlignId int
	AlignNodes map[string][]*genomeGraph.Node // string keys refer to species that key to a slice of pointers to the nodes of that species that fall into a single alignment column
}
// BuildNodes appends, under the root (ancestor) species of column, one new
// node for every unique sequence found across the column's aligned nodes.
// id is the next free node id; the updated next-free id is returned.
func BuildNodes(root *expandedTree.ETree, column graphColumn, id uint32) uint32 {
	// Collect the set of distinct sequences across all species in the column.
	var nodeInfo = make(map[string]bool)
	for _, nodes := range column.AlignNodes { // nodes: all nodes for one species
		for n := range nodes {
			nodeInfo[dna.BasesToString(nodes[n].Seq)] = true
		}
	}
	// One ancestor node per unique sequence, each with a fresh id.
	for seq := range nodeInfo {
		newNode := &genomeGraph.Node{
			Id:        id,
			Seq:       dna.StringToBases(seq),
			SeqTwoBit: dnaTwoBit.NewTwoBit(dna.StringToBases(seq)),
			Next:      nil,
			Prev:      nil,
		}
		column.AlignNodes[root.Name] = append(column.AlignNodes[root.Name], newNode)
		id++
	}
	return id
}
//BuildEdges connects the nodes of a species' graph that are stored in GraphColumns
//func BuildEdges
//start without prob
//loop through species in column, go through all nodes
//check if that node's seq matches the seq of the ancestor node without an edge
//make the next of that node the same as this node's next
//FindAncSeq creates a graph from the node records stored in GraphColumns and then calls PathFinder and seqOfPath to determine the most likely seq of the ancestor before assigning that
//seq to the Fasta field of the ancestor's tree node
//func FindAncSeq will loop through aligncolumns and build a single graph of all of the nodes that belong to the ancestor species after edges are created
//run PathFinder on the graph for the anc, run seqOfPath, then turn that to a fasta for that node of the tree
//seqOfPath takes in a graph and a path specified by the Node IDs and returns the seq of the path through the graph
func seqOfPath(g *genomeGraph.GenomeGraph, path []uint32) []dna.Base {
var seq []dna.Base
var foundInGraph = false
for p := 0; p < len(path); p++ {
foundInGraph = false
for n := 0; n < len(g.Nodes) && !foundInGraph; n++ {
if g.Nodes[n].Id == path[p] {
foundInGraph = true
seq = append(seq, g.Nodes[n].Seq...)
} else {
}
}
if !foundInGraph {
log.Fatal("path is invalid")
}
}
return seq
}
// PathFinder returns the most likely path through g together with its
// probability, exhaustively exploring every path that starts at the node
// whose Id is 0.
func PathFinder(g *genomeGraph.GenomeGraph) ([]uint32, float32) {
	var bestIDs []uint32
	var bestProb float32
	scratch := make([]uint32, 0)
	for i := range g.Nodes {
		if g.Nodes[i].Id == 0 {
			bestProb, bestIDs = bestPath(&g.Nodes[i], 1, scratch)
		}
	}
	return bestIDs, bestProb
}
//bestPath is the helper function for PathFinder, and recursively traverses the graph depth first to determine the most likely path from start to finish
func bestPath(node *genomeGraph.Node, prevProb float32, path []uint32) (prob float32, pathOut []uint32) {
var tempProb float32 = 0
var finalProb float32
var finalPath []uint32
path = append(path, node.Id)
if len(node.Next) == 0 {
return prevProb, path
}
for i := range node.Next {
tempProb = node.Next[i].Prob * prevProb
currentProb, currentPath := bestPath(node.Next[i].Dest, tempProb, path)
if currentProb > finalProb {
finalProb = currentProb
finalPath = currentPath
}
}
return finalProb, finalPath
} | graphReconstruct/graphReconstruct.go | 0.628521 | 0.661971 | graphReconstruct.go | starcoder |
package expect
import (
"errors"
"fmt"
"reflect"
)
// Be is the assertion chain for type/value expectations on a single value.
// Else records failures for conditional follow-ups; And enables fluent
// chaining and points back at this same Be.
type Be struct {
	Else *Else
	And  *Be
	t      T
	actual interface{}
	assert bool // false inverts every expectation (".Not" style)
}

// newBe builds a Be around actual; assert=false negates all assertions.
func newBe(t T, e *Else, actual interface{}, assert bool) *Be {
	be := &Be{
		Else:   e,
		t:      t,
		actual: actual,
		assert: assert,
	}
	// Self-reference so chained calls keep operating on the same Be.
	be.And = be
	return be
}
// Above asserts the numeric value is above the given value (> e).
// Fatals if actual is not numeric (see Num).
func (b *Be) Above(e float64) *Be {
	msg := b.msg(fmt.Sprintf("above %v", e))
	if b.Num() > e != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Below asserts the numeric value is below the given value (< e).
// Fatals if actual is not numeric (see Num).
func (b *Be) Below(e float64) *Be {
	msg := b.msg(fmt.Sprintf("below %v", e))
	if b.Num() < e != b.assert {
		b.fail(2, msg)
	}
	return b
}
// Within asserts the numeric value lies in the inclusive range
// from <= x <= to (or outside it when the expectation is negated).
func (b *Be) Within(from, to float64) *Be {
	msg := b.msg(fmt.Sprintf("between range %v <= x <= %v", from, to))
	x := b.Num()
	// The range test must be parenthesized: comparison operators bind
	// tighter than &&, so the former `x <= to && x >= from != b.assert`
	// parsed as `x <= to && ((x >= from) != b.assert)` and mis-handled
	// values outside the range.
	if (x <= to && x >= from) != b.assert {
		b.fail(2, msg)
	}
	return b
}
// Empty asserts the given value is empty; actual must be an Array, Slice,
// Map or String (anything else is a fatal misuse). Length is computed by
// the package-level length helper.
func (b *Be) Empty() *Be {
	msg := b.msg("empty")
	if i, ok := length(b.actual); ok {
		if i == 0 != b.assert {
			b.fail(2, msg)
		}
	} else {
		b.t.Fatal(invMsg("Array, Slice, Map or String"))
	}
	return b
}
// Ok asserts the given value is truthy (i.e. not "", nil, 0, false).
func (b *Be) Ok() *Be {
	msg := b.msg("ok")
	var exp bool
	switch b.actual.(type) {
	case int, int8, int32, int64, uint, uint8, uint32, uint64, float32, float64:
		// Compare through reflect so each numeric width is tested against
		// its own zero: the former `b.actual != 0` compared the interface
		// against an untyped 0 (an int), so e.g. int8(0) counted as truthy.
		rv := reflect.ValueOf(b.actual)
		switch rv.Kind() {
		case reflect.Float32, reflect.Float64:
			exp = rv.Float() != 0
		case reflect.Uint, reflect.Uint8, reflect.Uint32, reflect.Uint64:
			exp = rv.Uint() != 0
		default:
			exp = rv.Int() != 0
		}
	case string:
		exp = b.actual != ""
	case bool:
		// Type-assert rather than compare interfaces; also resolves the old
		// TODO about the awkward `b.actual != false` form.
		exp = b.actual.(bool)
	default:
		exp = b.actual != nil
	}
	if exp != b.assert {
		b.fail(2, msg)
	}
	return b
}
// String asserts the given value is of type string.
func (b *Be) String() *Be {
	msg := b.msg("string")
	if _, ok := b.actual.(string); ok != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Int asserts the given value is of type int (not int8/int32/etc.).
func (b *Be) Int() *Be {
	msg := b.msg("int")
	if _, ok := b.actual.(int); ok != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Float asserts the given value is of type float32 or float64.
func (b *Be) Float() *Be {
	msg := b.msg("float")
	exp := false
	switch b.actual.(type) {
	case float32, float64:
		exp = true
	}
	if exp != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Bool asserts the given value is of type bool.
func (b *Be) Bool() *Be {
	msg := b.msg("boolean")
	if _, ok := b.actual.(bool); ok != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Map asserts the given value is a map (any key/value types).
func (b *Be) Map() *Be {
	msg := b.msg("map")
	if reflect.TypeOf(b.actual).Kind() == reflect.Map != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Array asserts the given value is an array (fixed length, not a slice).
func (b *Be) Array() *Be {
	msg := b.msg("array")
	if reflect.TypeOf(b.actual).Kind() == reflect.Array != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Slice asserts the given value is a slice.
func (b *Be) Slice() *Be {
	msg := b.msg("slice")
	if reflect.TypeOf(b.actual).Kind() == reflect.Slice != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Chan asserts the given value is a channel.
func (b *Be) Chan() *Be {
	msg := b.msg("channel")
	if reflect.TypeOf(b.actual).Kind() == reflect.Chan != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Struct asserts the given value is a struct (value, not pointer).
func (b *Be) Struct() *Be {
	msg := b.msg("struct")
	if reflect.TypeOf(b.actual).Kind() == reflect.Struct != b.assert {
		b.fail(2, msg)
	}
	return b
}

// Ptr asserts the given value is a pointer.
// NOTE(review): reflect.TypeOf(nil) is nil — a nil actual panics in all the
// Kind-based checks above; confirm callers never pass untyped nil here.
func (b *Be) Ptr() *Be {
	msg := b.msg("pointer")
	if reflect.TypeOf(b.actual).Kind() == reflect.Ptr != b.assert {
		b.fail(2, msg)
	}
	return b
}
// Nil asserts the given value is nil.
// NOTE(review): a typed nil (e.g. (*T)(nil) stored in the interface) is not
// == nil and will be reported as non-nil here — confirm that is intended.
func (b *Be) Nil() *Be {
	msg := b.msg("nil")
	if b.actual == nil != b.assert {
		b.fail(2, msg)
	}
	return b
}
// Type asserts the given value's type name equals s.
// NOTE(review): compares reflect.Type.Name(), which is the empty string for
// unnamed types (slices, maps, pointers) — those can never match a name.
func (b *Be) Type(s string) *Be {
	msg := b.msg(fmt.Sprintf("type %v", s))
	if reflect.TypeOf(b.actual).Name() == s != b.assert {
		b.fail(2, msg)
	}
	return b
}
// fail marks the Else chain as failed and reports msg, skipping `callers`
// stack frames so the user's own call site is blamed.
func (b *Be) fail(callers int, msg string) {
	b.Else.failed = true
	fail(b.t, callers, msg)
}

// msg renders the standard "to be <s>" expectation message for actual,
// honoring the assert/negate flag.
func (b *Be) msg(s string) string {
	return errMsg("to be")(b.actual, s, b.assert)
}
// Num converts the actual value to float64 for numeric comparisons.
// Non-numeric values are a fatal misuse of the API (Fatal, then 0 is
// returned only because Fatal's signature does not stop compilation flow).
func (b *Be) Num() float64 {
	rv := reflect.ValueOf(b.actual)
	switch rv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(rv.Int())
	case reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return float64(rv.Uint())
	case reflect.Float32, reflect.Float64:
		return float64(rv.Float())
	default:
		b.t.Fatal(invMsg("numeric"))
		return 0
	}
}
// True asserts the actual value is the bool true.
// Non-bool values are a fatal misuse. Note: unlike most assertions, this
// one ignores the assert/negate flag (it checks the raw bool directly).
func (b *Be) True() *Be {
	v, err := b.bool()
	if err != nil {
		b.t.Fatal(invMsg("bool"))
		return b
	}
	if !v {
		b.fail(2, b.msg("true"))
	}
	return b
}

// False asserts the actual value is the bool false.
// Non-bool values are a fatal misuse; the assert/negate flag is ignored.
func (b *Be) False() *Be {
	v, err := b.bool()
	if err != nil {
		b.t.Fatal(invMsg("bool"))
		return b
	}
	if v {
		b.fail(2, b.msg("false"))
	}
	return b
}
func (b *Be) bool() (bool, error) {
rv := reflect.ValueOf(b.actual)
switch rv.Kind() {
case reflect.Bool:
return rv.Bool(), nil
default:
}
return false, errors.New("not a bool")
} | be.go | 0.613584 | 0.522141 | be.go | starcoder |
package common
import (
"context"
"github.com/DataDog/datadog-go/statsd"
)
// ContextKey is the dedicated key type for values this package stores in a
// context (a distinct type avoids collisions with other packages' keys).
type ContextKey string

// Extras are extra information to be added into context.
type Extras map[string]interface{}

// Basics are basic information to be added into context.
type Basics map[string]interface{}

const (
	// keep these keys private to avoid conflicts with other packages
	ctxBasics ContextKey = "basics"
	ctxExtras ContextKey = "extras"
	ctxStatsD ContextKey = "statsD"
)
// GetString gets a string from the context by ContextKey; it returns "" when
// ctx is nil, the key is absent, or the stored value is not a string.
func GetString(ctx context.Context, key ContextKey) (value string) {
	if ctx != nil {
		value, _ = ctx.Value(key).(string)
	}
	return
}
// GetBasic returns the basic stored under key, or nil when ctx is nil, the
// Basics map is absent, or the key is missing.
func GetBasic(ctx context.Context, key string) (value interface{}) {
	if ctx != nil {
		if basics, ok := ctx.Value(ctxBasics).(Basics); ok {
			// A plain index suffices: a missing key yields nil, which is
			// exactly the zero value this function returns anyway.
			value = basics[key]
		}
	}
	return
}
// GetBasics gets the Basics map from the context if any; nil otherwise.
func GetBasics(ctx context.Context) (value Basics) {
	if ctx != nil {
		value, _ = ctx.Value(ctxBasics).(Basics)
	}
	return
}
// SetBasic returns a copy of parent context with basic key value added into it
func SetBasic(parent context.Context, key string, value interface{}) context.Context {
if parent == nil {
parent = context.Background()
}
basics, ok := parent.Value(ctxBasics).(Basics)
if !ok {
basics = make(map[string]interface{})
}
basics[key] = value
return context.WithValue(parent, ctxBasics, basics)
}
// SetBasics returns a copy of the parent context with the given Basics map
// attached, replacing any previously attached map. The caller retains
// ownership of basics; it is stored without copying.
func SetBasics(parent context.Context, basics Basics) context.Context {
	if parent == nil {
		parent = context.Background()
	}
	return context.WithValue(parent, ctxBasics, basics)
}
// GetExtras gets the Extras map from the context if any; nil otherwise.
func GetExtras(ctx context.Context) (value Extras) {
	if ctx != nil {
		value, _ = ctx.Value(ctxExtras).(Extras)
	}
	return
}
// SetExtras returns a copy of the parent context with the given Extras map
// attached, replacing any previously attached map. The caller retains
// ownership of extras; it is stored without copying.
func SetExtras(parent context.Context, extras Extras) context.Context {
	if parent == nil {
		parent = context.Background()
	}
	return context.WithValue(parent, ctxExtras, extras)
}
// GetExtra returns the extra stored under key, or nil when ctx is nil, the
// Extras map is absent, or the key is missing.
func GetExtra(ctx context.Context, key string) (value interface{}) {
	if ctx != nil {
		if extras, ok := ctx.Value(ctxExtras).(Extras); ok {
			// A plain index suffices: a missing key yields nil, which is
			// exactly the zero value this function returns anyway.
			value = extras[key]
		}
	}
	return
}
// SetExtra returns a copy of parent context with Extra key value added into it
func SetExtra(parent context.Context, key string, value interface{}) context.Context {
if parent == nil {
parent = context.Background()
}
extras, ok := parent.Value(ctxExtras).(Extras)
if !ok {
extras = make(map[string]interface{})
}
extras[key] = value
return context.WithValue(parent, ctxExtras, extras)
}
// GetStatsD gets statsD client from the context if any
func GetStatsD(ctx context.Context) (statsD *statsd.Client) {
if ctx != nil {
statsD, _ = ctx.Value(ctxStatsD).(*statsd.Client)
}
return
}
// SetStatsD returns a copy of parent context with statsD client added into it
func SetStatsD(parent context.Context, statsD *statsd.Client) context.Context {
if parent == nil {
parent = context.Background()
}
return context.WithValue(parent, ctxStatsD, statsD)
} | common/context.go | 0.661814 | 0.463141 | context.go | starcoder |
package metrics
import (
"time"
)
/*
Package metrics wants to help us to know what's going on.
We'll try to keep it simple.
*/
// Data has all the metrics data in memory. It has counters and loggers.
// The former can only be incremented, while the latter record series of
// int64 values and time differences.
type Data struct {
	// counters is just a map of `+1` counters. You know.
	// How many iterations? How many cases of A? etc.
	counters map[string]int
	// loggers have two use cases:
	// * Store time differences (with StartLogDiff / StopLogDiff)
	//   + Useful for RPC Calls and DB Queries
	// * Store series of values (mem / CPU / active goroutines / etc)
	loggers map[string][]int64
}

// data is the package-global metrics store.
// NOTE(review): access is unsynchronized — confirm callers do not use this
// package from multiple goroutines.
var data Data

// init allocates the global counter and logger maps so the package-level
// functions are usable without further setup.
func init() {
	data = Data{}
	data.counters = make(map[string]int)
	data.loggers = make(map[string][]int64)
}
/*
	COUNTERS
*/

// NewCounter registers a counter with the given key; it is a no-op when the
// counter already exists (the existing count is preserved).
func NewCounter(key string) {
	if _, ok := data.counters[key]; !ok {
		data.counters[key] = 0
	}
}

// IncCounter increments the given counter by 1; unknown keys are ignored
// (counters must be registered with NewCounter first).
func IncCounter(key string) {
	if _, ok := data.counters[key]; ok {
		data.counters[key]++
	}
}

// GetCounter returns the current value of the given counter, or 0 for an
// unknown key.
func GetCounter(key string) int {
	if val, ok := data.counters[key]; ok {
		return val
	}
	return 0
}
/*
	LOGGERS
*/

// NewLogger registers a logger with the given key; it is a no-op when the
// logger already exists (recorded values are preserved).
func NewLogger(key string) {
	if _, ok := data.loggers[key]; !ok {
		var slice []int64
		data.loggers[key] = slice
	}
}

// AddLog appends an int64 value to the logger. Useful for aggregations,
// such as the total number of bytes stored. Unknown keys are ignored.
func AddLog(key string, val int64) {
	if _, ok := data.loggers[key]; ok {
		data.loggers[key] = append(data.loggers[key], val)
	}
}

// StartLogDiff returns the index of the value logged,
// so you can get the time difference with it using StopLogDiff().
// It stores the start time as a negative number (-UnixNano); functions that
// compute averages ignore negative values, treating them as "incomplete
// logs". Returns -1 for an unknown key.
func StartLogDiff(key string) int {
	if _, ok := data.loggers[key]; ok {
		data.loggers[key] = append(data.loggers[key], -1*time.Now().UnixNano())
		return len(data.loggers[key]) - 1
	}
	// No key found
	return -1
}

// StopLogDiff completes the measurement begun by StartLogDiff: adding now to
// the stored negative start time leaves the elapsed nanoseconds at idx.
// Unknown keys and out-of-range indexes are ignored.
func StopLogDiff(key string, idx int) {
	if _, ok := data.loggers[key]; ok {
		if len(data.loggers[key]) > idx {
			// The value created at StartLogDiff is a negative one
			data.loggers[key][idx] = data.loggers[key][idx] + time.Now().UnixNano()
		}
	}
}
// GetAverageLogDiff will calculate the average of the log differences,
// discarding the negative ones, as those will be deemed as incomplete ops.
func GetAverageLogDiff(key string) (int, int64, float64) {
if _, ok := data.loggers[key]; ok {
n := 0
sum := int64(0)
for _, v := range data.loggers[key] {
if v >= 0 {
sum += v
n++
}
}
return n, sum, float64(sum) / float64(n)
}
return 0, 0, 0
} | metrics/metrics.go | 0.649245 | 0.418994 | metrics.go | starcoder |
package van
import (
"context"
"fmt"
"reflect"
)
// Cached reflect.Type values for the interface types the signature
// validators below compare against.
var (
	typeVan     = reflect.TypeOf((*Van)(nil)).Elem()
	typeError   = reflect.TypeOf((*error)(nil)).Elem()
	typeContext = reflect.TypeOf((*context.Context)(nil)).Elem()
)
func isStructPtr(t reflect.Type) bool {
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// validateProviderSignature reports an error unless t is a function of the
// shape func(deps...) (Interface, error), where every dependency argument
// is an interface or a dependency struct.
func validateProviderSignature(t reflect.Type) error {
	switch {
	case t.Kind() != reflect.Func:
		return errInvalidType.fmt("provider must be a function, got %s", t.String())
	case t.NumOut() != 2:
		return errInvalidType.fmt("provider must have two return values, got %d", t.NumOut())
	case t.Out(0).Kind() != reflect.Interface:
		return errInvalidType.fmt("provider's first return value must be an interface, got %s", t.Out(0).String())
	case !t.Out(1).Implements(typeError):
		return errInvalidType.fmt("provider's second return value must be an error, got %s", t.Out(1).String())
	}
	// Tail call replaces the redundant `if err != nil { return err }; return nil`.
	return validateDependencyArgs(t, 0)
}
// validateHandlerSignature reports an error unless t is a function of the
// shape func(ctx context.Context, cmd *Struct, deps...) error, where every
// dependency argument is an interface or a dependency struct.
func validateHandlerSignature(t reflect.Type) error {
	switch {
	case t.Kind() != reflect.Func:
		return errInvalidType.fmt("handler must be a function, got %s", t.String())
	case t.NumIn() < 2:
		// %d renders the same text as the former %s + fmt.Sprint pair.
		return errInvalidType.fmt("handler must have at least 2 arguments, got %d", t.NumIn())
	case t.In(0) != typeContext:
		return errInvalidType.fmt("handler's first argument must be context.Context, got %s", t.In(0).String())
	case !isStructPtr(t.In(1)):
		return errInvalidType.fmt("handler's second argument must be a struct pointer, got %s", t.In(1).String())
	case t.NumOut() != 1:
		return errInvalidType.fmt("handler must have one return value, got %d", t.NumOut())
	case !t.Out(0).Implements(typeError):
		return errInvalidType.fmt("handler's return type must be error, got %s", t.Out(0).String())
	}
	// Tail call replaces the redundant `if err != nil { return err }; return nil`.
	return validateDependencyArgs(t, 2)
}
// validateListenerSignature reports an error unless t is a function of the
// shape func(ctx context.Context, event Struct, deps...) with no return
// values, where every dependency argument is an interface or a dependency
// struct.
func validateListenerSignature(t reflect.Type) error {
	switch {
	case t.Kind() != reflect.Func:
		return errInvalidType.fmt("handler must be a function, got %s", t.String())
	case t.NumIn() < 2:
		// %d renders the same text as the former %s + fmt.Sprint pair.
		return errInvalidType.fmt("handler must have at least 2 arguments, got %d", t.NumIn())
	case t.In(0) != typeContext:
		return errInvalidType.fmt("handler's first argument must be context.Context, got %s", t.In(0).String())
	case t.In(1).Kind() != reflect.Struct:
		return errInvalidType.fmt("handler's second argument must be a struct, got %s", t.In(1).String())
	case t.NumOut() != 0:
		return errInvalidType.fmt("event handler should not have any return values")
	}
	// Tail call replaces the redundant `if err != nil { return err }; return nil`.
	return validateDependencyArgs(t, 2)
}
// validateExecLambdaSignature reports an error unless t is a function of
// the shape func(deps...) error, where every dependency argument is an
// interface or a dependency struct.
func validateExecLambdaSignature(t reflect.Type) error {
	switch {
	case t.Kind() != reflect.Func:
		return errInvalidType.fmt("fn should be a function, got %s", t.String())
	case t.NumOut() != 1:
		// %d renders the same text as the former %s + fmt.Sprint pair.
		return errInvalidType.fmt("fn must have one return value, got %d", t.NumOut())
	case !t.Out(0).Implements(typeError):
		return errInvalidType.fmt("return value must be an error, got %s", t.Out(0).String())
	}
	// Tail call replaces the redundant `if err != nil { return err }; return nil`.
	return validateDependencyArgs(t, 0)
}
// validateDependencyArgs checks that every argument of t from index `start`
// onward is a valid dependency: either an interface, or a struct whose
// fields all pass validateDependencyStruct.
func validateDependencyArgs(t reflect.Type, start int) error {
	for i := start; i < t.NumIn(); i++ {
		argType := t.In(i)
		switch argType.Kind() {
		case reflect.Interface:
			continue
		case reflect.Struct:
			if err := validateDependencyStruct(argType); err != nil {
				return fmt.Errorf("error in dependency struct argument %d: %w", i, err)
			}
			continue
		default:
			return errInvalidType.fmt("argument %d must be an interface or a struct, got %s", i, argType.String())
		}
	}
	return nil
}
// validateDependencyStruct checks that every visible field of the struct
// type t is exported and of interface type, so each field can be injected.
func validateDependencyStruct(t reflect.Type) error {
	for _, f := range reflect.VisibleFields(t) {
		if !f.IsExported() {
			return errInvalidType.fmt("field %s must be exported", f.Name)
		}
		if f.Type.Kind() != reflect.Interface {
			return errInvalidType.fmt("field %s must be an interface, got %s", f.Name, f.Type.String())
		}
	}
	return nil
}
func toError(v reflect.Value) error {
if v.IsNil() {
return nil
}
return v.Interface().(error)
} | types.go | 0.521471 | 0.42322 | types.go | starcoder |
package bird_data_guessing
import (
"sort"
"testing"
"github.com/gbdubs/inference"
)
// string

// testStringCase pairs an input name with the expected inferred string.
type testStringCase struct {
	name     string
	expected string
}

// testStringBehavior is the function under test: a bird name in, a single
// string inference out.
type testStringBehavior func(englishOrLatinName string) *inference.String

// stringCase builds a testStringCase from a name and its expected result.
func stringCase(englishOrLatinName string, expectedResult string) testStringCase {
	return testStringCase{
		name:     englishOrLatinName,
		expected: expectedResult,
	}
}

// testStringCases runs b over every case and reports each mismatch together
// with the inference's debug representation and source dump.
func testStringCases(t *testing.T, b testStringBehavior, cases ...testStringCase) {
	for _, c := range cases {
		actual := b(c.name)
		if c.expected != actual.Value {
			t.Errorf(
				`case %s assertion error: expected '%s', was '%s'. Debug %+v source: %s`,
				c.name, c.expected, actual.Value, actual, actual.Source.Dump())
		}
	}
}

// []string

// testSliceStringCase pairs an input name with the expected slice of values;
// expectedOrdered controls whether ordering matters in the comparison.
type testSliceStringCase struct {
	name            string
	expected        []string
	expectedOrdered bool
}

// testSliceStringBehavior is the slice-valued function under test.
type testSliceStringBehavior func(englishOrLatinName string) []*inference.String

// unorderedSliceStringCase builds a case whose expected values are compared
// without regard to order.
func unorderedSliceStringCase(englishOrLatinName string, expectedResult ...string) testSliceStringCase {
	return testSliceStringCase{
		name:            englishOrLatinName,
		expected:        expectedResult,
		expectedOrdered: false,
	}
}
func testSliceStringCases(t *testing.T, b testSliceStringBehavior, cases ...testSliceStringCase) {
for _, c := range cases {
actual := b(c.name)
if len(actual) != len(c.expected) {
t.Errorf(`case %s assertion error - lengths differ - expected len '%v', was '%v'. Debug %+v`,
c.name, len(c.expected), len(actual), actual)
}
as := make([]string, len(actual))
es := make([]string, len(actual))
for i, a := range actual {
as[i] = a.Value
es[i] = c.expected[i]
}
if !c.expectedOrdered {
sort.Strings(as)
sort.Strings(es)
}
errorIndexes := make([]int, 0)
for i, _ := range as {
if as[i] != es[i] {
errorIndexes = append(errorIndexes, i)
}
}
if len(errorIndexes) > 0 {
t.Errorf(
`case %s assertion error - errors at indexes %v - expected '%s' - was '%s' - debug: %+v`,
c.name, errorIndexes, es, as, actual)
}
}
} | testing_string.go | 0.517327 | 0.445409 | testing_string.go | starcoder |
package vectormodel
import (
"errors"
"fmt"
"sort"
"gonum.org/v1/gonum/mat"
)
type (
	// VectorModel is a struct to handle document vector space models
	// (implicit-feedback matrix factorization over document factors).
	VectorModel struct {
		confidence     float64     // weight applied to consumed documents when solving user vectors
		regularization float64     // L2 regularization added to the normal equations
		docIDs         []int       // row index -> document ID
		docIndexes     map[int]int // document ID -> row index in itemFactorsY
		nFactors       int         // length of each document factor vector
		itemFactorsY   *mat.Dense  // item factor matrix Y (one row per document)
		squaredItemFactorsYtY *mat.Dense // cached YtY, reused by userVector
	}
	// DocumentScore is the result of a recommendation.
	DocumentScore struct {
		DocumentID int
		Score      float64
	}
)

// byDocScoreDesc implements sort.Interface, ordering scores descending.
// (Go permits these method definitions to precede the type declaration.)
func (a byDocScoreDesc) Len() int           { return len(a) }
func (a byDocScoreDesc) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byDocScoreDesc) Less(i, j int) bool { return a[i].Score > a[j].Score }

type byDocScoreDesc []DocumentScore
// NewVectorModel creates a new VectorModel from a map of document ID to
// factor vector. All vectors must have the same length (otherwise an error
// is returned); confidence and regularization are the hyperparameters used
// when solving user vectors. YtY is precomputed once here for reuse.
func NewVectorModel(documents map[int][]float64, confidence, regularization float64) (*VectorModel, error) {
	var vm VectorModel
	vm.confidence = confidence
	vm.regularization = regularization
	vm.docIDs = make([]int, len(documents))
	vm.docIndexes = make(map[int]int)
	data := make([]float64, 0)
	i := 0
	for doc, vector := range documents {
		// The first vector seen fixes the factor count; all later vectors
		// must match it.
		if i == 0 {
			vm.nFactors = len(vector)
		} else if len(vector) != vm.nFactors {
			return nil, errors.New("Invalid vector size")
		}
		vm.docIndexes[doc] = i
		vm.docIDs[i] = doc
		data = append(data, vector...)
		i++
	}
	vm.itemFactorsY = mat.NewDense(len(documents), vm.nFactors, data)
	// Cache YtY: it is needed for every userVector solve.
	var YtY mat.Dense
	YtY.Mul(vm.itemFactorsY.T(), vm.itemFactorsY)
	vm.squaredItemFactorsYtY = &YtY
	return &vm, nil
}
// Rank sorts a list of candidate assets for a given user history.
// NOTE: candidates is reordered in place (best first); the returned scores
// slice is aligned index-by-index with the reordered candidates.
func (vm *VectorModel) Rank(candidates []int, seenDocs map[int]bool) (scores []float64, err error) {
	candidateScores, err := vm.scoreCandidates(candidates, seenDocs)
	if err != nil {
		return nil, err
	}
	scores = make([]float64, len(candidateScores))
	for i, candidateScore := range candidateScores {
		candidates[i] = candidateScore.DocumentID
		scores[i] = candidateScore.Score
	}
	return scores, nil
}
// Recommend scores every document in the model against the user's history
// and returns the top n as DocumentScores (best first).
func (vm *VectorModel) Recommend(seenDocs map[int]bool, n int) (recommendations []DocumentScore, err error) {
	recommendations, err = vm.scoreCandidates(vm.docIDs, seenDocs)
	if err != nil {
		return nil, err
	}
	if len(recommendations) > n {
		recommendations = recommendations[:n]
	}
	return recommendations, nil
}
// scoreCandidates solves the user vector from the seen documents and scores
// every candidate against it, returning the candidates sorted best first.
// Already-seen documents are pinned to -1 and documents missing from the
// model to 0. Errors when none of the seen docs exist in the model.
func (vm *VectorModel) scoreCandidates(candidates []int, seenDocs map[int]bool) (recommendations []DocumentScore, err error) {
	confidenceMap := vm.confidenceMap(seenDocs)
	if len(confidenceMap) == 0 {
		return nil, fmt.Errorf("No seen doc is in model. History: %d Model: %d",
			len(seenDocs), len(vm.docIndexes))
	}
	userVec, err := vm.userVector(confidenceMap)
	if err != nil {
		return recommendations, err
	}
	scoresVec := vm.scoresForUserVec(&userVec)
	candidateScores := make([]DocumentScore, len(candidates))
	for i, doc := range candidates {
		var score float64
		if _, docAlreadySeen := seenDocs[doc]; docAlreadySeen {
			// Seen docs sort to the very bottom.
			score = -1
		} else if docIndex, docInModel := vm.docIndexes[doc]; !docInModel {
			score = 0
		} else {
			score = scoresVec.At(docIndex, 0)
		}
		candidateScores[i] = DocumentScore{doc, score}
	}
	sort.Sort(byDocScoreDesc(candidateScores))
	return candidateScores, nil
}
// confidenceMap maps each seen document that exists in the model to the
// model's constant confidence weight; seen docs unknown to the model are
// dropped.
func (vm *VectorModel) confidenceMap(seenDocs map[int]bool) map[int]float64 {
	confidenceMap := make(map[int]float64)
	for doc := range seenDocs {
		if _, inModel := vm.docIndexes[doc]; inModel {
			confidenceMap[doc] = vm.confidence
		}
	}
	return confidenceMap
}
// userVector solves the implicit-ALS normal equations for the user vector
// implied by a map of consumed document -> confidence weight.
func (vm *VectorModel) userVector(confidenceMap map[int]float64) (mat.VecDense, error) {
	// We follow the notation from the paper "Collaborative Filtering for Implicit Feedback Datasets".
	// Please see github.com/benfred/implicit as a reference implementation.
	// We solve the following linear equation:
	// Xu = (YtCuY + regularization*I)^-1 * YtCuPu
	// A = YtCuY + reg * I = YtY + reg * I + Yt(Cu - I)Y
	// We initialize A to YtY + reg * I and sum the last term for each doc.
	var A mat.Dense
	A.Add(vm.squaredItemFactorsYtY, eye(vm.nFactors, vm.regularization))
	// b = YtCuPu
	b := mat.NewVecDense(vm.nFactors, make([]float64, vm.nFactors))
	for doc, confidence := range confidenceMap {
		index, docFound := vm.docIndexes[doc]
		if !docFound {
			continue
		}
		factor := vm.itemFactorsY.RowView(index)
		// A += (confidence - 1) * np.outer(factor, factor)
		var factor2 mat.Dense
		factor2.Mul(factor, factor.T())
		factor2.Scale(confidence-1, &factor2)
		A.Add(&A, &factor2)
		// b += confidence * factor
		b.AddScaledVec(b, confidence, factor)
	}
	var x mat.VecDense
	// We could just solve the matrix by calling the next line, but
	// A is positive definite, so we can use the Cholesky solver:
	// err := x.SolveVec(&A, b)
	var ch mat.Cholesky
	if ok := ch.Factorize(&unsafeSymmetric{A, vm.nFactors}); !ok {
		return x, errors.New("Failed to run Cholesky factorization")
	}
	err := ch.SolveVec(&x, b)
	return x, err
}
// scoresForUserVec returns the per-document score vector Y * userVec, one
// entry per row of the item factor matrix.
func (vm *VectorModel) scoresForUserVec(userVec *mat.VecDense) mat.VecDense {
	var y mat.VecDense
	y.MulVec(vm.itemFactorsY, userVec)
	return y
}

// eye returns an n x n diagonal matrix with `value` along the diagonal
// (value * I).
func eye(n int, value float64) mat.Matrix {
	m := mat.NewDense(n, n, make([]float64, n*n))
	for i := 0; i < n; i++ {
		m.Set(i, i, value)
	}
	return m
}
type unsafeSymmetric struct {
mat.Dense
n int
}
func (s *unsafeSymmetric) Symmetric() int {
return s.n
} | vectormodel/vector_model.go | 0.717012 | 0.487124 | vector_model.go | starcoder |
package validation
import (
"encoding/json"
"errors"
"math"
"strconv"
"sync"
"github.com/PaddlePaddle/PaddleDTX/crypto/core/machine_learning/evaluation/metrics"
)
// BinClassValidation performs validation of the binary classification case.
type BinClassValidation interface {
	// Splitter divides a data set into several subsets with some strategy (such as KFolds, LOO),
	// holding out one subset as validation set and the others as training set.
	Splitter
	// SetPredictOut sets predicted probabilities from the prediction set to which `idx` refers.
	SetPredictOut(idx int, predProbas []float64) error
	// GetAllPredictOuts returns all prediction results that have been stored.
	GetAllPredictOuts() map[int][]string
	// GetAccuracy returns classification accuracy.
	// idx is the index of the prediction set (also of the validation set) in the split folds.
	GetAccuracy(idx int) (float64, error)
	// GetAllAccuracy returns classification accuracy scores over all split folds,
	// together with their mean and standard deviation.
	GetAllAccuracy() (map[int]float64, float64, float64, error)
	// GetReport returns JSON bytes of precision, recall, f1, true positives,
	// false positives, true negatives and false negatives for each class, and accuracy.
	GetReport(idx int) ([]byte, error)
	// GetOverallReport returns JSON bytes of precision, recall, f1, true positives,
	// false positives, true negatives and false negatives for each class, and accuracy, over all split folds.
	GetOverallReport() (map[int][]byte, error)
	// GetROCAndAUC returns JSON bytes of the ROC curve's points and the AUC.
	GetROCAndAUC(idx int) ([]byte, error)
	// GetAllROCAndAUC returns a map containing every split fold's JSON bytes of ROC and AUC.
	GetAllROCAndAUC() (map[int][]byte, error)
}
// RegressionValidation performs validation of the regression case.
type RegressionValidation interface {
	// Splitter divides a data set into several subsets with some strategy (such as KFolds, LOO),
	// holding out one subset as validation set and the others as training set.
	Splitter
	// SetPredictOut sets prediction outcomes from the prediction set to which `idx` refers.
	SetPredictOut(idx int, yPred []float64) error
	// GetAllPredictOuts returns all prediction results that have been stored.
	GetAllPredictOuts() map[int][]float64
	// GetRMSE returns the RMSE over the validation set to which `idx` refers.
	GetRMSE(idx int) (float64, error)
	// GetAllRMSE returns RMSE scores over all split folds,
	// together with their mean and standard deviation.
	GetAllRMSE() (map[int]float64, float64, float64, error)
}
// Splitter divides a data set into several subsets with some strategy (such
// as KFolds, LOO), holding out one subset as validation set and the others
// as training set.
type Splitter interface {
	// Split divides the file into two parts directly,
	// based on the percentage that denotes the first part of the division.
	Split(percents int) error
	// ShuffleSplit shuffles the rows with `seed`,
	// then divides the file into two parts
	// based on `percents`, which denotes the first part of the division.
	ShuffleSplit(percents int, seed string) error
	// KFoldsSplit divides the file into `k` parts directly.
	// k is the number of parts; only 5 or 10 is allowed.
	KFoldsSplit(k int) error
	// ShuffleKFoldsSplit shuffles the sorted rows with `seed`,
	// then divides the file into `k` parts.
	// k is the number of parts; only 5 or 10 is allowed.
	ShuffleKFoldsSplit(k int, seed string) error
	// LooSplit sorts file rows by the IDs extracted from the file by `idName`,
	// then divides each row into its own subset (leave-one-out).
	LooSplit() error
	// GetAllFolds returns all folds after splitting.
	// It can only be called successfully after a split.
	GetAllFolds() ([][][]string, error)
	// GetTrainSet holds out the subset referred to by `idxHO`
	// and returns the remaining folds as the training set.
	GetTrainSet(idxHO int) ([][]string, error)
	// GetPredictSet returns the subset referred to by `idx`
	// as the prediction set (without the label feature).
	GetPredictSet(idx int) ([][]string, error)
	// GetValidSet returns the subset referred to by `idx`
	// as the validation set.
	GetValidSet(idx int) ([][]string, error)
}
// binClassValidation implements BinClassValidation.
type binClassValidation struct {
	// Splitter divides the data set into several subsets with some strategy (such as KFolds, LOO),
	// holding out one subset as validation set and the others as training set.
	Splitter
	// label is the name of the label feature.
	label string
	// posClass is the name of the positive class.
	posClass string
	// negClass is the name of the negative class.
	negClass string
	// threshold classifies a sample as positive when its predicted probability exceeds it.
	threshold float64
	// predResults stores prediction outcomes (probabilities) per fold index.
	predResults sync.Map
	// predClasses stores predicted class names per fold index.
	predClasses sync.Map
}
// NewBinClassValidation creates a BinClassValidation instance to handle binary
// classification validation.
// file contains all rows; its first row holds the feature names and the rest
// hold feature values.
// idName names the ID feature used in sample alignment.
// label names the label feature.
// posClass names the positive class; negClass names the negative class and may
// be the empty string, in which case it defaults to "non-"+posClass.
// A non-positive threshold defaults to 0.5.
func NewBinClassValidation(file [][]string, label string, idName string,
	posClass string, negClass string, threshold float64) (BinClassValidation, error) {
	if negClass == "" {
		negClass = "non-" + posClass
	}
	if threshold <= 0 {
		threshold = 0.5
	}
	if len(file) <= 1 {
		return nil, errors.New("invalid file")
	}
	// locate the label column in the header row (first row holds feature names)
	labelCol := -1
	for i, featName := range file[0] {
		if featName == label {
			labelCol = i
			break
		}
	}
	cloneRow := func(row []string) []string {
		dup := make([]string, 0, len(file[0]))
		dup = append(dup, row...)
		return dup
	}
	rows := make([][]string, 0, len(file))
	if labelCol < 0 {
		// no label column found: keep every row as a deep copy
		for _, row := range file {
			rows = append(rows, cloneRow(row))
		}
	} else {
		rows = append(rows, cloneRow(file[0]))
		// normalize the label value of every data row to posClass/negClass
		for _, row := range file[1:] {
			if len(row) <= labelCol {
				return nil, errors.New("invalid file")
			}
			dup := make([]string, 0, len(file[0]))
			dup = append(dup, row[:labelCol]...)
			if row[labelCol] == posClass {
				dup = append(dup, posClass)
			} else {
				dup = append(dup, negClass)
			}
			dup = append(dup, row[labelCol+1:]...)
			rows = append(rows, dup)
		}
	}
	return &binClassValidation{
		Splitter:  NewSplitter(rows, idName, label),
		label:     label,
		posClass:  posClass,
		negClass:  negClass,
		threshold: threshold,
	}, nil
}
// SetPredictOut records predicted probabilities for the prediction set referred
// to by `idx`, deriving each sample's predicted class from the threshold.
// It returns an error if the file hasn't been split or the sizes mismatch.
func (bv *binClassValidation) SetPredictOut(idx int, predProbas []float64) error {
	validSet, err := bv.GetValidSet(idx)
	if err != nil {
		return err
	}
	// the first row of the validation set is the header, hence len-1 samples
	if len(validSet)-1 != len(predProbas) {
		return errors.New("there is a mismatch between the number of predicted classes and that of prediction set")
	}
	predicted := make([]string, len(predProbas))
	for i, proba := range predProbas {
		if proba > bv.threshold {
			predicted[i] = bv.posClass
		} else {
			predicted[i] = bv.negClass
		}
	}
	bv.predResults.Store(idx, predProbas)
	bv.predClasses.Store(idx, predicted)
	return nil
}
// GetAllPredictOuts returns every stored predicted-class slice, keyed by fold index.
func (bv *binClassValidation) GetAllPredictOuts() map[int][]string {
	all := make(map[int][]string)
	bv.predClasses.Range(func(k, v interface{}) bool {
		all[k.(int)] = v.([]string)
		return true
	})
	return all
}
// GetAccuracy returns the classification accuracy of the fold referred to by
// idx (the index of the prediction/validation set among the split folds).
func (bv *binClassValidation) GetAccuracy(idx int) (float64, error) {
	stored, ok := bv.predClasses.Load(idx)
	if !ok {
		return 0, errors.New("not find prediction outcomes according to idx")
	}
	validSet, err := bv.GetValidSet(idx)
	if err != nil {
		return 0, err
	}
	truth, err := getFeaturesByName(validSet, bv.label)
	if err != nil {
		return 0, err
	}
	matrix, err := metrics.NewConfusionMatrix(truth, stored.([]string))
	if err != nil {
		return 0, err
	}
	return matrix.GetAccuracy(), nil
}
// GetAllAccuracy returns the classification accuracy of every split fold,
// along with the mean and standard deviation of those scores.
func (bv *binClassValidation) GetAllAccuracy() (map[int]float64, float64, float64, error) {
	accuracies := make(map[int]float64)
	var rangeErr error
	// foldAccuracy computes the accuracy of one fold from its predicted classes.
	foldAccuracy := func(fold int, predicted []string) (float64, error) {
		validSet, err := bv.GetValidSet(fold)
		if err != nil {
			return 0, err
		}
		truth, err := getFeaturesByName(validSet, bv.label)
		if err != nil {
			return 0, err
		}
		matrix, err := metrics.NewConfusionMatrix(truth, predicted)
		if err != nil {
			return 0, err
		}
		return matrix.GetAccuracy(), nil
	}
	bv.predClasses.Range(func(k, v interface{}) bool {
		fold := k.(int)
		acc, err := foldAccuracy(fold, v.([]string))
		if err != nil {
			rangeErr = err
			return false
		}
		accuracies[fold] = acc
		return true
	})
	if rangeErr != nil {
		return map[int]float64{}, 0, 0, rangeErr
	}
	mean, stdDev := getStdDeviation(accuracies)
	return accuracies, mean, stdDev, nil
}
// GetReport returns, as JSON bytes, precision, recall, f1 and the TP/FP/TN/FN
// counts for each class, plus the overall accuracy. The summary looks like:
// {
//  "Metrics": {
//   "NO":  {"TP": 2, "FP": 1, "FN": 1, "TN": 4, "Precision": 0.6666666666666666, "Recall": 0.6666666666666666, "F1Score": 0.6666666666666666},
//   "YES": {"TP": 4, "FP": 1, "FN": 1, "TN": 2, "Precision": 0.8, "Recall": 0.8, "F1Score": 0.8000000000000002}
//  },
//  "Accuracy": 0.75
// }
// where "NO" and "YES" are class names.
// idx is the index of the prediction set (also of the validation set) in the split folds.
func (bv *binClassValidation) GetReport(idx int) ([]byte, error) {
	stored, ok := bv.predClasses.Load(idx)
	if !ok {
		return []byte{}, errors.New("not find prediction outcomes according to idx")
	}
	validSet, err := bv.GetValidSet(idx)
	if err != nil {
		return []byte{}, err
	}
	truth, err := getFeaturesByName(validSet, bv.label)
	if err != nil {
		return []byte{}, err
	}
	matrix, err := metrics.NewConfusionMatrix(truth, stored.([]string))
	if err != nil {
		return []byte{}, err
	}
	return matrix.SummaryAsJSON()
}
// GetOverallReport returns, for every split fold, JSON bytes of precision,
// recall, f1, true positives, false positives, true negatives and false
// negatives for each class, plus the accuracy.
// The key of the returned map is the index of the fold, and the value is a
// JSON summary like:
// {
//  "Metrics": {
//   "NO": {
//    "TP": 2, "FP": 1, "FN": 1, "TN": 4,
//    "Precision": 0.6666666666666666,
//    "Recall": 0.6666666666666666,
//    "F1Score": 0.6666666666666666
//   },
//   "YES": {
//    "TP": 4, "FP": 1, "FN": 1, "TN": 2,
//    "Precision": 0.8,
//    "Recall": 0.8,
//    "F1Score": 0.8000000000000002
//   }
//  },
//  "Accuracy": 0.75
// }
// where "NO" and "YES" are class names.
func (bv *binClassValidation) GetOverallReport() (map[int][]byte, error) {
	var errRet error
	summaries := make(map[int][]byte)
	bv.predClasses.Range(func(key, value interface{}) bool {
		i := key.(int)
		predClasses := value.([]string)
		validSet, err := bv.GetValidSet(i)
		if err != nil {
			errRet = err
			return false
		}
		realClasses, err := getFeaturesByName(validSet, bv.label)
		if err != nil {
			errRet = err
			return false
		}
		cm, err := metrics.NewConfusionMatrix(realClasses, predClasses)
		if err != nil {
			errRet = err
			return false
		}
		summary, err := cm.SummaryAsJSON()
		if err != nil {
			errRet = err
			return false
		}
		summaries[i] = summary
		return true
	})
	if errRet != nil {
		return map[int][]byte{}, errRet
	}
	return summaries, nil
}
// reportROCAndAUC is the JSON payload produced by GetROCAndAUC and GetAllROCAndAUC.
type reportROCAndAUC struct {
	// PointsOnROC expresses the ROC curve as a series of points.
	// Each point is a [3]float64 holding [FPR, TPR, threshold] ([x, y, threshold]).
	PointsOnROC [][3]float64
	// AUC is the area under the ROC curve.
	AUC float64
}
// GetROCAndAUC returns JSON bytes describing the ROC curve and its AUC for the
// fold referred to by idx, e.g.:
// {
//  "PointsOnROC": [[0,0,1.9],[0,0.1,0.9],[0,0.2,0.8],[0.1,0.2,0.7],...],
//  "AUC": 0.68
// }
// Each ROC point is a [3]float64 holding [FPR, TPR, threshold] ([x, y, threshold]).
// idx is the index of the prediction set (also of the validation set) in the split folds.
func (bv *binClassValidation) GetROCAndAUC(idx int) ([]byte, error) {
	stored, ok := bv.predResults.Load(idx)
	if !ok {
		return []byte{}, errors.New("not find prediction results according to idx")
	}
	validSet, err := bv.GetValidSet(idx)
	if err != nil {
		return []byte{}, err
	}
	truth, err := getFeaturesByName(validSet, bv.label)
	if err != nil {
		return []byte{}, err
	}
	points, err := metrics.GetROC(truth, stored.([]float64), bv.posClass)
	if err != nil {
		return []byte{}, err
	}
	auc, err := metrics.GetAUC(metrics.GetCoordinates(points))
	if err != nil {
		return []byte{}, err
	}
	report := reportROCAndAUC{PointsOnROC: points, AUC: auc}
	return json.Marshal(&report)
}
// GetAllROCAndAUC returns a map containing, for every split fold, JSON bytes
// describing the ROC curve and its AUC, e.g.:
// {
//  "PointsOnROC": [[0,0,1.9],[0,0.1,0.9],[0,0.2,0.8],[0.1,0.2,0.7],...],
//  "AUC": 0.68
// }
// Each ROC point is a [3]float64 holding [FPR, TPR, threshold] ([x, y, threshold]).
// The map key is the index of the prediction set (also of the validation set)
// in the split folds.
func (bv *binClassValidation) GetAllROCAndAUC() (map[int][]byte, error) {
	var errRet error
	summaries := make(map[int][]byte)
	bv.predResults.Range(func(key, value interface{}) bool {
		i := key.(int)
		predResult := value.([]float64)
		validSet, err := bv.GetValidSet(i)
		if err != nil {
			errRet = err
			return false
		}
		realClasses, err := getFeaturesByName(validSet, bv.label)
		if err != nil {
			errRet = err
			return false
		}
		points, err := metrics.GetROC(realClasses, predResult, bv.posClass)
		if err != nil {
			errRet = err
			return false
		}
		auc, err := metrics.GetAUC(metrics.GetCoordinates(points))
		if err != nil {
			errRet = err
			return false
		}
		// Propagate marshalling errors instead of silently storing a nil
		// summary, matching the error handling of every other step above.
		summary, err := json.Marshal(&reportROCAndAUC{
			PointsOnROC: points,
			AUC:         auc,
		})
		if err != nil {
			errRet = err
			return false
		}
		summaries[i] = summary
		return true
	})
	if errRet != nil {
		return map[int][]byte{}, errRet
	}
	return summaries, nil
}
// regressionValidation implements RegressionValidation.
type regressionValidation struct {
	// Splitter divides the data set into several subsets with some strategy (such as KFolds, LOO),
	// holding out one subset as validation set and the others as training set.
	Splitter
	// label is the name of the label feature.
	label string
	// predResults stores prediction outcomes per fold index.
	predResults sync.Map
}
// NewRegressionValidation creates a RegressionValidation instance to handle regression validation.
// file contains all rows of a file,
// and its first row contains just names of feature, and others contain all feature values
// idName denotes which feature is ID that would be used in sample alignment
func NewRegressionValidation(file [][]string, label string, idName string) (RegressionValidation, error) {
return ®ressionValidation{
Splitter: NewSplitter(file, idName, label),
label: label,
}, nil
}
// SetPredictOut stores prediction outcomes for the prediction set referred to
// by `idx`. The count must match the validation set (whose first row is the
// header and carries no prediction).
func (rv *regressionValidation) SetPredictOut(idx int, yPred []float64) error {
	validSet, err := rv.GetValidSet(idx)
	if err != nil {
		return err
	}
	if len(validSet)-1 != len(yPred) {
		return errors.New("there is a mismatch between the number of predicted values and that of prediction set")
	}
	rv.predResults.Store(idx, yPred)
	return nil
}
// GetAllPredictOuts returns every stored prediction slice, keyed by fold index.
func (rv *regressionValidation) GetAllPredictOuts() map[int][]float64 {
	all := make(map[int][]float64)
	rv.predResults.Range(func(k, v interface{}) bool {
		all[k.(int)] = v.([]float64)
		return true
	})
	return all
}
// GetRMSE returns the RMSE over the validation set referred to by `idx`.
func (rv *regressionValidation) GetRMSE(idx int) (float64, error) {
	stored, ok := rv.predResults.Load(idx)
	if !ok {
		return 0, errors.New("not find prediction outcomes according to idx")
	}
	validSet, err := rv.GetValidSet(idx)
	if err != nil {
		return 0, err
	}
	labels, err := getFeaturesByName(validSet, rv.label)
	if err != nil {
		return 0, err
	}
	// the labels come back as strings; parse them into floats
	yReal := make([]float64, 0, len(labels))
	for _, raw := range labels {
		parsed, err := strconv.ParseFloat(raw, 64)
		if err != nil {
			return 0, errors.New("failed to parse label from file, and error is:" + err.Error())
		}
		yReal = append(yReal, parsed)
	}
	return metrics.GetRMSE(yReal, stored.([]float64))
}
// GetAllRMSE returns the RMSE score of every split fold, along with the mean
// and standard deviation of those scores.
func (rv *regressionValidation) GetAllRMSE() (map[int]float64, float64, float64, error) {
	scores := make(map[int]float64)
	var rangeErr error
	// foldRMSE computes the RMSE of one fold from its stored predictions.
	foldRMSE := func(fold int, yPred []float64) (float64, error) {
		validSet, err := rv.GetValidSet(fold)
		if err != nil {
			return 0, err
		}
		labels, err := getFeaturesByName(validSet, rv.label)
		if err != nil {
			return 0, err
		}
		yReal := make([]float64, 0, len(labels))
		for _, raw := range labels {
			parsed, err := strconv.ParseFloat(raw, 64)
			if err != nil {
				return 0, errors.New("failed to parse label from file, and error is:" + err.Error())
			}
			yReal = append(yReal, parsed)
		}
		return metrics.GetRMSE(yReal, yPred)
	}
	rv.predResults.Range(func(key, value interface{}) bool {
		fold := key.(int)
		rmse, err := foldRMSE(fold, value.([]float64))
		if err != nil {
			rangeErr = err
			return false
		}
		scores[fold] = rmse
		return true
	})
	if rangeErr != nil {
		return map[int]float64{}, 0, 0, rangeErr
	}
	mean, stdDev := getStdDeviation(scores)
	return scores, mean, stdDev, nil
}
// splitter implements Splitter.
type splitter struct {
	// fileRows holds all rows of the file; the first row contains the feature names.
	fileRows [][]string
	// idName names the ID feature used in sample alignment.
	idName string
	// label names the label feature.
	label string
	// folds stores the division result of fileRows; each fold carries its own header row.
	folds [][][]string
}
// NewSplitter creates a Splitter over `file`, whose first row holds the feature
// names and whose remaining rows hold feature values.
// idName names the ID feature used for sample alignment; label names the label feature.
func NewSplitter(file [][]string, idName string, label string) Splitter {
	s := &splitter{
		fileRows: file,
		idName:   idName,
		label:    label,
	}
	return s
}
// Split divides the file into two parts directly, where `percents` denotes the
// share of the first part. The two parts become the folds.
func (s *splitter) Split(percents int) error {
	if percents < 1 || percents > 100 {
		return errors.New("percents must between 1 and 100")
	}
	parts, err := Split(s.fileRows, percents)
	if err != nil {
		return err
	}
	s.folds = [][][]string{parts[0], parts[1]}
	return nil
}
// ShuffleSplit sorts file rows by the IDs extracted via idName, shuffles them
// with `seed`, then divides the file into two parts where `percents` denotes
// the share of the first part.
func (s *splitter) ShuffleSplit(percents int, seed string) error {
	if percents < 1 || percents > 100 {
		return errors.New("percents must between 1 and 100")
	}
	parts, err := ShuffleSplit(s.fileRows, s.idName, percents, seed)
	if err != nil {
		return err
	}
	s.folds = [][][]string{parts[0], parts[1]}
	return nil
}
// KFoldsSplit divides the file into `k` parts directly (k may only be 5 or 10).
func (s *splitter) KFoldsSplit(k int) error {
	parts, err := KFoldsSplit(s.fileRows, k)
	if err == nil {
		s.folds = parts
	}
	return err
}
// ShuffleKFoldsSplit sorts file rows by the IDs extracted via idName, shuffles
// them with `seed`, then divides the file into `k` parts (k may only be 5 or 10).
func (s *splitter) ShuffleKFoldsSplit(k int, seed string) error {
	parts, err := ShuffleKFoldsSplit(s.fileRows, s.idName, k, seed)
	if err == nil {
		s.folds = parts
	}
	return err
}
// LooSplit sorts file rows by the IDs extracted via idName, then places each
// row into its own subset (leave-one-out).
func (s *splitter) LooSplit() error {
	parts, err := LooSplit(s.fileRows, s.idName)
	if err == nil {
		s.folds = parts
	}
	return err
}
// GetAllFolds returns every fold produced by a previous split, or an error if
// no split has happened yet.
func (s *splitter) GetAllFolds() ([][][]string, error) {
	if len(s.folds) > 0 {
		return s.folds, nil
	}
	return [][][]string{}, errors.New("the file has not been split")
}
// GetTrainSet holds out the fold referred to by `idxHO` and returns the
// remaining folds, re-assembled under a single header row, as the training set.
func (s *splitter) GetTrainSet(idxHO int) ([][]string, error) {
	l := len(s.folds)
	if l == 0 {
		return [][]string{}, errors.New("the file has not been split")
	}
	// A negative index would panic on s.folds[idxHO] below, so reject it
	// together with out-of-range positive indexes.
	if idxHO < 0 || idxHO >= l {
		return [][]string{}, errors.New("invalid index referring to subset held out")
	}
	lHO := len(s.folds[idxHO])
	// Every fold carries its own header row; the training set keeps a single
	// header plus all data rows not held out, hence the +1.
	lTrain := len(s.fileRows) - lHO + 1
	trainSet := make([][]string, 0, lTrain)
	trainSet = append(trainSet, s.fileRows[0])
	for i, fold := range s.folds {
		if i == idxHO {
			continue
		}
		trainSet = append(trainSet, fold[1:]...)
	}
	return trainSet, nil
}
// GetPredictSet returns the subset referred to by `idx` as the prediction set,
// i.e. the validation set with the label column removed.
func (s *splitter) GetPredictSet(idx int) ([][]string, error) {
	validSet, err := s.GetValidSet(idx)
	if err != nil {
		return validSet, err
	}
	if len(validSet) == 0 {
		// a fold should always carry a header row; guard against a panic below
		return [][]string{}, errors.New("invalid file")
	}
	// locate the label column in the header row
	idxL := -1
	for i, featName := range validSet[0] {
		if featName == s.label {
			idxL = i
			break
		}
	}
	if idxL < 0 {
		// no label column present: nothing to strip
		return validSet, nil
	}
	predictFile := make([][]string, 0, len(validSet))
	for _, row := range validSet {
		if len(row) <= idxL {
			return [][]string{}, errors.New("invalid file")
		}
		// copy the row minus the label column; the loop variable is named
		// `row` to avoid shadowing the receiver `s` as the old code did
		newRow := make([]string, 0, len(row)-1)
		newRow = append(newRow, row[:idxL]...)
		newRow = append(newRow, row[idxL+1:]...)
		predictFile = append(predictFile, newRow)
	}
	return predictFile, nil
}
// GetValidSet returns the subset referred to by `idx` as the validation set.
func (s *splitter) GetValidSet(idx int) ([][]string, error) {
	l := len(s.folds)
	if l == 0 {
		return [][]string{}, errors.New("the file has not been split")
	}
	// Guard negative indexes too: s.folds[idx] would panic on them.
	if idx < 0 || idx >= l {
		return [][]string{}, errors.New("invalid index referring to validation set ")
	}
	return s.folds[idx], nil
}
// getFeaturesByName extracts the column named `name` from fileRows, preserving
// row order. The first row is treated as the header and is not included in the
// result.
func getFeaturesByName(fileRows [][]string, name string) ([]string, error) {
	if len(fileRows) < 1 {
		return []string{}, errors.New("invalid file")
	}
	// locate the requested feature in the header row
	col := -1
	for i, header := range fileRows[0] {
		if header == name {
			col = i
			break
		}
	}
	if col < 0 {
		return []string{}, errors.New("not find name")
	}
	values := make([]string, 0, len(fileRows)-1)
	// skip the header row; collect the column value of every data row
	for _, row := range fileRows[1:] {
		if len(row) <= col {
			return []string{}, errors.New("invalid file")
		}
		values = append(values, row[col])
	}
	return values, nil
}
// getStdDeviation returns the mean and the sample standard deviation
// (Bessel-corrected, dividing by n-1) of the given scores.
// An empty map yields (0, 0); a single score yields (score, 0).
func getStdDeviation(score map[int]float64) (float64, float64) {
	n := len(score)
	if n == 0 {
		return 0, 0
	}
	var sum float64
	for _, v := range score {
		sum += v
	}
	mean := sum / float64(n)
	if n == 1 {
		return mean, 0
	}
	var sumSq float64
	for _, v := range score {
		sumSq += math.Pow(v-mean, 2)
	}
	return mean, math.Sqrt(sumSq / float64(n-1))
}
package money
import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"time"
)
// parseNumber reads a single floating point number from the start of s
// (leading whitespace allowed) and returns it. It returns an error when no
// number can be read. Trailing text after the number is ignored.
func parseNumber(s string) (float64, error) {
	var value float64
	// avoid else-after-return: fail fast, keep the happy path unindented
	if n, err := fmt.Sscanf(s, "%f", &value); n != 1 || err != nil {
		return 0, errors.New("invalid number")
	}
	return value, nil
}
// Converter is an amount that, given the exchange rate from USD to INR,
// can produce a printable result in the other currency.
type Converter interface {
	Convert(usdToInr float64) fmt.Stringer
}
// Inr is an amount denominated in Indian rupees.
type Inr float64

// Scale factors of the Indian numbering system.
const (
	lakh  = 100000.0
	crore = 10000000.0
)
// Convert returns the equivalent Usd amount for this rupee amount,
// given the USD→INR exchange rate.
func (amount Inr) Convert(usdToInr float64) fmt.Stringer {
	dollars := float64(amount) / usdToInr
	return Usd(dollars)
}
// String renders the amount in rupees, using the largest Indian-system unit
// (lakh/crore) the value reaches, with one decimal place.
func (amount Inr) String() string {
	switch {
	case amount/crore >= 1.0:
		return fmt.Sprintf("₹ %.1f crore", amount/crore)
	case amount/lakh >= 1.0:
		return fmt.Sprintf("₹ %.1f lakh", amount/lakh)
	default:
		return fmt.Sprintf("₹ %.1f", amount)
	}
}
// Usd is an amount denominated in US dollars.
type Usd float64

// Scale factors of the short-scale numbering system.
const (
	million  = 1000000.0
	billion  = 1000000000.0
	trillion = 1000000000000.0
)
// Convert returns the equivalent Inr amount for this dollar amount,
// given the USD→INR exchange rate.
func (amount Usd) Convert(usdToInr float64) fmt.Stringer {
	return Inr(float64(amount) * usdToInr)
}
// String renders the amount in dollars, using the largest short-scale unit
// (million/billion/trillion) the value reaches, with one decimal place.
func (amount Usd) String() string {
	switch {
	case amount/trillion >= 1.0:
		return fmt.Sprintf("$ %.1f trillion", amount/trillion)
	case amount/billion >= 1.0:
		return fmt.Sprintf("$ %.1f billion", amount/billion)
	case amount/million >= 1.0:
		return fmt.Sprintf("$ %.1f million", amount/million)
	default:
		return fmt.Sprintf("$ %.1f", amount)
	}
}
// Parser knows how to parse strings denoting an amount in one particular
// currency. Match reports whether a string looks like that currency's
// notation; Parse converts the string into a Converter.
type Parser interface {
	Match(s string) bool
	Parse(s string) (Converter, error)
}
// inrUnitRe detects INR currency markers; inrScaleRe extracts the lakh/crore
// scale word. Both are compiled once at package scope so Match and Parse do
// not recompile a regexp on every call.
var (
	inrUnitRe  = regexp.MustCompile(`(?i:lakh|crore|rs|inr|₹|rupee)`)
	inrScaleRe = regexp.MustCompile(`lakh|crore`)
)

// InrParser parses amounts denominated in INR.
type InrParser struct{}

// Match reports whether s looks like an amount in INR.
func (p InrParser) Match(s string) bool {
	return inrUnitRe.MatchString(s)
}

// Parse converts s into an Inr value, scaling by lakh/crore when the
// corresponding unit word is present.
func (p InrParser) Parse(s string) (Converter, error) {
	number, err := parseNumber(s)
	if err != nil {
		return Usd(0), err
	}
	switch inrScaleRe.FindString(s) {
	case "lakh":
		return Inr(number * lakh), nil
	case "crore":
		return Inr(number * crore), nil
	default:
		return Inr(number), nil
	}
}
// usdUnitRe detects USD currency markers; usdScaleRe extracts the
// million/billion/trillion scale word. Compiled once at package scope so
// Match and Parse do not recompile a regexp on every call.
var (
	usdUnitRe  = regexp.MustCompile(`(?i:million|billion|trillion|\$|usd|dollar)`)
	usdScaleRe = regexp.MustCompile(`million|billion|trillion`)
)

// UsdParser parses amounts denominated in USD.
type UsdParser struct{}

// Match reports whether s looks like an amount in USD.
func (p UsdParser) Match(s string) bool {
	return usdUnitRe.MatchString(s)
}

// Parse converts s into a Usd value, scaling by million/billion/trillion
// when the corresponding unit word is present.
func (p UsdParser) Parse(s string) (Converter, error) {
	number, err := parseNumber(s)
	if err != nil {
		return Inr(0), err
	}
	switch usdScaleRe.FindString(s) {
	case "million":
		return Usd(number * million), nil
	case "billion":
		return Usd(number * billion), nil
	case "trillion":
		return Usd(number * trillion), nil
	default:
		return Usd(number), nil
	}
}
// ErrorParser matches any string, and its Parse always returns an error.
// It is meant to sit last in the parser list as a catch-all fallback.
type ErrorParser struct{}

// Match always reports true so that Parse gets a chance to fail loudly.
func (p ErrorParser) Match(s string) bool {
	return true
}

// Parse always fails: reaching it means no known currency matched.
func (p ErrorParser) Parse(s string) (Converter, error) {
	return nil, errors.New("could not parse: unknown currency")
}
// Parse runs the string through every known parser and returns the result of
// the first one whose Match succeeds. ErrorParser matches everything, so the
// search always finds a parser; the final panic is unreachable.
func Parse(s string) (Converter, error) {
	for _, p := range []Parser{InrParser{}, UsdParser{}, ErrorParser{}} {
		if !p.Match(s) {
			continue
		}
		return p.Parse(s)
	}
	panic("none of the parsers matched!")
}
// FixerResponse models the reply from api.fixer.io, which provides
// exchange rates keyed by currency code.
type FixerResponse struct {
	Rates map[string]float64 `json:"rates"`
}
// GetUsdToInr fetches the exchange rate from fixer.io. To simplify
// error handling, it returns a default value if there was an error
// accessing the API.
func GetUsdToInr() float64 {
defaultRate := 62.0
r, err := http.Get("http://api.fixer.io/latest?base=USD")
if err != nil {
return defaultRate
}
defer r.Body.Close()
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return defaultRate
}
var response FixerResponse
if err = json.Unmarshal(body, &response); err != nil {
return defaultRate
}
return response.Rates["INR"]
} | money.go | 0.799521 | 0.472927 | money.go | starcoder |
package unimap
import "fmt"
// Composite is an interface to a collection, indexed by namespace, of consistent mappings from string to string. The
// mappings are consistent in the sense that no two mappings map a given pair to distinct results.
type Composite interface {
	// Add updates the composite mapping by adding a mapping with the given namespace and name. If a mapping with the
	// given namespace and name already exists, it is replaced providing it is consistent with all other mappings. If
	// the named mapping is not consistent with all other mappings in the same namespace, the mapping is removed from
	// the composite mapping for the namespace and an error is returned.
	Add(namespace string, name string, mapping map[string]string) error
	// Delete updates the composite mapping by removing the mapping with the given namespace and name. If there is no
	// mapping with the given namespace and name, an error is returned.
	Delete(namespace string, name string) error
	// Map applies the composite mapping for the given namespace to the given name and returns the mapped value. If the
	// given namespace is not known or the given name is not in the domain of the composite mapping for the namespace,
	// the given name is returned unchanged. In other words, the default composite mapping for any namespace is the
	// identity function.
	Map(namespace string, name string) string
	// Dump returns a string representing the internal state of the composite, for debugging.
	Dump() string
}
// errCh carries the result of a mutating operation back to its caller.
type errCh chan error

// addOp asks the monitor goroutine to register a named mapping.
type addOp struct {
	namespace string
	name      string
	mapping   relmap
	errCh     errCh
}

// deleteOp asks the monitor goroutine to remove a named mapping.
type deleteOp struct {
	namespace string
	name      string
	errCh     errCh
}

// mapOp asks the monitor goroutine to apply a namespace's composite mapping to a name.
type mapOp struct {
	namespace string
	name      string
	resultCh  chan string
}

// dumpOp asks the monitor goroutine for a debug rendering of its state.
type dumpOp struct {
	resultCh chan string
}

// namespace identifies one independent collection of mappings.
type namespace string

// a relmap is a relocation mapping which maps image references to image references
type relmap map[string]string

// a unimap is a consistent collection of relocation mappings, consistent in the sense that no two distinct keys of the
// unimap have corresponding relocation mappings which map a particular image reference to distinct values
type unimap map[string]relmap
// composite implements Composite. All state is owned by the monitor goroutine;
// the channels serialize access to it.
type composite struct {
	// mappings is a collection of unimaps indexed by namespace
	mappings map[namespace]unimap
	// composite holds, per namespace, the composition of all the relocation
	// mappings in that namespace's unimap (kept in sync by merge)
	composite map[namespace]relmap
	// operation channels feeding the monitor goroutine
	addCh    chan *addOp
	deleteCh chan *deleteOp
	mapCh    chan *mapOp
	dumpCh   chan *dumpOp
	// stopCh shuts the monitor goroutine down when closed
	stopCh <-chan struct{}
}
// New creates a Composite and starts its monitor goroutine. The goroutine owns
// all state and serializes operations; it exits when stopCh is closed.
func New(stopCh <-chan struct{}) Composite {
	comp := new(composite)
	comp.mappings = make(map[namespace]unimap)
	comp.composite = make(map[namespace]relmap)
	comp.addCh = make(chan *addOp)
	comp.deleteCh = make(chan *deleteOp)
	comp.mapCh = make(chan *mapOp)
	comp.dumpCh = make(chan *dumpOp)
	comp.stopCh = stopCh
	go comp.monitor()
	return comp
}
// Add registers the named mapping; it blocks until the monitor goroutine
// reports whether the mapping was accepted as consistent.
func (c *composite) Add(namespace string, name string, mapping map[string]string) error {
	reply := make(chan error)
	c.addCh <- &addOp{
		namespace: namespace,
		name:      name,
		mapping:   mapping,
		errCh:     reply,
	}
	return <-reply
}
// Delete removes the named mapping; it blocks until the monitor goroutine
// reports whether the mapping existed.
func (c *composite) Delete(namespace string, name string) error {
	reply := make(chan error)
	c.deleteCh <- &deleteOp{
		namespace: namespace,
		name:      name,
		errCh:     reply,
	}
	return <-reply
}
// Map applies the composite mapping of the given namespace to `value`,
// returning `value` unchanged when there is no mapping for it.
func (c *composite) Map(namespace string, value string) string {
	reply := make(chan string)
	c.mapCh <- &mapOp{
		namespace: namespace,
		name:      value,
		resultCh:  reply,
	}
	return <-reply
}
// Dump returns a string representing the state of the composite.
func (c *composite) Dump() string {
	reply := make(chan string)
	c.dumpCh <- &dumpOp{resultCh: reply}
	return <-reply
}
// monitor owns all composite state, serializing every operation by receiving
// them on the op channels until stopCh fires.
func (c *composite) monitor() {
	for {
		select {
		case addOp := <-c.addCh:
			addOp.errCh <- c.add(addOp.namespace, addOp.name, addOp.mapping)
		case deleteOp := <-c.deleteCh:
			deleteOp.errCh <- c.delete(deleteOp.namespace, deleteOp.name)
		case mapOp := <-c.mapCh:
			mapOp.resultCh <- c.doMap(mapOp.namespace, mapOp.name)
		case dumpOp := <-c.dumpCh:
			dumpOp.resultCh <- c.dump()
		case <-c.stopCh:
			// NOTE(review): closing these makes any API call after shutdown
			// panic (send on closed channel); callers must not use the
			// Composite once stopCh has fired.
			close(c.addCh)
			close(c.deleteCh)
			close(c.mapCh)
			// dumpCh was previously left open, so a Dump() after shutdown
			// blocked forever instead of failing like the other operations.
			close(c.dumpCh)
			return
		}
	}
}
// add replaces any existing mapping with the same name, verifies the new
// mapping is consistent with the namespace, stores a private copy of it, and
// recomputes the namespace's composite mapping.
func (c *composite) add(ns string, name string, mapping relmap) error {
	_ = c.delete(ns, name) // the name may be absent; that error is expected
	n := namespace(ns)
	if err := c.checkConsistency(n, name, mapping); err != nil {
		return err
	}
	// store a defensive copy so the caller can't mutate our state later
	copied := make(relmap, len(mapping))
	for from, to := range mapping {
		copied[from] = to
	}
	c.getNamespaceMapping(n)[name] = copied
	c.merge(n)
	return nil
}
// getNamespaceMapping returns the unimap for ns, lazily creating both the
// unimap and the namespace's composite table on first use.
func (c *composite) getNamespaceMapping(ns namespace) unimap {
	if existing, ok := c.mappings[ns]; ok {
		return existing
	}
	created := make(unimap)
	c.mappings[ns] = created
	c.composite[ns] = make(relmap)
	return created
}
// delete removes the named mapping from ns and recomputes the composite
// mapping; it fails when the name is unknown.
func (c *composite) delete(ns string, name string) error {
	n := namespace(ns)
	nsMapping := c.getNamespaceMapping(n)
	if _, present := nsMapping[name]; !present {
		return fmt.Errorf("mapping not found: %s", name)
	}
	delete(nsMapping, name)
	c.merge(n)
	return nil
}
// doMap looks `value` up in the namespace's composite mapping, falling back
// to the identity mapping when the value is unknown.
func (c *composite) doMap(ns string, value string) string {
	n := namespace(ns)
	c.getNamespaceMapping(n) // ensure the namespace tables exist
	if mapped, ok := c.composite[n][value]; ok {
		return mapped
	}
	return value
}
// dump returns a Go-syntax rendering of the composite's internal state.
func (c *composite) dump() string {
	return fmt.Sprintf("%#v", c)
}
// merge recomputes the composed mapping for ns as the union of all named
// mappings registered under it. When the namespace no longer holds any
// mappings, both per-namespace tables are dropped so empty namespaces don't
// leak memory.
func (c *composite) merge(ns namespace) {
	c.composite[ns] = make(map[string]string)
	empty := true
	for _, m := range c.mappings[ns] {
		empty = false
		for k, v := range m {
			c.composite[ns][k] = v
		}
	}
	if empty {
		// avoid leaking memory when namespaces go away
		delete(c.mappings, ns)
		// the composed table must be dropped too, or it lingers forever;
		// getNamespaceMapping recreates both tables on next use
		delete(c.composite, ns)
	}
}
// checkConsistency verifies that `mapping` agrees with every mapping already
// registered under `ns`: no key may map to a value different from the one the
// namespace already assigns it. On conflict it returns an error naming the
// offending keys and the mappings involved.
func (c *composite) checkConsistency(ns namespace, name string, mapping map[string]string) error {
	collisions := ""
	for k, v := range mapping {
		// fast path: consult the pre-merged composite mapping first
		if w, ok := c.composite[ns][k]; ok && v != w {
			// slow path: find a named mapping that disagrees, for the error message
			for n, m := range c.mappings[ns] {
				if w, ok := m[k]; ok && v != w {
					sep := ""
					if collisions != "" {
						sep = ", "
					}
					collisions = fmt.Sprintf("%s%sit maps %q to %q but %s maps %q to %q", collisions, sep, k, v, n, k, w)
					// report only the first conflicting mapping per key
					break
				}
			}
		}
	}
	if collisions != "" {
		return fmt.Errorf("imagemap %q in namespace %q was rejected: %s", name, ns, collisions)
	}
	return nil
}
package src
import (
"encoding/binary"
"math"
"reflect"
)
// IEncoder describes a binary serializer that turns Go values into a byte
// stream one primitive at a time, honoring a configurable byte order.
// Note the (error, value) return order on Encode/countByteLen is reversed
// from idiomatic Go but is consistent across this package.
type IEncoder interface {
	// countByteLen reports the number of bytes needed to encode data.
	countByteLen(data interface{}) (error, uint64)
	// SetByteOrder selects the byte order used for multi-byte values.
	SetByteOrder(order ByteOrder)
	// Encode serializes data into a freshly allocated byte slice.
	Encode(data interface{}) (error, []byte)
	// Each encodeXxx writes one value into stream at pos and returns the
	// stream together with the advanced position.
	encodeUint8(stream []byte, pos uint64, value uint8) ([]byte, uint64)
	encodeUint16(stream []byte, pos uint64, value uint16) ([]byte, uint64)
	encodeUint32(stream []byte, pos uint64, value uint32) ([]byte, uint64)
	encodeUint64(stream []byte, pos uint64, value uint64) ([]byte, uint64)
	encodeFloat32(stream []byte, pos uint64, value float32) ([]byte, uint64)
	encodeFloat64(stream []byte, pos uint64, value float64) ([]byte, uint64)
}
// Encoder serializes Go values into a byte stream.
// NOTE(review): the embedded IEncoder interface field is never assigned in
// this file, so it stays nil — presumably it only documents the intended
// method set; confirm it is never called through.
type Encoder struct {
	IEncoder
	// order is the byte order applied to multi-byte values (little-endian by default).
	order binary.ByteOrder
}
// NewEncoder returns an Encoder whose default byte order is little-endian.
func NewEncoder() *Encoder {
	return &Encoder{order: binary.ByteOrder(binary.LittleEndian)}
}
// SetByteOrder selects big- or little-endian encoding for subsequent Encode
// calls; any value other than BigEndian selects little-endian.
func (e *Encoder) SetByteOrder(order ByteOrder) {
	e.order = binary.ByteOrder(binary.LittleEndian)
	if order == BigEndian {
		e.order = binary.ByteOrder(binary.BigEndian)
	}
}
// Encode serializes `data` into a new byte slice sized exactly to fit,
// as computed by countByteLen. Note the (error, value) return order follows
// this file's convention, which is reversed from idiomatic Go.
func (e Encoder) Encode(data interface{}) (error, []byte) {
	// only allocate the amount of bytes which are necessary
	value := reflect.ValueOf(data)
	err, length := countByteLen(value)
	if err != nil {
		return err, []byte{}
	}
	stream := make([]byte, length)
	pos := uint64(0)
	stream, pos = e.encode(value, stream, pos)
	return nil, stream
}
// encode dispatches value to the appropriate encoder based on its reflected
// kind; scalars fall through to encodeSingle.
func (e Encoder) encode(value reflect.Value, stream []byte, pos uint64) ([]byte, uint64) {
	switch value.Kind() {
	case reflect.Struct:
		return e.encodeStruct(value, stream, pos)
	case reflect.Array, reflect.Slice:
		return e.encodeArray(value, stream, pos)
	case reflect.String:
		return e.encodeString(stream, pos, value.String())
	default:
		return e.encodeSingle(value, stream, pos)
	}
}
// encodeStruct encodes every field of structure in declaration order.
func (e Encoder) encodeStruct(structure reflect.Value, stream []byte, pos uint64) ([]byte, uint64) {
	n := structure.NumField()
	for i := 0; i < n; i++ {
		stream, pos = e.encode(structure.Field(i), stream, pos)
	}
	return stream, pos
}
// encodeArray writes a uint32 element count followed by each element in
// order. Note the length prefix is emitted for fixed-size arrays as well as
// slices, matching the sizing done by countByteLen.
func (e Encoder) encodeArray(array reflect.Value, stream []byte, pos uint64) ([]byte, uint64) {
	stream, pos = e.encodeUint32(stream, pos, uint32(array.Len()))
	for i := 0; i < array.Len(); i++ {
		elem := array.Index(i)
		stream, pos = e.encode(elem, stream, pos)
	}
	return stream, pos
}
// encodeSingle writes one scalar value at pos using the encoder's byte
// order. int/uint are encoded as 32-bit values on the wire; unsupported
// kinds are silently skipped.
func (e Encoder) encodeSingle(value reflect.Value, stream []byte, pos uint64) ([]byte, uint64) {
	switch value.Kind() {
	case reflect.Uint8:
		return e.encodeUint8(stream, pos, uint8(value.Uint()))
	case reflect.Int8:
		return e.encodeUint8(stream, pos, uint8(value.Int()))
	case reflect.Uint16:
		return e.encodeUint16(stream, pos, uint16(value.Uint()))
	case reflect.Int16:
		return e.encodeUint16(stream, pos, uint16(value.Int()))
	case reflect.Uint32, reflect.Uint:
		return e.encodeUint32(stream, pos, uint32(value.Uint()))
	case reflect.Int32, reflect.Int:
		return e.encodeUint32(stream, pos, uint32(value.Int()))
	case reflect.Uint64:
		return e.encodeUint64(stream, pos, value.Uint())
	case reflect.Int64:
		return e.encodeUint64(stream, pos, uint64(value.Int()))
	case reflect.Float32:
		return e.encodeFloat32(stream, pos, float32(value.Float()))
	case reflect.Float64:
		return e.encodeFloat64(stream, pos, value.Float())
	default:
		return stream, pos
	}
}
// encodeString copies the raw bytes of str into stream at pos and advances
// pos by len(str). No length prefix is written here; the stream is assumed
// to have been sized by countByteLen.
func (e Encoder) encodeString(stream []byte, pos uint64, str string) ([]byte, uint64) {
	copy(stream[pos:], str)
	return stream, pos + uint64(len(str))
}
// encodeUint8 writes a single byte at pos and advances pos by one.
func (e Encoder) encodeUint8(stream []byte, pos uint64, value uint8) ([]byte, uint64) {
	stream[pos] = value
	return stream, pos + 1
}
// encodeUint16 writes value as two bytes in the configured byte order.
func (e Encoder) encodeUint16(stream []byte, pos uint64, value uint16) ([]byte, uint64) {
	byteLen := uint64(2)
	e.order.PutUint16(stream[pos:pos+byteLen], value)
	pos += byteLen
	return stream, pos
}

// encodeUint32 writes value as four bytes in the configured byte order.
func (e Encoder) encodeUint32(stream []byte, pos uint64, value uint32) ([]byte, uint64) {
	byteLen := uint64(4)
	e.order.PutUint32(stream[pos:pos+byteLen], value)
	pos += byteLen
	return stream, pos
}

// encodeUint64 writes value as eight bytes in the configured byte order.
func (e Encoder) encodeUint64(stream []byte, pos uint64, value uint64) ([]byte, uint64) {
	byteLen := uint64(8)
	e.order.PutUint64(stream[pos:pos+byteLen], value)
	pos += byteLen
	return stream, pos
}

// encodeFloat32 writes the IEEE 754 bit pattern of value as four bytes.
func (e Encoder) encodeFloat32(stream []byte, pos uint64, value float32) ([]byte, uint64) {
	byteLen := uint64(4)
	e.order.PutUint32(stream[pos:pos+byteLen], math.Float32bits(value))
	pos += byteLen
	return stream, pos
}

// encodeFloat64 writes the IEEE 754 bit pattern of value as eight bytes.
func (e Encoder) encodeFloat64(stream []byte, pos uint64, value float64) ([]byte, uint64) {
	byteLen := uint64(8)
	e.order.PutUint64(stream[pos:pos+byteLen], math.Float64bits(value))
	pos += byteLen
	return stream, pos
}
package mcutils
// ValueType is the set of element types supported by the generic collection
// helpers in this package.
type ValueType interface {
	string | int64 | float64 | bool
}

// Number constrains a type parameter to the numeric types used here.
type Number interface {
	int64 | float64
}
// Index returns the position of the first element of arr equal to val,
// or -1 when val is absent.
func Index[T ValueType](arr []T, val T) int {
	for idx := range arr {
		if arr[idx] == val {
			return idx
		}
	}
	return -1
}
// ArrayContains reports whether str occurs anywhere in arr.
func ArrayContains[T ValueType](arr []T, str T) bool {
	for idx := range arr {
		if arr[idx] == str {
			return true
		}
	}
	return false
}
// ArrayStringContains reports whether val occurs anywhere in arr.
func ArrayStringContains(arr []string, val string) bool {
	for idx := range arr {
		if arr[idx] == val {
			return true
		}
	}
	return false
}
// ArrayIntContains reports whether val occurs anywhere in arr.
func ArrayIntContains(arr []int, val int) bool {
	for idx := range arr {
		if arr[idx] == val {
			return true
		}
	}
	return false
}
// ArrayFloatContains reports whether str occurs anywhere in arr
// (exact float equality).
func ArrayFloatContains(arr []float64, str float64) bool {
	for idx := range arr {
		if arr[idx] == str {
			return true
		}
	}
	return false
}
// Any reports whether at least one element of arr equals val.
func Any[T ValueType](arr []T, val T) bool {
	for idx := range arr {
		if arr[idx] == val {
			return true
		}
	}
	return false
}
// All reports whether every element of arr equals val. An empty slice
// vacuously satisfies the condition, so All returns true for it.
//
// Bug fix: the previous implementation had the logic inverted — it returned
// true as soon as one element differed from val (i.e. it computed
// "not all equal"), and false for a slice where every element matched.
func All[T ValueType](arr []T, val T) bool {
	for _, value := range arr {
		if value != val {
			return false
		}
	}
	return true
}
// Map returns a new slice containing mapFunc applied to every element of
// arr, preserving order. A nil or empty input returns nil, matching the
// previous append-based behaviour.
//
// Improvement: the result is allocated once at the exact size instead of
// growing via repeated append.
func Map[T ValueType](arr []T, mapFunc func(T) T) []T {
	if len(arr) == 0 {
		return nil
	}
	mapResult := make([]T, len(arr))
	for i, v := range arr {
		mapResult[i] = mapFunc(v)
	}
	return mapResult
}
// MapGen streams mapFunc(v) for every element of arr into mapChan, then
// closes the channel so receivers can range over it.
// NOTE(review): the nil guard below only protects the close call — a nil
// mapChan would already block forever on the first send above. Confirm
// whether callers may legally pass nil here.
func MapGen[T ValueType](arr []T, mapFunc func(T) T, mapChan chan<- T) {
	for _, v := range arr {
		mapChan <- mapFunc(v)
	}
	if mapChan != nil {
		close(mapChan)
	}
}
// MapInt returns a new slice containing mapFunc applied to every element of
// arr, preserving order. A nil or empty input returns nil, matching the
// previous append-based behaviour.
//
// Improvement: the result is allocated once at the exact size instead of
// growing via repeated append.
func MapInt(arr []int, mapFunc func(int) int) []int {
	if len(arr) == 0 {
		return nil
	}
	mapResult := make([]int, len(arr))
	for i, v := range arr {
		mapResult[i] = mapFunc(v)
	}
	return mapResult
}
// MapFloat returns a new slice containing mapFunc applied to every element
// of arr, preserving order. A nil or empty input returns nil, matching the
// previous append-based behaviour.
//
// Improvement: the result is allocated once at the exact size instead of
// growing via repeated append.
func MapFloat(arr []float64, mapFunc func(float64) float64) []float64 {
	if len(arr) == 0 {
		return nil
	}
	mapResult := make([]float64, len(arr))
	for i, v := range arr {
		mapResult[i] = mapFunc(v)
	}
	return mapResult
}
// MapString returns a new slice containing mapFunc applied to every element
// of arr, preserving order. A nil or empty input returns nil, matching the
// previous append-based behaviour.
//
// Improvement: the result is allocated once at the exact size instead of
// growing via repeated append.
func MapString(arr []string, mapFunc func(string) string) []string {
	if len(arr) == 0 {
		return nil
	}
	mapResult := make([]string, len(arr))
	for i, v := range arr {
		mapResult[i] = mapFunc(v)
	}
	return mapResult
}
// Filter returns the elements of arr for which filterFunc reports true,
// in their original order. Returns nil when nothing matches.
func Filter[T ValueType](arr []T, filterFunc func(T) bool) []T {
	var kept []T
	for _, item := range arr {
		if !filterFunc(item) {
			continue
		}
		kept = append(kept, item)
	}
	return kept
}
// FilterGen streams the elements of arr for which filterFunc reports true
// into filterChan, then closes the channel so receivers can range over it.
// NOTE(review): the nil guard below only protects the close call — a nil
// filterChan would already block forever on the first matching send above.
func FilterGen[T ValueType](arr []T, filterFunc func(T) bool, filterChan chan<- T) {
	for _, v := range arr {
		if filterFunc(v) {
			filterChan <- v
		}
	}
	if filterChan != nil {
		close(filterChan)
	}
}
// Take returns a new slice holding at most the first num elements of arr.
func Take[T ValueType](num uint, arr []T) []T {
	var taken []T
	for idx := 0; idx < len(arr) && uint(idx) < num; idx++ {
		taken = append(taken, arr[idx])
	}
	return taken
}
// TakeGen streams at most the first num elements of arr into takeChan, then
// closes the channel so receivers can range over it.
// NOTE(review): as with MapGen/FilterGen, the nil guard only protects the
// close call; a nil takeChan would block forever on the first send.
func TakeGen[T ValueType](num uint, arr []T, takeChan chan<- T) {
	// use channels to implement generator to send/yield/generate num of values from arr
	var cnt uint = 0
	for _, v := range arr {
		if cnt == num {
			break
		}
		takeChan <- v
		cnt++
	}
	if takeChan != nil {
		close(takeChan)
	}
}
package fuzzymatcher
import (
"unicode/utf8"
)
// wordEntry holds one normalized word of a target sentence plus the
// precomputed fuzzy-lookahead tables used during matching.
type wordEntry struct {
	// wordIdx contains one bit shifted to the left for each word in the sentence
	// So wordIdx will be 1, 2, 4, 8, 16, 32, ...
	// This also means the wordIdx can be a maximum of 64 words
	WordIdx uint64
	Letters []rune
	// len caches len(Letters); allowedOffset is the fuzziness budget (how
	// many skipped/mistyped characters this word tolerates), derived from len.
	len           int
	allowedOffset int
	// FuzzyFirstLetter lists the up to three letters a match may start on;
	// FuzzyLettersOrder lists, per position, the up to three letters that
	// may legally follow it. A 0 entry means "no candidate".
	FuzzyFirstLetter  [3]rune
	FuzzyLettersOrder [][3]rune
}
// letterAt returns the rune at idx, or 0 when idx is out of range.
func (we *wordEntry) letterAt(idx int) rune {
	if idx >= 0 && idx < len(we.Letters) {
		return we.Letters[idx]
	}
	return 0
}
// calculateFuzzyLetterOrder precomputes the word's fuzziness budget and, for
// every letter position, the up to three letters that may legally follow it,
// plus the candidate letters a match may start on.
func (we *wordEntry) calculateFuzzyLetterOrder() {
	we.len = len(we.Letters)
	// Shorter words tolerate fewer skipped/mistyped characters.
	if we.len <= 4 {
		we.allowedOffset = 1
	} else if we.len <= 7 {
		we.allowedOffset = 2
	} else {
		we.allowedOffset = 3
	}
	we.FuzzyLettersOrder = [][3]rune{}
	lettersLen := len(we.Letters)
	possibleNextLetters := [3]rune{}
	for i := 0; i < lettersLen-1; i++ {
		possibleNextLetters = [3]rune{we.letterAt(i + 1), we.letterAt(i + 2), we.letterAt(i + 3)}
		// Zero out lookahead slots beyond the fuzziness budget.
		if we.allowedOffset < 3 {
			possibleNextLetters[2] = 0
			if we.allowedOffset < 2 {
				possibleNextLetters[1] = 0
			}
		}
		we.FuzzyLettersOrder = append(we.FuzzyLettersOrder, possibleNextLetters)
	}
	we.FuzzyFirstLetter = [3]rune{we.letterAt(0)}
	// NOTE(review): letterAt returns 0 (not utf8.RuneError) when the index is
	// out of range, so these RuneError comparisons are effectively always
	// true for short words. Harmless today because assigning 0 leaves the
	// zero value in place, but confirm the intended check was "!= 0".
	nextLetter := we.letterAt(1)
	if nextLetter != utf8.RuneError && we.allowedOffset >= 2 {
		we.FuzzyFirstLetter[1] = nextLetter
		nextLetter := we.letterAt(2)
		if nextLetter != utf8.RuneError && we.allowedOffset >= 3 {
			we.FuzzyFirstLetter[2] = nextLetter
		}
	}
}
// pathToWord is an index entry: starting letter -> (sentence, word) plus the
// offset into the word that letter corresponds to and the minimum number of
// input characters that must remain for the word to still be matchable.
type pathToWord struct {
	Letter             rune
	Sentence           int
	Word               int
	WordOffset         int
	MustRemainingChars int
}

// sentenceT is one target sentence, split into normalized words.
type sentenceT struct {
	Words                []wordEntry
	IdxInNewMatcherInput int
	// the fields below are generated with the (*sentence).complete() method
	Paths       []pathToWord
	IndexSum    uint64 // bitwise OR of every word's WordIdx (the "all matched" value)
	SentenceLen int
	// Used in the matching process
	MatchIndexSum uint64 // bits of the words matched so far in the current Match call
}
// complete derives the sentence's index data from its words: one pathToWord
// per candidate first letter of each word, the full-match bitmask IndexSum,
// and the sentence's normalized character length.
func (s *sentenceT) complete() {
	s.Paths = []pathToWord{}
	for wordIdx, word := range s.Words {
		for offset, letter := range word.FuzzyFirstLetter {
			if letter == 0 {
				break
			}
			s.Paths = append(s.Paths, pathToWord{
				Letter: letter,
				// Sentence is filled in later by (*Matcher).complete().
				Sentence:           -1,
				Word:               wordIdx,
				WordOffset:         offset,
				MustRemainingChars: word.len - word.allowedOffset - 1,
			})
		}
		s.IndexSum |= word.WordIdx
		s.SentenceLen += word.len
		if wordIdx != len(s.Words)-1 {
			// Also add a space character for the gap between words.
			s.SentenceLen++
		}
	}
}
// Matcher is used to match sentences
type Matcher struct {
Sentences []sentenceT
// the fields below are generated with the (*Matcher).complete() method
Paths []pathToWord
HasPathsWithRuneSelf bool // basicly tells if there are complex utf8 chars
PathByLetterMap map[rune][]pathToWord // Use if HasPathsWithRuneSelf == true
PathByLetterList [utf8.RuneSelf][]pathToWord // Use if HasPathsWithRuneSelf == false
// Zero alloc cache
UTF8RuneCreation []byte
InProgressMatches []inProgressMatch
}
// complete builds the matcher-wide letter index: every sentence path gets
// its Sentence field resolved and is registered under its starting letter,
// in an ASCII array for fast lookup and a map for non-ASCII runes.
func (m *Matcher) complete() {
	m.Paths = []pathToWord{}
	m.PathByLetterMap = map[rune][]pathToWord{}
	m.PathByLetterList = [utf8.RuneSelf][]pathToWord{}
	for idx, sentence := range m.Sentences {
		for _, path := range sentence.Paths {
			path.Sentence = idx
			letter := path.Letter
			m.Paths = append(m.Paths, path)
			list, ok := m.PathByLetterMap[letter]
			// Add the path to a specific paths list or create a new paths list
			if !ok {
				m.PathByLetterMap[letter] = []pathToWord{path}
			} else {
				m.PathByLetterMap[letter] = append(list, path)
			}
			if letter < utf8.RuneSelf {
				m.PathByLetterList[letter] = append(m.PathByLetterList[letter], path)
			} else {
				m.HasPathsWithRuneSelf = true
			}
		}
	}
}
// upperToLowerCaseOffset added to an ASCII uppercase letter lowercases it.
const upperToLowerCaseOffset = 'a' - 'A'

// NewMatcher creates a new instance of the matcher
// This function takes relatively long to execute so do this once, and use the returned matcher to match it against lots of entries
func NewMatcher(sentences ...string) *Matcher {
	res := Matcher{
		Sentences:         []sentenceT{},
		UTF8RuneCreation:  []byte{},
		InProgressMatches: []inProgressMatch{},
	}
	for sentenceIdx, sentence := range sentences {
		parsedSentence := sentenceT{
			Words:                []wordEntry{},
			IdxInNewMatcherInput: sentenceIdx,
		}
		word := wordEntry{WordIdx: 1}
		// commitWord finalizes the word collected so far (if any) and starts
		// a fresh one with the next bit position.
		commitWord := func() {
			if len(word.Letters) == 0 {
				// Just reset the current word
				word = wordEntry{WordIdx: word.WordIdx}
			} else {
				word.calculateFuzzyLetterOrder()
				parsedSentence.Words = append(parsedSentence.Words, word)
				word = wordEntry{WordIdx: word.WordIdx << 1}
			}
		}
		// Normalize: lowercase ASCII letters/digits are kept, accented runes
		// are folded via checkAndCorredUnicodeChar, anything else ends a word.
		for _, c := range []rune(sentence) {
			if (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') {
				word.Letters = append(word.Letters, c)
			} else if c >= 'A' && c <= 'Z' {
				word.Letters = append(word.Letters, c+upperToLowerCaseOffset)
			} else if c >= utf8.RuneSelf {
				newC, ok := checkAndCorredUnicodeChar(c)
				if ok {
					word.Letters = append(word.Letters, newC)
				}
			} else {
				commitWord()
			}
		}
		commitWord()
		if len(parsedSentence.Words) == 0 {
			continue
		}
		parsedSentence.complete()
		res.Sentences = append(res.Sentences, parsedSentence)
	}
	res.complete()
	return &res
}
// inProgressMatch tracks one partially matched word of one sentence while
// the current input word is being scanned.
type inProgressMatch struct {
	PathToWord   pathToWord
	Word         *wordEntry
	Sentence     *sentenceT
	WordOffset   int  // next position in the word's fuzzy-lookahead table
	SkippedChars int  // fuzziness budget consumed so far
	NoMoreLetters bool // all letters of the word have been consumed
}

// addWordIdxToSentence records this word as matched in its sentence and
// returns the sentence's original input index when every word of the
// sentence has now been matched, or -1 otherwise.
func (e *inProgressMatch) addWordIdxToSentence() int {
	e.Sentence.MatchIndexSum |= e.Word.WordIdx
	if e.Sentence.MatchIndexSum == e.Sentence.IndexSum {
		return e.Sentence.IdxInNewMatcherInput
	}
	return -1
}
// Match matches a sentence to the matchers input
// Returns the index of the matched sentenced
// If nothing found returns -1
func (m *Matcher) Match(sentence string) int {
	// Reset the matching index sums and zero alloc cache
	for idx := range m.Sentences {
		m.Sentences[idx].MatchIndexSum = 0
	}
	m.InProgressMatches = m.InProgressMatches[:0]
	sentenceLen := len(sentence)
	var rLetter rune
	beginWord := true
	for i := 0; i < sentenceLen; i++ {
		letter := sentence[i]
		if letter == 0 {
			continue
		}
		if letter >= utf8.RuneSelf {
			// Multi-byte (non-ASCII) input: decode and normalize the rune.
			if beginWord && !m.HasPathsWithRuneSelf {
				// There are not even words starting with this letter, lets skip this one
				beginWord = false
				continue
			}
			if !beginWord && len(m.InProgressMatches) == 0 {
				// We are matching nothing on the current word, no need to execute heavy instructions
				continue
			}
			// Decode using the reusable scratch buffer; fewer than 4 bytes
			// are fed in near the end of the input.
			switch i {
			case sentenceLen - 1:
				// This is an invalid character (truncated multi-byte sequence)
				rLetter = utf8.RuneError
			case sentenceLen - 2:
				r, size := utf8.DecodeRune(append(m.UTF8RuneCreation[:0], letter, sentence[i+1]))
				i += size - 1
				rLetter, _ = checkAndCorredUnicodeChar(r)
			case sentenceLen - 3:
				r, size := utf8.DecodeRune(append(m.UTF8RuneCreation[:0], letter, sentence[i+1], sentence[i+2]))
				i += size - 1
				rLetter, _ = checkAndCorredUnicodeChar(r)
			default:
				r, size := utf8.DecodeRune(append(m.UTF8RuneCreation[:0], letter, sentence[i+1], sentence[i+2], sentence[i+3]))
				i += size - 1
				rLetter, _ = checkAndCorredUnicodeChar(r)
			}
			if rLetter == utf8.RuneError {
				continue
			}
		} else {
			rLetter = rune(sentence[i])
			if (rLetter >= 'a' && rLetter <= 'z') || (rLetter >= '0' && rLetter <= '9') {
				// Do nothing
			} else if rLetter >= 'A' && rLetter <= 'Z' {
				rLetter += upperToLowerCaseOffset
			} else {
				// go to next word
				// Firstly lets check if there where any matches from the last word
				for _, entry := range m.InProgressMatches {
					// Check if we mis the last chars
					// If so this entry is oke
					// Makes sure "banan" can match "banana"
					if len(entry.Word.FuzzyLettersOrder)-entry.WordOffset <= entry.Word.allowedOffset-entry.SkippedChars-1 {
						res := entry.addWordIdxToSentence()
						if res != -1 {
							return res
						}
					}
				}
				// Reset the m.InProgressMatches so we can scan for new words
				m.InProgressMatches = m.InProgressMatches[:0]
				beginWord = true
				continue
			}
		}
		if beginWord {
			// Seed new in-progress matches from every word that may start
			// with this letter.
			var paths []pathToWord
			if !m.HasPathsWithRuneSelf || rLetter < utf8.RuneSelf {
				paths = m.PathByLetterList[rLetter]
			} else {
				paths = m.PathByLetterMap[rLetter]
			}
			for _, path := range paths {
				if sentenceLen-i >= path.MustRemainingChars {
					// NOTE(review): `sentence` here intentionally shadows the
					// input string parameter with a *sentenceT.
					sentence := &m.Sentences[path.Sentence]
					word := &sentence.Words[path.Word]
					if sentence.MatchIndexSum&word.WordIdx != 0 {
						// This word was earlier already matched
						continue
					}
					m.InProgressMatches = append(m.InProgressMatches, inProgressMatch{
						PathToWord:    path,
						Word:          word,
						Sentence:      sentence,
						WordOffset:    path.WordOffset,
						SkippedChars:  path.WordOffset,
						NoMoreLetters: word.len == 1,
					})
				}
			}
			beginWord = false
			continue
		}
		// Advance (or drop) every in-progress match by this letter. Iterates
		// backwards so removal by re-slicing is safe. NOTE(review): the loop
		// variable `i` shadows the outer input index on purpose.
	outer:
		for i := len(m.InProgressMatches) - 1; i >= 0; i-- {
			entry := m.InProgressMatches[i]
			if !entry.NoMoreLetters {
				for offset, c := range entry.Word.FuzzyLettersOrder[entry.WordOffset] {
					if c == rLetter {
						// A lookahead hit further than the remaining fuzziness
						// budget allows does not count.
						if offset > 0 && offset >= entry.Word.allowedOffset-entry.SkippedChars {
							continue
						}
						entry.WordOffset += offset + 1
						entry.SkippedChars += offset
						if entry.WordOffset == len(entry.Word.FuzzyLettersOrder) {
							entry.NoMoreLetters = true
						}
						m.InProgressMatches[i] = entry
						continue outer
					}
					if c == 0 {
						break
					}
				}
			}
			// No letter consumed: spend one unit of fuzziness budget, or
			// drop the entry once the budget is exhausted.
			if entry.SkippedChars < entry.Word.allowedOffset {
				entry.SkippedChars++
				m.InProgressMatches[i] = entry
			} else {
				m.InProgressMatches = append(m.InProgressMatches[:i], m.InProgressMatches[i+1:]...)
			}
		}
	}
	// Flush the final word (input may end without a separator).
	for _, entry := range m.InProgressMatches {
		// Check if we mis the last chars
		// If so this entry is oke
		// Makes sure "banan" can match "banana"
		if len(entry.Word.FuzzyLettersOrder)-entry.WordOffset <= entry.Word.allowedOffset-entry.SkippedChars-1 {
			res := entry.addWordIdxToSentence()
			if res != -1 {
				return res
			}
		}
	}
	return -1
}
// checkAndCorredUnicodeChar normalizes a non-ASCII rune for matching:
// accented latin letters fold to their base ASCII letter, while combining
// accents, exotic spaces, quotation marks and similar punctuation are
// rejected as (utf8.RuneError, false). Any other rune passes through
// unchanged with ok == true.
func checkAndCorredUnicodeChar(c rune) (rune, bool) {
	switch c {
	case 'à', 'À', 'á', 'Á', 'â', 'Â', 'ã', 'Ã', 'ä', 'Ä', 'å', 'Å', 'æ', 'Æ':
		return 'a', true
	case 'è', 'È', 'é', 'É', 'ê', 'Ê', 'ë', 'Ë':
		return 'e', true
	case 'ì', 'Ì', 'í', 'Í', 'î', 'Î', 'ï', 'Ï':
		return 'i', true
	case 'ò', 'Ò', 'ó', 'Ó', 'ô', 'Ô', 'õ', 'Õ', 'ö', 'Ö', 'ð', 'Ð', 'ø', 'Ø':
		return 'o', true
	case 'ù', 'Ù', 'ú', 'Ú', 'û', 'Û', 'ü', 'Ü':
		return 'u', true
	case 'ß':
		return 's', true
	case 'ñ', 'Ñ':
		return 'n', true
	case 'ý', 'Ý', 'ÿ', 'Ÿ':
		return 'y', true
	case 'ç', 'Ç', '©':
		return 'c', true
	case '®':
		return 'r', true
	case 768, // accent of: à
		769, // accent of: á
		770, // accent of: â
		771, // accent of: ã
		776, // accent of: ä
		778, // accent of: å
		'¿',
		'¡',
		0x2002, // En space
		0x2003, // Em space
		0x2004, // Three-per-em space
		0x2005, // Four-per-em space
		0x2006, // Six-per-em space
		0x2007, // Figure space
		0x2008, // Punctuation space
		0x2009, // Thin space
		0x200A, // Hair space
		0x200B, // Zero width space
		0x202F, // Narrow no-break space
		0x205F, // Medium mathematical space
		0x3000, // Ideographic space
		'“',
		'”',
		'’',
		'‵',
		'‹',
		'›',
		'»',
		'«',
		utf8.RuneError:
		return utf8.RuneError, false
	default:
		return c, true
	}
}
package dax
import (
"github.com/dlespiau/dax/math"
)
// Node is a scene-graph node: a local transform (position, rotation, scale)
// plus parent/children links and an arbitrary component list.
type Node struct {
	// Grapher
	parent   Grapher
	children []Grapher
	// transformValid / worldTransformValid are dirty flags: false means the
	// corresponding cached matrix must be recomputed before use.
	transformValid      bool
	worldTransformValid bool
	position  math.Vec3
	rotation  math.Quaternion
	scale     math.Vec3
	transform math.Transform // cached local transform (T * R * S)
	// worldTransform is the local space to world space transform matrix. It is
	// only valid between an updateWorldTransform() and any scene graph
	// manipulation. In other word, internal passes on the scene graph, like
	// rendering passes.
	worldTransform math.Transform
	// List of components.
	components []interface{}
}
// NewNode allocates and initializes a new scene-graph node.
func NewNode() *Node {
	node := &Node{}
	node.Init()
	return node
}
// Init resets the node to identity rotation and unit scale. It must be
// called on nodes not created through NewNode.
func (n *Node) Init() {
	n.rotation.Iden()
	n.scale = math.Vec3{1, 1, 1}
}
// GetPosition returns a pointer to the node's local-space position.
func (n *Node) GetPosition() *math.Vec3 {
	return &n.position
}

// SetPosition sets the local position and invalidates the cached transform.
func (n *Node) SetPosition(x, y, z float32) {
	n.position[0] = x
	n.position[1] = y
	n.position[2] = z
	n.transformValid = false
}

// SetPositionV sets the local position from a vector.
func (n *Node) SetPositionV(position *math.Vec3) {
	n.position = *position
	n.transformValid = false
}

// Translate moves the node by (tx, ty, tz) in local space.
func (n *Node) Translate(tx, ty, tz float32) {
	n.position[0] += tx
	n.position[1] += ty
	n.position[2] += tz
	n.transformValid = false
}

// TranslateV moves the node by the given vector.
func (n *Node) TranslateV(t *math.Vec3) {
	n.position[0] += t[0]
	n.position[1] += t[1]
	n.position[2] += t[2]
	n.transformValid = false
}

// TranslateX moves the node along the X axis.
func (n *Node) TranslateX(tx float32) {
	n.position[0] += tx
	n.transformValid = false
}

// TranslateY moves the node along the Y axis.
func (n *Node) TranslateY(ty float32) {
	n.position[1] += ty
	n.transformValid = false
}

// TranslateZ moves the node along the Z axis.
func (n *Node) TranslateZ(tz float32) {
	n.position[2] += tz
	n.transformValid = false
}
// GetRotation returns a pointer to the node's local-space rotation.
func (n *Node) GetRotation() *math.Quaternion {
	return &n.rotation
}

// SetRotation sets the local rotation and invalidates the cached transform.
func (n *Node) SetRotation(q *math.Quaternion) {
	n.rotation = *q
	n.transformValid = false
}

// RotateAroundAxis composes an additional rotation of angle around axis
// onto the current rotation.
func (n *Node) RotateAroundAxis(axis *math.Vec3, angle float32) {
	q := math.QuatRotate(angle, axis)
	n.rotation.MulWith(&q)
	n.transformValid = false
}

// RotateX rotates the node around the X axis by angle.
func (n *Node) RotateX(angle float32) {
	n.RotateAroundAxis(&math.Vec3{1, 0, 0}, angle)
}

// RotateY rotates the node around the Y axis by angle.
func (n *Node) RotateY(angle float32) {
	n.RotateAroundAxis(&math.Vec3{0, 1, 0}, angle)
}

// RotateZ rotates the node around the Z axis by angle.
func (n *Node) RotateZ(angle float32) {
	n.RotateAroundAxis(&math.Vec3{0, 0, 1}, angle)
}
// GetScale returns a pointer to the node's local-space scale factors.
func (n *Node) GetScale() *math.Vec3 {
	return &n.scale
}

// SetScale sets the per-axis scale and invalidates the cached transform.
func (n *Node) SetScale(sx, sy, sz float32) {
	n.scale[0] = sx
	n.scale[1] = sy
	n.scale[2] = sz
	n.transformValid = false
}

// SetScaleV sets the per-axis scale from a vector.
func (n *Node) SetScaleV(s *math.Vec3) {
	n.scale = *s
	n.transformValid = false
}

// Scale multiplies the current scale by the given per-axis factors.
func (n *Node) Scale(sx, sy, sz float32) {
	n.scale[0] *= sx
	n.scale[1] *= sy
	n.scale[2] *= sz
	n.transformValid = false
}

// ScaleV multiplies the current scale by the given vector, component-wise.
func (n *Node) ScaleV(s *math.Vec3) {
	n.scale[0] *= s[0]
	n.scale[1] *= s[1]
	n.scale[2] *= s[2]
	n.transformValid = false
}

// ScaleX multiplies the X scale factor by sx.
func (n *Node) ScaleX(sx float32) {
	n.scale[0] *= sx
	n.transformValid = false
}

// ScaleY multiplies the Y scale factor by sy.
func (n *Node) ScaleY(sy float32) {
	n.scale[1] *= sy
	n.transformValid = false
}

// ScaleZ multiplies the Z scale factor by sz.
func (n *Node) ScaleZ(sz float32) {
	n.scale[2] *= sz
	n.transformValid = false
}
// updateTransform rebuilds the cached local transform (translate, then
// rotate, then scale) when it has been invalidated; rebuilding also marks
// the world transform stale.
func (n *Node) updateTransform() {
	if n.transformValid {
		return
	}
	n.transform.SetTranslateVec3(&n.position)
	n.transform.RotateQuat(&n.rotation)
	n.transform.ScaleVec3(&n.scale)
	n.transformValid = true
	n.worldTransformValid = false
}

// getTransform returns the up-to-date local transform.
func (n *Node) getTransform() *math.Transform {
	n.updateTransform()
	return &n.transform
}

// GetTransform returns the up-to-date local transform as a 4x4 matrix.
func (n *Node) GetTransform() *math.Mat4 {
	n.updateTransform()
	return (*math.Mat4)(&n.transform)
}
// updateWorldTransform will update the transformation from node space to world
// space recursively on all nodes.
// force can be used to force the updates on children when a parent has changed
// its transform and we, then, need to update the world transform on that
// subtree.
func (n *Node) updateWorldTransform(force bool) {
	// Start by updating the local transform, and, as side effect,
	// worldTransformValid
	n.updateTransform()
	if !n.worldTransformValid || force {
		if n.parent == nil {
			// this node isn't parented (root or not part of a
			// scene graph)
			n.worldTransform = n.transform
		} else {
			// compose with parent transform
			parent := (n.parent).(*Node)
			world := (*math.Mat4)(&parent.worldTransform)
			local := (*math.Mat4)(&n.transform)
			(*math.Mat4)(&n.worldTransform).Mul4Of(world, local)
		}
		// Every descendant must now recompose against our new world matrix.
		force = true
	}
	for _, child := range n.children {
		node := child.(*Node)
		node.updateWorldTransform(force)
	}
}
// Components

// AddComponent attaches an arbitrary component to the node and returns the
// node so calls can be chained.
func (n *Node) AddComponent(c interface{}) *Node {
	n.components = append(n.components, c)
	return n
}
// Grapher implementation

// GetParent returns the paren of the node n.
func (n *Node) GetParent() Grapher {
	return n.parent
}

// setParent parents the node n to parent.
func (n *Node) setParent(parent Grapher) {
	n.parent = parent
}

// AddChild adds a child the node. The child is re-parented to n.
// NOTE(review): the type assertions here and in updateWorldTransform assume
// every Grapher in the graph is a *Node — confirm no other implementations
// exist, otherwise these will panic.
func (n *Node) AddChild(child Grapher) {
	childNode := child.(*Node)
	childNode.setParent(n)
	n.children = append(n.children, child)
}

// AddChildren adds a number of children to the node n.
func (n *Node) AddChildren(children ...Grapher) {
	for i := range children {
		n.AddChild(children[i])
	}
}

// GetChildren returns the list of children for the node n.
func (n *Node) GetChildren() []Grapher {
	return n.children
}
// Package assert contains wrapper on top of go's testing library
// to make tests easier.
package assert
import (
"fmt"
"math"
"reflect"
"regexp"
"runtime"
"testing"
)
// used to strip out the long filename: keeps at most the last two
// path components of the reported file.
var fileRegex = regexp.MustCompile("([^/]*/){0,2}[^/]*$")

// Assert is a helper struct for testing methods.
type Assert struct {
	t       *testing.T
	stack   int    // number of stack frames to traverse to generate error.
	context string // human-readable context prepended to every failure message
}
// New creates a new Assert struct.
func New(t *testing.T) Assert {
	return Assert{t, 0, ""}
}

// Stack shifts how many stack frames to traverse to print the error message.
// This may be useful if you're creating a helper testing method.
// Returns a new instance of Assert (the receiver is unchanged).
func (assert Assert) Stack(stack int) Assert {
	assert.stack += stack
	return assert
}

// Contextf sets the human-readable context of the test. This is useful
// when the line number is not sufficient locator for the test failure:
// i.e. testing in a loop.
// Returns a new instance of Assert (the receiver is unchanged).
// It will add to the existing context for the Assert object.
func (assert Assert) Contextf(format string, a ...interface{}) Assert {
	if assert.context != "" {
		assert.context += ", "
	}
	assert.context += fmt.Sprintf(format, a...)
	return assert
}
// Errorf marks the test as failed with a formatted message, prefixed with
// the caller's file:line and any configured context.
func (assert Assert) Errorf(format string, a ...interface{}) {
	assert.withCaller(format, a...)
}
// EqString errors the test if two strings aren't equal.
func (assert Assert) EqString(actual, expected string) {
	if actual != expected {
		assert.withCaller("Expected=[%s], actual=[%s]", expected, actual)
	}
}

// EqBool errors the test if two booleans aren't equal.
func (assert Assert) EqBool(actual, expected bool) {
	if actual != expected {
		assert.withCaller("Expected=[%t], actual=[%t]", expected, actual)
	}
}

// MustEqBool fails the test immediately (Fatalf) if two booleans aren't equal.
func (assert Assert) MustEqBool(actual, expected bool) {
	if actual != expected {
		assert.withCallerFatal("Expected=[%t], actual=[%t]", expected, actual)
	}
}

// EqInt errors the test if two ints aren't equal.
func (assert Assert) EqInt(actual, expected int) {
	if actual != expected {
		assert.withCaller("Expected=[%d], actual=[%d]", expected, actual)
	}
}

// MustEqInt fails the test immediately (Fatalf) if two ints aren't equal.
func (assert Assert) MustEqInt(actual, expected int) {
	if actual != expected {
		assert.withCallerFatal("Expected=[%d], actual=[%d]", expected, actual)
	}
}
// EqFloatArray errors the test if the two float slices differ in length or
// if any pair of elements differs by more than epsilon. A NaN element is
// considered equal only to another NaN at the same index.
func (assert Assert) EqFloatArray(actual, expected []float64, epsilon float64) {
	if len(actual) != len(expected) {
		assert.withCaller("Expected=%+v, actual=%+v", expected, actual)
		return
	}
	for i := range actual {
		if math.IsNaN(expected[i]) {
			if !math.IsNaN(actual[i]) {
				assert.withCaller("Expected=%+v, actual=%+v", expected, actual)
				return
			}
		} else {
			delta := actual[i] - expected[i]
			// A NaN delta means actual[i] is NaN while expected[i] is not.
			if math.IsNaN(delta) || math.Abs(delta) > epsilon {
				assert.withCaller("Expected=%+v, actual=%+v", expected, actual)
				return
			}
		}
	}
}
// EqFloat errors the test if two floats aren't equal. NaNs are considered equal.
func (assert Assert) EqFloat(actual, expected, epsilon float64) {
	if math.IsNaN(actual) != math.IsNaN(expected) {
		assert.withCaller("Expected=[%f], actual=[%f]", expected, actual)
	}
	if math.IsNaN(expected) {
		return
	}
	// If actual is NaN here the mismatch was already reported above;
	// NaN > epsilon is false so no duplicate error is emitted.
	delta := math.Abs(actual - expected)
	if delta > epsilon {
		assert.withCaller("Expected=[%f], actual=[%f]", expected, actual)
	}
}
// EqApproximate errors the test if two floats differ by epsilon or more.
// NOTE(review): unlike EqFloat, a NaN on either side always fails here
// (the comparison below is false for NaN deltas) despite the original
// comment claiming NaNs are considered equal — confirm which is intended.
func (assert Assert) EqApproximate(actual, expected, epsilon float64) {
	delta := actual - expected
	if !(-epsilon < delta && delta < epsilon) {
		assert.withCaller("Expected=[%f], actual=[%f]", expected, actual)
	}
}
// Eq errors the test if two arguments are not deeply equal
// (reflect.DeepEqual semantics).
func (assert Assert) Eq(actual, expected interface{}) {
	if !reflect.DeepEqual(actual, expected) {
		assert.withCaller("\nExpected=%+v\nActual  =%+v", expected, actual)
	}
}

// CheckError errors the test if a non-nil error is passed.
func (assert Assert) CheckError(err error) {
	if err != nil {
		assert.withCaller("Unexpected error: %s", err.Error())
	}
}
// Utility Functions
// =================

// withCaller reports a test error prefixed with the original caller's
// file:line and the configured context, if any.
func (assert Assert) withCaller(format string, a ...interface{}) {
	file, line := caller(assert.stack)
	if assert.context != "" {
		assert.t.Errorf("%s:%d> [%s] %s", file, line, assert.context, fmt.Sprintf(format, a...))
	} else {
		assert.t.Errorf("%s:%d>%s", file, line, fmt.Sprintf(format, a...))
	}
}

// withCallerFatal is withCaller but aborts the test via Fatalf.
func (assert Assert) withCallerFatal(format string, a ...interface{}) {
	file, line := caller(assert.stack)
	if assert.context != "" {
		assert.t.Fatalf("%s:%d> [%s] %s", file, line, assert.context, fmt.Sprintf(format, a...))
	} else {
		assert.t.Fatalf("%s:%d>%s", file, line, fmt.Sprintf(format, a...))
	}
}
// caller returns the (shortened) file name and line of the test code that
// invoked the assertion, skipping depth extra frames on top of the fixed
// three internal ones.
func caller(depth int) (string, int) {
	// determines how many stack frames to traverse.
	// we need to traverse 3 for the original caller:
	// 0: caller()
	// 1: Assert.withCaller()
	// 2: Assert.Eq...()
	// 3: <- original caller
	_, file, line, _ := runtime.Caller(depth + 3)
	match := fileRegex.FindString(file)
	if match != "" {
		return match, line
	}
	return file, line
}
package los
import "math"
// Map is a map of a roguelike level. Make sure all functions are
// linear time
type Map interface {
	OOB(int, int) bool            // Is the point x, y out of bounds?
	Activate(int, int)            // Activate x, y
	Discover(int, int)            // Mark x, y discovered
	Lit(int, int)                 // Light x, y
	UnLit(int, int)               // Darken x, y
	CantSeeThrough(int, int) bool // Can you see through x, y?
}
// CalcVisibility updates the visibility for the given map, about the
// given point (px, py) and with the radius given: it first darkens the
// surrounding square, then re-lights every tile with line of sight.
func CalcVisibility(m Map, px, py, radius int) {
	clearlight(m, px, py, radius)
	fov(m, px, py, radius)
}
// clearlight darkens the square region around (px, py) before field-of-view
// recomputation.
// NOTE(review): the loop range [p-light-1, p+light] is asymmetric (one extra
// row/column on the low side). Harmless in practice because fov only lights
// tiles strictly inside the radius, but confirm whether a symmetric range
// was intended.
func clearlight(m Map, px, py, light int) {
	for x := px - light - 1; x < px+light+1; x++ {
		for y := py - light - 1; y < py+light+1; y++ {
			m.UnLit(x, y)
		}
	}
}
// fov casts a line-of-sight ray from (x, y) to every tile strictly inside
// the circular radius, lighting and discovering the reachable ones.
func fov(m Map, x, y int, radius int) {
	for i := -radius; i <= radius; i++ { //iterate out of map bounds as well (radius^1)
		for j := -radius; j <= radius; j++ { //(radius^2)
			if i*i+j*j < radius*radius {
				los(m, x, y, x+i, y+j)
			}
		}
	}
}
/* Los calculation http://www.roguebasin.com/index.php?title=LOS_using_strict_definition */
// los walks the strict-definition line from (x0, y0) towards (x1, y1).
// The walk stops early at the first out-of-bounds or opaque tile (the
// opaque tile itself is discovered); otherwise the target tile is lit,
// discovered and activated.
func los(m Map, x0, y0, x1, y1 int) {
	// By taking source by reference, litting can be done outside of this function which would be better made generic.
	var sx int
	var sy int
	var dx int
	var dy int
	var dist float64
	dx = x1 - x0
	dy = y1 - y0
	//determine which quadrant to we're calculating: we climb in these two directions
	if x0 < x1 { //sx = (x0 < x1) ? 1 : -1;
		sx = 1
	} else {
		sx = -1
	}
	if y0 < y1 { //sy = (y0 < y1) ? 1 : -1;
		sy = 1
	} else {
		sy = -1
	}
	xnext := x0
	ynext := y0
	//calculate length of line to cast (distance from start to final tile)
	dist = sqrt(dx*dx + dy*dy)
	for xnext != x1 || ynext != y1 { //essentially casting a ray of length radius: (radius^3)
		if m.OOB(xnext, ynext) {
			return
		}
		if m.CantSeeThrough(xnext, ynext) {
			m.Discover(xnext, ynext)
			return
		}
		// Line-to-point distance formula < 0.5: advance along whichever
		// axis (or diagonal) keeps the walk closest to the ideal line.
		if abs(dy*(xnext-x0+sx)-dx*(ynext-y0))/dist < 0.5 {
			xnext += sx
		} else if abs(dy*(xnext-x0)-dx*(ynext-y0+sy))/dist < 0.5 {
			ynext += sy
		} else {
			xnext += sx
			ynext += sy
		}
	}
	m.Lit(x1, y1)
	m.Discover(x1, y1)
	if !m.OOB(x1, y1) {
		m.Activate(x1, y1)
	}
}
// sqrt returns the square root of x as a float64.
func sqrt(x int) float64 {
	v := float64(x)
	return math.Sqrt(v)
}
// abs returns |x| as a float64.
func abs(x int) float64 {
	v := float64(x)
	return math.Abs(v)
}
package vo
import (
"sort"
"strings"
"github.com/unknwon/com"
)
// TwoToneSphereRuleMap maps [matched red count][matched blue count (0 or 1)]
// to the prize level; level 0 means no prize.
var TwoToneSphereRuleMap = map[int]map[int]int{
	6: {1: 1, 0: 2},
	5: {1: 3, 0: 4},
	4: {1: 4, 0: 5},
	3: {1: 5, 0: 0},
	2: {1: 6, 0: 0},
	1: {1: 6, 0: 0},
	0: {1: 6, 0: 0},
}

// TwoToneSpherePriceMap maps a prize level to its payout amount.
var TwoToneSpherePriceMap = map[int]int{
	1: 5000000,
	2: 200000,
	3: 3000,
	4: 200,
	5: 10,
	6: 5,
}

// TwoToneSphereCalculator computes the prize level and payout for a
// two-tone sphere lottery ticket against a winning draw.
type TwoToneSphereCalculator struct {
}
// Calculate compares a ticket against the winning draw and returns the
// prize level and payout. Both numbers must be formatted as
// "red1,red2,...|blue"; a malformed side yields (0, 0).
func (t TwoToneSphereCalculator) Calculate(number, winNumber string) (level, prize int) {
	splitNumber := strings.Split(number, "|")
	if len(splitNumber) != 2 {
		return level, prize
	}
	splitWinNumber := strings.Split(winNumber, "|")
	if len(splitWinNumber) != 2 {
		return level, prize
	}
	red, blue := t.helper(number)
	redWin, blueWin := t.helper(winNumber)
	// Count how many red and blue balls the ticket shares with the draw.
	redCount, blueCount := t.helperTriple(red, redWin), t.helperTriple(blue, blueWin)
	return t.helperQuadra(len(redCount), len(blueCount))
}
// helper splits a "red1,red2,...|blue" string into parsed red and blue ball
// lists; a malformed string yields two nil slices.
func (t TwoToneSphereCalculator) helper(number string) (red, blue []int) {
	splitNumber := strings.Split(number, "|")
	if len(splitNumber) != 2 {
		return red, blue
	}
	red = t.helperAgain(splitNumber[0])
	blue = t.helperAgain(splitNumber[1])
	return red, blue
}
// helperAgain splits a comma-separated ball list into a sorted slice of
// unique ints, dropping entries that fail to parse or equal zero.
//
// Bug fix: the dedup map was consulted but never written to, so duplicate
// ball numbers were previously kept (inflating match counts); each entry is
// now recorded as seen. The parsed value is also computed once instead of
// twice.
func (t TwoToneSphereCalculator) helperAgain(number string) (out []int) {
	numbers := strings.Split(number, ",")
	numberMap := make(map[string]bool, len(numbers))
	for _, v := range numbers {
		if numberMap[v] {
			continue
		}
		numberMap[v] = true
		n := com.StrTo(v).MustInt()
		if n == 0 {
			continue
		}
		out = append(out, n)
	}
	sort.Ints(out)
	return out
}
// helperTriple returns the values present in both slices, preserving
// numsTwo's order.
func (t TwoToneSphereCalculator) helperTriple(numsOne, numsTwo []int) (out []int) {
	seen := make(map[int]bool, len(numsOne))
	for _, n := range numsOne {
		seen[n] = true
	}
	for _, n := range numsTwo {
		if seen[n] {
			out = append(out, n)
		}
	}
	return out
}
// helperQuadra clamps the matched-ball counts into their supported ranges
// (red: 0..6, blue: 0..1) and looks up the prize level and payout; an
// unknown level yields a zero payout.
//
// Bug fix: the blue-count upper clamp previously assigned redCount = 1 (an
// obvious typo), discarding the red result whenever blueCount exceeded 1
// and still leaving blueCount out of range for the rule-map lookup; it now
// clamps blueCount itself.
func (t TwoToneSphereCalculator) helperQuadra(redCount, blueCount int) (level, price int) {
	if redCount > 6 {
		redCount = 6
	}
	if redCount < 0 {
		redCount = 0
	}
	if blueCount < 0 {
		blueCount = 0
	}
	if blueCount > 1 {
		blueCount = 1
	}
	level = TwoToneSphereRuleMap[redCount][blueCount]
	return level, TwoToneSpherePriceMap[level]
}
// init registers this calculator for the TwoToneSphere lottery type.
func init() {
	CalculatorMap[TwoToneSphere] = TwoToneSphereCalculator{}
}
package repository
import (
"context"
"cloud.google.com/go/bigtable"
"github.com/sendinblue/bigtable-access-layer/data"
"github.com/sendinblue/bigtable-access-layer/mapping"
)
// defaultMaxRows is the row limit applied when no MaxRowsOption is given.
const defaultMaxRows = 100

// Repository reads rows from a Big Table table and maps them to data sets.
type Repository struct {
	adapter Adapter         // thin wrapper over the underlying Big Table table
	mapper  *mapping.Mapper // turns raw cells into typed event fields
	maxRows int             // row cap — presumably consumed by query methods outside this excerpt; confirm
}
// NewRepository creates a new Repository for the given table.
// Functional options (e.g. MaxRowsOption) are applied in order after the
// defaults are set.
func NewRepository(table *bigtable.Table, mapper *mapping.Mapper, opts ...Option) *Repository {
	adapter := &bigTableAdapter{
		table: table,
	}
	repo := &Repository{
		adapter: adapter,
		mapper:  mapper,
		maxRows: defaultMaxRows,
	}
	for _, opt := range opts {
		opt.apply(repo)
	}
	return repo
}
// Option customizes a Repository during construction (see NewRepository).
type Option interface {
	apply(r *Repository)
}
// MaxRowsOption overrides the maximum number of rows considered by Search.
type MaxRowsOption struct {
	maxRows int
}
// apply installs the option's row cap on the repository.
func (o MaxRowsOption) apply(r *Repository) {
	r.maxRows = o.maxRows
}
// Read reads a row from the repository and maps it to a data.Set.
//
// It takes a row key, uses the internal adapter to read the row from
// Big Table, parses all cells contained in the row into data.Event values,
// and returns the data.Set that contains all the events.
func (r *Repository) Read(ctx context.Context, key string) (*data.Set, error) {
	return r.read(ctx, key)
}
// ReadFamily reads a row keeping only the given column family and maps it
// to a data.Set. The family name is matched exactly.
func (r *Repository) ReadFamily(ctx context.Context, key string, family string) (*data.Set, error) {
	return r.read(ctx, key, bigtable.RowFilter(bigtable.FamilyFilter(family)))
}
// ReadLast reads a row keeping only the latest cell value per column and
// maps it to a data.Set.
func (r *Repository) ReadLast(ctx context.Context, key string) (*data.Set, error) {
	latestOnly := bigtable.RowFilter(bigtable.LatestNFilter(1))
	return r.read(ctx, key, latestOnly)
}
// read fetches a single row with the given read options and converts it to
// a data.Set.
func (r *Repository) read(ctx context.Context, key string, opts ...bigtable.ReadOption) (*data.Set, error) {
	row, err := r.adapter.ReadRow(ctx, key, opts...)
	if err != nil {
		return nil, err
	}
	rows := []bigtable.Row{row}
	return buildEventSet(rows, r.mapper), nil
}
// buildEventSet converts raw Big Table rows into a data.Set, mapping each
// column family's cells to events and accumulating the union of all column
// names seen.
func buildEventSet(rows []bigtable.Row, mapper *mapping.Mapper) *data.Set {
	set := &data.Set{
		Events:  make(map[string][]*data.Event),
		Columns: make([]string, 0),
	}
	for _, row := range rows {
		for family, items := range row {
			cols, events := mapper.GetMappedEvents(items)
			set.Events[family] = append(set.Events[family], events...)
			set.Columns = merge(set.Columns, cols)
		}
	}
	return set
}
// Search for rows in the repository that match the given filter and return
// the according data.Set. Matching happens in two passes: a filtered scan
// collects row keys and matched cell timestamps (capped via r.maxRows in
// mapResult), then each matched row is re-read in full and trimmed back down
// to the matched timestamps before mapping.
func (r *Repository) Search(ctx context.Context, rowSet bigtable.RowSet, filter bigtable.Filter) (*data.Set, error) {
	rows, err := r.search(ctx, rowSet, filter)
	if err != nil {
		return nil, err
	}
	resultMap := mapResult(rows, r.maxRows)
	result := make([]bigtable.Row, 0, len(resultMap))
	for key, row := range resultMap {
		// Re-read the full row: the filtered scan may have stripped cells
		// that the mapper needs.
		fullRow, err := r.adapter.ReadRow(ctx, key)
		if err != nil {
			return nil, err
		}
		result = append(result, filterReadItems(fullRow, row))
	}
	return buildEventSet(result, r.mapper), nil
}
// Write persists all mutations derived from the event set in one bulk call,
// returning per-row errors and/or an overall error from the adapter.
func (r *Repository) Write(ctx context.Context, eventSet *data.Set) ([]error, error) {
	allMutations := r.mapper.GetMutations(eventSet)
	rowKeys := make([]string, 0, len(allMutations))
	mutations := make([]*bigtable.Mutation, 0, len(allMutations))
	for key, mut := range allMutations {
		rowKeys = append(rowKeys, key)
		mutations = append(mutations, mut)
	}
	return r.adapter.ApplyBulk(ctx, rowKeys, mutations)
}
// search scans the row set with the given filter, collecting every matching
// row.
func (r *Repository) search(ctx context.Context, rowSet bigtable.RowSet, filter bigtable.Filter) ([]bigtable.Row, error) {
	var rows []bigtable.Row
	collect := func(row bigtable.Row) bool {
		rows = append(rows, row)
		return true
	}
	if err := r.adapter.ReadRows(ctx, rowSet, collect, bigtable.RowFilter(filter)); err != nil {
		return nil, err
	}
	return rows, nil
}
// mapResult indexes, per row key, the timestamps of every cell in the given
// rows, considering at most `limit` rows.
func mapResult(rows []bigtable.Row, limit int) map[string][]bigtable.Timestamp {
	resultMap := make(map[string][]bigtable.Timestamp)
	for i, row := range rows {
		// BUG FIX: the original checked `i > limit` *after* processing a
		// row, so it handled up to limit+2 rows; cap at exactly `limit`.
		if i >= limit {
			break
		}
		key := row.Key()
		if resultMap[key] == nil {
			resultMap[key] = make([]bigtable.Timestamp, 0)
		}
		for _, items := range row {
			// BUG FIX: the original re-created resultMap[key] inside this
			// per-family loop, discarding the timestamps of every column
			// family except the last one iterated.
			for _, item := range items {
				resultMap[key] = append(resultMap[key], item.Timestamp)
			}
		}
	}
	return resultMap
}
// filterReadItems keeps only the cells of row whose timestamps appear in ts,
// preserving the per-family grouping (a family with no matching cells maps
// to an empty slice).
func filterReadItems(row bigtable.Row, ts []bigtable.Timestamp) map[string][]bigtable.ReadItem {
	wanted := make(map[bigtable.Timestamp]bool, len(ts))
	for _, t := range ts {
		wanted[t] = true
	}
	result := make(map[string][]bigtable.ReadItem)
	for fam, items := range row {
		kept := make([]bigtable.ReadItem, 0)
		for _, item := range items {
			if wanted[item.Timestamp] {
				kept = append(kept, item)
			}
		}
		result[fam] = kept
	}
	return result
}
// Adapter acts as a proxy between the repository and the actual data source.
// It allows to easily mock the data source in tests.
type Adapter interface {
	// ReadRow returns the row with the given key, honoring the read options.
	ReadRow(ctx context.Context, row string, opts ...bigtable.ReadOption) (bigtable.Row, error)
	// ReadRows streams rows of the set to f until f returns false.
	ReadRows(ctx context.Context, arg bigtable.RowSet, f func(bigtable.Row) bool, opts ...bigtable.ReadOption) (err error)
	// ApplyBulk applies each mutation to the row key at the same index.
	ApplyBulk(ctx context.Context, rowKeys []string, muts []*bigtable.Mutation, opts ...bigtable.ApplyOption) (errs []error, err error)
}
// bigTableAdapter is the production Adapter backed by a real bigtable.Table.
type bigTableAdapter struct {
	table *bigtable.Table
}
// ReadRow delegates directly to the underlying bigtable.Table.
func (a *bigTableAdapter) ReadRow(ctx context.Context, row string, opts ...bigtable.ReadOption) (bigtable.Row, error) {
	return a.table.ReadRow(ctx, row, opts...)
}
// ReadRows delegates directly to the underlying bigtable.Table.
func (a *bigTableAdapter) ReadRows(ctx context.Context, arg bigtable.RowSet, f func(bigtable.Row) bool, opts ...bigtable.ReadOption) (err error) {
	return a.table.ReadRows(ctx, arg, f, opts...)
}
// ApplyBulk delegates directly to the underlying bigtable.Table.
func (a *bigTableAdapter) ApplyBulk(ctx context.Context, rowKeys []string, muts []*bigtable.Mutation, opts ...bigtable.ApplyOption) (errs []error, err error) {
	return a.table.ApplyBulk(ctx, rowKeys, muts, opts...)
}
// merge returns a new slice with the contents of both slices.
// Duplicates are collapsed; element order is NOT deterministic because the
// result is produced by iterating a map.
func merge(a, b []string) []string {
	m := make(map[string]bool)
	for _, x := range a {
		m[x] = true
	}
	for _, x := range b {
		m[x] = true
	}
	var res []string
	for x := range m {
		res = append(res, x)
	}
	return res
} | repository/repository.go | 0.69233 | 0.440951 | repository.go | starcoder
package types
import (
"io"
"reflect"
"strings"
"github.com/lyraproj/pcore/px"
"github.com/lyraproj/pcore/utils"
)
// EnumType represents the Pcore Enum type: a string constrained to a fixed
// set of values, optionally matched case-insensitively.
type EnumType struct {
	caseInsensitive bool
	values          []string // stored lower-cased when caseInsensitive is true
}
var EnumMetaType px.ObjectType
// init registers the Pcore::EnumType meta type and its constructor with the
// type system; the constructor delegates to newEnumType2.
func init() {
	EnumMetaType = newObjectType(`Pcore::EnumType`,
		`Pcore::ScalarDataType {
attributes => {
values => Array[String[1]],
case_insensitive => {
type => Boolean,
value => false
}
}
}`, func(ctx px.Context, args []px.Value) px.Value {
			return newEnumType2(args...)
		})
}
// DefaultEnumType returns the value-less Enum, which matches any string.
func DefaultEnumType() *EnumType {
	return enumTypeDefault
}
// NewEnumType creates an Enum type over the given values. When
// caseInsensitive is true every value is normalized to lower case so that
// instance checks can compare case-insensitively.
func NewEnumType(enums []string, caseInsensitive bool) *EnumType {
	if caseInsensitive && len(enums) > 0 {
		lowered := make([]string, len(enums))
		for i, v := range enums {
			lowered[i] = strings.ToLower(v)
		}
		enums = lowered
	}
	return &EnumType{caseInsensitive, enums}
}
// newEnumType2 adapts a variadic argument list to newEnumType3.
func newEnumType2(args ...px.Value) *EnumType {
	return newEnumType3(WrapValues(args))
}
// newEnumType3 builds an EnumType from a positional argument list:
//   - no args         -> the default Enum (matches any string)
//   - a single String -> one-value enum
//   - a single Array  -> recurse with the array's elements as arguments
//   - otherwise       -> each arg must be a String; a trailing Boolean
//     toggles case-insensitive matching, and a leading Array is flattened
//     into the remaining arguments first.
func newEnumType3(args px.List) *EnumType {
	if args.Len() == 0 {
		return DefaultEnumType()
	}
	var enums []string
	top := args.Len()
	caseInsensitive := false
	first := args.At(0)
	if top == 1 {
		switch first := first.(type) {
		case stringValue:
			enums = []string{first.String()}
		case *Array:
			return newEnumType3(first)
		default:
			panic(illegalArgumentType(`Enum[]`, 0, `String or Array[String]`, args.At(0)))
		}
	} else {
		if ar, ok := first.(*Array); ok {
			// Flatten the leading array together with the trailing args.
			enumArgs := ar.AppendTo(make([]px.Value, 0, ar.Len()+top-1))
			for i := 1; i < top; i++ {
				enumArgs = append(enumArgs, args.At(i))
			}
			if len(enumArgs) == 0 {
				return DefaultEnumType()
			}
			args = WrapValues(enumArgs)
			top = args.Len()
		}
		enums = make([]string, top)
		args.EachWithIndex(func(arg px.Value, idx int) {
			str, ok := arg.(stringValue)
			if !ok {
				// The last argument may be a Boolean enabling
				// case-insensitive matching. NOTE(review): in that case
				// enums keeps a trailing empty string — confirm intended.
				if ci, ok := arg.(booleanValue); ok && idx == top-1 {
					caseInsensitive = ci.Bool()
					return
				}
				panic(illegalArgumentType(`Enum[]`, idx, `String`, arg))
			}
			enums[idx] = string(str)
		})
	}
	return NewEnumType(enums, caseInsensitive)
}
// Accept visits this type; Enum has no nested types to recurse into.
func (t *EnumType) Accept(v px.Visitor, g px.Guard) {
	v(t)
}
// Default returns the default (value-less) Enum type.
func (t *EnumType) Default() px.Type {
	return enumTypeDefault
}
// Equals reports whether o is an EnumType with the same case sensitivity
// and the same value set (order-insensitive).
func (t *EnumType) Equals(o interface{}, g px.Guard) bool {
	ot, ok := o.(*EnumType)
	if !ok {
		return false
	}
	return t.caseInsensitive == ot.caseInsensitive &&
		len(t.values) == len(ot.values) &&
		utils.ContainsAllStrings(t.values, ot.values)
}
// Generic returns the parameter-free form of this type.
func (t *EnumType) Generic() px.Type {
	return enumTypeDefault
}
// Get exposes the type's attributes by name: `values` and `case_insensitive`.
func (t *EnumType) Get(key string) (px.Value, bool) {
	if key == `values` {
		return WrapValues(t.enums()), true
	}
	if key == `case_insensitive` {
		return booleanValue(t.caseInsensitive), true
	}
	return nil, false
}
// IsAssignable reports whether every value of type o is also a value of this
// enum. The value-less default Enum accepts any string-ish type; otherwise
// single-value string types and compatible enums are checked value by value.
func (t *EnumType) IsAssignable(o px.Type, g px.Guard) bool {
	if len(t.values) == 0 {
		// The default Enum behaves like an unconstrained string type.
		switch o.(type) {
		case *stringType, *vcStringType, *scStringType, *EnumType, *PatternType:
			return true
		}
		return false
	}
	if st, ok := o.(*vcStringType); ok {
		// A string type carrying a single literal value.
		return px.IsInstance(t, stringValue(st.value))
	}
	if en, ok := o.(*EnumType); ok {
		oEnums := en.values
		// A case-insensitive source can only be assignable to a
		// case-insensitive target.
		if len(oEnums) > 0 && (t.caseInsensitive || !en.caseInsensitive) {
			for _, v := range en.values {
				if !px.IsInstance(t, stringValue(v)) {
					return false
				}
			}
			return true
		}
	}
	return false
}
// IsInstance reports whether o is a string accepted by this enum. An empty
// value list accepts any string.
func (t *EnumType) IsInstance(o px.Value, g px.Guard) bool {
	str, ok := o.(stringValue)
	if !ok {
		return false
	}
	if len(t.values) == 0 {
		return true
	}
	s := string(str)
	if t.caseInsensitive {
		s = strings.ToLower(s)
	}
	for _, v := range t.values {
		if v == s {
			return true
		}
	}
	return false
}
// MetaType returns the object type describing Enum's attributes.
func (t *EnumType) MetaType() px.ObjectType {
	return EnumMetaType
}
// Name returns the simple name of this type.
func (t *EnumType) Name() string {
	return `Enum`
}
// ReflectType reports that enum values reflect as Go strings.
func (t *EnumType) ReflectType(c px.Context) (reflect.Type, bool) {
	return reflect.TypeOf(`x`), true
}
// String returns the string representation of the type.
func (t *EnumType) String() string {
	return px.ToString2(t, None)
}
// Parameters returns the type parameters used when printing the type, i.e.
// the enum values plus a trailing boolean true when matching is
// case-insensitive.
func (t *EnumType) Parameters() []px.Value {
	params := t.enums()
	if t.caseInsensitive {
		return append(params, BooleanTrue)
	}
	return params
}
// CanSerializeAsString reports that this type can be serialized as a string.
func (t *EnumType) CanSerializeAsString() bool {
	return true
}
// SerializationString returns the string form used when serializing the type.
func (t *EnumType) SerializationString() string {
	return t.String()
}
// ToString writes the type's string representation to b using format f.
func (t *EnumType) ToString(b io.Writer, f px.FormatContext, g px.RDetect) {
	TypeToString(t, b, f, g)
}
// PType returns the type of this type value.
func (t *EnumType) PType() px.Type {
	return &TypeType{t}
}
// enums returns the value list wrapped as px string values.
func (t *EnumType) enums() []px.Value {
	if len(t.values) == 0 {
		return px.EmptyValues
	}
	wrapped := make([]px.Value, len(t.values))
	for i, e := range t.values {
		wrapped[i] = stringValue(e)
	}
	return wrapped
}
var enumTypeDefault = &EnumType{false, []string{}} | types/enumtype.go | 0.533397 | 0.404684 | enumtype.go | starcoder |
package bookingclient
import (
"encoding/json"
)
// RateRestrictionsModel struct for RateRestrictionsModel.
// Optional fields are pointers (nil = unset); the three Closed* fields are
// required by the API and always serialized.
type RateRestrictionsModel struct {
	// The minimum length of stay in order to book the rate. If at least this number of time slices are covered by the stay duration the rate will be offered.
	MinLengthOfStay *int32 `json:"minLengthOfStay,omitempty"`
	// The maximum length of stay in order to book the rate. If not more than this number of time slices are covered by the stay duration the rate will be offered.
	MaxLengthOfStay *int32 `json:"maxLengthOfStay,omitempty"`
	// Whether the rate can be booked for a stay-through reservation
	Closed bool `json:"closed"`
	// Whether the rate can be booked on the reservation's arrival date
	ClosedOnArrival bool `json:"closedOnArrival"`
	// Whether the rate can be booked on the reservation's departure date
	ClosedOnDeparture bool `json:"closedOnDeparture"`
}
// NewRateRestrictionsModel instantiates a new RateRestrictionsModel object.
// It assigns the API-required properties; the argument list changes whenever
// the set of required properties changes.
func NewRateRestrictionsModel(closed bool, closedOnArrival bool, closedOnDeparture bool) *RateRestrictionsModel {
	return &RateRestrictionsModel{
		Closed:            closed,
		ClosedOnArrival:   closedOnArrival,
		ClosedOnDeparture: closedOnDeparture,
	}
}
// NewRateRestrictionsModelWithDefaults instantiates a RateRestrictionsModel
// with defaults only; properties required by the API are left unset.
func NewRateRestrictionsModelWithDefaults() *RateRestrictionsModel {
	return &RateRestrictionsModel{}
}
// GetMinLengthOfStay returns the MinLengthOfStay field value if set, zero
// value otherwise.
func (o *RateRestrictionsModel) GetMinLengthOfStay() int32 {
	if o != nil && o.MinLengthOfStay != nil {
		return *o.MinLengthOfStay
	}
	return 0
}
// GetMinLengthOfStayOk returns the MinLengthOfStay field value if set, nil
// otherwise, plus a boolean reporting whether the value has been set.
func (o *RateRestrictionsModel) GetMinLengthOfStayOk() (*int32, bool) {
	if o != nil && o.MinLengthOfStay != nil {
		return o.MinLengthOfStay, true
	}
	return nil, false
}
// HasMinLengthOfStay reports whether the MinLengthOfStay field has been set.
func (o *RateRestrictionsModel) HasMinLengthOfStay() bool {
	return o != nil && o.MinLengthOfStay != nil
}
// SetMinLengthOfStay gets a reference to the given int32 and assigns it to the MinLengthOfStay field.
func (o *RateRestrictionsModel) SetMinLengthOfStay(v int32) {
	// A pointer to the local copy is stored, so the caller's variable is not aliased.
	o.MinLengthOfStay = &v
}
// GetMaxLengthOfStay returns the MaxLengthOfStay field value if set, zero
// value otherwise.
func (o *RateRestrictionsModel) GetMaxLengthOfStay() int32 {
	if o != nil && o.MaxLengthOfStay != nil {
		return *o.MaxLengthOfStay
	}
	return 0
}
// GetMaxLengthOfStayOk returns the MaxLengthOfStay field value if set, nil
// otherwise, plus a boolean reporting whether the value has been set.
func (o *RateRestrictionsModel) GetMaxLengthOfStayOk() (*int32, bool) {
	if o != nil && o.MaxLengthOfStay != nil {
		return o.MaxLengthOfStay, true
	}
	return nil, false
}
// HasMaxLengthOfStay reports whether the MaxLengthOfStay field has been set.
func (o *RateRestrictionsModel) HasMaxLengthOfStay() bool {
	return o != nil && o.MaxLengthOfStay != nil
}
// SetMaxLengthOfStay gets a reference to the given int32 and assigns it to the MaxLengthOfStay field.
func (o *RateRestrictionsModel) SetMaxLengthOfStay(v int32) {
	// A pointer to the local copy is stored, so the caller's variable is not aliased.
	o.MaxLengthOfStay = &v
}
// GetClosed returns the Closed field value (false on a nil receiver).
func (o *RateRestrictionsModel) GetClosed() bool {
	if o == nil {
		return false
	}
	return o.Closed
}
// GetClosedOk returns the Closed field value plus a boolean reporting
// whether a value is available (false only on a nil receiver).
func (o *RateRestrictionsModel) GetClosedOk() (*bool, bool) {
	if o != nil {
		return &o.Closed, true
	}
	return nil, false
}
// SetClosed sets the required Closed field value.
func (o *RateRestrictionsModel) SetClosed(v bool) {
	o.Closed = v
}
// GetClosedOnArrival returns the ClosedOnArrival field value (false on a
// nil receiver).
func (o *RateRestrictionsModel) GetClosedOnArrival() bool {
	if o == nil {
		return false
	}
	return o.ClosedOnArrival
}
// GetClosedOnArrivalOk returns the ClosedOnArrival field value plus a
// boolean reporting whether a value is available (false only on nil).
func (o *RateRestrictionsModel) GetClosedOnArrivalOk() (*bool, bool) {
	if o != nil {
		return &o.ClosedOnArrival, true
	}
	return nil, false
}
// SetClosedOnArrival sets the required ClosedOnArrival field value.
func (o *RateRestrictionsModel) SetClosedOnArrival(v bool) {
	o.ClosedOnArrival = v
}
// GetClosedOnDeparture returns the ClosedOnDeparture field value (false on
// a nil receiver).
func (o *RateRestrictionsModel) GetClosedOnDeparture() bool {
	if o == nil {
		return false
	}
	return o.ClosedOnDeparture
}
// GetClosedOnDepartureOk returns the ClosedOnDeparture field value plus a
// boolean reporting whether a value is available (false only on nil).
func (o *RateRestrictionsModel) GetClosedOnDepartureOk() (*bool, bool) {
	if o != nil {
		return &o.ClosedOnDeparture, true
	}
	return nil, false
}
// SetClosedOnDeparture sets the required ClosedOnDeparture field value.
func (o *RateRestrictionsModel) SetClosedOnDeparture(v bool) {
	o.ClosedOnDeparture = v
}
// MarshalJSON serializes the model, omitting unset optional fields and
// always emitting the required Closed* fields.
func (o RateRestrictionsModel) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.MinLengthOfStay != nil {
		toSerialize["minLengthOfStay"] = o.MinLengthOfStay
	}
	if o.MaxLengthOfStay != nil {
		toSerialize["maxLengthOfStay"] = o.MaxLengthOfStay
	}
	// Required fields are serialized unconditionally (the original wrapped
	// these in generated `if true` blocks, which are dead conditionals).
	toSerialize["closed"] = o.Closed
	toSerialize["closedOnArrival"] = o.ClosedOnArrival
	toSerialize["closedOnDeparture"] = o.ClosedOnDeparture
	return json.Marshal(toSerialize)
}
// NullableRateRestrictionsModel distinguishes an explicitly set (possibly
// nil) value from an unset one, for JSON round-tripping.
type NullableRateRestrictionsModel struct {
	value *RateRestrictionsModel
	isSet bool
}
// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableRateRestrictionsModel) Get() *RateRestrictionsModel {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableRateRestrictionsModel) Set(val *RateRestrictionsModel) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (including an explicit null) was assigned.
func (v NullableRateRestrictionsModel) IsSet() bool {
	return v.isSet
}
// Unset clears the value and marks the wrapper as unset.
func (v *NullableRateRestrictionsModel) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableRateRestrictionsModel wraps val in an already-set nullable.
func NewNullableRateRestrictionsModel(val *RateRestrictionsModel) *NullableRateRestrictionsModel {
	return &NullableRateRestrictionsModel{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableRateRestrictionsModel) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set
// (a JSON null leaves value nil but isSet true).
func (v *NullableRateRestrictionsModel) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | api/clients/bookingclient/model_rate_restrictions_model.go | 0.754192 | 0.504639 | model_rate_restrictions_model.go | starcoder
package nune
import (
"math"
"reflect"
"github.com/vorduin/slices"
)
// From returns a Tensor from the given backing - be it a numeric type,
// a sequence, or nested sequences - with the corresponding shape.
func From[T Number](b any) Tensor[T] {
	switch k := reflect.TypeOf(b).Kind(); k {
	case reflect.String:
		// Strings are treated as their byte sequence.
		b = any([]byte(b.(string)))
		fallthrough
	case reflect.Array, reflect.Slice:
		// Copy the sequence into []any and unwrap it (recursively for
		// nested sequences) into a flat data buffer plus a shape.
		v := reflect.ValueOf(b)
		c := make([]any, v.Len())
		for i := 0; i < v.Len(); i++ {
			c[i] = v.Index(i).Interface()
		}
		d, s, err := unwrapAny[T](c, []int{len(c)})
		if err != nil {
			// Interactive mode panics; otherwise the error is carried on
			// the returned Tensor.
			if EnvConfig.Interactive {
				panic(err)
			} else {
				return Tensor[T]{
					Err: err,
				}
			}
		}
		return Tensor[T]{
			data:   d,
			shape:  s,
			stride: configStride(s),
		}
	default:
		// Remaining backings: a numeric scalar (stored with a nil shape),
		// something convertible to a Tensor, or an error.
		if anyIsNumeric(b) {
			return Tensor[T]{
				data: anyToNumeric[T](b),
			}
		} else if c, ok := anyToTensor[T](b); ok {
			return c
		} else {
			if EnvConfig.Interactive {
				panic(ErrUnwrapBacking)
			} else {
				return Tensor[T]{
					Err: ErrUnwrapBacking,
				}
			}
		}
	}
}
// Full returns a Tensor full with the given value and satisfying the given
// shape. An invalid shape panics in interactive mode, or returns a Tensor
// carrying the error otherwise.
func Full[T Number](x T, shape []int) Tensor[T] {
	err := verifyGoodShape(shape...)
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			return Tensor[T]{
				Err: err,
			}
		}
	}
	data := slices.WithLen[T](slices.Prod(shape))
	for i := range data {
		data[i] = x // x is already of type T; the original's T(x) was a no-op conversion
	}
	return Tensor[T]{
		data:   data,
		shape:  slices.Clone(shape),
		stride: configStride(shape),
	}
}
// FullLike returns a Tensor full with the given value and resembling the
// other Tensor's shape.
func FullLike[T Number, U Number](x T, other Tensor[U]) Tensor[T] {
	data := slices.WithLen[T](other.Numel())
	for i := range data {
		data[i] = x // x is already of type T; the original's T(x) was a no-op conversion
	}
	return Tensor[T]{
		data:   data,
		shape:  slices.Clone(other.shape),
		stride: configStride(other.shape),
	}
}
// Zeros returns a Tensor full with zeros and satisfying the given shape.
// An invalid shape panics in interactive mode, or returns a Tensor carrying
// the error otherwise.
func Zeros[T Number](shape ...int) Tensor[T] {
	err := verifyGoodShape(shape...)
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			return Tensor[T]{
				Err: err,
			}
		}
	}
	return Tensor[T]{
		// slices.Prod already yields int for []int (Full uses it without a
		// conversion); the original's int(...) wrapper was redundant.
		data:   slices.WithLen[T](slices.Prod(shape)),
		shape:  slices.Clone(shape),
		stride: configStride(shape),
	}
}
// ZerosLike returns a Tensor full with zeros and resembling the other
// Tensor's shape. The zero fill comes for free from Go's zeroed allocation.
func ZerosLike[T Number, U Number](other Tensor[U]) Tensor[T] {
	return Tensor[T]{
		data:   slices.WithLen[T](other.Numel()),
		shape:  slices.Clone(other.shape),
		stride: configStride(other.shape),
	}
}
// Ones returns a Tensor full with ones and satisfying the given shape.
// Shape validation is delegated to Full.
func Ones[T Number](shape ...int) Tensor[T] {
	return Full(T(1), shape)
}
// OnesLike returns a Tensor full with ones and resembling the other
// Tensor's shape.
func OnesLike[T Number, U Number](other Tensor[U]) Tensor[T] {
	return FullLike(T(1), other)
}
// Range returns a rank 1 Tensor on the interval [start, end),
// with the given step-size. An invalid step panics in interactive mode, or
// returns a Tensor carrying the error otherwise.
func Range[T Number](start, end, step int) Tensor[T] {
	err := verifyGoodStep(step, start, end)
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			return Tensor[T]{
				Err: err,
			}
		}
	}
	// The original computed the distance as Sqrt(Pow(x, 2)); math.Abs is
	// the direct (and precision-safe) equivalent.
	d := math.Abs(float64(end - start))               // distance
	l := int(math.Floor(d / math.Abs(float64(step)))) // length
	rng := slices.WithLen[T](l)
	// The original kept two counters (x and i) advancing in lockstep; one
	// index suffices.
	for i := 0; i < l; i++ {
		rng[i] = T(start + i*step)
	}
	shape := []int{len(rng)}
	return Tensor[T]{
		data:   rng,
		shape:  shape,
		stride: configStride(shape),
	}
}
// FromBuffer returns a Tensor with the given buffer set as its data buffer.
// The buffer is NOT copied — the Tensor aliases it. An empty buffer is
// rejected the same way as an invalid shape.
func FromBuffer[T Number](buf []T) Tensor[T] {
	err := verifyGoodShape(len(buf))
	if err != nil {
		if EnvConfig.Interactive {
			panic(err)
		} else {
			return Tensor[T]{
				Err: err,
			}
		}
	}
	return Tensor[T]{
		data:   buf,
		shape:  []int{len(buf)},
		stride: configStride([]int{len(buf)}),
	}
} | factory.go | 0.779657 | 0.642643 | factory.go | starcoder
package chart
import "math"
// BollingerBandsSeries draws bollinger bands for an inner series.
// Bollinger bands are defined by two lines, one at SMA+k*stddev, one at SMA-k*stdev.
type BollingerBandsSeries struct {
	Name   string
	Style  Style
	YAxis  YAxisType
	Period int     // rolling window size; 0 means DefaultSimpleMovingAveragePeriod
	K      float64 // standard-deviation multiplier; 0 means 2.0

	InnerSeries ValueProvider

	// valueBuffer is the rolling window used by GetBoundedValue; it is
	// (re)created lazily and whenever index 0 is requested again.
	valueBuffer *RingBuffer
}
// GetName returns the name of the time series.
func (bbs BollingerBandsSeries) GetName() string {
	return bbs.Name
}
// GetStyle returns the line style used when rendering the bands.
func (bbs BollingerBandsSeries) GetStyle() Style {
	return bbs.Style
}
// GetYAxis returns which YAxis the series draws on.
func (bbs BollingerBandsSeries) GetYAxis() YAxisType {
	return bbs.YAxis
}
// GetPeriod returns the rolling window size, falling back to
// DefaultSimpleMovingAveragePeriod when unset.
func (bbs BollingerBandsSeries) GetPeriod() int {
	if bbs.Period != 0 {
		return bbs.Period
	}
	return DefaultSimpleMovingAveragePeriod
}
// GetK returns the standard-deviation multiplier K. When K is unset it
// returns the first of defaults, or 2.0 when no default is supplied.
func (bbs BollingerBandsSeries) GetK(defaults ...float64) float64 {
	if bbs.K != 0 {
		return bbs.K
	}
	if len(defaults) > 0 {
		return defaults[0]
	}
	return 2.0
}
// Len returns the number of elements in the series (same as the inner one).
func (bbs *BollingerBandsSeries) Len() int {
	return bbs.InnerSeries.Len()
}
// GetBoundedValue gets the bounded value for the series: x plus the upper
// (y1) and lower (y2) band at the given index.
// NOTE(review): this method is stateful — it assumes indexes are visited
// sequentially from 0, and the rolling buffer is reset whenever index == 0.
func (bbs *BollingerBandsSeries) GetBoundedValue(index int) (x, y1, y2 float64) {
	if bbs.InnerSeries == nil {
		return
	}
	if bbs.valueBuffer == nil || index == 0 {
		bbs.valueBuffer = NewRingBufferWithCapacity(bbs.GetPeriod())
	}
	if bbs.valueBuffer.Len() >= bbs.GetPeriod() {
		// Drop the oldest value to keep the window at `period` entries.
		bbs.valueBuffer.Dequeue()
	}
	px, py := bbs.InnerSeries.GetValue(index)
	bbs.valueBuffer.Enqueue(py)
	x = px
	ay := bbs.getAverage(bbs.valueBuffer)
	std := bbs.getStdDev(bbs.valueBuffer)
	y1 = ay + (bbs.GetK() * std)
	y2 = ay - (bbs.GetK() * std)
	return
}
// GetBoundedLastValue computes the bands for only the final point of the
// inner series, using at most the last `period` values.
func (bbs *BollingerBandsSeries) GetBoundedLastValue() (x, y1, y2 float64) {
	if bbs.InnerSeries == nil {
		return
	}
	period := bbs.GetPeriod()
	seriesLength := bbs.InnerSeries.Len()
	startAt := seriesLength - period
	if startAt < 0 {
		startAt = 0
	}
	vb := NewRingBufferWithCapacity(period)
	for index := startAt; index < seriesLength; index++ {
		var yn float64
		x, yn = bbs.InnerSeries.GetValue(index)
		vb.Enqueue(yn)
	}
	mean := bbs.getAverage(vb)
	band := bbs.GetK() * bbs.getStdDev(vb)
	y1 = mean + band
	y2 = mean - band
	return
}
// Render renders the series.
// The series style is layered over the chart defaults (thin, translucent
// axis-colored stroke and fill) before delegating to Draw.BoundedSeries.
func (bbs *BollingerBandsSeries) Render(r Renderer, canvasBox Box, xrange, yrange Range, defaults Style) {
	s := bbs.Style.InheritFrom(defaults.InheritFrom(Style{
		StrokeWidth: 1.0,
		StrokeColor: DefaultAxisColor.WithAlpha(64),
		FillColor:   DefaultAxisColor.WithAlpha(32),
	}))
	Draw.BoundedSeries(r, canvasBox, xrange, yrange, s, bbs, bbs.GetPeriod())
}
// getAverage returns the arithmetic mean of the float64 values in the
// buffer. NOTE(review): an empty buffer yields NaN (0/0); current callers
// either enqueue at least one value or guard on Len() first — confirm.
func (bbs BollingerBandsSeries) getAverage(valueBuffer *RingBuffer) float64 {
	var accum float64
	valueBuffer.Each(func(v interface{}) {
		if typed, isTyped := v.(float64); isTyped {
			accum += typed
		}
	})
	return accum / float64(valueBuffer.Len())
}
// getVariance returns the population variance of the float64 values in the
// buffer (0 for an empty buffer). Entries that are not float64 are skipped
// by the type assertion but still count toward the divisor.
func (bbs BollingerBandsSeries) getVariance(valueBuffer *RingBuffer) float64 {
	if valueBuffer.Len() == 0 {
		return 0
	}
	var variance float64
	m := bbs.getAverage(valueBuffer)
	valueBuffer.Each(func(v interface{}) {
		if n, isTyped := v.(float64); isTyped {
			// n is already float64; the original wrapped it in two
			// redundant float64(...) conversions.
			d := n - m
			variance += d * d
		}
	})
	return variance / float64(valueBuffer.Len())
}
// getStdDev returns the standard deviation (square root of the variance) of
// the buffered values; math.Pow(x, 0.5) is equivalent to math.Sqrt(x) here.
func (bbs BollingerBandsSeries) getStdDev(valueBuffer *RingBuffer) float64 {
	return math.Pow(bbs.getVariance(valueBuffer), 0.5)
} | vendor/github.com/nicholasjackson/bench/vendor/github.com/wcharczuk/go-chart/bollinger_band_series.go | 0.859325 | 0.591989 | bollinger_band_series.go | starcoder
package matrix
import (
"fmt"
"math"
"strings"
)
// Shape describes the dimensions of a two-dimensional matrix.
type Shape struct {
	Row int
	Col int
}
// Size returns the number of elements a matrix of this shape holds.
func (s Shape) Size() int {
	return s.Row * s.Col
}
// String formats the shape as "(rows, cols)".
func (s Shape) String() string {
	return fmt.Sprintf("(%d, %d)", s.Row, s.Col)
}
// ShapeNotEqual reports whether a and b differ in either dimension.
func ShapeNotEqual(a, b Shape) bool {
	return !(a.Row == b.Row && a.Col == b.Col)
}
// Matrix Struct is two-dim matrix like Matlab.
// Elements are stored row-major in a flat backing slice of Row*Col values.
type Matrix struct {
	Shape
	array []float64 // row-major element storage
}
// Get returns the element at row i, column j. Out-of-range indices panic
// with a descriptive message (negative indices still fall through to the
// runtime bounds check on the backing slice).
func (A Matrix) Get(i, j int) float64 {
	if i >= A.Row {
		panic(fmt.Sprintf("index out of bounds: i[%d] >= Row[%d]", i, A.Row))
	}
	if j >= A.Col {
		// Typo fix: the message previously read "Cow" instead of "Col".
		panic(fmt.Sprintf("index out of bounds: j[%d] >= Col[%d]", j, A.Col))
	}
	ind := i*A.Col + j
	return A.array[ind]
}
// GetIndex returns the element at flat (row-major) index ind, panicking
// with a descriptive message when ind is past the end of the array.
func (A Matrix) GetIndex(ind int) float64 {
	if ind >= len(A.array) {
		panic(fmt.Sprintf("index out of bounds: %d >= %d", ind, len(A.array)))
	}
	return A.array[ind]
}
// Set stores v at row i, column j. Bounds are validated the same way as in
// Get: without the checks, a j past Col (with a still-valid flat index)
// would silently write into the next row instead of failing.
func (A Matrix) Set(i, j int, v float64) {
	if i >= A.Row {
		panic(fmt.Sprintf("index out of bounds: i[%d] >= Row[%d]", i, A.Row))
	}
	if j >= A.Col {
		panic(fmt.Sprintf("index out of bounds: j[%d] >= Col[%d]", j, A.Col))
	}
	ind := i*A.Col + j
	A.array[ind] = v
}
// SetIndex stores v at flat (row-major) index ind, panicking with a
// descriptive message when ind is out of range (mirrors GetIndex).
func (A Matrix) SetIndex(ind int, v float64) {
	if ind >= len(A.array) {
		panic(fmt.Sprintf("index out of bounds: %d >= %d", ind, len(A.array)))
	}
	A.array[ind] = v
}
// String renders the matrix in MATLAB style: "[a, b; c, d]". Values within
// 1e-6 above an integer print as integers, others with %5f.
func (A Matrix) String() string {
	rows := make([]string, 0, A.Row)
	for i := 0; i < A.Row; i++ {
		cells := make([]string, 0, A.Col)
		for j := 0; j < A.Col; j++ {
			v := A.Get(i, j)
			if v-math.Floor(v) < 1e-6 {
				cells = append(cells, fmt.Sprintf("%d", int(v)))
			} else {
				cells = append(cells, fmt.Sprintf("%5f", v))
			}
		}
		rows = append(rows, strings.Join(cells, ", "))
	}
	return fmt.Sprintf("[%s]", strings.Join(rows, "; "))
}
// GetCol returns column j as a new Row x 1 column vector.
func (A Matrix) GetCol(j int) (V Matrix) {
	V = Zeros(Shape{Row: A.Row, Col: 1})
	for i := 0; i < A.Row; i++ {
		V.Set(i, 0, A.Get(i, j))
	}
	return
}
// SetCol replaces column j with the column vector V (expected Row x 1).
func (A Matrix) SetCol(j int, V Matrix) {
	for i := 0; i < A.Row; i++ {
		A.Set(i, j, V.Get(i, 0))
	}
}
// GetRow returns row i as a new 1 x Col row vector.
func (A Matrix) GetRow(i int) (V Matrix) {
	V = Zeros(Shape{Row: 1, Col: A.Col})
	for j := 0; j < A.Col; j++ {
		V.Set(0, j, A.Get(i, j))
	}
	return
}
// SetRow replaces row i with the row vector V (expected 1 x Col).
func (A Matrix) SetRow(i int, V Matrix) {
	for j := 0; j < A.Col; j++ {
		A.Set(i, j, V.Get(0, j))
	}
}
// Add returns the element-wise sum A + B; panics when the shapes differ.
func (A Matrix) Add(B Matrix) (S Matrix) {
	if ShapeNotEqual(A.Shape, B.Shape) {
		panic(fmt.Sprintf("two matrix cannot [add]. %v x %v", A.Shape, B.Shape))
	}
	S = Zeros(A.Shape)
	for k := 0; k < S.Size(); k++ {
		S.SetIndex(k, A.GetIndex(k)+B.GetIndex(k))
	}
	return
}
// Sub returns the element-wise difference A - B; panics when shapes differ.
func (A Matrix) Sub(B Matrix) (S Matrix) {
	if ShapeNotEqual(A.Shape, B.Shape) {
		panic(fmt.Sprintf("two matrix cannot [sub]. %v x %v", A.Shape, B.Shape))
	}
	S = Zeros(A.Shape)
	for k := 0; k < S.Size(); k++ {
		S.SetIndex(k, A.GetIndex(k)-B.GetIndex(k))
	}
	return
}
// Mul returns the element-wise (Hadamard) product, shape unchanged; panics
// when the shapes differ.
func (A Matrix) Mul(B Matrix) (S Matrix) {
	if ShapeNotEqual(A.Shape, B.Shape) {
		panic(fmt.Sprintf("two matrix cannot [mul]. %v x %v", A.Shape, B.Shape))
	}
	S = Zeros(A.Shape)
	for k := 0; k < S.Size(); k++ {
		S.SetIndex(k, A.GetIndex(k)*B.GetIndex(k))
	}
	return
}
// Dot returns the matrix product A·B; panics when A.Col != B.Row.
func (A Matrix) Dot(B Matrix) (S Matrix) {
	if A.Col != B.Row {
		panic(fmt.Sprintf("two matrix cannot [dot]. %v x %v", A.Shape, B.Shape))
	}
	S = Zeros(Shape{Row: A.Row, Col: B.Col})
	for i := 0; i < S.Row; i++ {
		for j := 0; j < S.Col; j++ {
			var sum float64
			for k := 0; k < A.Col; k++ {
				sum += A.Get(i, k) * B.Get(k, j)
			}
			S.Set(i, j, sum)
		}
	}
	return
}
// ScaleMul returns a new matrix with every element multiplied by k.
func (A Matrix) ScaleMul(k float64) (S Matrix) {
	S = Zeros(A.Shape)
	for ind := 0; ind < S.Size(); ind++ {
		S.SetIndex(ind, A.GetIndex(ind)*k)
	}
	return
}
// T returns the transpose of A as a new Col x Row matrix.
func (A Matrix) T() (S Matrix) {
	S = Zeros(Shape{Row: A.Col, Col: A.Row})
	for i := 0; i < A.Row; i++ {
		for j := 0; j < A.Col; j++ {
			S.Set(j, i, A.Get(i, j))
		}
	}
	return
}
// Copy returns a deep copy of A (fresh backing storage, same values).
func (A Matrix) Copy() (S Matrix) {
	S = Zeros(A.Shape)
	copy(S.array, A.array)
	return
}
// MatrixAdd adds sour into target element-wise, in place. Shapes are
// assumed equal; no validation is performed.
func MatrixAdd(target, sour Matrix) {
	for k := 0; k < target.Size(); k++ {
		target.SetIndex(k, target.GetIndex(k)+sour.GetIndex(k))
	}
}
// MatrixSub subtracts sour from target element-wise, in place. Shapes are
// assumed equal; no validation is performed.
func MatrixSub(target, sour Matrix) {
	for k := 0; k < target.Size(); k++ {
		target.SetIndex(k, target.GetIndex(k)-sour.GetIndex(k))
	}
}
// MatrixScaleMul multiplies every element of A by k, in place.
func MatrixScaleMul(A Matrix, k float64) {
	for ind := 0; ind < A.Size(); ind++ {
		A.SetIndex(ind, A.GetIndex(ind)*k)
	}
}
// MatrixEqual reports whether A and B have the same shape and all elements
// agree within an absolute tolerance of 1e-8.
func MatrixEqual(A, B Matrix) bool {
	if ShapeNotEqual(A.Shape, B.Shape) {
		return false
	}
	for k := 0; k < A.Size(); k++ {
		if math.Abs(A.GetIndex(k)-B.GetIndex(k)) > 1e-8 {
			return false
		}
	}
	return true
}
// Zeros returns a zero-filled matrix of the given shape.
func Zeros(shape Shape) Matrix {
	array := make([]float64, shape.Size())
	return NewMatrix(shape, array)
}
// Ones returns a matrix of the given shape with every element set to 1.
func Ones(shape Shape) Matrix {
	m := Zeros(shape)
	for k := 0; k < m.Size(); k++ {
		m.SetIndex(k, 1)
	}
	return m
}
// Full returns a matrix of the given shape with every element set to v.
func Full(shape Shape, v float64) Matrix {
	m := Zeros(shape)
	for k := 0; k < m.Size(); k++ {
		m.SetIndex(k, v)
	}
	return m
}
// Eye returns the n x n identity matrix.
func Eye(n int) Matrix {
	m := Zeros(Shape{n, n})
	for i := 0; i < n; i++ {
		m.Set(i, i, 1)
	}
	return m
}
// Diag returns a square matrix with vs on the main diagonal and zeros
// elsewhere. (The original comment called this a "block matrix", which it
// is not.)
func Diag(vs []float64) Matrix {
	n := len(vs)
	A := Zeros(Shape{n, n})
	for i := 0; i < n; i++ {
		A.Set(i, i, vs[i])
	}
	return A
}
// NewMatrix is the default constructor: it wraps the given row-major
// backing slice (not copied) in a Matrix of the given shape.
func NewMatrix(shape Shape, array []float64) (A Matrix) {
	A.Shape = shape
	A.array = array
	return
}
// NewSquareMatrix wraps the backing slice (not copied) as an n x n matrix.
func NewSquareMatrix(n int, array []float64) (A Matrix) {
	A.Shape = Shape{n, n}
	A.array = array
	return A
} | matrix.go | 0.651466 | 0.466603 | matrix.go | starcoder
package turbot
import (
"context"
"fmt"
"regexp"
"strconv"
"github.com/turbot/steampipe-plugin-sdk/grpc/proto"
"github.com/turbot/steampipe-plugin-sdk/plugin"
"github.com/turbot/steampipe-plugin-sdk/plugin/transform"
)
// tableTurbotPolicySetting defines the turbot_policy_setting table:
// all key columns are optional list quals that listPolicySetting turns
// into Turbot filter clauses.
func tableTurbotPolicySetting(ctx context.Context) *plugin.Table {
	return &plugin.Table{
		Name:        "turbot_policy_setting",
		Description: "Policy settings defined in the Turbot workspace.",
		List: &plugin.ListConfig{
			KeyColumns: []*plugin.KeyColumn{
				{Name: "id", Require: plugin.Optional},
				{Name: "resource_id", Require: plugin.Optional},
				{Name: "policy_type_id", Require: plugin.Optional},
				{Name: "policy_type_uri", Require: plugin.Optional},
				{Name: "orphan", Require: plugin.Optional},
				{Name: "exception", Require: plugin.Optional},
				{Name: "filter", Require: plugin.Optional},
			},
			Hydrate: listPolicySetting,
		},
		Columns: []*plugin.Column{
			// Top columns
			{Name: "id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.ID"), Description: "Unique identifier of the policy setting."},
			{Name: "precedence", Type: proto.ColumnType_STRING, Description: "Precedence of the setting: REQUIRED or RECOMMENDED."},
			{Name: "resource_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.ResourceID"), Description: "ID of the resource this policy setting is associated with."},
			{Name: "resource_trunk_title", Type: proto.ColumnType_STRING, Transform: transform.FromField("Resource.Trunk.Title"), Description: "Full title (including ancestor trunk) of the resource."},
			{Name: "policy_type_uri", Type: proto.ColumnType_STRING, Transform: transform.FromField("Type.URI"), Description: "URI of the policy type for this policy setting."},
			{Name: "policy_type_trunk_title", Type: proto.ColumnType_STRING, Transform: transform.FromField("Type.Trunk.Title"), Description: "Full title (including ancestor trunk) of the policy type."},
			{Name: "value", Type: proto.ColumnType_STRING, Description: "Value of the policy setting (for non-calculated policy settings)."},
			// Description fixed: was "True if this is a policy setting will be calculated for each value."
			{Name: "is_calculated", Type: proto.ColumnType_BOOL, Description: "True if this policy setting will be calculated for each value."},
			{Name: "exception", Type: proto.ColumnType_BOOL, Transform: transform.FromField("Exception").Transform(intToBool), Description: "True if this setting is an exception to a higher level setting."},
			{Name: "orphan", Type: proto.ColumnType_BOOL, Transform: transform.FromField("Orphan").Transform(intToBool), Description: "True if this setting is orphaned by a higher level setting."},
			{Name: "note", Type: proto.ColumnType_STRING, Description: "Optional note or comment for the setting."},
			// Other columns
			{Name: "create_timestamp", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Turbot.CreateTimestamp"), Description: "When the policy setting was first discovered by Turbot. (It may have been created earlier.)"},
			{Name: "default", Type: proto.ColumnType_BOOL, Description: "True if this policy setting is the default."},
			{Name: "filter", Type: proto.ColumnType_STRING, Transform: transform.FromQual("filter"), Description: "Filter used for this policy setting list."},
			{Name: "input", Type: proto.ColumnType_STRING, Description: "For calculated policy settings, this is the input GraphQL query."},
			{Name: "policy_type_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.PolicyTypeID"), Description: "ID of the policy type for this policy setting."},
			{Name: "template", Type: proto.ColumnType_STRING, Description: "For a calculated policy setting, this is the nunjucks template string defining a YAML string which is parsed to get the value."},
			{Name: "template_input", Type: proto.ColumnType_STRING, Description: "For calculated policy settings, this GraphQL query is run and used as input to the template."},
			{Name: "timestamp", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Turbot.Timestamp"), Description: "Timestamp when the policy setting was last modified (created, updated or deleted)."},
			{Name: "update_timestamp", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromField("Turbot.UpdateTimestamp"), Description: "When the policy setting was last updated in Turbot."},
			{Name: "valid_from_timestamp", Type: proto.ColumnType_TIMESTAMP, Description: "Timestamp when the policy setting becomes valid."},
			{Name: "valid_to_timestamp", Type: proto.ColumnType_TIMESTAMP, Description: "Timestamp when the policy setting expires."},
			{Name: "value_source", Type: proto.ColumnType_STRING, Description: "The raw value in YAML format. If the setting was made via YAML template including comments, these will be included here."},
			{Name: "version_id", Type: proto.ColumnType_INT, Transform: transform.FromField("Turbot.VersionID"), Description: "Unique identifier for this version of the policy setting."},
			{Name: "workspace", Type: proto.ColumnType_STRING, Hydrate: plugin.HydrateFunc(getTurbotWorkspace).WithCache(), Transform: transform.FromValue(), Description: "Specifies the workspace URL."},
		},
	}
}
const (
	// queryPolicySettingList is the GraphQL query run by
	// listPolicySetting. $filter carries the assembled filter clauses
	// and $next_token the paging cursor; the response shape matches
	// PolicySettingsResponse.
	queryPolicySettingList = `
query policySettingList($filter: [String!], $next_token: String) {
	policySettings(filter: $filter, paging: $next_token) {
		items {
			default
			exception
			input
			isCalculated
			note
			orphan
			precedence
			resource {
				trunk {
					title
				}
			}
			#secretValue
			#secretValueSource
			template
			templateInput
			type {
				uri
				trunk {
					title
				}
			}
			turbot {
				id
				timestamp
				createTimestamp
				updateTimestamp
				versionId
				policyTypeId
				resourceId
			}
			validFromTimestamp
			validToTimestamp
			value
			valueSource
		}
		paging {
			next
		}
	}
}
`
)
// listPolicySetting streams policy settings from the Turbot workspace.
// Optional quals are translated into Turbot filter-language clauses;
// results are paged unless the caller's raw filter string already
// contains its own limit: clause.
func listPolicySetting(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
	conn, err := connect(ctx, d)
	if err != nil {
		plugin.Logger(ctx).Error("turbot_policy_setting.listPolicySetting", "connection_error", err)
		return nil, err
	}
	filters := []string{}
	quals := d.KeyColumnQuals
	filter := ""
	// A raw filter string, if given, is passed through verbatim.
	if quals["filter"] != nil {
		filter = quals["filter"].GetStringValue()
		filters = append(filters, filter)
	}
	// Additional filters
	if quals["id"] != nil {
		filters = append(filters, fmt.Sprintf("id:%s", getQualListValues(ctx, quals, "id", "int64")))
	}
	if quals["policy_type_id"] != nil {
		filters = append(filters, fmt.Sprintf("policyTypeId:%s policyTypeLevel:self", getQualListValues(ctx, quals, "policy_type_id", "int64")))
	}
	// Policy type URIs are also matched through the policyTypeId clause.
	if quals["policy_type_uri"] != nil {
		filters = append(filters, fmt.Sprintf("policyTypeId:%s policyTypeLevel:self", getQualListValues(ctx, quals, "policy_type_uri", "string")))
	}
	// NOTE(review): this clause uses resourceTypeLevel:self while the
	// type clauses use policyTypeLevel:self — confirm resourceTypeLevel
	// is the intended keyword for a resource_id match.
	if quals["resource_id"] != nil {
		filters = append(filters, fmt.Sprintf("resourceId:%s resourceTypeLevel:self", getQualListValues(ctx, quals, "resource_id", "int64")))
	}
	// Boolean quals map to is:/-is: filter flags.
	if quals["orphan"] != nil {
		orphan := quals["orphan"].GetBoolValue()
		if orphan {
			filters = append(filters, "is:orphan")
		} else {
			filters = append(filters, "-is:orphan")
		}
	}
	if quals["exception"] != nil {
		exception := quals["exception"].GetBoolValue()
		if exception {
			filters = append(filters, "is:exception")
		} else {
			filters = append(filters, "-is:exception")
		}
	}
	// Default to a very large page size. Page sizes earlier in the filter string
	// win, so this is only used as a fallback.
	pageResults := false
	// Add a limit if they haven't given one in the filter field
	re := regexp.MustCompile(`(^|\s)limit:[0-9]+($|\s)`)
	if !re.MatchString(filter) {
		// The caller did not specify a limit, so set a high limit and page all
		// results.
		pageResults = true
		var pageLimit int64 = 5000
		// Adjust page limit, if less than default value
		limit := d.QueryContext.Limit
		if d.QueryContext.Limit != nil {
			if *limit < pageLimit {
				pageLimit = *limit
			}
		}
		filters = append(filters, fmt.Sprintf("limit:%s", strconv.Itoa(int(pageLimit))))
	}
	plugin.Logger(ctx).Trace("turbot_policy_setting.listPolicySetting", "filters", filters)
	nextToken := ""
	// Page through results until the backend reports no next cursor,
	// paging was disabled (caller supplied a limit), or the row budget
	// is exhausted.
	for {
		result := &PolicySettingsResponse{}
		err = conn.DoRequest(queryPolicySettingList, map[string]interface{}{"filter": filters, "next_token": nextToken}, result)
		if err != nil {
			plugin.Logger(ctx).Error("turbot_policy_setting.listPolicySetting", "query_error", err)
			return nil, err
		}
		for _, r := range result.PolicySettings.Items {
			d.StreamListItem(ctx, r)
			// Context can be cancelled due to manual cancellation or the limit has been hit
			if d.QueryStatus.RowsRemaining(ctx) == 0 {
				return nil, nil
			}
		}
		if !pageResults || result.PolicySettings.Paging.Next == "" {
			break
		}
		nextToken = result.PolicySettings.Paging.Next
	}
	return nil, nil
}
package xlsx
import (
"fmt"
"strconv"
"time"
"github.com/tealeg/xlsx"
"github.com/varshaprasad96/operator-sdk-data-collector/pkg/fields"
)
// GetOutput writes the collected operator data to a timestamped .xlsx
// report under outputFilePath: one "overall" summary sheet followed by
// one sheet per operator index. The file is saved on return via defer;
// a save failure is only logged, never returned.
func GetOutput(data fields.OperatorData, outputFilePath string) error {
	output := xlsx.NewFile()
	if err := createSheetAnfFillOverallData(output, data.SDKVersionCount, data.OperatorTypeCount, data.LayoutData, data.VersionData); err != nil {
		return fmt.Errorf("error getting overall data %v", err)
	}
	if err := createSheetsAndFillIndexData(output, "all-operators", data.AllOperators); err != nil {
		return fmt.Errorf("error writing data for all operators %v", err)
	}
	if err := createSheetsAndFillIndexData(output, "community", data.CommunityOperators); err != nil {
		return fmt.Errorf("error writing data for community operators %v", err)
	}
	if err := createSheetsAndFillIndexData(output, "certified", data.CertifiedOperators); err != nil {
		return fmt.Errorf("error writing data for certified operators %v", err)
	}
	if err := createSheetsAndFillIndexData(output, "marketplace", data.MarketplaceOperators); err != nil {
		return fmt.Errorf("error writing data for marketplace operators %v", err)
	}
	if err := createSheetsAndFillIndexData(output, "operatorhub", data.OperatorHub); err != nil {
		return fmt.Errorf("error writing data for operatorhub operators %v", err)
	}
	if err := createSheetsAndFillIndexData(output, "redhat", data.RedHatOperators); err != nil {
		return fmt.Errorf("error writing data for redhat operators %v", err)
	}
	// The prod index duplicates the redhat index; kept so the report
	// still carries an explicit "prod" sheet.
	if err := createSheetsAndFillIndexData(output, "prod", data.ProdOperators); err != nil {
		return fmt.Errorf("error writing data for prod operators %v", err)
	}
	defer func() {
		outputName := time.Now().Format("Mon-Jan2-15:04:05PST-2006")
		if err := output.Save(outputFilePath + outputName + ".xlsx"); err != nil {
			// Fixed message (was "error whilesaving report" and dropped err).
			fmt.Printf("error while saving report: %v\n", err)
		}
	}()
	return nil
}
// createSheetsAndFillIndexData adds one sheet named after the index
// and writes one row per operator: name, CSV creation timestamp,
// company, operator type, SDK version, and CSV name. The header row
// written by initializeReport must supply a title for each of these
// six cells.
func createSheetsAndFillIndexData(f *xlsx.File, index string, data map[string]fields.ReportColumns) error {
	sheet, err := f.AddSheet(index)
	if err != nil {
		return fmt.Errorf("error creating xlsx sheet")
	}
	initializeReport(sheet)
	// Map iteration: row order within the sheet is not deterministic.
	for _, value := range data {
		row := sheet.AddRow()
		// Add operator Name
		row.AddCell().Value = value.Operator
		// Add csv timestamp
		row.AddCell().Value = value.CreatedAt
		// Add name of the company
		row.AddCell().Value = value.Company
		// Add operator type
		row.AddCell().Value = value.OperatorType
		// Add sdk version
		row.AddCell().Value = value.SDKVersion
		// Add csv name
		row.AddCell().Value = value.CSVName
	}
	return nil
}
// initializeReport writes the header row for an index sheet. It must
// supply one title per data cell written by createSheetsAndFillIndexData;
// the sixth title ("CSV Name") was previously missing, leaving the CSV
// name column unlabeled.
func initializeReport(sh *xlsx.Sheet) {
	row := sh.AddRow()
	row.AddCell().Value = "Operator name"
	row.AddCell().Value = "CreatedAt - timestamp"
	row.AddCell().Value = "Company"
	row.AddCell().Value = "Operator type"
	row.AddCell().Value = "Sdk Version"
	row.AddCell().Value = "CSV Name"
}
// createSheetAnfFillOverallData writes the "overall" summary sheet:
// counts by operator kind, by layout stamp, by pre/post SDK 1.0, and
// by exact SDK version, each table separated by two blank rows.
// (The "Anf" in the name is a typo for "And", kept so the caller in
// GetOutput keeps compiling.)
func createSheetAnfFillOverallData(f *xlsx.File, version fields.SDKVersion, opType fields.OperatorType, layout, versionData map[string]int) error {
	sheet, err := f.AddSheet("overall")
	if err != nil {
		return fmt.Errorf("error creating xlsx sheet")
	}
	// Table 1: operator count by kind (Go / Ansible / Helm).
	r := sheet.AddRow()
	r.AddCell().Value = "Kind of Operator"
	r.AddCell().Value = "Count"
	r = sheet.AddRow()
	r.AddCell().Value = "Go"
	r.AddCell().Value = strconv.Itoa(opType.Go)
	r = sheet.AddRow()
	r.AddCell().Value = "Ansible"
	r.AddCell().Value = strconv.Itoa(opType.Ansible)
	r = sheet.AddRow()
	r.AddCell().Value = "Helm"
	r.AddCell().Value = strconv.Itoa(opType.Helm)
	addGap(sheet)
	// Table 2: operator count by layout stamp.
	r = sheet.AddRow()
	r.AddCell().Value = "Layout"
	r.AddCell().Value = "Number of operators"
	for key, val := range layout {
		// Empty key means the operator carried no layout stamp.
		if key == "" {
			key = "Without stamp"
		}
		r = sheet.AddRow()
		r.AddCell().Value = key
		r.AddCell().Value = strconv.Itoa(val)
	}
	addGap(sheet)
	// Table 3: operators before and after the SDK 1.0 release.
	r = sheet.AddRow()
	r.AddCell().Value = "Version"
	r.AddCell().Value = "Count"
	r = sheet.AddRow()
	r.AddCell().Value = "Pre SDK 1.0 Operators"
	r.AddCell().Value = strconv.Itoa(version.PreMajorRel)
	r = sheet.AddRow()
	r.AddCell().Value = "Post SDK 1.0 Operators"
	r.AddCell().Value = strconv.Itoa(version.PostMajorel)
	addGap(sheet)
	// Table 4: operator count per exact SDK version string.
	r = sheet.AddRow()
	r.AddCell().Value = "Version"
	r.AddCell().Value = "Number of operators"
	for key, val := range versionData {
		if key == "" {
			key = "Without stamp"
		}
		r = sheet.AddRow()
		r.AddCell().Value = key
		r.AddCell().Value = strconv.Itoa(val)
	}
	return nil
}
// initializeVersionTable writes the header row for a per-kind version
// table (unused in this file; possibly called elsewhere in the package).
func initializeVersionTable(sh *xlsx.Sheet) {
	header := sh.AddRow()
	for _, title := range []string{"Kind of operator", "Go", "Ansible", "Helm"} {
		header.AddCell().Value = title
	}
}
func addGap(sh *xlsx.Sheet) {
sh.AddRow()
sh.AddRow()
} | pkg/output/xlsx/xlsx.go | 0.518546 | 0.438966 | xlsx.go | starcoder |
package items
import (
"fmt"
"strings"
"github.com/goccmack/gocc/internal/ast"
)
// itemPos tracks a position inside nested lexer NT nodes as a stack:
// pushing descends into a sub-node, popping returns to the parent.
type itemPos struct {
	stack []stackElement
}

// stackElement pairs an NT node with a position (symbol index) inside it.
type stackElement struct {
	node ast.LexNTNode
	pos int
}
// newItemPos creates an itemPos positioned at the start of lexPattern.
func newItemPos(lexPattern *ast.LexPattern) *itemPos {
	ip := &itemPos{stack: make([]stackElement, 0, 8)}
	ip.push(lexPattern, 0)
	return ip
}
// clone returns a deep copy of the position stack.
func (this *itemPos) clone() *itemPos {
	dup := &itemPos{stack: make([]stackElement, len(this.stack))}
	copy(dup.stack, this.stack)
	return dup
}

// inc advances the position of the top stack element by one.
func (this *itemPos) inc() {
	this.stack[len(this.stack)-1].pos++
}

// pop removes and returns the top stack element.
func (this *itemPos) pop() (ast.LexNTNode, int) {
	node, pos := this.top()
	this.stack = this.stack[:len(this.stack)-1]
	return node, pos
}

// push adds node with position pos on top of the stack.
func (this *itemPos) push(node ast.LexNTNode, pos int) {
	this.stack = append(this.stack, stackElement{node: node, pos: pos})
}
// top returns the node and position of the topmost stack element.
func (this *itemPos) top() (ast.LexNTNode, int) {
	elem := this.stack[len(this.stack)-1]
	return elem.node, elem.pos
}

// level returns the stacking depth of the item; the bottom is 0.
func (this *itemPos) level() int {
	return len(this.stack) - 1
}

// ntNode returns the ast node at the top of the stack.
func (this *itemPos) ntNode() ast.LexNTNode {
	node, _ := this.top()
	return node
}

// pos returns the position within the top element of the stack.
func (this *itemPos) pos() int {
	_, p := this.top()
	return p
}

// setPos sets the position of the top stack element to i.
func (this *itemPos) setPos(i int) {
	this.stack[this.level()].pos = i
}

// setToEnd moves the position of the top element to its node's length,
// i.e. one past the last symbol.
func (this *itemPos) setToEnd() {
	node, _ := this.top()
	this.stack[this.level()].pos = node.Len()
}
// equal reports whether both itemPos values carry identical stacks
// (same depth, same node and position at every level).
func (this *itemPos) equal(that *itemPos) bool {
	if len(this.stack) != len(that.stack) {
		return false
	}
	for i, e := range this.stack {
		if e.node != that.stack[i].node || e.pos != that.stack[i].pos {
			return false
		}
	}
	return true
}
func (this *itemPos) String() string {
buf := new(strings.Builder)
for i := 0; i < len(this.stack); i++ {
fmt.Fprintf(buf, "\t%T:%v; pos %d\n", (*this).stack[i].node, (*this).stack[i].node, this.stack[i].pos)
}
return buf.String()
} | internal/lexer/items/itempos.go | 0.6137 | 0.441673 | itempos.go | starcoder |
package nn
import (
"encoding/json"
tsr "../tensor"
)
// FlattenLayer is a layer that flattens data into a single frame with a single row.
type FlattenLayer struct {
	inputShape LayerShape // rows/cols/frames accepted by FeedForward
	outputShape LayerShape // always {1, rows*cols*frames, 1}
	inputs *tsr.Tensor // copy of the most recent forward input
	outputs *tsr.Tensor // flattened forward output, reused each call
}
// NewFlattenLayer creates a flattening layer for inputs with the given
// rows, columns and frames; its output is one frame holding a single
// row of rows*cols*frames values.
func NewFlattenLayer(inputRows int, inputCols int, inputFrames int) *FlattenLayer {
	flatSize := inputRows * inputCols * inputFrames
	return &FlattenLayer{
		inputShape:  LayerShape{inputRows, inputCols, inputFrames},
		outputShape: LayerShape{1, flatSize, 1},
		inputs:      tsr.NewEmptyTensor3D(inputFrames, inputRows, inputCols),
		outputs:     tsr.NewEmptyTensor1D(flatSize),
	}
}
// Copy creates a deep copy of the layer.
func (layer *FlattenLayer) Copy() Layer {
	shape := layer.InputShape()
	return NewFlattenLayer(shape.Rows, shape.Cols, shape.Frames)
}

// InputShape returns the rows, columns and frames of the inputs to the layer.
func (layer *FlattenLayer) InputShape() LayerShape {
	return layer.inputShape
}

// OutputShape returns the rows, columns and frames of outputs from the layer.
func (layer *FlattenLayer) OutputShape() LayerShape {
	return layer.outputShape
}
// FeedForward copies inputs into the flat output row in frame-major,
// then row-major, then column order, and remembers the raw inputs for
// BackPropagate.
func (layer *FlattenLayer) FeedForward(inputs *tsr.Tensor) (*tsr.Tensor, error) {
	layer.inputs.SetTensor(inputs)
	index := 0
	for frame := 0; frame < inputs.Frames; frame++ {
		for row := 0; row < inputs.Rows; row++ {
			for col := 0; col < inputs.Cols; col++ {
				layer.outputs.Set(0, 0, index, inputs.Get(frame, row, col))
				index++
			}
		}
	}
	return layer.outputs, nil
}
// BackPropagate unflattens the data to its original shape.
// NOTE(review): this returns the inputs captured during FeedForward
// rather than reshaping the incoming `outputs` gradient — confirm that
// is intended. learningRate and momentum are unused, consistent with a
// flatten layer having no trainable weights.
func (layer *FlattenLayer) BackPropagate(outputs *tsr.Tensor, learningRate float32, momentum float32) (*tsr.Tensor, error) {
	return layer.inputs, nil
}
// FlattenLayerData represents a serialized layer that can be saved to a file.
// Only the input dimensions are stored; the tensors and output shape
// are rebuilt from them in UnmarshalJSON.
type FlattenLayerData struct {
	Type LayerType `json:"type"`
	InputRows int `json:"inputRows"`
	InputCols int `json:"inputCols"`
	InputFrames int `json:"inputFrames"`
}
// MarshalJSON converts the layer to JSON, serializing only its input
// dimensions and layer type.
func (layer *FlattenLayer) MarshalJSON() ([]byte, error) {
	shape := layer.InputShape()
	return json.Marshal(FlattenLayerData{
		Type:        LayerTypeFlatten,
		InputRows:   shape.Rows,
		InputCols:   shape.Cols,
		InputFrames: shape.Frames,
	})
}
// UnmarshalJSON creates a new layer from JSON.
func (layer *FlattenLayer) UnmarshalJSON(b []byte) error {
data := FlattenLayerData{}
err := json.Unmarshal(b, &data)
if err != nil {
return err
}
layer.inputs = tsr.NewEmptyTensor3D(data.InputFrames, data.InputRows, data.InputCols)
outputSize := data.InputRows * data.InputCols * data.InputFrames
layer.outputs = tsr.NewEmptyTensor1D(outputSize)
layer.inputShape = LayerShape{data.InputRows, data.InputCols, data.InputFrames}
layer.outputShape = LayerShape{1, outputSize, 1}
return nil
} | nn/flattenLayer.go | 0.872822 | 0.599016 | flattenLayer.go | starcoder |
package plan
import (
"fmt"
"strconv"
"strings"
"github.com/XiaoMi/Gaea/mysql"
"github.com/XiaoMi/Gaea/parser/ast"
"github.com/XiaoMi/Gaea/util/hack"
"github.com/XiaoMi/Gaea/util/math"
)
// ResultRow is one row in a Result; each element holds one column value.
type ResultRow []interface{}

// GetInt reads the column as an int64, converting from the stored type.
// NULL (nil) yields 0; strings and byte slices are parsed as base-10.
// copy from Resultset.GetInt()
func (r ResultRow) GetInt(column int) (int64, error) {
	switch v := r[column].(type) {
	case int64:
		return v, nil
	case uint64:
		return int64(v), nil
	case float64:
		return int64(v), nil
	case string:
		return strconv.ParseInt(v, 10, 64)
	case []byte:
		return strconv.ParseInt(string(v), 10, 64)
	case nil:
		return 0, nil
	default:
		return 0, fmt.Errorf("data type is %T", v)
	}
}

// GetUint reads the column as a uint64, converting from the stored type.
func (r ResultRow) GetUint(column int) (uint64, error) {
	switch v := r[column].(type) {
	case uint64:
		return v, nil
	case int64:
		return uint64(v), nil
	case float64:
		return uint64(v), nil
	case string:
		return strconv.ParseUint(v, 10, 64)
	case []byte:
		return strconv.ParseUint(string(v), 10, 64)
	case nil:
		return 0, nil
	default:
		return 0, fmt.Errorf("data type is %T", v)
	}
}

// GetFloat reads the column as a float64, converting from the stored type.
func (r ResultRow) GetFloat(column int) (float64, error) {
	switch v := r[column].(type) {
	case float64:
		return v, nil
	case uint64:
		return float64(v), nil
	case int64:
		return float64(v), nil
	case string:
		return strconv.ParseFloat(v, 64)
	case []byte:
		return strconv.ParseFloat(string(v), 64)
	case nil:
		return 0, nil
	default:
		return 0, fmt.Errorf("data type is %T", v)
	}
}

// SetValue stores value into the given column.
func (r ResultRow) SetValue(column int, value interface{}) {
	r[column] = value
}

// GetValue returns the raw value stored in the given column.
func (r ResultRow) GetValue(column int) interface{} {
	return r[column]
}
// AggregateFuncMerger is the merger of aggregate function
type AggregateFuncMerger interface {
	// MergeTo merges one row into the aggregated result: from is the
	// row to merge, to is the accumulating result row.
	MergeTo(from, to ResultRow) error
}

// aggregateFuncBaseMerger carries the column index an aggregate
// function operates on; it is embedded by every concrete merger.
type aggregateFuncBaseMerger struct {
	fieldIndex int // column position of the aggregate in the row
}
// CreateAggregateFunctionMerger create AggregateFunctionMerger by function type
// currently support: "count", "sum", "max", "min"
func CreateAggregateFunctionMerger(funcType string, fieldIndex int) (AggregateFuncMerger, error) {
	base := aggregateFuncBaseMerger{fieldIndex: fieldIndex}
	switch strings.ToLower(funcType) {
	case "count":
		return &AggregateFuncCountMerger{aggregateFuncBaseMerger: base}, nil
	case "sum":
		return &AggregateFuncSumMerger{aggregateFuncBaseMerger: base}, nil
	case "max":
		return &AggregateFuncMaxMerger{aggregateFuncBaseMerger: base}, nil
	case "min":
		return &AggregateFuncMinMerger{aggregateFuncBaseMerger: base}, nil
	default:
		return nil, fmt.Errorf("aggregate function type is not support: %s", funcType)
	}
}
// AggregateFuncCountMerger merge COUNT() column in result
type AggregateFuncCountMerger struct {
	aggregateFuncBaseMerger
}

// MergeTo implement AggregateFuncMerger
// COUNT partial results are simply summed as signed integers.
func (a *AggregateFuncCountMerger) MergeTo(from, to ResultRow) error {
	idx := a.fieldIndex
	if idx >= len(from) || idx >= len(to) {
		return fmt.Errorf("field index out of bound: %d", a.fieldIndex)
	}
	addend, err := from.GetInt(idx)
	if err != nil {
		return fmt.Errorf("get from int value error: %v", err)
	}
	total, err := to.GetInt(idx)
	if err != nil {
		return fmt.Errorf("get to int value error: %v", err)
	}
	to.SetValue(idx, total+addend)
	return nil
}
// AggregateFuncSumMerger merge SUM() column in result
type AggregateFuncSumMerger struct {
	aggregateFuncBaseMerger
}

// MergeTo implement AggregateFuncMerger
// The addition is dispatched on the type already accumulated in to:
// int64 and uint64 stay in integer arithmetic; float64, string, []byte
// and nil are summed as float64. A NULL (nil) from value is skipped.
func (a *AggregateFuncSumMerger) MergeTo(from, to ResultRow) error {
	idx := a.fieldIndex
	if idx >= len(from) || idx >= len(to) {
		return fmt.Errorf("field index out of bound: %d", a.fieldIndex)
	}
	fromValueI := from.GetValue(idx)
	// nil represents NULL; NULL does not participate in the sum.
	if fromValueI == nil {
		return nil
	}
	switch to.GetValue(idx).(type) {
	case int64:
		return a.sumToInt64(from, to)
	case uint64:
		return a.sumToUint64(from, to)
	case float64, string, []byte, nil:
		return a.sumToFloat64(from, to)
	default:
		fromValue := from.GetValue(idx)
		toValue := to.GetValue(idx)
		return fmt.Errorf("cannot sum value %v (%T) to %v (%T)", fromValue, fromValue, toValue, toValue)
	}
}

// sumToInt64 adds the column as signed integers.
func (a *AggregateFuncSumMerger) sumToInt64(from, to ResultRow) error {
	idx := a.fieldIndex // bounds already checked by MergeTo
	valueToMerge, err := from.GetInt(idx)
	if err != nil {
		return fmt.Errorf("get from int value error: %v", err)
	}
	originValue, err := to.GetInt(idx)
	if err != nil {
		return fmt.Errorf("get to int value error: %v", err)
	}
	to.SetValue(idx, originValue+valueToMerge)
	return nil
}

// sumToUint64 adds the column as unsigned integers.
func (a *AggregateFuncSumMerger) sumToUint64(from, to ResultRow) error {
	idx := a.fieldIndex // bounds already checked by MergeTo
	valueToMerge, err := from.GetUint(idx)
	if err != nil {
		// Message fixed: previously said "int" (copy-paste from sumToInt64).
		return fmt.Errorf("get from uint value error: %v", err)
	}
	originValue, err := to.GetUint(idx)
	if err != nil {
		return fmt.Errorf("get to uint value error: %v", err)
	}
	to.SetValue(idx, originValue+valueToMerge)
	return nil
}

// sumToFloat64 adds the column as floats.
func (a *AggregateFuncSumMerger) sumToFloat64(from, to ResultRow) error {
	idx := a.fieldIndex // bounds already checked by MergeTo
	valueToMerge, err := from.GetFloat(idx)
	if err != nil {
		// Message fixed: previously said "int" (copy-paste from sumToInt64).
		return fmt.Errorf("get from float value error: %v", err)
	}
	originValue, err := to.GetFloat(idx)
	if err != nil {
		return fmt.Errorf("get to float value error: %v", err)
	}
	to.SetValue(idx, originValue+valueToMerge)
	return nil
}
// AggregateFuncMaxMerger merge MAX() column in result
type AggregateFuncMaxMerger struct {
	aggregateFuncBaseMerger
}

// MergeTo implement AggregateFuncMerger
// It keeps the larger of the two column values, dispatching on the
// type stored in to. A NULL (nil) from value never wins; a NULL to
// value is replaced by from. Mismatched from/to types now return the
// "cannot compare" error instead of panicking on the type assertion.
func (a *AggregateFuncMaxMerger) MergeTo(from, to ResultRow) error {
	idx := a.fieldIndex
	if idx >= len(from) || idx >= len(to) {
		return fmt.Errorf("field index out of bound: %d", a.fieldIndex)
	}
	fromValueI := from.GetValue(idx)
	toValueI := to.GetValue(idx)
	// nil represents NULL; NULL does not participate in the comparison.
	if fromValueI == nil {
		return nil
	}
	switch toValue := toValueI.(type) {
	case nil:
		to.SetValue(idx, fromValueI)
		return nil
	case int64:
		fromValue, ok := fromValueI.(int64)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue > toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	case uint64:
		fromValue, ok := fromValueI.(uint64)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue > toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	case float64:
		fromValue, ok := fromValueI.(float64)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue > toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	case string:
		fromValue, ok := fromValueI.(string)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue > toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	// does not handle []byte
	default:
		return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
	}
}
// AggregateFuncMinMerger merge MIN() column in result
type AggregateFuncMinMerger struct {
	aggregateFuncBaseMerger
}

// MergeTo implement AggregateFuncMerger
// It keeps the smaller of the two column values, dispatching on the
// type stored in to. A NULL (nil) from value never wins; a NULL to
// value is replaced by from. Mismatched from/to types now return the
// "cannot compare" error instead of panicking on the type assertion.
func (a *AggregateFuncMinMerger) MergeTo(from, to ResultRow) error {
	idx := a.fieldIndex
	if idx >= len(from) || idx >= len(to) {
		return fmt.Errorf("field index out of bound: %d", a.fieldIndex)
	}
	fromValueI := from.GetValue(idx)
	toValueI := to.GetValue(idx)
	// nil represents NULL; NULL does not participate in the comparison.
	if fromValueI == nil {
		return nil
	}
	switch toValue := toValueI.(type) {
	case nil:
		to.SetValue(idx, fromValueI)
		return nil
	case int64:
		fromValue, ok := fromValueI.(int64)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue < toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	case uint64:
		fromValue, ok := fromValueI.(uint64)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue < toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	case float64:
		fromValue, ok := fromValueI.(float64)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue < toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	case string:
		fromValue, ok := fromValueI.(string)
		if !ok {
			return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
		}
		if fromValue < toValue {
			to.SetValue(idx, fromValueI)
		}
		return nil
	// does not handle []byte
	default:
		return fmt.Errorf("cannot compare value %v (%T) to %v (%T)", fromValueI, fromValueI, toValueI, toValueI)
	}
}
// MergeExecResult merge execution results, like UPDATE, INSERT, DELETE, ...
// Status flags are OR-ed, affected rows are summed, and the smallest
// non-zero insert id is kept.
func MergeExecResult(rs []*mysql.Result) (*mysql.Result, error) {
	merged := new(mysql.Result)
	for _, one := range rs {
		merged.Status |= one.Status
		merged.AffectedRows += one.AffectedRows
		switch {
		case merged.InsertID == 0:
			merged.InsertID = one.InsertID
		case one.InsertID != 0 && merged.InsertID > one.InsertID:
			// last insert id is first gen id for multi row inserted
			// see http://dev.mysql.com/doc/refman/5.6/en/information-functions.html#function_last-insert-id
			merged.InsertID = one.InsertID
		}
	}
	return merged, nil
}
// MergeSelectResult merge select results
// Pipeline: concatenate shard rows, apply DISTINCT, aggregate (GROUP
// BY path or plain-aggregate path), sort on the padded ORDER BY
// columns, apply LIMIT, strip the padded columns, then regenerate the
// wire-format row data.
func MergeSelectResult(p *SelectPlan, stmt *ast.SelectStmt, rs []*mysql.Result) (*mysql.Result, error) {
	ret := mergeMultiResultSet(rs)
	if p.distinct {
		if err := removeDistinctRowInResult(p, ret); err != nil {
			return nil, err
		}
	}
	if stmt.GroupBy != nil {
		if err := buildSelectGroupByResult(p, ret); err != nil {
			return nil, err
		}
	} else {
		if err := buildSelectOnlyResult(p, ret); err != nil {
			return nil, err
		}
	}
	if err := sortSelectResult(p, stmt, ret); err != nil {
		return nil, err
	}
	if err := limitSelectResult(p, ret); err != nil {
		return nil, err
	}
	if err := trimExtraFields(p, ret); err != nil {
		return nil, fmt.Errorf("trimExtraFields error: %v", err)
	}
	if err := GenerateSelectResultRowData(ret); err != nil {
		return nil, fmt.Errorf("generate RowData error: %v", err)
	}
	return ret, nil
}
// mergeMultiResultSet folds all shard results into rs[0] and returns
// it. Column metadata is assumed identical across shards, so only the
// status flags and row data are combined.
func mergeMultiResultSet(rs []*mysql.Result) *mysql.Result {
	merged := rs[0]
	for _, r := range rs[1:] {
		merged.Status |= r.Status
		merged.Values = append(merged.Values, r.Values...)
		merged.RowDatas = append(merged.RowDatas, r.RowDatas...)
	}
	return merged
}
// removeDistinctRowInResult drops duplicate rows for SELECT DISTINCT.
// Rows are keyed on the original (non-padded) columns only; the first
// occurrence of each key is kept and later duplicates removed while
// preserving row order.
func removeDistinctRowInResult(p *SelectPlan, r *mysql.Result) error {
	distinctKeySet := make(map[string]bool)
	var rowToRemove []int
	// Column count of the original select list, excluding the columns
	// padded onto the query for ORDER BY / GROUP BY.
	resultFieldLength := len(r.Fields)
	originColumnCount := p.GetColumnCount()
	deltaColumnCount := resultFieldLength - originColumnCount
	colCnt := p.originColumnCount + deltaColumnCount
	// First pass: record the indexes of duplicate rows, keyed on the
	// original column values.
	rowCount := len(r.Values)
	for i := 0; i < rowCount; i++ {
		keySlice := r.Values[i][0:colCnt]
		mk, err := generateMapKey(keySlice)
		if err != nil {
			return err
		}
		_, ok := distinctKeySet[mk]
		if !ok {
			distinctKeySet[mk] = true
		} else {
			rowToRemove = append(rowToRemove, i)
		}
	}
	rowToRemoveCnt := len(rowToRemove)
	if rowToRemoveCnt == 0 {
		return nil
	}
	// Second pass: rebuild Values in place, skipping the recorded
	// indexes (rowToRemove is already in ascending order).
	originRows := r.Values
	r.RowDatas = nil
	r.Values = originRows[:0]
	var j int
	for i := 0; i < rowCount; i++ {
		if j == rowToRemoveCnt {
			// No more rows to drop: keep the rest wholesale.
			r.Values = append(r.Values, originRows[i:]...)
			break
		}
		if i == rowToRemove[j] {
			j++
		} else {
			r.Values = append(r.Values, originRows[i])
		}
	}
	return nil
}
// buildSelectGroupByResult aggregates rows by the GROUP BY key columns
// (which were padded onto the query, after any other padded columns).
// contains mergeGroupByWithoutFunc() and mergeGroupByWithFunc()
func buildSelectGroupByResult(p *SelectPlan, r *mysql.Result) error {
	resultMap := make(map[string]ResultRow)
	resultFieldLength := len(r.Fields)
	originColumnCount := p.GetColumnCount()
	deltaColumnCount := resultFieldLength - originColumnCount
	groupByStart, groupByCount := p.GetGroupByColumnInfo()
	groupByStartIndex := deltaColumnCount + groupByStart
	groupByEndIndex := groupByStartIndex + groupByCount
	// Aggregate rows keyed on the GROUP BY column values.
	for i, v := range r.Values {
		keySlice := v[groupByStartIndex:groupByEndIndex]
		mk, err := generateMapKey(keySlice)
		if err != nil {
			return err
		}
		// The first row seen for a key becomes its aggregation row.
		_, ok := resultMap[mk]
		if !ok {
			resultMap[mk] = ResultRow(r.Values[i])
			continue
		}
		if len(p.aggregateFuncs) == 0 {
			continue
		}
		// With aggregate functions present, fold this row's aggregate
		// columns into the accumulated row; other columns are untouched.
		retToMerge := ResultRow(r.Values[i])
		for _, mfunc := range p.aggregateFuncs {
			if err := mfunc.MergeTo(retToMerge, resultMap[mk]); err != nil {
				return fmt.Errorf("MergeTo error, func: %v, value: %v, err: %v", mfunc, retToMerge, err)
			}
		}
	}
	err := buildResultFromResultMap(r, resultMap)
	if err != nil {
		return fmt.Errorf("buildResultFromResultMap error: %v", err)
	}
	return nil
}
// buildSelectOnlyResult merges rows for a SELECT without GROUP BY.
// Without aggregate functions the concatenated shard rows are already
// final; with aggregate functions every row is folded into the first
// one, leaving a single result row.
func buildSelectOnlyResult(p *SelectPlan, rs *mysql.Result) error {
	r := rs.Resultset
	// 没有聚合函数, 直接把所有分片结果添加到同一个ResultSet下面
	// (no aggregate functions: all shard rows already live in one
	// result set, nothing to do)
	if len(p.aggregateFuncs) == 0 {
		return nil
	}
	// 存在聚合函数, 需要改写聚合列的值, 然后返回 (应该只有一行记录)
	// (aggregate functions present: rewrite the aggregate columns;
	// the outcome should be a single row)
	isSet := false
	var currRet ResultRow
	for i, v := range r.Values {
		if !isSet {
			// First row becomes the accumulator.
			isSet = true
			currRet = ResultRow(v)
			continue
		}
		retToMerge := ResultRow(r.Values[i])
		for _, mfunc := range p.aggregateFuncs {
			if err := mfunc.MergeTo(retToMerge, currRet); err != nil {
				return fmt.Errorf("MergeTo error, func: %v, value: %v, err: %v", mfunc, retToMerge, err)
			}
		}
	}
	r.Values = r.Values[:0]
	r.Values = append(r.Values, currRet)
	r.RowDatas = nil
	return nil
}
// buildResultFromResultMap replaces r's rows with the aggregated rows
// collected in resultMap. Map iteration order is random, so row order
// here is nondeterministic; the caller sorts afterwards when an ORDER
// BY is present.
// this function modifies the first value of origin results
func buildResultFromResultMap(r *mysql.Result, resultMap map[string]ResultRow) error {
	// no group by result means the result row count is 0, so return the first result
	if len(resultMap) == 0 {
		return nil
	}
	r.Values = nil
	r.RowDatas = nil
	for _, v := range resultMap {
		r.Values = append(r.Values, v)
	}
	return nil
}
// trimExtraFields removes the columns that were padded onto the query,
// in the reverse order they were added: first the ORDER BY columns,
// then the GROUP BY columns. Fields and every row are truncated at the
// first padded column index.
func trimExtraFields(p *SelectPlan, r *mysql.Result) error {
	resultFieldLength := len(r.Fields)
	originColumnCount := p.GetColumnCount()
	deltaColumnCount := resultFieldLength - originColumnCount
	var extraFieldStartIndex = -1
	if p.HasOrderBy() {
		orderByStart, orderByDescs := p.GetOrderByColumnInfo()
		if len(orderByDescs) != 0 {
			extraFieldStartIndex = deltaColumnCount + orderByStart
		}
	}
	// GROUP BY columns sit before the ORDER BY columns, so this start
	// index (when present) wins.
	if p.HasGroupBy() {
		extraFieldStartIndex = deltaColumnCount + p.groupByColumnStart
	}
	if extraFieldStartIndex != -1 {
		r.Fields = r.Fields[0:extraFieldStartIndex]
		for i := 0; i < len(r.Values); i++ {
			r.Values[i] = r.Values[i][0:extraFieldStartIndex]
		}
	}
	return nil
}
// sortSelectResult sorts the merged rows on the ORDER BY columns that
// were padded onto the query, honoring each column's ASC/DESC
// direction. stmt is unused in the body; kept for signature symmetry
// with the caller.
func sortSelectResult(p *SelectPlan, stmt *ast.SelectStmt, ret *mysql.Result) error {
	if !p.HasOrderBy() {
		return nil
	}
	resultFieldLength := len(ret.Fields)
	originColumnCount := p.GetColumnCount()
	deltaColumnCount := resultFieldLength - originColumnCount
	orderByStart, orderByDirections := p.GetOrderByColumnInfo()
	orderByStartIndex := deltaColumnCount + orderByStart
	var sortKeys []mysql.SortKey
	for i := 0; i < len(orderByDirections); i++ {
		sortKey := mysql.SortKey{}
		sortKey.Column = orderByStartIndex + i
		// true means DESC for this ORDER BY column.
		if orderByDirections[i] {
			sortKey.Direction = mysql.SortDesc
		} else {
			sortKey.Direction = mysql.SortAsc
		}
		sortKeys = append(sortKeys, sortKey)
	}
	return ret.SortWithoutColumnName(sortKeys)
}
// limitSelectResult applies the original LIMIT offset/count to the
// merged rows. Backends were queried with offset 0 and count
// (originOffset + originCount), so the real offset is applied here.
func limitSelectResult(p *SelectPlan, ret *mysql.Result) error {
	if !p.HasLimit() {
		return nil
	}
	start, count := p.GetLimitValue()
	total := int64(len(ret.Values))
	if start >= total {
		// Offset past the end: empty result.
		ret.RowDatas = ret.RowDatas[:0]
		ret.Values = ret.Values[:0]
		return nil
	}
	end := math.MinInt64(start+count, total)
	ret.Values = ret.Values[start:end]
	return nil
}
// GenerateSelectResultRowData generate raw RowData from values
// 根据value反向构造RowData (rebuilds the wire-format RowDatas from the
// decoded Values after merging has modified them)
// copy from server.buildResultset()
func GenerateSelectResultRowData(r *mysql.Result) error {
	r.RowDatas = nil
	for i, vs := range r.Values {
		if len(vs) != len(r.Fields) {
			return fmt.Errorf("row %d has %d column not equal %d", i, len(vs), len(r.Fields))
		}
		var row []byte
		for _, value := range vs {
			// build row values
			if value == nil {
				// 0xfb encodes NULL in the MySQL text-protocol row format.
				row = append(row, 0xfb)
			} else {
				b, err := formatValue(value)
				if err != nil {
					return err
				}
				// Non-NULL values are written as length-encoded strings.
				row = mysql.AppendLenEncStringBytes(row, b)
			}
		}
		r.RowDatas = append(r.RowDatas, row)
	}
	return nil
}
// generateMapKey builds a string key for a GROUP BY bucket by concatenating
// the encoded group-column values, each followed by a "+" separator.
// (copy from server.generateMapKey())
func generateMapKey(groupColumns []interface{}) (string, error) {
	sep, err := formatValue("+")
	if err != nil {
		return "", err
	}
	key := make([]byte, 0, 8)
	for _, col := range groupColumns {
		encoded, err := formatValue(col)
		if err != nil {
			return "", err
		}
		key = append(key, encoded...)
		key = append(key, sep...)
	}
	return string(key), nil
}
// copy from server.formatValue()
// formatValue encode value into a string format
func formatValue(value interface{}) ([]byte, error) {
if value == nil {
return hack.Slice("NULL"), nil
}
switch v := value.(type) {
case int8:
return strconv.AppendInt(nil, int64(v), 10), nil
case int16:
return strconv.AppendInt(nil, int64(v), 10), nil
case int32:
return strconv.AppendInt(nil, int64(v), 10), nil
case int64:
return strconv.AppendInt(nil, int64(v), 10), nil
case int:
return strconv.AppendInt(nil, int64(v), 10), nil
case uint8:
return strconv.AppendUint(nil, uint64(v), 10), nil
case uint16:
return strconv.AppendUint(nil, uint64(v), 10), nil
case uint32:
return strconv.AppendUint(nil, uint64(v), 10), nil
case uint64:
return strconv.AppendUint(nil, uint64(v), 10), nil
case uint:
return strconv.AppendUint(nil, uint64(v), 10), nil
case float32:
return strconv.AppendFloat(nil, float64(v), 'f', -1, 64), nil
case float64:
return strconv.AppendFloat(nil, float64(v), 'f', -1, 64), nil
case []byte:
return v, nil
case string:
return hack.Slice(v), nil
default:
return nil, fmt.Errorf("invalid type %T", value)
}
} | proxy/plan/merge_result.go | 0.573917 | 0.432303 | merge_result.go | starcoder |
package universe
import (
"math"
"sort"
"github.com/influxdata/flux"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/runtime"
)
// HistogramQuantileKind is the registered name of the histogramQuantile operation.
const HistogramQuantileKind = "histogramQuantile"
// DefaultUpperBoundColumnLabel is the column assumed to hold bucket upper
// bounds when the caller does not override it.
const DefaultUpperBoundColumnLabel = "le"
// HistogramQuantileOpSpec holds the user-facing arguments of
// histogramQuantile(): the quantile to compute, the column names for bucket
// counts / upper bounds / the output value, and the assumed minimum value of
// the first bucket used for interpolation.
type HistogramQuantileOpSpec struct {
	Quantile float64 `json:"quantile"`
	CountColumn string `json:"countColumn"`
	UpperBoundColumn string `json:"upperBoundColumn"`
	ValueColumn string `json:"valueColumn"`
	MinValue float64 `json:"minValue"`
}
// init registers the histogramQuantile builtin with the flux runtime, planner
// and executor: the package-level function value, its op spec constructor, the
// procedure spec and the transformation factory.
func init() {
	histogramQuantileSignature := runtime.MustLookupBuiltinType("universe", "histogramQuantile")
	runtime.RegisterPackageValue("universe", HistogramQuantileKind, flux.MustValue(flux.FunctionValue(HistogramQuantileKind, createHistogramQuantileOpSpec, histogramQuantileSignature)))
	flux.RegisterOpSpec(HistogramQuantileKind, newHistogramQuantileOp)
	plan.RegisterProcedureSpec(HistogramQuantileKind, newHistogramQuantileProcedure, HistogramQuantileKind)
	execute.RegisterTransformation(HistogramQuantileKind, createHistogramQuantileTransformation)
}
// createHistogramQuantileOpSpec parses the call arguments of
// histogramQuantile() into an op spec. "quantile" is required; the three
// column names default to the executor's value column label (or "le" for the
// upper bound column) and "minValue" defaults to the float zero value.
func createHistogramQuantileOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) {
	if err := a.AddParentFromArgs(args); err != nil {
		return nil, err
	}
	spec := new(HistogramQuantileOpSpec)

	quantile, err := args.GetRequiredFloat("quantile")
	if err != nil {
		return nil, err
	}
	spec.Quantile = quantile

	// stringArg reads an optional string argument, falling back to a default.
	stringArg := func(name, fallback string) (string, error) {
		v, ok, err := args.GetString(name)
		if err != nil {
			return "", err
		}
		if !ok {
			return fallback, nil
		}
		return v, nil
	}
	if spec.CountColumn, err = stringArg("countColumn", execute.DefaultValueColLabel); err != nil {
		return nil, err
	}
	if spec.UpperBoundColumn, err = stringArg("upperBoundColumn", DefaultUpperBoundColumnLabel); err != nil {
		return nil, err
	}
	if spec.ValueColumn, err = stringArg("valueColumn", execute.DefaultValueColLabel); err != nil {
		return nil, err
	}

	if minVal, ok, err := args.GetFloat("minValue"); err != nil {
		return nil, err
	} else if ok {
		spec.MinValue = minVal
	}
	return spec, nil
}
// newHistogramQuantileOp returns an empty op spec for deserialization.
func newHistogramQuantileOp() flux.OperationSpec {
	return new(HistogramQuantileOpSpec)
}
// Kind identifies this op spec as histogramQuantile.
func (s *HistogramQuantileOpSpec) Kind() flux.OperationKind {
	return HistogramQuantileKind
}
// HistogramQuantileProcedureSpec is the planner-side mirror of
// HistogramQuantileOpSpec; the fields carry the same meanings.
type HistogramQuantileProcedureSpec struct {
	plan.DefaultCost
	Quantile float64 `json:"quantile"`
	CountColumn string `json:"countColumn"`
	UpperBoundColumn string `json:"upperBoundColumn"`
	ValueColumn string `json:"valueColumn"`
	MinValue float64 `json:"minValue"`
}
// newHistogramQuantileProcedure converts the parsed op spec into the
// procedure spec consumed by the planner, copying every field across.
// An internal error is returned when the spec has an unexpected type.
func newHistogramQuantileProcedure(qs flux.OperationSpec, a plan.Administration) (plan.ProcedureSpec, error) {
	opSpec, ok := qs.(*HistogramQuantileOpSpec)
	if !ok {
		return nil, errors.Newf(codes.Internal, "invalid spec type %T", qs)
	}
	procSpec := HistogramQuantileProcedureSpec{
		Quantile:         opSpec.Quantile,
		CountColumn:      opSpec.CountColumn,
		UpperBoundColumn: opSpec.UpperBoundColumn,
		ValueColumn:      opSpec.ValueColumn,
		MinValue:         opSpec.MinValue,
	}
	return &procSpec, nil
}
// Kind identifies this procedure spec as histogramQuantile.
func (s *HistogramQuantileProcedureSpec) Kind() plan.ProcedureKind {
	return HistogramQuantileKind
}
// Copy returns a shallow copy of the spec (all fields are value types).
func (s *HistogramQuantileProcedureSpec) Copy() plan.ProcedureSpec {
	ns := new(HistogramQuantileProcedureSpec)
	*ns = *s
	return ns
}
// histogramQuantileTransformation executes histogramQuantile: for every input
// table it collapses the histogram buckets into one interpolated quantile row.
type histogramQuantileTransformation struct {
	execute.ExecutionNode
	d execute.Dataset // downstream dataset results are forwarded to
	cache execute.TableBuilderCache // builders for the output tables, keyed by group key
	spec HistogramQuantileProcedureSpec // copied by value at construction
}
// bucket is one histogram bin: the cumulative point count up to and including
// the bin, and the bin's upper bound.
type bucket struct {
	count float64
	upperBound float64
}
// createHistogramQuantileTransformation is the executor factory: it builds the
// table builder cache, the dataset, and the transformation wired together.
// An internal error is returned when the spec has an unexpected type.
func createHistogramQuantileTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) {
	procSpec, ok := spec.(*HistogramQuantileProcedureSpec)
	if !ok {
		return nil, nil, errors.Newf(codes.Internal, "invalid spec type %T", spec)
	}
	builderCache := execute.NewTableBuilderCache(a.Allocator())
	dataset := execute.NewDataset(id, mode, builderCache)
	return NewHistorgramQuantileTransformation(dataset, builderCache, procSpec), dataset, nil
}
// NewHistorgramQuantileTransformation builds the transformation from its
// procedure spec (copied by value) together with the dataset and builder
// cache it writes to.
// NOTE(review): "Historgram" is a typo, but the name is exported and must be
// kept for backward compatibility.
func NewHistorgramQuantileTransformation(
	d execute.Dataset,
	cache execute.TableBuilderCache,
	spec *HistogramQuantileProcedureSpec,
) execute.Transformation {
	return &histogramQuantileTransformation{
		d: d,
		cache: cache,
		spec: *spec,
	}
}
// RetractTable is required by execute.Transformation; retraction is not yet
// supported, so this is a no-op.
func (t histogramQuantileTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) error {
	// TODO
	return nil
}
// Process computes the quantile for one input table: it allocates the output
// builder keyed by the table's group key, validates that the count and
// upper-bound columns exist and are floats, reads every bucket into an
// in-memory CDF, sorts by upper bound when the input arrived out of order,
// and appends a single output row holding the group key plus the quantile.
func (t histogramQuantileTransformation) Process(id execute.DatasetID, tbl flux.Table) error {
	builder, created := t.cache.TableBuilder(tbl.Key())
	if !created {
		return errors.Newf(codes.FailedPrecondition, "histogramQuantile found duplicate table with key: %v", tbl.Key())
	}
	if err := execute.AddTableKeyCols(tbl.Key(), builder); err != nil {
		return err
	}
	// The output schema is the group-key columns plus one float value column.
	valueIdx, err := builder.AddCol(flux.ColMeta{
		Label: t.spec.ValueColumn,
		Type: flux.TFloat,
	})
	if err != nil {
		return err
	}
	countIdx := execute.ColIdx(t.spec.CountColumn, tbl.Cols())
	if countIdx < 0 {
		return errors.Newf(codes.FailedPrecondition, "table is missing count column %q", t.spec.CountColumn)
	}
	if tbl.Cols()[countIdx].Type != flux.TFloat {
		return errors.Newf(codes.FailedPrecondition, "count column %q must be of type float", t.spec.CountColumn)
	}
	upperBoundIdx := execute.ColIdx(t.spec.UpperBoundColumn, tbl.Cols())
	if upperBoundIdx < 0 {
		return errors.Newf(codes.FailedPrecondition, "table is missing upper bound column %q", t.spec.UpperBoundColumn)
	}
	if tbl.Cols()[upperBoundIdx].Type != flux.TFloat {
		return errors.Newf(codes.FailedPrecondition, "upper bound column %q must be of type float", t.spec.UpperBoundColumn)
	}
	// Read buckets
	var cdf []bucket
	sorted := true //track if the cdf was naturally sorted
	if err := tbl.Do(func(cr flux.ColReader) error {
		offset := len(cdf)
		// Grow cdf by number of rows
		l := offset + cr.Len()
		if cap(cdf) < l {
			// Over-allocate (2x) so later batches rarely force another copy.
			cpy := make([]bucket, l, l*2)
			// Copy existing buckets to new slice
			copy(cpy, cdf)
			cdf = cpy
		} else {
			cdf = cdf[:l]
		}
		for i := 0; i < cr.Len(); i++ {
			curr := i + offset
			prev := curr - 1
			b := bucket{}
			if vs := cr.Floats(countIdx); vs.IsValid(i) {
				b.count = vs.Value(i)
			} else {
				return errors.Newf(codes.FailedPrecondition, "unexpected null in the countColumn")
			}
			if vs := cr.Floats(upperBoundIdx); vs.IsValid(i) {
				b.upperBound = vs.Value(i)
			} else {
				return errors.Newf(codes.FailedPrecondition, "unexpected null in the upperBoundColumn")
			}
			cdf[curr] = b
			if prev >= 0 {
				// Track whether upper bounds arrived in ascending order.
				sorted = sorted && cdf[prev].upperBound <= cdf[curr].upperBound
			}
		}
		return nil
	}); err != nil {
		return err
	}
	// Only pay for the sort when the input was out of order.
	if !sorted {
		sort.Slice(cdf, func(i, j int) bool {
			return cdf[i].upperBound < cdf[j].upperBound
		})
	}
	q, err := t.computeQuantile(cdf)
	if err != nil {
		return err
	}
	// Emit the single result row: group key values followed by the quantile.
	if err := execute.AppendKeyValues(tbl.Key(), builder); err != nil {
		return err
	}
	if err := builder.AppendFloat(valueIdx, q); err != nil {
		return err
	}
	return nil
}
// computeQuantile returns the value at t.spec.Quantile for the cumulative
// histogram cdf (which must already be sorted by upper bound). The target rank
// is quantile * totalCount; the bracketing buckets are located and the result
// is linearly interpolated between their bounds, with t.spec.MinValue serving
// as the lower edge of the first bucket. Errors on an empty histogram or on
// counts that are not monotonically non-decreasing.
// NOTE(review): this is the only method on the type with a pointer receiver;
// consider unifying with the value receivers used elsewhere.
func (t *histogramQuantileTransformation) computeQuantile(cdf []bucket) (float64, error) {
	if len(cdf) == 0 {
		return 0, errors.New(codes.FailedPrecondition, "histogram is empty")
	}
	// Find rank index and check counts are monotonic
	prevCount := 0.0
	totalCount := cdf[len(cdf)-1].count
	rank := t.spec.Quantile * totalCount
	// rankIdx ends up as the last bucket whose cumulative count is <= rank,
	// or -1 when the rank falls below the first bucket.
	rankIdx := -1
	for i, b := range cdf {
		if b.count < prevCount {
			return 0, errors.New(codes.FailedPrecondition, "histogram records counts are not monotonic")
		}
		prevCount = b.count
		if rank >= b.count {
			rankIdx = i
		}
	}
	var (
		lowerCount,
		lowerBound,
		upperCount,
		upperBound float64
	)
	switch rankIdx {
	case -1:
		// Quantile is below the lowest upper bound, interpolate using the min value
		lowerCount = 0
		lowerBound = t.spec.MinValue
		upperCount = cdf[0].count
		upperBound = cdf[0].upperBound
	case len(cdf) - 1:
		// Quantile is above the highest upper bound, simply return it as it must be finite
		return cdf[len(cdf)-1].upperBound, nil
	default:
		lowerCount = cdf[rankIdx].count
		lowerBound = cdf[rankIdx].upperBound
		upperCount = cdf[rankIdx+1].count
		upperBound = cdf[rankIdx+1].upperBound
	}
	if rank == lowerCount {
		// No need to interpolate
		return lowerBound, nil
	}
	if math.IsInf(lowerBound, -1) {
		// We cannot interpolate with infinity
		return upperBound, nil
	}
	if math.IsInf(upperBound, 1) {
		// We cannot interpolate with infinity
		return lowerBound, nil
	}
	// Compute quantile using linear interpolation
	scale := (rank - lowerCount) / (upperCount - lowerCount)
	return lowerBound + (upperBound-lowerBound)*scale, nil
}
// UpdateWatermark forwards the watermark to the downstream dataset.
func (t histogramQuantileTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error {
	return t.d.UpdateWatermark(mark)
}
// UpdateProcessingTime forwards the processing time to the downstream dataset.
func (t histogramQuantileTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error {
	return t.d.UpdateProcessingTime(pt)
}
// Finish propagates stream completion (and any error) downstream.
func (t histogramQuantileTransformation) Finish(id execute.DatasetID, err error) {
	t.d.Finish(err)
}
package pvss
import (
"math/big"
logging "github.com/sirupsen/logrus"
"github.com/torusresearch/torus-common/common"
"github.com/torusresearch/torus-common/secp256k1"
"github.com/torusresearch/torus-node/pkcs7"
)
// pad PKCS#7-pads b to a 64-byte block so every coordinate hashed in the NIZK
// proofs contributes a fixed-width chunk to the challenge input.
// NOTE(review): on a padding error the input is returned unpadded after only
// logging — callers would then hash differently-sized data; confirm this
// failure mode is acceptable.
func pad(b []byte) []byte {
	byt, err := pkcs7.Pad(b, 64)
	if err != nil {
		logging.Error("Could not pad bytes")
		return b
	}
	return byt
}
// GenerateNIZKPK generates a non-interactive zero-knowledge proof (with the
// aim of increasing asynchronicity within AVSS) that the caller knows both s
// and the blinding r behind the commitments g^s and g^s*h^r. The challenge c
// is derived Fiat-Shamir style by hashing the padded X coordinates of g, h,
// g^s, g^s*h^r and the two announcements; the responses are
// u1 = v1 - c*s and u2 = v2 - c*r (mod the group order).
// Returns proof in the form of c, u1, u2.
func GenerateNIZKPK(s big.Int, r big.Int) (big.Int, big.Int, big.Int) {
	// create randomness for the announcements t1 = g^v1 and t2 = h^v2
	v1 := *RandomBigInt()
	v2 := *RandomBigInt()
	t1 := common.BigIntToPoint(secp256k1.Curve.ScalarBaseMult(v1.Bytes()))
	t2 := common.BigIntToPoint(secp256k1.Curve.ScalarMult(&secp256k1.H.X, &secp256k1.H.Y, v2.Bytes()))
	// create dlog (g^s) and Pedersen (g^s * h^r) commitments
	gs := common.BigIntToPoint(secp256k1.Curve.ScalarBaseMult(s.Bytes()))
	hr := common.BigIntToPoint(secp256k1.Curve.ScalarMult(&secp256k1.H.X, &secp256k1.H.Y, r.Bytes()))
	gshr := common.BigIntToPoint(secp256k1.Curve.Add(&gs.X, &gs.Y, &hr.X, &hr.Y))
	// prepare bytestring for hashing: padded X coordinates, fixed order
	bytesToBeHashed := append(pad(secp256k1.G.X.Bytes()), pad(secp256k1.H.X.Bytes())...)
	bytesToBeHashed = append(bytesToBeHashed, pad(gs.X.Bytes())...)
	bytesToBeHashed = append(bytesToBeHashed, pad(gshr.X.Bytes())...)
	bytesToBeHashed = append(bytesToBeHashed, pad(t1.X.Bytes())...)
	bytesToBeHashed = append(bytesToBeHashed, pad(t2.X.Bytes())...)
	c := new(big.Int)
	c.SetBytes(secp256k1.Keccak256(bytesToBeHashed))
	// u1 = v1 - c*s (mod the group order)
	u1 := new(big.Int)
	u1.Set(&v1)
	cs := new(big.Int)
	cs.Mul(c, &s)
	cs.Mod(cs, secp256k1.GeneratorOrder)
	u1.Sub(u1, cs)
	u1.Mod(u1, secp256k1.GeneratorOrder)
	// u2 = v2 - c*r (mod the group order)
	u2 := new(big.Int)
	u2.Set(&v2)
	cr := new(big.Int)
	cr.Mul(c, &r)
	cr.Mod(cr, secp256k1.GeneratorOrder)
	u2.Sub(u2, cr)
	u2.Mod(u2, secp256k1.GeneratorOrder)
	return *c, *u1, *u2
}
// GenerateNIZKPKWithCommitments generates the NIZK proof and additionally
// returns the commitments themselves: gs = g^s and gshr = g^s * h^r, so
// callers do not have to recompute them.
// Returns proof in the form of c, u1, u2, gs and gshr.
func GenerateNIZKPKWithCommitments(s big.Int, r big.Int) (c big.Int, u1 big.Int, u2 big.Int, gs common.Point, gshr common.Point) {
	c, u1, u2 = GenerateNIZKPK(s, r)
	gs = common.BigIntToPoint(secp256k1.Curve.ScalarBaseMult(s.Bytes()))
	hr := common.BigIntToPoint(secp256k1.Curve.ScalarMult(&secp256k1.H.X, &secp256k1.H.Y, r.Bytes()))
	gshr = common.BigIntToPoint(secp256k1.Curve.Add(&gs.X, &gs.Y, &hr.X, &hr.Y))
	return
}
func VerifyNIZKPK(c, u1, u2 big.Int, gs, gshr common.Point) bool {
//compute t1prime
t1prime := common.BigIntToPoint(secp256k1.Curve.ScalarBaseMult(u1.Bytes()))
gsc := common.BigIntToPoint(secp256k1.Curve.ScalarMult(&gs.X, &gs.Y, c.Bytes()))
t1prime = common.BigIntToPoint(secp256k1.Curve.Add(&t1prime.X, &t1prime.Y, &gsc.X, &gsc.Y))
//compute t2
t2prime := common.BigIntToPoint(secp256k1.Curve.ScalarMult(&secp256k1.H.X, &secp256k1.H.Y, u2.Bytes()))
//computing gshr/gs^C
neggsY := new(big.Int)
neggsY.Set(&gs.Y)
neggsY.Neg(neggsY)
neggsY.Mod(neggsY, secp256k1.FieldOrder)
gshrsubgs := common.BigIntToPoint(secp256k1.Curve.Add(&gshr.X, &gshr.Y, &gs.X, neggsY))
gshrsubgsC := common.BigIntToPoint(secp256k1.Curve.ScalarMult(&gshrsubgs.X, &gshrsubgs.Y, c.Bytes()))
t2prime = common.BigIntToPoint(secp256k1.Curve.Add(&t2prime.X, &t2prime.Y, &gshrsubgsC.X, &gshrsubgsC.Y)) // add them all up here
bytesToBeHashed := append(pad(secp256k1.G.X.Bytes()), pad(secp256k1.H.X.Bytes())...)
bytesToBeHashed = append(bytesToBeHashed, pad(gs.X.Bytes())...)
bytesToBeHashed = append(bytesToBeHashed, pad(gshr.X.Bytes())...)
bytesToBeHashed = append(bytesToBeHashed, pad(t1prime.X.Bytes())...)
bytesToBeHashed = append(bytesToBeHashed, pad(t2prime.X.Bytes())...)
cprime := new(big.Int)
cprime.SetBytes(secp256k1.Keccak256(bytesToBeHashed))
//compare c to RHS
if cprime.Cmp(&c) == 0 {
return true
} else {
return false
}
} | pvss/nizkpk.go | 0.676727 | 0.467757 | nizkpk.go | starcoder |
package dlx
// Matrix is a 2D representation of the exact cover problem consisting of 1s and 0s.
type Matrix interface {
	// AddRow allows the puzzle to be built up by adding one row at a time.
	// A row is an int slice of column indices for the "1" elements in the
	// matrix; the returned int is the index assigned to the row, which is the
	// key used in solution maps.
	AddRow([]int) int
	// Solve attempts to find a subset of the rows that contain a single 1 in
	// each of the columns of the puzzle. It returns when the first solution is
	// found: a map from row index to that row's column indices, plus a flag
	// reporting whether any solution was found.
	Solve() (map[int][]int, bool)
	// SolveComplete attempts to find all subsets of the rows that satisfy the
	// exact cover problem. It provides a solutions channel and a success channel.
	SolveComplete() (<-chan map[int][]int, <-chan bool)
}
// matrix is the 2D circular doubly linked list where a node has pointers in the
// up, down, left, and right directions to its neighbouring nodes.
type matrix struct {
	root *columnHeader // The left most column header that never has nodes assigned to it.
	columns []columnHeader // backing storage for all headers; columns[0] is the root
	solutionRows []matrixElement // rows in the current partial solution during search
	rowCount int // number of rows added so far via AddRow
}
// A matrix element is either a node or a columnHeader in the dlx matrix.
type matrixElement interface {
	// Index of the puzzle row the element belongs to (-1 for headers).
	rowIndex() int
	setRowIndex(int)
	// Header of the column the element is linked into.
	parentColumn() *columnHeader
	setParentColumn(*columnHeader)
	// First element of the row the element belongs to.
	parentRow() matrixElement
	setParentRow(matrixElement)
	// Circular doubly-linked neighbours in the four directions.
	up() matrixElement
	down() matrixElement
	left() matrixElement
	right() matrixElement
	setUp(matrixElement)
	setDown(matrixElement)
	setLeft(matrixElement)
	setRight(matrixElement)
}
// node is an element of the matrix signifying a one.
type node struct {
	rowInd int // index of the puzzle row this node belongs to
	parentColumnPtr *columnHeader // header of the column containing this node
	parentRowPtr matrixElement // first node of this node's row
	upPtr, downPtr, leftPtr, rightPtr matrixElement // circular neighbours
}
// rowIndex reports the puzzle row this node belongs to.
func (n *node) rowIndex() int {
	return n.rowInd
}
// setRowIndex records the puzzle row this node belongs to.
func (n *node) setRowIndex(i int) {
	n.rowInd = i
}
// parentColumn returns the header of the column containing this node.
func (n *node) parentColumn() *columnHeader {
	return n.parentColumnPtr
}
func (n *node) setParentColumn(c2 *columnHeader) {
	n.parentColumnPtr = c2
}
// parentRow returns the first node of this node's row.
func (n *node) parentRow() matrixElement{
	return n.parentRowPtr
}
func (n *node) setParentRow(e matrixElement) {
	n.parentRowPtr = e
}
// up/down/left/right return the circular neighbours of this node.
func (n *node) up() matrixElement {
	return n.upPtr
}
func (n *node) down() matrixElement {
	return n.downPtr
}
func (n *node) left() matrixElement {
	return n.leftPtr
}
func (n *node) right() matrixElement {
	return n.rightPtr
}
// setUp/setDown/setLeft/setRight rewire the circular neighbour pointers.
func (n *node) setUp(e matrixElement) {
	n.upPtr = e
}
func (n *node) setDown(e matrixElement) {
	n.downPtr = e
}
func (n *node) setLeft(e matrixElement) {
	n.leftPtr = e
}
func (n *node) setRight(e matrixElement) {
	n.rightPtr = e
}
// columnHeader is a single column in the matrix struct.
type columnHeader struct {
	node // column headers are just special nodes.
	columnLen int // number of nodes currently linked into this column
	columnInd int // external column index; -1 for the root header
}
// columnIndex reports the external (0-based) column index; -1 for the root.
func (c *columnHeader) columnIndex() int {
	return c.columnInd
}
func (c *columnHeader) setColumnIndex(i int) {
	c.columnInd = i
}
// len reports how many nodes are currently linked into this column.
func (c *columnHeader) len() int {
	return c.columnLen
}
func (c *columnHeader) incrementLen() {
	c.columnLen++
}
func (c *columnHeader) decrementLen() {
	c.columnLen--
}
// NewMatrix acts as a constructor, returning a functioning but empty matrix
// with columnCount puzzle columns. Internally one extra header — the root at
// columns[0], with column index -1 — is created; all headers are linked into a
// circular horizontal list and each points to itself vertically (no nodes yet).
func NewMatrix(columnCount int) Matrix {
	headerCount := columnCount + 1 // root header plus one per puzzle column
	headers := make([]columnHeader, headerCount)
	for i := range headers {
		h := &headers[i]
		h.setRowIndex(-1)
		h.setColumnIndex(i - 1) // the root gets index -1
		h.setParentColumn(h)
		h.setParentRow(&headers[0])
		// Vertically each empty column is a one-element circular list.
		h.setUp(h)
		h.setDown(h)
		// Horizontally the headers form a circular list; modular arithmetic
		// wraps the first header's left and the last header's right.
		h.setLeft(&headers[(i+headerCount-1)%headerCount])
		h.setRight(&headers[(i+1)%headerCount])
	}
	return &matrix{root: &headers[0], columns: headers}
}
// AddRow allows a constraint row in the form of an int slice of node indices
// to be added; the returned int is the index assigned to the new row. Each
// index becomes a node appended to the bottom of its column's circular
// vertical list, and all of the row's nodes are linked into a circular
// horizontal list.
func (m *matrix) AddRow(rowIndeces []int) int {
	nodeCount := len(rowIndeces)
	row := make([]node, nodeCount)
	for i, c := range rowIndeces {
		currentNode := &row[i]
		// c+1 skips the root header stored at columns[0].
		column := &m.columns[c+1]
		downNode := column.up() // the current bottom-most node in the column
		currentNode.setRowIndex(m.rowCount)
		currentNode.setParentColumn(column)
		currentNode.setParentRow(&row[0])
		column.incrementLen() // Increase the length of this column by 1
		// Set the currentNode up and down pointers
		currentNode.setUp(downNode)
		currentNode.setDown(column)
		// Set the neighbouring nodes above and below to point to currentNode
		downNode.setDown(currentNode)
		column.setUp(currentNode)
		if i == 0 {
			currentNode.setLeft(&row[nodeCount-1]) // left most node's left pointer is to the right most node
		} else {
			currentNode.setLeft(&row[i-1])
		}
		if i == nodeCount-1 {
			currentNode.setRight(&row[0]) // right most node's right point is to the left most node
		} else {
			currentNode.setRight(&row[i+1])
		}
	}
	m.rowCount++
	return m.rowCount - 1
}
// cover removes a column from the header list and, for every row with a node
// in that column, unlinks that row's other nodes from their columns (vertical
// pointers only). Row-internal horizontal links are left intact so uncover
// can restore everything.
func cover(column *columnHeader) {
	// Bypass the column in the horizontal header list
	leftColumn := column.left()
	rightColumn := column.right()
	leftColumn.setRight(rightColumn)
	rightColumn.setLeft(leftColumn)
	// For each node down the column
	downNode := column.down()
	for downNode != column {
		// For each node right
		rightNode := downNode.right()
		for rightNode != downNode {
			// Bypass the rightNode vertically and reduce the column len by one
			aboveRightNode := rightNode.up()
			belowRightNode := rightNode.down()
			aboveRightNode.setDown(belowRightNode)
			belowRightNode.setUp(aboveRightNode)
			rightNode.parentColumn().decrementLen()
			rightNode = rightNode.right()
		}
		downNode = downNode.down()
	}
}
// uncover is the exact inverse of cover: walking the column bottom-up and each
// row right-to-left (the opposite order of cover), it relinks every removed
// node into its column and finally restores the column header itself.
func uncover(column *columnHeader) {
	// For each node up the column (from the bottom)
	upNode := column.up()
	for upNode != column {
		leftNode := upNode.left()
		// For each node left
		for leftNode != upNode {
			// Put this node back into its column and increase the column len by one
			aboveLeftNode := leftNode.up()
			belowLeftNode := leftNode.down()
			aboveLeftNode.setDown(leftNode)
			belowLeftNode.setUp(leftNode)
			leftNode.parentColumn().incrementLen()
			leftNode = leftNode.left()
		}
		upNode = upNode.up()
	}
	// Put the column back into the columnHeaders linked list
	leftColumn := column.left()
	rightColumn := column.right()
	leftColumn.setRight(column)
	rightColumn.setLeft(column)
}
// getSolution snapshots the current partial solution as a map from each
// chosen row's index to the column indices that row covers.
func (m *matrix) getSolution() (solution map[int][]int) {
	solution = make(map[int][]int)
	for _, elem := range m.solutionRows {
		first := elem.parentRow()
		cols := []int{first.parentColumn().columnIndex()}
		// Walk the row's circular horizontal list, collecting every column.
		for n := first.right(); n != first; n = n.right() {
			cols = append(cols, n.parentColumn().columnIndex())
		}
		solution[first.rowIndex()] = cols
	}
	return solution
}
// search attempts to find an or all exact covers, dancing-links style: it
// picks the shortest remaining column, covers it, tries every row that fills
// it, recurses on the reduced matrix, and backtracks by uncovering. Solutions
// are sent over the results channel; the boolean channel reports, once this
// subtree is exhausted, whether any solution was found in it.
// NOTE(review): each recursive step is launched as a goroutine but the parent
// immediately blocks on childSuccessChannel, so execution is effectively
// sequential; and if the consumer stops draining `solutions` (as Solve does
// after its first receive) deeper sends block forever, leaking goroutines —
// confirm this is acceptable.
func (m *matrix) search(solutions chan map[int][]int, solutionFound chan bool) {
	matrixRoot := m.root
	// If only the root is left, a solution has been found
	if matrixRoot.right() == matrixRoot {
		solutions <- m.getSolution()
		solutionFound <- true
		return
	}
	// Pick the left most column of minimum length
	currentColumn := matrixRoot.right()
	minColumn := matrixRoot.right()
	for currentColumn != matrixRoot {
		if currentColumn.parentColumn().len() < minColumn.parentColumn().len() {
			minColumn = currentColumn
		}
		currentColumn = currentColumn.right()
	}
	cover(minColumn.parentColumn())
	// For each node down the covered minColumn
	childSolutionFound := false
	downNode := minColumn.down()
	for downNode != minColumn {
		m.solutionRows = append(m.solutionRows, downNode) // Add this row to the solution
		rightNode := downNode.right()
		// Cover the column of each node to the right of the current downNode
		for rightNode != downNode {
			cover(rightNode.parentColumn())
			rightNode = rightNode.right()
		}
		// Call search again on the remaining uncovered portion of the matrix.
		childSuccessChannel := make(chan bool)
		go m.search(solutions, childSuccessChannel)
		childSuccess := <-childSuccessChannel
		if childSuccess {
			childSolutionFound = true
		}
		// Uncover the columns (walking leftwards reverses the covering order)
		leftNode := downNode.left()
		for leftNode != downNode {
			uncover(leftNode.parentColumn())
			leftNode = leftNode.left()
		}
		downNode = downNode.down()
		m.solutionRows = m.solutionRows[:len(m.solutionRows)-1] // remove this row from the solution
	}
	uncover(minColumn.parentColumn())
	solutionFound <- childSolutionFound // send the finished signal over the channel when done.
	return
}
// Solve returns a solution and a boolean to indicate if a solution was found.
// The function will return true when the first viable solution is found or false
// after all possibilities have been searched.
// NOTE(review): the background search keeps running after the first solution
// is returned; subsequent sends on the unbuffered channels then block forever
// (goroutine leak) — confirm this is acceptable for one-shot use.
func (m *matrix) Solve() (map[int][]int, bool) {
	var solution map[int][]int
	solutions := make(chan map[int][]int)
	finished := make(chan bool)
	success := false
	go m.search(solutions, finished)
	select {
	case solution = <-solutions:
		success = true
	case success = <-finished:
		// finished fired first: either no solution exists (false), or one was
		// produced concurrently and is fetched below.
		if !success {
			solution = make(map[int][]int)
		} else {
			solution = <-solutions
		}
	}
	return solution, success
}
// SolveComplete returns two channels. One contains all possible solutions and the
// other returns either true or false at the end of the search to indicate if any
// solutions were found.
func (m *matrix) SolveComplete() (<-chan map[int][]int, <-chan bool) {
solutions := make(chan map[int][]int)
finished := make(chan bool)
go m.search(solutions, finished)
return solutions, finished
} | dlx.go | 0.688887 | 0.74958 | dlx.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.