code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package interp
import (
"fmt"
"math"
"strconv"
"strings"
)
// valueType enumerates the kinds of AWK values.
type valueType uint8

const (
	typeNil valueType = iota
	typeStr
	typeNum
)

// An AWK value (these are passed around by value)
type value struct {
	typ      valueType // Value type
	isNumStr bool      // An AWK "numeric string" from user input
	s        string    // String value (for typeStr)
	n        float64   // Numeric value (for typeNum and numeric strings)
}

// num builds a number-typed value holding n.
func num(n float64) value {
	v := value{typ: typeNum}
	v.n = n
	return v
}

// str builds a string-typed value holding s.
func str(s string) value {
	v := value{typ: typeStr}
	v.s = s
	return v
}

// numStr builds a value for a "numeric string" context: the string is
// kept as-is, and if it parses as a float (ignoring surrounding
// whitespace) the numeric form is cached and isNumStr is set.
func numStr(s string) value {
	n, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
	v := value{typ: typeStr, s: s, n: n}
	v.isNumStr = err == nil
	return v
}

// boolean converts a Go bool to the AWK numbers 1 (true) or 0 (false).
func boolean(b bool) value {
	var f float64
	if b {
		f = 1
	}
	return num(f)
}

// isTrueStr reports whether v is a "true string": string-typed but not
// a numeric string.
func (v value) isTrueStr() bool {
	return v.typ == typeStr && !v.isNumStr
}

// boolean returns the AWK truthiness of v: non-empty for true strings,
// non-zero for everything else (numbers and numeric strings).
func (v value) boolean() bool {
	if v.isTrueStr() {
		return v.s != ""
	}
	return v.n != 0
}
// str returns v's string form. Numbers use floatFormat, except that
// values with no fractional part are formatted as plain integers, and
// NaN/±Inf get fixed spellings. Nil values format as "".
func (v value) str(floatFormat string) string {
	switch v.typ {
	case typeNum:
		switch {
		case math.IsNaN(v.n):
			return "nan"
		case math.IsInf(v.n, 0):
			if v.n < 0 {
				return "-inf"
			}
			return "inf"
		// Guard the int64 range explicitly: converting an out-of-range
		// float to an integer type has an unspecified result in Go, so
		// the original unguarded int(v.n) could misbehave for huge
		// values (and truncated to 32 bits on 32-bit platforms).
		case v.n >= math.MinInt64 && v.n < math.MaxInt64 && v.n == float64(int64(v.n)):
			return strconv.FormatInt(int64(v.n), 10)
		default:
			return fmt.Sprintf(floatFormat, v.n)
		}
	case typeStr:
		return v.s
	default:
		return ""
	}
}
// num returns value's number value, converting from a string if
// necessary. Conversion failures yield 0.
func (v value) num() float64 {
	f, _ := v.numChecked()
	return f
}

// numChecked returns value's number value and a success flag,
// converting from a string if necessary. Nil (unset) values convert
// "successfully" to 0, matching AWK semantics for uninitialized vars.
func (v value) numChecked() (float64, bool) {
	switch v.typ {
	case typeNum:
		return v.n, true
	case typeStr:
		if v.isNumStr {
			// If it's a numeric string, we already have the float
			// value from the numStr() call
			return v.n, true
		}
		// Otherwise ensure string starts with a float and convert it
		return parseFloatPrefix(v.s)
	default:
		return 0, true
	}
}
// parseFloatPrefix parses a float at the beginning of s, tolerating
// trailing garbage (e.g. "1.5foo") and a dangling exponent ("1e"),
// both of which strconv.ParseFloat alone rejects. It reports the
// parsed value and whether a number was found.
func parseFloatPrefix(s string) (float64, bool) {
	pos := 0
	// Skip leading ASCII whitespace.
	for pos < len(s) && (s[pos] == ' ' || s[pos] == '\t' || s[pos] == '\n' || s[pos] == '\r') {
		pos++
	}
	start := pos

	// Mantissa: optional sign, digits, optional '.', more digits.
	if pos < len(s) && (s[pos] == '+' || s[pos] == '-') {
		pos++
	}
	sawDigit := false
	digits := func() {
		for pos < len(s) && s[pos] >= '0' && s[pos] <= '9' {
			sawDigit = true
			pos++
		}
	}
	digits() // integer part
	if pos < len(s) && s[pos] == '.' {
		pos++
	}
	digits() // fractional part
	if !sawDigit {
		return 0, false
	}

	// Exponent: only digits actually consumed extend the parsed span,
	// so "1e" and "1e+" fall back to just the mantissa.
	end := pos
	if pos < len(s) && (s[pos] == 'e' || s[pos] == 'E') {
		pos++
		if pos < len(s) && (s[pos] == '+' || s[pos] == '-') {
			pos++
		}
		for pos < len(s) && s[pos] >= '0' && s[pos] <= '9' {
			pos++
			end = pos
		}
	}

	f, err := strconv.ParseFloat(s[start:end], 64)
	return f, err == nil // err may be "value out of range"
}
package deepmerge
import (
"errors"
"fmt"
"reflect"
)
// DeepMerge holds the two maps to merge plus the traversal state that
// Merge initializes and merge mutates while recursing.
type DeepMerge struct {
	map1 interface{}
	map2 interface{}
	// Stores the keys that we have processed as we iterate the maps
	seenKeys map[interface{}]bool
	// Keeps track of nested parent keys
	parentKey reflect.Value
}
// Merge merges 2 maps by applying a fptr (function pointer) to the
// values present under the same key.
// Example:
//	d := &DeepMerge{ map1 : some_map1, map2: some_map2}
//	d.Merge(&my_func)
// where my_func := func(a,b int) int { return a + b }
func (d DeepMerge) Merge(fptr interface{}) (interface{}, error) {
	// If either map is missing there is nothing to combine; returning
	// early also fixes a panic when BOTH maps were nil (the original
	// fell through to reflect.ValueOf(nil).Type()).
	if d.map1 == nil {
		return d.map2, nil
	}
	if d.map2 == nil {
		return d.map1, nil
	}
	// The maps must share a dynamic type for MakeMap/MapIndex below to
	// be meaningful.
	if reflect.TypeOf(d.map1) != reflect.TypeOf(d.map2) {
		// Lowercased per Go error-string convention.
		return nil, errors.New("maps have to be of the same type")
	}
	d.seenKeys = make(map[interface{}]bool)
	return d.merge(d.map1, d.map2, fptr), nil
}
// merge recursively combines m1 and m2: scalar values present in both
// maps are combined by calling fptr; values present in only one map are
// copied through; nested maps are merged by recursing.
func (d DeepMerge) merge(m1, m2, fptr interface{}) interface{} {
	// Lets keep track of the keys from the maps were iterating
	var allKeys []reflect.Value
	m1_t := reflect.ValueOf(m1)
	m2_t := reflect.ValueOf(m2)
	// This will store our final merged map
	ret_map := reflect.MakeMap(m1_t.Type())
	cp_m1 := reflect.New(m1_t.Type()).Elem()
	cp_m2 := reflect.New(m2_t.Type()).Elem()
	// Copy over the map values to the placeholder maps
	// that will perform the fptr function operations
	translateRecursive(cp_m1, m1_t)
	translateRecursive(cp_m2, m2_t)
	// Lets find out what type of function we have
	// so that we can call it
	fn := reflect.ValueOf(fptr).Elem()
	// allKeys may contain duplicates (keys present in both maps); the
	// seenKeys check below is what is meant to prevent double work.
	allKeys = append(allKeys, cp_m1.MapKeys()...)
	allKeys = append(allKeys, cp_m2.MapKeys()...)
	// For each key we'll run the function block
	for _, k := range allKeys {
		// If we've already processed the key we'll skip
		if _, ok := d.seenKeys[k.Interface()]; ok {
			continue
		}
		// If we're traversing a parent_key, then we'll need to add that key
		// NOTE(review): the key recorded below ("child_parent") can never
		// match the plain-key lookup above, so duplicate keys inside
		// nested maps are processed twice — confirm intent.
		// NOTE(review): parentKey.Len() panics for key kinds that have
		// no length (e.g. int keys); presumably keys are always strings
		// — verify against callers.
		if (d.parentKey.IsValid()) && (d.parentKey.Len() != 0) {
			keyplus := fmt.Sprintf("%v_%v", k.Interface(), d.parentKey.Interface())
			d.seenKeys[keyplus] = true
		} else {
			d.seenKeys[k.Interface()] = true
		}
		// Get the value of the key from each map
		v := cp_m1.MapIndex(k)
		o_v := cp_m2.MapIndex(k)
		// If we have a map, lets iterative through
		// recursively
		if v.Kind() == reflect.Map && o_v.Kind() == reflect.Map {
			d.parentKey = k
			yy := d.merge(v.Interface(), o_v.Interface(), fptr)
			ret_map.SetMapIndex(k, reflect.ValueOf(yy))
		} else {
			// If any of the keys traversed is invalid
			// we'll ignore it and update the map to
			// the values of the other key
			if !v.IsValid() && o_v.IsValid() {
				ret_map.SetMapIndex(k, o_v)
				continue
			}
			if !o_v.IsValid() && v.IsValid() {
				ret_map.SetMapIndex(k, v)
				continue
			}
			// Everything looks good to call the function pointer
			in := []reflect.Value{v, o_v}
			zz := fn.Call(in)
			// We'll only take the first return value
			ret_map.SetMapIndex(k, zz[0])
		}
	}
	return ret_map.Interface()
}
func translateRecursive(copy, original reflect.Value) {
switch original.Kind() {
// The first cases handle nested structures and translate them recursively
// If it is a pointer we need to unwrap and call once again
case reflect.Ptr:
// To get the actual value of the original we have to call Elem()
// At the same time this unwraps the pointer so we don't end up in
// an infinite recursion
originalValue := original.Elem()
// Check if the pointer is nil
if !originalValue.IsValid() {
return
}
// Allocate a new object and set the pointer to it
copy.Set(reflect.New(originalValue.Type()))
// Unwrap the newly created pointer
translateRecursive(copy.Elem(), originalValue)
// If it is an interface (which is very similar to a pointer), do basically the
// same as for the pointer. Though a pointer is not the same as an interface so
// note that we have to call Elem() after creating a new object because otherwise
// we would end up with an actual pointer
case reflect.Interface:
// Get rid of the wrapping interface
originalValue := original.Elem()
// Create a new object. Now new gives us a pointer, but we want the value it
// points to, so we have to call Elem() to unwrap it
copyValue := reflect.New(originalValue.Type()).Elem()
translateRecursive(copyValue, originalValue)
copy.Set(copyValue)
// If it is a struct we translate each field
case reflect.Struct:
for i := 0; i < original.NumField(); i += 1 {
if copy.Field(i).CanSet() {
translateRecursive(copy.Field(i), original.Field(i))
} else {
fmt.Printf("WARNING: Cannot Set unexported fields. Type:%T ,Value:%v will be set to it's zero value.\n", original.Type().Field(i).Name, original.Field(i))
}
}
// If it is a slice we create a new slice and translate each element
case reflect.Slice:
copy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap()))
for i := 0; i < original.Len(); i += 1 {
translateRecursive(copy.Index(i), original.Index(i))
}
// If it is a map we create a new map and translate each value
case reflect.Map:
copy.Set(reflect.MakeMap(original.Type()))
for _, key := range original.MapKeys() {
originalValue := original.MapIndex(key)
// New gives us a pointer, but again we want the value
copyValue := reflect.New(originalValue.Type()).Elem()
translateRecursive(copyValue, originalValue)
copy.SetMapIndex(key, copyValue)
}
// And everything else will simply be taken from the original
default:
copy.Set(original)
}
} | deepmerge.go | 0.700792 | 0.453625 | deepmerge.go | starcoder |
package discover
import (
"net"
"time"
"github.com/ThinkiumGroup/go-common"
)
type (
	// packetSort is implemented by every discovery packet kind:
	// handleSort processes an incoming packet, nameSort names it for logs.
	packetSort interface {
		handleSort(t *udp_srt, from *net.UDPAddr, fromID common.NodeID, mac []byte) error
		nameSort() string
	}

	// pingSort probes a remote node and advertises our own endpoint.
	pingSort struct {
		Version    uint
		ChainID    common.ChainID
		NetType    common.NetType
		From, To   rpcEndpoint
		Expiration uint64
	}

	// pongSort is the reply to pingSort.
	pongSort struct {
		Version uint
		ChainID common.ChainID
		NetType common.NetType
		// This field should mirror the UDP envelope address
		// of the ping packet, which provides a way to discover
		// the external address (after NAT).
		To rpcEndpoint
		ReplyTok   []byte // This contains the hash of the ping packet.
		Expiration uint64 // Absolute timestamp at which the packet becomes invalid.
	}

	// findnodeSort is a query for nodes close to the given target.
	findnodeSort struct {
		Version    uint
		ChainID    common.ChainID
		NetType    common.NetType
		Expiration uint64
	}

	// neighborsSort is the reply to findnodeSort.
	neighborsSort struct {
		Version        uint
		ChainID        common.ChainID
		NetType        common.NetType
		IsInvalidchain bool
		Nodes          []rpcNode
		Expiration     uint64
	}
)
// handleSort processes an incoming ping: after validating expiration,
// protocol version and network type, it answers with a pong that echoes
// the sender's UDP envelope address, then (re-)registers the sender in
// the node table.
func (req *pingSort) handleSort(t *udp_srt, from *net.UDPAddr, fromID common.NodeID, mac []byte) error {
	if expired(req.Expiration) {
		return errExpired
	}
	if req.Version != srtVersion {
		return errVersion
	}
	if req.NetType != t.netType {
		return errNetType
	}
	// Reply immediately; ReplyTok carries the ping's hash so the peer
	// can match our pong to its request.
	t.Send(from, pongPacket, &pongSort{
		Version:    srtVersion,
		ChainID:    t.chainId,
		NetType:    t.netType,
		To:         makeEndpoint(from, req.From.TCP),
		ReplyTok:   mac,
		Expiration: uint64(time.Now().Add(expiration).Unix()),
	})
	t.handleReply(fromID, pingPacket, req)
	// Add the node to the table. Before doing so, ensure that we have a recent enough pong
	// recorded in the database so their findnode requests will be accepted later.
	n := NewNode(fromID, from.IP, uint16(from.Port), req.From.TCP, req.From.RPC)
	if time.Since(t.db.lastPongReceived(fromID)) > nodeDBNodeExpiration {
		t.SendPing(fromID, from, func() { t.addThroughPing(req.ChainID, n) })
	} else {
		t.addThroughPing(req.ChainID, n)
	}
	t.db.updateLastPingReceived(fromID, time.Now())
	return nil
}

// nameSort returns the packet's name for logging.
func (req *pingSort) nameSort() string { return "SORTPING" }
// handleSort validates an incoming pong and matches it against the ping
// we sent; unsolicited pongs are rejected.
func (req *pongSort) handleSort(t *udp_srt, from *net.UDPAddr, fromID common.NodeID, mac []byte) error {
	switch {
	case expired(req.Expiration):
		return errExpired
	case req.Version != srtVersion:
		return errVersion
	case req.NetType != t.netType:
		return errNetType
	}
	if !t.handleReply(fromID, pongPacket, req) {
		return errUnsolicitedReply
	}
	t.db.updateLastPongReceived(fromID, time.Now())
	return nil
}

// nameSort returns the packet's name for logging.
func (req *pongSort) nameSort() string { return "SORTPONG" }
// handleSort answers a findnode query: for each chain it returns known
// nodes, chunked into neighbors packets of at most maxNeighbors each so
// every reply stays under the UDP size limit. Only bonded peers (ones
// with a recent endpoint-proof pong) are served.
func (req *findnodeSort) handleSort(t *udp_srt, from *net.UDPAddr, fromID common.NodeID, mac []byte) error {
	if expired(req.Expiration) {
		return errExpired
	}
	if req.Version != srtVersion {
		return errVersion
	}
	if req.NetType != t.netType {
		return errNetType
	}
	if !t.db.hasBond(fromID) {
		// No endpoint proof pong exists, we don't process the packet. This prevents an
		// attack vector where the discovery protocol could be used to amplify traffic in a
		// DDOS attack. A malicious actor would send a findnode request with the IP address
		// and UDP port of the target as the source address. The recipient of the findnode
		// packet would then send a neighbors packet (which is a much bigger packet than
		// findnode) to the victim.
		return errUnknownNode
	}
	closest := t.benchRow(MaxPeersPerChain)
	for c, cl := range closest {
		p := neighborsSort{Version: srtVersion, ChainID: c, NetType: t.netType, Expiration: uint64(time.Now().Add(expiration).Unix())}
		var sent bool
		// Send neighbors in chunks with at most maxNeighbors per packet
		// to stay below the 1280 byte limit.
		for _, n := range cl {
			if n == nil {
				continue
			}
			//log.Debug("SORT UDP closest chianid,node,maxNeighbors", c, n.String(), maxNeighbors)
			// Skip nodes advertising low UDP ports; presumably filters
			// unset/privileged ports — TODO confirm intent.
			if n.UDP <= 1024 {
				continue
			}
			if CheckRelayIP(from.IP, n.IP) == nil {
				p.Nodes = append(p.Nodes, nodeToRPC(n))
			}
			if len(p.Nodes) == maxNeighbors {
				t.Send(from, neighborsPacket, &p)
				p.Nodes = p.Nodes[:0]
				sent = true
			}
		}
		// Flush the remainder; an empty reply is still sent when nothing
		// was sent at all, so the requester always gets an answer.
		if len(p.Nodes) > 0 || !sent {
			t.Send(from, neighborsPacket, &p)
		}
	}
	return nil
}

// nameSort returns the packet's name for logging.
func (req *findnodeSort) nameSort() string { return "SORTFINDNODE" }
func (req *neighborsSort) handleSort(t *udp_srt, from *net.UDPAddr, fromID common.NodeID, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
if req.Version != srtVersion {
return errVersion
}
if req.NetType != t.netType {
return errNetType
}
if !t.handleReply(fromID, neighborsPacket, req) {
return errUnsolicitedReply
}
return nil
}
func (req *neighborsSort) nameSort() string { return "SORTNEIGHBORS" } | network/discover/sortpacket.go | 0.579043 | 0.412471 | sortpacket.go | starcoder |
package types
import (
"fmt"
"regexp"
"strings"
)
// String type for clause attribute evaluation
type String string

// NewString creates a String from the given value; it errors unless the
// value's dynamic type is string.
func NewString(value interface{}) (*String, error) {
	str, ok := value.(string)
	if ok {
		newStr := String(str)
		return &newStr, nil
	}
	return nil, fmt.Errorf("%v: cant cast to a string", ErrWrongTypeAssertion)
}

// String implement Stringer interface
func (s String) String() string {
	return string(s)
}

// stringOperator takes the first element from the slice and passes to fn for processing.
// we ignore any additional elements if they exist. An empty slice yields false.
func stringOperator(value []string, fn func(string) bool) bool {
	if len(value) > 0 {
		return fn(value[0])
	}
	return false
}
// StartsWith reports whether s begins with the first element of value.
func (s String) StartsWith(value []string) bool {
	return stringOperator(value, func(prefix string) bool {
		return strings.HasPrefix(string(s), prefix)
	})
}

// EndsWith reports whether s ends with the first element of value.
func (s String) EndsWith(value []string) bool {
	return stringOperator(value, func(suffix string) bool {
		return strings.HasSuffix(string(s), suffix)
	})
}
// Match reports whether s matches the regular expression supplied as
// the first element of value. An empty slice or an invalid pattern
// yields false.
//
// Fix: the original called regexp.MatchString(string(s), c), using the
// subject string as the pattern and the clause value as the text —
// the reverse of MatchString's (pattern, s) contract and of what the
// doc comment describes.
func (s String) Match(value []string) bool {
	return stringOperator(value, func(pattern string) bool {
		matched, err := regexp.MatchString(pattern, string(s))
		return err == nil && matched
	})
}
// Contains reports whether s contains the first element of value.
func (s String) Contains(value []string) bool {
	return stringOperator(value, func(sub string) bool {
		return strings.Contains(string(s), sub)
	})
}

// EqualSensitive reports whether s equals the first element of value,
// respecting case.
func (s String) EqualSensitive(value []string) bool {
	return stringOperator(value, func(other string) bool {
		return other == string(s)
	})
}

// Equal reports whether s equals the first element of value, ignoring
// case (Unicode case-folding).
func (s String) Equal(value []string) bool {
	return stringOperator(value, func(other string) bool {
		return strings.EqualFold(other, string(s))
	})
}
// GreaterThan checks if the string is greater than the value.
// Comparison is byte-wise lexicographic after strings.ToLower (not
// locale-aware collation).
func (s String) GreaterThan(value []string) bool {
	return stringOperator(value, func(c string) bool {
		return strings.ToLower(string(s)) > strings.ToLower(c)
	})
}

// GreaterThanEqual checks if the string is greater or equal than the value
// (case-insensitive, byte-wise lexicographic).
func (s String) GreaterThanEqual(value []string) bool {
	return stringOperator(value, func(c string) bool {
		return strings.ToLower(string(s)) >= strings.ToLower(c)
	})
}

// LessThan checks if the string is less than the value
// (case-insensitive, byte-wise lexicographic).
func (s String) LessThan(value []string) bool {
	return stringOperator(value, func(c string) bool {
		return strings.ToLower(string(s)) < strings.ToLower(c)
	})
}

// LessThanEqual checks if the string is less or equal than the value
// (case-insensitive, byte-wise lexicographic).
func (s String) LessThanEqual(value []string) bool {
	return stringOperator(value, func(c string) bool {
		return strings.ToLower(string(s)) <= strings.ToLower(c)
	})
}
// In checks if the string exist in slice of strings (value)
func (s String) In(value []string) bool {
for _, x := range value {
if strings.EqualFold(string(s), x) {
return true
}
}
return false
} | types/string.go | 0.728941 | 0.451024 | string.go | starcoder |
package nifi
import (
"encoding/json"
)
// VersionedFlowEntity struct for VersionedFlowEntity
type VersionedFlowEntity struct {
VersionedFlow *VersionedFlowDTO `json:"versionedFlow,omitempty"`
}
// NewVersionedFlowEntity instantiates a new VersionedFlowEntity object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewVersionedFlowEntity() *VersionedFlowEntity {
this := VersionedFlowEntity{}
return &this
}
// NewVersionedFlowEntityWithDefaults instantiates a new VersionedFlowEntity object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewVersionedFlowEntityWithDefaults() *VersionedFlowEntity {
this := VersionedFlowEntity{}
return &this
}
// GetVersionedFlow returns the VersionedFlow field value if set, and
// the zero value otherwise.
func (o *VersionedFlowEntity) GetVersionedFlow() VersionedFlowDTO {
	if flow, ok := o.GetVersionedFlowOk(); ok {
		return *flow
	}
	var zero VersionedFlowDTO
	return zero
}

// GetVersionedFlowOk returns a tuple with the VersionedFlow field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *VersionedFlowEntity) GetVersionedFlowOk() (*VersionedFlowDTO, bool) {
	if o == nil || o.VersionedFlow == nil {
		return nil, false
	}
	return o.VersionedFlow, true
}

// HasVersionedFlow returns a boolean if a field has been set.
func (o *VersionedFlowEntity) HasVersionedFlow() bool {
	_, ok := o.GetVersionedFlowOk()
	return ok
}

// SetVersionedFlow gets a reference to the given VersionedFlowDTO and assigns it to the VersionedFlow field.
func (o *VersionedFlowEntity) SetVersionedFlow(v VersionedFlowDTO) {
	o.VersionedFlow = &v
}
// MarshalJSON serializes the entity, omitting an unset VersionedFlow.
func (o VersionedFlowEntity) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{}, 1)
	if o.VersionedFlow != nil {
		out["versionedFlow"] = o.VersionedFlow
	}
	return json.Marshal(out)
}
// NullableVersionedFlowEntity wraps a VersionedFlowEntity pointer with
// an explicit "has been set" flag, distinguishing unset from JSON null.
type NullableVersionedFlowEntity struct {
	value *VersionedFlowEntity
	isSet bool
}

// Get returns the wrapped value (may be nil).
func (v NullableVersionedFlowEntity) Get() *VersionedFlowEntity {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableVersionedFlowEntity) Set(val *VersionedFlowEntity) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value has been stored (even a nil one).
func (v NullableVersionedFlowEntity) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableVersionedFlowEntity) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableVersionedFlowEntity returns a wrapper already marked set.
func NewNullableVersionedFlowEntity(val *VersionedFlowEntity) *NullableVersionedFlowEntity {
	return &NullableVersionedFlowEntity{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (null when nil).
func (v NullableVersionedFlowEntity) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks it set.
func (v *NullableVersionedFlowEntity) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package vector
import "math"
// Vector describes a mathematical vector, backed by a []float64.
type Vector struct {
	data []float64
}

// NewVector creates a new vector from the given components.
// It panics when fewer than 2 components are provided.
func NewVector(data ...float64) Vector {
	return NewVectorFromFloat64Array(data)
}

// NewVectorFromFloat64Array creates a new vector backed directly by the
// given slice (the slice is not copied).
// It panics when the slice has fewer than 2 elements.
func NewVectorFromFloat64Array(data []float64) Vector {
	if len(data) >= 2 {
		return Vector{data: data}
	}
	panic("vector should have at least 2 dimensions")
}
// Dimension returns the number of components in the vector.
func (v Vector) Dimension() int {
	return len(v.data)
}

// Length returns the Euclidean norm of the vector.
func (v Vector) Length() float64 {
	return math.Sqrt(v.DotProduct(v))
}

// ToFloat64Array exposes the underlying []float64 backing the vector.
func (v Vector) ToFloat64Array() []float64 {
	return v.data
}
// Add returns the component-wise sum of v and v1 as a new vector;
// neither operand is mutated.
// It panics when the vectors have different dimensions.
func (v Vector) Add(v1 Vector) Vector {
	if v.Dimension() != v1.Dimension() {
		// Fixed message: previously said "vectors should have at least
		// 2 points" (copy-pasted from the constructors), which did not
		// describe the actual failure; now consistent with Subtract
		// and DotProduct.
		panic("vectors should have same dimension")
	}
	sum := make([]float64, v.Dimension())
	for i := range v.data {
		sum[i] = v.data[i] + v1.data[i]
	}
	return Vector{data: sum}
}
// Subtract returns v - v1 component-wise as a new vector; neither
// operand is mutated. Panics when the dimensions differ.
func (v Vector) Subtract(v1 Vector) Vector {
	if v.Dimension() != v1.Dimension() {
		panic("vectors should have same dimension")
	}
	diff := make([]float64, len(v.data))
	for i, x := range v.data {
		diff[i] = x - v1.data[i]
	}
	return Vector{data: diff}
}

// ScalarMultiply returns v scaled by scalar as a new vector; v is not
// mutated.
func (v Vector) ScalarMultiply(scalar float64) Vector {
	scaled := make([]float64, len(v.data))
	for i, x := range v.data {
		scaled[i] = x * scalar
	}
	return Vector{data: scaled}
}
// DotProduct do dot product of current vector with v1.
// It don't mutate current vector but return a new one.
// Also this function panics when vector dimensions are not the same.
func (v Vector) DotProduct(v1 Vector) float64 {
if v.Dimension() != v1.Dimension() {
panic("vectors should have same dimension")
}
var output float64
for k := range v.data {
output += v.data[k] * v1.data[k]
}
return output
}
// CrossProduct do cross product of current vector with v1.
// It don't mutate current vector but return a new one.
// Also this function panics when vector dimension are not equal to 3.
func (v Vector) CrossProduct(v1 Vector) Vector {
if v.Dimension() == 3 && v1.Dimension() == 3 {
output := make([]float64, 3)
output[0] = v.data[1]*v1.data[2] - v.data[2]*v1.data[1]
output[1] = v.data[2]*v1.data[0] - v.data[0]*v1.data[2]
output[2] = v.data[0]*v1.data[1] - v.data[1]*v1.data[0]
return Vector{data: output}
}
panic("operation supported only for 3 dimension vectors")
} | vector.go | 0.87938 | 0.746347 | vector.go | starcoder |
package pt
import (
"fmt"
"image"
_ "image/jpeg"
"image/png"
"math"
"math/rand"
"os"
"path"
"strconv"
"time"
)
// Radians converts an angle in degrees to radians.
func Radians(degrees float64) float64 {
	return degrees * math.Pi / 180
}

// Degrees converts an angle in radians to degrees.
func Degrees(radians float64) float64 {
	return radians * 180 / math.Pi
}
// Cone returns a unit vector sampled within a cone of half-angle theta
// around direction. u and v are random samples (u shapes the angular
// spread, v the azimuth); rnd drives the auxiliary axis choice. When
// theta is below EPS the axis itself is returned.
// NOTE(review): the Acos-based shaping of theta determines the angular
// distribution — confirm it matches the renderer's intended sampling.
func Cone(direction Vector, theta, u, v float64, rnd *rand.Rand) Vector {
	if theta < EPS {
		return direction
	}
	// Shrink theta by a u-driven factor so samples spread over the cone.
	theta = theta * (1 - (2 * math.Acos(u) / math.Pi))
	m1 := math.Sin(theta)
	m2 := math.Cos(theta)
	a := v * 2 * math.Pi
	// Build a basis (s, t) perpendicular to direction from a random
	// helper vector q.
	q := RandomUnitVector(rnd)
	s := direction.Cross(q)
	t := direction.Cross(s)
	d := Vector{}
	d = d.Add(s.MulScalar(m1 * math.Cos(a)))
	d = d.Add(t.MulScalar(m1 * math.Sin(a)))
	d = d.Add(direction.MulScalar(m2))
	d = d.Normalize()
	return d
}
// LoadImage opens and decodes the image at path (formats registered via
// this file's image decoder imports, e.g. JPEG/PNG).
func LoadImage(path string) (image.Image, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	im, _, err := image.Decode(file)
	return im, err
}

// SavePNG writes im to path in PNG format, creating or truncating the
// file.
func SavePNG(path string, im image.Image) error {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()
	return png.Encode(file, im)
}
// Median returns the median of items, which must already be sorted
// (unsorted input yields the middle element(s), not the true median).
// An empty slice yields 0.
func Median(items []float64) float64 {
	n := len(items)
	if n == 0 {
		return 0
	}
	mid := n / 2
	if n%2 == 1 {
		return items[mid]
	}
	return (items[mid-1] + items[mid]) / 2
}
func DurationString(d time.Duration) string {
h := int(d.Hours())
m := int(d.Minutes()) % 60
s := int(d.Seconds()) % 60
return fmt.Sprintf("%d:%02d:%02d", h, m, s)
}
func NumberString(x float64) string {
suffixes := []string{"", "k", "M", "G"}
for _, suffix := range suffixes {
if x < 1000 {
return fmt.Sprintf("%.1f%s", x, suffix)
}
x /= 1000
}
return fmt.Sprintf("%.1f%s", x, "T")
}
// ParseFloats converts each string to a float64; unparsable entries
// become 0 (parse errors are deliberately ignored).
func ParseFloats(items []string) []float64 {
	out := make([]float64, len(items))
	for i := range items {
		out[i], _ = strconv.ParseFloat(items[i], 64)
	}
	return out
}

// ParseInts converts each string to an int, with the base inferred from
// the prefix ("0x", "0", …); unparsable entries become 0.
func ParseInts(items []string) []int {
	out := make([]int, len(items))
	for i := range items {
		n, _ := strconv.ParseInt(items[i], 0, 0)
		out[i] = int(n)
	}
	return out
}
// RelativePath resolves path2 relative to the directory of path1.
func RelativePath(path1, path2 string) string {
	dir, _ := path.Split(path1)
	return path.Join(dir, path2)
}

// Fract returns the fractional part of x (the sign follows x).
func Fract(x float64) float64 {
	_, frac := math.Modf(x)
	return frac
}

// Clamp limits x to the closed interval [lo, hi].
func Clamp(x, lo, hi float64) float64 {
	switch {
	case x < lo:
		return lo
	case x > hi:
		return hi
	default:
		return x
	}
}

// ClampInt limits x to the closed interval [lo, hi].
func ClampInt(x, lo, hi int) int {
	switch {
	case x < lo:
		return lo
	case x > hi:
		return hi
	default:
		return x
	}
}
package main
import (
"fmt"
"strconv"
)
//A basic Person struct
type Person struct {
name string
age int
}
//Some slices of ints, floats and Persons
type IntSlice []int
type Float32Slice []float32
type PersonSlice []Person
type MaxInterface interface {
// Len is the number of elements in the collection.
Len() int
//Get returns the element with index i in the collection
Get(i int) interface{}
//Bigger returns whether the element at index i is bigger that the j one
Bigger(i, j int) bool
}
//Len implementation for our three types
func (x IntSlice) Len() int {return len(x)}
func (x Float32Slice) Len() int {return len(x)}
func (x PersonSlice) Len() int {return len(x)}
//Get implementation for our three types
func(x IntSlice) Get(i int) interface{} {return x[i]}
func(x Float32Slice) Get(i int) interface{} {return x[i]}
func(x PersonSlice) Get(i int) interface{} {return x[i]}
//Bigger implementation for our three types
func (x IntSlice) Bigger(i, j int) bool {
if x[i] > x[j] { //comparing two int
return true
}
return false
}
func (x Float32Slice) Bigger(i, j int) bool {
if x[i] > x[j] { //comparing two float32
return true
}
return false
}
func (x PersonSlice) Bigger(i, j int) bool {
if x[i].age > x[j].age { //comparing two Person ages
return true
}
return false
}
//Person implements fmt.Stringer interface
func (p Person) String() string {
return "(name: " + p.name + " - age: "+strconv.Itoa(p.age)+ " years)"
}
/*
Returns a bool and a value
- The bool is set to true if there is a MAX in the collection
- The value is set to the MAX value or nil, if the bool is false
*/
func Max(data MaxInterface) (ok bool, max interface{}) {
if data.Len() == 0{
return false, nil //no elements in the collection, no Max value
}
if data.Len() == 1{ //Only one element, return it alongside with true
return true, data.Get(1)
}
max = data.Get(0)//the first element is the max for now
m := 0
for i:=1; i<data.Len(); i++ {
if data.Bigger(i, m){ //we found a bigger value in our slice
max = data.Get(i)
m = i
}
}
return true, max
}
// main demonstrates Max over the three collection types, printing the
// biggest element of each.
func main() {
	islice := IntSlice{1, 2, 44, 6, 44, 222}
	fslice := Float32Slice{1.99, 3.14, 24.8}
	group := PersonSlice{
		Person{name: "Bart", age: 24},
		Person{name: "Bob", age: 23},
		Person{name: "Gertrude", age: 104},
		Person{name: "Paul", age: 44},
		Person{name: "Sam", age: 34},
		Person{name: "Jack", age: 54},
		Person{name: "Martha", age: 74},
		Person{name: "Leo", age: 4},
	}
	// Use Max function with these different collections
	// (the ok flag is ignored; the demo collections are non-empty).
	_, m := Max(islice)
	fmt.Println("The biggest integer in islice is :", m)
	_, m = Max(fslice)
	fmt.Println("The biggest float in fslice is :", m)
	_, m = Max(group)
	fmt.Println("The oldest person in the group is:", m)
}
package vox
// Matrix3x3 is an encoded 3x3 orthogonal matrix with entries 0, +1, -1.
//
// Encoding (7 bits, MagicaVoxel rotation format):
//	bits 0-1: column index of the non-zero entry in row 0
//	bits 2-3: column index of the non-zero entry in row 1
//	bit  4:   sign of row 0 (0 positive, 1 negative)
//	bit  5:   sign of row 1
//	bit  6:   sign of row 2
// Row 2's column is implied: each row/column holds exactly one non-zero
// entry, so it is 3 minus the other two column indices.
type Matrix3x3 uint8

// Matrix3x3Identity represents the identity matrix
// (row 0 at column 0, row 1 at column 1 -> encoded 0b0000100).
const Matrix3x3Identity = Matrix3x3(4)

// eqm3 returns 1 when a equals the encoded index b, 0 otherwise.
func eqm3(a int, b Matrix3x3) int {
	if a == int(b) {
		return 1
	}
	return 0
}

// signm3 decodes a sign bit: 0 -> +1, non-zero -> -1.
func signm3(x Matrix3x3) int {
	if x == 0 {
		return 1
	}
	return -1
}

// Valid reports whether m is a valid 3x3 matrix.
func (m Matrix3x3) Valid() bool {
	// We can't have the top bit set, and the position of
	// the non-zero entry in the first and second rows
	// can't be the same. Neither can be 3.
	one1 := m & 3
	one2 := (m >> 2) & 3
	return m&128 == 0 && one1 != one2 && one1 != 3 && one2 != 3
}

// Get returns the ith row, jth column of the decoded matrix.
// It must be that 0 <= i, j <= 2, and it returns 0, 1 or -1.
func (m Matrix3x3) Get(i, j int) int {
	// From https://github.com/ephtracy/voxel-model/blob/master/MagicaVoxel-file-format-vox-extension.txt
	// bit | value
	// 0-1 : 1 : index of the non-zero entry in the first row
	// 2-3 : 2 : index of the non-zero entry in the second row
	// 4 : 0 : the sign in the first row (0 : positive; 1 : negative)
	// 5 : 1 : the sign in the second row (0 : positive; 1 : negative)
	// 6 : 1 : the sign in the third row (0 : positive; 1 : negative)
	if i == 0 {
		return eqm3(j, m&3) * signm3((m>>4)&1)
	} else if i == 1 {
		return eqm3(j, (m>>2)&3) * signm3((m>>5)&1)
	}
	// Third row: its non-zero column is the one not used by rows 0 and 1.
	return eqm3(j, 3-(m&3)-((m>>2)&3)) * signm3((m>>6)&1)
}

// Mul multiplies the two matrices together, returning the result.
func (a Matrix3x3) Mul(b Matrix3x3) Matrix3x3 {
	var r Matrix3x3
	for i := 0; i < 3; i++ {
		for j := 0; j < 3; j++ {
			for k := 0; k < 3; k++ {
				// Each row of a signed permutation matrix has exactly
				// one non-zero entry, so at most one j contributes for
				// a given (i, k); no summation is needed.
				x := a.Get(i, j) * b.Get(j, k)
				if x == 0 {
					continue
				}
				if x < 0 {
					r |= 1 << uint(i+4) // sign bit for row i
				}
				if i < 2 {
					r |= Matrix3x3(k) << uint(2*i) // column index for rows 0 and 1
				}
			}
		}
	}
	return r
}

// MulVec multiplies the matrix and the given vector.
func (m Matrix3x3) MulVec(x [3]int) [3]int {
	var r [3]int
	for i := 0; i < 3; i++ {
		for j := 0; j < 3; j++ {
			r[i] += m.Get(i, j) * x[j]
		}
	}
	return r
}

// matInverses maps each valid encoded matrix to its inverse (0 elsewhere).
var matInverses [128]Matrix3x3

// init brute-forces the inverse table: for every valid pair (m, r),
// record r as m's inverse when m*r equals the identity (0x04).
func init() {
	for m := Matrix3x3(0); m < 128; m++ {
		if !m.Valid() {
			continue
		}
		for r := Matrix3x3(0); r < 128; r++ {
			if !r.Valid() {
				continue
			}
			if m.Mul(r) == 0x04 {
				matInverses[int(m)] = r
			}
		}
	}
}

// Inverse returns the inverse of the given matrix.
// Out-of-range (or invalid) inputs return 0.
func (m Matrix3x3) Inverse() Matrix3x3 {
	if m >= 128 {
		return 0
	}
	return matInverses[int(m)]
}
package simplify
import (
"context"
"strings"
"github.com/go-spatial/geom/planar"
)
// DouglasPeucker implements the Ramer–Douglas–Peucker line
// simplification algorithm.
type DouglasPeucker struct {
	// Tolerance is the tolerance used to eliminate points; a tolerance
	// of zero does not eliminate any points.
	Tolerance float64

	// Dist is the distance function to use, defaults to planar.PerpendicularDistance
	Dist planar.PointLineDistanceFunc
}

// Simplify reduces linestring to the points that deviate from the
// simplified shape by more than Tolerance. isClosed is passed through
// to the recursion unchanged.
func (dp DouglasPeucker) Simplify(ctx context.Context, linestring [][2]float64, isClosed bool) ([][2]float64, error) {
	return dp.simplify(ctx, 0, linestring, isClosed)
}
// simplify is the recursive core of Simplify. depth is used only to
// indent debug output. It finds the point farthest from the segment
// joining the first and last points; if that distance exceeds
// Tolerance it recurses on linestring[0:idx] and linestring[idx:] and
// concatenates the results, otherwise it collapses the run to its two
// endpoints.
// NOTE(review): isClosed is only passed down, never used here — confirm
// whether closed rings need special endpoint handling.
func (dp DouglasPeucker) simplify(ctx context.Context, depth uint8, linestring [][2]float64, isClosed bool) ([][2]float64, error) {
	// helper function for debugging and tracing the code
	var printf = func(msg string, depth uint8, params ...interface{}) {
		if debug {
			ps := make([]interface{}, 1, len(params)+1)
			ps[0] = depth
			ps = append(ps, params...)
			logger.Printf(strings.Repeat(" ", int(depth*2))+"[%v]"+msg, ps...)
		}
	}
	// Nothing to do: a zero/negative tolerance keeps every point, and
	// two points can't be simplified further.
	if dp.Tolerance <= 0 || len(linestring) <= 2 {
		if debug {
			if dp.Tolerance <= 0 {
				printf("skipping due to Tolerance (%v) ≤ zero:", depth, dp.Tolerance)
			}
			if len(linestring) <= 2 {
				printf("skipping due to len(linestring) (%v) ≤ two:", depth, len(linestring))
			}
		}
		return linestring, nil
	}
	if debug {
		printf("starting linestring: %v ; tolerance: %v", depth, linestring, dp.Tolerance)
	}
	dmax, idx := 0.0, 0
	dist := planar.PerpendicularDistance
	if dp.Dist != nil {
		dist = dp.Dist
	}
	// The chord from first to last point, against which deviation is
	// measured.
	line := [2][2]float64{linestring[0], linestring[len(linestring)-1]}
	if debug {
		printf("starting dmax: %v ; idx %v ; line : %v", depth, dmax, idx, line)
	}
	// Find the point that is the furthest away.
	for i := 1; i <= len(linestring)-2; i++ {
		d := dist(line, linestring[i])
		if d > dmax {
			dmax, idx = d, i
		}
		if debug {
			printf("looking at %v ; d : %v dmax %v ", depth, i, d, dmax)
		}
	}
	// If the furtherest point is greater then tolerance, we split at that point, and look again at each
	// subsections.
	if dmax > dp.Tolerance {
		if len(linestring) <= 3 {
			if debug {
				printf("returning linestring %v", depth, linestring)
			}
			return linestring, nil
		}
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		// NOTE(review): errors from the recursive calls are discarded;
		// a cancellation inside the second recursion is not surfaced to
		// the caller — confirm intent.
		rec1, _ := dp.simplify(ctx, depth+1, linestring[0:idx], isClosed)
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		rec2, _ := dp.simplify(ctx, depth+1, linestring[idx:], isClosed)
		if debug {
			printf("returning combined lines: %v %v", depth, rec1, rec2)
		}
		return append(rec1, rec2...), nil
	}
	// Drop all points between the end points.
	if debug {
		printf("dropping all points between the end points: %v", depth, line)
	}
	return line[:], nil
}
package ord
import (
"testing"
"github.com/calebcase/base/data/eq"
"github.com/stretchr/testify/require"
"golang.org/x/exp/constraints"
)
// Class is the Ord type class: a total ordering over A, extending eq.Class
// with comparison, relational predicates, and min/max selection.
type Class[A any] interface {
	eq.Class[A]

	// Compare returns LT, EQ, or GT for the pair (x, y).
	Compare(A, A) Ordering

	LT(A, A) bool
	LTE(A, A) bool
	GT(A, A) bool
	GTE(A, A) bool

	Max(A, A) A
	Min(A, A) A
}
// Type implements Class in terms of a single comparison function.
type Type[A any] struct {
	eq.Type[A]

	// compareFn supplies the total ordering all other methods derive from.
	compareFn CompareFn[A]
}

// Ensure Type implements Class.
var _ Class[int] = Type[int]{}

// CompareFn returns the Ordering of its two arguments.
type CompareFn[A any] func(A, A) Ordering

// NewType derives a type implementing Class.
// Equality is defined as compareFn yielding EQ.
func NewType[A any](compareFn CompareFn[A]) Type[A] {
	return Type[A]{
		Type: eq.NewType[A](func(x, y A) bool {
			return compareFn(x, y) == EQ{}
		}),
		compareFn: compareFn,
	}
}
// Compare delegates to the comparison function supplied at construction.
func (t Type[A]) Compare(x, y A) Ordering {
	return t.compareFn(x, y)
}

// LT reports whether x is strictly less than y.
func (t Type[A]) LT(x, y A) bool {
	return t.Compare(x, y) == LT{}
}

// LTE reports whether x is less than or equal to y.
func (t Type[A]) LTE(x, y A) bool {
	o := t.Compare(x, y)
	return o == LT{} || o == EQ{}
}

// GT reports whether x is strictly greater than y.
func (t Type[A]) GT(x, y A) bool {
	return t.Compare(x, y) == GT{}
}

// GTE reports whether x is greater than or equal to y.
func (t Type[A]) GTE(x, y A) bool {
	o := t.Compare(x, y)
	return o == GT{} || o == EQ{}
}

// Max returns the larger of x and y, preferring x on ties.
func (t Type[A]) Max(x, y A) A {
	o := t.Compare(x, y)
	if (o == GT{} || o == EQ{}) {
		return x
	}
	return y
}

// Min returns the smaller of x and y, preferring x on ties.
func (t Type[A]) Min(x, y A) A {
	o := t.Compare(x, y)
	if (o == LT{} || o == EQ{}) {
		return x
	}
	return y
}
// Ordering is the result of a comparison; its only implementations are the
// sealed markers LT, EQ, and GT below.
type Ordering interface {
	isOrdering()
}

// LT means the first argument sorts before the second.
type LT struct{}

func (_ LT) isOrdering() {}

// EQ means the two arguments are equal under the ordering.
type EQ struct{}

func (_ EQ) isOrdering() {}

// GT means the first argument sorts after the second.
type GT struct{}

func (_ GT) isOrdering() {}
// Ordered implements CompareFn for natively ordered types, using the
// built-in <, ==, and > operators.
func Ordered[A constraints.Ordered](x, y A) Ordering {
	switch {
	case x < y:
		return LT{}
	case x == y:
		return EQ{}
	default:
		return GT{}
	}
}
// Ensure Ordering can be used with NewType.
var _ Type[int] = NewType[int](Ordered[int])

// LTEFn reports whether its first argument is less than or equal to its
// second.
type LTEFn[A any] func(A, A) bool

// FromLTE derives a CompareFn using the provide LTEFn.
// It evaluates lteFn in both directions: both true means EQ, forward-only
// means LT, otherwise GT.
func FromLTE[A any](lteFn LTEFn[A]) CompareFn[A] {
	return func(x, y A) Ordering {
		lte := lteFn(x, y)
		gte := lteFn(y, x)
		if lte && gte {
			return EQ{}
		}
		if lte {
			return LT{}
		}
		return GT{}
	}
}

// Ensure FromLTE can be used with NewType.
var _ Type[int] = NewType[int](FromLTE[int](func(x, y int) bool { return x <= y }))
// Conform returns a function testing if the implementation abides by its laws.
// The returned function checks the eq laws plus comparability, transitivity,
// reflexivity, antisymmetry, and the consistency of the derived operators
// (GTE/LT/GT/Compare/Min/Max) against LTE for the sample values x, y, z.
func Conform[A any, CA Class[A]](c CA) func(t *testing.T, x, y, z A) {
	return func(t *testing.T, x, y, z A) {
		t.Run("eq.Conform", func(t *testing.T) {
			eq.Conform[A](c)(t, x, y, z)
		})
		// Totality: any two values are comparable.
		t.Run("comparability", func(t *testing.T) {
			require.True(t, c.LTE(x, y) || c.LTE(y, x))
		})
		t.Run("transitivity", func(t *testing.T) {
			if c.LTE(x, y) && c.LTE(y, z) {
				require.True(t, c.LTE(x, z))
			}
		})
		t.Run("reflexivity", func(t *testing.T) {
			require.True(t, c.LTE(x, x))
		})
		t.Run("antisymmetry", func(t *testing.T) {
			if c.LTE(x, y) && c.LTE(y, x) {
				require.True(t, c.Equal(x, y))
			}
		})
		// Derived-operator consistency laws.
		require.True(t, c.GTE(x, y) == c.LTE(y, x))
		require.True(t, c.LT(x, y) == (c.LTE(x, y) && c.NE(x, y)))
		require.True(t, c.GT(x, y) == c.LT(y, x))
		require.True(t, c.LT(x, y) == (c.Compare(x, y) == LT{}))
		require.True(t, c.GT(x, y) == (c.Compare(x, y) == GT{}))
		require.True(t, c.Equal(x, y) == (c.Compare(x, y) == EQ{}))
		if c.LTE(x, y) {
			require.Equal(t, c.Min(x, y), x)
		} else {
			require.Equal(t, c.Min(x, y), y)
		}
		if c.GTE(x, y) {
			require.Equal(t, c.Max(x, y), x)
		} else {
			require.Equal(t, c.Max(x, y), y)
		}
	}
} | data/ord/ord.go | 0.686475 | 0.643007 | ord.go | starcoder |
package timeseq
import "time"
// Interval indicates a continuous time range. A nil bound leaves the range
// unbounded on that side.
type Interval struct {
	NotBefore *time.Time
	NotAfter  *time.Time
}

// Contain reports whether t lies within the interval. Both bounds are
// inclusive, and a nil bound never excludes t.
func (i Interval) Contain(t time.Time) bool {
	withinUpper := i.NotAfter == nil || !t.After(*i.NotAfter)
	withinLower := i.NotBefore == nil || !t.Before(*i.NotBefore)
	return withinUpper && withinLower
}
// String returns the interval formatted using the RFC3339 format string
func (i *Interval) String() string {
	return i.Format(time.RFC3339)
}

// Format returns a textual representation of the interval as
// "<notBefore>~<notAfter>", rendering a nil bound as the literal "nil".
func (i *Interval) Format(layout string) string {
	notBefore, notAfter := "nil", "nil"
	if i.NotBefore != nil {
		notBefore = i.NotBefore.Format(layout)
	}
	if i.NotAfter != nil {
		notAfter = i.NotAfter.Format(layout)
	}
	return notBefore + "~" + notAfter
}
// BeginAt is alias of AfterOrEqual
func (i Interval) BeginAt(t time.Time) Interval {
	return i.AfterOrEqual(t)
}

// EndAt is alias of BeforeOrEqual
func (i Interval) EndAt(t time.Time) Interval {
	return i.BeforeOrEqual(t)
}

// BeforeOrEqual returns a copy of i with the upper bound set to t
// (inclusive); the lower bound is preserved.
func (i Interval) BeforeOrEqual(t time.Time) Interval {
	return Interval{
		NotBefore: i.NotBefore,
		NotAfter:  &t,
	}
}

// AfterOrEqual returns a copy of i with the lower bound set to t
// (inclusive); the upper bound is preserved.
func (i Interval) AfterOrEqual(t time.Time) Interval {
	return Interval{
		NotBefore: &t,
		NotAfter:  i.NotAfter,
	}
}

// Before returns a copy of i bounded strictly before t: the inclusive upper
// bound is t minus one nanosecond (t.Add(-1)).
func (i Interval) Before(t time.Time) Interval {
	t = t.Add(-1)
	return Interval{
		NotBefore: i.NotBefore,
		NotAfter:  &t,
	}
}

// After returns a copy of i bounded strictly after t: the inclusive lower
// bound is t plus one nanosecond (t.Add(1)).
func (i Interval) After(t time.Time) Interval {
	t = t.Add(1)
	return Interval{
		NotBefore: &t,
		NotAfter:  i.NotAfter,
	}
}
// Truncate aligns both bounds to multiples of d (since the zero time):
// NotBefore is rounded up to the next multiple if truncation would move it
// earlier, while NotAfter is truncated down.
func (i Interval) Truncate(d time.Duration) Interval {
	if i.NotBefore != nil {
		t := (*i.NotBefore).Truncate(d)
		// Never widen the interval: move a truncated lower bound forward.
		if t.Before(*i.NotBefore) {
			t = t.Add(d)
		}
		i.NotBefore = &t
	}
	if i.NotAfter != nil {
		t := (*i.NotAfter).Truncate(d)
		i.NotAfter = &t
	}
	return i
}

// Duration returns the duration NotAfter - NotBefore,
// returns 0 if NotAfter is before or equal NotBefore,
// returns -1 if NotAfter or NotBefore if nil.
func (i Interval) Duration() time.Duration {
	if i.NotBefore == nil || i.NotAfter == nil {
		return -1
	}
	if !(*i.NotAfter).After(*i.NotBefore) {
		return 0
	}
	return (*i.NotAfter).Sub(*i.NotBefore)
}
// BeginAt is alias of AfterOrEqual
func BeginAt(t time.Time) Interval {
	return AfterOrEqual(t)
}

// EndAt is alias of BeforeOrEqual
func EndAt(t time.Time) Interval {
	return BeforeOrEqual(t)
}

// BeforeOrEqual returns an interval with only an inclusive upper bound t.
func BeforeOrEqual(t time.Time) Interval {
	return Interval{
		NotAfter: &t,
	}
}

// AfterOrEqual returns an interval with only an inclusive lower bound t.
func AfterOrEqual(t time.Time) Interval {
	return Interval{
		NotBefore: &t,
	}
}

// Before returns an interval strictly before t: the inclusive upper bound is
// t minus one nanosecond (t.Add(-1)).
func Before(t time.Time) Interval {
	t = t.Add(-1)
	return Interval{
		NotAfter: &t,
	}
}

// After returns an interval strictly after t: the inclusive lower bound is
// t plus one nanosecond (t.Add(1)).
func After(t time.Time) Interval {
	t = t.Add(1)
	return Interval{
		NotBefore: &t,
	}
} | interval.go | 0.830353 | 0.518912 | interval.go | starcoder |
package openmessaging
// KeyValue is a string-keyed property bag with typed accessors, modeled on
// the OpenMessaging Java API. Put* methods insert or replace the value for
// a key and return the KeyValue (for chaining) plus an error; Get* methods
// return the stored value, or the type's zero value when the key is absent.
type KeyValue interface {
	// PutInt16 inserts or replaces the int16 value for key.
	PutInt16(key string, value int16) (KeyValue, error)

	// PutInt inserts or replaces the int value for key.
	PutInt(key string, value int) (KeyValue, error)

	// PutInt64 inserts or replaces the int64 value for key.
	PutInt64(key string, value int64) (KeyValue, error)

	// PutFloat64 inserts or replaces the float64 value for key.
	PutFloat64(key string, value float64) (KeyValue, error)

	// PutString inserts or replaces the string value for key.
	PutString(key string, value string) (KeyValue, error)

	// GetShort returns the int16 value stored under key, or zero if absent.
	GetShort(key string) (int16, error)

	// GetInt returns the value stored under key, or zero if absent.
	// NOTE(review): returns int16 although PutInt stores an int — this
	// looks like a copy-paste from GetShort. Changing the signature would
	// break every implementer, so it is only flagged here.
	GetInt(key string) (int16, error)

	// GetLong returns the int64 value stored under key, or zero if absent.
	GetLong(key string) (int64, error)

	// GetDouble returns the float64 value stored under key, or zero if
	// absent.
	GetDouble(key string) (float64, error)

	// GetString returns the string value stored under key, or "" if absent.
	GetString(key string) (string, error)

	// KeySet returns a view of the keys contained in this KeyValue; per the
	// original API contract the view may be backed by the KeyValue itself.
	KeySet() ([]string, error)

	// ContainsKey reports whether key is present in this KeyValue.
	ContainsKey(key string) (bool, error)
} | openmessaging/key_value.go | 0.923588 | 0.555435 | key_value.go | starcoder |
package msf
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"mosaicmfg.com/ps-postprocess/gcode"
"os"
"path"
"strings"
)
// roundTo rounds value to at most maxDecimalPlaces decimal places, with
// halves rounded away from zero (math.Round semantics).
func roundTo(value float32, maxDecimalPlaces int) float32 {
	scale := math.Pow(10, float64(maxDecimalPlaces))
	return float32(math.Round(float64(value)*scale) / scale)
}
// intToHexString formats value as lowercase hex, zero-padded to at least
// minHexDigits digits.
func intToHexString(value uint, minHexDigits int) string {
	return fmt.Sprintf("%0*x", minHexDigits, value)
}

// int16ToHexString formats a signed 16-bit value as exactly 4 hex digits of
// its two's-complement bit pattern.
func int16ToHexString(value int16) string {
	bits := uint16(value)
	return intToHexString(uint(bits), 4)
}

// floatToHexString formats the IEEE-754 bit pattern of a float32 as 8 hex
// digits, round-tripping the bits through a big-endian byte buffer.
func floatToHexString(value float32) string {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, math.Float32bits(value))
	return intToHexString(uint(binary.BigEndian.Uint32(buf)), 8)
}
// replaceSpaces substitutes every ASCII space in input with an underscore.
func replaceSpaces(input string) string {
	return strings.Join(strings.Split(input, " "), "_")
}

// truncate returns at most the first length bytes of input.
// Note: byte-based slicing — may split a multi-byte UTF-8 rune.
func truncate(input string, length int) string {
	if len(input) > length {
		return input[:length]
	}
	return input
}
// msfVersionToO21 encodes an MSF major.minor version as an "O21" command
// line: the version is packed as major*10+minor in 4 hex digits.
func msfVersionToO21(major, minor uint) string {
	versionNumber := (major * 10) + minor
	return fmt.Sprintf("O21 D%s%s", intToHexString(versionNumber, 4), EOL)
}

// writeLine writes line followed by the platform EOL to writer.
func writeLine(writer *bufio.Writer, line string) error {
	_, err := writer.WriteString(line + EOL)
	return err
}

// writeLines writes lines verbatim (caller supplies any line terminators).
func writeLines(writer *bufio.Writer, lines string) error {
	_, err := writer.WriteString(lines)
	return err
}
// getLineLength returns the Euclidean distance between (x1,y1) and (x2,y2).
func getLineLength(x1, y1, x2, y2 float32) float32 {
	dx := float64(x2 - x1)
	dy := float64(y2 - y1)
	return float32(math.Sqrt(dx*dx + dy*dy))
}

// estimateMoveTime returns the time in seconds to traverse the XY segment
// at the given feedrate (mm/min).
func estimateMoveTime(x1, y1, x2, y2, feedrate float32) float32 {
	lineLength := getLineLength(x1, y1, x2, y2)
	mmPerS := feedrate / 60
	return lineLength / mmPerS
}

// estimateZMoveTime returns the time in seconds for a pure Z move at the
// given feedrate (mm/min).
func estimateZMoveTime(z1, z2, feedrate float32) float32 {
	lineLength := float32(math.Abs(float64(z2 - z1)))
	mmPerS := feedrate / 60
	return lineLength / mmPerS
}

// estimatePurgeTime returns the time in seconds to extrude |eDelta| mm of
// filament at the given feedrate (mm/min).
func estimatePurgeTime(eDelta, feedrate float32) float32 {
	mmPerS := feedrate / 60
	return float32(math.Abs(float64(eDelta))) / mmPerS
}

// lerp linearly interpolates between minVal and maxVal with t clamped to
// [0, 1]. Bug fix: the second term previously used the unclamped t, so
// values of t outside [0, 1] extrapolated past maxVal instead of clamping.
func lerp(minVal, maxVal, t float32) float32 {
	boundedT := float32(math.Max(0, math.Min(1, float64(t))))
	return ((1 - boundedT) * minVal) + (boundedT * maxVal)
}
// Cross-section of standard 1.75 mm filament.
const filamentRadius = 1.75 / 2

// filamentPiRSquared is the filament cross-sectional area, Pi * r^2.
const filamentPiRSquared = math.Pi * filamentRadius * filamentRadius

// filamentLengthToVolume converts a filament length (mm) to its volume
// (mm^3): V = Pi * r^2 * h.
func filamentLengthToVolume(length float32) float32 {
	return filamentPiRSquared * length
}

// filamentVolumeToLength converts a filament volume (mm^3) back to length
// (mm): h = V / (Pi * r^2).
func filamentVolumeToLength(volume float32) float32 {
	return volume / filamentPiRSquared
}

// getExtrusionVolume returns the volume of an extruded line, modeling its
// cross-section as a rectangle with semicircular caps
// (see https://manual.slic3r.org/advanced/flow-math).
func getExtrusionVolume(extrusionWidth, layerHeight, length float32) float32 {
	capArea := math.Pi * float32(math.Pow(float64(layerHeight)/2, 2))
	area := (extrusionWidth-layerHeight)*layerHeight + capArea
	return area * length
}

// getExtrusionLength returns the filament length consumed by extruding a
// line of the given width, height, and length.
func getExtrusionLength(extrusionWidth, layerHeight, length float32) float32 {
	return filamentVolumeToLength(getExtrusionVolume(extrusionWidth, layerHeight, length))
}
// getPrintSummary renders a G-code comment block with total filament usage,
// per-drive filament usage (1-based drive numbers, zero-length drives
// omitted), and the formatted print-time estimate.
func getPrintSummary(msf *MSF, timeEstimate float32) string {
	totalFilament := msf.GetTotalFilamentLength()
	filamentByDrive := msf.GetFilamentLengthsByDrive()
	summary := "; According to Chroma:" + EOL
	// total filament length
	summary += fmt.Sprintf("; filament total [mm] = %.5f%s", totalFilament, EOL)
	// filament lengths by drive
	for drive, length := range filamentByDrive {
		if length > 0 {
			summary += fmt.Sprintf("; T%d filament = %.5f%s", drive + 1, length, EOL)
		}
	}
	// time estimate
	summary += fmt.Sprintf("; estimated printing time = %s%s", gcode.GetTimeString(timeEstimate), EOL)
	summary += EOL
	return summary
}
// prependFile rewrites the file at filepath so its contents become content
// followed by the original contents. The new data is staged in a temporary
// file in the same directory and atomically renamed into place.
// Bug fix: the temporary file was previously leaked (left on disk, handle
// unclosed) on every error path; it is now cleaned up unless the rename
// succeeds.
func prependFile(filepath, content string) error {
	// Stage in the same directory so os.Rename stays on one filesystem.
	tempfile, err := ioutil.TempFile(path.Dir(filepath), "")
	if err != nil {
		return err
	}
	tempName := tempfile.Name()
	renamed := false
	defer func() {
		// On any failure before the rename, close and remove the staging
		// file. Close on an already-closed file just returns an error we
		// can ignore.
		if !renamed {
			tempfile.Close()
			os.Remove(tempName)
		}
	}()
	// Write prepended content first.
	if _, err := tempfile.WriteString(content); err != nil {
		return err
	}
	// Then append the original file's contents.
	reader, err := os.Open(filepath)
	if err != nil {
		return err
	}
	_, copyErr := io.Copy(tempfile, reader)
	closeErr := reader.Close()
	if copyErr != nil {
		return copyErr
	}
	if closeErr != nil {
		return closeErr
	}
	// Finalize the staging file before swapping it into place.
	if err := tempfile.Sync(); err != nil {
		return err
	}
	if err := tempfile.Close(); err != nil {
		return err
	}
	if err := os.Rename(tempName, filepath); err != nil {
		return err
	}
	renamed = true
	return nil
}
package date
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
)
// These are predefined formats to use in PersianDate.Format.
const (
	GenericFormat      = "yyyy/mm/dd"
	GenericShortFormat = "yyyy/m/d"
	MonthDayFormat     = "MMMM dd"
	MonthYearFormat    = "MMMM, yyyy"
	WrittenFormat      = "W"
	Serialized         = "S"
)

// PersianDate represents a date in the Persian (Jalali) calendar.
// Instances built via NewPersianDate are always valid.
type PersianDate struct {
	year  int
	month int
	day   int
}

// parseResult carries the outcome of parsing a date string: either error is
// set, or year/month/day hold the (not yet range-checked) components.
type parseResult struct {
	error error
	year  int
	month int
	day   int
}
// Year returns the persian date's year value
func (pd PersianDate) Year() int {
	return pd.year
}

// Month returns the persian date's month value
func (pd PersianDate) Month() int {
	return pd.month
}

// Day returns the persian date's day value
func (pd PersianDate) Day() int {
	return pd.day
}

// NewPersianDate creates a valid new instance of PersianDate, validating
// year, month, and day (in that order) and returning the first error found.
func NewPersianDate(year int, month int, day int) (PersianDate, error) {
	err := checkYear(year)
	if err != nil {
		return PersianDate{}, err
	}
	err = checkMonth(month)
	if err != nil {
		return PersianDate{}, err
	}
	err = checkDay(year, month, day)
	if err != nil {
		return PersianDate{}, err
	}
	return PersianDate{year, month, day}, nil
}
// Parse parses a string value to a PersianDate instance. Uses the default separator '/'.
func Parse(value string) (PersianDate, error) {
	return ParseWithSeparator(value, '/')
}

// ParseWithSeparator parses a string value to a PersianDate instance with
// the given separator, then range-validates it via NewPersianDate.
func ParseWithSeparator(value string, separator rune) (PersianDate, error) {
	parseResult := parse(value, separator)
	if parseResult.error != nil {
		return PersianDate{}, parseResult.error
	}
	return NewPersianDate(parseResult.year, parseResult.month, parseResult.day)
}

// Format formats a PersianDate instance to a string with the given layout.
// Digits are localized (via localizeDigits) for every layout except
// Serialized; an unrecognized layout falls through to an ASCII
// "yyyy/mm/dd" rendering.
func (pd PersianDate) Format(layout string) string {
	switch layout {
	case WrittenFormat:
		return fmt.Sprintf("%s %s %s %s", pd.DayOfWeek(), localizeDigits(pd.Day()), pd.MonthName(), localizeDigits(pd.Year()))
	case MonthYearFormat:
		return fmt.Sprintf("%s %s", pd.MonthName(), localizeDigits(pd.Year()))
	case MonthDayFormat:
		// Renders day before month, despite the "MMMM dd" layout name.
		return fmt.Sprintf("%s %s", localizeDigits(pd.Day()), pd.MonthName())
	case GenericShortFormat:
		return fmt.Sprintf("%s/%s/%s", localizeDigits(pd.Year()), localizeDigits(pd.Month()), localizeDigits(pd.Day()))
	case GenericFormat:
		return fmt.Sprintf("%s/%s/%s",
			localizeDigits(pd.Year()),
			localizeDigits(fmt.Sprintf("%02d", pd.Month())),
			localizeDigits(fmt.Sprintf("%02d", pd.Day())))
	case Serialized:
		return fmt.Sprintf("%d-%s-%s",
			pd.Year(),
			fmt.Sprintf("%02d", pd.Month()),
			fmt.Sprintf("%02d", pd.Day()))
	}
	return fmt.Sprintf("%d/%s/%s",
		pd.Year(),
		fmt.Sprintf("%02d", pd.Month()),
		fmt.Sprintf("%02d", pd.Day()))
}
// parse splits value on separator into year/month/day components without
// range-validating them (that is NewPersianDate's job). It enforces a
// 4-digit year and 1-2 digit month and day.
// Bug fix: failed month and day conversions previously reported
// "year value cannot be parsed" (copy-paste); each message now names the
// right component and echoes the offending text instead of a zeroed %d.
func parse(value string, separator rune) parseResult {
	// 10 = len("yyyy") + 2 separators + len("mm") + len("dd").
	if len(value) == 0 || len(value) > 10 {
		return parseResult{error: fmt.Errorf("invalid date string")}
	}
	parts := strings.Split(value, string(separator))
	if len(parts) != 3 {
		return parseResult{error: fmt.Errorf("invalid date string")}
	}
	partYear := parts[0]
	partMonth := parts[1]
	partDay := parts[2]
	if len(partYear) != 4 {
		return parseResult{error: fmt.Errorf("invalid year in the value string: %s", partYear)}
	}
	if len(partMonth) == 0 || len(partMonth) > 2 {
		return parseResult{error: fmt.Errorf("invalid month in the value string: %s", partMonth)}
	}
	if len(partDay) == 0 || len(partDay) > 2 {
		return parseResult{error: fmt.Errorf("invalid day in the value string: %s", partDay)}
	}
	year, err := strconv.Atoi(partYear)
	if err != nil {
		return parseResult{error: fmt.Errorf("year value cannot be parsed: %s", partYear)}
	}
	month, err := strconv.Atoi(partMonth)
	if err != nil {
		return parseResult{error: fmt.Errorf("month value cannot be parsed: %s", partMonth)}
	}
	day, err := strconv.Atoi(partDay)
	if err != nil {
		return parseResult{error: fmt.Errorf("day value cannot be parsed: %s", partDay)}
	}
	return parseResult{year: year, month: month, day: day}
}
// DayOfWeek returns the localized day of the week of the PersianDate,
// computed from its Gregorian equivalent.
func (pd PersianDate) DayOfWeek() string {
	var dt = ToGregorianDate(pd)
	return localizeDayOfWeek(dt)
}

// MonthName returns the localized month of the PersianDate
func (pd PersianDate) MonthName() string {
	return monthNames[pd.month-1]
}

// Today returns current time as PersianDate
func Today() PersianDate {
	return ToPersianDate(time.Now())
}

// String renders the date with the default layout (ASCII "yyyy/mm/dd").
func (pd *PersianDate) String() string {
	return pd.Format("")
}

// MarshalJSON encodes the date as a JSON string in Serialized form
// ("yyyy-mm-dd").
func (pd *PersianDate) MarshalJSON() ([]byte, error) {
	return json.Marshal(pd.Format(Serialized))
}

// UnmarshalJSON decodes a "yyyy-mm-dd" JSON string, validating it via
// ParseWithSeparator before mutating the receiver.
func (pd *PersianDate) UnmarshalJSON(bytes []byte) error {
	var deserialized string
	if err := json.Unmarshal(bytes, &deserialized); err != nil {
		return err
	}
	parsed, err := ParseWithSeparator(deserialized, '-')
	if err != nil {
		return err
	}
	pd.day = parsed.day
	pd.month = parsed.month
	pd.year = parsed.year
	return nil
}
// checkYear validates that year is within the supported range 1..9999.
func checkYear(year int) error {
	if year < 1 || year > 9999 {
		return fmt.Errorf("%d is an invalid year value", year)
	}
	return nil
}

// checkMonth validates that month is within 1..12.
// Bug fix: corrected the "invaluid" typo in the error message.
func checkMonth(month int) error {
	if month > 12 || month < 1 {
		return fmt.Errorf("%d is an invalid month value", month)
	}
	return nil
}
// checkDay validates day against Persian (Jalali) calendar month lengths:
// months 1-6 have 31 days, months 7-11 have 30, and month 12 has 29 days
// (30 in a leap year, per isJLeapDay).
// Bug fixes: the first check previously used month < 6, so month 6 accepted
// any day value; and non-positive days were accepted.
func checkDay(year int, month int, day int) error {
	if day < 1 {
		return fmt.Errorf("%d is an invalid day value", day)
	}
	if month <= 6 && day > 31 {
		return fmt.Errorf("%d is an invalid day value", day)
	}
	if month > 6 && day > 30 {
		return fmt.Errorf("%d is an invalid day value", day)
	}
	// Month 12 only reaches 30 days in leap years.
	if month == 12 && day > 29 {
		if !isJLeapDay(year, month, day) || day > 30 {
			return fmt.Errorf("%d is an invalid day value", day)
		}
	}
	return nil
} | date/PersianDate.go | 0.734691 | 0.407687 | PersianDate.go | starcoder |
package main
import (
"machine"
"math"
"time"
)
// Since machine.pwmGroup is not exported, we create our own type to allow it to be passed around & stored
type PWM interface {
	Configure(config machine.PWMConfig) error
	Channel(pin machine.Pin) (channel uint8, err error)
	Set(channel uint8, value uint32)
	Top() uint32
}

var (
	// Joystick
	pinX = machine.ADC{Pin: machine.ADC0} // analog X
	pinY = machine.ADC{Pin: machine.ADC1} // analog Y
	pinZ = machine.GP22                   // digital - push switch
	// originX/originY hold the calibrated joystick center (set by
	// joyCalibrate).
	originX, originY int

	// LED {R,G,B} PWM — pin i is driven by PWM peripheral pwmsRGB[i].
	pinsRGB = [3]machine.Pin{machine.GP0, machine.GP1, machine.GP2}
	pwmsRGB = [3]PWM{machine.PWM0, machine.PWM0, machine.PWM1}
)

// setupPins configures the joystick ADC/switch pins and the three RGB PWM
// outputs at a 500 Hz period.
func setupPins() {
	pinX.Configure(machine.ADCConfig{})
	pinY.Configure(machine.ADCConfig{})
	pinZ.Configure(machine.PinConfig{Mode: machine.PinInputPullup})
	for i := 0; i < len(pinsRGB); i++ {
		pinsRGB[i].Configure(machine.PinConfig{Mode: machine.PinPWM})
		pwmsRGB[i].Configure(machine.PWMConfig{Period: uint64(1e9 / 500)}) // 500 Hz
	}
}
// The raw values read from ADC for joystick x,y are 16-bit in range 0...math.MaxUint16
// We'll reduce the precision to 10 bit 0...1024
func joyPosRaw() (int, int) {
	// NOTE(review): InitADC is invoked on every read; it presumably only
	// needs to run once — confirm against the TinyGo machine package.
	machine.InitADC()
	return int(pinX.Get()) >> 6, int(pinY.Get()) >> 6
}

// Set the origin to the current joystick values
func joyCalibrate() {
	originX, originY = 0, 0
	// average over 10 readings (running average, 10 ms apart)
	for i := 0; i < 10; i++ {
		x, y := joyPosRaw()
		originX = (originX*i + x) / (i + 1)
		originY = (originY*i + y) / (i + 1)
		time.Sleep(time.Millisecond * 10)
	}
}

// Joystick position adjusted for calibrated origin
func joyPos() (int, int) {
	x, y := joyPosRaw()
	// adjust to origin
	x, y = x-originX, y-originY
	// Create a 'dead zone' by removing jitter close to origin (0,0)
	if math.Abs(float64(x)) < 10 {
		x = 0
	}
	if math.Abs(float64(y)) < 10 {
		y = 0
	}
	return x, y
}
// clockFaceAngle returns the clockwise angle in radians of the vector
// (x, y), with 12 o'clock (x=0, positive y) at 0. Note x == 0 divides by
// zero; Atan of the resulting ±Inf still yields ±Pi/2, matching the
// original behavior.
func clockFaceAngle(x, y float64) float64 {
	angle := math.Pi/2.0 - math.Atan(y/x)
	if x >= 0 {
		return angle
	}
	// Left half of the dial: shift Atan's principal range by Pi.
	return angle + math.Pi
}
// rgbMultipliers maps a clock-face angle to {R,G,B} contribution
// multipliers in [0,1]. Each channel peaks at its own angle (R at 0, G at
// 2Pi/3, B at 4Pi/3) and falls off linearly to zero over 2Pi/3 radians on
// either side, so each channel is non-zero on 2/3 of the circle.
func rgbMultipliers(angleRad float64) [3]float64 {
	peaks := [3]float64{0, 2 * math.Pi / 3, 4 * math.Pi / 3}
	var out [3]float64
	for i, peak := range peaks {
		// Angular distance from this channel's peak (math.Mod keeps the
		// dividend's sign; Abs folds both directions together).
		dist := math.Abs(math.Mod(angleRad-peak, math.Pi*2))
		out[i] = math.Max(0, 1.0-dist/(math.Pi*2/3))
	}
	return out
}
// main polls the joystick every 10 ms and drives the RGB LED: the stick's
// angle selects the hue, its deflection magnitude the brightness, and
// pressing the stick (active-low on pinZ) blanks the LED.
func main() {
	setupPins()
	joyCalibrate()
	// Get the joysick position offset from origin (0,0)
	for {
		x, y := joyPos()
		if !pinZ.Get() {
			// Stick press down. Turn LED off
			x, y = 0, 0
		}
		// Calulate the clockwise angle in radians of the vector (12'o'clock = (0, Ymax) = 0/2*PI)
		angle := clockFaceAngle(float64(x), float64(y))
		// Get the RGB multipliers for each colour component. This determines the colour {R,G,B}
		rgbMultipliers := rgbMultipliers(angle)
		// Get the normalized magnitude of the vector ((0,0),(x,y)). This controls the brightness
		// (normalized against originX, the calibrated half-range).
		magnitude := math.Sqrt(float64(x*x+y*y)) / float64(originX)
		// Set the RGB LED colour/brightness with PWM
		for i := 0; i < len(pwmsRGB); i++ {
			multiplier := magnitude * rgbMultipliers[i]
			pwmChan, _ := pwmsRGB[i].Channel(pinsRGB[i])
			pwmsRGB[i].Set(pwmChan, uint32(float64(pwmsRGB[i].Top())*multiplier))
		}
		time.Sleep(time.Millisecond * 10)
	}
} | elegoo_most_complete_starter_kit/13_analog_joystick_module/main.go | 0.702428 | 0.454533 | main.go | starcoder |
package std
import (
"github.com/mb0/xelf/cor"
"github.com/mb0/xelf/exp"
"github.com/mb0/xelf/lit"
"github.com/mb0/xelf/typ"
)
/*
Container operations
The len form returns the length of a str, raw, container literal, or the field count of a record.
The fst, lst and nth are a short-circuiting loops that optionally accept a predicate and return the
first match from the start for fst, end for lst or the nth match from the start if the given index
is positive or from the end otherwise:
The filter and map loops accept any container and an predicate or mapper function. The each loop
resolves to the given container, while filter returns a new container of the same type and map a new
one with another element type.
A predicate or mapper function's first parameter must accept the element type and can optionally
be followed by a int and str parameter for idx or key parameters. The key parameter can only be used
for keyer literals. The filter predicate must return bool and mapper a literal of any type.
(form pred val:@1 idx?:int key?:str bool)
(form mapr val:@1 idx?:int key?:str @2)
The fold and foldr forms accumulate a container into a given literal. They accept any container and
a reducer function with a compatible accumulator parameter followed by iterator parameters. Fold
accumulates from first to last and foldr in reverse. Fold is technically a left fold and foldr a
right fold, but as the difference of cons lists and mostly linear xelf containers might lead to
confusion foldr should be thought of as reverse.
(form accu a:@1 val:@2 idx?:int key?:str @1)
The list, dict constructor forms accept any container with an appropriate iterator
to construct a new container literal by effectively using each or foldr.
(with [1 2 3 4 5] +even (fn (eq (rem _ 2) 0)) (and
(eq (len "test") 4)
(eq (len .) 5)
(eq (fst .) (nth . 0) 1)
(eq (lst .) (nth . -1) 5)
(eq (fst . even) 2)
(eq (lst . even) 4)
(eq (nth . 1 even) 4)
(eq (nth . -2 even) 4)
(eq (filter . even) [2 4])
(eq (map . even) [false true false true false])
(eq (fold . 0 (fn (add _ .val))) 15)
(eq (fold . [0] (fn (apd _ .val))) [0 1 2 3 4 5])
(eq (foldr . [0] (fn (apd _ .val))) [0 5 4 3 2 1])
))
*/
// litLener is satisfied by literals that report an element or field count.
type litLener interface {
	Len() int
}

// lenSpec implements the 'len' form: the length of a str, raw, or container
// literal, or the field count of a record.
// NOTE(review): registered via core.add while the neighboring specs use
// decl.add — confirm the grouping is intentional.
var lenSpec = core.add(SpecDX("<form len <@|alt cont str raw> int>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		fst := x.Arg(0).(*exp.Atom)
		if v, ok := deopt(fst.Lit).(litLener); ok {
			return &exp.Atom{lit.Int(v.Len()), x.Source()}, nil
		}
		return nil, cor.Errorf("cannot call len on %s", fst.Typ())
	}))

// fstSpec implements 'fst': the first element, optionally the first
// matching an element predicate.
var fstSpec = decl.add(SpecDX("<form fst list|@1 pred?:<func @1 bool> @1>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		return nth(x, x.Arg(0).(*exp.Atom), x.Arg(1), 0)
	}))

// lstSpec implements 'lst': like fst but counting from the end (index -1).
var lstSpec = decl.add(SpecDX("<form lst list|@1 pred?:<func @1 bool> @1>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		return nth(x, x.Arg(0).(*exp.Atom), x.Arg(1), -1)
	}))

// nthSpec implements 'nth': the element at a given index (negative indexes
// count from the end), optionally after filtering with a predicate.
var nthSpec = decl.add(SpecDX("<form nth cont|@1 int pred?:<func @1 bool> @1>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		l, ok := x.Arg(1).(*exp.Atom).Lit.(lit.Numeric)
		if !ok {
			return nil, cor.Errorf("want number got %s", x.Arg(1))
		}
		return nth(x, x.Arg(0).(*exp.Atom), x.Arg(2), int(l.Num()))
	}))
// nth returns the idx-th element of cont (negative idx counts from the
// end). When pred is non-nil the container is filtered first, so idx
// addresses the matching elements. Note the filter step replaces cont.Lit
// in place.
func nth(x CallCtx, cont *exp.Atom, pred exp.El, idx int) (_ exp.El, err error) {
	if pred != nil {
		iter, err := getIter(x, pred, cont.Typ(), false)
		if err != nil {
			return nil, err
		}
		cont.Lit, err = iter.filter(x, cont)
		if err != nil {
			return nil, err
		}
	}
	switch v := deopt(cont.Lit).(type) {
	case lit.Indexer:
		idx, err = checkIdx(idx, v.Len())
		if err != nil {
			return nil, err
		}
		l, err := v.Idx(idx)
		if err != nil {
			return nil, err
		}
		return &exp.Atom{Lit: l}, nil
	case *lit.Dict:
		// Dicts are addressed by insertion position here, not by key.
		idx, err = checkIdx(idx, v.Len())
		if err != nil {
			return nil, err
		}
		keyed := v.List[idx]
		return &exp.Atom{Lit: keyed.Lit}, nil
	}
	return nil, cor.Errorf("nth wants idxer or dict got %s", cont.Typ())
}

// checkIdx normalizes a possibly-negative index against length l and
// bounds-checks the result.
func checkIdx(idx, l int) (int, error) {
	if idx < 0 {
		idx = l + idx
	}
	if idx < 0 || idx >= l {
		return idx, lit.ErrIdxBounds
	}
	return idx, nil
}
// fIter wraps an iterator spec (predicate, mapper, or accumulator) together
// with the resolved positions of its parameters: n is the total argument
// count used, a is unused here, v is the value-parameter index, i and k are
// the optional idx/key parameter indexes (0 meaning absent). ator marks an
// accumulator whose first parameter is the accumulated value.
type fIter struct {
	*exp.Spec
	n, a, v, i, k int
	args          []exp.El
	ator          bool
}

// getIter resolves e to a func/form spec and analyzes its parameter list
// for use as an iterator over a container of type ct. For accumulators
// (ator) the first parameter is the accumulator and the element parameter
// follows; otherwise the first parameter must accept ct's element type.
// Optional trailing int/str parameters are bound as idx/key.
func getIter(x CallCtx, e exp.El, ct typ.Type, ator bool) (r *fIter, _ error) {
	e, err := x.Prog.Resl(x.Env, e, typ.Void)
	if err != nil && err != exp.ErrUnres {
		return nil, err
	}
	if a, ok := e.(*exp.Atom); ok {
		if s, ok := a.Lit.(*exp.Spec); ok {
			r = &fIter{Spec: s}
		}
	}
	if r == nil {
		return nil, cor.Errorf("iter not a func or form %s", e.Typ())
	}
	r.ator = ator
	args := r.Arg()
	if len(args) == 0 {
		return nil, cor.Errorf("iter must have at least one argument %s", e.Typ())
	}
	r.n = 1
	if ator {
		// Accumulator: param 0 is the accumulated value, param 1 the element.
		r.v = 1
		ct = args[0].Typ()
		r.n++
		if len(args) == 1 {
			return nil, cor.Errorf("ator must have at least two arguments %s", e.Typ())
		}
	}
	fst := args[r.v]
	switch fst.Name { // unless the parameter name is explicitly idx or key we assume val
	case "idx", "key":
		// TODO handle explicit first param
		return nil, cor.Errorf("key and idx iter without value are not implemented")
	}
	if !ator {
		cmp := typ.Compare(ct.Elem(), fst.Type)
		if cmp < typ.LvlCheck {
			return nil, cor.Errorf("iter value %s cannot be used as %s", ct.Elem(), fst.Type)
		}
	}
	// Bind up to two extra parameters after the value: str -> key, any
	// other type defaults to idx; duplicates are rejected.
	for r.n < len(args) && r.n < r.v+3 {
		switch args[r.n].Type.Kind { // default to idx
		case typ.KindStr:
			if r.k > 0 {
				return nil, cor.Errorf("key parameter already set, got %d %s",
					r.n, args[r.n])
			}
			r.k = r.n
			r.n++
		default:
			if r.i > 0 {
				return nil, cor.Errorf("idx parameter already set, got %d %s",
					r.n, args[r.n])
			}
			r.i = r.n
			r.n++
		}
	}
	r.args = make([]exp.El, r.n)
	return r, nil
}
// eval invokes the iterator spec for one element, filling the value and
// (when declared) idx/key parameter slots, and returns the resulting
// literal.
func (r *fIter) eval(x CallCtx, el lit.Lit, idx int, key string) (lit.Lit, error) {
	r.args[0] = &exp.Atom{Lit: el}
	if r.i > 0 {
		r.args[r.i] = &exp.Atom{Lit: lit.Int(idx)}
	}
	if r.k > 0 {
		r.args[r.k] = &exp.Atom{Lit: lit.Str(key)}
	}
	call, err := x.NewCall(r.Spec, r.args, x.Src)
	if err != nil {
		return nil, err
	}
	res, err := x.Prog.Eval(x.Env, call, typ.Void)
	if err != nil {
		return nil, err
	}
	return res.(*exp.Atom).Lit, nil
}

// accumulate invokes an accumulator spec: slot 0 is the accumulator, the
// element goes into the value slot, plus optional idx/key. Returns the new
// accumulator atom.
// NOTE(review): this calls x.Eval while eval above calls x.Prog.Eval —
// confirm whether the difference is intentional.
func (r *fIter) accumulate(x CallCtx, acc *exp.Atom, el lit.Lit, idx int, key string) (*exp.Atom, error) {
	r.args[0] = acc
	if r.v > 0 {
		r.args[r.v] = &exp.Atom{Lit: el}
	}
	if r.i > 0 {
		r.args[r.i] = &exp.Atom{Lit: lit.Int(idx)}
	}
	if r.k > 0 {
		r.args[r.k] = &exp.Atom{Lit: lit.Str(key)}
	}
	call, err := x.NewCall(r.Spec, r.args, x.Src)
	if err != nil {
		return nil, err
	}
	res, err := x.Eval(x.Env, call, typ.Void)
	if err != nil {
		return nil, err
	}
	return res.(*exp.Atom), nil
}
// filter evaluates the iterator as a predicate over cont and returns a new
// container of the same type holding only the elements whose result is
// non-zero. Keyers keep their keys; indexers are appended in order.
func (r *fIter) filter(x CallCtx, cont *exp.Atom) (lit.Lit, error) {
	switch v := deopt(cont.Lit).(type) {
	case lit.Keyer:
		out := lit.Zero(v.Typ()).(lit.Keyer)
		idx := 0
		err := v.IterKey(func(key string, el lit.Lit) error {
			res, err := r.eval(x, el, idx, key)
			if err != nil {
				return err
			}
			if !res.IsZero() {
				out.SetKey(key, el)
			}
			idx++
			return nil
		})
		if err != nil {
			return nil, err
		}
		return out, nil
	case lit.Indexer:
		// A key parameter makes no sense for purely indexed containers.
		if r.k > 0 {
			return nil, cor.Errorf("iter key parameter for idxer %s", cont.Typ())
		}
		out := lit.Zero(v.Typ()).(lit.Appender)
		err := v.IterIdx(func(idx int, el lit.Lit) error {
			res, err := r.eval(x, el, idx, "")
			if err != nil {
				return err
			}
			if !res.IsZero() {
				out, err = out.Append(el)
				if err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return nil, err
		}
		return out, nil
	}
	return nil, cor.Errorf("filter requires idxer or keyer got %s", cont.Typ())
}
// repeatSpec implements (repeat count elem): it builds a list of count copies
// of elem. With no elem argument the list is filled with nil literals.
var repeatSpec = decl.add(SpecDX("<form repeat count:int elem:@1 list|@1>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		n, ok := x.Arg(0).(*exp.Atom).Lit.(lit.Numeric)
		if !ok {
			return nil, cor.Errorf("want number got %s", x.Arg(0))
		}
		cnt := int(n.Num())
		if cnt < 0 {
			// Fix: a negative count would make make() panic.
			return nil, cor.Errorf("repeat count must not be negative, got %d", cnt)
		}
		res := lit.List{Data: make([]lit.Lit, cnt)}
		var el lit.Lit = lit.Nil
		if a, ok := x.Arg(1).(*exp.Atom); ok {
			el = a.Lit
			res.Elem = el.Typ()
		}
		for i := range res.Data {
			res.Data[i] = el
		}
		// Use named fields for consistency with eachSpec below.
		return &exp.Atom{Lit: &res, Src: x.Src}, nil
	}))
// eachSpec implements (range n): it returns the list of ints [0, n).
// NOTE(review): the Go variable is named eachSpec but the form name is
// "range" — confirm the naming mismatch is intentional.
var eachSpec = decl.add(SpecDX("<form range n:int list|int>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		n, ok := x.Arg(0).(*exp.Atom).Lit.(lit.Numeric)
		if !ok {
			return nil, cor.Errorf("want number got %s", x.Arg(0))
		}
		nn := int(n.Num())
		if nn < 0 {
			// Fix: a negative n would make make() panic on the capacity.
			return nil, cor.Errorf("range n must not be negative, got %d", nn)
		}
		list := &lit.List{Elem: typ.Int, Data: make([]lit.Lit, 0, nn)}
		for i := 0; i < nn; i++ {
			list.Data = append(list.Data, lit.Int(i))
		}
		return &exp.Atom{Lit: list, Src: x.Src}, nil
	}))
// filterSpec implements (filter cont pred): it resolves the predicate
// function against the container's element type and returns a new container
// holding only the elements for which the predicate is non-zero.
var filterSpec = decl.add(SpecDX("<form filter cont|@1 <func @1 bool> @2>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		cont := x.Arg(0).(*exp.Atom)
		// false: the iterator's first parameter is the element, not an accumulator.
		iter, err := getIter(x, x.Arg(1), cont.Typ(), false)
		if err != nil {
			return nil, err
		}
		res, err := iter.filter(x, cont)
		if err != nil {
			return nil, err
		}
		return &exp.Atom{res, x.Src}, nil
	}))
// mapSpec implements (map cont fn): it applies fn to every element of cont
// and collects the results into a new container. The result container type
// is derived from cont's kind and the iterator's result type.
var mapSpec = decl.add(SpecDX("<form map cont|@1 <func @1 @2> @3>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, typ.Void)
		if err != nil {
			return nil, err
		}
		cont := x.Arg(0).(*exp.Atom)
		iter, err := getIter(x, x.Arg(1), cont.Typ(), false)
		if err != nil {
			return nil, err
		}
		// Determine the result container type: keep the container flavor
		// (idxr/list/keyr/dict) but use the iterator's result element type.
		var rt typ.Type
		it := iter.Res()
		if it == typ.Void {
			it = typ.Any
		}
		switch t := cont.Typ(); t.Kind & typ.MaskElem {
		case typ.KindIdxr:
			if it == typ.Any {
				rt = typ.Idxr(it)
			} else {
				rt = typ.List(it)
			}
		case typ.KindList:
			rt = typ.List(it)
		case typ.KindKeyr:
			if it == typ.Any {
				rt = typ.Keyr(it)
			} else {
				rt = typ.Dict(it)
			}
		case typ.KindDict:
			rt = typ.Dict(it)
		case typ.KindRec:
			// Records map to a generic keyer since field types may change.
			rt = typ.Keyr(it)
		}
		switch v := deopt(cont.Lit).(type) {
		case lit.Keyer:
			out := lit.Zero(rt).(lit.Keyer)
			idx := 0
			err := v.IterKey(func(key string, el lit.Lit) error {
				res, err := iter.eval(x, el, idx, key)
				if err != nil {
					return err
				}
				_, err = out.SetKey(key, res)
				if err != nil {
					return err
				}
				idx++
				return nil
			})
			if err != nil {
				return nil, err
			}
			return &exp.Atom{out, x.Src}, nil
		case lit.Indexer:
			out := lit.Zero(rt).(lit.Appender)
			// A key parameter makes no sense for a purely indexed container.
			if iter.k > 0 {
				return nil, cor.Errorf("iter key parameter for idxer %s", cont.Typ())
			}
			err := v.IterIdx(func(idx int, el lit.Lit) error {
				res, err := iter.eval(x, el, idx, "")
				if err != nil {
					return err
				}
				out, err = out.Append(res)
				if err != nil {
					return err
				}
				return nil
			})
			if err != nil {
				return nil, err
			}
			return &exp.Atom{out, x.Src}, nil
		}
		return nil, cor.Errorf("map requires idxer or keyer got %s", cont.Typ())
	}))
// foldSpec implements (fold cont acc fn): it reduces cont left-to-right by
// calling fn with the running accumulator and each element, returning the
// final accumulator.
var foldSpec = decl.add(SpecDX("<form fold cont|@1 @2 <func @2 @1 @2> @2>",
	func(x CallCtx) (exp.El, error) {
		err := x.Layout.Eval(x.Prog, x.Env, x.Hint)
		if err != nil {
			return nil, err
		}
		cont := x.Arg(0).(*exp.Atom)
		acc := x.Arg(1).(*exp.Atom)
		// true: the iterator's first parameter is the accumulator.
		iter, err := getIter(x, x.Arg(2), acc.Typ(), true)
		if err != nil {
			return nil, err
		}
		switch v := deopt(cont.Lit).(type) {
		case lit.Keyer:
			idx := 0
			err := v.IterKey(func(key string, el lit.Lit) error {
				acc, err = iter.accumulate(x, acc, el, idx, key)
				if err != nil {
					return err
				}
				idx++
				return nil
			})
			if err != nil {
				return nil, err
			}
			return acc, nil
		case lit.Indexer:
			// A key parameter makes no sense for a purely indexed container.
			if iter.k > 0 {
				return nil, cor.Errorf("iter key parameter for idxer %s", cont.Typ())
			}
			err := v.IterIdx(func(idx int, el lit.Lit) error {
				acc, err = iter.accumulate(x, acc, el, idx, "")
				if err != nil {
					return err
				}
				return nil
			})
			if err != nil {
				return nil, err
			}
			return acc, nil
		}
		return nil, cor.Errorf("fold requires idxer or keyer got %s", cont.Typ())
	}))
var foldrSpec = decl.add(SpecDX("<form foldr cont|@1 @2 <func @2 @1 @2> @2>",
func(x CallCtx) (exp.El, error) {
err := x.Layout.Eval(x.Prog, x.Env, x.Hint)
if err != nil {
return nil, err
}
cont := x.Arg(0).(*exp.Atom)
acc := x.Arg(1).(*exp.Atom)
iter, err := getIter(x, x.Arg(2), acc.Typ(), true)
if err != nil {
return nil, err
}
switch v := deopt(cont.Lit).(type) {
case lit.Keyer:
keys := v.Keys()
for idx := len(keys) - 1; idx >= 0; idx-- {
key := keys[idx]
el, err := v.Key(key)
if err != nil {
return nil, err
}
acc, err = iter.accumulate(x, acc, el, idx, key)
if err != nil {
return nil, err
}
}
return acc, nil
case lit.Indexer:
if iter.k > 0 {
return nil, cor.Errorf("iter key parameter for idxer %s", cont.Typ())
}
ln := v.Len()
for idx := ln - 1; idx >= 0; idx-- {
el, err := v.Idx(idx)
if err != nil {
return nil, err
}
acc, err = iter.accumulate(x, acc, el, idx, "")
if err != nil {
return nil, err
}
}
return acc, nil
}
return nil, cor.Errorf("fold requires idxer or keyer got %s", cont.Typ())
})) | std/cont.go | 0.61451 | 0.5425 | cont.go | starcoder |
package edgedetection
import (
"github.com/Ernyoke/Imger/blend"
"github.com/Ernyoke/Imger/convolution"
"github.com/Ernyoke/Imger/grayscale"
"github.com/Ernyoke/Imger/padding"
"image"
)
// horizontalKernel is the 3x3 Sobel kernel for the horizontal gradient
// (responds to vertical edges).
var horizontalKernel = convolution.Kernel{Content: [][]float64{
	{-1, 0, 1},
	{-2, 0, 2},
	{-1, 0, 1},
}, Width: 3, Height: 3}

// verticalKernel is the 3x3 Sobel kernel for the vertical gradient
// (responds to horizontal edges).
var verticalKernel = convolution.Kernel{Content: [][]float64{
	{-1, -2, -1},
	{0, 0, 0},
	{1, 2, 1},
}, Width: 3, Height: 3}
// HorizontalSobelGray applies the horizontal Sobel operator (horizontal kernel) to a grayscale image. The result
// of the Sobel operator is a 2-dimensional map of the gradient at each point.
// More information on the Sobel operator: https://en.wikipedia.org/wiki/Sobel_operator
func HorizontalSobelGray(gray *image.Gray, border padding.Border) (*image.Gray, error) {
	return convolution.ConvolveGray(gray, &horizontalKernel, image.Point{X: 1, Y: 1}, border)
}

// VerticalSobelGray applies the vertical Sobel operator (vertical kernel) to a grayscale image. The result
// of the Sobel operator is a 2-dimensional map of the gradient at each point.
// More information on the Sobel operator: https://en.wikipedia.org/wiki/Sobel_operator
func VerticalSobelGray(gray *image.Gray, border padding.Border) (*image.Gray, error) {
	return convolution.ConvolveGray(gray, &verticalKernel, image.Point{X: 1, Y: 1}, border)
}
// SobelGray combines the horizontal and the vertical gradients of a grayscale image. The result is a grayscale
// image which contains the high gradients ("edges") marked as white.
func SobelGray(img *image.Gray, border padding.Border) (*image.Gray, error) {
	// Fix: the locals were named "error", shadowing the builtin error type;
	// use the idiomatic err name instead.
	horizontal, err := HorizontalSobelGray(img, border)
	if err != nil {
		return nil, err
	}
	vertical, err := VerticalSobelGray(img, border)
	if err != nil {
		return nil, err
	}
	// Merge both gradient maps with equal weight.
	return blend.AddGrayWeighted(horizontal, 0.5, vertical, 0.5)
}
// HorizontalSobelRGBA applies the horizontal Sobel operator (horizontal kernel) to an RGBA image. The image is
// first converted to grayscale. The result of the Sobel operator is a 2-dimensional map of the gradient at each point.
// More information on the Sobel operator: https://en.wikipedia.org/wiki/Sobel_operator
func HorizontalSobelRGBA(img *image.RGBA, border padding.Border) (*image.Gray, error) {
	gray := grayscale.Grayscale(img)
	return convolution.ConvolveGray(gray, &horizontalKernel, image.Point{X: 1, Y: 1}, border)
}

// VerticalSobelRGBA applies the vertical Sobel operator (vertical kernel) to an RGBA image. The image is
// first converted to grayscale. The result of the Sobel operator is a 2-dimensional map of the gradient at each point.
// More information on the Sobel operator: https://en.wikipedia.org/wiki/Sobel_operator
func VerticalSobelRGBA(img *image.RGBA, border padding.Border) (*image.Gray, error) {
	gray := grayscale.Grayscale(img)
	return convolution.ConvolveGray(gray, &verticalKernel, image.Point{X: 1, Y: 1}, border)
}
// SobelRGBA combines the horizontal and the vertical gradients of an RGBA image. The result is grayscale image
// which contains the high gradients ("edges") marked as white.
func SobelRGBA(img *image.RGBA, border padding.Border) (*image.Gray, error) {
gray := grayscale.Grayscale(img)
return SobelGray(gray, border)
} | edgedetection/sobel.go | 0.910264 | 0.553747 | sobel.go | starcoder |
package topojson
import (
"github.com/paulmach/orb"
geojson "github.com/paulmach/orb/geojson"
)
// ToGeoJSON converts the topology back into a GeoJSON feature collection.
// Each geometry in a "GeometryCollection" object becomes its own feature;
// any other object becomes a single feature. ID, properties and bounding box
// are carried over from the topology geometry.
func (t *Topology) ToGeoJSON() *geojson.FeatureCollection {
	fc := geojson.NewFeatureCollection()
	for _, obj := range t.Objects {
		switch obj.Type {
		case "GeometryCollection":
			for _, geometry := range obj.Geometries {
				feat := geojson.NewFeature(t.toGeometry(geometry))
				feat.ID = geometry.ID
				feat.Properties = geometry.Properties
				feat.BBox = geometry.BoundingBox
				fc.Append(feat)
			}
		default:
			feat := geojson.NewFeature(t.toGeometry(obj))
			feat.ID = obj.ID
			feat.Properties = obj.Properties
			feat.BBox = obj.BoundingBox
			fc.Append(feat)
		}
	}
	return fc
}
// toGeometry converts a single topology geometry into an orb.Geometry,
// dispatching on the GeoJSON type. Anything not matching a known type is
// treated as a geometry collection and converted recursively.
func (t *Topology) toGeometry(g *Geometry) orb.Geometry {
	switch g.Type {
	case geojson.TypePoint:
		return t.packPoint(g.Point)
	case geojson.TypeMultiPoint:
		return t.packPoints(g.MultiPoint)
	case geojson.TypeLineString:
		return t.packLinestring(g.LineString)
	case geojson.TypeMultiLineString:
		return t.packMultiLinestring(g.MultiLineString)
	case geojson.TypePolygon:
		return t.packPolygon(g.Polygon)
	case geojson.TypeMultiPolygon:
		return t.packMultiPolygon(g.MultiPolygon)
	default:
		geometries := make([]orb.Geometry, len(g.Geometries))
		for i, geometry := range g.Geometries {
			geometries[i] = t.toGeometry(geometry)
		}
		return orb.Collection(geometries)
	}
}
// packPoint converts a raw coordinate into an orb.Point, applying the
// topology's quantization transform (scale + translate) to the first two
// coordinates when a transform is present.
func (t *Topology) packPoint(in []float64) orb.Geometry {
	if t.Transform == nil {
		return orb.Point{in[0], in[1]}
	}
	coords := make([]float64, len(in))
	copy(coords, in)
	// Only x and y are transformed; extra coordinates are kept as-is.
	for axis := 0; axis < len(coords) && axis < 2; axis++ {
		coords[axis] = coords[axis]*t.Transform.Scale[axis] + t.Transform.Translate[axis]
	}
	return orb.Point{coords[0], coords[1]}
}
// packPoints converts a list of raw coordinates into a collection of points,
// applying the transform to each via packPoint.
func (t *Topology) packPoints(in [][]float64) orb.Geometry {
	coll := make(orb.Collection, 0, len(in))
	for _, raw := range in {
		coll = append(coll, t.packPoint(raw))
	}
	return coll
}
// packLinestring reconstructs a line string from a sequence of arc indices.
// A negative index ^a means arc a traversed in reverse. Arc coordinates are
// delta-encoded when a transform is present, so they are cumulatively summed
// and then scaled/translated. Consecutive duplicate points at arc joins are
// dropped.
func (t *Topology) packLinestring(ls []int) orb.Geometry {
	result := orb.LineString{}
	for _, a := range ls {
		reverse := false
		if a < 0 {
			// Bitwise complement encodes "arc ^a, reversed".
			a = ^a
			reverse = true
		}
		arc := t.Arcs[a]

		// Copy arc so the decoding below does not mutate the stored arcs.
		newArc := make([][]float64, len(arc))
		for i, point := range arc {
			newArc[i] = append([]float64{}, point...)
		}

		if t.Transform != nil {
			// Undo delta encoding (running sum), then de-quantize.
			x := float64(0)
			y := float64(0)
			for k, p := range newArc {
				x += p[0]
				y += p[1]

				newArc[k][0] = x*t.Transform.Scale[0] + t.Transform.Translate[0]
				newArc[k][1] = y*t.Transform.Scale[1] + t.Transform.Translate[1]
			}
		}

		if reverse {
			for j := len(newArc) - 1; j >= 0; j-- {
				// Skip a point identical to the last appended one (arc join).
				if len(result) > 0 && pointEquals([]float64{result[len(result)-1][0], result[len(result)-1][1]}, newArc[j]) {
					continue
				}
				result = append(result, orb.Point{newArc[j][0], newArc[j][1]})
			}
		} else {
			for j := 0; j < len(newArc); j++ {
				// Skip a point identical to the last appended one (arc join).
				if len(result) > 0 && pointEquals([]float64{result[len(result)-1][0], result[len(result)-1][1]}, newArc[j]) {
					continue
				}
				result = append(result, orb.Point{newArc[j][0], newArc[j][1]})
			}
		}
	}
	return result
}
// packMultiLinestring converts each arc-index sequence into a line string.
func (t *Topology) packMultiLinestring(ls [][]int) orb.Geometry {
	out := make(orb.MultiLineString, len(ls))
	for i := range ls {
		out[i] = t.packLinestring(ls[i]).(orb.LineString)
	}
	return out
}
// packPolygon converts each arc-index sequence into a polygon ring.
func (t *Topology) packPolygon(ls [][]int) orb.Geometry {
	poly := make(orb.Polygon, len(ls))
	for i, ringArcs := range ls {
		line := t.packLinestring(ringArcs).(orb.LineString)
		ring := make(orb.Ring, len(line))
		copy(ring, line)
		poly[i] = ring
	}
	return poly
}
func (t *Topology) packMultiPolygon(ls [][][]int) orb.Geometry {
result := make(orb.MultiPolygon, len(ls))
for i, l := range ls {
result[i] = t.packPolygon(l).(orb.Polygon)
}
return result
} | geojson.go | 0.625896 | 0.460228 | geojson.go | starcoder |
package schema
import (
"encoding/json"
)
// NumericSchema represents the schema for a JSON number. It extends
// SimpleSchema with the standard JSON Schema numeric validation keywords
// (multipleOf, maximum/minimum and their exclusive variants).
type NumericSchema interface {
	SimpleSchema
	GetMultipleOf() float64
	GetMaximum() float64
	GetMinimum() float64
	GetExclusiveMaximum() bool
	GetExclusiveMinimum() bool
	SetMultipleOf(multipleOf float64)
	SetMaximum(maximum float64)
	SetMinimum(minimum float64)
	SetExclusiveMaximum(exclusiveMaximum bool)
	SetExclusiveMinimum(exclusiveMinimum bool)
}

// defaultNumericSchema is the default NumericSchema implementation. It embeds
// defaultSimpleSchema and stores the numeric validation keywords; zero values
// are omitted from the JSON serialization.
type defaultNumericSchema struct {
	*defaultSimpleSchema
	Maximum float64 `json:"maximum,omitempty"`
	Minimum float64 `json:"minimum,omitempty"`
	MultipleOf float64 `json:"multipleOf,omitempty"`
	ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"`
	ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"`
}
// NewNumericSchema creates a new numeric schema for the given JSON type
// (e.g. "number" or "integer").
func NewNumericSchema(jsonType string) NumericSchema {
	base := NewBasicSchema(jsonType).(*basicSchema)
	simple := &defaultSimpleSchema{basicSchema: base}
	return &defaultNumericSchema{defaultSimpleSchema: simple}
}
// UnmarshalJSON decodes the embedded simple schema first, then picks the
// numeric validation keywords out of a generic map. Unknown keys are ignored.
func (s *defaultNumericSchema) UnmarshalJSON(b []byte) error {
	var err error
	var stuff map[string]interface{}
	ss := &defaultSimpleSchema{}
	err = json.Unmarshal(b, ss)
	if err == nil {
		s.defaultSimpleSchema = ss
		err = json.Unmarshal(b, &stuff)
	}
	if err == nil {
		for k, v := range stuff {
			// Fix: use comma-ok type assertions so a malformed value (e.g. a
			// string where a number is expected) is skipped instead of
			// panicking the whole decode.
			switch k {
			case "maximum":
				if f, ok := v.(float64); ok {
					s.Maximum = f
				}
			case "minimum":
				if f, ok := v.(float64); ok {
					s.Minimum = f
				}
			case "multipleOf":
				if f, ok := v.(float64); ok {
					s.MultipleOf = f
				}
			case "exclusiveMaximum":
				if bv, ok := v.(bool); ok {
					s.ExclusiveMaximum = bv
				}
			case "exclusiveMinimum":
				if bv, ok := v.(bool); ok {
					s.ExclusiveMinimum = bv
				}
			}
		}
	}
	return err
}
// Clone returns a deep copy of the schema: the numeric fields are copied by
// value and the embedded simple schema is cloned recursively.
func (s *defaultNumericSchema) Clone() JSONSchema {
	clone := *s
	clone.defaultSimpleSchema = s.defaultSimpleSchema.Clone().(*defaultSimpleSchema)
	return &clone
}
// GetMultipleOf returns the multipleOf constraint (0 if unset).
func (s *defaultNumericSchema) GetMultipleOf() float64 {
	return s.MultipleOf
}

// GetMaximum returns the maximum constraint (0 if unset).
func (s *defaultNumericSchema) GetMaximum() float64 {
	return s.Maximum
}

// GetMinimum returns the minimum constraint (0 if unset).
func (s *defaultNumericSchema) GetMinimum() float64 {
	return s.Minimum
}

// GetExclusiveMaximum reports whether the maximum is exclusive.
func (s *defaultNumericSchema) GetExclusiveMaximum() bool {
	return s.ExclusiveMaximum
}

// GetExclusiveMinimum reports whether the minimum is exclusive.
func (s *defaultNumericSchema) GetExclusiveMinimum() bool {
	return s.ExclusiveMinimum
}

// SetMultipleOf sets the multipleOf constraint.
func (s *defaultNumericSchema) SetMultipleOf(multipleOf float64) {
	s.MultipleOf = multipleOf
}

// SetMaximum sets the maximum constraint.
func (s *defaultNumericSchema) SetMaximum(maximum float64) {
	s.Maximum = maximum
}

// SetMinimum sets the minimum constraint.
func (s *defaultNumericSchema) SetMinimum(minimum float64) {
	s.Minimum = minimum
}

// SetExclusiveMaximum sets whether the maximum is exclusive.
func (s *defaultNumericSchema) SetExclusiveMaximum(exclusiveMaximum bool) {
	s.ExclusiveMaximum = exclusiveMaximum
}

// SetExclusiveMinimum sets whether the minimum is exclusive.
func (s *defaultNumericSchema) SetExclusiveMinimum(exclusiveMinimum bool) {
	s.ExclusiveMinimum = exclusiveMinimum
}
package data
import (
"strings"
)
/*
ProjectionParams defines projection parameters with a list of fields to
include into query results.

The parameters support two formats: dot format and nested format.

The dot format is the standard way to define included fields and subfields
using dot object notation: "field1,field2.field21,field2.field22.field221".

As an alternative the nested format offers a more compact representation:
"field1,field2(field21,field22(field221))".

Example:
    filter := NewFilterParamsFromTuples("type", "Type1")
    paging := NewPagingParams(0, 100)
    projection := ParseProjectionParams("field1,field2(field21,field22)")

    err, page := myDataClient.getDataByFilter(filter, paging, projection)
*/
type ProjectionParams struct {
	// values holds the flattened (dot-notation) field names.
	values []string
}
// NewEmptyProjectionParams creates a new, empty instance of the projection
// parameters.
// Returns *ProjectionParams
func NewEmptyProjectionParams() *ProjectionParams {
	return &ProjectionParams{
		values: make([]string, 0, 10),
	}
}

// NewProjectionParamsFromStrings creates a new instance of the projection
// parameters from a copy of the given string slice.
// Parameters:
//   - values []string
// Returns *ProjectionParams
func NewProjectionParamsFromStrings(values []string) *ProjectionParams {
	c := &ProjectionParams{
		values: make([]string, len(values)),
	}
	copy(c.values, values)
	return c
}
// NewProjectionParamsFromAnyArray creates a new instance of the projection
// parameters from AnyValueArray values. Empty string elements are skipped;
// a nil array yields empty parameters.
// Parameters:
//   - values *AnyValueArray
// Returns *ProjectionParams
func NewProjectionParamsFromAnyArray(values *AnyValueArray) *ProjectionParams {
	if values == nil {
		return NewEmptyProjectionParams()
	}

	c := &ProjectionParams{
		values: make([]string, 0, values.Len()),
	}

	for index := 0; index < values.Len(); index++ {
		value := values.GetAsString(index)
		if value != "" {
			c.values = append(c.values, value)
		}
	}

	return c
}
// Value returns the raw underlying []string (not a copy — callers must not
// mutate it).
func (c *ProjectionParams) Value() []string {
	return c.values
}

// Len returns the number of stored field names.
func (c *ProjectionParams) Len() int {
	return len(c.values)
}

// Get returns the value at the given index. Panics if index is out of range.
// Parameters:
//   - index int: an index of the element
// Returns string
func (c *ProjectionParams) Get(index int) string {
	return c.values[index]
}
// Put sets value at the given index position, growing the underlying slice
// as needed so the write is always in range.
// Parameters:
//   - index int: an index of the element
//   - value string: the value to store
func (c *ProjectionParams) Put(index int, value string) {
	if index < 0 {
		return
	}
	// Fix: the previous condition compared cap(values)+1 < index, which both
	// mis-detected when growth was needed and left len unchanged, so writing
	// c.values[index] beyond len panicked. Grow whenever index >= len.
	if index >= len(c.values) {
		a := make([]string, index+1, (index+1)*2)
		copy(a, c.values)
		c.values = a
	}
	c.values[index] = value
}
// Remove deletes the element at the given index, shifting later elements
// left. Panics if index is out of range.
// Parameters:
//   - index int: an index of the element to remove
func (c *ProjectionParams) Remove(index int) {
	c.values = append(c.values[:index], c.values[index+1:]...)
}

// Push appends a new element to the array.
// Parameters:
//   - value string
func (c *ProjectionParams) Push(value string) {
	c.values = append(c.values, value)
}
// Append appends all given elements to the array. A nil slice is a no-op.
// Parameters:
//   - elements []string
func (c *ProjectionParams) Append(elements []string) {
	// append handles a nil variadic slice natively; no explicit guard needed.
	c.values = append(c.values, elements...)
}
// Clear removes all elements, resetting the parameters to an empty state.
func (c *ProjectionParams) Clear() {
	c.values = make([]string, 0, 10)
}
// String returns the comma-separated representation of the parameters,
// e.g. "field1,field2.field21".
func (c *ProjectionParams) String() string {
	// strings.Join replaces the previous quadratic += concatenation loop and
	// produces the identical output.
	return strings.Join(c.values, ",")
}
// NewProjectionParamsFromValue converts the specified value into
// ProjectionParams by first converting it into an AnyValueArray.
// see AnyValueArray.fromValue
// Parameters:
//   - value interface{}: the value to be converted
// Returns *ProjectionParams: a newly created ProjectionParams.
func NewProjectionParamsFromValue(value interface{}) *ProjectionParams {
	values := NewAnyValueArrayFromValue(value)
	return NewProjectionParamsFromAnyArray(values)
}
// ParseProjectionParams creates new ProjectionParams by parsing each of the
// given strings (dot or nested format) and collecting the flattened fields.
// Parameters:
//   - values ...string: the values to parse
// Returns *ProjectionParams
func ParseProjectionParams(values ...string) *ProjectionParams {
	params := NewEmptyProjectionParams()
	for _, v := range values {
		parseProjectionParamValue("", params, v)
	}
	return params
}
// parseProjectionParamValue parses one projection expression and appends the
// flattened dot-notation fields to c, prefixing each with prefix. It handles
// the nested format "a(b,c(d))" by recursing on bracketed sub-expressions and
// the comma-separated list format, tracking bracket depth so commas inside
// brackets are not split on.
// Parameters:
//   - prefix string: the prefix to prepend (parent field path)
//   - c *ProjectionParams: the ProjectionParams instance to append to
//   - value string: the expression to parse
func parseProjectionParamValue(prefix string, c *ProjectionParams, value string) {
	if value != "" {
		value = strings.Trim(value, " \t\n\r")
	}

	openBracket := 0
	openBracketIndex := -1
	closeBracketIndex := -1
	commaIndex := -1

	breakCycleRequired := false
	for index := 0; index < len(value); index++ {
		switch value[index] {
		case '(':
			if openBracket == 0 {
				openBracketIndex = index
			}
			openBracket++
			break
		case ')':
			openBracket--
			if openBracket == 0 {
				closeBracketIndex = index

				if openBracketIndex >= 0 && closeBracketIndex > 0 {
					// Found a complete top-level "head(...)" group: recurse
					// into the bracket content with the head as new prefix,
					// then continue with whatever follows the bracket.
					previousPrefix := prefix
					if prefix != "" {
						prefix = prefix + "." + value[:openBracketIndex]
					} else {
						prefix = value[:openBracketIndex]
					}
					subValue := value[openBracketIndex+1 : closeBracketIndex]
					parseProjectionParamValue(prefix, c, subValue)

					subValue = value[closeBracketIndex+1:]
					parseProjectionParamValue(previousPrefix, c, subValue)
					breakCycleRequired = true
				}
			}
			break
		case ',':
			if openBracket == 0 {
				// Top-level comma: emit the left part and recurse on the rest.
				commaIndex = index
				subValue := value[0:commaIndex]

				if subValue != "" {
					if prefix != "" {
						c.Push(prefix + "." + subValue)
					} else {
						c.Push(subValue)
					}
				}

				subValue = value[commaIndex+1:]

				if subValue != "" {
					parseProjectionParamValue(prefix, c, subValue)
					breakCycleRequired = true
				}
			}
			break
		}

		if breakCycleRequired {
			break
		}
	}

	// Plain field with no brackets or commas: emit it directly.
	if value != "" && openBracketIndex == -1 && commaIndex == -1 {
		if prefix != "" {
			c.Push(prefix + "." + value)
		} else {
			c.Push(value)
		}
	}
}
package go_kd_segment_tree
import (
"fmt"
mapset "github.com/deckarep/golang-set"
"math/rand"
"sort"
)
// Segment couples a rectangle (per-dimension constraints) with the set of
// data items it matches. rnd is a lazily assigned random tie-breaker used by
// the sort comparators.
type Segment struct {
	Rect Rect
	Data mapset.Set

	rnd float64
}

// String renders the segment as "{rect, data}" for debugging.
func (s *Segment) String() string {
	return fmt.Sprintf("{%v, %v}", s.Rect, s.Data)
}
// Clone returns a copy of the segment with cloned Rect and Data.
// NOTE(review): rnd is deliberately left at zero — it is re-randomized by the
// sort comparators on demand; confirm that is the intended behavior.
func (s *Segment) Clone() *Segment {
	return &Segment{
		Rect: s.Rect.Clone(),
		Data: s.Data.Clone(),
	}
}
// sortSegments sorts segments by the interval start of one dimension.
// Segments missing the dimension sort first; exact ties are broken by lazily
// assigned random numbers, so the order among ties is randomized.
type sortSegments struct {
	dimName interface{}
	segments []*Segment
}

func (s *sortSegments) Len() int {
	return len(s.segments)
}

func (s *sortSegments) Less(i, j int) bool {
	iSeg, iSegOk := s.segments[i].Rect[s.dimName]
	jSeg, jSegOk := s.segments[j].Rect[s.dimName]
	// A segment without the dimension sorts before one that has it.
	if iSegOk == true && jSegOk == false {
		return false
	}
	if iSegOk == false && jSegOk == true {
		return true
	}

	if iSegOk && jSegOk {
		switch iSeg.(type) {
		case Interval:
			// Primary order: interval start of the dimension.
			if iSeg.(Interval)[0].Equal(jSeg.(Interval)[0]) == false {
				return iSeg.(Interval)[0].Smaller(jSeg.(Interval)[0])
			}
		}
	}

	// Tie-break with lazily assigned, guaranteed-distinct random numbers.
	if s.segments[i].rnd == 0 {
		s.segments[i].rnd = rand.Float64()
	}
	if s.segments[j].rnd == 0 {
		s.segments[j].rnd = rand.Float64()
	}
	for s.segments[i].rnd == s.segments[j].rnd {
		s.segments[i].rnd = rand.Float64()
		s.segments[j].rnd = rand.Float64()
	}
	return s.segments[i].rnd < s.segments[j].rnd
}

func (s *sortSegments) Swap(i, j int) {
	s.segments[i], s.segments[j] =
		s.segments[j], s.segments[i]
}

// sortMeasures sorts measures in ascending order, breaking exact ties with
// lazily assigned random numbers (parallel randNum slice).
type sortMeasures struct {
	measures []Measure
	randNum []float64
}

func (s *sortMeasures) Len() int {
	return len(s.measures)
}

func (s *sortMeasures) Less(i, j int) bool {
	if len(s.randNum) == 0 {
		s.randNum = make([]float64, len(s.measures))
	}
	if s.measures[i].Equal(s.measures[j]) {
		// Tie-break with guaranteed-distinct random numbers.
		if s.randNum[i] == 0 {
			s.randNum[i] = rand.Float64()
		}
		if s.randNum[j] == 0 {
			s.randNum[j] = rand.Float64()
		}
		for s.randNum[i] == s.randNum[j] {
			s.randNum[i] = rand.Float64()
			s.randNum[j] = rand.Float64()
		}
		return s.randNum[i] < s.randNum[j]
	}
	return s.measures[i].Smaller(s.measures[j])
}

// NOTE(review): Swap does not swap randNum entries alongside measures —
// confirm whether the tie-breakers are meant to travel with their measures.
func (s *sortMeasures) Swap(i, j int) {
	s.measures[i], s.measures[j] =
		s.measures[j], s.measures[i]
}
// getRealDimSegmentsDecrease evaluates how well a continuous (interval)
// dimension splits the segments. It finds a candidate split point and returns
// the number of segments falling entirely on the smaller side of it, together
// with the split measure. Returns (0, nil) when no segment has the dimension.
func getRealDimSegmentsDecrease(segments []*Segment, dimName interface{}) (int, Measure) {
	var dimSegments []*Segment
	for _, seg := range segments {
		if seg.Rect[dimName] != nil {
			dimSegments = append(dimSegments, seg)
		}
	}
	if len(dimSegments) == 0 {
		return 0, nil
	}

	sort.Sort(&sortSegments{dimName: dimName, segments: dimSegments})

	// Collect interval starts and ends of all segments in this dimension.
	var starts []Measure
	var ends []Measure
	for _, seg := range dimSegments {
		starts = append(starts, seg.Rect[dimName].(Interval)[0])
		ends = append(ends, seg.Rect[dimName].(Interval)[1])
	}
	if len(starts) == 0 || len(ends) == 0 {
		return 0, nil
	}

	sort.Sort(&sortMeasures{measures: starts})
	sort.Sort(&sortMeasures{measures: ends})

	// Walk inward from both ends to find a balanced split candidate.
	pos := 0
	for pos < len(starts)-1 && ends[pos].Smaller(starts[len(ends)-1-pos]) {
		pos += 1
	}
	midMeasure := ends[pos]

	// Count segments strictly left of and entirely right of the split.
	leftCuttingNum := 0
	rightCuttingNum := 0
	for _, seg := range dimSegments {
		if seg.Rect[dimName].(Interval)[1].Smaller(midMeasure) {
			leftCuttingNum += 1
		} else if seg.Rect[dimName].(Interval)[0].BiggerOrEqual(midMeasure) {
			rightCuttingNum += 1
		}
	}

	// Report the smaller side as the achievable decrease.
	if leftCuttingNum < rightCuttingNum {
		return leftCuttingNum, midMeasure
	} else {
		return rightCuttingNum, midMeasure
	}
}
// getDiscreteDimSegmentsDecrease evaluates how well a discrete (scatter)
// dimension splits the segments. It finds the hottest single value of the
// dimension and returns how many segments would be separated from it (the
// decrease), together with that value. Returns (0, nil) when no segment has
// the dimension; a nil measure means "segments without this dimension" is
// the largest group.
func getDiscreteDimSegmentsDecrease(segments []*Segment, dimName interface{}) (int, Measure) {
	var dimSegments []*Segment
	for _, seg := range segments {
		if seg.Rect[dimName] != nil {
			dimSegments = append(dimSegments, seg)
		}
	}
	if len(dimSegments) == 0 {
		return 0, nil
	}

	// Count how many segments contain each discrete value.
	var scatterMap = make(map[Measure]int)
	for _, seg := range dimSegments {
		for _, s := range seg.Rect[dimName].(Measures) {
			scatterMap[s] = scatterMap[s] + 1
		}
	}

	var hottestKeyMatchNum = 0
	var maxMeasure Measure
	for m, n := range scatterMap {
		if n > hottestKeyMatchNum {
			hottestKeyMatchNum = n
			maxMeasure = m
		}
	}

	// The group of segments lacking the dimension may itself be the largest.
	if hottestKeyMatchNum < len(segments)-len(dimSegments) {
		hottestKeyMatchNum = len(segments) - len(dimSegments)
		maxMeasure = nil
	}

	return len(segments) - hottestKeyMatchNum, maxMeasure
}
/*
This package contains all the runtime metrics collected by the SDK
This is used to collect the runtime statistics of the process and publish it to IA
This runs in a separate go process based on time interval configured in config
*/
package metric
import (
"fmt"
"os"
"runtime"
)
// The Metric type holds one collected measurement:
//   Op    - type of operation ("m" for measurement)
//   Id    - display ID of the metric in IA
//   Type  - metric type code, e.g. interval counter or string event
//   Value - metric value, already rendered as a string
type Metric struct {
	Op string
	Id string
	Type int
	Value string
}

// Metric type codes understood by the IA backend.
// NOTE(review): kstringType is unused in this file — presumably referenced
// elsewhere in the package; confirm before removing.
const (
	kintervalCounter = 8194
	kstringType = 21
	kstringIndEvents = 4101
)
// intervalMetric builds one interval-counter measurement ("m" operation)
// with the given display ID and value rendered via %v.
func intervalMetric(id string, value interface{}) Metric {
	return Metric{
		Op:    "m",
		Id:    id,
		Type:  kintervalCounter,
		Value: fmt.Sprintf("%v", value),
	}
}

// CollectRuntimeMetrics retrieves the runtime metrics of the process.
// The following metrics are collected at present; extend here to add more:
//   heapObjects      -- cumulative count of heap objects allocated (Total Heap Allocated)
//   heapObjectsFreed -- cumulative count of heap objects freed (Total Heap Free)
//   allocation       -- bytes of allocated heap objects (Bytes In Use)
//   totalAllocation  -- cumulative bytes allocated for heap objects (Bytes Total)
//   goRoutines       -- number of currently active goroutines (Routines Total)
func CollectRuntimeMetrics() []Metric {
	var rm runtime.MemStats
	runtime.ReadMemStats(&rm)
	// Repeated literal construction factored into intervalMetric.
	return []Metric{
		intervalMetric("GC Heap:Total Heap Allocated", rm.Mallocs),
		intervalMetric("GC Heap:Total Heap Free", rm.Frees),
		intervalMetric("GC Heap:Bytes In Use", rm.Alloc),
		intervalMetric("GC Heap:Bytes Total", rm.TotalAlloc),
		intervalMetric("GC Heap:Routines Total", runtime.NumGoroutine()),
	}
}
//CollectStaticMetrics is used to retrieve the metrics of the process
//The following metrics are collected at only once
// processID -- Current ID of the running process
// version -- GO Version by the running process
// cpu -- Logical CPU count of the process
// HostName -- Name of the host the probe is running
func CollectStaticMetrics() []Metric {
var metrics []Metric
processID := Metric{
Op: "m",
Id: "ProcessID",
Type: kstringIndEvents,
Value: fmt.Sprintf("%v", os.Getpid()),
}
metrics = append(metrics, processID)
version := Metric{
Op: "m",
Id: "GO Version",
Type: kstringIndEvents,
Value: fmt.Sprintf("%v", runtime.Version()),
}
metrics = append(metrics, version)
cpu := Metric{
Op: "m",
Id: "Logical CPU count",
Type: kstringIndEvents,
Value: fmt.Sprintf("%v", runtime.NumCPU()),
}
metrics = append(metrics, cpu)
hname, err := os.Hostname()
if err == nil {
name := Metric{
Op: "m",
Id: "Host Name",
Type: kstringIndEvents,
Value: fmt.Sprintf("%v", hname),
}
metrics = append(metrics, name)
}
return metrics
} | internal/metric/metric.go | 0.71602 | 0.514949 | metric.go | starcoder |
package testdata
// CreateMandateResponse is a canned Mollie v2 "create mandate" API response
// used as a test fixture.
const CreateMandateResponse = `{
    "resource": "mandate",
    "id": "mdt_h3gAaD5zP",
    "mode": "test",
    "status": "valid",
    "method": "directdebit",
    "details": {
        "consumerName": "<NAME>",
        "consumerAccount": "NL55INGB0000000000",
        "consumerBic": "INGBNL2A"
    },
    "mandateReference": "YOUR-COMPANY-MD13804",
    "signatureDate": "2018-05-07",
    "createdAt": "2018-05-07T10:49:08+00:00",
    "_links": {
        "self": {
            "href": "https://api.mollie.com/v2/customers/cst_4qqhO89gsT/mandates/mdt_h3gAaD5zP",
            "type": "application/hal+json"
        },
        "customer": {
            "href": "https://api.mollie.com/v2/customers/cst_4qqhO89gsT",
            "type": "application/hal+json"
        },
        "documentation": {
            "href": "https://docs.mollie.com/reference/v2/mandates-api/create-mandate",
            "type": "text/html"
        }
    }
}`

// GetMandateResponse is a canned Mollie v2 "get mandate" API response used
// as a test fixture.
const GetMandateResponse = `{
    "resource": "mandate",
    "id": "mdt_h3gAaD5zP",
    "mode": "test",
    "status": "valid",
    "method": "directdebit",
    "details": {
        "consumerName": "<NAME>",
        "consumerAccount": "NL55INGB0000000000",
        "consumerBic": "INGBNL2A"
    },
    "mandateReference": "YOUR-COMPANY-MD1380",
    "signatureDate": "2018-05-07",
    "createdAt": "2018-05-07T10:49:08+00:00",
    "_links": {
        "self": {
            "href": "https://api.mollie.com/v2/customers/cst_4qqhO89gsT/mandates/mdt_h3gAaD5zP",
            "type": "application/hal+json"
        },
        "customer": {
            "href": "https://api.mollie.com/v2/customers/cst_4qqhO89gsT",
            "type": "application/hal+json"
        },
        "documentation": {
            "href": "https://docs.mollie.com/reference/v2/mandates-api/get-mandate",
            "type": "text/html"
        }
    }
}`

// ListMandatesResponse is a canned Mollie v2 "list mandates" API response
// (two mandates plus pagination links) used as a test fixture.
const ListMandatesResponse = `
{
    "count": 2,
    "_embedded": {
        "mandates": [
            {
                "resource": "mandate",
                "id": "mdt_AcQl5fdL4h",
                "mode": "test",
                "status": "valid",
                "method": "directdebit",
                "details": {
                    "consumerName": "<NAME>",
                    "consumerAccount": "NL55INGB0000000000",
                    "consumerBic": "INGBNL2A"
                },
                "mandateReference": null,
                "signatureDate": "2018-05-07",
                "createdAt": "2018-05-07T10:49:08+00:00",
                "_links": {
                    "self": {
                        "href": "https://api.mollie.com/v2/customers/cst_8wmqcHMN4U/mandates/mdt_AcQl5fdL4h",
                        "type": "application/hal+json"
                    },
                    "customer": {
                        "href": "https://api.mollie.com/v2/customers/cst_8wmqcHMN4U",
                        "type": "application/hal+json"
                    },
                    "documentation": {
                        "href": "https://mollie.com/en/docs/reference/customers/create-mandate",
                        "type": "text/html"
                    }
                }
            },
            {
                "resource": "mandate",
                "id": "mdt_AcQl5fdL4h",
                "mode": "test",
                "status": "valid",
                "method": "directdebit",
                "details": {
                    "consumerName": "<NAME>",
                    "consumerAccount": "NL55INGB0000000000",
                    "consumerBic": "INGBNL2A"
                },
                "mandateReference": null,
                "signatureDate": "2018-05-07",
                "createdAt": "2018-05-07T10:49:08+00:00",
                "_links": {
                    "self": {
                        "href": "https://api.mollie.com/v2/customers/cst_8wmqcHMN4U/mandates/mdt_AcQl5fdL4h",
                        "type": "application/hal+json"
                    },
                    "customer": {
                        "href": "https://api.mollie.com/v2/customers/cst_8wmqcHMN4U",
                        "type": "application/hal+json"
                    },
                    "documentation": {
                        "href": "https://mollie.com/en/docs/reference/customers/create-mandate",
                        "type": "text/html"
                    }
                }
            }
        ]
    },
    "_links": {
        "self": {
            "href": "https://api.mollie.com/v2/customers/cst_8wmqcHMN4U/mandates?limit=5",
            "type": "application/hal+json"
        },
        "previous": null,
        "next": {
            "href": "https://api.mollie.com/v2/customers/cst_8wmqcHMN4U/mandates?from=mdt_AcQl5fdL4h&limit=5",
            "type": "application/hal+json"
        },
        "documentation": {
            "href": "https://docs.mollie.com/reference/v2/mandates-api/revoke-mandate",
            "type": "text/html"
        }
    }
}`
package units
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
)
// Size unit constants. Bit is the base unit; "i" variants are 1024-based,
// the plain KB/MB/GB/TB are 1000-based bytes, and Kb/Mb/Gb are 1000-based
// bits (network-style units).
const (
	Bit Size = 1
	Byte Size = 8 * Bit

	KiB Size = 1024 * Byte
	MiB Size = 1024 * KiB
	GiB Size = 1024 * MiB
	TiB Size = 1024 * GiB

	KB Size = 1000 * Byte
	MB Size = 1000 * KB
	GB Size = 1000 * MB
	TB Size = 1000 * GB

	Kb Size = 1000 * Bit
	Mb Size = 1000 * Kb
	Gb Size = 1000 * Mb
)

var (
	// suffixValues maps every recognized unit suffix to its Size value.
	suffixValues = map[string]Size{
		"b": Bit, "B": Byte,
		"k": KiB, "K": KiB, "KB": KB, "KiB": KiB,
		"kb": Kb, "Kb": Kb,
		"m": MiB, "M": MiB, "MB": MB, "MiB": MiB,
		"mb": Mb, "Mb": Mb,
		"g": GiB, "G": GiB, "GB": GB, "GiB": GiB,
		"gb": Gb, "Gb": Gb,
		"t": TiB, "T": TiB, "TB": TB, "TiB": TiB,
	}
	// pairs is initialized in the package init function (not shown here);
	// presumably ordered so longer suffixes are matched first — confirm.
	pairs []*pair
	// re matches a number followed by a unit suffix (optionally "/s").
	re = regexp.MustCompile(`^[[:space:]]*[.[:digit:]]+[bBkKmMgGtTi/s]+[[:space:]]*$`)
	// replaceRegex strips whitespace, letters and "/" to isolate the number.
	replaceRegex = regexp.MustCompile("[[:space:][:alpha:]/]+")
)

// Size is a quantity of data measured in bits.
type Size int64

// pair associates a unit suffix with its Size value for ordered matching.
type pair struct {
	suffix string
	val Size
}
// ParseSize parses s into a Size. Fractional values are supported but may
// lose precision (the number is parsed as a float64). A bare unit name
// (e.g. "KiB") yields that unit's Size.
func ParseSize(s string) (Size, error) {
	if value, exist := suffixValues[s]; exist {
		// value is already a Size; no conversion needed.
		return value, nil
	}
	if !re.MatchString(s) {
		return 0, fmt.Errorf("bad format 1: %v", s)
	}
	for _, pair := range pairs {
		if strings.HasSuffix(s, pair.suffix) {
			num := replaceRegex.ReplaceAllString(s, "")
			value, err := strconv.ParseFloat(num, 64)
			if err != nil {
				return 0, fmt.Errorf("bad format 2: %s", s)
			}
			return Size(value * float64(pair.val)), nil
		}
	}
	// Fix: previously an input passing the regex but matching no known
	// suffix silently returned (0, nil); report it as an error instead.
	return 0, fmt.Errorf("bad format 3: unknown unit suffix in %q", s)
}
// AsSize converts v units of the given unit into a Size value.
func AsSize(v float64, unit Size) Size {
	scaled := v * float64(unit)
	return Size(scaled)
}
// MaxSize returns the largest of the given sizes, or 0 when none are given.
func MaxSize(ss ...Size) Size {
	if len(ss) == 0 {
		return 0
	}
	result := ss[0]
	for _, candidate := range ss[1:] {
		if candidate > result {
			result = candidate
		}
	}
	return result
}
// MinSize returns the smallest of the given sizes, or 0 when none are given.
func MinSize(ss ...Size) Size {
	if len(ss) == 0 {
		return 0
	}
	result := ss[0]
	for _, candidate := range ss[1:] {
		if candidate < result {
			result = candidate
		}
	}
	return result
}
// TiB returns the size expressed in tebibytes.
func (s Size) TiB() float64 {
return float64(s) / float64(TiB)
}
// TB returns the size expressed in terabytes.
func (s Size) TB() float64 {
return float64(s) / float64(TB)
}
// GiB returns the size expressed in gibibytes.
func (s Size) GiB() float64 {
return float64(s) / float64(GiB)
}
// GB returns the size expressed in gigabytes.
func (s Size) GB() float64 {
return float64(s) / float64(GB)
}
// Gb returns the size expressed in gigabits.
func (s Size) Gb() float64 {
return float64(s) / float64(Gb)
}
// MiB returns the size expressed in mebibytes.
func (s Size) MiB() float64 {
return float64(s) / float64(MiB)
}
// MB returns the size expressed in megabytes.
func (s Size) MB() float64 {
return float64(s) / float64(MB)
}
// Mb returns the size expressed in megabits.
func (s Size) Mb() float64 {
return float64(s) / float64(Mb)
}
// KiB returns the size expressed in kibibytes.
func (s Size) KiB() float64 {
return float64(s) / float64(KiB)
}
// KB returns the size expressed in kilobytes.
func (s Size) KB() float64 {
return float64(s) / float64(KB)
}
// Kb returns the size expressed in kilobits.
func (s Size) Kb() float64 {
return float64(s) / float64(Kb)
}
// Byte returns the size expressed in bytes.
func (s Size) Byte() float64 {
return float64(s) / float64(Byte)
}
// Bit returns the size expressed in bits (the base unit).
func (s Size) Bit() float64 {
return float64(s)
}
// BinaryHumanSize renders the size as a human-readable string using
// 1024-based (binary) byte units, with the given number of significant
// digits. Sizes below one byte are rendered in bits.
func (s Size) BinaryHumanSize(precision int) string {
if s < Byte {
return fmt.Sprintf("%d%s", s, "b")
}
unitStr := []string{"B", "KiB", "MiB", "GiB", "TiB"}
size, unit := getSizeAndUnit(s.Byte(), 1024.0, unitStr)
return fmt.Sprintf("%.*g%s", precision, size, unit)
}
// DecimalHumanSize renders the size as a human-readable string using
// 1000-based (decimal) byte units, with the given number of significant
// digits. Sizes below one byte are rendered in bits.
func (s Size) DecimalHumanSize(precision int) string {
if s < Byte {
return fmt.Sprintf("%d%s", s, "b")
}
unitStr := []string{"B", "kB", "MB", "GB", "TB"}
size, unit := getSizeAndUnit(s.Byte(), 1000.0, unitStr)
return fmt.Sprintf("%.*g%s", precision, size, unit)
}
// NetHumanSize renders the size in network-rate style: 1000-based bit units.
func (s Size) NetHumanSize(precision int) string {
unitStr := []string{"b", "Kb", "Mb", "Gb", "Tb"}
size, unit := getSizeAndUnit(float64(s), 1000.0, unitStr)
return fmt.Sprintf("%.*g%s", precision, size, unit)
}
// getSizeAndUnit repeatedly divides size by base until it drops below base
// or the last unit label is reached, and returns the scaled value together
// with the matching unit label.
func getSizeAndUnit(size float64, base float64, units []string) (float64, string) {
	last := len(units) - 1
	idx := 0
	for idx < last && size >= base {
		size /= base
		idx++
	}
	return size, units[idx]
}
// func (s Size) HumanSizeWithPrecision(precision int) string {
// return ""
// }
// init flattens suffixValues into pairs and sorts it so that longer
// suffixes come first, letting ParseSize match the most specific unit.
func init() {
pairs = make([]*pair, 0, len(suffixValues))
for suffix, val := range suffixValues {
pairs = append(pairs, &pair{
suffix: suffix,
val: val})
}
// Sort longest suffix first so ParseSize prefers the most specific match
// (e.g. "GiB" before "B").
sort.Slice(pairs, func(i, j int) bool {
si := pairs[i]
sj := pairs[j]
return len(si.suffix) > len(sj.suffix)
})
} | units/units.go | 0.527317 | 0.405743 | units.go | starcoder
package ilog10
import "math/bits"
// n is a sentinel threshold: at leading-zero counts where no power of ten
// can be crossed, the comparison v >= n is always false.
const n = ^uint64(0)
// lookup64 maps a value's leading-zero count to the power-of-ten threshold
// at which the digit count increases for that bit length.
var lookup64 = [64]uint64{
// This initializer list is easier to read as follows:
// 10000000000000000000, n, n, n, 1000000000000000000, n, n, 100000000000000000, n, n,
// 10000000000000000, n, n, n, 1000000000000000, n, n, 100000000000000, n, n,
// 10000000000000, n, n, n, 1000000000000, n, n, 100000000000, n, n,
// 10000000000, n, n, n, 1000000000, n, n, 100000000, n, n,
// 10000000, n, n, n, 1000000, n, n, 100000, n, n,
// 10000, n, n, n, 1000, n, n, 100, n, n,
// 10, n, n, n
10000000000000000000, n, n, n, 1000000000000000000, n, n, 100000000000000000, n, n,
10000000000000000, n, n, n, 1000000000000000, n, n, 100000000000000, n, n,
10000000000000, n, n, n, 1000000000000, n, n, 100000000000, n, n,
10000000000, n, n, n, 1000000000, n, n, 100000000, n, n,
10000000, n, n, n, 1000000, n, n, 100000, n, n,
10000, n, n, n, 1000, n, n, 100, n, n,
10, n, n, n,
}
// FastUint64Log10 computes the integer base-10 logarithm of v, that is,
// the number of decimal digits in v minus one.
// The function is not well-defined for v == 0.
func FastUint64Log10(v uint64) uint {
lz := uint(bits.LeadingZeros64(v)) & 0x3f // &63 to eliminate bounds checking
g := uint(0)
// (63-lz) is the index of v's highest set bit; *3/10 approximates
// multiplying by log10(2). Add one when v has reached the exact power of
// ten for that bit length (lookup64 holds the threshold, or n for "never").
if v >= lookup64[lz] {
g = 1
}
return (63-lz)*3/10 + g
}
// Note: in the following table we use 64-bit values otherwise the condition
// v >= lookup[clz(v)] will be true for the very first entry when v == 2^32-1.
// Trying to force this to be 32-bit by adding an additional condition below
// makes the code overall slower.
var lookup32 = [32]uint64{
// This initializer list is easier to read as follows:
// n, n, 1000000000, n, n, 100000000, n, n,
// 10000000, n, n, n, 1000000, n, n, 100000, n, n,
// 10000, n, n, n, 1000, n, n, 100, n, n,
// 10, n, n, n
n, n, 1000000000, n, n, 100000000, n, n,
10000000, n, n, n, 1000000, n, n, 100000, n, n,
10000, n, n, n, 1000, n, n, 100, n, n,
10, n, n, n,
}
// FastUint32Log10 computes the integer base-10 logarithm of v, that is,
// the number of decimal digits in v minus one.
// The function is not well-defined for v == 0.
func FastUint32Log10(v uint32) uint {
lz := uint(bits.LeadingZeros32(v)) & 0x1f // &31 to eliminate bounds checking
g := uint(0)
// Same digit-count approximation as FastUint64Log10, for 32-bit inputs;
// the table is uint64 to keep the v == 2^32-1 comparison correct.
if uint64(v) >= lookup32[lz] {
g = 1
}
return (31-lz)*3/10 + g
}
// C illuminant conversion functions
package white
// apply3x3 multiplies the 3x3 chromatic-adaptation matrix m with the source
// color (xs, ys, zs) and returns the adapted color (xd, yd, zd). Every
// conversion in this file is such a matrix product; only the matrix differs,
// so the functions below merely supply their matrix.
func apply3x3(m [3][3]float64, xs, ys, zs float64) (xd, yd, zd float64) {
	xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
	yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
	zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
	return
}

// C_A_Bradford converts an XYZ color from illuminant C to A (Bradford).
func C_A_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.2040146, 0.1029527, -0.1567072},
		{0.1407450, 0.9280261, -0.0558735},
		{-0.0252839, 0.0387607, 0.2891656}}, xs, ys, zs)
}

// C_A_vonKries converts an XYZ color from illuminant C to A (von Kries).
func C_A_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0679098, 0.2342445, -0.1548534},
		{0.0257301, 0.9809032, -0.0051913},
		{0.0000000, 0.0000000, 0.3009760}}, xs, ys, zs)
}

// C_A_Xyz converts an XYZ color from illuminant C to A (XYZ scaling).
func C_A_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.1200726, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.3009760}}, xs, ys, zs)
}

// C_B_Bradford converts an XYZ color from illuminant C to B (Bradford).
func C_B_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0537465, 0.0251266, -0.0573939},
		{0.0321644, 0.9914304, -0.0194323},
		{-0.0106963, 0.0175083, 0.7148758}}, xs, ys, zs)
}

// C_B_vonKries converts an XYZ color from illuminant C to B (von Kries).
func C_B_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0182822, 0.0630638, -0.0600632},
		{0.0069271, 0.9948581, -0.0013971},
		{0.0000000, 0.0000000, 0.7208116}}, xs, ys, zs)
}

// C_B_Xyz converts an XYZ color from illuminant C to B (XYZ scaling).
func C_B_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0101760, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.7208116}}, xs, ys, zs)
}

// C_D50_Bradford converts an XYZ color from illuminant C to D50 (Bradford).
func C_D50_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0376976, 0.0153932, -0.0582624},
		{0.0170675, 1.0056038, -0.0188973},
		{-0.0120126, 0.0204361, 0.6906380}}, xs, ys, zs)
}

// C_D50_vonKries converts an XYZ color from illuminant C to D50 (von Kries).
func C_D50_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0132609, 0.0457455, -0.0636638},
		{0.0050248, 0.9962695, -0.0010128},
		{0.0000000, 0.0000000, 0.6979583}}, xs, ys, zs)
}

// C_D50_Xyz converts an XYZ color from illuminant C to D50 (XYZ scaling).
func C_D50_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9831556, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.6979583}}, xs, ys, zs)
}

// C_D55_Bradford converts an XYZ color from illuminant C to D55 (Bradford).
func C_D55_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0186606, 0.0061268, -0.0408925},
		{0.0047723, 1.0105478, -0.0128799},
		{-0.0089652, 0.0155750, 0.7736548}}, xs, ys, zs)
}

// C_D55_vonKries converts an XYZ color from illuminant C to D55 (von Kries).
func C_D55_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0068573, 0.0236569, -0.0459285},
		{0.0025985, 0.9980703, -0.0005234},
		{0.0000000, 0.0000000, 0.7793914}}, xs, ys, zs)
}

// C_D55_Xyz converts an XYZ color from illuminant C to D55 (XYZ scaling).
func C_D55_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9756103, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.7793914}}, xs, ys, zs)
}

// C_D65_Bradford converts an XYZ color from illuminant C to D65 (Bradford).
func C_D65_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9904476, -0.0071683, -0.0116156},
		{-0.0123712, 1.0155950, -0.0029282},
		{-0.0035635, 0.0067697, 0.9181569}}, xs, ys, zs)
}

// C_D65_vonKries converts an XYZ color from illuminant C to D65 (von Kries).
func C_D65_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9972812, -0.0093756, -0.0154171},
		{-0.0010298, 1.0007636, 0.0002084},
		{0.0000000, 0.0000000, 0.9209267}}, xs, ys, zs)
}

// C_D65_Xyz converts an XYZ color from illuminant C to D65 (XYZ scaling).
func C_D65_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9691356, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.9209267}}, xs, ys, zs)
}

// C_D75_Bradford converts an XYZ color from illuminant C to D75 (Bradford).
func C_D75_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9706028, -0.0161674, 0.0118229},
		{-0.0235618, 1.0173098, 0.0049041},
		{0.0009486, -0.0007126, 1.0370815}}, xs, ys, zs)
}

// C_D75_vonKries converts an XYZ color from illuminant C to D75 (von Kries).
func C_D75_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9904762, -0.0328494, 0.0094473},
		{-0.0036083, 1.0026776, 0.0007284},
		{0.0000000, 0.0000000, 1.0372657}}, xs, ys, zs)
}

// C_D75_Xyz converts an XYZ color from illuminant C to D75 (XYZ scaling).
func C_D75_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9683708, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 1.0372657}}, xs, ys, zs)
}

// C_E_Bradford converts an XYZ color from illuminant C to E (Bradford).
func C_E_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0399770, 0.0198119, -0.0336279},
		{0.0266883, 0.9877806, -0.0118030},
		{-0.0056861, 0.0089182, 0.8429683}}, xs, ys, zs)
}

// C_E_vonKries converts an XYZ color from illuminant C to E (von Kries).
func C_E_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0133781, 0.0461460, -0.0338372},
		{0.0050688, 0.9962378, -0.0010226},
		{0.0000000, 0.0000000, 0.8457947}}, xs, ys, zs)
}

// C_E_Xyz converts an XYZ color from illuminant C to E (XYZ scaling).
func C_E_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0196382, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.8457947}}, xs, ys, zs)
}

// C_F2_Bradford converts an XYZ color from illuminant C to F2 (Bradford).
func C_F2_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0795960, 0.0368643, -0.0877996},
		{0.0467792, 0.9891161, -0.0295981},
		{-0.0165425, 0.0271981, 0.5607245}}, xs, ys, zs)
}

// C_F2_vonKries converts an XYZ color from illuminant C to F2 (von Kries).
func C_F2_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0271432, 0.0936299, -0.0923019},
		{0.0102846, 0.9923658, -0.0020741},
		{0.0000000, 0.0000000, 0.5700064}}, xs, ys, zs)
}

// C_F2_Xyz converts an XYZ color from illuminant C to F2 (XYZ scaling).
func C_F2_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0113384, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.5700064}}, xs, ys, zs)
}

// C_F7_Bradford converts an XYZ color from illuminant C to F7 (Bradford).
func C_F7_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9905934, -0.0071084, -0.0118379},
		{-0.0123043, 1.0156147, -0.0030004},
		{-0.0036092, 0.0068474, 0.9169788}}, xs, ys, zs)
}

// C_F7_vonKries converts an XYZ color from illuminant C to F7 (von Kries).
func C_F7_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9973324, -0.0091990, -0.0156597},
		{-0.0010104, 1.0007492, 0.0002045},
		{0.0000000, 0.0000000, 0.9197764}}, xs, ys, zs)
}

// C_F7_Xyz converts an XYZ color from illuminant C to F7 (XYZ scaling).
func C_F7_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{0.9690744, 0.0000000, 0.0000000},
		{0.0000000, 1.0000000, 0.0000000},
		{0.0000000, 0.0000000, 0.9197764}}, xs, ys, zs)
}

// C_F11_Bradford converts an XYZ color from illuminant C to F11 (Bradford).
func C_F11_Bradford(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0971565, 0.0464489, -0.0954514},
		{0.0606741, 0.9791562, -0.0326999},
		{-0.0172568, 0.0278907, 0.5349937}}, xs, ys, zs)
}

// C_F11_vonKries converts an XYZ color from illuminant C to F11 (von Kries).
func C_F11_vonKries(xs, ys, zs float64) (xd, yd, zd float64) {
	return apply3x3([3][3]float64{
		{1.0328466, 0.1133021, -0.0986501},
		{0.0124454, 0.9907622, -0.0025103},
		{0.0000000, 0.0000000, 0.5442689}}, xs, ys, zs)
}
// C_F11_Xyz converts an XYZ color from illuminant C to F11 (XYZ scaling).
func C_F11_Xyz(xs, ys, zs float64) (xd, yd, zd float64) {
m := [3][3]float64{
{1.0294472, 0.0000000, 0.0000000},
{0.0000000, 1.0000000, 0.0000000},
{0.0000000, 0.0000000, 0.5442689}}
xd = m[0][0]*xs + m[0][1]*ys + m[0][2]*zs
yd = m[1][0]*xs + m[1][1]*ys + m[1][2]*zs
zd = m[2][0]*xs + m[2][1]*ys + m[2][2]*zs
return
} | f64/white/c.go | 0.51879 | 0.671592 | c.go | starcoder
package fidlgen
import (
"bytes"
"fmt"
)
// In the masks used in the following functions, bytes requiring padding are marked 0xff and
// bytes not requiring padding are marked 0x00.
// populateFullStructMaskForStruct marks every padding byte of s in mask.
// Members are walked last-to-first: the gap between a member's declared
// padding bytes and the running paddingEnd is marked, then paddingEnd moves
// to just before the member's offset. When flatten is true, each member's
// type is recursed into so padding inside nested structs/arrays is marked too.
func (s Struct) populateFullStructMaskForStruct(mask []byte, flatten bool, getTypeShape func(Struct) TypeShape, getFieldShape func(StructMember) FieldShape, resolveStruct func(identifier EncodedCompoundIdentifier) *Struct) {
paddingEnd := getTypeShape(s).InlineSize - 1
for i := len(s.Members) - 1; i >= 0; i-- {
member := s.Members[i]
fieldShape := getFieldShape(member)
if flatten {
s.populateFullStructMaskForType(mask[fieldShape.Offset:paddingEnd+1], &member.Type, flatten, getTypeShape, getFieldShape, resolveStruct)
}
for j := 0; j < fieldShape.Padding; j++ {
mask[paddingEnd-j] = 0xff
}
paddingEnd = fieldShape.Offset - 1
}
}
// populateFullStructMaskForType marks padding for a single type within mask:
// arrays recurse element by element, and named struct types recurse into
// their resolved definition. Nullable types return early and contribute no
// inline padding here.
func (s Struct) populateFullStructMaskForType(mask []byte, typ *Type, flatten bool, getTypeShape func(Struct) TypeShape, getFieldShape func(StructMember) FieldShape, resolveStruct func(identifier EncodedCompoundIdentifier) *Struct) {
if typ.Nullable {
return
}
switch typ.Kind {
case ArrayType:
elemByteSize := len(mask) / *typ.ElementCount
for i := 0; i < *typ.ElementCount; i++ {
s.populateFullStructMaskForType(mask[i*elemByteSize:(i+1)*elemByteSize], typ.ElementType, flatten, getTypeShape, getFieldShape, resolveStruct)
}
case IdentifierType:
sv := resolveStruct(typ.Identifier)
if sv != nil {
sv.populateFullStructMaskForStruct(mask, flatten, getTypeShape, getFieldShape, resolveStruct)
}
}
}
// PaddingMarker describes one aligned segment of a struct that contains
// padding bytes.
type PaddingMarker struct {
// Offset into the struct (0 is the start of the struct).
Offset int
// Mask, where a 1-bit means the bit in the input value should be zero.
Mask []byte
}
// buildPaddingMarkers computes the padding mask for s and splits it into
// aligned integer-sized segments: 8-byte, then 4-byte, then 2-byte strides,
// the wider strides gated by the struct's alignment. Only segments that
// actually contain padding are emitted, and each emitted segment is zeroed
// so a narrower stride never re-reports it.
// e.g. 00ffff0000ffff000000000000000000 -> 00ffff0000ffff00, 0000000000000000
//  -> []PaddingMarker{0, []byte{0x00, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0x00}}
func (s Struct) buildPaddingMarkers(flatten bool, getTypeShape func(Struct) TypeShape, getFieldShape func(StructMember) FieldShape, resolveStruct func(identifier EncodedCompoundIdentifier) *Struct) []PaddingMarker {
	var paddingMarkers []PaddingMarker
	// Construct a mask across the whole struct with 0xff bytes where there is padding.
	fullStructMask := make([]byte, getTypeShape(s).InlineSize)
	s.populateFullStructMaskForStruct(fullStructMask, flatten, getTypeShape, getFieldShape, resolveStruct)
	// extractNonzeroSliceOffsets returns the start offset of every
	// stride-aligned segment of the mask containing at least one 0xff byte.
	extractNonzeroSliceOffsets := func(stride int) []int {
		var offsets []int
		for endi := stride - 1; endi < len(fullStructMask); endi += stride {
			i := endi - (stride - 1)
			if bytes.Contains(fullStructMask[i:i+stride], []byte{0xff}) {
				offsets = append(offsets, i)
			}
		}
		return offsets
	}
	// appendMarkers emits one PaddingMarker per non-zero stride-aligned
	// segment, zeroing each segment afterwards so the next (narrower)
	// stride pass does not emit it again.
	appendMarkers := func(stride int) {
		for _, i := range extractNonzeroSliceOffsets(stride) {
			seg := fullStructMask[i : i+stride]
			m := make([]byte, stride)
			copy(m, seg)
			paddingMarkers = append(paddingMarkers, PaddingMarker{
				Offset: i,
				Mask:   m,
			})
			for j := range seg { // reset the buffer for the next pass
				seg[j] = 0
			}
		}
	}
	if getTypeShape(s).Alignment >= 8 {
		appendMarkers(8)
	}
	if getTypeShape(s).Alignment >= 4 {
		appendMarkers(4)
	}
	appendMarkers(2)
	if bytes.Contains(fullStructMask, []byte{0xff}) {
		// This shouldn't be possible because it requires an alignment 1 struct to have padding.
		panic(fmt.Sprintf("expected mask to be zero, was %v", fullStructMask))
	}
	return paddingMarkers
}
// WireFormatVersion identifies a FIDL wire format revision.
type WireFormatVersion int
const (
// Skip 0 so the zero value is not a valid version.
_ = iota
WireFormatVersionV1
WireFormatVersionV2
)
// getTypeShapeFunc returns an accessor selecting a Struct's TypeShape for
// the requested wire format version. It panics on an unknown version.
func getTypeShapeFunc(wireFormatVersion WireFormatVersion) func(Struct) TypeShape {
	switch wireFormatVersion {
	case WireFormatVersionV1:
		return func(s Struct) TypeShape { return s.TypeShapeV1 }
	case WireFormatVersionV2:
		return func(s Struct) TypeShape { return s.TypeShapeV2 }
	}
	panic("unknown wire format version")
}
// getFieldShapeFunc returns an accessor selecting a StructMember's
// FieldShape for the requested wire format version. It panics on an
// unknown version.
func getFieldShapeFunc(wireFormatVersion WireFormatVersion) func(StructMember) FieldShape {
	switch wireFormatVersion {
	case WireFormatVersionV1:
		return func(s StructMember) FieldShape { return s.FieldShapeV1 }
	case WireFormatVersionV2:
		return func(s StructMember) FieldShape { return s.FieldShapeV2 }
	}
	panic("unknown wire format version")
}
// BuildPaddingMarkers returns the padding markers of s itself (without
// recursing into nested types) for the given wire format version.
func (s Struct) BuildPaddingMarkers(wireFormatVersion WireFormatVersion) []PaddingMarker {
return s.buildPaddingMarkers(false, getTypeShapeFunc(wireFormatVersion), getFieldShapeFunc(wireFormatVersion), nil)
}
// BuildFlattenedPaddingMarkers returns the padding markers of s including
// padding inside nested structs and arrays, resolved via resolveStruct.
func (s Struct) BuildFlattenedPaddingMarkers(wireFormatVersion WireFormatVersion, resolveStruct func(identifier EncodedCompoundIdentifier) *Struct) []PaddingMarker {
return s.buildPaddingMarkers(true, getTypeShapeFunc(wireFormatVersion), getFieldShapeFunc(wireFormatVersion), resolveStruct)
} | tools/fidl/lib/fidlgen/struct.go | 0.608129 | 0.418103 | struct.go | starcoder
package levels
import (
"sort"
mgl "github.com/go-gl/mathgl/mgl32"
"github.com/inkyblackness/hacked/ss1/content/archive/level"
)
// hoverItem is a selectable entity under the cursor: a map tile or an
// object placed on the map.
type hoverItem interface {
Pos() MapPosition
Size() float32
IsIn(lvl *level.Level) bool
}
// tileHoverItem represents a hovered map tile.
type tileHoverItem struct {
pos MapPosition
}
// Pos returns the item's position.
func (item tileHoverItem) Pos() MapPosition {
return item.pos
}
// Size returns the selection extent: a full tile side.
func (item tileHoverItem) Size() float32 {
return level.FineCoordinatesPerTileSide
}
// IsIn reports whether the tile exists in the given level.
func (item tileHoverItem) IsIn(lvl *level.Level) bool {
return lvl.Tile(item.pos.Tile()) != nil
}
// objectHoverItem represents a hovered level object.
type objectHoverItem struct {
id level.ObjectID
pos MapPosition
}
// Pos returns the object's position at the time it was captured.
func (item objectHoverItem) Pos() MapPosition {
return item.pos
}
// Size returns the selection extent: a quarter tile side.
func (item objectHoverItem) Size() float32 {
return level.FineCoordinatesPerTileSide / 4
}
// IsIn reports whether the object still exists in the level, is in use,
// and has not moved since it was captured.
func (item objectHoverItem) IsIn(lvl *level.Level) bool {
obj := lvl.Object(item.id)
return (obj != nil) && (obj.InUse != 0) && (item.pos.X == obj.X) && (item.pos.Y == obj.Y)
}
// hoverItems tracks the hover candidates at the current cursor position
// and which of them is active.
type hoverItems struct {
available []hoverItem // candidates, sorted closest-first by find
activeIndex int // index of activeItem within available
activeItem hoverItem // nil when there are no candidates
}
// reset clears the candidate list and the active selection.
func (items *hoverItems) reset() {
	// The zero value clears all three fields (available, activeIndex, activeItem).
	*items = hoverItems{}
}
// scroll moves the active selection one step forward or backward through
// the available candidates, wrapping around at both ends. It is a no-op
// when there are no candidates.
func (items *hoverItems) scroll(forward bool) {
count := len(items.available)
if count > 0 {
diff := 1
if !forward {
diff = -1
}
// Adding count before the modulo keeps the index non-negative when
// stepping backward from index 0.
items.activeIndex = (count + (items.activeIndex + diff)) % count
items.activeItem = items.available[items.activeIndex]
}
}
// find rebuilds the list of hover candidates for the given map position:
// every object within a quarter tile of pos, plus the tile under pos itself
// (given a sentinel distance of one full tile so it sorts after any nearby
// object). The closest candidate becomes the active item.
//
// Fix: the previous version sorted items.available with sort.Slice while
// reading keys from a separate, never-swapped distances slice; once the
// sort moves elements, comparator indices no longer match their distances.
// Pairing each item with its distance keeps the keys attached during the sort.
func (items *hoverItems) find(lvl *level.Level, pos MapPosition) {
	refVec := mgl.Vec2{float32(pos.X), float32(pos.Y)}
	type candidate struct {
		item hoverItem
		dist float32
	}
	var candidates []candidate
	lvl.ForEachObject(func(id level.ObjectID, entry level.ObjectMainEntry) {
		entryVec := mgl.Vec2{float32(entry.X), float32(entry.Y)}
		distance := refVec.Sub(entryVec).Len()
		if distance < level.FineCoordinatesPerTileSide/4 {
			candidates = append(candidates, candidate{
				item: objectHoverItem{
					id:  id,
					pos: MapPosition{X: entry.X, Y: entry.Y},
				},
				dist: distance,
			})
		}
	})
	candidates = append(candidates, candidate{
		item: tileHoverItem{
			pos: MapPosition{
				X: level.CoordinateAt(pos.X.Tile(), level.FineCoordinatesPerTileSide/2),
				Y: level.CoordinateAt(pos.Y.Tile(), level.FineCoordinatesPerTileSide/2),
			},
		},
		dist: level.FineCoordinatesPerTileSide,
	})
	sort.Slice(candidates, func(a, b int) bool { return candidates[a].dist < candidates[b].dist })
	items.available = make([]hoverItem, len(candidates))
	for i, c := range candidates {
		items.available[i] = c.item
	}
	items.activeIndex = 0
	items.activeItem = items.available[items.activeIndex]
}
// validate drops candidates that no longer exist in the level, keeping the
// active selection stable where possible. If the active item itself was
// removed, index 0 (the closest remaining candidate) becomes active, or
// activeItem becomes nil when nothing is left.
func (items *hoverItems) validate(lvl *level.Level) {
allValid := true
for _, item := range items.available {
if !item.IsIn(lvl) {
allValid = false
break
}
}
// Fast path: nothing changed, keep the current selection untouched.
if allValid {
return
}
newItems := make([]hoverItem, 0, len(items.available))
for index, item := range items.available {
isActive := index == items.activeIndex
if item.IsIn(lvl) {
if isActive {
// Re-point the active index at the item's new position.
items.activeIndex = len(newItems)
}
newItems = append(newItems, item)
} else if isActive {
// The active item vanished; fall back to the first candidate.
items.activeIndex = 0
}
}
items.available = newItems
items.activeItem = nil
if items.activeIndex < len(items.available) {
items.activeItem = items.available[items.activeIndex]
}
} | editor/levels/HoverItems.go | 0.554229 | 0.402686 | HoverItems.go | starcoder
package blockpool
import (
"bytes"
"errors"
"fmt"
"chainmaker.org/chainmaker/common/v2/queue"
"chainmaker.org/chainmaker/pb-go/v2/common"
)
//BlockNode stores one block together with the hashes of its child blocks.
type BlockNode struct {
block *common.Block
children []string // the blockHash with children's block
}
//GetBlock returns the node's block.
func (bn *BlockNode) GetBlock() *common.Block {
return bn.block
}
//GetChildren returns the hashes of the node's child blocks.
func (bn *BlockNode) GetChildren() []string {
return bn.children
}
//BlockTree maintains a consistent block tree of parent and children links.
//This struct is not thread safe; callers must synchronize access.
type BlockTree struct {
idToNode map[string]*BlockNode // store block and its' children blockHash
heightToBlocks map[uint64][]*common.Block // all known blocks grouped by height
rootBlock *common.Block // The latest block is committed to the chain
prunedBlocks []string // Caches the block hash that will be deleted
maxPrunedSize int // The maximum number of cached blocks that will be deleted
}
//NewBlockTree creates a block tree rooted at rootBlock. maxPrunedSize
//bounds the cache of block hashes whose actual deletion is deferred by
//PruneBlock.
func NewBlockTree(rootBlock *common.Block, maxPrunedSize int) *BlockTree {
blockTree := &BlockTree{
idToNode: make(map[string]*BlockNode, 10),
rootBlock: rootBlock,
prunedBlocks: make([]string, 0, maxPrunedSize),
maxPrunedSize: maxPrunedSize,
heightToBlocks: make(map[uint64][]*common.Block),
}
blockTree.idToNode[string(rootBlock.Header.BlockHash)] = &BlockNode{
block: rootBlock,
children: make([]string, 0),
}
blockTree.heightToBlocks[rootBlock.Header.BlockHeight] = append(
blockTree.heightToBlocks[rootBlock.Header.BlockHeight], rootBlock)
return blockTree
}
//InsertBlock adds block to the tree as a child of its parent. It is a
//no-op when the block's hash is already present, and it fails when block
//is nil or its parent is not (or no longer) in the tree.
func (bt *BlockTree) InsertBlock(block *common.Block) error {
if block == nil {
return errors.New("block is nil")
}
if _, exist := bt.idToNode[string(block.Header.BlockHash)]; exist {
return nil
}
if _, exist := bt.idToNode[string(block.Header.PreBlockHash)]; !exist {
return errors.New("block's parent not exist")
}
bt.idToNode[string(block.Header.BlockHash)] = &BlockNode{
block: block,
children: make([]string, 0),
}
preBlock := bt.idToNode[string(block.Header.PreBlockHash)]
preBlock.children = append(preBlock.children, string(block.Header.BlockHash))
bt.heightToBlocks[block.Header.BlockHeight] = append(bt.heightToBlocks[block.Header.BlockHeight], block)
return nil
}
//GetRootBlock returns the current root of the tree.
func (bt *BlockTree) GetRootBlock() *common.Block {
	return bt.rootBlock
}

//GetBlockByID returns the block with the given hash, or nil when unknown.
func (bt *BlockTree) GetBlockByID(id string) *common.Block {
	node, ok := bt.idToNode[id]
	if !ok {
		return nil
	}
	return node.GetBlock()
}
//BranchFromRoot returns the chain of blocks from just above the tree root
//down to block (inclusive), ordered root-side first. It returns nil when
//block does not connect back to the current root.
func (bt *BlockTree) BranchFromRoot(block *common.Block) []*common.Block {
if block == nil {
return nil
}
var (
cur = block
branch []*common.Block
)
//use block height to check
// Walk parent links until reaching the root's height, collecting blocks.
for cur.Header.BlockHeight > bt.rootBlock.Header.BlockHeight {
branch = append(branch, cur)
if cur = bt.GetBlockByID(string(cur.Header.PreBlockHash)); cur == nil {
break
}
}
// The walk must terminate exactly at the root; otherwise the block sits
// on a detached branch.
if cur == nil || !bytes.Equal(cur.Header.BlockHash, bt.rootBlock.Header.BlockHash) {
return nil
}
// Reverse in place: blocks were collected leaf-to-root.
for i, j := 0, len(branch)-1; i < j; i, j = i+1, j-1 {
branch[i], branch[j] = branch[j], branch[i]
}
return branch
}
//PruneBlock prunes the branches that conflict with newRootID, promotes the
//block identified by newRootID to be the new root, and returns the block
//hashes actually removed from the tree in this call. Removal is deferred:
//pruned hashes are cached and only cleaned once the cache exceeds
//maxPrunedSize, oldest first.
func (bt *BlockTree) PruneBlock(newRootID string) ([]string, error) {
	toPruned := bt.findBlockToPrune(newRootID)
	if toPruned == nil {
		return nil, nil
	}
	newRootBlock := bt.GetBlockByID(newRootID)
	if newRootBlock == nil {
		return nil, nil
	}
	bt.rootBlock = newRootBlock
	bt.prunedBlocks = append(bt.prunedBlocks, toPruned...)
	var pruned []string
	if len(bt.prunedBlocks) > bt.maxPrunedSize {
		num := len(bt.prunedBlocks) - bt.maxPrunedSize
		for i := 0; i < num; i++ {
			bt.cleanBlock(bt.prunedBlocks[i])
			pruned = append(pruned, bt.prunedBlocks[i])
		}
		bt.prunedBlocks = bt.prunedBlocks[num:]
	}
	return pruned, nil
}
//findBlockToPrune walks the tree breadth-first from the current root and
//collects the hash of every node outside the subtree rooted at newRootID.
//Note that ancestors of newRootID (including the old root) are collected
//too; only newRootID's own subtree survives. Returns nil when newRootID is
//empty or already the root.
func (bt *BlockTree) findBlockToPrune(newRootID string) []string {
if newRootID == "" || newRootID == string(bt.rootBlock.Header.BlockHash) {
return nil
}
var (
toPruned []string
toPrunedQueue = queue.NewLinkedQueue()
)
toPrunedQueue.PushBack(string(bt.rootBlock.Header.BlockHash))
for !toPrunedQueue.IsEmpty() {
var (
curID string
ok bool
)
if curID, ok = toPrunedQueue.PollFront().(string); !ok {
return nil
}
curNode := bt.idToNode[curID]
for _, child := range curNode.GetChildren() {
if child == newRootID {
continue //save this branch
}
toPrunedQueue.PushBack(child)
}
toPruned = append(toPruned, curID)
}
return toPruned
}
//cleanBlock removes the block identified by blockId from the tree.
//
//Fix: only the matching entry is removed from heightToBlocks. Previously
//the whole height bucket was deleted, which also discarded sibling forks
//at the same height that were still present in idToNode, leaving the two
//indexes inconsistent and GetBlocks empty for that height.
func (bt *BlockTree) cleanBlock(blockId string) {
	node := bt.idToNode[blockId]
	delete(bt.idToNode, blockId)
	if node == nil {
		return
	}
	height := node.block.Header.BlockHeight
	blks := bt.heightToBlocks[height]
	for i, b := range blks {
		if string(b.Header.BlockHash) == blockId {
			bt.heightToBlocks[height] = append(blks[:i], blks[i+1:]...)
			break
		}
	}
	// Drop the bucket entirely once it is empty.
	if len(bt.heightToBlocks[height]) == 0 {
		delete(bt.heightToBlocks, height)
	}
}
// GetBlocks returns all known blocks at the given height (nil when none).
func (bt *BlockTree) GetBlocks(height uint64) []*common.Block {
return bt.heightToBlocks[height]
}
// Details renders a human-readable summary of the tree for debugging.
// NOTE: map iteration order is unspecified, so line order varies per call.
func (bt *BlockTree) Details() string {
blkContents := bytes.NewBufferString(fmt.Sprintf("BlockTree blockNum: %d\n", len(bt.idToNode)))
for _, blks := range bt.heightToBlocks {
for _, blk := range blks {
blkContents.WriteString(fmt.Sprintf("blkID: %x, blockHeight:%d\n", blk.Header.BlockHash, blk.Header.BlockHeight))
}
}
return blkContents.String()
} | module/consensus/chainedbft/block_pool/block_tree.go | 0.589953 | 0.405566 | block_tree.go | starcoder
package visualize
import (
"github.com/go-gl/gl/v3.3-core/gl"
)
// Type RenderObject is used to concisely represent information necessary to
// perform a 2 dimensional textured render. The shader program and texture
// are created elsewhere and are not owned (Destroy leaves them alive).
type RenderObject struct {
shaderProgram uint32 // The shader program.
texture uint32 // The texture.
vao uint32 // The Vertex Array Object.
vbo uint32 // The Vertex Buffer Object
ebo uint32 // The ordering of the vertices.
}
// Creating a RenderObject with a given shaderProgram, texture, and set of
// vertices. Vertices are expected as interleaved x, y, u, v float32
// quadruples (see the attribute pointers below), and the element buffer
// draws the first four vertices as a two-triangle quad.
func CreateRenderObject(shaderProgram ShaderProgram, texture Texture, vertices []float32) *RenderObject {
renderObject := new(RenderObject)
// Creating the basic information.
renderObject.shaderProgram = uint32(shaderProgram)
renderObject.texture = uint32(texture)
gl.GenVertexArrays(1, &renderObject.vao)
gl.GenBuffers(1, &renderObject.vbo)
gl.GenBuffers(1, &renderObject.ebo)
// Filling the RenderObject with information. Sizes are in bytes, hence
// the *4 multipliers (float32/uint32 are 4 bytes each).
gl.BindVertexArray(renderObject.vao)
gl.BindBuffer(gl.ARRAY_BUFFER, renderObject.vbo)
gl.BufferData(gl.ARRAY_BUFFER, len(vertices)*4, gl.Ptr(vertices), gl.STATIC_DRAW)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, renderObject.ebo)
vertOrder := []uint32{
0, 1, 2,
2, 3, 0,
}
gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(vertOrder)*4, gl.Ptr(vertOrder), gl.STATIC_DRAW)
// Loading up vertex attributes: 2 position floats at offset 0 of each
// 16-byte (4-float) vertex.
vertAttrib := uint32(gl.GetAttribLocation(renderObject.shaderProgram, gl.Str("vert\x00")))
gl.EnableVertexAttribArray(vertAttrib)
gl.VertexAttribPointer(vertAttrib, 2, gl.FLOAT, false, 4*4, gl.PtrOffset(0))
// Loading up texture attributes: 2 texture-coordinate floats at byte offset 8.
texAttrib := uint32(gl.GetAttribLocation(renderObject.shaderProgram, gl.Str("vertTexCoord\x00")))
gl.EnableVertexAttribArray(texAttrib)
gl.VertexAttribPointer(texAttrib, 2, gl.FLOAT, false, 4*4, gl.PtrOffset(2*4))
return renderObject
}
// Destroying the resources of a RenderObject: the VAO and both buffers.
// The shader program and texture are not deleted here.
func (renderObject *RenderObject) Destroy() {
gl.DeleteVertexArrays(1, &renderObject.vao)
gl.DeleteBuffers(1, &renderObject.vbo)
gl.DeleteBuffers(1, &renderObject.ebo)
}
// Render draws the object: binds its VAO/VBO/EBO, activates the shader and
// the texture on unit 0, and issues an indexed draw of the 6-index quad.
func (renderObject *RenderObject) Render() {
gl.BindVertexArray(renderObject.vao)
gl.BindBuffer(gl.ARRAY_BUFFER, renderObject.vbo)
gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, renderObject.ebo)
gl.UseProgram(renderObject.shaderProgram)
// Binding the texture.
gl.ActiveTexture(gl.TEXTURE0)
gl.BindTexture(gl.TEXTURE_2D, renderObject.texture)
gl.BindFragDataLocation(renderObject.shaderProgram, 0, gl.Str("outputColor\x00"))
// Drawing the object.
gl.DrawElements(gl.TRIANGLES, 6, gl.UNSIGNED_INT, nil)
} | visualize/renderobject.go | 0.734405 | 0.538619 | renderobject.go | starcoder
package interpreter
import (
"fmt"
"io/ioutil"
"log"
"strings"
)
// memorySize is the number of cells on the interpreter's memory tape.
const memorySize int = 30000

// stack is a simple LIFO of ints, used to match '[' / ']' bracket pairs.
type stack []int

// push returns the stack with v placed on top.
func (s stack) push(v int) stack {
	return append(s, v)
}

// pop removes the top element, returning the remaining stack and that
// element. Popping an empty stack aborts the process.
func (s stack) pop() (stack, int) {
	n := len(s)
	if n == 0 {
		log.Fatal("Popping empty stack!")
	}
	return s[:n-1], s[n-1]
}
// Interpreter represents a Brainf--k interpreter containing
// the program and memory which may be run.
type Interpreter struct {
Program []byte // filtered command bytes only (set by LoadProgram)
ProgramPosition int // instruction pointer into Program
Memory [memorySize]uint8 // tape; the pointer starts at the middle cell
MemoryPosition int // data pointer into Memory
bracketMap map[int]int // '[' index <-> matching ']' index, both directions
Output strings.Builder // program output (presumably written by '.'; Clock's handling is elsewhere)
Input string // program input (presumably consumed by ','; Clock's handling is elsewhere)
}
// GetProperMemoryPosition returns the corrected
// memory position from the middle position of the memory array.
func (ipr *Interpreter) GetProperMemoryPosition() int {
return (ipr.MemoryPosition - (memorySize / 2))
}
// GetProperMemoryValue returns a value from memory
// using a corrected memory position.
func (ipr *Interpreter) GetProperMemoryValue(pos int) int {
return int(ipr.Memory[pos+(memorySize/2)])
}
// LoadFromFile loads a program to the interpreter from
// a file path.
func (ipr *Interpreter) LoadFromFile(path string) {
data, err := ioutil.ReadFile(path)
if err != nil {
log.Fatal(err)
}
ipr.LoadProgram(data)
}
// LoadProgram loads a program to the interpreter and
// builds a bracket map.
//
// Any previously loaded program is discarded (the original appended to the
// old Program while resetting positions, corrupting repeated loads); memory
// contents are kept. Non-command bytes are stripped: only ><+-[]., survive.
// An unmatched ']' aborts via log.Fatal (inside pop); an unmatched '[' is
// silently left without a partner in bracketMap.
func (ipr *Interpreter) LoadProgram(data []byte) {
	ipr.Program = nil // reset so LoadProgram can be called more than once
	ipr.MemoryPosition = (memorySize / 2)
	ipr.bracketMap = make(map[int]int)
	var tempStack stack
	for i := 0; i < len(data); i++ {
		if data[i] == '>' || data[i] == '<' || data[i] == '+' ||
			data[i] == '-' || data[i] == '[' || data[i] == ']' || data[i] == '.' ||
			data[i] == ',' {
			ipr.Program = append(ipr.Program, data[i])
		}
		if data[i] == '[' {
			// The '[' was just appended, so its index is len(Program)-1.
			tempStack = tempStack.push(len(ipr.Program) - 1)
		} else if data[i] == ']' {
			var beginning int
			tempStack, beginning = tempStack.pop()
			ipr.bracketMap[beginning] = len(ipr.Program) - 1
			ipr.bracketMap[len(ipr.Program)-1] = beginning
		}
	}
	ipr.ProgramPosition = 0
}
// Run runs the program until it ends (Clock returns false).
func (ipr *Interpreter) Run() {
	for ipr.Clock() {
	}
}
// IsEnded returns whether the program has
// reached the end (no instructions left to execute) or not.
func (ipr *Interpreter) IsEnded() bool {
	return ipr.ProgramPosition > len(ipr.Program)-1
}
// Clock runs one cycle/tick of the interpreter: it executes the instruction
// at ProgramPosition and advances. It returns false when the program ends.
func (ipr *Interpreter) Clock() bool {
	if ipr.IsEnded() {
		return false
	}
	switch ipr.Program[ipr.ProgramPosition] {
	case '>':
		// Move the data pointer right, wrapping around the end of the tape.
		if ipr.MemoryPosition == len(ipr.Memory)-1 {
			ipr.MemoryPosition = 0
		} else {
			ipr.MemoryPosition++
		}
	case '<':
		// Move the data pointer left, wrapping around the start of the tape.
		if ipr.MemoryPosition == 0 {
			ipr.MemoryPosition = len(ipr.Memory) - 1
		} else {
			ipr.MemoryPosition--
		}
	case '+':
		ipr.Memory[ipr.MemoryPosition]++ // uint8 arithmetic wraps 255 -> 0
	case '-':
		ipr.Memory[ipr.MemoryPosition]-- // uint8 arithmetic wraps 0 -> 255
	case '.':
		// Print the current cell and also record it in Output.
		fmt.Printf("%c", ipr.Memory[ipr.MemoryPosition])
		ipr.Output.WriteByte(byte(ipr.Memory[ipr.MemoryPosition]))
	case ',':
		// Prefer canned Input; fall back to interactive stdin.
		if len(ipr.Input) > 0 {
			ipr.Memory[ipr.MemoryPosition] = uint8(ipr.Input[0])
			ipr.Input = ipr.Input[1:]
		} else {
			// NOTE(review): Scanf("%c") into a *uint8 may not be a supported
			// verb/argument pairing — confirm this reads a byte as intended.
			fmt.Scanf("%c", &ipr.Memory[ipr.MemoryPosition])
		}
	case '[':
		// Jump forward to the matching ']' when the cell is zero.
		if ipr.Memory[ipr.MemoryPosition] == 0 {
			ipr.ProgramPosition = ipr.bracketMap[ipr.ProgramPosition]
		}
	case ']':
		// Jump back to the matching '[' when the cell is non-zero.
		if ipr.Memory[ipr.MemoryPosition] != 0 {
			ipr.ProgramPosition = ipr.bracketMap[ipr.ProgramPosition]
		}
	}
	ipr.ProgramPosition++ // jump targets above rely on this final increment
	return true
} | interpreter/interpreter.go | 0.574753 | 0.405566 | interpreter.go | starcoder
package tst
// TernarySearchTree represents a Ternary Search Tree mapping byte-string
// prefixes to arbitrary values.
// NOTE: the doc comments in the original were adapted from container/list;
// the misleading "list" wording has been corrected below. The zero value is
// not ready to use until Init (or New) has run — see lazyInit.
type TernarySearchTree struct {
	root Element // sentinel element; only &root and its left/middle/right links are used
	len int // current number of stored entries, excluding the sentinel
}
// Init initializes or clears tree l and returns it.
func (l *TernarySearchTree) Init() *TernarySearchTree {
	l.root.left = &l.root
	l.root.middle = &l.root
	l.root.right = &l.root
	l.root.tree = l
	l.len = 0
	return l
}
// New returns an initialized, empty TernarySearchTree.
func New() *TernarySearchTree {
	return (&TernarySearchTree{}).Init()
}
// Len returns the number of elements of tree l.
// The complexity is O(1).
func (l *TernarySearchTree) Len() int { return l.len }
// Left returns the root's left child element, or nil if the tree is empty.
func (l *TernarySearchTree) Left() *Element {
	if l.len == 0 {
		return nil
	}
	return l.root.left
}
// Middle returns the root's middle child element, or nil if the tree is empty.
func (l *TernarySearchTree) Middle() *Element {
	if l.len == 0 {
		return nil
	}
	return l.root.middle
}
// Right returns the root's right child element, or nil if the tree is empty.
func (l *TernarySearchTree) Right() *Element {
	if l.len == 0 {
		return nil
	}
	return l.root.right
}
// lazyInit lazily initializes a zero TernarySearchTree value.
func (l *TernarySearchTree) lazyInit() {
	if l.root.right == nil {
		l.Init()
	}
}
// TraversalPreOrderFunc walks the tree pre-order, invoking f with each
// stored prefix (converted to string) and its value; the walk can be cut
// short by returning false from f, and that final result is returned.
func (l *TernarySearchTree) TraversalPreOrderFunc(f func(prefix string, value interface{}) (goon bool)) (goon bool) {
	return l.root.TraversalPreOrderFunc(func(pre []byte, v interface{}) (goon bool) {
		return f(string(pre), v)
	})
}
// TraversalInOrderFunc walks the tree in-order with the same contract as
// TraversalPreOrderFunc.
func (l *TernarySearchTree) TraversalInOrderFunc(f func(prefix string, value interface{}) (goon bool)) (goon bool) {
	return l.root.TraversalInOrderFunc(func(pre []byte, v interface{}) (goon bool) {
		return f(string(pre), v)
	})
}
// TraversalPostOrderFunc walks the tree post-order with the same contract as
// TraversalPreOrderFunc.
func (l *TernarySearchTree) TraversalPostOrderFunc(f func(prefix string, value interface{}) (goon bool)) (goon bool) {
	return l.root.TraversalPostOrderFunc(func(pre []byte, v interface{}) (goon bool) {
		return f(string(pre), v)
	})
}
// Get returns the value stored under prefix and whether it was found.
func (l *TernarySearchTree) Get(prefix string) (value interface{}, ok bool) {
	return l.root.Get([]byte(prefix))
}
// Contains reports whether prefix is stored in the tree.
func (l *TernarySearchTree) Contains(prefix string) bool {
	return l.root.Contains([]byte(prefix))
}
// Insert stores value under prefix.
// NOTE(review): len is incremented unconditionally; if Element.Insert
// overwrites an already-present prefix, Len() over-counts — verify against
// Element.Insert's semantics.
func (l *TernarySearchTree) Insert(prefix string, value interface{}) {
	l.root.Insert([]byte(prefix), value)
	l.len++
}
// Remove deletes prefix, returning the removed value and whether it existed.
func (l *TernarySearchTree) Remove(prefix string) (value interface{}, ok bool) {
	value, ok = l.root.Remove([]byte(prefix))
	if ok {
		l.len--
	}
	return value, ok
}
// String returns a textual rendering of the tree (delegated to the root element).
func (l *TernarySearchTree) String() string {
	return l.root.String()
} | container/tst/tst.go | 0.810441 | 0.425426 | tst.go | starcoder
package main
/*
cantor pair function reference
https://en.wikipedia.org/wiki/Cantor_function
https://en.wikipedia.org/wiki/Pairing_function
https://gist.github.com/hannesl/8031402
*/
import (
"fmt"
"math"
"math/big"
)
// main demonstrates Cantor pairing: it pairs (9,1) and (6,8), pairs the two
// results together, then unwinds all three pairings via cantor_pair_reverse.
// All intermediate values are printed by the called functions.
func main() {
	a1 := cantor_pair_calculate(9, 1) // first pair 1
	a2 := cantor_pair_calculate(6, 8) // second pair
	a3 := cantor_pair_calculate(a1, a2) // third pair = first , second
	x1, x2 := cantor_pair_reverse(a3) // reverse third pair
	cantor_pair_reverse(x1) // reverse first pair
	cantor_pair_reverse(x2) // reverse second pair
	return // redundant bare return at end of func; kept as-is
}
func cantor_pair_calculate2(x *big.Int, y *big.Int) *big.Int {
//result := ((x+y)*(x+y+1))/2 + y
x.Add(x, y).Mul(x, new(big.Int).Add(x, big.NewInt(1))).Div(x, big.NewInt(2)).Add(x, y)
fmt.Println(x)
return x
}
/**
 * Return the source integers from a cantor pair integer.
 *
 * NOTE(review): this big.Float port looks unfinished/broken and should not
 * be trusted:
 *   - the computed t is printed and then discarded — it is overwritten with
 *     the constant 24 (presumably leftover debugging);
 *   - no floor() is ever applied to t, unlike the int64 reference version;
 *   - x applies the t*(t+3)/2 - z formula twice through mutation of tx;
 *   - y ADDS z to t*(t+1)/2 instead of subtracting from z.
 * cantor_pair_reverse (the float64 version in this file) is the working
 * reference implementation.
 */
func cantor_pair_reverse2(z *big.Int) {
	negOne := big.NewFloat(-1)
	eight := big.NewFloat(8)
	one := big.NewFloat(1)
	two := big.NewFloat(2)
	three := big.NewFloat(3)
	zFloat := new(big.Float)
	zFloat.SetString(z.String())
	//t := math.floor((-1 + math.sqrt(1+8*z)) / 2)
	zFloat8 := new(big.Float).Mul(zFloat, eight)
	z81 := new(big.Float).Add(one, zFloat8)
	sqrtZ81 := new(big.Float).Sqrt(z81)
	sqrtZ81S1 := new(big.Float).Add(negOne, sqrtZ81)
	fmt.Println("z81.Sqrt(z81).Quo(z81, two)=", sqrtZ81S1)
	t := new(big.Float).Quo(sqrtZ81S1, two)
	fmt.Println("t:", t)
	t = big.NewFloat(24) // NOTE(review): overwrites the computed t — debug leftover?
	//x := t*(t+3)/2 - float64(z)
	tx := new(big.Float)
	tx.SetString(t.String())
	t3 := new(big.Float).Add(tx, three)
	tx.Mul(tx, t3).Quo(tx, two)
	fmt.Println("t*(t+3)/2:", tx)
	tx.Sub(tx, zFloat)
	// NOTE(review): the formula is re-applied here to the already-computed tx.
	x := tx.Mul(tx, new(big.Float).Add(tx, three).Quo(tx, two))
	x.Sub(x, zFloat)
	//y := z - t*(t+1)/2
	ty := new(big.Float)
	ty.SetString(t.String())
	// NOTE(review): this adds zFloat instead of subtracting from it.
	y := ty.Add(zFloat, ty.Mul(new(big.Float).Add(ty, one), t).Quo(ty, two))
	fmt.Println(x, y)
}
// cantor_pair_calculate returns the Cantor pairing of two non-negative
// integers: ((x+y)*(x+y+1))/2 + y. The pairing maps every pair to a unique
// integer and is inverted by cantor_pair_reverse. The inputs and the result
// are echoed to stdout.
// (Ported from the PHP gist: https://gist.github.com/hannesl/8031402;
// see also https://en.wikipedia.org/wiki/Pairing_function.)
func cantor_pair_calculate(x int64, y int64) int64 {
	fmt.Println(x, y)
	sum := x + y
	paired := sum*(sum+1)/2 + y
	fmt.Println(paired)
	return paired
}
/**
 * Return the source integers from a cantor pair integer.
 * Inverts cantor_pair_calculate: t = floor((-1+sqrt(1+8z))/2) is the index
 * of the diagonal containing z, from which x and y are recovered.
 * The recovered pair is echoed to stdout.
 */
func cantor_pair_reverse(z int64) (int64, int64) {
	t := math.Floor((-1 + math.Sqrt(1+8*float64(z))) / 2)
	x := t*(t+3)/2 - float64(z)
	y := float64(z) - t*(t+1)/2
	fmt.Println(x, y)
	return int64(x), int64(y)
} | cantor-pair-function/main.go | 0.779783 | 0.629333 | main.go | starcoder
package data
import (
"math"
"github.com/calummccain/coxeter/vector"
)
const (
eVal43n = 4.0
pVal43n = 6.0
eVal43nTrunc = 4.0
pVal43nTrunc = 11.465072284 // math.Pi / math.Atan(math.Sqrt(1.0/(7.0+4.0*Rt2)))
eVal43nRect = 4.0
pVal43nRect = 1e100 //∞
)
func GoursatTetrahedron43n(n float64) GoursatTetrahedron {
cot := 1.0 / math.Pow(math.Tan(math.Pi/n), 2.0)
cf := 2.0 * cot / (1 + cot)
ce := cot
fe := 0.5 * (1 + cot)
var cv, fv, ev float64
if math.Abs(n-pVal43n) < BoundaryEps {
cv = 3.0
fv = 2.0
ev = 1.0
} else {
cv = 2.0 * cot / (3.0 - cot)
fv = (1.0 + cot) / (3.0 - cot)
ev = 2.0 / (3.0 - cot)
}
return GoursatTetrahedron{
V: vector.Vec4{W: 1, X: 1, Y: 1, Z: 1},
E: vector.Vec4{W: 1, X: 1, Y: 1, Z: 0},
F: vector.Vec4{W: 1, X: 1, Y: 0, Z: 0},
C: vector.Vec4{W: 1, X: 0, Y: 0, Z: 0},
CFE: vector.Vec4{W: 0, X: 0, Y: 0, Z: 1},
CFV: vector.Vec4{W: 0, X: 0, Y: 1, Z: -1},
CEV: vector.Vec4{W: 0, X: 1, Y: -1, Z: 0},
FEV: vector.Vec4{W: cot - 1.0, X: 2.0 * cot, Y: 0, Z: 0},
CF: cf,
CE: ce,
CV: cv,
FE: fe,
FV: fv,
EV: ev,
}
}
func Coxeter43n(n float64) Coxeter {
//cos := math.Pow(math.Cos(math.Pi/n), 2.0)
cn := math.Cos(2.0 * math.Pi / n)
// sin := 1.0 - cos
return Coxeter{
P: 4.0,
Q: 3.0,
R: n,
A: func(v vector.Vec4) vector.Vec4 { return vector.Vec4{W: v.W, X: v.X, Y: v.Y, Z: -v.Z} },
B: func(v vector.Vec4) vector.Vec4 { return vector.Vec4{W: v.W, X: v.X, Y: v.Z, Z: v.Y} },
C: func(v vector.Vec4) vector.Vec4 { return vector.Vec4{W: v.W, X: v.Y, Y: v.X, Z: v.Z} },
D: func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{
W: (1.0+2.0*cn)*v.W - 2.0*cn*v.X,
X: (2.0+2.0*cn)*v.W - (1.0+2.0*cn)*v.X,
Y: v.Y,
Z: v.Z,
}
},
FaceReflections: []string{"bc", "c", "cbabc", "abc", "", "babc"},
GoursatTetrahedron: GoursatTetrahedron43n(n),
}
}
func Honeycomb43n(n float64) Honeycomb {
cot := 1.0 / math.Pow(math.Tan(math.Pi/n), 2.0)
space := Boundaries(n, eVal43n, pVal43n)
var scale func(vector.Vec4) vector.Vec4
if space == 'p' {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{W: Rt3 * v.W, X: v.X, Y: v.Y, Z: v.Z}
}
} else if space == 'e' {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{W: v.W, X: v.X, Y: v.Y, Z: v.Z}
}
} else {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{
W: math.Sqrt(math.Abs(2.0*cot/(3.0-cot))) * v.W,
X: math.Sqrt(math.Abs((cot-1.0)/(3.0-cot))) * v.X,
Y: math.Sqrt(math.Abs((cot-1.0)/(3.0-cot))) * v.Y,
Z: math.Sqrt(math.Abs((cot-1.0)/(3.0-cot))) * v.Z,
}
}
}
var innerProd func(vector.Vec4, vector.Vec4) float64
if space == 'p' {
innerProd = func(a, b vector.Vec4) float64 { return 3.0*a.W*b.W - a.X*b.X - a.Y*b.Y - a.Z*b.Z }
} else {
innerProd = func(a, b vector.Vec4) float64 {
return (2.0*cot*a.W*b.W - (cot-1.0)*(a.X*b.X+a.Y*b.Y+a.Z*b.Z)) / math.Abs(3.0-cot)
}
}
return Honeycomb{
Coxeter: Coxeter43n(n),
CellType: "spherical",
Vertices: []vector.Vec4{
{W: 1, X: 1, Y: 1, Z: 1},
{W: 1, X: 1, Y: -1, Z: 1},
{W: 1, X: -1, Y: -1, Z: 1},
{W: 1, X: -1, Y: 1, Z: 1},
{W: 1, X: 1, Y: 1, Z: -1},
{W: 1, X: 1, Y: -1, Z: -1},
{W: 1, X: -1, Y: -1, Z: -1},
{W: 1, X: -1, Y: 1, Z: -1},
},
Edges: [][2]int{
{0, 3}, {3, 2}, {2, 1}, {1, 0},
{7, 4}, {4, 5}, {5, 6}, {6, 7},
{0, 4}, {1, 5}, {2, 6}, {3, 7},
},
Faces: [][]int{
{0, 1, 2, 3}, {4, 7, 3, 0}, {7, 6, 2, 3},
{4, 5, 6, 7}, {0, 1, 5, 4}, {1, 2, 6, 5},
},
EVal: eVal43n,
PVal: pVal43n,
Space: space,
Scale: scale,
InnerProduct: innerProd,
}
}
func Honeycomb43nTrunc(n float64) Honeycomb {
cot := 1.0 / math.Pow(math.Tan(math.Pi/n), 2.0)
factor := Rt2 - 1.0
space := Boundaries(n, eVal43nTrunc, pVal43nTrunc)
var scale func(vector.Vec4) vector.Vec4
if space == 'p' {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{W: math.Sqrt(5.0-2.0*Rt2) * v.W, X: v.X, Y: v.Y, Z: v.Z}
}
} else if space == 'e' {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{W: v.W, X: v.X, Y: v.Y, Z: v.Z}
}
} else {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{
W: math.Sqrt(math.Abs(2.0*cot/(5.0-2.0*Rt2-(3.0-2.0*Rt2)*cot))) * v.W,
X: math.Sqrt(math.Abs((cot-1.0)/(5.0-2.0*Rt2-(3.0-2.0*Rt2)*cot))) * v.X,
Y: math.Sqrt(math.Abs((cot-1.0)/(5.0-2.0*Rt2-(3.0-2.0*Rt2)*cot))) * v.Y,
Z: math.Sqrt(math.Abs((cot-1.0)/(5.0-2.0*Rt2-(3.0-2.0*Rt2)*cot))) * v.Z,
}
}
}
var innerProd func(vector.Vec4, vector.Vec4) float64
if space == 'p' {
innerProd = func(a, b vector.Vec4) float64 { return (5.0-2.0*Rt2)*a.W*b.W - (a.X*b.X + a.Y*b.Y + a.Z*b.Z) }
} else {
innerProd = func(a, b vector.Vec4) float64 {
return (2.0*cot*a.W*b.W - (cot-1.0)*(a.X*b.X+a.Y*b.Y+a.Z*b.Z)) / math.Abs(5.0-2.0*Rt2-(3.0-2.0*Rt2)*cot)
}
}
return Honeycomb{
Coxeter: Coxeter43n(n),
CellType: "spherical",
Vertices: []vector.Vec4{
{W: 1, X: 1, Y: 1, Z: factor},
{W: 1, X: 1, Y: factor, Z: 1},
{W: 1, X: factor, Y: 1, Z: 1},
{W: 1, X: 1, Y: 1, Z: -factor},
{W: 1, X: 1, Y: factor, Z: -1},
{W: 1, X: factor, Y: 1, Z: -1},
{W: 1, X: 1, Y: -1, Z: factor},
{W: 1, X: 1, Y: -factor, Z: 1},
{W: 1, X: factor, Y: -1, Z: 1},
{W: 1, X: 1, Y: -1, Z: -factor},
{W: 1, X: 1, Y: -factor, Z: -1},
{W: 1, X: factor, Y: -1, Z: -1},
{W: 1, X: -1, Y: 1, Z: factor},
{W: 1, X: -1, Y: factor, Z: 1},
{W: 1, X: -factor, Y: 1, Z: 1},
{W: 1, X: -1, Y: 1, Z: -factor},
{W: 1, X: -1, Y: factor, Z: -1},
{W: 1, X: -factor, Y: 1, Z: -1},
{W: 1, X: -1, Y: -1, Z: factor},
{W: 1, X: -1, Y: -factor, Z: 1},
{W: 1, X: -factor, Y: -1, Z: 1},
{W: 1, X: -1, Y: -1, Z: -factor},
{W: 1, X: -1, Y: -factor, Z: -1},
{W: 1, X: -factor, Y: -1, Z: -1},
},
Edges: [][2]int{
{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 7}, {2, 14}, {3, 4}, {3, 5},
{4, 5}, {4, 10}, {5, 17}, {6, 7}, {6, 8}, {6, 9}, {7, 8}, {8, 20},
{9, 10}, {9, 11}, {10, 11}, {11, 23}, {12, 13}, {12, 14}, {12, 15}, {13, 14},
{13, 19}, {15, 16}, {15, 17}, {16, 17}, {16, 22}, {18, 19}, {18, 20}, {18, 21},
{19, 20}, {21, 22}, {21, 23}, {22, 23},
},
Faces: [][]int{
{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11},
{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23},
{0, 1, 7, 6, 9, 10, 4, 3}, {0, 2, 14, 12, 15, 17, 5, 3},
{1, 2, 14, 13, 19, 20, 8, 7}, {6, 8, 20, 18, 21, 23, 11, 9},
{4, 5, 17, 16, 22, 23, 11, 10}, {12, 13, 19, 18, 21, 22, 16, 15},
},
EVal: eVal43nTrunc,
PVal: pVal43nTrunc,
Space: space,
Scale: scale,
InnerProduct: innerProd,
}
}
func Honeycomb43nRect(n float64) Honeycomb {
cot := 1.0 / math.Pow(math.Tan(math.Pi/n), 2.0)
space := Boundaries(n, eVal43nRect, pVal43nRect)
var scale func(vector.Vec4) vector.Vec4
if space == 'e' {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{W: v.W, X: v.X, Y: v.Y, Z: v.Z}
}
} else {
scale = func(v vector.Vec4) vector.Vec4 {
return vector.Vec4{
W: math.Sqrt(math.Abs(cot)) * v.W,
X: math.Sqrt(math.Abs((cot-1.0)/2.0)) * v.X,
Y: math.Sqrt(math.Abs((cot-1.0)/2.0)) * v.Y,
Z: math.Sqrt(math.Abs((cot-1.0)/2.0)) * v.Z,
}
}
}
innerProd := func(a, b vector.Vec4) float64 {
return cot*a.W*b.W - (0.5*cot-0.5)*(a.X*b.X+a.Y*b.Y+a.Z*b.Z)
}
return Honeycomb{
Coxeter: Coxeter43n(n),
CellType: "spherical",
Vertices: []vector.Vec4{
{W: 1, X: 1, Y: 1, Z: 0},
{W: 1, X: 1, Y: 0, Z: 1},
{W: 1, X: 0, Y: 1, Z: 1},
{W: 1, X: 1, Y: -1, Z: 0},
{W: 1, X: -1, Y: 0, Z: 1},
{W: 1, X: 0, Y: 1, Z: -1},
{W: 1, X: -1, Y: 1, Z: 0},
{W: 1, X: 1, Y: 0, Z: -1},
{W: 1, X: 0, Y: -1, Z: 1},
{W: 1, X: -1, Y: -1, Z: 0},
{W: 1, X: -1, Y: 0, Z: -1},
{W: 1, X: 0, Y: -1, Z: -1},
},
Edges: [][2]int{
{0, 1}, {1, 2}, {2, 0}, {0, 5}, {5, 7}, {7, 0},
{3, 7}, {7, 11}, {11, 3}, {1, 3}, {3, 8}, {8, 1},
{4, 8}, {4, 9}, {8, 9}, {2, 4}, {2, 6}, {4, 6},
{5, 6}, {5, 10}, {6, 10}, {9, 10}, {9, 11}, {10, 11},
},
Faces: [][]int{
{0, 1, 2}, {0, 5, 7}, {3, 7, 11}, {1, 3, 8},
{4, 8, 9}, {2, 4, 6}, {5, 6, 10}, {9, 10, 11},
{0, 5, 6, 2}, {1, 2, 4, 8}, {0, 1, 3, 7},
{3, 8, 9, 11}, {5, 7, 11, 10}, {4, 6, 10, 9},
},
EVal: eVal43nRect,
PVal: pVal43nRect,
Space: space,
Scale: scale,
InnerProduct: innerProd,
}
} | data/43n.go | 0.557845 | 0.538741 | 43n.go | starcoder |
package tokenizer
import (
"math"
"tokenizer/lib/utils"
)
// tfidfTokenizer holds a corpus of documents plus the corpus-wide
// statistics produced by Compute.
type tfidfTokenizer struct {
	Documents []*document // all attached documents, in insertion order
	AllDocumentsWordCount map[string]int // word -> occurrences across the whole corpus
	InverseDocumentFrequency map[string]float64 // word -> smoothed IDF score
	TFIDFVector []map[string]float64 // per-document TF-IDF vectors, aligned with Documents
}
// document is one text unit of the corpus with its per-document statistics.
type document struct {
	WordCount map[string]int // word -> occurrences within this document
	TermFrequency map[string]float64 // word -> count / total words in document
	Words []string // cleaned, tokenized content
	TFIDFValues map[string]float64 // word -> TF * IDF, filled in by computeTFIDF
}
// NewDocument creates a document and attaches it to the tokenizer.
// It requires a string which describes the document content (e.g. a sentence).
// This function takes care of cleaning the document content from unnecessary
// characters (e.g. punctuation), which might otherwise create some noise in
// the end-result.
func (tfidfTokenizer *tfidfTokenizer) NewDocument(documentContent string) *document {
	document := new(document)
	documentContent = utils.CleanDocumentContent(documentContent)
	document.Words = utils.CreateWordsFromString(documentContent)
	document.WordCount = make(map[string]int)
	document.TermFrequency = make(map[string]float64)
	document.TFIDFValues = make(map[string]float64)
	tfidfTokenizer.Documents = append(tfidfTokenizer.Documents, document)
	return document
}
// NewTFIDFTokenizer creates a TFIDFTokenizer with no documents attached.
func NewTFIDFTokenizer() *tfidfTokenizer {
	tokenizer := new(tfidfTokenizer)
	tokenizer.InverseDocumentFrequency = make(map[string]float64)
	tokenizer.Documents = make([]*document, 0)
	return tokenizer
}
// Compute runs the full TF-IDF pipeline over all attached documents:
// per-document term frequencies, corpus-wide inverse document frequencies,
// per-document TF-IDF scores, and finally the aligned TF-IDF vectors.
// Call it after all documents have been added via NewDocument.
func (tokenizer *tfidfTokenizer) Compute() {
	for _, document := range tokenizer.Documents {
		document.computeTF()
	}
	tokenizer.computeIDF()
	tokenizer.computeTFIDF()
	tokenizer.computeTFIDFVector()
}
// ComputeSimiliarityBetween takes 2 vectors and computes their similiarity
// In order to do so, we compute the dotproduct of the given vectors and divide it by the product of their magnitude
func ComputeSimiliarityBetween(vectorX map[string]float64, vectorY map[string]float64) float64 {
return computeDotProduct(vectorX, vectorY) / (computeMagnitude(vectorX) * computeMagnitude(vectorY))
}
// GetFeatureNames returns the vocabulary: every distinct word seen across
// all attached documents. Order is unspecified (map iteration is random).
// Bug fix: the original allocated the slice with LENGTH len(...) and then
// appended, so the result began with len(...) empty strings; the slice is
// now allocated with zero length and the right capacity.
func (tokenizer *tfidfTokenizer) GetFeatureNames() []string {
	features := make([]string, 0, len(tokenizer.AllDocumentsWordCount))
	for featureName := range tokenizer.AllDocumentsWordCount {
		features = append(features, featureName)
	}
	return features
}
// computeTF computes the term frequency for a given document:
// tf(w) = count(w) / total words. It first tallies per-word occurrences
// into WordCount, then derives TermFrequency from it. An empty document
// leaves both maps empty (no division by zero occurs).
func (document *document) computeTF() {
	for _, word := range document.Words {
		document.WordCount[word] += 1
	}
	// Idiom fix: `for word, _ := range` — the redundant blank value is dropped.
	for word := range document.WordCount {
		document.TermFrequency[word] = float64(document.WordCount[word]) / float64(len(document.Words))
	}
}
// computeIDF recomputes the corpus-wide word counts and the inverse
// document frequency for every word, using add-one smoothing:
// idf(w) = log((1 + N) / (1 + count(w))), N = number of documents.
// NOTE(review): count(w) here is the TOTAL number of occurrences of w across
// the corpus, not the number of documents containing w — a departure from
// textbook IDF; confirm this is intended.
func (tfidf *tfidfTokenizer) computeIDF() {
	allDocumentsWordCount := make(map[string]int)
	inverseDocumentFrequency := make(map[string]float64)
	for _, document := range tfidf.Documents {
		for _, word := range document.Words {
			allDocumentsWordCount[word] += 1
		}
	}
	for word := range allDocumentsWordCount {
		inverseDocumentFrequency[word] = math.Log((1 + float64(len(tfidf.Documents))) / (1 + float64(allDocumentsWordCount[word])))
	}
	tfidf.InverseDocumentFrequency = inverseDocumentFrequency
	tfidf.AllDocumentsWordCount = allDocumentsWordCount
}
// computeTFIDF assigns each document its per-word TF-IDF score:
// tfidf(w) = tf(w) * idf(w).
func (tokenizer *tfidfTokenizer) computeTFIDF() {
	for _, document := range tokenizer.Documents {
		tfidfValues := make(map[string]float64)
		for word := range document.TermFrequency {
			tfidfValues[word] = document.TermFrequency[word] * tokenizer.InverseDocumentFrequency[word]
		}
		document.TFIDFValues = tfidfValues
	}
}
// computeTFIDFVector builds, for every document, a sparse vector over the
// corpus vocabulary with that document's TF-IDF values (words absent from a
// document are simply omitted from its map).
func (tokenizer *tfidfTokenizer) computeTFIDFVector() {
	tfidfVector := make([](map[string]float64), len(tokenizer.Documents))
	for i, document := range tokenizer.Documents {
		tfidfVector[i] = make(map[string]float64)
		for word := range tokenizer.AllDocumentsWordCount {
			if value, ok := document.TFIDFValues[word]; ok {
				tfidfVector[i][word] = value
			}
		}
	}
	tokenizer.TFIDFVector = tfidfVector
}
// computeDotProduct returns the sum over the keys of vectorX of
// vectorX[k] * vectorY[k]; keys missing from vectorY contribute zero.
func computeDotProduct(vectorX map[string]float64, vectorY map[string]float64) float64 {
	dot := 0.0
	for key, xVal := range vectorX {
		dot += xVal * vectorY[key]
	}
	return dot
}
// computeMagnitude returns the Euclidean (L2) norm of the sparse vector:
// sqrt(sum of squared values). An empty vector yields 0.
func computeMagnitude(vector map[string]float64) float64 {
	var magnitude = 0.0
	for _, value := range vector {
		magnitude += math.Pow(value, 2)
	}
	return math.Sqrt(magnitude)
} | lib/tokenizer/TfidfTokenizer.go | 0.7586 | 0.421492 | TfidfTokenizer.go | starcoder
package main
import (
"time"
vector "github.com/xonmello/BotKoba/vector3"
rotator "github.com/xonmello/BotKoba/rotator"
math "github.com/chewxy/math32"
RLBot "github.com/Trey2k/RLBotGo"
)
// lastjump is the Unix-millisecond timestamp of the bot's most recent jump
// press; flipToward uses it to time the two stages of a dodge.
var lastjump int64
// initialSetup unpacks self, opponent and ball physics from the RLBot packet
// into vector/rotator values. When koba is on the orange team (Team == 1),
// positions and velocities are negated and yaws rotated by pi so the rest of
// the bot logic can always reason as if playing from the blue side.
func initialSetup(koba *RLBot.PlayerInfo, opponent *RLBot.PlayerInfo, ball *RLBot.BallInfo) (*vector.Vector3, *rotator.Rotator, *vector.Vector3, *vector.Vector3, *rotator.Rotator, *vector.Vector3, *vector.Vector3, *vector.Vector3) {
	// Get self information into a useful format
	koba_pos := vector.New(koba.Physics.Location.X, koba.Physics.Location.Y, koba.Physics.Location.Z)
	koba_rot := rotator.New(koba.Physics.Rotation.Pitch, koba.Physics.Rotation.Yaw, koba.Physics.Rotation.Roll)
	koba_vel := vector.New(koba.Physics.Velocity.X, koba.Physics.Velocity.Y, koba.Physics.Velocity.Z)
	// Get opponent information into a useful format
	opponent_pos := vector.New(opponent.Physics.Location.X, opponent.Physics.Location.Y, opponent.Physics.Location.Z)
	opponent_rot := rotator.New(opponent.Physics.Rotation.Pitch, opponent.Physics.Rotation.Yaw, opponent.Physics.Rotation.Roll)
	opponent_vel := vector.New(opponent.Physics.Velocity.X, opponent.Physics.Velocity.Y, opponent.Physics.Velocity.Z)
	// Get ball information into a useful format
	ball_pos := vector.New(ball.Physics.Location.X, ball.Physics.Location.Y, ball.Physics.Location.Z)
	ball_vel := vector.New(ball.Physics.Velocity.X, ball.Physics.Velocity.Y, ball.Physics.Velocity.Z)
	// Flips coordinates when on orange team
	if koba.Team == 1 {
		koba_pos = koba_pos.MultiplyScalar(-1)
		koba_rot = koba_rot.RotateYaw(math.Pi)
		koba_vel = koba_vel.MultiplyScalar(-1)
		ball_pos = ball_pos.MultiplyScalar(-1)
		ball_vel = ball_vel.MultiplyScalar(-1)
		opponent_pos = opponent_pos.MultiplyScalar(-1)
		opponent_rot = opponent_rot.RotateYaw(math.Pi)
		opponent_vel = opponent_vel.MultiplyScalar(-1)
	}
	return koba_pos, koba_rot, koba_vel, opponent_pos, opponent_rot, opponent_vel, ball_pos, ball_vel
}
// steerToward returns a steering input in [-1, 1] that turns the car toward
// target: the angle from the car's heading (yaw) to the target, wrapped into
// (-pi, pi) and then clamped by cap.
func steerToward(self_pos *vector.Vector3, self_rot *rotator.Rotator, target *vector.Vector3) float32 {
	// Center the car in the coordinate system
	local := target.Subtract(self_pos)
	toTargetAngle := math.Atan2(local.Y,local.X)
	// Steer toward the ball depending on our Yaw (direction we are facing)
	steer := toTargetAngle - self_rot.Yaw
	// Wrap the difference into (-pi, pi) so we always turn the short way.
	if steer < -math.Pi {
		steer += math.Pi * 2.0;
	} else if steer >= math.Pi {
		steer -= math.Pi * 2.0;
	}
	// If angle is greater than 1 radian, limit to full turn
	steer = cap(steer, -1, 1)
	return steer
}
// flipToward performs a dodge (double-jump flip) toward target across
// successive ticks, mutating and returning PlayerInput:
//  1. first call (jumped == false): press jump and record the time;
//  2. for ~70ms after that: keep jump held;
//  3. after ~110ms: press jump again with pitch/yaw chosen from the angle to
//     the target in the car's local frame, directing the flip.
// Consistency fix: one branch used time.Now().UnixMilli() directly where its
// sibling used currentTime(); currentTime() is exactly that call, so it is
// now used throughout (behavior unchanged).
func flipToward(self_pos *vector.Vector3, jumped bool, self_rot *rotator.Rotator, target *vector.Vector3, PlayerInput *RLBot.ControllerState) *RLBot.ControllerState {
	local := target.Subtract(self_pos)
	localAngle := rotator.New(0, math.Atan2(local.Y, local.X), 0).RotateYaw(-self_rot.Yaw).Yaw
	if !jumped {
		PlayerInput.Jump = true
		lastjump = currentTime()
	} else if jumped && currentTime() < lastjump+70 {
		PlayerInput.Jump = true
	}
	if jumped && currentTime() > lastjump+110 {
		PlayerInput.Jump = true
		// Pick the flip direction from the local angle to the target.
		// NOTE(review): the angle bands are asymmetric (1.14 vs pi/2 ~= 1.57),
		// presumably tuned empirically — verify against intent.
		if math.Abs(localAngle) <= 0.3 {
			PlayerInput.Pitch = -1
			PlayerInput.Yaw = 0
		} else if localAngle <= (math.Pi/2) && 1.14 <= localAngle {
			PlayerInput.Pitch = 0
			PlayerInput.Yaw = 1
		} else if localAngle <= -1.14 && -(math.Pi/2) <= localAngle {
			PlayerInput.Pitch = 0
			PlayerInput.Yaw = -1
		} else if localAngle <= 1.14 && 0.3 <= localAngle {
			PlayerInput.Pitch = -1
			PlayerInput.Yaw = 1
		} else if localAngle <= -0.3 && -1.14 <= localAngle {
			PlayerInput.Pitch = -1
			PlayerInput.Yaw = -1
		}
	}
	return PlayerInput
}
// cap clamps x to the closed interval [low, high].
// (Note: this package-level name shadows Go's builtin cap for slices within
// this package; it cannot be renamed here without touching callers.)
func cap(x float32, low float32, high float32) (float32) {
	switch {
	case x < low:
		return low
	case x > high:
		return high
	default:
		return x
	}
}
// currentTime returns the current wall-clock time in Unix milliseconds,
// the unit used by lastjump and the dodge timing in flipToward.
func currentTime() (int64) {
	return time.Now().UnixMilli()
} | utils.go | 0.720172 | 0.557002 | utils.go | starcoder
package iso20022
// Specifies an identification of a document assigned by and relative to the issuing party (of the identification).
// Optionally, the component can contain a copy of the identified document and a URI/URL (Universal Resource Information/Location) facilitating retrieval of the document.
// The component may also contain a cryptographic hash of the referenced document.
// Financial items are identified by three parts:
// (1) the creator of the document,
// (2) an identification of a dossier, and
// (3) an identification of a financial item.
// The two latter identifiers are independent permitting to identify the same item in several lists.
// The element identification is of schema type ID, it can be referenced by IDREF typed elements (composite=false).
// The xml struct tags map each field to its ISO 20022 XML element name.
type QualifiedDocumentInformation1 struct {
	// Local identification to be used in IDREFs in this message.
	Identification *ID `xml:"Id"`
	// Party issuing the reference.
	Issuer *QualifiedPartyIdentification1 `xml:"Issr,omitempty"`
	// Unambiguous identifier relative to the issuing party of a list of items.
	ItemListIdentifier *Max35Text `xml:"ItmListIdr,omitempty"`
	// Unambiguous identifier relative to the issuing party of an item (independent of any list).
	ItemIdentifier *Max35Text `xml:"ItmIdr,omitempty"`
	// Date of document or element. This may be used as a control value to indicate a specific version.
	Date *ISODate `xml:"Dt,omitempty"`
	// Identification of the version of the document or element. This may be used as a control value to indicate a specific version.
	Version *Max6Text `xml:"Vrsn,omitempty"`
	// If true, document is in its original form, otherwise it is a scanned version.
	ElectronicOriginal *YesNoIndicator `xml:"ElctrncOrgnl"`
	// Cryptographic hash of the document.
	Digest []*AlgorithmAndDigest1 `xml:"Dgst,omitempty"`
	// Specifies the type of the document, for example commercial invoice.
	DocumentType *ExternalDocumentType1Code `xml:"DocTp,omitempty"`
	// URL (Uniform Resource Locator) where the document can be found.
	URL *Max2048Text `xml:"URL,omitempty"`
	// Attached file for this document. The file must be in a self-describing format.
	AttachedFile []*BinaryFile1 `xml:"AttchdFile,omitempty"`
}
// The methods below follow the ISO 20022 generated-code convention:
// Set* setters convert and store simple typed values; Add* factories
// allocate a child element, attach it, and return it for further population.
func (q *QualifiedDocumentInformation1) SetIdentification(value string) {
	q.Identification = (*ID)(&value)
}
func (q *QualifiedDocumentInformation1) AddIssuer() *QualifiedPartyIdentification1 {
	q.Issuer = new(QualifiedPartyIdentification1)
	return q.Issuer
}
func (q *QualifiedDocumentInformation1) SetItemListIdentifier(value string) {
	q.ItemListIdentifier = (*Max35Text)(&value)
}
func (q *QualifiedDocumentInformation1) SetItemIdentifier(value string) {
	q.ItemIdentifier = (*Max35Text)(&value)
}
func (q *QualifiedDocumentInformation1) SetDate(value string) {
	q.Date = (*ISODate)(&value)
}
func (q *QualifiedDocumentInformation1) SetVersion(value string) {
	q.Version = (*Max6Text)(&value)
}
func (q *QualifiedDocumentInformation1) SetElectronicOriginal(value string) {
	q.ElectronicOriginal = (*YesNoIndicator)(&value)
}
func (q *QualifiedDocumentInformation1) AddDigest() *AlgorithmAndDigest1 {
	newValue := new(AlgorithmAndDigest1)
	q.Digest = append(q.Digest, newValue)
	return newValue
}
func (q *QualifiedDocumentInformation1) SetDocumentType(value string) {
	q.DocumentType = (*ExternalDocumentType1Code)(&value)
}
func (q *QualifiedDocumentInformation1) SetURL(value string) {
	q.URL = (*Max2048Text)(&value)
}
func (q *QualifiedDocumentInformation1) AddAttachedFile() *BinaryFile1 {
	newValue := new(BinaryFile1)
	q.AttachedFile = append(q.AttachedFile, newValue)
	return newValue
} | QualifiedDocumentInformation1.go | 0.667256 | 0.41401 | QualifiedDocumentInformation1.go | starcoder
package bufr
import "encoding/json"
// Payload represents the meat of the data section of a BUFR message.
// It is comprised of a list of Subset.
type Payload struct {
	subsets []*Subset
	Compressed bool // whether the data section uses BUFR compression
}
// NewPayload returns an empty Payload with the given compression flag.
func NewPayload(compressed bool) *Payload {
	return &Payload{Compressed: compressed}
}
// Accept implements the visitor pattern for Payload.
func (p *Payload) Accept(visitor Visitor) error {
	return visitor.VisitPayload(p)
}
// Subsets returns all subsets in insertion order.
func (p *Payload) Subsets() []*Subset {
	return p.subsets
}
// Subset returns the i-th subset; panics if i is out of range.
func (p *Payload) Subset(i int) *Subset {
	return p.subsets[i]
}
// AddSubset adds a subsets to the Payload. It also sets a zero-based
// index to the added Subset.
func (p *Payload) AddSubset(subset *Subset) {
	subset.SetIndex(len(p.subsets))
	p.subsets = append(p.subsets, subset)
}
// Add constructs a Subset from root and cells and appends it with the next
// zero-based index (a convenience equivalent of NewSubset + AddSubset).
func (p *Payload) Add(root Node, cells []*Cell) {
	subset := &Subset{
		index: len(p.subsets),
		root: root,
		cells: cells,
	}
	p.subsets = append(p.subsets, subset)
}
// MarshalJSON encodes the payload as the JSON array of its subsets.
// NOTE(review): a Payload with no subsets encodes as null rather than [] —
// confirm that is acceptable to consumers.
func (p *Payload) MarshalJSON() ([]byte, error) {
	return json.Marshal(p.subsets)
}
// Subset represents the data of a subset of the Payload.
type Subset struct {
	// Zero based index of the subset in relative to all subsets a Payload contains
	index int
	// A list of cells representing a structureless decoded node and its value from the source.
	cells []*Cell
	// The root/entry Node of the hierarchical data structure.
	root Node
}
// NewSubset builds a Subset from its cells and root node.
// NOTE(review): index is left at 0 here; it is assigned when the subset is
// attached via Payload.AddSubset.
func NewSubset(cells []*Cell, root Node) *Subset {
	return &Subset{cells: cells, root: root}
}
// Accept implements the visitor pattern for Subset.
func (s *Subset) Accept(visitor Visitor) error {
	return visitor.VisitSubset(s)
}
// Index returns the subset's zero-based position within its Payload.
func (s *Subset) Index() int {
	return s.index
}
// SetIndex records the subset's zero-based position within its Payload.
func (s *Subset) SetIndex(index int) {
	s.index = index
}
// Cells returns all decoded cells of the subset.
func (s *Subset) Cells() []*Cell {
	return s.cells
}
// Cell returns the i-th cell; panics if i is out of range.
func (s *Subset) Cell(i int) *Cell {
	return s.cells[i]
}
// AddCell appends a cell to the subset.
func (s *Subset) AddCell(cell *Cell) {
	s.cells = append(s.cells, cell)
}
// Root returns the root/entry Node of the hierarchical structure.
func (s *Subset) Root() Node {
	return s.root
}
// MarshalJSON encodes the subset as the flat JSON array of its cell values
// (the hierarchical structure in root is not serialized here).
func (s *Subset) MarshalJSON() ([]byte, error) {
	subsetValues := make([]interface{}, len(s.cells))
	for i, cell := range s.cells {
		subsetValues[i] = cell.Value()
	}
	return json.Marshal(subsetValues)
} | bufr/payload.go | 0.741768 | 0.408129 | payload.go | starcoder
package validator
import (
"fmt"
"net"
"unicode/utf8"
)
// ValidateBetweenString reports whether the rune count of v lies between
// left and right. Bound semantics (presumably inclusive) are defined by
// ValidateDigitsBetweenInt64, declared elsewhere in this package — verify.
func ValidateBetweenString(v string, left int64, right int64) bool {
	return ValidateDigitsBetweenInt64(int64(utf8.RuneCountInString(v)), left, right)
}
// InString reports whether str equals at least one element of params.
// A nil or empty params slice yields false.
func InString(str string, params []string) bool {
	for i := 0; i < len(params); i++ {
		if params[i] == str {
			return true
		}
	}
	return false
}
// compareString determines whether the rune count of first stands in the
// relation named by operator to second. An unknown operator panics,
// matching the original behavior.
func compareString(first string, second int64, operator string) bool {
	// Count runes once; the original recomputed utf8.RuneCountInString in
	// every branch.
	length := int64(utf8.RuneCountInString(first))
	switch operator {
	case "<":
		return length < second
	case ">":
		return length > second
	case "<=":
		return length <= second
	case ">=":
		return length >= second
	case "==":
		return length == second
	default:
		panic(fmt.Sprintf("validator: compareString unsupport operator %s", operator))
	}
}
// IsNumeric check if the string must be numeric. Empty string is valid.
// (rxNumeric, rxInt and rxFloat are package-level regexps declared elsewhere.)
func IsNumeric(str string) bool {
	if IsNull(str) {
		return true
	}
	return rxNumeric.MatchString(str)
}
// IsInt check if the string must be an integer. Empty string is valid.
func IsInt(str string) bool {
	if IsNull(str) {
		return true
	}
	return rxInt.MatchString(str)
}
// IsFloat check if the string must be an float. Empty string is valid.
func IsFloat(str string) bool {
	if IsNull(str) {
		return true
	}
	return rxFloat.MatchString(str)
}
// IsNull reports whether the string is empty.
func IsNull(str string) bool {
	return str == ""
}
// ValidateEmail check if the string is an email.
func ValidateEmail(str string) bool {
// TODO uppercase letters are not supported
return rxEmail.MatchString(str)
}
// ValidateAlpha reports whether str contains only letters (a-zA-Z) as
// matched by rxAlpha. An empty string is considered valid.
func ValidateAlpha(str string) bool {
	return IsNull(str) || rxAlpha.MatchString(str)
}
// ValidateAlphaNum reports whether str contains only letters and numbers
// as matched by rxAlphaNum. An empty string is considered valid.
func ValidateAlphaNum(str string) bool {
	return IsNull(str) || rxAlphaNum.MatchString(str)
}
// ValidateAlphaDash reports whether str contains only letters, numbers,
// dashes and underscores as matched by rxAlphaDash. An empty string is
// considered valid.
func ValidateAlphaDash(str string) bool {
	return IsNull(str) || rxAlphaDash.MatchString(str)
}
// ValidateAlphaUnicode reports whether str contains only letters as
// defined by the rxAlphaUnicode pattern (Unicode-aware, unlike
// ValidateAlpha). An empty string is considered valid.
func ValidateAlphaUnicode(str string) bool {
	return IsNull(str) || rxAlphaUnicode.MatchString(str)
}
// ValidateAlphaNumUnicode reports whether str contains only letters and
// numbers as defined by the rxAlphaNumUnicode pattern. An empty string is
// considered valid.
func ValidateAlphaNumUnicode(str string) bool {
	return IsNull(str) || rxAlphaNumUnicode.MatchString(str)
}
// ValidateAlphaDashUnicode reports whether str contains only letters,
// numbers, dashes and underscores as defined by the rxAlphaDashUnicode
// pattern. An empty string is considered valid.
func ValidateAlphaDashUnicode(str string) bool {
	return IsNull(str) || rxAlphaDashUnicode.MatchString(str)
}
// ValidateIP reports whether v parses as an IP address (IPv4 or IPv6).
func ValidateIP(v string) bool {
	return net.ParseIP(v) != nil
}
// ValidateIPv4 reports whether v parses as an IPv4 address.
func ValidateIPv4(v string) bool {
	if ip := net.ParseIP(v); ip != nil {
		return ip.To4() != nil
	}
	return false
}
// ValidateIPv6 reports whether v parses as an IP address that has no IPv4
// form (i.e. a "pure" IPv6 address; 4-in-6 addresses are rejected).
func ValidateIPv6(v string) bool {
	if ip := net.ParseIP(v); ip != nil {
		return ip.To4() == nil
	}
	return false
}
package tsin
import (
"encoding/xml"
"github.com/fgrid/iso20022"
)
// Document00400101 is the XML document wrapper for the tsin.004.001.01
// (FinancialInvoice) message; XMLName binds the root element to its
// ISO 20022 namespace.
type Document00400101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:tsin.004.001.01 Document"`
	// Message is the FinancialInvoice payload, serialized as <FinInvc>.
	Message *FinancialInvoiceV01 `xml:"FinInvc"`
}
// AddMessage allocates a fresh FinancialInvoiceV01, stores it as the
// document's payload (replacing any existing one) and returns it so the
// caller can populate it.
func (d *Document00400101) AddMessage() *FinancialInvoiceV01 {
	d.Message = new(FinancialInvoiceV01)
	return d.Message
}
// Scope
// The FinancialInvoice message is used to support the provision of financial and related services where there is a requirement to exchange invoice information.
// Usage
// While the prime function of the FinancialInvoice message is as a request from the seller to the buyer for payment, the FinancialInvoice message can also serve to evidence an invoice in support of a financial service such as invoice factoring, letters of credit, and bank payment obligations, to enable Web based services such as electronic bill payment and presentment, and as the basis to transfer invoice information via third parties such as e-invoicing service providers.
// A consequence of the receipt of an invoice by the buyer is that it acts as a trigger for the use of related messages that are already defined in ISO 20022, notably where the information contained in the Financial Invoice enables payment for the goods or services received, and/or is provided in support of a request for invoice financing. While certain of these related messages, such as the CreditTransfer and PaymentInitiation messages, are shown in the sequence diagram they are out of scope. They are shown only to illustrate a given scenario and to place the invoice in the context of the financial banking processes that might be conducted between different financial institutions.
// The use of self-billing by the buyer to the seller, where the buyer acts as the invoice issuer or the process of handling an incorrect invoice, is not in scope.
type FinancialInvoiceV01 struct {
	// Collection of data that is exchanged between two or more parties in written, printed or electronic form. It contains general data relevant to the main body of the invoice such as date of issue, currency code and identification number.
	InvoiceHeader *iso20022.InvoiceHeader1 `xml:"InvcHdr"`
	// Commercial information such as terms of commerce, parties, and documentation, related to the trading agreement under which this invoice is issued.
	TradeAgreement *iso20022.TradeAgreement6 `xml:"TradAgrmt"`
	// Supply chain shipping arrangements for delivery of invoiced products and/or services.
	TradeDelivery *iso20022.TradeDelivery1 `xml:"TradDlvry"`
	// Settlement information that enables the financial reconciliation and payment of this invoice.
	TradeSettlement *iso20022.TradeSettlement1 `xml:"TradSttlm"`
	// Unit of information in this invoice showing the related provision of products and/or services and monetary summations reported as a discrete line item.
	LineItem []*iso20022.LineItem10 `xml:"LineItm,omitempty"`
}
// AddInvoiceHeader allocates and attaches a new InvoiceHeader1, returning it for population.
func (f *FinancialInvoiceV01) AddInvoiceHeader() *iso20022.InvoiceHeader1 {
	f.InvoiceHeader = new(iso20022.InvoiceHeader1)
	return f.InvoiceHeader
}
// AddTradeAgreement allocates and attaches a new TradeAgreement6, returning it for population.
func (f *FinancialInvoiceV01) AddTradeAgreement() *iso20022.TradeAgreement6 {
	f.TradeAgreement = new(iso20022.TradeAgreement6)
	return f.TradeAgreement
}
// AddTradeDelivery allocates and attaches a new TradeDelivery1, returning it for population.
func (f *FinancialInvoiceV01) AddTradeDelivery() *iso20022.TradeDelivery1 {
	f.TradeDelivery = new(iso20022.TradeDelivery1)
	return f.TradeDelivery
}
// AddTradeSettlement allocates and attaches a new TradeSettlement1, returning it for population.
func (f *FinancialInvoiceV01) AddTradeSettlement() *iso20022.TradeSettlement1 {
	f.TradeSettlement = new(iso20022.TradeSettlement1)
	return f.TradeSettlement
}
func (f *FinancialInvoiceV01) AddLineItem() *iso20022.LineItem10 {
newValue := new (iso20022.LineItem10)
f.LineItem = append(f.LineItem, newValue)
return newValue
} | tsin/FinancialInvoiceV01.go | 0.716219 | 0.438364 | FinancialInvoiceV01.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// BookingWorkHours represents the set of working hours in a single day of the week.
type BookingWorkHours struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // The day of the week represented by this instance. Possible values are: sunday, monday, tuesday, wednesday, thursday, friday, saturday.
    day *DayOfWeek
    // A list of start/end times during a day.
    timeSlots []BookingWorkTimeSlotable
}
// NewBookingWorkHours instantiates a new BookingWorkHours with an empty
// additional-data map (the generator's default state).
func NewBookingWorkHours() *BookingWorkHours {
    // gofmt cleanup: drop redundant return parens, empty-literal line break
    // and the stray trailing semicolon from the generated code.
    m := &BookingWorkHours{}
    m.SetAdditionalData(make(map[string]interface{}))
    return m
}
// CreateBookingWorkHoursFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// The parseNode argument is accepted for factory-interface compatibility
// but is not inspected: a plain BookingWorkHours is always returned.
func CreateBookingWorkHoursFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewBookingWorkHours(), nil
}
// GetAdditionalData returns the map of properties not described in the
// OpenAPI description that were found when deserializing; nil-safe.
func (m *BookingWorkHours) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetDay returns the day of the week represented by this instance
// (sunday..saturday); nil-safe.
func (m *BookingWorkHours) GetDay()(*DayOfWeek) {
    if m == nil {
        return nil
    }
    return m.day
}
// GetFieldDeserializers returns the deserialization information for the
// current model: a map keyed by JSON property name whose entries populate
// the corresponding field from a parse node.
func (m *BookingWorkHours) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    // "day" is read as a DayOfWeek enum value.
    res["day"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetEnumValue(ParseDayOfWeek)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDay(val.(*DayOfWeek))
        }
        return nil
    }
    // "timeSlots" is read as a collection of BookingWorkTimeSlot objects.
    res["timeSlots"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateBookingWorkTimeSlotFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            // Re-type the generic Parsable slice into the concrete slice type.
            res := make([]BookingWorkTimeSlotable, len(val))
            for i, v := range val {
                res[i] = v.(BookingWorkTimeSlotable)
            }
            m.SetTimeSlots(res)
        }
        return nil
    }
    return res
}
// GetTimeSlots returns the list of start/end times during a day; nil-safe.
func (m *BookingWorkHours) GetTimeSlots()([]BookingWorkTimeSlotable) {
    if m == nil {
        return nil
    }
    return m.timeSlots
}
// Serialize writes the day, the timeSlots collection and any additional
// data of the current object to the supplied serialization writer.
func (m *BookingWorkHours) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if day := m.GetDay(); day != nil {
        value := (*day).String()
        if err := writer.WriteStringValue("day", &value); err != nil {
            return err
        }
    }
    if slots := m.GetTimeSlots(); slots != nil {
        // Re-type the concrete slice into the generic Parsable slice the writer expects.
        parsables := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(slots))
        for i, slot := range slots {
            parsables[i] = slot.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        if err := writer.WriteCollectionOfObjectValues("timeSlots", parsables); err != nil {
            return err
        }
    }
    if err := writer.WriteAdditionalData(m.GetAdditionalData()); err != nil {
        return err
    }
    return nil
}
// SetAdditionalData stores the map of properties not described in the
// OpenAPI description; nil-safe no-op on a nil receiver.
func (m *BookingWorkHours) SetAdditionalData(value map[string]interface{})() {
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetDay stores the day of the week represented by this instance
// (sunday..saturday); nil-safe no-op on a nil receiver.
func (m *BookingWorkHours) SetDay(value *DayOfWeek)() {
    if m == nil {
        return
    }
    m.day = value
}
// SetTimeSlots sets the timeSlots property value. A list of start/end times during a day.
func (m *BookingWorkHours) SetTimeSlots(value []BookingWorkTimeSlotable)() {
if m != nil {
m.timeSlots = value
}
} | models/booking_work_hours.go | 0.764804 | 0.473779 | booking_work_hours.go | starcoder |
package rolling
import "math"
const (
	// By default NaN values are ignored (dropped when added)
	ignoreNanValuesDefault bool = true
	// By default infinite values (both positive and negative) are ignored
	ignoreInfValuesDefault bool = true
	// By default zero values are treated as per any other number (ie not ignored)
	ignoreZeroValuesDefault bool = false
)
// RollingObject - the struct that holds the 'settings' and current values to be used in any
// calculations.
type RollingObject struct {
	window int // maximum number of values retained in the rolling window
	values []float64 // retained values, oldest first
	ignoreNanValues bool // drop NaN inputs when true
	ignoreInfValues bool // drop +Inf/-Inf inputs when true
	ignoreZeroValues bool // drop zero inputs when true
}
// SetIgnoreNanValues - controls if we want to ignore NaN values when producing
// the outputs of any calculations (the original comment misnamed this method).
func (ro *RollingObject) SetIgnoreNanValues(ignoreNanValues bool) {
	ro.ignoreNanValues = ignoreNanValues
}
// SetIgnoreInfValues - controls if we want to ignore infinite values (both
// positive and negative) when producing the outputs of any calculations.
func (ro *RollingObject) SetIgnoreInfValues(ignoreInfValues bool) {
	ro.ignoreInfValues = ignoreInfValues
}
// SetIgnoreZeroValues - controls if we want to ignore zero values when producing
// the outputs of any calculations (the original comment misnamed this method).
func (ro *RollingObject) SetIgnoreZeroValues(ignoreZeroValues bool) {
	ro.ignoreZeroValues = ignoreZeroValues
}
// Add appends value to the rolling window unless it is filtered out by the
// current ignore settings (NaN / infinities / zero). When the window is
// already full the oldest value is dropped to make room.
func (ro *RollingObject) Add(value float64) {
	if ro.ignoreNanValues && math.IsNaN(value) {
		return
	}
	// math.IsInf(value, 0) reports true for either +Inf or -Inf, replacing
	// the original pair of sign-specific checks.
	if ro.ignoreInfValues && math.IsInf(value, 0) {
		return
	}
	if ro.ignoreZeroValues && value == 0 {
		return
	}
	// Window full: drop the oldest entry (slice is kept oldest-first).
	if len(ro.values) >= ro.window {
		ro.values = ro.values[1:]
	}
	ro.values = append(ro.values, value)
}
// Calc - calculate the value of the supplied calculation based from the values
// stored within the rolling object values. Options are:
//   - sum: the total of all the values
//   - avg: the arithmetic mean of the values
//   - count: the number of values
//   - nunique: the number of distinct values
//   - std: the standard deviation of the values
// It panics on any other calc name.
func (ro *RollingObject) Calc(calc string) float64 {
	// A switch is the idiomatic replacement for the original if/else-if chain.
	switch calc {
	case "sum":
		return Sum(ro.values)
	case "avg":
		return Avg(ro.values)
	case "count":
		return Count(ro.values)
	case "nunique":
		return NUnique(ro.values)
	case "std":
		return Std(ro.values)
	default:
		panic("Supplied `calc` argument is not valid - must be one of: 'sum', 'avg', 'count', 'nunique' or 'std', received value: " + calc)
	}
}
// NewRollingObject - set up a new rolling object with a supplied window with the default settings
func NewRollingObject(window int) *RollingObject {
return &RollingObject{
window: window,
values: []float64{},
ignoreNanValues: ignoreNanValuesDefault,
ignoreInfValues: ignoreInfValuesDefault,
ignoreZeroValues: ignoreZeroValuesDefault,
}
} | rolling.go | 0.781038 | 0.600364 | rolling.go | starcoder |
Package nestedpendingoperations is a modified implementation of
pkg/util/goroutinemap. It implements a data structure for managing go routines
by volume/pod name. It prevents the creation of new go routines if an existing
go routine for the volume already exists. It also allows multiple operations to
execute in parallel for the same volume as long as they are operating on
different pods.
*/
package nestedpendingoperations
import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
k8sRuntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
	// emptyUniquePodName is a UniquePodName for empty string.
	// isOperationExists treats it as a wildcard for the pod part of a key.
	emptyUniquePodName types.UniquePodName = types.UniquePodName("")
)
// NestedPendingOperations defines the supported set of operations.
type NestedPendingOperations interface {
	// Run adds the concatenation of volumeName and podName to the list of
	// running operations and spawns a new go routine to execute operationFunc.
	// If an operation with the same volumeName and same or empty podName
	// exists, an AlreadyExists or ExponentialBackoff error is returned.
	// This enables multiple operations to execute in parallel for the same
	// volumeName as long as they have different podName.
	// Once the operation is complete, the go routine is terminated and the
	// concatenation of volumeName and podName is removed from the list of
	// executing operations allowing a new operation to be started with the
	// volumeName without error.
	Run(volumeName api.UniqueVolumeName, podName types.UniquePodName, operationFunc func() error) error
	// Wait blocks until all operations are completed. This is typically
	// necessary during tests - the test should wait until all operations finish
	// and evaluate results after that.
	Wait()
	// IsOperationPending returns true if an operation for the given volumeName
	// and podName is pending, otherwise it returns false.
	IsOperationPending(volumeName api.UniqueVolumeName, podName types.UniquePodName) bool
}
// NewNestedPendingOperations returns a new instance of
// NestedPendingOperations; exponentialBackOffOnError selects whether failed
// operations are rate-limited with exponential backoff before retry.
func NewNestedPendingOperations(exponentialBackOffOnError bool) NestedPendingOperations {
	ops := &nestedPendingOperations{
		operations:                []operation{},
		exponentialBackOffOnError: exponentialBackOffOnError,
	}
	// The condition variable shares the instance's own lock.
	ops.cond = sync.NewCond(&ops.lock)
	return ops
}
// nestedPendingOperations is the concrete implementation of
// NestedPendingOperations. lock guards operations; cond (built on lock) is
// signaled whenever an operation completes, waking Wait().
type nestedPendingOperations struct {
	operations []operation // tracked operations, pending or backing off
	exponentialBackOffOnError bool // retry failed ops with exponential backoff when true
	cond *sync.Cond
	lock sync.RWMutex
}
// operation records one volume/pod operation and its backoff state.
type operation struct {
	volumeName api.UniqueVolumeName
	podName types.UniquePodName
	operationPending bool // true while the goroutine is still running
	expBackoff exponentialbackoff.ExponentialBackoff // failure/backoff history
}
// Run registers (volumeName, podName) as pending and executes operationFunc
// in a new goroutine. It returns an AlreadyExists error when a matching
// operation is still pending, or a backoff error when a previous attempt
// failed too recently and exponential backoff forbids a retry.
func (grm *nestedPendingOperations) Run(
	volumeName api.UniqueVolumeName,
	podName types.UniquePodName,
	operationFunc func() error) error {
	grm.lock.Lock()
	defer grm.lock.Unlock()
	opExists, previousOpIndex := grm.isOperationExists(volumeName, podName)
	if opExists {
		previousOp := grm.operations[previousOpIndex]
		// Operation already exists
		if previousOp.operationPending {
			// Operation is pending
			operationName := getOperationName(volumeName, podName)
			return NewAlreadyExistsError(operationName)
		}
		operationName := getOperationName(volumeName, podName)
		// The previous attempt failed; only retry once outside its backoff window.
		if err := previousOp.expBackoff.SafeToRetry(operationName); err != nil {
			return err
		}
		// Update existing operation to mark as pending. The stored key is also
		// refreshed because the match may have been made through an empty
		// (wildcard) pod name.
		grm.operations[previousOpIndex].operationPending = true
		grm.operations[previousOpIndex].volumeName = volumeName
		grm.operations[previousOpIndex].podName = podName
	} else {
		// Create a new operation
		grm.operations = append(grm.operations,
			operation{
				operationPending: true,
				volumeName: volumeName,
				podName: podName,
				expBackoff: exponentialbackoff.ExponentialBackoff{},
			})
	}
	go func() (err error) {
		// Handle unhandled panics (very unlikely)
		defer k8sRuntime.HandleCrash()
		// Handle completion of and error, if any, from operationFunc().
		// Deferred calls run LIFO, so RecoverFromPanic fills err before
		// operationComplete records/clears the operation.
		defer grm.operationComplete(volumeName, podName, &err)
		// Handle panic, if any, from operationFunc()
		defer k8sRuntime.RecoverFromPanic(&err)
		return operationFunc()
	}()
	return nil
}
// IsOperationPending reports whether an operation matching the given
// volume/pod pair is currently executing.
func (grm *nestedPendingOperations) IsOperationPending(
	volumeName api.UniqueVolumeName,
	podName types.UniquePodName) bool {
	grm.lock.RLock()
	defer grm.lock.RUnlock()
	exists, idx := grm.isOperationExists(volumeName, podName)
	return exists && grm.operations[idx].operationPending
}
// isOperationExists scans for an operation matching volumeName/podName.
// An empty pod name on either side acts as a wildcard for the pod part of
// the key. It returns (true, index) on a match and (false, -1) otherwise.
// Callers must hold grm.lock.
func (grm *nestedPendingOperations) isOperationExists(
	volumeName api.UniqueVolumeName,
	podName types.UniquePodName) (bool, int) {
	for i := range grm.operations {
		op := &grm.operations[i]
		if op.volumeName != volumeName {
			continue
		}
		podsDiffer := op.podName != emptyUniquePodName &&
			podName != emptyUniquePodName &&
			op.podName != podName
		if podsDiffer {
			continue
		}
		return true, i
	}
	return false, -1
}
// getOperation returns the index of the operation whose volume AND pod name
// match exactly (no wildcarding, unlike isOperationExists), or an error
// naming the missing operation. Callers must hold grm.lock.
func (grm *nestedPendingOperations) getOperation(
	volumeName api.UniqueVolumeName,
	podName types.UniquePodName) (uint, error) {
	for i := range grm.operations {
		if grm.operations[i].volumeName == volumeName &&
			grm.operations[i].podName == podName {
			return uint(i), nil
		}
	}
	logOperationName := getOperationName(volumeName, podName)
	return 0, fmt.Errorf("Operation %q not found.", logOperationName)
}
// deleteOperation removes the entry whose volume and pod name match exactly,
// using a swap-with-last delete (order is not preserved). Callers must hold
// grm.lock. If no matching entry exists the call is a no-op; the original
// would have panicked indexing the slice with -1.
func (grm *nestedPendingOperations) deleteOperation(
	volumeName api.UniqueVolumeName,
	podName types.UniquePodName) {
	opIndex := -1
	for i, op := range grm.operations {
		if op.volumeName == volumeName &&
			op.podName == podName {
			opIndex = i
			break
		}
	}
	if opIndex == -1 {
		// Nothing to delete; guard against an out-of-range panic.
		return
	}
	// Delete index without preserving order
	grm.operations[opIndex] = grm.operations[len(grm.operations)-1]
	grm.operations = grm.operations[:len(grm.operations)-1]
}
// operationComplete runs (via defer) when an operation's goroutine finishes.
// On success — or whenever backoff is disabled — the operation is removed;
// on failure with backoff enabled it is kept, marked not-pending, and its
// backoff state updated so Run can rate-limit retries. Waiters are signaled
// either way.
func (grm *nestedPendingOperations) operationComplete(
	volumeName api.UniqueVolumeName, podName types.UniquePodName, err *error) {
	// Defer operations are executed in Last-In is First-Out order. In this case
	// the lock is acquired first when operationCompletes begins, and is
	// released when the method finishes, after the lock is released cond is
	// signaled to wake waiting goroutine.
	defer grm.cond.Signal()
	grm.lock.Lock()
	defer grm.lock.Unlock()
	if *err == nil || !grm.exponentialBackOffOnError {
		// Operation completed without error, or exponentialBackOffOnError disabled
		grm.deleteOperation(volumeName, podName)
		if *err != nil {
			// Log error
			logOperationName := getOperationName(volumeName, podName)
			glog.Errorf("operation %s failed with: %v",
				logOperationName,
				*err)
		}
		return
	}
	// Operation completed with error and exponentialBackOffOnError Enabled
	existingOpIndex, getOpErr := grm.getOperation(volumeName, podName)
	if getOpErr != nil {
		// Failed to find existing operation
		logOperationName := getOperationName(volumeName, podName)
		glog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.",
			logOperationName,
			*err)
		return
	}
	// Record the failure for backoff purposes and clear the pending flag so a
	// later Run may retry once the backoff window elapses.
	grm.operations[existingOpIndex].expBackoff.Update(err)
	grm.operations[existingOpIndex].operationPending = false
	// Log error
	operationName :=
		getOperationName(volumeName, podName)
	glog.Errorf("%v", grm.operations[existingOpIndex].expBackoff.
		GenerateNoRetriesPermittedMsg(operationName))
}
// Wait blocks until no operations remain; intended for tests that need all
// in-flight goroutines to finish before asserting results.
func (grm *nestedPendingOperations) Wait() {
	grm.lock.Lock()
	defer grm.lock.Unlock()
	// cond.Wait atomically releases the lock while sleeping and re-acquires
	// it before returning, so re-checking the length each iteration is safe.
	for len(grm.operations) > 0 {
		grm.cond.Wait()
	}
}
// getOperationName formats a human-readable key from the volume name plus,
// when present, the pod name.
func getOperationName(
	volumeName api.UniqueVolumeName, podName types.UniquePodName) string {
	suffix := ""
	if podName != emptyUniquePodName {
		suffix = fmt.Sprintf(" (%q)", podName)
	}
	return fmt.Sprintf("%q%s", volumeName, suffix)
}
// NewAlreadyExistsError returns a new instance of AlreadyExists error.
// Callers can detect it with IsAlreadyExists.
func NewAlreadyExistsError(operationName string) error {
	return alreadyExistsError{operationName}
}
// IsAlreadyExists reports whether an error returned from
// NestedPendingOperations means a new operation could not be started
// because one with the same operation name is already executing.
func IsAlreadyExists(err error) bool {
	// A plain type assertion replaces the original type switch.
	_, ok := err.(alreadyExistsError)
	return ok
}
// alreadyExistsError is the error returned by NestedPendingOperations when a
// new operation can not be started because an operation with the same operation
// name is already executing.
type alreadyExistsError struct {
	operationName string // human-readable key, see getOperationName
}
var _ error = alreadyExistsError{}
func (err alreadyExistsError) Error() string {
return fmt.Sprintf(
"Failed to create operation with name %q. An operation with that name is already executing.",
err.operationName)
} | vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go | 0.706697 | 0.4575 | nestedpendingoperations.go | starcoder |
package iso20022
// AggregateBalancePerSafekeepingPlace20 describes the net position of a segregated holding, in a single security, within the overall position held in a securities account at a specified place of safekeeping.
type AggregateBalancePerSafekeepingPlace20 struct {
	// Place where the securities are safe-kept, physically or notionally. This place can be, for example, a local custodian, a Central Securities Depository (CSD) or an International Central Securities Depository (ICSD).
	SafekeepingPlace *SafekeepingPlaceFormat3Choice `xml:"SfkpgPlc"`
	// Market(s) on which the security is listed.
	PlaceOfListing *MarketIdentification3Choice `xml:"PlcOfListg,omitempty"`
	// Total quantity of financial instruments of the balance.
	AggregateBalance *Balance1 `xml:"AggtBal"`
	// Total quantity of financial instruments of the balance that is available.
	AvailableBalance *BalanceQuantity5Choice `xml:"AvlblBal,omitempty"`
	// Total quantity of financial instruments of the balance that is not available.
	NotAvailableBalance *BalanceQuantity5Choice `xml:"NotAvlblBal,omitempty"`
	// Price of the financial instrument in one or more currencies.
	PriceDetails []*PriceInformation5 `xml:"PricDtls,omitempty"`
	// Information needed to process a currency exchange or conversion.
	ForeignExchangeDetails []*ForeignExchangeTerms14 `xml:"FXDtls,omitempty"`
	// Specifies the number of days used for calculating the accrued interest amount.
	DaysAccrued *Number `xml:"DaysAcrd,omitempty"`
	// Valuation amounts provided in the base currency of the account.
	AccountBaseCurrencyAmounts *BalanceAmounts3 `xml:"AcctBaseCcyAmts,omitempty"`
	// Valuation amounts provided in the currency of the financial instrument.
	InstrumentCurrencyAmounts *BalanceAmounts3 `xml:"InstrmCcyAmts,omitempty"`
	// Breakdown of the aggregate quantity reported into significant lots, for example, tax lots.
	QuantityBreakdown []*QuantityBreakdown23 `xml:"QtyBrkdwn,omitempty"`
	// Breakdown of the aggregate balance per meaningful sub-balances and availability.
	BalanceBreakdown []*SubBalanceInformation11 `xml:"BalBrkdwn,omitempty"`
	// Provides additional instrument sub-balance information on all or parts of the reported financial instrument (unregistered, tax exempt, etc.).
	AdditionalBalanceBreakdown []*AdditionalBalanceInformation11 `xml:"AddtlBalBrkdwn,omitempty"`
	// Provides additional information on the holding.
	HoldingAdditionalDetails *Max350Text `xml:"HldgAddtlDtls,omitempty"`
}
// AddSafekeepingPlace allocates, attaches and returns the SafekeepingPlace choice.
func (a *AggregateBalancePerSafekeepingPlace20) AddSafekeepingPlace() *SafekeepingPlaceFormat3Choice {
	a.SafekeepingPlace = new(SafekeepingPlaceFormat3Choice)
	return a.SafekeepingPlace
}
// AddPlaceOfListing allocates, attaches and returns the PlaceOfListing choice.
func (a *AggregateBalancePerSafekeepingPlace20) AddPlaceOfListing() *MarketIdentification3Choice {
	a.PlaceOfListing = new(MarketIdentification3Choice)
	return a.PlaceOfListing
}
// AddAggregateBalance allocates, attaches and returns the AggregateBalance.
func (a *AggregateBalancePerSafekeepingPlace20) AddAggregateBalance() *Balance1 {
	a.AggregateBalance = new(Balance1)
	return a.AggregateBalance
}
// AddAvailableBalance allocates, attaches and returns the AvailableBalance choice.
func (a *AggregateBalancePerSafekeepingPlace20) AddAvailableBalance() *BalanceQuantity5Choice {
	a.AvailableBalance = new(BalanceQuantity5Choice)
	return a.AvailableBalance
}
// AddNotAvailableBalance allocates, attaches and returns the NotAvailableBalance choice.
func (a *AggregateBalancePerSafekeepingPlace20) AddNotAvailableBalance() *BalanceQuantity5Choice {
	a.NotAvailableBalance = new(BalanceQuantity5Choice)
	return a.NotAvailableBalance
}
// AddPriceDetails appends and returns a new PriceInformation5 entry.
func (a *AggregateBalancePerSafekeepingPlace20) AddPriceDetails() *PriceInformation5 {
	newValue := new(PriceInformation5)
	a.PriceDetails = append(a.PriceDetails, newValue)
	return newValue
}
// AddForeignExchangeDetails appends and returns a new ForeignExchangeTerms14 entry.
func (a *AggregateBalancePerSafekeepingPlace20) AddForeignExchangeDetails() *ForeignExchangeTerms14 {
	newValue := new(ForeignExchangeTerms14)
	a.ForeignExchangeDetails = append(a.ForeignExchangeDetails, newValue)
	return newValue
}
// SetDaysAccrued stores value as the Number of days used for accrued interest.
func (a *AggregateBalancePerSafekeepingPlace20) SetDaysAccrued(value string) {
	a.DaysAccrued = (*Number)(&value)
}
// AddAccountBaseCurrencyAmounts allocates, attaches and returns the account-base-currency amounts.
func (a *AggregateBalancePerSafekeepingPlace20) AddAccountBaseCurrencyAmounts() *BalanceAmounts3 {
	a.AccountBaseCurrencyAmounts = new(BalanceAmounts3)
	return a.AccountBaseCurrencyAmounts
}
// AddInstrumentCurrencyAmounts allocates, attaches and returns the instrument-currency amounts.
func (a *AggregateBalancePerSafekeepingPlace20) AddInstrumentCurrencyAmounts() *BalanceAmounts3 {
	a.InstrumentCurrencyAmounts = new(BalanceAmounts3)
	return a.InstrumentCurrencyAmounts
}
// AddQuantityBreakdown appends and returns a new QuantityBreakdown23 entry.
func (a *AggregateBalancePerSafekeepingPlace20) AddQuantityBreakdown() *QuantityBreakdown23 {
	newValue := new(QuantityBreakdown23)
	a.QuantityBreakdown = append(a.QuantityBreakdown, newValue)
	return newValue
}
// AddBalanceBreakdown appends and returns a new SubBalanceInformation11 entry.
func (a *AggregateBalancePerSafekeepingPlace20) AddBalanceBreakdown() *SubBalanceInformation11 {
	newValue := new(SubBalanceInformation11)
	a.BalanceBreakdown = append(a.BalanceBreakdown, newValue)
	return newValue
}
// AddAdditionalBalanceBreakdown appends and returns a new AdditionalBalanceInformation11 entry.
func (a *AggregateBalancePerSafekeepingPlace20) AddAdditionalBalanceBreakdown() *AdditionalBalanceInformation11 {
	newValue := new(AdditionalBalanceInformation11)
	a.AdditionalBalanceBreakdown = append(a.AdditionalBalanceBreakdown, newValue)
	return newValue
}
func (a *AggregateBalancePerSafekeepingPlace20) SetHoldingAdditionalDetails(value string) {
a.HoldingAdditionalDetails = (*Max350Text)(&value)
} | AggregateBalancePerSafekeepingPlace20.go | 0.871734 | 0.412353 | AggregateBalancePerSafekeepingPlace20.go | starcoder |
package docstrings
// Get - Get a document string
func Get(key string) KeyStrings {
switch key {
case "agent":
return KeyStrings{"agent <command>", "Commands that manage the Fly agent",
`Commands that manage the Fly agent`,
}
case "agent.daemon-start":
return KeyStrings{"daemon-start", "Run the Fly agent as a service (manually)",
`Run the Fly agent as a service (manually)`,
}
case "agent.ping":
return KeyStrings{"ping", "ping the Fly agent",
`ping the Fly agent`,
}
case "agent.restart":
return KeyStrings{"restart", "Restart the Fly agent",
`Restart the Fly agent`,
}
case "agent.start":
return KeyStrings{"start", "Start the Fly agent",
`Start the Fly agent`,
}
case "agent.stop":
return KeyStrings{"stop", "Stop the Fly agent",
`Stop the Fly agent`,
}
case "apps":
return KeyStrings{"apps", "Manage apps",
`The APPS commands focus on managing your Fly applications.
Start with the CREATE command to register your application.
The LIST command will list all currently registered applications.`,
}
case "apps.create":
return KeyStrings{"create [APPNAME]", "Create a new application",
`The APPS CREATE command will both register a new application
with the Fly platform and create the fly.toml file which controls how
the application will be deployed. The --builder flag allows a cloud native
buildpack to be specified which will be used instead of a Dockerfile to
create the application image when it is deployed.`,
}
case "apps.destroy":
return KeyStrings{"destroy [APPNAME]", "Permanently destroys an app",
`The APPS DESTROY command will remove an application
from the Fly platform.`,
}
case "apps.list":
return KeyStrings{"list", "List applications",
`The APPS LIST command will show the applications currently
registered and available to this user. The list will include applications
from all the organizations the user is a member of. Each application will
be shown with its name, owner and when it was last deployed.`,
}
case "apps.move":
return KeyStrings{"move [APPNAME]", "Move an app to another organization",
`The APPS MOVE command will move an application to another
organization the current user belongs to.`,
}
case "apps.restart":
return KeyStrings{"restart [APPNAME]", "Restart an application",
`The APPS RESTART command will restart all running vms.`,
}
case "apps.resume":
return KeyStrings{"resume [APPNAME]", "Resume an application",
`The APPS RESUME command will restart a previously suspended application.
The application will resume with its original region pool and a min count of one
meaning there will be one running instance once restarted. Use SCALE SET MIN= to raise
the number of configured instances.`,
}
case "apps.suspend":
return KeyStrings{"suspend [APPNAME]", "Suspend an application",
`The APPS SUSPEND command will suspend an application.
All instances will be halted leaving the application running nowhere.
It will continue to consume networking resources (IP address). See APPS RESUME
for details on restarting it.`,
}
case "auth":
return KeyStrings{"auth", "Manage authentication",
`Authenticate with Fly (and logout if you need to).
If you do not have an account, start with the AUTH SIGNUP command.
If you do have and account, begin with the AUTH LOGIN subcommand.`,
}
case "auth.docker":
return KeyStrings{"docker", "Authenticate docker",
`Adds registry.fly.io to the docker daemon's authenticated
registries. This allows you to push images directly to fly from
the docker cli.`,
}
case "auth.login":
return KeyStrings{"login", "Log in a user",
`Logs a user into the Fly platform. Supports browser-based,
email/password and one-time-password authentication. Defaults to using
browser-based authentication.`,
}
case "auth.logout":
return KeyStrings{"logout", "Logs out the currently logged in user",
`Log the currently logged-in user out of the Fly platform.
To continue interacting with Fly, the user will need to log in again.`,
}
case "auth.signup":
return KeyStrings{"signup", "Create a new fly account",
`Creates a new fly account. The command opens the browser
and sends the user to a form to provide appropriate credentials.`,
}
case "auth.token":
return KeyStrings{"token", "Show the current auth token",
`Shows the authentication token that is currently in use.
This can be used as an authentication token with API services,
independent of flyctl.`,
}
case "auth.whoami":
return KeyStrings{"whoami", "Show the currently authenticated user",
`Displays the users email address/service identity currently
authenticated and in use.`,
}
case "autoscale":
return KeyStrings{"autoscale", "Autoscaling app resources",
`Autoscaling application resources`,
}
case "autoscale.balanced":
return KeyStrings{"balanced", "Configure a traffic balanced app with params (min=int max=int)",
`Configure the app to balance regions based on traffic with given parameters:
min=int - minimum number of instances to be allocated from region pool.
max=int - maximum number of instances to be allocated from region pool.`,
}
case "autoscale.disable":
return KeyStrings{"disable", "Disable autoscaling",
`Disable autoscaling to manually controlling app resources`,
}
case "autoscale.set":
return KeyStrings{"set", "Set current models autoscaling parameters",
`Allows the setting of the current models autoscaling parameters:
min=int - minimum number of instances to be allocated from region pool.
max=int - maximum number of instances to be allocated from region pool.`,
}
case "autoscale.show":
return KeyStrings{"show", "Show current autoscaling configuration",
`Show current autoscaling configuration`,
}
case "autoscale.standard":
return KeyStrings{"standard", "Configure a standard balanced app with params (min=int max=int)",
`Configure the app without traffic balancing with the given parameters:
min=int - minimum number of instances to be allocated from region pool.
max=int - maximum number of instances to be allocated from region pool.`,
}
case "builds":
return KeyStrings{"builds", "Work with Fly builds",
`Fly builds are templates to make developing Fly applications easier.`,
}
case "builds.list":
return KeyStrings{"list", "List builds",
``,
}
case "builds.logs":
return KeyStrings{"logs", "Show logs associated with builds",
``,
}
case "builtins":
return KeyStrings{"builtins", "View and manage Flyctl deployment builtins",
`View and manage Flyctl deployment builtins.`,
}
case "builtins.list":
return KeyStrings{"list", "List available Flyctl deployment builtins",
`List available Flyctl deployment builtins and their
descriptions.`,
}
case "builtins.show":
return KeyStrings{"show [<builtin name>]", "Show details of a builtin's configuration",
`Show details of a Fly deployment builtins, including
the builtin "Dockerfile" with default settings and other information.`,
}
case "builtins.show-app":
return KeyStrings{"show-app", "Show details of a builtin's configuration",
`Show details of a Fly deployment builtins, including
the builtin "Dockerfile" with an apps settings included
and other information.`,
}
case "certs":
return KeyStrings{"certs", "Manage certificates",
`Manages the certificates associated with a deployed application.
Certificates are created by associating a hostname/domain with the application.
When Fly is then able to validate that hostname/domain, the platform gets
certificates issued for the hostname/domain by Let's Encrypt.`,
}
case "certs.add":
return KeyStrings{"add <hostname>", "Add a certificate for an app.",
`Add a certificate for an application. Takes a hostname
as a parameter for the certificate.`,
}
case "certs.check":
return KeyStrings{"check <hostname>", "Checks DNS configuration",
`Checks the DNS configuration for the specified hostname.
Displays results in the same format as the SHOW command.`,
}
case "certs.list":
return KeyStrings{"list", "List certificates for an app.",
`List the certificates associated with a deployed application.`,
}
case "certs.remove":
return KeyStrings{"remove <hostname>", "Removes a certificate from an app",
`Removes a certificate from an application. Takes hostname
as a parameter to locate the certificate.`,
}
case "certs.show":
return KeyStrings{"show <hostname>", "Shows certificate information",
`Shows certificate information for an application.
Takes hostname as a parameter to locate the certificate.`,
}
case "checks":
return KeyStrings{"checks", "Manage health checks",
`Manage health checks`,
}
case "checks.handlers":
return KeyStrings{"handlers", "Manage health check handlers",
`Manage health check handlers`,
}
case "checks.handlers.create":
return KeyStrings{"create", "Create a health check handler",
`Create a health check handler`,
}
case "checks.handlers.delete":
return KeyStrings{"delete <organization> <handler-name>", "Delete a health check handler",
`Delete a health check handler`,
}
case "checks.handlers.list":
return KeyStrings{"list", "List health check handlers",
`List health check handlers`,
}
case "checks.list":
return KeyStrings{"list", "List app health checks",
`List app health checks`,
}
case "config":
return KeyStrings{"config", "Manage an app's configuration",
`The CONFIG commands allow you to work with an application's configuration.`,
}
case "config.display":
return KeyStrings{"display", "Display an app's configuration",
`Display an application's configuration. The configuration is presented
in JSON format. The configuration data is retrieved from the Fly service.`,
}
case "config.env":
return KeyStrings{"env", "Display an app's runtime environment variables",
`Display an app's runtime environment variables. It displays a section for
secrets and another for config file defined environment variables.`,
}
case "config.save":
return KeyStrings{"save", "Save an app's config file",
`Save an application's configuration locally. The configuration data is
retrieved from the Fly service and saved in TOML format.`,
}
case "config.validate":
return KeyStrings{"validate", "Validate an app's config file",
`Validates an application's config file against the Fly platform to
ensure it is correct and meaningful to the platform.`,
}
case "curl":
return KeyStrings{"curl <url>", "Run a performance test against a url",
`Run a performance test against a url.`,
}
case "dashboard":
return KeyStrings{"dashboard", "Open web browser on Fly Web UI for this app",
`Open web browser on Fly Web UI for this application`,
}
case "dashboard.metrics":
return KeyStrings{"metrics", "Open web browser on Fly Web UI for this app's metrics",
`Open web browser on Fly Web UI for this application's metrics`,
}
case "deploy":
return KeyStrings{"deploy [<workingdirectory>]", "Deploy an app to the Fly platform",
`Deploy an application to the Fly platform. The application can be a local
image, remote image, defined in a Dockerfile or use a CNB buildpack.
Use the --config/-c flag to select a specific toml configuration file.
Use the --image/-i flag to specify a local or remote image to deploy.
Use the --detach flag to return immediately from starting the deployment rather
than monitoring the deployment progress.
Use flyctl monitor to restart monitoring deployment progress`,
}
case "destroy":
return KeyStrings{"destroy [APPNAME]", "Permanently destroys an app",
`The DESTROY command will remove an application
from the Fly platform.`,
}
case "dig":
return KeyStrings{"dig [type] <name>", "DNS lookups",
`Make DNS requests against Fly.io's internal DNS server. Valid types include
AAAA and TXT (the two types our servers answer authoritatively), AAAA-NATIVE
and TXT-NATIVE, which resolve with Go's resolver (they're slower,
but may be useful if diagnosing a DNS bug) and A and CNAME
(if you're using the server to test recursive lookups.)
Note that this resolves names against the server for the current organization. You can
set the organization with -o <org-slug>; otherwise, the command uses the organization
attached to the current app (you can pass an app in with -a <appname>).`,
}
case "dns-records":
return KeyStrings{"dns-records", "Manage DNS records",
`Manage DNS records within a domain`,
}
case "dns-records.export":
return KeyStrings{"export <domain> [<filename>]", "Export DNS records",
`Export DNS records. Will write to a file if a filename is given, otherwise
writers to StdOut.`,
}
case "dns-records.import":
return KeyStrings{"import <domain> [<filename>]", "Import DNS records",
`Import DNS records. Will import from a file is a filename is given, otherwise
imports from StdIn.`,
}
case "dns-records.list":
return KeyStrings{"list <domain>", "List DNS records",
`List DNS records within a domain`,
}
case "docs":
return KeyStrings{"docs", "View Fly documentation",
`View Fly documentation on the Fly.io website. This command will open a
browser to view the content.`,
}
case "domains":
return KeyStrings{"domains", "Manage domains",
`Manage domains`,
}
case "domains.add":
return KeyStrings{"add [org] [name]", "Add a domain",
`Add a domain to an organization`,
}
case "domains.list":
return KeyStrings{"list [<org>]", "List domains",
`List domains for an organization`,
}
case "domains.register":
return KeyStrings{"register [org] [name]", "Register a domain",
`Register a new domain in an organization`,
}
case "domains.show":
return KeyStrings{"show <domain>", "Show domain",
`Show information about a domain`,
}
case "flyctl":
return KeyStrings{"flyctl", "The Fly CLI",
`flyctl is a command line interface to the Fly.io platform.
It allows users to manage authentication, application launch,
deployment, network configuration, logging and more with just the
one command.
Launch an app with the launch command
Deploy an app with the deploy command
View a deployed web application with the open command
Check the status of an application with the status command
To read more, use the docs command to view Fly's help on the web.`,
}
case "history":
return KeyStrings{"history", "List an app's change history",
`List the history of changes in the application. Includes autoscaling
events and their results.`,
}
case "image":
return KeyStrings{"image", "Manage app image",
`Manage app image`,
}
case "image.show":
return KeyStrings{"show", "Show image details.",
`Show image details.`,
}
case "image.update":
return KeyStrings{"update", "Updates the app's image to the latest available version. (Fly Postgres only)",
`This will update the application's image to the latest available version.
The update will perform a rolling restart against each VM, which may result in a brief service disruption.`,
}
case "info":
return KeyStrings{"info", "Show detailed app information",
`Shows information about the application on the Fly platform
Information includes the application's
* name, owner, version, status and hostname
* services
* IP addresses`,
}
case "ips":
return KeyStrings{"ips", "Manage IP addresses for apps",
`The IPS commands manage IP addresses for applications. An application
can have a number of IP addresses associated with it and this family of commands
allows you to list, allocate and release those addresses. It supports both IPv4
and IPv6 addresses.`,
}
case "ips.allocate-v4":
return KeyStrings{"allocate-v4", "Allocate an IPv4 address",
`Allocates an IPv4 address to the application.`,
}
case "ips.allocate-v6":
return KeyStrings{"allocate-v6", "Allocate an IPv6 address",
`Allocates an IPv6 address to the application.`,
}
case "ips.list":
return KeyStrings{"list", "List allocated IP addresses",
`Lists the IP addresses allocated to the application.`,
}
case "ips.private":
return KeyStrings{"private", "List instances private IP addresses",
`List instances private IP addresses, accessible from within the
Fly network`,
}
case "ips.release":
return KeyStrings{"release [ADDRESS]", "Release an IP address",
`Releases an IP address from the application.`,
}
case "launch":
return KeyStrings{"launch", "Launch a new app",
`Create and configure a new app from source code or an image reference.`,
}
case "list":
return KeyStrings{"list", "Lists your Fly resources",
`The list command is for listing your resources on has two subcommands, apps and orgs.
The apps command lists your applications. There are filtering options available.
The orgs command lists all the organizations you are a member of.`,
}
case "list.apps":
return KeyStrings{"apps [text] [-o org] [-s status]", "Lists all your apps",
`The list apps command lists all your applications. As this may be a
long list, there are options to filter the results.
Specifying a text string as a parameter will only return applications where the
application name contains the text.
The --orgs/-o flag allows you to specify the name of an organization that the
application must be owned by. (see list orgs for organization names).
The --status/-s flag allows you to specify status applications should be at to be
returned in the results. e.g. -s running would only return running applications.`,
}
case "list.orgs":
return KeyStrings{"orgs", "List all your organizations",
`Lists all organizations which your are a member of. It will show the
short name of the organization and the long name.`,
}
case "logs":
return KeyStrings{"logs", "View app logs",
`View application logs as generated by the application running on
the Fly platform.
Logs can be filtered to a specific instance using the --instance/-i flag or
to all instances running in a specific region using the --region/-r flag.`,
}
case "machine":
return KeyStrings{"machine <command>", "Commands that manage machines",
`Commands that manage machines`,
}
case "machine.kill":
return KeyStrings{"kill <id>", "Kill (SIGKILL) a Fly machine",
`Kill (SIGKILL) a Fly machine`,
}
case "machine.list":
return KeyStrings{"list", "List Fly machines",
`List Fly machines`,
}
case "machine.remove":
return KeyStrings{"remove <id>", "Remove a Fly machine",
`Remove a Fly machine`,
}
case "machine.run":
return KeyStrings{"run <image> [command]", "Launch a Fly machine",
`Launch Fly machine with the provided image and command`,
}
case "machine.start":
return KeyStrings{"start <id>", "Start a Fly machine",
`Start a Fly machine`,
}
case "machine.stop":
return KeyStrings{"stop <id>", "Stop a Fly machine",
`Stop a Fly machine`,
}
case "monitor":
return KeyStrings{"monitor", "Monitor deployments",
`Monitor application deployments and other activities. Use --verbose/-v
to get details of every instance . Control-C to stop output.`,
}
case "move":
return KeyStrings{"move [APPNAME]", "Move an app to another organization",
`The MOVE command will move an application to another
organization the current user belongs to.`,
}
case "open":
return KeyStrings{"open [PATH]", "Open browser to current deployed application",
`Open browser to current deployed application. If an optional path is specified, this is appended to the
URL for deployed application.`,
}
case "orgs":
return KeyStrings{"orgs", "Commands for managing Fly organizations",
`Commands for managing Fly organizations. list, create, show and
destroy organizations.
Organization admins can also invite or remove users from Organizations.`,
}
case "orgs.create":
return KeyStrings{"create <org>", "Create an organization",
`Create a new organization. Other users can be invited to join the
organization later.`,
}
case "orgs.delete":
return KeyStrings{"delete <org>", "Delete an organization",
`Delete an existing organization.`,
}
case "orgs.invite":
return KeyStrings{"invite <org> <email>", "Invite user (by email) to organization",
`Invite a user, by email, to join organization. The invitation will be
sent, and the user will be pending until they respond. See also orgs revoke.`,
}
case "orgs.list":
return KeyStrings{"list", "Lists organizations for current user",
`Lists organizations available to current user.`,
}
case "orgs.remove":
return KeyStrings{"remove <org> <email>", "Remove a user from an organization",
`Remove a user from an organization. User must have accepted a previous
invitation to join (if not, see orgs revoke).`,
}
case "orgs.revoke":
return KeyStrings{"revoke <org> <email>", "Revoke a pending invitation to an organization",
`Revokes an invitation to join an organization that has been sent to a
user by email.`,
}
case "orgs.show":
return KeyStrings{"show <org>", "Show information about an organization",
`Shows information about an organization.
Includes name, slug and type. Summarizes user permissions, DNS zones and
associated member. Details full list of members and roles.`,
}
case "platform":
return KeyStrings{"platform", "Fly platform information",
`The PLATFORM commands are for users looking for information
about the Fly platform.`,
}
case "platform.regions":
return KeyStrings{"regions", "List regions",
`View a list of regions where Fly has edges and/or datacenters`,
}
case "platform.status":
return KeyStrings{"status", "Show current platform status",
`Show current Fly platform status in a browser`,
}
case "platform.vmsizes":
return KeyStrings{"vm-sizes", "List VM Sizes",
`View a list of VM sizes which can be used with the FLYCTL SCALE VM command`,
}
case "postgres":
return KeyStrings{"postgres", "Manage postgres clusters",
`Manage postgres clusters`,
}
case "postgres.attach":
return KeyStrings{"attach", "Attach a postgres cluster to an app",
`Attach a postgres cluster to an app`,
}
case "postgres.create":
return KeyStrings{"create", "Create a postgres cluster",
`Create a postgres cluster`,
}
case "postgres.db":
return KeyStrings{"db", "manage databases in a cluster",
`manage databases in a cluster`,
}
case "postgres.db.create":
return KeyStrings{"create <postgres-cluster-name>", "create a database in a cluster",
`create a database in a cluster`,
}
case "postgres.db.list":
return KeyStrings{"list <postgres-cluster-name>", "list databases in a cluster",
`list databases in a cluster`,
}
case "postgres.detach":
return KeyStrings{"detach", "Detach a postgres cluster from an app",
`Detach a postgres cluster from an app`,
}
case "postgres.list":
return KeyStrings{"list", "list postgres clusters",
`list postgres clusters`,
}
case "postgres.users":
return KeyStrings{"users", "manage users in a cluster",
`manage users in a cluster`,
}
case "postgres.users.create":
return KeyStrings{"create <postgres-cluster-name>", "create a user in a cluster",
`create a user in a cluster`,
}
case "postgres.users.list":
return KeyStrings{"list <postgres-cluster-name>", "list users in a cluster",
`list users in a cluster`,
}
case "proxy":
return KeyStrings{"proxy <local:remote>", "Proxies connections to a fly app",
`Proxies connections to a fly app through the wireguard tunnel`,
}
case "regions":
return KeyStrings{"regions", "Manage regions",
`Configure the region placement rules for an application.`,
}
case "regions.add":
return KeyStrings{"add REGION ...", "Allow the app to run in the provided regions",
`Allow the app to run in one or more regions`,
}
case "regions.backup":
return KeyStrings{"backup REGION ...", "Sets the backup region pool with provided regions",
`Sets the backup region pool with provided regions`,
}
case "regions.list":
return KeyStrings{"list", "Shows the list of regions the app is allowed to run in",
`Shows the list of regions the app is allowed to run in.`,
}
case "regions.remove":
return KeyStrings{"remove REGION ...", "Prevent the app from running in the provided regions",
`Prevent the app from running in the provided regions`,
}
case "regions.set":
return KeyStrings{"set REGION ...", "Sets the region pool with provided regions",
`Sets the region pool with provided regions`,
}
case "releases":
return KeyStrings{"releases", "List app releases",
`List all the releases of the application onto the Fly platform,
including type, when, success/fail and which user triggered the release.`,
}
case "restart":
return KeyStrings{"restart [APPNAME]", "Restart an application",
`The RESTART command will restart all running vms.`,
}
case "resume":
return KeyStrings{"resume [APPNAME]", "Resume an application",
`The RESUME command will restart a previously suspended application.
The application will resume with its original region pool and a min count of one
meaning there will be one running instance once restarted. Use SCALE SET MIN= to raise
the number of configured instances.`,
}
case "scale":
return KeyStrings{"scale", "Scale app resources",
`Scale application resources`,
}
case "scale.count":
return KeyStrings{"count <count>", "Change an app's VM count to the given value",
`Change an app's VM count to the given value.
For pricing, see https://fly.io/docs/about/pricing/`,
}
case "scale.memory":
return KeyStrings{"memory <memoryMB>", "Set VM memory",
`Set VM memory to a number of megabytes`,
}
case "scale.show":
return KeyStrings{"show", "Show current resources",
`Show current VM size and counts`,
}
case "scale.vm":
return KeyStrings{"vm [SIZENAME] [flags]", "Change an app's VM to a named size (eg. shared-cpu-1x, dedicated-cpu-1x, dedicated-cpu-2x...)",
`Change an application's VM size to one of the named VM sizes.
Size names include shared-cpu-1x, dedicated-cpu-1x, dedicated-cpu-2x.
For a full list of supported sizes use the command FLYCTL PLATFORM VM-SIZES
Memory size can be set with --memory=number-of-MB
e.g. flyctl scale vm shared-cpu-1x --memory=2048
For dedicated vms, this should be a multiple of 1024MB.
For shared vms, this can be 256MB or a a multiple of 1024MB.
For pricing, see https://fly.io/docs/about/pricing/`,
}
case "secrets":
return KeyStrings{"secrets", "Manage app secrets",
`Manage application secrets with the set and unset commands.
Secrets are provided to applications at runtime as ENV variables. Names are
case sensitive and stored as-is, so ensure names are appropriate for
the application and vm environment.`,
}
case "secrets.import":
return KeyStrings{"import [flags]", "Read secrets in name=value from stdin",
`Set one or more encrypted secrets for an application. Values
are read from stdin as name=value`,
}
case "secrets.list":
return KeyStrings{"list", "Lists the secrets available to the app",
`List the secrets available to the application. It shows each
secret's name, a digest of the its value and the time the secret was last set.
The actual value of the secret is only available to the application.`,
}
case "secrets.set":
return KeyStrings{"set [flags] NAME=VALUE NAME=VALUE ...", "Set one or more encrypted secrets for an app",
`Set one or more encrypted secrets for an application.
Secrets are provided to application at runtime as ENV variables. Names are
case sensitive and stored as-is, so ensure names are appropriate for
the application and vm environment.
Any value that equals "-" will be assigned from STDIN instead of args.`,
}
case "secrets.unset":
return KeyStrings{"unset [flags] NAME NAME ...", "Remove encrypted secrets from an app",
`Remove encrypted secrets from the application. Unsetting a
secret removes its availability to the application.`,
}
case "ssh":
return KeyStrings{"ssh <command>", "Commands that manage SSH credentials",
`Commands that manage SSH credentials`,
}
case "ssh.console":
return KeyStrings{"console [<host>]", "Connect to a running instance of the current app.",
`Connect to a running instance of the current app; with -select, choose instance from list.`,
}
case "ssh.establish":
return KeyStrings{"establish [<org>] [<override>]", "Create a root SSH certificate for your organization",
`Create a root SSH certificate for your organization. If <override>
is provided, will re-key an organization; all previously issued creds will be
invalidated.`,
}
case "ssh.issue":
return KeyStrings{"issue [org] [email] [path]", "Issue a new SSH credential.",
`Issue a new SSH credential. With -agent, populate credential
into SSH agent. With -hour, set the number of hours (1-72) for credential
validity.`,
}
case "ssh.log":
return KeyStrings{"log", "Log of all issued certs",
`log of all issued certs`,
}
case "ssh.shell":
return KeyStrings{"shell [org] [address]", "Connect directly to an instance.",
`Connect directly to an instance. With -region, set the
WireGuard region to use for the connection.`,
}
case "status":
return KeyStrings{"status", "Show app status",
`Show the application's current status including application
details, tasks, most recent deployment details and in which regions it is
currently allocated.`,
}
case "status.instance":
return KeyStrings{"instance [instance-id]", "Show instance status",
`Show the instance's current status including logs, checks,
and events.`,
}
case "suspend":
return KeyStrings{"suspend [APPNAME]", "Suspend an application",
`The SUSPEND command will suspend an application.
All instances will be halted leaving the application running nowhere.
It will continue to consume networking resources (IP address). See RESUME
for details on restarting it.`,
}
case "turboku":
return KeyStrings{"turboku <heroku-app>", "Launches heroku apps",
`Launches heroku apps`,
}
case "version":
return KeyStrings{"version", "Show version information for the flyctl command",
`Shows version information for the flyctl command itself,
including version number and build date.`,
}
case "version.update":
return KeyStrings{"update", "Checks for available updates and automatically updates",
`Checks for update and if one is available, runs the appropriate
command to update the application.`,
}
case "vm":
return KeyStrings{"vm <command>", "Commands that manage VM instances",
`Commands that manage VM instances`,
}
case "vm.restart":
return KeyStrings{"restart <vm-id>", "Restart a VM",
`Request for a VM to be asynchronously restarted.`,
}
case "vm.status":
return KeyStrings{"status <vm-id>", "Show a VM's status",
`Show a VM's current status including logs, checks, and events.`,
}
case "vm.stop":
return KeyStrings{"stop <vm-id>", "Stop a VM",
`Request for a VM to be asynchronously stopped.`,
}
case "volumes":
return KeyStrings{"volumes <command>", "Volume management commands",
`Commands for managing Fly Volumes associated with an application.`,
}
case "volumes.create":
return KeyStrings{"create <volumename>", "Create new volume for app",
`Create new volume for app. --region flag must be included to specify
region the volume exists in. --size flag is optional, defaults to 10,
sets the size as the number of gigabytes the volume will consume.`,
}
case "volumes.delete":
return KeyStrings{"delete <id>", "Delete a volume from the app",
`Delete a volume from the application. Requires the volume's ID
number to operate. This can be found through the volumes list command`,
}
case "volumes.list":
return KeyStrings{"list", "List the volumes for app",
`List all the volumes associated with this application.`,
}
case "volumes.show":
return KeyStrings{"show <id>", "Show details of an app's volume",
`Show details of an app's volume. Requires the volume's ID
number to operate. This can be found through the volumes list command`,
}
case "volumes.snapshots":
return KeyStrings{"snapshots", "Manage volume snapshots",
`Commands for managing volume snapshots`,
}
case "volumes.snapshots.list":
return KeyStrings{"list <volume-id>", "list snapshots associated with the specified volume",
`list snapshots associated with the specified volume`,
}
case "wireguard":
return KeyStrings{"wireguard <command>", "Commands that manage WireGuard peer connections",
`Commands that manage WireGuard peer connections`,
}
case "wireguard.create":
return KeyStrings{"create [org] [region] [name]", "Add a WireGuard peer connection",
`Add a WireGuard peer connection to an organization`,
}
case "wireguard.list":
return KeyStrings{"list [<org>]", "List all WireGuard peer connections",
`List all WireGuard peer connections`,
}
case "wireguard.remove":
return KeyStrings{"remove [org] [name]", "Remove a WireGuard peer connection",
`Remove a WireGuard peer connection from an organization`,
}
case "wireguard.token":
return KeyStrings{"token <command>", "Commands that managed WireGuard delegated access tokens",
`Commands that managed WireGuard delegated access tokens`,
}
case "wireguard.token.create":
return KeyStrings{"create [org] [name]", "Create a new WireGuard token",
`Create a new WireGuard token`,
}
case "wireguard.token.delete":
return KeyStrings{"delete [org] [token]", "Delete a WireGuard token; token is name:<name> or token:<token>",
`Delete a WireGuard token; token is name:<name> or token:<token>`,
}
case "wireguard.token.list":
return KeyStrings{"list [<org>]", "List all WireGuard tokens",
`List all WireGuard tokens`,
}
case "wireguard.token.start":
return KeyStrings{"start [name] [group] [region] [file]", "Start a new WireGuard peer connection associated with a token (set FLY_WIREGUARD_TOKEN)",
`Start a new WireGuard peer connection associated with a token (set FLY_WIREGUARD_TOKEN)`,
}
case "wireguard.token.update":
return KeyStrings{"update [name] [file]", "Rekey a WireGuard peer connection associated with a token (set FLY_WIREGUARD_TOKEN)",
`Rekey a WireGuard peer connection associated with a token (set FLY_WIREGUARD_TOKEN)`,
}
}
panic("unknown command key " + key)
} | docstrings/gen.go | 0.632049 | 0.535949 | gen.go | starcoder |
package typeinfo
import (
"context"
"fmt"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/dolt/go/store/types"
)
// polygonType is a dolt implementation of the MySQL type Polygon, thus most of the
// functionality within is directly reliant on the go-mysql-server implementation.
type polygonType struct {
	sqlPolygonType sql.PolygonType
}

// Compile-time assertion that *polygonType satisfies the TypeInfo interface.
var _ TypeInfo = (*polygonType)(nil)

// PolygonType is the shared TypeInfo instance for the polygon type.
var PolygonType = &polygonType{sql.PolygonType{}}
// ConvertTypesPolygonToSQLPolygon deep-copies a types.Polygon into an
// equivalent sql.Polygon, converting each contained linestring.
func ConvertTypesPolygonToSQLPolygon(p types.Polygon) sql.Polygon {
	converted := make([]sql.Linestring, 0, len(p.Lines))
	for _, line := range p.Lines {
		converted = append(converted, ConvertTypesLinestringToSQLLinestring(line))
	}
	return sql.Polygon{SRID: p.SRID, Lines: converted}
}
// ConvertNomsValueToValue implements TypeInfo interface.
// A types.Polygon becomes a sql.Polygon; types.Null and a nil value both map
// to a nil interface. Any other kind is rejected with an error.
func (ti *polygonType) ConvertNomsValueToValue(v types.Value) (interface{}, error) {
	switch val := v.(type) {
	case types.Polygon:
		return ConvertTypesPolygonToSQLPolygon(val), nil
	case types.Null:
		return nil, nil
	case nil:
		return nil, nil
	}
	return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), v.Kind())
}
// ReadFrom reads a go value from a noms types.CodecReader directly.
// Only PolygonKind (decoded, then converted to the sql representation) and
// NullKind (returned as nil) are accepted; any other kind is an error.
func (ti *polygonType) ReadFrom(nbf *types.NomsBinFormat, reader types.CodecReader) (interface{}, error) {
	k := reader.ReadKind()
	switch k {
	case types.PolygonKind:
		p, err := reader.ReadPolygon()
		if err != nil {
			return nil, err
		}
		return ti.ConvertNomsValueToValue(p)
	case types.NullKind:
		return nil, nil
	}
	return nil, fmt.Errorf(`"%v" cannot convert NomsKind "%v" to a value`, ti.String(), k)
}
// ConvertSQLPolygonToTypesPolygon converts a sql.Polygon (engine
// representation) into a types.Polygon (noms storage representation),
// deep-copying each of its linestrings. SRID is carried over unchanged.
func ConvertSQLPolygonToTypesPolygon(p sql.Polygon) types.Polygon {
	converted := make([]types.Linestring, 0, len(p.Lines))
	for _, ring := range p.Lines {
		converted = append(converted, ConvertSQLLinestringToTypesLinestring(ring))
	}
	return types.Polygon{SRID: p.SRID, Lines: converted}
}
// ConvertValueToNomsValue implements TypeInfo interface.
// nil converts to types.NullValue; any other value is first coerced through
// the underlying sql Polygon type's Convert, then translated into the noms
// representation.
func (ti *polygonType) ConvertValueToNomsValue(ctx context.Context, vrw types.ValueReadWriter, v interface{}) (types.Value, error) {
	// Check for null
	if v == nil {
		return types.NullValue, nil
	}
	// Convert to sql.PolygonType
	poly, err := ti.sqlPolygonType.Convert(v)
	if err != nil {
		return nil, err
	}
	return ConvertSQLPolygonToTypesPolygon(poly.(sql.Polygon)), nil
}
// Equals implements TypeInfo interface. Two TypeInfos are equal exactly when
// the other is also a *polygonType; the type carries no parameters to compare.
func (ti *polygonType) Equals(other TypeInfo) bool {
	if other == nil {
		return false
	}
	switch other.(type) {
	case *polygonType:
		return true
	default:
		return false
	}
}
// FormatValue implements TypeInfo interface.
// It serializes a types.Polygon into its EWKB binary representation, returned
// as a *string; types.Null and nil format as a nil pointer. Any other value
// type is an error.
func (ti *polygonType) FormatValue(v types.Value) (*string, error) {
	if val, ok := v.(types.Polygon); ok {
		// Total size: EWKB header + the polygon's ring count, then for each
		// ring its own length prefix plus the raw point data.
		size := types.EWKBHeaderSize + types.LengthSize
		for _, l := range val.Lines {
			size += types.LengthSize + types.PointDataSize*len(l.Points)
		}
		buf := make([]byte, size)
		types.WriteEWKBHeader(val, buf[:types.EWKBHeaderSize])
		types.WriteEWKBPolyData(val, buf[types.EWKBHeaderSize:])
		resStr := string(buf)
		return &resStr, nil
	}
	if _, ok := v.(types.Null); ok || v == nil {
		return nil, nil
	}
	// Pass the value itself to %T: previously v.Kind() was the argument, which
	// always printed "types.NomsKind" instead of the offending value's type.
	return nil, fmt.Errorf(`"%v" has unexpectedly encountered a value of type "%T" from embedded type`, ti.String(), v)
}
// GetTypeIdentifier implements TypeInfo interface.
func (ti *polygonType) GetTypeIdentifier() Identifier {
	return PolygonTypeIdentifier
}

// GetTypeParams implements TypeInfo interface. Polygon carries no parameters,
// so the map is always empty.
func (ti *polygonType) GetTypeParams() map[string]string {
	return map[string]string{}
}
// IsValid implements TypeInfo interface. Polygon values, Null values, and nil
// are all considered valid for this type.
func (ti *polygonType) IsValid(v types.Value) bool {
	switch v.(type) {
	case types.Polygon, types.Null, nil:
		return true
	default:
		return false
	}
}
// NomsKind implements TypeInfo interface.
func (ti *polygonType) NomsKind() types.NomsKind {
	return types.PolygonKind
}

// Promote implements TypeInfo interface, delegating to the sql type's Promote.
func (ti *polygonType) Promote() TypeInfo {
	return &polygonType{ti.sqlPolygonType.Promote().(sql.PolygonType)}
}

// String implements TypeInfo interface.
func (ti *polygonType) String() string {
	return "Polygon"
}

// ToSqlType implements TypeInfo interface.
func (ti *polygonType) ToSqlType() sql.Type {
	return ti.sqlPolygonType
}
// polygonTypeConverter is an internal function for GetTypeConverter that handles the specific type as the source TypeInfo.
// Conversion to the same type is the identity (no conversion needed); most
// destinations simply route through the destination's ConvertValueToNomsValue.
func polygonTypeConverter(ctx context.Context, src *polygonType, destTi TypeInfo) (tc TypeConverter, needsConversion bool, err error) {
	switch dest := destTi.(type) {
	case *bitType:
		// NOTE(review): converting to bit discards the polygon entirely and
		// always yields Uint(0) — confirm this matches the intended MySQL behavior.
		return func(ctx context.Context, vrw types.ValueReadWriter, v types.Value) (types.Value, error) {
			return types.Uint(0), nil
		}, true, nil
	case *blobStringType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *boolType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *datetimeType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *decimalType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *enumType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *floatType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *inlineBlobType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *intType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *jsonType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *linestringType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *pointType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *polygonType:
		// Same type on both sides: nothing to do.
		return identityTypeConverter, false, nil
	case *setType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *timeType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *uintType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *uuidType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *varBinaryType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *varStringType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	case *yearType:
		return wrapConvertValueToNomsValue(dest.ConvertValueToNomsValue)
	default:
		return nil, false, UnhandledTypeConversion.New(src.String(), destTi.String())
	}
}
package marc21
/*
https://www.loc.gov/marc/specifications/specrecstruc.html
A directory entry in MARC 21 is made up of a tag, length-of-field,
and field starting position. The directory begins in character
position 24 of the record and ends with a field terminator. It is
of variable length and consists of a series of fixed fields,
referred to as "entries." One entry is associated with each
variable field (control or data) present in the record. Each
directory entry is 12 characters in length;...
*/
/*
http://www.loc.gov/marc/bibliographic/bdintro.html
Directory - A series of entries that contain the tag, length, and
starting location of each variable field within a record. Each
entry is 12 character positions in length. Directory entries for
variable control fields appear first, sequenced by the field tag in
increasing numerical order. Entries for variable data fields
follow, arranged in ascending order according to the first
character of the tag. The stored sequence of the variable data
fields in a record does not necessarily correspond to the order of
the corresponding Directory entries. Duplicate tags are
distinguished only by the location of the respective fields within
the record. The Directory ends with a field terminator character
(ASCII 1E hex).
Also:
http://www.loc.gov/marc/holdings/hdintro.html
http://www.loc.gov/marc/authority/adintro.html
http://www.loc.gov/marc/classification/cdintro.html
http://www.loc.gov/marc/community/ciintro.html
*/
/*
http://www.loc.gov/marc/bibliographic/bddirectory.html
CHARACTER POSITIONS
00-02 - Tag
Three ASCII numeric or ASCII alphabetic characters (upper case
or lower case, but not both) that identify an associated
variable field.
03-06 - Field length
Four ASCII numeric characters that specify the length of the
variable field, including indicators, subfield codes, data, and
the field terminator. A Field length number of less than four
digits is right justified and unused positions contain zeros.
07-11 - Starting character position
Five ASCII numeric characters that specify the starting
character position of the variable field relative to the Base
address of data (Leader/12-16) of the record. A Starting
character position number of less than five digits is right
justified and unused positions contain zeros.
*/
// parseDirectory extracts the directory from the raw MARC record bytes.
// Entries begin at offset leaderLen (character position 24) and are each 12
// bytes: tag (3), field length (4), starting character position (5). Scanning
// stops at the field terminator (ASCII 1E hex).
// NOTE(review): there is no bounds checking here — a truncated or malformed
// record lacking a field terminator would panic with an index out of range;
// consider validating len(r) while scanning.
func parseDirectory(r []byte) (dir []*directoryEntry, err error) {
	for i := leaderLen; r[i] != fieldTerminator; i += 12 {
		var de directoryEntry
		de.tag = string(r[i : i+3])
		de.fieldLength, err = toInt(r[i+3 : i+7])
		if err != nil {
			return nil, err
		}
		de.startingPos, err = toInt(r[i+7 : i+12])
		if err != nil {
			return nil, err
		}
		dir = append(dir, &de)
	}
	return dir, nil
}
}
// +build kubeapiserver
package cluster
import (
ksmstore "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/store"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
/*
labelJoiner ingests all the metrics used by label joins to build a tree that can then be used to efficiently find which labels should be added to other metrics.
For ex., for the following JoinsConfig:
"kube_deployment_labels":
LabelsToMatch: ["namespace", "deployment"]
LabelsToGet: ["chart_name", "chart_version"]
and the following metrics:
kube_deployment_labels {"namespace": "ns-a", "deployment": "foo", "chart_name": "foo", "chart_version": "1.0"}
kube_deployment_labels {"namespace": "ns-a", "deployment": "bar", "chart_name": "bar", "chart_version": "1.1"}
kube_deployment_labels {"namespace": "ns-b", "deployment": "foo", "chart_name": "foo", "chart_version": "2.0"}
it will create the following tree:
kube_deployment_labels
├─ ns-a
│ ├─ foo
│ │ └─ [ chart_name:foo, chart_version:1.0 ]
│ └─ bar
│ └─ [ chart_name:bar, chart_version:1.1 ]
└─ ns-b
└─ foo
└─ [ chart_name:foo, chart_version:2.0 ]
At the first level of the tree, there are the different values for the "namespace" label (because it’s the first label to match)
At the second level of the tree, there are all the different values for the "deployment" label (because it’s the first label to match)
At the third level of the tree, there are the lists of labels to add with the keys and the values.
When a metric like the following needs to be decorated:
kube_pod_container_status_running {"namespace": "ns-a", "deployment": "bar", "container": "agent", "pod": "XXX"}
We first extract the "namespace" value because it’s the first label to match.
This value is used to lookup the first level node in the tree.
The "deployment" value is then extracted because it’s the second label to match.
This value is used to lookup the second level node in the tree.
That node contains the list of labels to add.
*/
// labelJoiner holds one lookup tree per join-source metric name (see the
// package comment above for the tree layout).
type labelJoiner struct {
	metricsToJoin map[string]metricToJoin
}

// metricToJoin pairs a join configuration with the lookup tree built from
// that metric family's values.
type metricToJoin struct {
	config *JoinsConfig
	tree   *node
}

// label is one key/value pair to be added to a decorated metric.
type label struct {
	key   string
	value string
}

// node is a tree node: inner nodes index their children by label value in
// labelValues; leaf nodes carry the labels to add in labelsToAdd.
type node struct {
	labelValues map[string]*node
	labelsToAdd []label
}
// newLabelJoiner builds a labelJoiner from the label-join configuration, with
// one (config, tree) pair per joined metric name. A config that matches on at
// least one label gets an inner root node; a config with nothing to match
// gets a leaf root directly.
func newLabelJoiner(config map[string]*JoinsConfig) *labelJoiner {
	joins := make(map[string]metricToJoin, len(config))
	for name, cfg := range config {
		root := newLeafNode()
		if len(cfg.LabelsToMatch) > 0 {
			root = newInnerNode()
		}
		joins[name] = metricToJoin{
			config: cfg,
			tree:   root,
		}
	}
	return &labelJoiner{
		metricsToJoin: joins,
	}
}
// newInnerNode creates a non-leaf node for the tree.
// A non-leaf node holds its children in the `labelValues` map and never uses
// `labelsToAdd`, which therefore stays nil (the zero value).
func newInnerNode() *node {
	return &node{labelValues: map[string]*node{}}
}
// newLeafNode creates a leaf node for the tree.
// A leaf has no children, so `labelValues` stays nil. Its `labelsToAdd` slice
// is allocated lazily, once the expected final size is known.
func newLeafNode() *node {
	return new(node)
}
// insertMetric inserts one join-source metric into its lookup tree. It walks
// the tree along the metric's values for config.LabelsToMatch (creating inner
// or leaf nodes as needed), then records on the leaf the labels to add later:
// either every non-matched label (GetAllLabels) or just config.LabelsToGet.
// Metrics missing any label to match are silently skipped.
func (lj *labelJoiner) insertMetric(metric ksmstore.DDMetric, config *JoinsConfig, tree *node) {
	current := tree
	// Parse the tree from the root to the leaf and add missing nodes on the way.
	nbLabelsToMatch := len(config.LabelsToMatch)
	for i, labelToMatch := range config.LabelsToMatch {
		labelValue, found := metric.Labels[labelToMatch]
		if !found {
			return
		}
		child, found := current.labelValues[labelValue]
		if !found {
			// If the node hasn’t been found in the tree, a node for the current `labelValue` needs to be added.
			// The current depth is checked to know if the node will be a leaf or not.
			if i < nbLabelsToMatch-1 {
				child = newInnerNode()
			} else {
				child = newLeafNode()
			}
			current.labelValues[labelValue] = child
		}
		current = child
	}
	// Fill the `labelsToAdd` on the leaf node.
	if config.GetAllLabels {
		if current.labelsToAdd == nil {
			current.labelsToAdd = make([]label, 0, len(metric.Labels)-len(config.LabelsToMatch))
		}
		// Add every label except those used for matching.
		for labelName, labelValue := range metric.Labels {
			isALabelToMatch := false
			for _, labelToMatch := range config.LabelsToMatch {
				if labelName == labelToMatch {
					isALabelToMatch = true
					break
				}
			}
			if !isALabelToMatch {
				current.labelsToAdd = append(current.labelsToAdd, label{labelName, labelValue})
			}
		}
	} else {
		if current.labelsToAdd == nil {
			current.labelsToAdd = make([]label, 0, len(config.LabelsToGet))
		}
		// Add only the explicitly requested labels that are present.
		for _, labelToGet := range config.LabelsToGet {
			labelValue, found := metric.Labels[labelToGet]
			if found {
				current.labelsToAdd = append(current.labelsToAdd, label{labelToGet, labelValue})
			}
		}
	}
}
// insertFamily feeds every metric of one family into the matching join tree.
func (lj *labelJoiner) insertFamily(metricFamily ksmstore.DDMetricsFam) {
	// The metricsToJoin map has been created in newLabelJoiner and contains one entry per label join config.
	// insertFamily is then called with the metrics to use to do the label join.
	// The metrics passed to insertFamily are retrieved by (*KSMCheck)Run() and are filtered by (*KSMCheck)familyFilter
	// And (*KSMCheck)familyFilter keeps only the metrics that are in the label join config.
	// That’s why we cannot have a miss here.
	metricToJoin, found := lj.metricsToJoin[metricFamily.Name]
	if !found {
		log.Error("BUG in label joins")
		return
	}
	for _, metric := range metricFamily.ListMetrics {
		lj.insertMetric(metric, metricToJoin.config, metricToJoin.tree)
	}
}
// insertFamilies feeds every metric family of every metric list into the
// join trees via insertFamily.
func (lj *labelJoiner) insertFamilies(metrics map[string][]ksmstore.DDMetricsFam) {
	for _, families := range metrics {
		for i := range families {
			lj.insertFamily(families[i])
		}
	}
}
// getLabelsToAddOne walks one join tree using the input metric's label values
// and, on a full match, appends that leaf's labels to labelsToAdd. A missing
// input label or an unknown value aborts the lookup without appending.
func (lj *labelJoiner) getLabelsToAddOne(inputLabels map[string]string, config *JoinsConfig, tree *node, labelsToAdd *[]label) {
	node := tree
	for _, labelToMatch := range config.LabelsToMatch {
		labelValue, found := inputLabels[labelToMatch]
		if !found {
			return
		}
		node, found = node.labelValues[labelValue]
		if !found {
			return
		}
	}
	*labelsToAdd = append(*labelsToAdd, node.labelsToAdd...)
}
// getLabelsToAdd collects, across all configured label joins, the labels to
// add to a metric with the given input labels.
func (lj *labelJoiner) getLabelsToAdd(inputLabels map[string]string) (labelsToAdd []label) {
	for _, metricToJoin := range lj.metricsToJoin {
		lj.getLabelsToAddOne(inputLabels, metricToJoin.config, metricToJoin.tree, &labelsToAdd)
	}
	return
}
package helper
import (
"github.com/adamlenda/engine/core"
"github.com/adamlenda/engine/geometry"
"github.com/adamlenda/engine/gls"
"github.com/adamlenda/engine/graphic"
"github.com/adamlenda/engine/material"
"github.com/adamlenda/engine/math32"
)
// Normals is the visual representation of the normals of a target object.
type Normals struct {
	graphic.Lines                      // line segments drawn for the normals
	size           float32             // length of each rendered normal segment
	targetNode     *core.Node          // node of the object whose normals are shown
	targetGeometry *geometry.Geometry  // geometry of the target object
}
// NewNormals creates a normals helper for the specified IGraphic, with the
// specified size (length of each normal segment), color, and lineWidth.
// It allocates two helper vertices (one line segment) per target vertex and
// calls Update once to populate them.
func NewNormals(ig graphic.IGraphic, size float32, color *math32.Color, lineWidth float32) *Normals {
	// Creates new Normals helper
	nh := new(Normals)
	nh.size = size
	// Save the object to show the normals
	nh.targetNode = ig.GetNode()
	// Get the geometry of the target object
	nh.targetGeometry = ig.GetGeometry()
	// Get the number of target vertex positions
	vertices := nh.targetGeometry.VBO(gls.VertexPosition)
	n := vertices.Buffer().Size() * 2
	// Creates this helper geometry
	geom := geometry.NewGeometry()
	positions := math32.NewArrayF32(n, n)
	geom.AddVBO(gls.NewVBO(positions).AddAttrib(gls.VertexPosition))
	// Creates this helper material
	mat := material.NewStandard(color)
	mat.SetLineWidth(lineWidth)
	// Initialize graphic
	nh.Lines.Init(geom, mat)
	nh.Update()
	return nh
}
// Update should be called in the render loop to
// update the normals based on the target object.
// For every target vertex it computes a world-space segment from the vertex
// position to position + normal*size and writes the pair into this helper's
// position VBO.
func (nh *Normals) Update() {
	var v1 math32.Vector3
	var v2 math32.Vector3
	var normalMatrix math32.Matrix3
	// Updates the target object matrix and get its normal matrix
	matrixWorld := nh.targetNode.MatrixWorld()
	normalMatrix.GetNormalMatrix(&matrixWorld)
	// Get the target positions and normals buffers
	tPosVBO := nh.targetGeometry.VBO(gls.VertexPosition)
	tPositions := tPosVBO.Buffer()
	tNormVBO := nh.targetGeometry.VBO(gls.VertexNormal)
	tNormals := tNormVBO.Buffer()
	// Get this object positions buffer
	geom := nh.GetGeometry()
	posVBO := geom.VBO(gls.VertexPosition)
	positions := posVBO.Buffer()
	// For each target object vertex position (3 floats per vertex):
	for pos := 0; pos < tPositions.Size(); pos += 3 {
		// Get the target vertex position and apply the current world matrix transform
		// to get the base for this normal line segment.
		tPositions.GetVector3(pos, &v1)
		v1.ApplyMatrix4(&matrixWorld)
		// Calculates the end position of the normal line segment
		tNormals.GetVector3(pos, &v2)
		v2.ApplyMatrix3(&normalMatrix).Normalize().MultiplyScalar(nh.size).Add(&v1)
		// Sets the line segment representing the normal of the current target position
		// at this helper VBO (two vertices per target vertex).
		positions.SetVector3(2*pos, &v1)
		positions.SetVector3(2*pos+3, &v2)
	}
	posVBO.Update()
}
package pure
import (
"context"
"strconv"
"time"
"github.com/benthosdev/benthos/v4/internal/bundle"
"github.com/benthosdev/benthos/v4/internal/component/processor"
"github.com/benthosdev/benthos/v4/internal/docs"
"github.com/benthosdev/benthos/v4/internal/message"
"github.com/benthosdev/benthos/v4/internal/tracing"
)
func init() {
	// Register the "catch" processor constructor together with its
	// documentation spec in the global processor bundle.
	err := bundle.AllProcessors.Add(func(conf processor.Config, mgr bundle.NewManagement) (processor.V1, error) {
		p, err := newCatch(conf.Catch, mgr)
		if err != nil {
			return nil, err
		}
		return processor.NewV2BatchedToV1Processor("catch", p, mgr.Metrics()), nil
	}, docs.ComponentSpec{
		Name: "catch",
		Categories: []string{
			"Composition",
		},
		Summary: `
Applies a list of child processors _only_ when a previous processing step has
failed.`,
		Description: `
Behaves similarly to the ` + "[`for_each`](/docs/components/processors/for_each)" + ` processor, where a
list of child processors are applied to individual messages of a batch. However,
processors are only applied to messages that failed a processing step prior to
the catch.

For example, with the following config:

` + "```yaml" + `
pipeline:
  processors:
    - resource: foo
    - catch:
      - resource: bar
      - resource: baz
` + "```" + `

If the processor ` + "`foo`" + ` fails for a particular message, that message
will be fed into the processors ` + "`bar` and `baz`" + `. Messages that do not
fail for the processor ` + "`foo`" + ` will skip these processors.

When messages leave the catch block their fail flags are cleared. This processor
is useful for when it's possible to recover failed messages, or when special
actions (such as logging/metrics) are required before dropping them.

More information about error handing can be found [here](/docs/configuration/error_handling).`,
		Config: docs.FieldProcessor("", "").Array().
			// Lint: a `try` nested directly inside a `catch` can never run,
			// because fail flags are only cleared when the catch ends.
			LinterFunc(func(ctx docs.LintContext, line, col int, value interface{}) []docs.Lint {
				childProcs, ok := value.([]interface{})
				if !ok {
					return nil
				}
				for _, child := range childProcs {
					childObj, ok := child.(map[string]interface{})
					if !ok {
						continue
					}
					if _, exists := childObj["catch"]; exists {
						// No need to lint as a nested catch will clear errors,
						// allowing nested try blocks to work as expected.
						return nil
					}
					if _, exists := childObj["try"]; exists {
						return []docs.Lint{
							docs.NewLintError(line, "`catch` block contains a `try` block which will never execute due to errors only being cleared at the end of the `catch`, for more information about nesting `try` within `catch` read: https://www.benthos.dev/docs/components/processors/try#nesting-within-a-catch-block"),
						}
					}
				}
				return nil
			}),
	})
	if err != nil {
		panic(err)
	}
}
//------------------------------------------------------------------------------
// catchProc applies its child processors only to messages whose fail flag is
// set, then clears the fail flag on everything leaving the block.
type catchProc struct {
	children []processor.V1
}
// newCatch constructs a catchProc from the child processor configs. Each
// child is instantiated under the manager path "catch.<index>".
func newCatch(conf []processor.Config, mgr bundle.NewManagement) (*catchProc, error) {
	var children []processor.V1
	for i, pconf := range conf {
		pMgr := mgr.IntoPath("catch", strconv.Itoa(i))
		proc, err := pMgr.NewProcessor(pconf)
		if err != nil {
			return nil, err
		}
		children = append(children, proc)
	}
	return &catchProc{
		children: children,
	}, nil
}
// ProcessBatch splits the batch into single-part batches, runs the child
// processors on only the previously-failed parts (ExecuteCatchAll), then
// flattens the results back into one batch and clears every part's fail flag.
func (p *catchProc) ProcessBatch(ctx context.Context, spans []*tracing.Span, msg *message.Batch) ([]*message.Batch, error) {
	// One single-part batch per message part so children apply per-message.
	resultMsgs := make([]*message.Batch, msg.Len())
	_ = msg.Iter(func(i int, p *message.Part) error {
		tmpMsg := message.QuickBatch(nil)
		tmpMsg.SetAll([]*message.Part{p})
		resultMsgs[i] = tmpMsg
		return nil
	})
	var res error
	if resultMsgs, res = processor.ExecuteCatchAll(p.children, resultMsgs...); res != nil || len(resultMsgs) == 0 {
		return nil, res
	}
	// Flatten the per-part batches back into a single batch.
	resMsg := message.QuickBatch(nil)
	for _, m := range resultMsgs {
		_ = m.Iter(func(i int, p *message.Part) error {
			resMsg.Append(p)
			return nil
		})
	}
	if resMsg.Len() == 0 {
		return nil, res
	}
	// Leaving the catch block clears the fail flag on every message.
	_ = resMsg.Iter(func(i int, p *message.Part) error {
		p.ErrorSet(nil)
		return nil
	})
	resMsgs := [1]*message.Batch{resMsg}
	return resMsgs[:], nil
}
// Close signals all child processors to shut down and waits for each to
// terminate, bounded by the context deadline (defaulting to five seconds
// when the context carries none).
func (p *catchProc) Close(ctx context.Context) error {
	for _, c := range p.children {
		c.CloseAsync()
	}
	deadline, exists := ctx.Deadline()
	if !exists {
		deadline = time.Now().Add(time.Second * 5)
	}
	for _, c := range p.children {
		if err := c.WaitForClose(time.Until(deadline)); err != nil {
			return err
		}
	}
	return nil
}
package main
import (
"image"
"image/color"
"image/jpeg"
"image/png"
"math"
"os"
"sync"
"github.com/Sirupsen/logrus"
)
// trap is an unimplemented stub.
// NOTE(review): every parameter is unused — either implement this or remove
// it once its intended call sites are settled.
func trap(img *image.RGBA, trapPath string, r, g, b *Histo) {
}
// plotImp renders the `importance` histogram as a grayscale image and writes
// it to the "importance" output file. Zero-valued points are skipped, which
// leaves them transparent when rendering to png.
func plotImp() (err error) {
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	impMax := max(&importance)
	for x, col := range importance {
		for y, v := range col {
			if v == 0 {
				continue
			}
			c := uint8(value(v, impMax))
			img.SetRGBA(x, y, color.RGBA{c, c, c, 255})
		}
	}
	return render(img, "importance")
}
// plot visualizes the histograms values as an image. It equalizes the
// histograms with a color scaling function to emphazise hidden features.
// Each column is plotted concurrently in its own goroutine; plot returns
// once all columns are rendered.
func plot(img *image.RGBA, r, g, b *Histo) {
	// The highest number orbits passing through a point.
	rMax, gMax, bMax := max(r), max(g), max(b)
	logrus.Println("[i] Histo:", rMax, gMax, bMax)
	logrus.Printf("[i] Function: %s, factor: %.2f, exposure: %.2f", getFunctionName(f), factor, exposure)
	// We iterate over every point in our histogram to color scale and plot
	// them.
	wg := new(sync.WaitGroup)
	wg.Add(len(r))
	for x, col := range r {
		// Shadow the loop variable: before Go 1.22 `col` is a single variable
		// shared by all iterations, so passing &col to the goroutines let later
		// iterations overwrite the column a goroutine was still reading.
		col := col
		go plotCol(wg, x, &col, img, r, g, b, rMax, bMax, gMax)
	}
	wg.Wait()
}
// plotCol plots a column of pixels. The RGB-value of the pixel is based on the
// frequency in the histogram. Higher value equals brighter color.
// It signals the WaitGroup when the column is done.
func plotCol(wg *sync.WaitGroup, x int, col *[height]float64, img *image.RGBA, r, g, b *Histo, rMax, bMax, gMax float64) {
	for y := range col {
		// We skip to plot the black points for faster rendering. A side
		// effect is that rendering png images will have a transparent
		// background.
		if r[x][y] == 0 &&
			g[x][y] == 0 &&
			b[x][y] == 0 {
			continue
		}
		c := color.RGBA{
			uint8(value(r[x][y], rMax)),
			uint8(value(g[x][y], gMax)),
			uint8(value(b[x][y], bMax)),
			255}
		// We flip x <=> y to rotate the image to an upright position.
		img.SetRGBA(y, x, c)
	}
	wg.Done()
}
// exp is an exponential color scaling function: approaches 1 as factor*x grows.
func exp(x float64) float64 {
	return (1 - math.Exp(-factor*x))
}

// log is an logaritmic color scaling function (log(1 + factor*x)).
func log(x float64) float64 {
	return math.Log1p(factor * x)
}

// sqrt is a square root color scaling function.
func sqrt(x float64) float64 {
	return math.Sqrt(factor * x)
}

// lin is a linear color scaling function (identity; factor is not applied).
func lin(x float64) float64 {
	return x
}
// value calculates the color value of the pixel: the scaled histogram count
// clamped to the 0–255 channel range.
func value(v, max float64) float64 {
	return math.Min(f(v)*scale(max), 255)
}

// scale equalizes the histogram distribution for each value, mapping the
// maximum count to 255*exposure.
func scale(max float64) float64 {
	return (255 * exposure) / f(max)
}
// render creates an output image file, encoding as png when filePng is set
// and jpeg otherwise; the matching extension is appended to filename.
// NOTE(review): the deferred "Done" log line fires even when encoding fails —
// confirm that is intended.
func render(img image.Image, filename string) (err error) {
	enc := func(img image.Image, filename string) (err error) {
		file, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer file.Close()
		if filePng {
			return png.Encode(file, img)
		}
		return jpeg.Encode(file, img, &jpeg.Options{Quality: 100})
	}
	if filePng {
		filename += ".png"
	} else if fileJpg {
		filename += ".jpg"
	}
	logrus.Println("[!] Encoding:", filename)
	defer logrus.Println("[!] Done :D")
	return enc(img, filename)
}
package protocol
import (
"time"
"github.com/montanaflynn/stats"
"go.opentelemetry.io/collector/model/pdata"
)
// buildCounterMetric converts a parsed statsd counter into an OTLP Sum metric
// with delta aggregation temporality covering [lastIntervalTime, timeNow].
// isMonotonicCounter controls the Sum's monotonicity flag; the metric's
// labels become data point attributes.
func buildCounterMetric(parsedMetric statsDMetric, isMonotonicCounter bool, timeNow, lastIntervalTime time.Time) pdata.InstrumentationLibraryMetrics {
	ilm := pdata.NewInstrumentationLibraryMetrics()
	nm := ilm.Metrics().AppendEmpty()
	nm.SetName(parsedMetric.description.name)
	if parsedMetric.unit != "" {
		nm.SetUnit(parsedMetric.unit)
	}
	nm.SetDataType(pdata.MetricDataTypeSum)
	nm.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta)
	nm.Sum().SetIsMonotonic(isMonotonicCounter)
	dp := nm.Sum().DataPoints().AppendEmpty()
	dp.SetIntVal(parsedMetric.counterValue())
	dp.SetStartTimestamp(pdata.NewTimestampFromTime(lastIntervalTime))
	dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow))
	for i, key := range parsedMetric.labelKeys {
		dp.Attributes().InsertString(key, parsedMetric.labelValues[i])
	}
	return ilm
}
// buildGaugeMetric converts a parsed statsd gauge into an OTLP Gauge metric
// with a single data point stamped at timeNow; the metric's labels become
// data point attributes.
func buildGaugeMetric(parsedMetric statsDMetric, timeNow time.Time) pdata.InstrumentationLibraryMetrics {
	ilm := pdata.NewInstrumentationLibraryMetrics()
	nm := ilm.Metrics().AppendEmpty()
	nm.SetName(parsedMetric.description.name)
	if parsedMetric.unit != "" {
		nm.SetUnit(parsedMetric.unit)
	}
	nm.SetDataType(pdata.MetricDataTypeGauge)
	dp := nm.Gauge().DataPoints().AppendEmpty()
	dp.SetDoubleVal(parsedMetric.gaugeValue())
	dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow))
	for i, key := range parsedMetric.labelKeys {
		dp.Attributes().InsertString(key, parsedMetric.labelValues[i])
	}
	return ilm
}
// buildSummaryMetric converts an accumulated statsd timer/histogram into an
// OTLP Summary metric with count, sum, and a fixed set of percentiles
// (0, 10, 50, 90, 95, 100 — emitted as quantiles 0.0–1.0, nearest-rank).
func buildSummaryMetric(summaryMetric summaryMetric) pdata.InstrumentationLibraryMetrics {
	ilm := pdata.NewInstrumentationLibraryMetrics()
	nm := ilm.Metrics().AppendEmpty()
	nm.SetName(summaryMetric.name)
	nm.SetDataType(pdata.MetricDataTypeSummary)
	dp := nm.Summary().DataPoints().AppendEmpty()
	dp.SetCount(uint64(len(summaryMetric.summaryPoints)))
	sum, _ := stats.Sum(summaryMetric.summaryPoints)
	dp.SetSum(sum)
	dp.SetTimestamp(pdata.NewTimestampFromTime(summaryMetric.timeNow))
	for i, key := range summaryMetric.labelKeys {
		dp.Attributes().InsertString(key, summaryMetric.labelValues[i])
	}
	// Hard-coded percentile set, expressed as percentages.
	quantile := []float64{0, 10, 50, 90, 95, 100}
	for _, v := range quantile {
		eachQuantile := dp.QuantileValues().AppendEmpty()
		eachQuantile.SetQuantile(v / 100)
		eachQuantileValue, _ := stats.PercentileNearestRank(summaryMetric.summaryPoints, v)
		eachQuantile.SetValue(eachQuantileValue)
	}
	return ilm
}
// counterValue returns the counter increment as an integer, scaled up by the
// inverse of the sample rate when one was supplied in (0, 1).
func (s statsDMetric) counterValue() int64 {
	x := s.asFloat
	// Note statds counters are always represented as integers.
	// There is no statsd specification that says what should or
	// shouldn't be done here. Rounding may occur for sample
	// rates that are not integer reciprocals. Recommendation:
	// use integer reciprocal sampling rates.
	if 0 < s.sampleRate && s.sampleRate < 1 {
		x = x / s.sampleRate
	}
	return int64(x)
}
// gaugeValue returns the observed gauge value unchanged.
func (s statsDMetric) gaugeValue() float64 {
	// sampleRate does not have effect for gauge points.
	return s.asFloat
}
// summaryValue returns the observation scaled by the inverse sample rate
// (see the TODO below — this scaling is known to be semantically wrong).
func (s statsDMetric) summaryValue() float64 {
	// TODO: This method returns an incorrect result. The
	// sampleRate is meant to apply to the count of observations
	// recorded by the histogram/timer, not to scale the value
	// observed. This should return gaugeValue(), and the
	// consumer of this value should track the effective count of
	// each item to compute percentiles. See #5252.
	x := s.asFloat
	if 0 < s.sampleRate && s.sampleRate < 1 {
		x = x / s.sampleRate
	}
	return x
}
package registry
import (
. "github.com/protolambda/zrnt/eth2/beacon/validator"
. "github.com/protolambda/zrnt/eth2/core"
"github.com/protolambda/zrnt/eth2/util/math"
"github.com/protolambda/zrnt/eth2/util/ssz"
"github.com/protolambda/zssz"
"sort"
)
// RegistryIndicesSSZ is the SSZ type descriptor used to hash-tree-root lists
// of validator indices.
var RegistryIndicesSSZ = zssz.GetSSZ((*RegistryIndices)(nil))

// ValidatorRegistry is the full list of validators, active or not.
type ValidatorRegistry []*Validator

// Limit returns the SSZ list limit of the registry.
func (_ *ValidatorRegistry) Limit() uint64 {
	return VALIDATOR_REGISTRY_LIMIT
}

// ValidatorsState wraps the validator-registry portion of the beacon state.
type ValidatorsState struct {
	Validators ValidatorRegistry
}
// IsValidIndex reports whether index refers to an existing registry entry.
func (state *ValidatorsState) IsValidIndex(index ValidatorIndex) bool {
	return index < ValidatorIndex(len(state.Validators))
}

// ValidatorCount returns the number of validators in the registry.
func (state *ValidatorsState) ValidatorCount() uint64 {
	return uint64(len(state.Validators))
}

// Validator returns the validator at the given index (panics if out of range).
func (state *ValidatorsState) Validator(index ValidatorIndex) *Validator {
	return state.Validators[index]
}

// Pubkey returns the BLS public key of the validator at the given index.
func (state *ValidatorsState) Pubkey(index ValidatorIndex) BLSPubkey {
	return state.Validators[index].Pubkey
}
// ValidatorIndex looks up the index of the validator with the given pubkey by
// linear scan; it returns (ValidatorIndexMarker, false) when no match exists.
func (state *ValidatorsState) ValidatorIndex(pubkey BLSPubkey) (index ValidatorIndex, exists bool) {
	for i, v := range state.Validators {
		if v.Pubkey == pubkey {
			return ValidatorIndex(i), true
		}
	}
	return ValidatorIndexMarker, false
}

// WithdrawableEpoch returns the epoch at which the validator becomes withdrawable.
func (state *ValidatorsState) WithdrawableEpoch(index ValidatorIndex) Epoch {
	return state.Validators[index].WithdrawableEpoch
}

// IsActive reports whether the validator at index is active in the given epoch.
func (state *ValidatorsState) IsActive(index ValidatorIndex, epoch Epoch) bool {
	return state.Validators[index].IsActive(epoch)
}
// GetActiveValidatorIndices returns the indices of all validators active in
// the given epoch, in increasing index order.
func (state *ValidatorsState) GetActiveValidatorIndices(epoch Epoch) RegistryIndices {
	res := make([]ValidatorIndex, 0, len(state.Validators))
	for i, v := range state.Validators {
		if v.IsActive(epoch) {
			res = append(res, ValidatorIndex(i))
		}
	}
	return res
}

// ComputeActiveIndexRoot hash-tree-roots the list of active validator indices
// for the given epoch.
func (state *ValidatorsState) ComputeActiveIndexRoot(epoch Epoch) Root {
	indices := state.GetActiveValidatorIndices(epoch)
	return ssz.HashTreeRoot(indices, RegistryIndicesSSZ)
}

// GetActiveValidatorCount counts the validators active in the given epoch.
func (state *ValidatorsState) GetActiveValidatorCount(epoch Epoch) (count uint64) {
	for _, v := range state.Validators {
		if v.IsActive(epoch) {
			count++
		}
	}
	return
}
// CommitteeCount computes the number of committees per slot for the given
// active-validator count, clamped to [1, MAX_COMMITTEES_PER_SLOT].
func CommitteeCount(activeValidators uint64) uint64 {
	validatorsPerSlot := activeValidators / uint64(SLOTS_PER_EPOCH)
	committeesPerSlot := validatorsPerSlot / TARGET_COMMITTEE_SIZE
	switch {
	case committeesPerSlot > MAX_COMMITTEES_PER_SLOT:
		return MAX_COMMITTEES_PER_SLOT
	case committeesPerSlot == 0:
		return 1
	default:
		return committeesPerSlot
	}
}
// GetCommitteeCountAtSlot returns the committee count for the epoch of the
// given slot, derived from the active validator count.
func (state *ValidatorsState) GetCommitteeCountAtSlot(slot Slot) uint64 {
	return CommitteeCount(state.GetActiveValidatorCount(slot.ToEpoch()))
}

// IsSlashed reports whether the validator at the given index has been slashed.
func (state *ValidatorsState) IsSlashed(index ValidatorIndex) bool {
	return state.Validators[index].Slashed
}
// FilterUnslashed filters a slice in-place. Only keeps the unslashed validators.
// If input is sorted, then the result will be sorted. The input's backing
// array is reused, so callers must not use the original slice afterwards.
func (state *ValidatorsState) FilterUnslashed(indices []ValidatorIndex) []ValidatorIndex {
	unslashed := indices[:0]
	for _, x := range indices {
		if !state.Validators[x].Slashed {
			unslashed = append(unslashed, x)
		}
	}
	return unslashed
}
// GetIndicesToSlash returns the indices of all slashed validators whose
// withdrawable epoch equals the given epoch.
func (state *ValidatorsState) GetIndicesToSlash(withdrawal Epoch) (out []ValidatorIndex) {
	for i := range state.Validators {
		v := state.Validators[i]
		if v.Slashed && v.WithdrawableEpoch == withdrawal {
			out = append(out, ValidatorIndex(i))
		}
	}
	return
}
// GetChurnLimit returns the per-epoch validator churn limit: the active count
// divided by CHURN_LIMIT_QUOTIENT, floored at MIN_PER_EPOCH_CHURN_LIMIT.
func (state *ValidatorsState) GetChurnLimit(epoch Epoch) uint64 {
	return math.MaxU64(MIN_PER_EPOCH_CHURN_LIMIT, state.GetActiveValidatorCount(epoch)/CHURN_LIMIT_QUOTIENT)
}
// ExitQueueEnd computes the epoch at which a validator exiting now would
// leave: the max of the earliest activation-exit epoch and the latest
// already-scheduled exit, bumped by one if that epoch's exit churn is already
// at the limit.
func (state *ValidatorsState) ExitQueueEnd(epoch Epoch) Epoch {
	// Compute exit queue epoch
	exitQueueEnd := epoch.ComputeActivationExitEpoch()
	for _, v := range state.Validators {
		if v.ExitEpoch != FAR_FUTURE_EPOCH && v.ExitEpoch > exitQueueEnd {
			exitQueueEnd = v.ExitEpoch
		}
	}
	// Count validators already scheduled to exit at that epoch.
	exitQueueChurn := uint64(0)
	for _, v := range state.Validators {
		if v.ExitEpoch == exitQueueEnd {
			exitQueueChurn++
		}
	}
	if exitQueueChurn >= state.GetChurnLimit(epoch) {
		exitQueueEnd++
	}
	return exitQueueEnd
}
// ProcessActivationQueue dequeues validators that are eligible for activation,
// ordered by when they became eligible (ties broken by index), and stamps the
// activation epoch on at most churn-limit of them.
func (state *ValidatorsState) ProcessActivationQueue(currentEpoch Epoch, finalizedEpoch Epoch) {
	// Collect validators eligible for activation that were not already
	// dequeued prior to the finalized epoch.
	queue := make([]ValidatorIndex, 0)
	for i := range state.Validators {
		if state.Validators[i].IsEligibleForActivation(finalizedEpoch) {
			queue = append(queue, ValidatorIndex(i))
		}
	}
	// Sort by activation-eligibility epoch, breaking ties by validator index.
	sort.Slice(queue, func(a int, b int) bool {
		ea := state.Validators[queue[a]].ActivationEligibilityEpoch
		eb := state.Validators[queue[b]].ActivationEligibilityEpoch
		if ea != eb {
			return ea < eb
		}
		return queue[a] < queue[b]
	})
	// Dequeue at most churn-limit validators for activation (their activation
	// epoch is set, not reset).
	limit := uint64(len(queue))
	if churn := state.GetChurnLimit(currentEpoch); churn < limit {
		limit = churn
	}
	activationEpoch := currentEpoch.ComputeActivationExitEpoch()
	for _, idx := range queue[:limit] {
		state.Validators[idx].ActivationEpoch = activationEpoch
	}
}
// GetTotalStakedBalance returns the sum of effective balances of all validators
// active in the given epoch, with a 1 Gwei minimum to avoid divisions by zero.
func (state *ValidatorsState) GetTotalStakedBalance(epoch Epoch) (sum Gwei) {
	for _, v := range state.Validators {
		if v.IsActive(epoch) {
			sum += v.EffectiveBalance
		}
	}
	// Enforce the documented 1 Gwei floor. The original code promised it in
	// the doc comment (and the sibling GetTotalStake / SumEffectiveBalanceOf
	// enforce it) but never applied it, so callers dividing by this total
	// could divide by zero when no validators are active.
	if sum == 0 {
		return 1
	}
	return sum
}
// GetAttestersStake sums the effective balance of every validator whose
// attester status carries all the flag markers in mask. Returns at least
// 1 Gwei to avoid divisions by zero.
func (state *ValidatorsState) GetAttestersStake(statuses []AttesterStatus, mask AttesterFlag) Gwei {
	total := Gwei(0)
	for i := range statuses {
		if statuses[i].Flags.HasMarkers(mask) {
			total += state.Validators[i].EffectiveBalance
		}
	}
	if total == 0 {
		return 1
	}
	return total
}
// GetTotalStake sums the effective balance of every validator in the registry.
// Returns at least 1 Gwei to avoid divisions by zero.
func (state *ValidatorsState) GetTotalStake() Gwei {
	total := Gwei(0)
	for i := range state.Validators {
		total += state.Validators[i].EffectiveBalance
	}
	if total == 0 {
		return 1
	}
	return total
}
// EffectiveBalance returns the effective balance of the validator at the given
// registry index. The index is not bounds-checked; an out-of-range index panics.
func (state *ValidatorsState) EffectiveBalance(index ValidatorIndex) Gwei {
	return state.Validators[index].EffectiveBalance
}
// Return the combined effective balance of an array of validators. (1 Gwei minimum to avoid divisions by zero.)
func (state *ValidatorsState) SumEffectiveBalanceOf(indices []ValidatorIndex) (sum Gwei) {
for _, vIndex := range indices {
sum += state.Validators[vIndex].EffectiveBalance
}
if sum == 0 {
return 1
}
return sum
} | eth2/beacon/registry/validators.go | 0.595845 | 0.40116 | validators.go | starcoder |
package mo
import (
xconv "github.com/goclub/conv"
"github.com/goclub/mongo/internal/coord"
)
// geojson background (article in Chinese): https://zhuanlan.zhihu.com/p/141554586
// Example: NewPoint(mo.WGS84{121.48294, 31.2328}) // WGS84{longitude, latitude}
// Point is a GeoJSON point. Construct it with NewPoint, e.g.
// NewPoint(mo.WGS84{Longitude: 121.48294, Latitude: 31.2328}).
type Point struct {
	Type pointType `json:"type" bson:"type"`
	// Coordinates is []float64{longitude, latitude}.
	// Beware the classic pitfall: the string representation of a geo point
	// puts latitude first ("latitude,longitude"), while the array form puts
	// longitude first ([longitude, latitude]) — exactly the opposite order.
	Coordinates []float64 `json:"coordinates" bson:"coordinates"`
}

// pointType is an internal type that forces callers to construct a Point via
// NewPoint (the type value cannot be produced outside this package).
type pointType *string

// NewPoint builds a GeoJSON Point from WGS84 coordinates.
// The composite literal uses keyed fields (instead of the original positional
// form) so adding a field to Point cannot silently mis-assign values.
func NewPoint(data WGS84) Point {
	typeValue := "Point"
	return Point{
		Type:        &typeValue,
		Coordinates: []float64{data.Longitude, data.Latitude},
	}
}

// WGS84 returns the point's coordinates as a WGS84 longitude/latitude pair.
func (p Point) WGS84() WGS84 {
	return WGS84{
		Longitude: p.Coordinates[0],
		Latitude:  p.Coordinates[1],
	}
}

// WGS84 is a geographic coordinate in the WGS84 (GPS) datum.
type WGS84 struct {
	Longitude float64 `json:"longitude" note:"经度"`
	Latitude  float64 `json:"latitude" note:"纬度"`
}
// GCJ02 converts the WGS84 coordinate to the GCJ02 ("Mars") datum.
func (data WGS84) GCJ02() GCJ02 {
	longitude, latitude := coordtransform.WGS84toGCJ02(data.Longitude, data.Latitude)
	return GCJ02{Longitude: longitude, Latitude: latitude}
}

// BD09 converts the WGS84 coordinate to the Baidu BD09 datum.
func (data WGS84) BD09() BD09 {
	longitude, latitude := coordtransform.WGS84toBD09(data.Longitude, data.Latitude)
	return BD09{Longitude: longitude, Latitude: latitude}
}
// GCJ02 is a geographic coordinate in the GCJ02 ("Mars") datum.
type GCJ02 struct {
	Longitude float64 `json:"longitude" note:"经度"`
	Latitude  float64 `json:"latitude" note:"纬度"`
}

// LatCommaLngString renders the coordinate as a "latitude,longitude" string.
// Beware the classic pitfall: the string representation of a geo point puts
// latitude first, while the GeoJSON array form puts longitude first — exactly
// the opposite order.
func (data GCJ02) LatCommaLngString() string {
	return xconv.Float64String(data.Latitude) + "," + xconv.Float64String(data.Longitude)
}

// WGS84 converts the GCJ02 coordinate back to the WGS84 (GPS) datum.
func (data GCJ02) WGS84() WGS84 {
	longitude, latitude := coordtransform.GCJ02toWGS84(data.Longitude, data.Latitude)
	return WGS84{Longitude: longitude, Latitude: latitude}
}

// BD09 converts the GCJ02 coordinate to the Baidu BD09 datum.
func (data GCJ02) BD09() BD09 {
	longitude, latitude := coordtransform.GCJ02toBD09(data.Longitude, data.Latitude)
	return BD09{Longitude: longitude, Latitude: latitude}
}
type BD09 struct {
Longitude float64 `json:"longitude" note:"经度"`
Latitude float64 `json:"latitude" note:"纬度"`
}
func (data BD09) WGS84() WGS84 {
lng, lat := coordtransform.BD09toWGS84(data.Longitude, data.Latitude)
return WGS84{
Longitude: lng,
Latitude: lat,
}
}
func (data BD09) GCJ02() GCJ02 {
lng, lat := coordtransform.BD09toGCJ02(data.Longitude, data.Latitude)
return GCJ02{
Longitude: lng,
Latitude: lat,
}
} | geojson.go | 0.552298 | 0.568655 | geojson.go | starcoder |
package main
import "math"
// SimulateGravity runs numGens update steps of `time` seconds each, starting
// from initialUniverse, and returns every generation including the initial one
// (numGens+1 universes in total).
func SimulateGravity(initialUniverse Universe, numGens int, time float64) []Universe {
	generations := make([]Universe, 0, numGens+1)
	generations = append(generations, initialUniverse)
	for i := 0; i < numGens; i++ {
		generations = append(generations, UpdateUniverse(generations[i], time))
	}
	return generations
}
// UpdateUniverse advances the universe by one time step of t seconds.
// Accelerations are computed from the previous state for every body, so the
// result no longer depends on the order in which bodies are updated.
func UpdateUniverse(currentUniverse Universe, t float64) Universe {
	newUniverse := CopyUniverse(currentUniverse)
	for i := range newUniverse.bodies {
		// BUG FIX: read forces from currentUniverse, not from the copy being
		// mutated in place — otherwise body i+1 would see body i's
		// already-updated acceleration/velocity/position within the same step.
		newUniverse.bodies[i].acceleration = UpdateAcceleration(currentUniverse.bodies, currentUniverse.bodies[i])
		newUniverse.bodies[i].velocity = UpdateVelocity(newUniverse.bodies[i], t)
		newUniverse.bodies[i].position = UpdatePosition(newUniverse.bodies[i], t)
	}
	return newUniverse
}
// CopyUniverse returns an independent copy of the universe. Body contains only
// value fields (it is compared with != / == elsewhere in this file, which Go
// only permits for comparable, pointer-free structs), so the built-in copy —
// plain struct assignment per element — duplicates each body fully. This also
// fixes the fragility of the original field-by-field copy, which would have
// silently dropped any field later added to Body.
func CopyUniverse(currentUniverse Universe) Universe {
	var newUniverse Universe
	newUniverse.width = currentUniverse.width
	newUniverse.bodies = make([]Body, len(currentUniverse.bodies))
	copy(newUniverse.bodies, currentUniverse.bodies)
	return newUniverse
}
// UpdateAcceleration computes b's acceleration from the net gravitational
// force exerted on it by every other body (a = F / m, componentwise).
func UpdateAcceleration(bodies []Body, b Body) OrderedPair {
	netForce := ComputeNetForce(bodies, b)
	return OrderedPair{
		x: netForce.x / b.mass,
		y: netForce.y / b.mass,
	}
}
// ComputeNetForce sums the gravitational force on b from every other body.
// Bodies are compared by value, so an entry identical to b in every field is
// treated as b itself and skipped.
func ComputeNetForce(bodies []Body, b Body) OrderedPair {
	var net OrderedPair
	for i := range bodies {
		if bodies[i] == b {
			continue
		}
		f := ComputeForce(bodies[i], b)
		net.x += f.x
		net.y += f.y
	}
	return net
}
// ComputeForce returns the gravitational force that b2 exerts on b, as a
// vector pointing from b toward b2 (Newton's law of universal gravitation).
func ComputeForce(b2, b Body) OrderedPair {
	dist := Distance(b2.position, b.position)
	magnitude := G * b2.mass * b.mass / (dist * dist)
	return OrderedPair{
		x: magnitude * ((b2.position.x - b.position.x) / dist),
		y: magnitude * ((b2.position.y - b.position.y) / dist),
	}
}
// Distance returns the Euclidean distance between two points. math.Hypot is
// preferred over Sqrt(dx*dx+dy*dy) because it avoids intermediate
// overflow/underflow for extreme coordinate values.
func Distance(p1, p2 OrderedPair) float64 {
	return math.Hypot(p1.x-p2.x, p1.y-p2.y)
}
// UpdateVelocity integrates b's current acceleration over the given time span
// (v = v0 + a*t, componentwise).
func UpdateVelocity(b Body, time float64) OrderedPair {
	return OrderedPair{
		x: b.velocity.x + b.acceleration.x*time,
		y: b.velocity.y + b.acceleration.y*time,
	}
}
func UpdatePosition(b Body, time float64) OrderedPair {
var p OrderedPair
p.x = b.position.x + b.velocity.x*time + .5*b.acceleration.x*time*time
p.y = b.position.y + b.velocity.y*time + .5*b.acceleration.y*time*time
return p
} | jupiter/gravity.go | 0.776538 | 0.466785 | gravity.go | starcoder |
package openapi
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"github.com/twilio/twilio-go/client"
)
// Optional parameters for the method 'CreateMessage'
//
// All fields are optional; nil fields are omitted from the API request.
// Populate them with the fluent Set* methods defined below.
type CreateMessageParams struct {
	// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that will create the resource.
	PathAccountSid *string `json:"PathAccountSid,omitempty"`
	// Determines if the address can be stored or obfuscated based on privacy settings
	AddressRetention *string `json:"AddressRetention,omitempty"`
	// The SID of the application that should receive message status. We POST a `message_sid` parameter and a `message_status` parameter with a value of `sent` or `failed` to the [application](https://www.twilio.com/docs/usage/api/applications)'s `message_status_callback`. If a `status_callback` parameter is also passed, it will be ignored and the application's `message_status_callback` parameter will be used.
	ApplicationSid *string `json:"ApplicationSid,omitempty"`
	// Total number of attempts made ( including this ) to send out the message regardless of the provider used
	Attempt *int `json:"Attempt,omitempty"`
	// The text of the message you want to send. Can be up to 1,600 characters in length.
	Body *string `json:"Body,omitempty"`
	// Determines if the message content can be stored or redacted based on privacy settings
	ContentRetention *string `json:"ContentRetention,omitempty"`
	// Reserved
	ForceDelivery *bool `json:"ForceDelivery,omitempty"`
	// A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164) format, an [alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id), or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses) that is enabled for the type of message you want to send. Phone numbers or [short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from Twilio also work here. You cannot, for example, spoof messages from a private cell phone number. If you are using `messaging_service_sid`, this parameter must be empty.
	From *string `json:"From,omitempty"`
	// The maximum total price in US dollars that you will pay for the message to be delivered. Can be a decimal value that has up to 4 decimal places. All messages are queued for delivery and the message cost is checked before the message is sent. If the cost exceeds `max_price`, the message will fail and a status of `Failed` is sent to the status callback. If `MaxPrice` is not set, the message cost is not checked.
	MaxPrice *float32 `json:"MaxPrice,omitempty"`
	// The URL of the media to send with the message. The media can be of type `gif`, `png`, and `jpeg` and will be formatted correctly on the recipient's device. The media size limit is 5MB for supported file types (JPEG, PNG, GIF) and 500KB for [other types](https://www.twilio.com/docs/sms/accepted-mime-types) of accepted media. To send more than one image in the message body, provide multiple `media_url` parameters in the POST request. You can include up to 10 `media_url` parameters per message. You can send images in an SMS message in only the US and Canada.
	MediaUrl *[]string `json:"MediaUrl,omitempty"`
	// The SID of the [Messaging Service](https://www.twilio.com/docs/sms/services#send-a-message-with-copilot) you want to associate with the Message. Set this parameter to use the [Messaging Service Settings and Copilot Features](https://www.twilio.com/console/sms/services) you have configured and leave the `from` parameter empty. When only this parameter is set, Twilio will use your enabled Copilot Features to select the `from` phone number for delivery.
	MessagingServiceSid *string `json:"MessagingServiceSid,omitempty"`
	// Rich actions for Channels Messages.
	PersistentAction *[]string `json:"PersistentAction,omitempty"`
	// Whether to confirm delivery of the message. Set this value to `true` if you are sending messages that have a trackable user action and you intend to confirm delivery of the message using the [Message Feedback API](https://www.twilio.com/docs/sms/api/message-feedback-resource). This parameter is `false` by default.
	ProvideFeedback *bool `json:"ProvideFeedback,omitempty"`
	// Whether to detect Unicode characters that have a similar GSM-7 character and replace them. Can be: `true` or `false`.
	SmartEncoded *bool `json:"SmartEncoded,omitempty"`
	// The URL we should call using the `status_callback_method` to send status information to your application. If specified, we POST these message status changes to the URL: `queued`, `failed`, `sent`, `delivered`, or `undelivered`. Twilio will POST its [standard request parameters](https://www.twilio.com/docs/sms/twiml#request-parameters) as well as some additional parameters including `MessageSid`, `MessageStatus`, and `ErrorCode`. If you include this parameter with the `messaging_service_sid`, we use this URL instead of the Status Callback URL of the [Messaging Service](https://www.twilio.com/docs/sms/services/api). URLs must contain a valid hostname and underscores are not allowed.
	StatusCallback *string `json:"StatusCallback,omitempty"`
	// The destination phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164) format for SMS/MMS or [Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses) for other 3rd-party channels.
	To *string `json:"To,omitempty"`
	// How long in seconds the message can remain in our outgoing message queue. After this period elapses, the message fails and we call your status callback. Can be between 1 and the default value of 14,400 seconds. After a message has been accepted by a carrier, however, we cannot guarantee that the message will not be queued after this period. We recommend that this value be at least 5 seconds.
	ValidityPeriod *int `json:"ValidityPeriod,omitempty"`
}
// The Set* methods below form a fluent builder: each assigns one field and
// returns the params pointer so calls can be chained.
func (params *CreateMessageParams) SetPathAccountSid(PathAccountSid string) *CreateMessageParams {
	params.PathAccountSid = &PathAccountSid
	return params
}
func (params *CreateMessageParams) SetAddressRetention(AddressRetention string) *CreateMessageParams {
	params.AddressRetention = &AddressRetention
	return params
}
func (params *CreateMessageParams) SetApplicationSid(ApplicationSid string) *CreateMessageParams {
	params.ApplicationSid = &ApplicationSid
	return params
}
func (params *CreateMessageParams) SetAttempt(Attempt int) *CreateMessageParams {
	params.Attempt = &Attempt
	return params
}
func (params *CreateMessageParams) SetBody(Body string) *CreateMessageParams {
	params.Body = &Body
	return params
}
func (params *CreateMessageParams) SetContentRetention(ContentRetention string) *CreateMessageParams {
	params.ContentRetention = &ContentRetention
	return params
}
func (params *CreateMessageParams) SetForceDelivery(ForceDelivery bool) *CreateMessageParams {
	params.ForceDelivery = &ForceDelivery
	return params
}
func (params *CreateMessageParams) SetFrom(From string) *CreateMessageParams {
	params.From = &From
	return params
}
func (params *CreateMessageParams) SetMaxPrice(MaxPrice float32) *CreateMessageParams {
	params.MaxPrice = &MaxPrice
	return params
}
func (params *CreateMessageParams) SetMediaUrl(MediaUrl []string) *CreateMessageParams {
	params.MediaUrl = &MediaUrl
	return params
}
func (params *CreateMessageParams) SetMessagingServiceSid(MessagingServiceSid string) *CreateMessageParams {
	params.MessagingServiceSid = &MessagingServiceSid
	return params
}
func (params *CreateMessageParams) SetPersistentAction(PersistentAction []string) *CreateMessageParams {
	params.PersistentAction = &PersistentAction
	return params
}
func (params *CreateMessageParams) SetProvideFeedback(ProvideFeedback bool) *CreateMessageParams {
	params.ProvideFeedback = &ProvideFeedback
	return params
}
func (params *CreateMessageParams) SetSmartEncoded(SmartEncoded bool) *CreateMessageParams {
	params.SmartEncoded = &SmartEncoded
	return params
}
func (params *CreateMessageParams) SetStatusCallback(StatusCallback string) *CreateMessageParams {
	params.StatusCallback = &StatusCallback
	return params
}
func (params *CreateMessageParams) SetTo(To string) *CreateMessageParams {
	params.To = &To
	return params
}
func (params *CreateMessageParams) SetValidityPeriod(ValidityPeriod int) *CreateMessageParams {
	params.ValidityPeriod = &ValidityPeriod
	return params
}
// Send a message from the account used to make the request
func (c *ApiService) CreateMessage(params *CreateMessageParams) (*ApiV2010Message, error) {
	path := "/2010-04-01/Accounts/{AccountSid}/Messages.json"
	// Substitute the account SID path parameter, falling back to the
	// client's configured default account when none was supplied.
	if params != nil && params.PathAccountSid != nil {
		path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
	} else {
		path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
	}

	// Encode every non-nil optional parameter as form data; nil fields are
	// simply omitted from the POST body.
	data := url.Values{}
	headers := make(map[string]interface{})

	if params != nil && params.AddressRetention != nil {
		data.Set("AddressRetention", *params.AddressRetention)
	}
	if params != nil && params.ApplicationSid != nil {
		data.Set("ApplicationSid", *params.ApplicationSid)
	}
	if params != nil && params.Attempt != nil {
		data.Set("Attempt", fmt.Sprint(*params.Attempt))
	}
	if params != nil && params.Body != nil {
		data.Set("Body", *params.Body)
	}
	if params != nil && params.ContentRetention != nil {
		data.Set("ContentRetention", *params.ContentRetention)
	}
	if params != nil && params.ForceDelivery != nil {
		data.Set("ForceDelivery", fmt.Sprint(*params.ForceDelivery))
	}
	if params != nil && params.From != nil {
		data.Set("From", *params.From)
	}
	if params != nil && params.MaxPrice != nil {
		data.Set("MaxPrice", fmt.Sprint(*params.MaxPrice))
	}
	if params != nil && params.MediaUrl != nil {
		// Repeated parameter: one MediaUrl entry per media item (Add, not Set).
		for _, item := range *params.MediaUrl {
			data.Add("MediaUrl", item)
		}
	}
	if params != nil && params.MessagingServiceSid != nil {
		data.Set("MessagingServiceSid", *params.MessagingServiceSid)
	}
	if params != nil && params.PersistentAction != nil {
		// Repeated parameter: one PersistentAction entry per action.
		for _, item := range *params.PersistentAction {
			data.Add("PersistentAction", item)
		}
	}
	if params != nil && params.ProvideFeedback != nil {
		data.Set("ProvideFeedback", fmt.Sprint(*params.ProvideFeedback))
	}
	if params != nil && params.SmartEncoded != nil {
		data.Set("SmartEncoded", fmt.Sprint(*params.SmartEncoded))
	}
	if params != nil && params.StatusCallback != nil {
		data.Set("StatusCallback", *params.StatusCallback)
	}
	if params != nil && params.To != nil {
		data.Set("To", *params.To)
	}
	if params != nil && params.ValidityPeriod != nil {
		data.Set("ValidityPeriod", fmt.Sprint(*params.ValidityPeriod))
	}

	resp, err := c.requestHandler.Post(c.baseURL+path, data, headers)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	// Decode the JSON response into the message resource representation.
	ps := &ApiV2010Message{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}

	return ps, err
}
// Optional parameters for the method 'DeleteMessage'
type DeleteMessageParams struct {
	// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Message resources to delete.
	PathAccountSid *string `json:"PathAccountSid,omitempty"`
}

// SetPathAccountSid sets the account SID and returns params for chaining.
func (params *DeleteMessageParams) SetPathAccountSid(PathAccountSid string) *DeleteMessageParams {
	params.PathAccountSid = &PathAccountSid
	return params
}
// Deletes a message record from your account
func (c *ApiService) DeleteMessage(Sid string, params *DeleteMessageParams) error {
	path := "/2010-04-01/Accounts/{AccountSid}/Messages/{Sid}.json"
	// Fall back to the client's configured default account when no explicit
	// account SID was supplied.
	if params != nil && params.PathAccountSid != nil {
		path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
	} else {
		path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
	}
	path = strings.Replace(path, "{"+"Sid"+"}", Sid, -1)

	data := url.Values{}
	headers := make(map[string]interface{})

	resp, err := c.requestHandler.Delete(c.baseURL+path, data, headers)
	if err != nil {
		return err
	}

	defer resp.Body.Close()

	return nil
}
// Optional parameters for the method 'FetchMessage'
type FetchMessageParams struct {
	// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Message resource to fetch.
	PathAccountSid *string `json:"PathAccountSid,omitempty"`
}

// SetPathAccountSid sets the account SID and returns params for chaining.
func (params *FetchMessageParams) SetPathAccountSid(PathAccountSid string) *FetchMessageParams {
	params.PathAccountSid = &PathAccountSid
	return params
}
// Fetch a message belonging to the account used to make the request
func (c *ApiService) FetchMessage(Sid string, params *FetchMessageParams) (*ApiV2010Message, error) {
	path := "/2010-04-01/Accounts/{AccountSid}/Messages/{Sid}.json"
	// Fall back to the client's configured default account when no explicit
	// account SID was supplied.
	if params != nil && params.PathAccountSid != nil {
		path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
	} else {
		path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
	}
	path = strings.Replace(path, "{"+"Sid"+"}", Sid, -1)

	data := url.Values{}
	headers := make(map[string]interface{})

	resp, err := c.requestHandler.Get(c.baseURL+path, data, headers)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	// Decode the JSON response into the message resource representation.
	ps := &ApiV2010Message{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}

	return ps, err
}
// Optional parameters for the method 'ListMessage'
//
// All fields are optional; populate them with the fluent Set* methods below.
type ListMessageParams struct {
	// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Message resources to read.
	PathAccountSid *string `json:"PathAccountSid,omitempty"`
	// Read messages sent to only this phone number.
	To *string `json:"To,omitempty"`
	// Read messages sent from only this phone number or alphanumeric sender ID.
	From *string `json:"From,omitempty"`
	// The date of the messages to show. Specify a date as `YYYY-MM-DD` in GMT to read only messages sent on this date. For example: `2009-07-06`. You can also specify an inequality, such as `DateSent<=YYYY-MM-DD`, to read messages sent on or before midnight on a date, and `DateSent>=YYYY-MM-DD` to read messages sent on or after midnight on a date.
	DateSent *time.Time `json:"DateSent,omitempty"`
	// The date of the messages to show. Specify a date as `YYYY-MM-DD` in GMT to read only messages sent on this date. For example: `2009-07-06`. You can also specify an inequality, such as `DateSent<=YYYY-MM-DD`, to read messages sent on or before midnight on a date, and `DateSent>=YYYY-MM-DD` to read messages sent on or after midnight on a date.
	DateSentBefore *time.Time `json:"DateSent<,omitempty"`
	// The date of the messages to show. Specify a date as `YYYY-MM-DD` in GMT to read only messages sent on this date. For example: `2009-07-06`. You can also specify an inequality, such as `DateSent<=YYYY-MM-DD`, to read messages sent on or before midnight on a date, and `DateSent>=YYYY-MM-DD` to read messages sent on or after midnight on a date.
	DateSentAfter *time.Time `json:"DateSent>,omitempty"`
	// How many resources to return in each list page. The default is 50, and the maximum is 1000.
	PageSize *int `json:"PageSize,omitempty"`
	// Max number of records to return.
	Limit *int `json:"limit,omitempty"`
}

// The Set* methods below form a fluent builder: each assigns one field and
// returns the params pointer so calls can be chained.
func (params *ListMessageParams) SetPathAccountSid(PathAccountSid string) *ListMessageParams {
	params.PathAccountSid = &PathAccountSid
	return params
}
func (params *ListMessageParams) SetTo(To string) *ListMessageParams {
	params.To = &To
	return params
}
func (params *ListMessageParams) SetFrom(From string) *ListMessageParams {
	params.From = &From
	return params
}
func (params *ListMessageParams) SetDateSent(DateSent time.Time) *ListMessageParams {
	params.DateSent = &DateSent
	return params
}
func (params *ListMessageParams) SetDateSentBefore(DateSentBefore time.Time) *ListMessageParams {
	params.DateSentBefore = &DateSentBefore
	return params
}
func (params *ListMessageParams) SetDateSentAfter(DateSentAfter time.Time) *ListMessageParams {
	params.DateSentAfter = &DateSentAfter
	return params
}
func (params *ListMessageParams) SetPageSize(PageSize int) *ListMessageParams {
	params.PageSize = &PageSize
	return params
}
func (params *ListMessageParams) SetLimit(Limit int) *ListMessageParams {
	params.Limit = &Limit
	return params
}
// Retrieve a single page of Message records from the API. Request is executed immediately.
func (c *ApiService) PageMessage(params *ListMessageParams, pageToken, pageNumber string) (*ListMessageResponse, error) {
	path := "/2010-04-01/Accounts/{AccountSid}/Messages.json"

	// Fall back to the client's configured default account when no explicit
	// account SID was supplied.
	if params != nil && params.PathAccountSid != nil {
		path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
	} else {
		path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
	}

	// Encode every non-nil filter as a query parameter; nil fields are omitted.
	data := url.Values{}
	headers := make(map[string]interface{})

	if params != nil && params.To != nil {
		data.Set("To", *params.To)
	}
	if params != nil && params.From != nil {
		data.Set("From", *params.From)
	}
	if params != nil && params.DateSent != nil {
		data.Set("DateSent", fmt.Sprint((*params.DateSent).Format(time.RFC3339)))
	}
	if params != nil && params.DateSentBefore != nil {
		data.Set("DateSent<", fmt.Sprint((*params.DateSentBefore).Format(time.RFC3339)))
	}
	if params != nil && params.DateSentAfter != nil {
		data.Set("DateSent>", fmt.Sprint((*params.DateSentAfter).Format(time.RFC3339)))
	}
	if params != nil && params.PageSize != nil {
		data.Set("PageSize", fmt.Sprint(*params.PageSize))
	}

	// Pagination cursor/page-number parameters, when requesting a page other
	// than the first.
	if pageToken != "" {
		data.Set("PageToken", pageToken)
	}
	if pageNumber != "" {
		data.Set("Page", pageNumber)
	}

	resp, err := c.requestHandler.Get(c.baseURL+path, data, headers)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	ps := &ListMessageResponse{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}

	return ps, err
}
// Lists Message records from the API as a list. Unlike stream, this operation is eager and loads 'limit' records into memory before returning.
func (c *ApiService) ListMessage(params *ListMessageParams) ([]ApiV2010Message, error) {
	if params == nil {
		params = &ListMessageParams{}
	}
	// Reconcile the requested page size with the overall record limit.
	params.SetPageSize(client.ReadLimits(params.PageSize, params.Limit))

	response, err := c.PageMessage(params, "", "")
	if err != nil {
		return nil, err
	}

	curRecord := 0
	var records []ApiV2010Message

	// Follow the "next page" links until there are no more pages or the
	// record limit is reached.
	for response != nil {
		records = append(records, response.Messages...)

		var record interface{}
		if record, err = client.GetNext(c.baseURL, response, &curRecord, params.Limit, c.getNextListMessageResponse); record == nil || err != nil {
			return records, err
		}

		response = record.(*ListMessageResponse)
	}

	return records, err
}
// Streams Message records from the API as a channel stream. This operation lazily loads records as efficiently as possible until the limit is reached.
func (c *ApiService) StreamMessage(params *ListMessageParams) (chan ApiV2010Message, error) {
	if params == nil {
		params = &ListMessageParams{}
	}
	// Reconcile the requested page size with the overall record limit.
	params.SetPageSize(client.ReadLimits(params.PageSize, params.Limit))

	// Fetch the first page synchronously so the caller sees request errors
	// immediately; subsequent pages are fetched from the goroutine below.
	response, err := c.PageMessage(params, "", "")
	if err != nil {
		return nil, err
	}

	curRecord := 0
	//set buffer size of the channel to 1
	channel := make(chan ApiV2010Message, 1)

	// The goroutine owns the channel and is the only sender, so it is the one
	// that closes it — either on exhaustion, on reaching the limit, or on error.
	go func() {
		for response != nil {
			for item := range response.Messages {
				channel <- response.Messages[item]
			}

			var record interface{}
			if record, err = client.GetNext(c.baseURL, response, &curRecord, params.Limit, c.getNextListMessageResponse); record == nil || err != nil {
				close(channel)
				return
			}

			response = record.(*ListMessageResponse)
		}
		close(channel)
	}()

	return channel, err
}
// getNextListMessageResponse fetches and decodes the page at nextPageUrl.
// An empty URL signals the last page and yields (nil, nil).
func (c *ApiService) getNextListMessageResponse(nextPageUrl string) (interface{}, error) {
	if nextPageUrl == "" {
		return nil, nil
	}
	resp, err := c.requestHandler.Get(nextPageUrl, nil, nil)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	ps := &ListMessageResponse{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}
	return ps, nil
}
// Optional parameters for the method 'UpdateMessage'
type UpdateMessageParams struct {
	// The SID of the [Account](https://www.twilio.com/docs/iam/api/account) that created the Message resources to update.
	PathAccountSid *string `json:"PathAccountSid,omitempty"`
	// The text of the message you want to send. Can be up to 1,600 characters long.
	Body *string `json:"Body,omitempty"`
}

// The Set* methods below form a fluent builder: each assigns one field and
// returns the params pointer so calls can be chained.
func (params *UpdateMessageParams) SetPathAccountSid(PathAccountSid string) *UpdateMessageParams {
	params.PathAccountSid = &PathAccountSid
	return params
}
func (params *UpdateMessageParams) SetBody(Body string) *UpdateMessageParams {
	params.Body = &Body
	return params
}
// To redact a message-body from a post-flight message record, post to the message instance resource with an empty body
func (c *ApiService) UpdateMessage(Sid string, params *UpdateMessageParams) (*ApiV2010Message, error) {
	path := "/2010-04-01/Accounts/{AccountSid}/Messages/{Sid}.json"
	// Fall back to the client's configured default account when no explicit
	// account SID was supplied.
	if params != nil && params.PathAccountSid != nil {
		path = strings.Replace(path, "{"+"AccountSid"+"}", *params.PathAccountSid, -1)
	} else {
		path = strings.Replace(path, "{"+"AccountSid"+"}", c.requestHandler.Client.AccountSid(), -1)
	}
	path = strings.Replace(path, "{"+"Sid"+"}", Sid, -1)

	data := url.Values{}
	headers := make(map[string]interface{})

	if params != nil && params.Body != nil {
		data.Set("Body", *params.Body)
	}

	resp, err := c.requestHandler.Post(c.baseURL+path, data, headers)
	if err != nil {
		return nil, err
	}

	defer resp.Body.Close()

	// Decode the JSON response into the updated message resource.
	ps := &ApiV2010Message{}
	if err := json.NewDecoder(resp.Body).Decode(ps); err != nil {
		return nil, err
	}

	return ps, err
}
package dynamic
import (
"bytes"
"reflect"
"github.com/golang/protobuf/proto"
"github.com/jhump/protoreflect/desc"
)
// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they
// have the same message type and same fields set to equal values. For proto3 messages, fields set
// to their zero value are considered unset.
func Equal(a, b *Message) bool {
	// Messages of different types are never equal.
	if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() {
		return false
	}
	// Cheap cardinality checks first: the per-field loops below only verify
	// that every field of a appears in b; equal lengths make that symmetric.
	if len(a.values) != len(b.values) {
		return false
	}
	if len(a.unknownFields) != len(b.unknownFields) {
		return false
	}
	// Every known field set on a must be set on b with an equal value.
	for tag, aval := range a.values {
		bval, ok := b.values[tag]
		if !ok {
			return false
		}
		if !fieldsEqual(aval, bval) {
			return false
		}
	}
	// Unknown fields must match pairwise, in order, with identical wire
	// encodings and payloads.
	for tag, au := range a.unknownFields {
		bu, ok := b.unknownFields[tag]
		if !ok {
			return false
		}
		if len(au) != len(bu) {
			return false
		}
		for i, aval := range au {
			bval := bu[i]
			if aval.Encoding != bval.Encoding {
				return false
			}
			// Length-delimited and group-encoded fields carry raw bytes in
			// Contents; all other wire types carry a numeric Value.
			if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup {
				if !bytes.Equal(aval.Contents, bval.Contents) {
					return false
				}
			} else if aval.Value != bval.Value {
				return false
			}
		}
	}
	// all checks pass!
	return true
}
// fieldsEqual compares two field values that may use different runtime
// representations: messages are compared via MessagesEqual, maps and slices
// element-wise, []byte via bytes.Equal, and all other scalars with ==.
func fieldsEqual(aval, bval interface{}) bool {
	arv := reflect.ValueOf(aval)
	brv := reflect.ValueOf(bval)
	if arv.Type() != brv.Type() {
		// it is possible that one is a dynamic message and one is not
		apm, ok := aval.(proto.Message)
		if !ok {
			return false
		}
		bpm, ok := bval.(proto.Message)
		if !ok {
			return false
		}
		if !MessagesEqual(apm, bpm) {
			return false
		}
	} else {
		switch arv.Kind() {
		case reflect.Ptr:
			apm, ok := aval.(proto.Message)
			if !ok {
				// Don't know how to compare pointer values that aren't messages!
				// Maybe this should panic?
				return false
			}
			bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type
			if !MessagesEqual(apm, bpm) {
				return false
			}
		case reflect.Map:
			if !mapsEqual(arv, brv) {
				return false
			}
		case reflect.Slice:
			// []byte gets a fast byte-wise comparison; any other slice is
			// compared element-wise (recursively, via fieldsEqual).
			if arv.Type() == typeOfBytes {
				if !bytes.Equal(aval.([]byte), bval.([]byte)) {
					return false
				}
			} else {
				if !slicesEqual(arv, brv) {
					return false
				}
			}
		default:
			// Scalars (numbers, strings, bools, enums) compare directly.
			if aval != bval {
				return false
			}
		}
	}
	return true
}
// mapsEqual reports whether two reflected maps contain exactly the same keys
// with field-equal values.
func mapsEqual(a, b reflect.Value) bool {
	if a.Len() != b.Len() {
		return false
	}
	for _, key := range a.MapKeys() {
		other := b.MapIndex(key)
		if !other.IsValid() {
			// Key present in a but missing from b.
			return false
		}
		if !fieldsEqual(a.MapIndex(key).Interface(), other.Interface()) {
			return false
		}
	}
	return true
}
// slicesEqual reports whether two reflected slices have the same length and
// field-equal elements at every index.
func slicesEqual(a, b reflect.Value) bool {
	n := a.Len()
	if n != b.Len() {
		return false
	}
	for i := 0; i < n; i++ {
		if !fieldsEqual(a.Index(i).Interface(), b.Index(i).Interface()) {
			return false
		}
	}
	return true
}
// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal
// when one or both of the messages might be a dynamic message.
func MessagesEqual(a, b proto.Message) bool {
	da, aok := a.(*Message)
	db, bok := b.(*Message)
	// Both dynamic messages
	if aok && bok {
		return Equal(da, db)
	}
	// Neither dynamic messages
	if !aok && !bok {
		return proto.Equal(a, b)
	}
	// Mixed: convert the generated message into a dynamic one (built with the
	// dynamic side's message factory) and compare the two dynamic messages.
	if aok {
		md, err := desc.LoadMessageDescriptorForMessage(b)
		if err != nil {
			// no descriptor for b; cannot compare, treat as unequal
			return false
		}
		db = newMessageWithMessageFactory(md, da.mf)
		if db.ConvertFrom(b) != nil {
			return false
		}
		return Equal(da, db)
	} else {
		md, err := desc.LoadMessageDescriptorForMessage(a)
		if err != nil {
			return false
		}
		da = newMessageWithMessageFactory(md, db.mf)
		if da.ConvertFrom(a) != nil {
			return false
		}
		return Equal(da, db)
	}
}
Package rbac provides role-based access control for vtadmin API endpoints.
Functionality is split between two distinct components: the authenticator and
the authorizer.
The authenticator is optional, and is responsible for extracting information
from a request (gRPC or HTTP) to produce an Actor, which is added to the context
by interceptors/middlewares and eventually checked by the authorizer.
The authorizer maintains a set of rules for each resource type, and, given a
request context, action, resource, and cluster, checks its ruleset to see if
the Actor in the context (set by some authenticator) has a rule allowing it to
perform that <action, resource, cluster> tuple.
The design of package rbac is governed by the following principles:
1. Authentication is pluggable. Authorization is configurable.
VTAdmin will not be specific about how exactly you authenticate users for your
setup. Instead, users can provide whatever implementation suits their needs that
conforms to the expected Authenticator interface, and vtadmin will use that when
setting up the interceptors/middlewares. Currently, authenticators may be
registered at runtime via the rbac.RegisterAuthenticator method, or may be set
as a Go plugin (built via `go build -buildmode=plugin`) by setting the
authenticator name as a path ending in ".so" in the rbac config.
2. Permissions are additive. There is no concept of a negative permission (or
revocation). To "revoke" a permission from a user or role, structure your rules
such that they are never granted that permission.
3. Authentication is done at the gRPC/HTTP ingress boundaries.
4. Authorization is done at the API boundary. Individual clusters do not perform
authorization checks, instead relying on the calling API method to perform that
check before calling into the cluster.
5. Being unauthorized for an <action, resource> for a cluster does not fail the
overall request. Instead, the action is simply not taken in that cluster, and is
still taken in other clusters for which the actor is authorized.
*/
package rbac
// Action is an enum representing the possible actions that can be taken. Not
// every resource supports every possible action.
type Action string

// Action definitions.
const (
	CreateAction Action = "create"
	DeleteAction Action = "delete"
	GetAction    Action = "get"
	PingAction   Action = "ping"
	PutAction    Action = "put"
)
// Resource is an enum representing all resources managed by vtadmin.
type Resource string

// Resource definitions.
const (
	ClusterResource Resource = "Cluster"

	/* generic topo resources */

	CellInfoResource   Resource = "CellInfo"
	CellsAliasResource Resource = "CellsAlias"
	KeyspaceResource   Resource = "Keyspace"
	ShardResource      Resource = "Shard"
	TabletResource     Resource = "Tablet"
	VTGateResource     Resource = "VTGate"
	VtctldResource     Resource = "Vtctld"

	/* vschema resources */

	SrvVSchemaResource Resource = "SrvVSchema"
	VSchemaResource    Resource = "VSchema"

	/* misc resources */

	BackupResource                   Resource = "Backup"
	SchemaResource                   Resource = "Schema"
	ShardReplicationPositionResource Resource = "ShardReplicationPosition"
	WorkflowResource                 Resource = "Workflow"

	VTExplainResource Resource = "VTExplain"
)
package services
import (
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/lib/utils/parse"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
)
// TraitMapping is a mapping that maps a trait to one or
// more teleport roles.
type TraitMapping struct {
	// Trait is a teleport trait name
	Trait string `json:"trait"`
	// Value is trait value to match
	Value string `json:"value"`
	// Roles is a list of static teleport roles to map to
	Roles []string `json:"roles,omitempty"`
}

// TraitMappingSet is a set of trait mappings.
type TraitMappingSet []TraitMapping
// TraitsToRoles maps the supplied traits to a list of teleport role names.
// The returned list is deduplicated.
func (ms TraitMappingSet) TraitsToRoles(traits map[string][]string) []string {
	var out []string
	collector := func(role string, expanded bool) {
		out = append(out, role)
	}
	ms.traitsToRoles(traits, collector)
	return utils.Deduplicate(out)
}
// TraitsToRoleMatchers maps the supplied traits to a list of role matchers. Prefer calling
// this function directly rather than calling TraitsToRoles and then building matchers from
// the resulting list since this function forces any roles which include substitutions to
// be literal matchers.
func (ms TraitMappingSet) TraitsToRoleMatchers(traits map[string][]string) ([]parse.Matcher, error) {
	var matchers []parse.Matcher
	// only the first matcher-construction error is reported to the caller
	var firstErr error
	ms.traitsToRoles(traits, func(role string, expanded bool) {
		if expanded || utils.ContainsExpansion(role) {
			// mapping process included variable expansion; we therefore
			// "escape" normal matcher syntax and look only for exact matches.
			// (note: this isn't about combatting maliciously constructed traits,
			// traits are from trusted identity sources, this is just
			// about avoiding unnecessary footguns).
			matchers = append(matchers, literalMatcher{
				value: role,
			})
			return
		}
		m, err := parse.NewMatcher(role)
		if err != nil {
			if firstErr == nil {
				firstErr = err
			}
			return
		}
		matchers = append(matchers, m)
	})
	if firstErr != nil {
		return nil, trace.Wrap(firstErr)
	}
	return matchers, nil
}
// traitsToRoles maps the supplied traits to teleport role names, passing each
// resulting role to collect along with whether regexp expansion took place.
func (ms TraitMappingSet) traitsToRoles(traits map[string][]string, collect func(role string, expanded bool)) {
	for _, mapping := range ms {
		for traitName, traitValues := range traits {
			if traitName != mapping.Trait {
				continue
			}
		TraitLoop:
			for _, traitValue := range traitValues {
				for _, role := range mapping.Roles {
					outRole, err := utils.ReplaceRegexp(mapping.Value, role, traitValue)
					switch {
					case err != nil:
						if trace.IsNotFound(err) {
							log.WithError(err).Debugf("Failed to match expression %v, replace with: %v input: %v", mapping.Value, role, traitValue)
						}
						// this trait value clearly did not match, move on to another
						// NOTE(review): `continue TraitLoop` skips the remaining
						// roles for the current trait value, not just the current
						// role — confirm this is intentional.
						continue TraitLoop
					// skip empty replacement or empty role
					case outRole == "":
					case outRole != "":
						// expanded is true when the regexp replacement changed the role
						collect(outRole, outRole != role)
					}
				}
			}
		}
	}
}
// literalMatcher is used to "escape" values which are not allowed to
// take advantage of normal matcher syntax by limiting them to only
// literal matches.
type literalMatcher struct {
	value string
}

// Match reports whether in is byte-for-byte identical to the stored literal.
func (m literalMatcher) Match(in string) bool {
	return in == m.value
}
package grid
import (
"context"
"fmt"
"strings"
"github.com/airbusgeo/geocube/internal/utils/affine"
"github.com/airbusgeo/geocube/internal/utils/proj"
"github.com/airbusgeo/godal"
"github.com/twpayne/go-geom"
"github.com/twpayne/go-geom/encoding/wkb"
)
// UnsupportedGridErr is returned when the grid name is not supported.
type UnsupportedGridErr struct {
	GridName string
}

// Error implements the error interface.
func (err UnsupportedGridErr) Error() string {
	// Plain concatenation: the grid name is data, not a format string. The
	// previous fmt.Sprintf call used it as a format string, so names
	// containing '%' verbs produced garbled messages (and failed go vet).
	return "unsupported grid type: " + err.GridName
}
// ReservedNames lists the grid names handled natively by NewGrid and which are
// therefore unavailable as custom grid identifiers.
var ReservedNames = []string{"regular", "singlecell"}
// Cell is a polygon with a resolution on the surface of the Earth defined using either
// a WGS84 polygon or a projected polygon. Which of them is the reference
type Cell struct {
	URI            string
	CRS            *godal.SpatialRef
	PixelToCRS     *affine.Affine // Transform from pixel to crs/geometric coordinates
	SizeX, SizeY   int
	GeographicRing proj.GeographicRing // lon/lat geodetic coordinates
	Ring           proj.Ring           // coordinates in the CRS
}

// StreamedURI carries one cell URI (or the error that interrupted the stream)
// on the channel returned by Grid.Covers.
type StreamedURI struct {
	URI   string
	Error error
}
// Grid partitions the surface of the Earth into addressable cells.
type Grid interface {
	// Cell returns the cell defined by the uri.
	Cell(uri string) (*Cell, error)
	// Covers streams uris of cells covering the AOI.
	// The uris are unique, but the cells defined by the uris might overlap.
	Covers(ctx context.Context, aoi *geom.MultiPolygon) (<-chan StreamedURI, error)
}
// NewGrid creates a new grid from flag and parameters (proj4 format)
func NewGrid(flags []string, parameters map[string]string) (Grid, error) {
	name, ok := parameters["grid"]
	if !ok {
		return nil, fmt.Errorf("missing 'grid' in parameters")
	}
	switch strings.ToLower(name) {
	case "regular":
		return newRegularGrid(flags, parameters)
	case "singlecell":
		return newSingleCellGrid(flags, parameters)
	default:
		return nil, UnsupportedGridErr{name}
	}
}
// newCell builds a Cell from its CRS description and pixel extent, computing
// both the projected ring and its geographic (lon/lat) counterpart using the
// supplied geometric-to-geographic transform p.
func newCell(uri string, crs *godal.SpatialRef, srid int, pixToCRS *affine.Affine, sizeX, sizeY int, p *godal.Transform) *Cell {
	c := Cell{
		URI:        uri,
		CRS:        crs,
		PixelToCRS: pixToCRS,
		SizeX:      sizeX,
		SizeY:      sizeY,
		Ring:       proj.NewRingFromExtent(pixToCRS, sizeX, sizeY, srid),
	}

	// Prepare geometric to geographic transform
	x, y := proj.FlatCoordToXY(c.Ring.FlatCoords())

	// Transform all the coordinate at once
	// NOTE(review): the returned status of TransformEx is discarded — confirm
	// failures here are acceptable to ignore.
	p.TransformEx(x, y, make([]float64, len(x)), nil)

	// TODO densify

	// Convert to flat_coords
	c.GeographicRing = proj.GeographicRing{Ring: proj.NewRingFlat(4326, proj.XYToFlatCoord(x, y))}

	return &c
}
// CellsToJSON converts an array of cells_uri to a geojson
func CellsToJSON(gr Grid, cellsURI []string) (string, error) {
	multi := geom.NewMultiPolygon(geom.XY)
	for _, uri := range cellsURI {
		cell, err := gr.Cell(uri)
		if err != nil {
			return "", fmt.Errorf("unable to retrieve the cell '%s': %w", uri, err)
		}
		ring := cell.GeographicRing.LinearRing
		flat := ring.FlatCoords()
		poly := geom.NewPolygonFlat(geom.XY, flat, []int{len(flat)})
		if err = multi.Push(poly); err != nil {
			return "", fmt.Errorf("failed to push polygon: %w", err)
		}
	}
	return geomToJSON(multi)
}
func geomToJSON(g geom.T) (string, error) {
// Convert the coordinates to WKB and update feature.geometry
geomwkb, err := wkb.Marshal(g, wkb.NDR)
if err != nil {
return "", err
}
srs, err := godal.NewSpatialRefFromEPSG(4326)
if err != nil {
return "", err
}
geometry, err := godal.NewGeometryFromWKB(geomwkb, srs)
if err != nil {
return "", err
}
defer geometry.Close()
return geometry.GeoJSON(godal.SignificantDigits(12))
} | internal/utils/grid/abstractgrid.go | 0.709824 | 0.45181 | abstractgrid.go | starcoder |
package iso20022
// Details of the margin call request.
type MarginCall1 struct {

	// Sum of the exposures of all transactions which are in the favour of party A. That is, all transactions which would have an amount payable by party B to party A if they were being terminated.
	ExposedAmountPartyA *ActiveCurrencyAndAmount `xml:"XpsdAmtPtyA,omitempty"`

	// Sum of the exposures of all transactions which are in the favour of party B. That is, all transactions which would have an amount payable by party A to party B if they were being terminated.
	ExposedAmountPartyB *ActiveCurrencyAndAmount `xml:"XpsdAmtPtyB,omitempty"`

	// Determines how the variation margin requirement is to be calculated:
	// - either Net, in which the exposure of all transactions in favour of party A and the the exposure of all transactions in favour of party B will be netted together or
	// - gross in which two separate variation margin requirements will be determined.
	ExposureConvention *ExposureConventionType1Code `xml:"XpsrCnvntn,omitempty"`

	// Amount applied as an add-on to the exposure (to party A) usually intended to cover a possible increase in exposure before the next valuation date.
	IndependentAmountPartyA *AggregatedIndependentAmount1 `xml:"IndpdntAmtPtyA,omitempty"`

	// An amount applied as an add-on to the exposure (to party B) usually intended to cover a possible increase in exposure before the next valuation date.
	IndependentAmountPartyB *AggregatedIndependentAmount1 `xml:"IndpdntAmtPtyB,omitempty"`

	// Provides information like threshold amount, threshold type, minimum transfer amount, rounding amount or rounding convention, that applies to either the variation margin or the segregated independent amount.
	MarginTerms *MarginTerms1Choice `xml:"MrgnTerms,omitempty"`

	// Provides details about the collateral held, in transit or that still needs to be agreed by both parties with a segregation between variation margin and segregated independent amount.
	CollateralBalance *CollateralBalance1Choice `xml:"CollBal,omitempty"`
}
// SetExposedAmountPartyA sets ExposedAmountPartyA from a value/currency pair.
func (m *MarginCall1) SetExposedAmountPartyA(value, currency string) {
	m.ExposedAmountPartyA = NewActiveCurrencyAndAmount(value, currency)
}

// SetExposedAmountPartyB sets ExposedAmountPartyB from a value/currency pair.
func (m *MarginCall1) SetExposedAmountPartyB(value, currency string) {
	m.ExposedAmountPartyB = NewActiveCurrencyAndAmount(value, currency)
}

// SetExposureConvention sets ExposureConvention from the given code string.
func (m *MarginCall1) SetExposureConvention(value string) {
	m.ExposureConvention = (*ExposureConventionType1Code)(&value)
}

// AddIndependentAmountPartyA allocates and returns IndependentAmountPartyA.
func (m *MarginCall1) AddIndependentAmountPartyA() *AggregatedIndependentAmount1 {
	m.IndependentAmountPartyA = new(AggregatedIndependentAmount1)
	return m.IndependentAmountPartyA
}

// AddIndependentAmountPartyB allocates and returns IndependentAmountPartyB.
func (m *MarginCall1) AddIndependentAmountPartyB() *AggregatedIndependentAmount1 {
	m.IndependentAmountPartyB = new(AggregatedIndependentAmount1)
	return m.IndependentAmountPartyB
}

// AddMarginTerms allocates and returns MarginTerms.
func (m *MarginCall1) AddMarginTerms() *MarginTerms1Choice {
	m.MarginTerms = new(MarginTerms1Choice)
	return m.MarginTerms
}

// AddCollateralBalance allocates and returns CollateralBalance.
func (m *MarginCall1) AddCollateralBalance() *CollateralBalance1Choice {
	m.CollateralBalance = new(CollateralBalance1Choice)
	return m.CollateralBalance
}
package simplify
import (
"math"
"github.com/macheal/orb"
)
var _ orb.Simplifier = &VisvalingamSimplifier{}

// A VisvalingamSimplifier is a reducer that
// performs the Visvalingam-Whyatt algorithm.
type VisvalingamSimplifier struct {
	Threshold float64 // points forming triangles with smaller area are removed
	ToKeep    int     // minimum number of points to retain
}
// Visvalingam creates a new VisvalingamSimplifier.
func Visvalingam(threshold float64, minPointsToKeep int) *VisvalingamSimplifier {
	return &VisvalingamSimplifier{
		Threshold: threshold,
		ToKeep:    minPointsToKeep,
	}
}

// VisvalingamThreshold runs the Visvalingam-Whyatt algorithm removing
// triangles whose area is below the threshold.
func VisvalingamThreshold(threshold float64) *VisvalingamSimplifier {
	return Visvalingam(threshold, 0)
}

// VisvalingamKeep runs the Visvalingam-Whyatt algorithm removing
// triangles of minimum area until we're down to `toKeep` number of points.
// The unbounded threshold means area never stops the reduction early.
func VisvalingamKeep(toKeep int) *VisvalingamSimplifier {
	return Visvalingam(math.MaxFloat64, toKeep)
}
// simplify runs Visvalingam-Whyatt on ls in place, returning the reduced line
// string and, when wim (with-index-map) is true, a map from new indexes to the
// original point indexes. Interior points are kept in a doubly linked list and
// a min-heap keyed on triangle area; the smallest-area point is removed until
// the area threshold or the ToKeep floor is reached.
func (s *VisvalingamSimplifier) simplify(ls orb.LineString, wim bool) (orb.LineString, []int) {
	var indexMap []int
	if len(ls) <= s.ToKeep {
		if wim {
			// create identity map
			indexMap = make([]int, len(ls))
			for i := range ls {
				indexMap[i] = i
			}
		}
		return ls, indexMap
	}

	// edge cases checked, get on with it
	threshold := s.Threshold * 2 // triangle area is doubled to save the multiply :)
	removed := 0

	// build the initial minheap linked list.
	heap := minHeap(make([]*visItem, 0, len(ls)))
	linkedListStart := &visItem{
		area:       math.Inf(1), // endpoints are never removed
		pointIndex: 0,
	}
	heap.Push(linkedListStart)

	// internal path items
	items := make([]visItem, len(ls))

	previous := linkedListStart
	for i := 1; i < len(ls)-1; i++ {
		item := &items[i]

		item.area = doubleTriangleArea(ls, i-1, i, i+1)
		item.pointIndex = i
		item.previous = previous

		heap.Push(item)
		previous.next = item
		previous = item
	}

	// final item
	endItem := &items[len(ls)-1]
	endItem.area = math.Inf(1)
	endItem.pointIndex = len(ls) - 1
	endItem.previous = previous

	previous.next = endItem
	heap.Push(endItem)

	// run through the reduction process
	for len(heap) > 0 {
		current := heap.Pop()
		if current.area > threshold || len(ls)-removed <= s.ToKeep {
			break
		}

		next := current.next
		previous := current.previous

		// remove current element from linked list
		previous.next = current.next
		next.previous = current.previous
		removed++

		// figure out the new areas for the two neighbours; taking the max with
		// the removed area keeps removal order monotonic
		if previous.previous != nil {
			area := doubleTriangleArea(ls,
				previous.previous.pointIndex,
				previous.pointIndex,
				next.pointIndex,
			)

			area = math.Max(area, current.area)
			heap.Update(previous, area)
		}

		if next.next != nil {
			area := doubleTriangleArea(ls,
				previous.pointIndex,
				next.pointIndex,
				next.next.pointIndex,
			)

			area = math.Max(area, current.area)
			heap.Update(next, area)
		}
	}

	// compact the surviving points to the front of ls by walking the list
	item := linkedListStart
	count := 0
	for item != nil {
		ls[count] = ls[item.pointIndex]
		count++

		if wim {
			indexMap = append(indexMap, item.pointIndex)
		}

		item = item.next
	}

	return ls[:count], indexMap
}
// Stuff to create the priority queue, or min heap.
// Rewriting it here, vs using the std lib, resulted in a 50% performance bump!
type minHeap []*visItem

type visItem struct {
	area       float64 // triangle area
	pointIndex int     // index of point in original path

	// to keep a virtual linked list to help rebuild the triangle areas as we remove points.
	next     *visItem
	previous *visItem

	index int // interal index in heap, for removal and update
}
// Push appends the item and sifts it up to restore the heap property.
func (h *minHeap) Push(item *visItem) {
	item.index = len(*h)
	*h = append(*h, item)
	h.up(item.index)
}

// Pop removes and returns the minimum-area item, moving the last element to
// the root and sifting it down.
func (h *minHeap) Pop() *visItem {
	removed := (*h)[0]
	lastItem := (*h)[len(*h)-1]
	(*h) = (*h)[:len(*h)-1]

	if len(*h) > 0 {
		lastItem.index = 0
		(*h)[0] = lastItem
		h.down(0)
	}

	return removed
}

// Update assigns a new area to an item already in the heap and restores the
// heap property in the appropriate direction.
func (h minHeap) Update(item *visItem, area float64) {
	if item.area > area {
		// area got smaller
		item.area = area
		h.up(item.index)
	} else {
		// area got larger
		item.area = area
		h.down(item.index)
	}
}

// up sifts the item at index i toward the root while it is smaller than its parent.
func (h minHeap) up(i int) {
	object := h[i]
	for i > 0 {
		up := ((i + 1) >> 1) - 1 // parent index
		parent := h[up]

		if parent.area <= object.area {
			// parent is smaller so we're done fixing up the heap.
			break
		}

		// swap nodes
		parent.index = i
		h[i] = parent

		object.index = up
		h[up] = object

		i = up
	}
}

// down sifts the item at index i toward the leaves while a child is smaller.
func (h minHeap) down(i int) {
	object := h[i]
	for {
		right := (i + 1) << 1
		left := right - 1

		down := i
		child := h[down]

		// swap with smallest child
		if left < len(h) && h[left].area < child.area {
			down = left
			child = h[down]
		}

		if right < len(h) && h[right].area < child.area {
			down = right
			child = h[down]
		}

		// non smaller, so quit
		if down == i {
			break
		}

		// swap the nodes (child.index was just set to i, so h[child.index] == h[i])
		child.index = i
		h[child.index] = child

		object.index = down
		h[down] = object

		i = down
	}
}
// doubleTriangleArea returns twice the area of the triangle formed by the
// points at indexes i1, i2 and i3 of ls (magnitude of the 2D cross product).
func doubleTriangleArea(ls orb.LineString, i1, i2, i3 int) float64 {
	p1, p2, p3 := ls[i1], ls[i2], ls[i3]
	cross := (p2[0]-p1[0])*(p3[1]-p1[1]) - (p2[1]-p1[1])*(p3[0]-p1[0])
	return math.Abs(cross)
}
// Simplify will run the simplification for any geometry type.
func (s *VisvalingamSimplifier) Simplify(g orb.Geometry) orb.Geometry {
	return simplify(s, g)
}

// LineString will simplify the linestring using this simplifier.
func (s *VisvalingamSimplifier) LineString(ls orb.LineString) orb.LineString {
	return lineString(s, ls)
}

// MultiLineString will simplify the multi-linestring using this simplifier.
func (s *VisvalingamSimplifier) MultiLineString(mls orb.MultiLineString) orb.MultiLineString {
	return multiLineString(s, mls)
}

// Ring will simplify the ring using this simplifier.
func (s *VisvalingamSimplifier) Ring(r orb.Ring) orb.Ring {
	return ring(s, r)
}

// Polygon will simplify the polygon using this simplifier.
func (s *VisvalingamSimplifier) Polygon(p orb.Polygon) orb.Polygon {
	return polygon(s, p)
}

// MultiPolygon will simplify the multi-polygon using this simplifier.
func (s *VisvalingamSimplifier) MultiPolygon(mp orb.MultiPolygon) orb.MultiPolygon {
	return multiPolygon(s, mp)
}

// Collection will simplify the collection using this simplifier.
func (s *VisvalingamSimplifier) Collection(c orb.Collection) orb.Collection {
	return collection(s, c)
}
package pattern
import (
"regexp"
"strings"
)
// Params defines a map of stringed keys and values.
type Params map[string]string

// Matchable defines an interface for matchers.
type Matchable interface {
	IsParam() bool
	HasHash() bool
	Segment() string
	Validate(string) bool
}

// Matchers defines a list of machers for validating patterns with.
type Matchers []Matchable

// URIMatcher defines an interface for a URI matcher.
type URIMatcher interface {
	Validate(string) (Params, string, bool)
	Pattern() string
	Priority() int
}
// matchProvider provides a class array-path matcher
type matchProvider struct {
	pattern  string
	matchers Matchers
	endless  bool // true when the pattern may match paths with extra trailing segments
	priority int
}
// New returns a new instance of a URIMatcher.
func New(pattern string) URIMatcher {
	normalized := addSlash(pattern)
	return &matchProvider{
		pattern:  normalized,
		matchers: SegmentList(normalized),
		endless:  IsEndless(normalized),
		priority: CheckPriority(normalized),
	}
}
// Priority returns the priority status of this giving pattern.
func (m *matchProvider) Priority() int {
	return m.priority
}

// Pattern returns the pattern string for this matcher.
func (m *matchProvider) Pattern() string {
	return m.pattern
}
// Validate returns true/false if the giving string matches the pattern, returning
// a map of parameters match against segments of the pattern, plus any
// unconsumed remainder of the path (relevant for endless patterns).
func (m *matchProvider) Validate(path string) (Params, string, bool) {
	path = addSlash(path)
	stripped := stripAndClean(path)
	hashedSrc := stripAndCleanButHash(path)
	cleaned := cleanPath(stripped)
	src := splitPattern(cleaned)
	srclen := len(src)
	total := len(m.matchers)

	// a non-endless pattern must consume the path exactly
	if !m.endless && total != srclen {
		return nil, "", false
	}

	// an endless pattern needs at least as many path segments as matchers
	if m.endless && total > srclen {
		return nil, "", false
	}

	var state bool

	param := make(Params)

	var lastIndex int
	var doneHash bool

	for index, v := range m.matchers {
		lastIndex = index

		if v.HasHash() {
			doneHash = true
		}

		if v.Validate(src[index]) {
			if v.IsParam() {
				param[v.Segment()] = src[index]
			}

			state = true
			continue
		} else {
			state = false
			break
		}
	}

	// the whole path was consumed: no remainder
	if lastIndex+1 == srclen {
		return param, "", state
	}

	remPath := strings.Join(src[lastIndex+1:], "/")

	// no hash to restore: return the raw remainder
	if doneHash || !strings.Contains(hashedSrc, "#") {
		return param, addSlash(remPath), state
	}

	// rebuild the remainder, re-attaching the "#" marker to hashed segments
	var rems []string

	fragment := SegmentList(hashedSrc)[lastIndex+1:]
	for _, item := range fragment {
		if item.HasHash() {
			hashed := "#" + item.Segment()
			rems = append(rems, hashed)
			continue
		}

		rems = append(rems, item.Segment())
	}

	return param, addSlash(strings.Join(rems, "/")), state
}

//==============================================================================
// SegmentList returns list of SegmentMatcher which implements the Matchable
// interface, with each made of each segment of the pattern.
func SegmentList(pattern string) Matchers {
	pattern = stripAndCleanButHash(pattern)

	var set Matchers

	// Normalize the "#" so it always starts its own segment: insert a "/"
	// before it when it is not already preceded by one.
	if hashIndex := strings.Index(pattern, "#"); hashIndex != -1 {
		if hashIndex == 0 {
			pattern = strings.Join([]string{"/", pattern}, "")
		} else {
			last := pattern[hashIndex-1 : hashIndex]

			if string(last[0]) != "/" {
				splits := strings.Split(pattern, "#")
				pattern = strings.Join([]string{splits[0], "/#", splits[1]}, "")
			}
		}
	}

	for _, val := range splitPattern(pattern) {
		set = append(set, Segment(val))
	}

	return set
}

//==============================================================================
// SegmentMatcher defines a single piece of pattern to be matched against.
type SegmentMatcher struct {
	*regexp.Regexp        // compiled expression the segment is validated with
	original       string // the identifier extracted from the pattern segment
	param          bool   // true when the segment captures a parameter value
	hashed         bool   // true when the segment was prefixed with "#"
}
// Segment returns a Matchable for a specific part of a pattern eg. :name, age,
// {id:[\\d+]}.
func Segment(segment string) Matchable {
	// a bare "*" is normalized to "/*" before parsing
	if segment == "*" {
		segment = "/*"
	}

	hashed := strings.HasPrefix(segment, "#")
	if hashed {
		segment = segment[1:]
	}

	// YankSpecial presumably yields the identifier, its regexp source and a
	// flag marking parameter segments — confirm against the pattern helpers.
	id, rx, b := YankSpecial(segment)
	mrk := regexp.MustCompile(rx)

	sm := SegmentMatcher{
		Regexp:   mrk,
		original: id,
		param:    b,
		hashed:   hashed,
	}

	return &sm
}
// HasHash returns true/false if this segment hash the hash.
func (s *SegmentMatcher) HasHash() bool {
	return s.hashed
}

// IsParam returns true/false if the segment is also a paramter.
func (s *SegmentMatcher) IsParam() bool {
	return s.param
}

// Segment returns the original string that makes up this segment matcher.
func (s *SegmentMatcher) Segment() string {
	return s.original
}

// Validate validates the value against the matcher.
func (s *SegmentMatcher) Validate(m string) bool {
	return s.MatchString(m)
}

//==============================================================================
package fsm
// ID is the id of the instance in a given set. It's unique in that set.
type ID uint64
// Instance is the interface that returns ID and state of the fsm instance safely.
type Instance interface {
	// ID returns the ID of the instance
	ID() ID

	// State returns the state of the instance. This is an expensive call to be submitted to queue to view
	State() Index

	// Data returns the custom data attached to the instance. It's set via the optional arg in Signal
	Data() interface{}

	// Signal signals the instance with optional custom data
	Signal(Signal, ...interface{}) error

	// CanReceive returns true if the current state of the instance can receive the given signal
	CanReceive(Signal) bool
}
// instance is the concrete Instance implementation managed by a Set. Values
// are passed around by copy; authoritative state lives in the parent Set.
type instance struct {
	id       ID
	state    Index
	data     interface{}
	parent   *Set
	error    error
	flaps    flaps
	start    Time
	deadline Time
	index    int // index used in the deadlines queue
	visits   map[Index]int
}
// ID returns the ID of the fsm instance
func (i instance) ID() ID {
	return i.id
}

// Data returns the custom data value attached to this instance
func (i instance) Data() interface{} {
	return i.data
}
// State returns the state of the fsm instance. This is an expensive call: it
// is submitted to the parent set's read queue and waits for the answer.
func (i instance) State() Index {
	// buffered so the reader callback never blocks on the send
	result := make(chan Index, 1)
	// we have to ask the set which actually holds the instance (this was returned by copy)
	i.parent.reads <- func(view Set) {
		if member, has := view.members[i.id]; has {
			result <- member.state
			return
		}
		// The instance is no longer a member of the set. The original code
		// sent nothing here, so the receive below deadlocked forever; reply
		// with the zero Index instead.
		var zero Index
		result <- zero
	}
	return <-result
}
// CanReceive returns true if the current state of the instance can receive
// the given signal according to the set's spec.
func (i instance) CanReceive(s Signal) bool {
	if _, _, err := i.parent.spec.transition(i.State(), s); err != nil {
		return false
	}
	return true
}
// Signal sends a signal (with optional custom data) to the instance by
// delegating to the parent set.
func (i instance) Signal(s Signal, optionalData ...interface{}) (err error) {
	return i.parent.Signal(s, i.id, optionalData...)
}
func (i *instance) update(next Index, now Time, ttl Tick) {
i.visits[next] = i.visits[next] + 1
i.state = next
i.start = now
if ttl > 0 {
i.deadline = now + Time(ttl)
} else {
i.deadline = 0
}
} | pkg/fsm/instance.go | 0.728652 | 0.449453 | instance.go | starcoder |
package expression
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql"
)
// Wrapper simply acts as a wrapper for another expression. If a nil expression is wrapped, then the wrapper functions
// as a guard against functions that expect non-nil expressions.
type Wrapper struct {
	inner sql.Expression // may be nil; every method guards against that case
}

var _ sql.Expression = (*Wrapper)(nil)
// WrapExpression takes in an expression and wraps it, returning the resulting Wrapper expression. Useful for when
// an expression is nil.
func WrapExpression(expr sql.Expression) *Wrapper {
	return &Wrapper{expr}
}
// WrapExpressions takes in a number of expressions and wraps each one, returning the resulting slice. Useful for when
// an expression in a slice may be nil.
func WrapExpressions(exprs ...sql.Expression) []sql.Expression {
	wrapped := make([]sql.Expression, 0, len(exprs))
	for _, expr := range exprs {
		wrapped = append(wrapped, WrapExpression(expr))
	}
	return wrapped
}
// Children implements sql.Expression. A wrapped nil has no children.
func (w *Wrapper) Children() []sql.Expression {
	if w.inner == nil {
		return nil
	}
	return []sql.Expression{w.inner}
}

// Eval implements sql.Expression. A wrapped nil evaluates to NULL (nil, nil).
func (w *Wrapper) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	if w.inner == nil {
		return nil, nil
	}
	return w.inner.Eval(ctx, row)
}

// IsNullable implements sql.Expression. A wrapped nil is always nullable.
func (w *Wrapper) IsNullable() bool {
	if w.inner == nil {
		return true
	}
	return w.inner.IsNullable()
}

// Resolved implements sql.Expression. A wrapped nil is considered resolved.
func (w *Wrapper) Resolved() bool {
	if w.inner == nil {
		return true
	}
	return w.inner.Resolved()
}
// String implements sql.Expression. A wrapped nil renders as the empty string.
func (w *Wrapper) String() string {
	if w.inner == nil {
		return ""
	}
	return fmt.Sprintf("(%s)", w.inner.String())
}

// Type implements sql.Expression. A wrapped nil has the SQL NULL type.
func (w *Wrapper) Type() sql.Type {
	if w.inner == nil {
		return sql.Null
	}
	return w.inner.Type()
}

// Unwrap returns the wrapped expression, or nil if no expression was wrapped.
func (w *Wrapper) Unwrap() sql.Expression {
	return w.inner
}

// WithChildren implements sql.Expression. Zero children recreates a nil
// wrapper; exactly one child re-wraps that child; anything else is an error.
func (w *Wrapper) WithChildren(children ...sql.Expression) (sql.Expression, error) {
	if len(children) == 0 {
		return WrapExpression(nil), nil
	} else if len(children) != 1 {
		return nil, sql.ErrInvalidChildrenNumber.New(w, len(children), 1)
	}
	return WrapExpression(children[0]), nil
}
package three
import "github.com/gopherjs/gopherjs/js"
// Quaternion - represents a Quaternion.
type Quaternion struct {
*js.Object
X float64 `js:"x"`
Y float64 `js:"y"`
Z float64 `js:"z"`
W float64 `js:"w"`
}
func NewQuaternion() Quaternion {
return Quaternion{
Object: three.Get("Quaternion").New(),
}
}
// AngleTo returns the angle between this quaternion and quaternion a in radians.
func (q Quaternion) AngleTo(a Quaternion) float64 {
	return q.Object.Call("angleTo", a).Float()
}

// Conjugate returns the rotational conjugate of this quaternion.
// The conjugate of a quaternion represents the same rotation in the opposite direction about the rotational axis.
func (q Quaternion) Conjugate() Quaternion {
	return Quaternion{Object: q.Object.Call("conjugate")}
}

// Copy copies the components of quaternion a into this quaternion.
func (q Quaternion) Copy(a Quaternion) Quaternion {
	return Quaternion{Object: q.Object.Call("copy", a)}
}

// Equals compares the x, y, z and w properties of a to the equivalent properties of this quaternion to determine if they represent the same rotation.
func (q Quaternion) Equals(a Quaternion) bool {
	return q.Object.Call("equals", a).Bool()
}

// Dot calculates the dot product of quaternion a and the receiver.
func (q Quaternion) Dot(a Quaternion) float64 {
	return q.Object.Call("dot", a).Float()
}

// Identity sets this quaternion to the identity quaternion; that is, to the quaternion that represents "no rotation".
func (q Quaternion) Identity() Quaternion {
	return Quaternion{Object: q.Object.Call("identity")}
}

// Invert inverts this quaternion - calculates the conjugate. The quaternion is assumed to have unit length.
func (q Quaternion) Invert() Quaternion {
	return Quaternion{Object: q.Object.Call("invert")}
}

// Length computes the Euclidean length (straight-line length) of this quaternion, considered as a 4 dimensional vector.
func (q Quaternion) Length() float64 {
	return q.Object.Call("length").Float()
}

// LengthSq computes the squared Euclidean length (straight-line length) of this quaternion, considered as a 4 dimensional vector.
// This can be useful if you are comparing the lengths of two quaternions, as this is a slightly more efficient calculation than length().
func (q Quaternion) LengthSq() float64 {
	return q.Object.Call("lengthSq").Float()
}

// Normalize this quaternion - that is, calculated the quaternion that performs the same rotation as this one, but has length equal to 1.
func (q Quaternion) Normalize() Quaternion {
	return Quaternion{Object: q.Object.Call("normalize")}
}

// Multiply this quaternion by a.
func (q Quaternion) Multiply(a Quaternion) Quaternion {
	return Quaternion{Object: q.Object.Call("multiply", a)}
}

// MultiplyQuaternions sets this quaternion to a x b. Adapted from the method outlined here http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/code/index.htm
func (q Quaternion) MultiplyQuaternions(a, b Quaternion) Quaternion {
	return Quaternion{Object: q.Object.Call("multiplyQuaternions", a, b)}
}

// Premultiply pre-multiplies this quaternion by a.
func (q Quaternion) Premultiply(a Quaternion) Quaternion {
	return Quaternion{Object: q.Object.Call("premultiply", a)}
}

// Random sets this quaternion to a uniformly random, normalized quaternion.
func (q Quaternion) Random() Quaternion {
	return Quaternion{Object: q.Object.Call("random")}
}

// RotateTowards rotates this quaternion by a given angular step to the defined quaternion a.
// The method ensures that the final quaternion will not overshoot a.
func (q Quaternion) RotateTowards(a Quaternion, step float64) Quaternion {
	return Quaternion{Object: q.Object.Call("rotateTowards", a, step)}
}
// Slerp Handles the spherical linear interpolation between quaternions.
// t represents the amount of rotation between this quaternion (where t is 0) and qb (where t is 1).
// This quaternion is set to the result. Also see the static version of the slerp below.
func (q Quaternion) Slerp(qb Quaternion, t float64) Quaternion {
return Quaternion{Object: q.Object.Call("slerp", qb, t)}
}
// SlerpQuaternions performs a spherical linear interpolation between the given
// quaternions qa and qb at parameter t and stores the result in this quaternion.
// The underlying call returns the receiver object, so it is wrapped and returned,
// matching the convention of every other method on Quaternion (the previous
// version discarded the call result and returned q, which refers to the same
// underlying JS object but broke the file's uniform wrapping style).
func (q Quaternion) SlerpQuaternions(qa, qb Quaternion, t float64) (this Quaternion) {
	return Quaternion{Object: q.Object.Call("slerpQuaternions", qa, qb, t)}
}
// Set sets the x, y, z, w properties of this quaternion.
func (q Quaternion) Set(x, y, z, w float64) Quaternion {
	return Quaternion{
		Object: q.Object.Call("set", x, y, z, w),
	}
}
// SetFromAxisAngle sets this quaternion from rotation specified by axis and angle.
// Adapted from the method here http://www.euclideanspace.com/maths/geometry/rotations/conversions/angleToQuaternion/index.htm
// Axis is assumed to be normalized, angle is in radians.
func (q Quaternion) SetFromAxisAngle(axis Vector3, angle float64) Quaternion {
	return Quaternion{
		Object: q.Object.Call("setFromAxisAngle", axis, angle),
	}
}
// SetFromEuler sets this quaternion from the rotation specified by Euler angle.
func (q Quaternion) SetFromEuler(euler Euler) Quaternion {
	return Quaternion{
		Object: q.Object.Call("setFromEuler", euler),
	}
}
// SetFromRotationMatrix sets this quaternion from rotation component of m.
// Adapted from the method here: http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/index.htm
// m - a Matrix4 of which the upper 3x3 of matrix is a pure rotation matrix (i.e. unscaled).
func (q Quaternion) SetFromRotationMatrix(m Matrix4) Quaternion {
	return Quaternion{
		Object: q.Object.Call("setFromRotationMatrix", m),
	}
}
// SetFromUnitVectors sets this quaternion to the rotation required to rotate direction vector vFrom to direction vector vTo.
// Adapted from the method here: http://lolengine.net/blog/2013/09/18/beautiful-maths-quaternion-from-vectors
// vFrom and vTo are assumed to be normalized.
func (q Quaternion) SetFromUnitVectors(vFrom, vTo Vector3) Quaternion {
	// Reference pseudo-code for the underlying algorithm (executed on the JS
	// side); kept here for documentation only.
	// V1
	// quat quat::fromtwovectors(vec3 u, vec3 v)
	// {
	//	 vec3 w = cross(u, v);
	//	 quat q = quat(dot(u, v), w.x, w.y, w.z);
	//	 q.w += length(q);
	//	 return normalize(q);
	// }
	// V2
	// quat quat::fromtwovectors(vec3 u, vec3 v)
	// {
	//	 float m = sqrt(2.f + 2.f * dot(u, v));
	//	 vec3 w = (1.f / m) * cross(u, v);
	//	 return quat(0.5f * m, w.x, w.y, w.z);
	// }
	return Quaternion{
		Object: q.Object.Call("setFromUnitVectors", vFrom, vTo),
	}
}
package null
// All IsSet methods below share one contract: they return the wrapped value
// together with a flag reporting whether the value was explicitly set.
// A nil receiver is safe and yields the type's zero value and false.
func (n *NullDouble) IsSet() (float64, bool) {
	if n == nil {
		return 0.0, false
	}
	return n.Value, n.Set
}
func (n *NullFloat) IsSet() (float32, bool) {
	if n == nil {
		return 0.0, false
	}
	return n.Value, n.Set
}
func (n *NullInt32) IsSet() (int32, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullInt64) IsSet() (int64, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullUint32) IsSet() (uint32, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullUint64) IsSet() (uint64, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSint32) IsSet() (int32, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSint64) IsSet() (int64, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullFixed32) IsSet() (uint32, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullFixed64) IsSet() (uint64, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSfixed32) IsSet() (int32, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSfixed64) IsSet() (int64, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullBool) IsSet() (bool, bool) {
	if n == nil {
		return false, false
	}
	return n.Value, n.Set
}
func (n *NullString) IsSet() (string, bool) {
	if n == nil {
		return "", false
	}
	return n.Value, n.Set
}
func (n *NullBytes) IsSet() ([]byte, bool) {
	if n == nil {
		return []byte{}, false
	}
	return n.Value, n.Set
}
// Nuller allows every one of these types to be treated as a single Nuller
// type. It enables batch work on mixed null wrappers, at the cost of type
// safety.
type Nuller interface {
	Interface() (interface{}, bool)
}
// All Interface methods below implement Nuller: they return the wrapped value
// boxed as interface{} plus a flag reporting whether it was set. A nil
// receiver is safe and yields the type's zero value and false.
func (n *NullDouble) Interface() (interface{}, bool) {
	if n == nil {
		return 0.0, false
	}
	return n.Value, n.Set
}
func (n *NullFloat) Interface() (interface{}, bool) {
	if n == nil {
		return 0.0, false
	}
	return n.Value, n.Set
}
func (n *NullInt32) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullInt64) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullUint32) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullUint64) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSint32) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSint64) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullFixed32) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullFixed64) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSfixed32) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullSfixed64) Interface() (interface{}, bool) {
	if n == nil {
		return 0, false
	}
	return n.Value, n.Set
}
func (n *NullBool) Interface() (interface{}, bool) {
	if n == nil {
		return false, false
	}
	return n.Value, n.Set
}
func (n *NullString) Interface() (interface{}, bool) {
	if n == nil {
		return "", false
	}
	return n.Value, n.Set
}
func (n *NullBytes) Interface() (interface{}, bool) {
	if n == nil {
		return []byte{}, false
	}
	return n.Value, n.Set
}
package gioui
import (
"image/color"
"gioui.org/f32"
"gioui.org/layout"
"gioui.org/op/clip"
"gioui.org/op/paint"
)
// Shorthand aliases for the gioui layout types used throughout this file.
type (
	D = layout.Dimensions
	C = layout.Context
)
// blackColor is opaque black; Layout uses it for the fuel-range marks.
var blackColor = color.NRGBA{R: 0, G: 0, B: 0, A: 255}
// RaceProgressBarStyle holds the state and colors for a segmented race
// progress bar widget.
type RaceProgressBarStyle struct {
	progress float32 // percentage of the race completed (0-100)
	withFuel float32 // percentage of the race reachable with fuel level (0-100)
	boxOpen  float32 // box (pit) window opening, as a race percentage
	boxClose float32 // box (pit) window closing, as a race percentage
	boxColor         color.NRGBA // bar color inside the box window
	progressColor    color.NRGBA // color of the thin current-position marker
	progressBarColor color.NRGBA // color of bars not yet reached
	textColor        color.NRGBA // color of bars already passed
}
// RaceProgressBar constructs a RaceProgressBarStyle from the given race
// progress values (percentages in the range 0-100) and colors:
//
//	progress         - percentage of the race completed
//	withFuel         - percentage of the race reachable with the current fuel
//	boxOpen/boxClose - box (pit) window start/end as race percentages
//	boxColor         - bar color inside the box window
//	progressColor    - color of the current-position marker
//	progressBarColor - color of the not-yet-reached bars
//	textColor        - color of the already-passed bars
func RaceProgressBar(progress float32,
	withFuel float32,
	boxOpen float32,
	boxClose float32,
	boxColor color.NRGBA,
	progressColor color.NRGBA,
	progressBarColor color.NRGBA,
	textColor color.NRGBA) RaceProgressBarStyle {
	return RaceProgressBarStyle{
		progress: progress,
		withFuel: withFuel,
		boxOpen:  boxOpen,
		boxClose: boxClose,
		// Bug fix: boxColor was previously dropped, leaving the box-window
		// bars at the zero value (fully transparent) in Layout.
		boxColor:         boxColor,
		progressColor:    progressColor,
		progressBarColor: progressBarColor,
		textColor:        textColor,
	}
}
// Layout draws the progress bar into gtx: a row of small vertical bars whose
// color encodes state (passed bars in textColor, upcoming bars in
// progressBarColor, bars inside the box window in boxColor), a short black
// mid-height mark on bars between the current progress and the fuel range,
// and a thin vertical marker in progressColor at the current position.
func (r RaceProgressBarStyle) Layout(gtx C) D {
	return layout.Flex{
		Alignment: layout.Middle,
	}.Layout(gtx, layout.Rigid(func(gtx C) D {
		barWidth := float32(5)
		barSpace := float32(3)
		//log.Printf("%+v", gtx.Constraints)
		w := gtx.Constraints.Max.X
		// calculate the amount of bars and distribute the space between the bars
		noBars := float32(w) / (barWidth + barSpace)
		space := (float32(w) - (noBars * barWidth)) / (noBars - 1)
		startX := float32(gtx.Constraints.Min.X)
		// convert percentage values into pixel
		progressPx := (float32(w) * (r.progress)) / float32(100)
		boxOpenPx := (float32(w) * (r.boxOpen)) / float32(100)
		boxClosePx := (float32(w) * (r.boxClose)) / float32(100)
		withFuelPx := (float32(w) * (r.withFuel)) / float32(100)
		for i := 0; i < int(noBars); i++ {
			rect := clip.RRect{
				Rect: f32.Rectangle{Min: f32.Point{X: startX, Y: 4}, Max: f32.Point{X: startX + barWidth, Y: 22}},
			}.Op(gtx.Ops)
			// show box window in different color
			if startX > boxOpenPx && startX < boxClosePx {
				paint.FillShape(gtx.Ops, r.boxColor, rect)
			} else {
				if startX < progressPx {
					paint.FillShape(gtx.Ops, r.textColor, rect)
				} else {
					paint.FillShape(gtx.Ops, r.progressBarColor, rect)
				}
			}
			// show distance with fuel
			if startX >= progressPx && startX < withFuelPx {
				rect := clip.RRect{
					Rect: f32.Rectangle{Min: f32.Point{X: startX, Y: 10}, Max: f32.Point{X: startX + barWidth, Y: 16}},
				}.Op(gtx.Ops)
				paint.FillShape(gtx.Ops, blackColor, rect)
			}
			startX += barWidth + space
		}
		// show the current-position marker in progressColor
		rect := clip.RRect{
			Rect: f32.Rectangle{Min: f32.Point{X: progressPx, Y: 0}, Max: f32.Point{X: progressPx + 3, Y: 26}},
		}.Op(gtx.Ops)
		paint.FillShape(gtx.Ops, r.progressColor, rect)
		return layout.Dimensions{Size: gtx.Constraints.Max}
	}))
}
package cmp
import (
"constraints"
"fmt"
)
// Eq captures an equality relation over values of type E.
type Eq[E any] interface {
	// Equal reports whether its two arguments are considered equal.
	Equal(E, E) bool
}
// DefaultEq implements Eq by delegating to a user-supplied function.
type DefaultEq[E any] struct {
	EqualImpl func(E, E) bool
}
// Equal applies the configured EqualImpl to a and b.
func (eq *DefaultEq[E]) Equal(a, b E) bool {
	return eq.EqualImpl(a, b)
}
// DeriveEq builds an Eq backed by Go's native `==` operator.
func DeriveEq[E comparable]() Eq[E] {
	var d derivedEq[E]
	return d
}
// derivedEq is the ==-based Eq implementation.
type derivedEq[E comparable] struct{}
// Equal compares a and b with ==.
func (derivedEq[E]) Equal(a, b E) bool {
	return a == b
}
// Ord defines order of type T. Implementations are expected to keep the
// relational predicates (Lt, Le, Gt, Ge, Eq, Ne) consistent with Compare.
type Ord[T any] interface {
	// Compare(x, y) returns:
	//	LT if x < y
	//	EQ if x == y
	//	GT if x > y
	Compare(T, T) Ordering
	// Lt(x, y) means x < y.
	Lt(T, T) bool
	// Le(x, y) means x <= y.
	Le(T, T) bool
	// Gt(x, y) means x > y.
	Gt(T, T) bool
	// Ge(x, y) means x >= y.
	Ge(T, T) bool
	// Eq(x, y) means x == y.
	Eq(T, T) bool
	// Ne(x, y) means x != y.
	Ne(T, T) bool
}
// DeriveOrd derives Ord for any naturally ordered type, backed by the
// built-in `<`, `<=`, `>`, `>=`, `==`, and `!=` operators.
func DeriveOrd[O constraints.Ordered]() Ord[O] {
	var d derivedOrd[O]
	return d
}
// derivedOrd is the operator-backed Ord implementation.
type derivedOrd[O constraints.Ordered] struct{}
// Compare performs a three-way comparison of a and b.
func (derivedOrd[O]) Compare(a, b O) Ordering {
	switch {
	case a < b:
		return LT
	case a == b:
		return EQ
	default:
		return GT
	}
}
// Lt reports a < b.
func (derivedOrd[O]) Lt(a, b O) bool {
	return a < b
}
// Le reports a <= b.
func (derivedOrd[O]) Le(a, b O) bool {
	return a <= b
}
// Gt reports a > b.
func (derivedOrd[O]) Gt(a, b O) bool {
	return a > b
}
// Ge reports a >= b.
func (derivedOrd[O]) Ge(a, b O) bool {
	return a >= b
}
// Eq reports a == b.
func (derivedOrd[O]) Eq(a, b O) bool {
	return a == b
}
// Ne reports a != b.
func (derivedOrd[O]) Ne(a, b O) bool {
	return a != b
}
// DefaultOrd implements Ord on top of a single user-supplied three-way
// comparison function; all relational predicates are derived from it.
type DefaultOrd[T any] struct {
	CompareImpl func(T, T) Ordering
}
// Compare delegates to the configured CompareImpl.
func (o *DefaultOrd[T]) Compare(a, b T) Ordering {
	return o.CompareImpl(a, b)
}
// Lt reports a < b.
func (o *DefaultOrd[T]) Lt(a, b T) bool {
	return o.Compare(a, b) == LT
}
// Le reports a <= b.
func (o *DefaultOrd[T]) Le(a, b T) bool {
	c := o.Compare(a, b)
	return c == LT || c == EQ
}
// Gt reports a > b.
func (o *DefaultOrd[T]) Gt(a, b T) bool {
	return o.Compare(a, b) == GT
}
// Ge reports a >= b.
func (o *DefaultOrd[T]) Ge(a, b T) bool {
	c := o.Compare(a, b)
	return c == GT || c == EQ
}
// Eq reports a == b.
func (o *DefaultOrd[T]) Eq(a, b T) bool {
	return o.Compare(a, b) == EQ
}
// Ne reports a != b.
func (o *DefaultOrd[T]) Ne(a, b T) bool {
	return o.Compare(a, b) != EQ
}
// Ordering is the result of a three-way comparison.
type Ordering int
const (
	// LT means the first operand is smaller.
	LT Ordering = iota
	// EQ means both operands are equal.
	EQ
	// GT means the first operand is larger.
	GT
)
// GoString renders the Ordering as its constant name.
func (o Ordering) GoString() string {
	switch o {
	case LT:
		return "LT"
	case EQ:
		return "EQ"
	case GT:
		return "GT"
	}
	return fmt.Sprintf("<unknown Ordering (%d)>", o)
}
package miner
import (
"math"
"sort"
"github.com/filecoin-project/go-bitfield"
"golang.org/x/xerrors"
)
// DeadlineSectorMap maps deadline indices to partition maps.
type DeadlineSectorMap map[uint64]PartitionSectorMap
// PartitionSectorMap maps partition indices to sector-number bitfields.
type PartitionSectorMap map[uint64]bitfield.BitField
// Check validates all bitfields and counts the number of partitions & sectors
// contained within the map, and returns an error if they exceed the given
// maximums.
func (dm DeadlineSectorMap) Check(maxPartitions, maxSectors uint64) error {
	partitionCount, sectorCount, err := dm.Count()
	if err != nil {
		return xerrors.Errorf("failed to count sectors: %w", err)
	}
	// Enforce the caller-supplied upper bounds.
	if partitionCount > maxPartitions {
		return xerrors.Errorf("too many partitions %d, max %d", partitionCount, maxPartitions)
	}
	if sectorCount > maxSectors {
		return xerrors.Errorf("too many sectors %d, max %d", sectorCount, maxSectors)
	}
	return nil
}
// Count counts the number of partitions & sectors within the map.
func (dm DeadlineSectorMap) Count() (partitions, sectors uint64, err error) {
	for dlIdx, pm := range dm { //nolint:nomaprange
		partCount, sectorCount, err := pm.Count()
		if err != nil {
			return 0, 0, xerrors.Errorf("when counting deadline %d: %w", dlIdx, err)
		}
		// Guard against uint64 overflow before accumulating.
		if partCount > math.MaxUint64-partitions {
			return 0, 0, xerrors.Errorf("uint64 overflow when counting partitions")
		}
		if sectorCount > math.MaxUint64-sectors {
			return 0, 0, xerrors.Errorf("uint64 overflow when counting sectors")
		}
		sectors += sectorCount
		partitions += partCount
	}
	return partitions, sectors, nil
}
// Add records the given sector bitfield at the given deadline/partition index,
// lazily creating the per-deadline partition map on first use. Deadline
// indices at or beyond WPoStPeriodDeadlines are rejected.
func (dm DeadlineSectorMap) Add(dlIdx, partIdx uint64, sectorNos bitfield.BitField) error {
	if dlIdx >= WPoStPeriodDeadlines {
		return xerrors.Errorf("invalid deadline %d", dlIdx)
	}
	dl, ok := dm[dlIdx]
	if !ok {
		dl = make(PartitionSectorMap)
		dm[dlIdx] = dl
	}
	return dl.Add(partIdx, sectorNos)
}
// AddValues records the given sector numbers at the given deadline/partition index.
func (dm DeadlineSectorMap) AddValues(dlIdx, partIdx uint64, sectorNos ...uint64) error {
	return dm.Add(dlIdx, partIdx, bitfield.NewFromSet(sectorNos))
}
// Deadlines returns the deadline indices present in the map, in ascending order.
func (dm DeadlineSectorMap) Deadlines() []uint64 {
	out := make([]uint64, 0, len(dm))
	for idx := range dm { //nolint:nomaprange
		out = append(out, idx)
	}
	sort.Slice(out, func(a, b int) bool { return out[a] < out[b] })
	return out
}
// ForEach invokes cb once per deadline, in ascending deadline order,
// stopping at the first error.
func (dm DeadlineSectorMap) ForEach(cb func(dlIdx uint64, pm PartitionSectorMap) error) error {
	for _, idx := range dm.Deadlines() {
		if err := cb(idx, dm[idx]); err != nil {
			return err
		}
	}
	return nil
}
// AddValues records the given sector numbers at the given partition.
func (pm PartitionSectorMap) AddValues(partIdx uint64, sectorNos ...uint64) error {
	return pm.Add(partIdx, bitfield.NewFromSet(sectorNos))
}
// Add records the given sector bitfield at the given partition index, merging
// it with any existing bitfields if necessary.
func (pm PartitionSectorMap) Add(partIdx uint64, sectorNos bitfield.BitField) error {
	// Merge with an already-registered bitfield for this partition, if any.
	if oldSectorNos, ok := pm[partIdx]; ok {
		var err error
		sectorNos, err = bitfield.MergeBitFields(sectorNos, oldSectorNos)
		if err != nil {
			return xerrors.Errorf("failed to merge sector bitfields: %w", err)
		}
	}
	pm[partIdx] = sectorNos
	return nil
}
// Count counts the number of partitions & sectors within the map.
func (pm PartitionSectorMap) Count() (partitions, sectors uint64, err error) {
	for partIdx, bf := range pm { //nolint:nomaprange
		count, err := bf.Count()
		if err != nil {
			return 0, 0, xerrors.Errorf("failed to parse bitmap for partition %d: %w", partIdx, err)
		}
		// Guard against uint64 overflow before accumulating.
		if count > math.MaxUint64-sectors {
			return 0, 0, xerrors.Errorf("uint64 overflow when counting sectors")
		}
		sectors += count
	}
	return uint64(len(pm)), sectors, nil
}
// Partitions returns a sorted slice of partitions in the map.
func (pm PartitionSectorMap) Partitions() []uint64 {
	partitions := make([]uint64, 0, len(pm))
	for partIdx := range pm { //nolint:nomaprange
		partitions = append(partitions, partIdx)
	}
	sort.Slice(partitions, func(i, j int) bool {
		return partitions[i] < partitions[j]
	})
	return partitions
}
// ForEach walks the partitions in the map, in order of increasing index.
func (pm PartitionSectorMap) ForEach(cb func(partIdx uint64, sectorNos bitfield.BitField) error) error {
	for _, partIdx := range pm.Partitions() {
		if err := cb(partIdx, pm[partIdx]); err != nil {
			return err
		}
	}
	return nil
}
package hego
import (
"errors"
"fmt"
"math"
"math/rand"
"time"
)
// PSOResult represents the results of the particle swarm optimization.
// BestParticles[i] and BestObjectives[i] record the global best position and
// objective value after each improvement; the last entries are the final best.
type PSOResult struct {
	BestParticles  [][]float64
	BestObjectives []float64
	Result
}
// PSOSettings represents settings for the particle swarm optimization.
type PSOSettings struct {
	// PopulationSize is the number of particles
	PopulationSize int
	// LearningRate determines the movement size of each particle
	LearningRate float64
	// Omega is the weight of the current velocity, a momentum
	Omega float64
	// GlobalWeight determines how much a particle should drift towards the global optimum
	GlobalWeight float64
	// ParticleWeight determines how much a particle should drift towards the best known position of this particle
	ParticleWeight float64
	Settings
}
// Verify checks the validity of the settings and returns nil if everything
// is ok; otherwise it returns an error describing the first invalid field
// (checked in declaration order).
func (s *PSOSettings) Verify() error {
	switch {
	case s.PopulationSize <= 1:
		return fmt.Errorf("population size must be greater than 1, got %v", s.PopulationSize)
	case s.LearningRate <= 0.0:
		return fmt.Errorf("learning rate must be greater than 0, got %v", s.LearningRate)
	case s.Omega < 0.0:
		return fmt.Errorf("omega should not be smaller than 0, got %v", s.Omega)
	case s.GlobalWeight < 0.0:
		return fmt.Errorf("GlobalWeight should not be smaller than 0, got %v", s.GlobalWeight)
	case s.ParticleWeight < 0.0:
		return fmt.Errorf("ParticleWeight should not be smaller than 0, got %v", s.ParticleWeight)
	case s.ParticleWeight == 0.0 && s.GlobalWeight == 0.0:
		return errors.New("when ParticleWeight and GlobalWeight are set to 0, the velocity will not change at all")
	}
	return nil
}
// PSO performs particle swarm optimization. Objective is the function to minimize, init initializes a tupe of particle and velocity, settings holds algorithm settings
func PSO(
objective func(x []float64) float64,
init func() ([]float64, []float64),
settings PSOSettings) (res PSOResult, err error) {
err = settings.Verify()
if err != nil {
err = fmt.Errorf("settings verification failed: %v", err)
return res, err
}
start := time.Now()
logger := newLogger("Particle Swarm Optimization", []string{"Iteration", "Population Mean", "Population Best"}, settings.Verbose, settings.MaxIterations)
// increase funcEvaluations counter for every call to objective
evaluate := func(x []float64) float64 {
res.FuncEvaluations++
return objective(x)
}
res.BestParticles = make([][]float64, 0, settings.MaxIterations)
res.BestObjectives = make([]float64, 0, settings.MaxIterations)
// initialize population with velocities and best known positions
particles := make([][]float64, settings.PopulationSize)
velocities := make([][]float64, settings.PopulationSize)
bestPositions := make([][]float64, settings.PopulationSize)
bestObjs := make([]float64, settings.PopulationSize)
globalBest := make([]float64, 0)
globalBestObj := math.MaxFloat64
for i := range particles {
particles[i], velocities[i] = init()
bestObjs[i] = evaluate(particles[i])
bestPositions[i] = make([]float64, len(particles[i]))
copy(bestPositions[i], particles[i])
if bestObjs[i] < globalBestObj {
globalBest = make([]float64, len(particles[i]))
copy(globalBest, particles[i])
globalBestObj = bestObjs[i]
}
}
res.BestObjectives = append(res.BestObjectives, globalBestObj)
res.BestParticles = append(res.BestParticles, globalBest)
for i := 0; i < settings.MaxIterations; i++ {
totalObj := 0.0
newGlobalBest := false
newGlobalBestParticle := make([]float64, len(globalBest))
for j, particle := range particles {
velocity := velocities[j]
for d, v := range velocity {
rp, rg := rand.Float64(), rand.Float64()
w := settings.Omega
phip, phig := settings.ParticleWeight, settings.GlobalWeight
velocity[d] = w*v + phip*rp*(bestPositions[j][d]-particle[d]) + phig*rg*(globalBest[d]-particle[d])
}
for d, p := range particle {
particle[d] = p + settings.LearningRate*velocity[d]
}
obj := evaluate(particle)
if obj < bestObjs[j] {
copy(bestPositions[j], particle)
bestObjs[j] = obj
if obj < globalBestObj {
newGlobalBest = true
copy(newGlobalBestParticle, particle)
copy(globalBest, particle)
globalBestObj = obj
}
}
totalObj += obj
}
if newGlobalBest {
next := make([]float64, len(globalBest))
copy(next, globalBest)
res.BestParticles = append(res.BestParticles, next)
res.BestObjectives = append(res.BestObjectives, globalBestObj)
}
logger.AddLine(i, []string{
fmt.Sprint(i),
fmt.Sprint(totalObj / float64(settings.PopulationSize)),
fmt.Sprint(globalBestObj),
})
}
end := time.Now()
res.Runtime = end.Sub(start)
res.Iterations = settings.MaxIterations
logger.Flush()
if settings.Verbose > 0 {
fmt.Printf("Done after %v!\n", res.Runtime)
}
return res, nil
} | particle_swarm.go | 0.564339 | 0.471771 | particle_swarm.go | starcoder |
package hashmultisets
import "sort"
// New creates a Hash Multi Set pre-populated with the given values; repeated
// values raise the stored count accordingly.
func New[T comparable](values ...T) *HashMultiSet[T] {
	s := &HashMultiSet[T]{data: make(map[T]int, len(values))}
	s.Add(values...)
	return s
}
// MultiSetPair couples a set member with its occurrence count.
type MultiSetPair[T comparable] struct {
	Key   T
	Count int
}
// HashMultiSet is a set that remembers how often each member was added.
type HashMultiSet[T comparable] struct {
	data map[T]int
}
// Merge folds the contents of the given sets into this one, summing counts.
func (s *HashMultiSet[T]) Merge(sets ...*HashMultiSet[T]) {
	for _, other := range sets {
		for _, v := range other.GetValues() {
			s.IncrementBy(v, other.GetCount(v))
		}
	}
}
// Add inserts each given value, incrementing its count by one.
func (s *HashMultiSet[T]) Add(values ...T) {
	for _, v := range values {
		s.IncrementBy(v, 1)
	}
}
// IncrementBy raises value's count by the given amount (starting from zero
// for previously unseen values).
func (s *HashMultiSet[T]) IncrementBy(value T, count int) {
	s.data[value] += count
}
// GetValues lists the distinct members of the set.
func (s *HashMultiSet[T]) GetValues() []T {
	out := make([]T, 0, s.Size())
	for k := range s.data {
		out = append(out, k)
	}
	return out
}
// Contains reports whether value is a member of the set.
func (s *HashMultiSet[T]) Contains(value T) bool {
	_, ok := s.data[value]
	return ok
}
// ContainsAll reports whether every given value is a member.
func (s *HashMultiSet[T]) ContainsAll(values ...T) bool {
	for _, v := range values {
		if !s.Contains(v) {
			return false
		}
	}
	return true
}
// ContainsAny reports whether at least one given value is a member.
func (s *HashMultiSet[T]) ContainsAny(values ...T) bool {
	for _, v := range values {
		if s.Contains(v) {
			return true
		}
	}
	return false
}
// GetCount returns the stored count for value (zero when absent).
func (s *HashMultiSet[T]) GetCount(value T) int {
	return s.data[value]
}
// Remove deletes the given values (and their counts) from the set.
func (s *HashMultiSet[T]) Remove(values ...T) {
	for _, v := range values {
		delete(s.data, v)
	}
}
// Clear resets the set to empty.
func (s *HashMultiSet[T]) Clear() {
	s.data = make(map[T]int)
}
// IsEmpty reports whether the set has no members.
func (s *HashMultiSet[T]) IsEmpty() bool {
	return s.Size() == 0
}
// Size returns the number of distinct members.
func (s *HashMultiSet[T]) Size() int {
	return len(s.data)
}
// GetTopValues returns all key/count pairs ordered by descending count; the
// sort is stable, so equal counts keep their relative (map-iteration) order.
func (s *HashMultiSet[T]) GetTopValues() []MultiSetPair[T] {
	pairs := make([]MultiSetPair[T], 0, s.Size())
	for k, c := range s.data {
		pairs = append(pairs, MultiSetPair[T]{Key: k, Count: c})
	}
	sort.SliceStable(pairs, func(a, b int) bool { return pairs[a].Count > pairs[b].Count })
	return pairs
}
package composite
import (
"context"
"image"
"math"
"runtime"
"golang.org/x/image/draw"
"golang.org/x/image/math/f64"
"github.com/oov/psd/blend"
)
// layerImage is a layer's pixel data: its color canvas and its mask, both
// stored as sparse tile maps.
type layerImage struct {
	Canvas tiledImage
	Mask   tiledMask
}
// tiledImage stores an image as a sparse map from tile origin (top-left
// point, aligned to the tile size) to the tile's pixel data.
type tiledImage map[image.Point]draw.Image
// Get returns the tile anchored at pt, allocating a fresh transparent
// tileSize x tileSize NRGBA tile on first access. The second result reports
// whether the tile already existed.
func (t tiledImage) Get(tileSize int, pt image.Point) (draw.Image, bool) {
	r, ok := t[pt]
	if !ok {
		r = image.NewNRGBA(image.Rect(pt.X, pt.Y, pt.X+tileSize, pt.Y+tileSize))
		t[pt] = r
	}
	return r, ok
}
// Rect returns the union of the bounds of all stored tiles (the zero
// rectangle when the map is empty).
func (t tiledImage) Rect() image.Rectangle {
	var r image.Rectangle
	for _, img := range t {
		r = img.Bounds().Union(r)
	}
	return r
}
// Render composites all tiles intersecting img's bounds into img, splitting
// the rows of tiles across up to GOMAXPROCS worker goroutines. Missing tiles
// are cleared to transparent. Cancellation is observed through ctx via
// parallelContext.Wait.
func (t tiledImage) Render(ctx context.Context, img draw.Image) error {
	tileSize := t.tileSize()
	if tileSize == 0 {
		return nil
	}
	rect := img.Bounds()
	// Align the start coordinates down to tile boundaries.
	x0, x1 := (rect.Min.X/tileSize)*tileSize, rect.Max.X
	y0, y1 := (rect.Min.Y/tileSize)*tileSize, rect.Max.Y
	ylen := (y1 - y0) / tileSize
	// Use fewer workers when there are not enough tile rows to go around.
	n := runtime.GOMAXPROCS(0)
	for n > 1 && n<<1 > ylen {
		n--
	}
	pc := &parallelContext{}
	pc.Wg.Add(n)
	step := (ylen / n) * tileSize
	for i := 1; i < n; i++ {
		go t.renderInner(pc, img, tileSize, x0, x1, y0, y0+step)
		y0 += step
	}
	// The final worker takes the remaining rows (including any remainder).
	go t.renderInner(pc, img, tileSize, x0, x1, y0, y1)
	return pc.Wait(ctx)
}
// renderInner draws the tile rows in [y0, y1) into img; absent tiles are
// cleared to transparent. Runs as one worker goroutine of Render.
func (t tiledImage) renderInner(pc *parallelContext, img draw.Image, tileSize, x0, x1, y0, y1 int) {
	defer pc.Done()
	for ty := y0; ty < y1; ty += tileSize {
		for tx := x0; tx < x1; tx += tileSize {
			if b, ok := t[image.Pt(tx, ty)]; ok {
				rect := b.Bounds()
				blend.Copy.Draw(img, rect, b, rect.Min)
			} else {
				blend.Clear.Draw(img, image.Rect(tx, ty, tx+tileSize, ty+tileSize), image.Transparent, image.Point{})
			}
		}
	}
}
// gammaTable holds precomputed gamma lookup tables: T8 maps an 8-bit input
// through exponent g to a 16-bit output, and T16 maps 16-bit back to 8-bit
// using the inverse exponent 1/g.
type gammaTable struct {
	T8  [256]uint16
	T16 [65536]uint8
}
// makeGammaTable precomputes the forward (exponent g) and inverse
// (exponent 1/g) gamma lookup tables.
func makeGammaTable(g float64) *gammaTable {
	gt := &gammaTable{}
	for i := 0; i < 256; i++ {
		gt.T8[i] = uint16(math.Pow(float64(i)/255, g) * 65535)
	}
	inv := 1.0 / g
	for i := 0; i < 65536; i++ {
		gt.T16[i] = uint8(math.Pow(float64(i)/65535, inv) * 255)
	}
	return gt
}
func createImage(rect image.Rectangle, r []byte, g []byte, b []byte, a []byte, deltaX int) *image.NRGBA {
if deltaX == 4 {
return &image.NRGBA{
Pix: r,
Stride: rect.Dx() * 4,
Rect: rect,
}
}
w, h := rect.Dx(), rect.Dy()
pix := make([]byte, w*4*h)
var s, d int
if a != nil {
for d < len(pix) {
if a[s] > 0 {
pix[d+3] = a[s]
pix[d+2] = b[s]
pix[d+1] = g[s]
pix[d+0] = r[s]
}
d += 4
s += deltaX
}
} else {
for d < len(pix) {
pix[d+3] = 0xff
pix[d+2] = b[s]
pix[d+1] = g[s]
pix[d+0] = r[s]
d += 4
s += deltaX
}
}
return &image.NRGBA{
Pix: pix,
Stride: w * 4,
Rect: rect,
}
}
// createImageGamma is like createImage but produces a 16-bit *image.NRGBA64,
// pushing each 8-bit color channel through the forward gamma table gt
// (8-bit index -> 16-bit value, stored big-endian). The 8-bit alpha is
// widened by duplicating the byte into both halves. a may be nil for a fully
// opaque result; fully transparent pixels are left zeroed.
func createImageGamma(rect image.Rectangle, r []byte, g []byte, b []byte, a []byte, deltaX int, gt [256]uint16) *image.NRGBA64 {
	w, h := rect.Dx(), rect.Dy()
	pix := make([]byte, w*8*h)
	var s, d int
	if a != nil {
		for d < len(pix) {
			if a[s] > 0 {
				a8, r16, g16, b16 := a[s], gt[r[s]], gt[g[s]], gt[b[s]]
				pix[d+7] = a8
				pix[d+6] = a8
				pix[d+5] = uint8(b16)
				pix[d+4] = uint8(b16 >> 8)
				pix[d+3] = uint8(g16)
				pix[d+2] = uint8(g16 >> 8)
				pix[d+1] = uint8(r16)
				pix[d+0] = uint8(r16 >> 8)
			}
			d += 8
			s += deltaX
		}
	} else {
		for d < len(pix) {
			r16, g16, b16 := gt[r[s]], gt[g[s]], gt[b[s]]
			pix[d+7] = 0xff
			pix[d+6] = 0xff
			pix[d+5] = uint8(b16)
			pix[d+4] = uint8(b16 >> 8)
			pix[d+3] = uint8(g16)
			pix[d+2] = uint8(g16 >> 8)
			pix[d+1] = uint8(r16)
			pix[d+0] = uint8(r16 >> 8)
			d += 8
			s += deltaX
		}
	}
	return &image.NRGBA64{
		Pix:    pix,
		Stride: w * 8,
		Rect:   rect,
	}
}
// restoreGamma maps each 16-bit color channel of img back to 8 bits through
// the inverse gamma table gt, writing the result into the channel's high
// byte. Low bytes and alpha are left untouched; callers subsequently read
// the high bytes only (see newScaledTiledImage, which re-slices the buffer
// with a step of 8).
func restoreGamma(img *image.NRGBA64, gt [65536]uint8) {
	pix := img.Pix
	var s int
	for s < len(pix) {
		pix[s+4] = gt[(uint16(pix[s+4])<<8)|uint16(pix[s+5])]
		pix[s+2] = gt[(uint16(pix[s+2])<<8)|uint16(pix[s+3])]
		pix[s+0] = gt[(uint16(pix[s+0])<<8)|uint16(pix[s+1])]
		s += 8
	}
}
// tileSize returns the edge length of the stored tiles (all tiles share one
// size), or 0 when the map holds no tiles.
func (t tiledImage) tileSize() int {
	for _, m := range t {
		return m.Bounds().Dx()
	}
	return 0
}
// Transform renders the tiled image into one flat buffer, applies the affine
// transform m (with optional gamma-aware interpolation through gt) and
// re-tiles the result. An empty image yields an empty tiledImage.
func (t tiledImage) Transform(ctx context.Context, m f64.Aff3, gt *gammaTable) (tiledImage, error) {
	rect := t.Rect()
	if rect.Empty() {
		return tiledImage{}, nil
	}
	tileSize := t.tileSize()
	if tileSize == 0 {
		return tiledImage{}, nil
	}
	tmp := image.NewNRGBA(rect)
	if err := t.Render(ctx, tmp); err != nil {
		return nil, err
	}
	return newScaledTiledImage(ctx, tileSize, rect, tmp.Pix[0:], tmp.Pix[1:], tmp.Pix[2:], tmp.Pix[3:], 4, m, gt)
}
// newScaledTiledImage applies the affine transform m to the given channel
// views and re-tiles the result. The identity transform tiles the input
// directly. When gt is non-nil, interpolation is performed on 16-bit
// gamma-encoded values and converted back afterwards via restoreGamma.
func newScaledTiledImage(ctx context.Context, tileSize int, rect image.Rectangle, r, g, b, a []byte, deltaX int, m f64.Aff3, gt *gammaTable) (tiledImage, error) {
	if m[0] == 1 && m[1] == 0 && m[2] == 0 && m[3] == 0 && m[4] == 1 && m[5] == 0 {
		return newTiledImage(ctx, tileSize, rect, r, g, b, a, deltaX)
	}
	if gt == nil {
		tmp := createImage(rect, r, g, b, a, deltaX)
		trRect := transformRect(rect, m)
		tmp2 := image.NewNRGBA(trRect)
		draw.BiLinear.Transform(tmp2, m, tmp, rect, draw.Src, nil)
		return newTiledImage(ctx, tileSize, trRect, tmp2.Pix[0:], tmp2.Pix[1:], tmp2.Pix[2:], tmp2.Pix[3:], 4)
	}
	tmp := createImageGamma(rect, r, g, b, a, deltaX, gt.T8)
	trRect := transformRect(rect, m)
	tmp2 := image.NewNRGBA64(trRect)
	draw.BiLinear.Transform(tmp2, m, tmp, rect, draw.Src, nil)
	restoreGamma(tmp2, gt.T16)
	return newTiledImage(ctx, tileSize, trRect, tmp2.Pix[0:], tmp2.Pix[2:], tmp2.Pix[4:], tmp2.Pix[6:], 8)
}
// newTiledImage splits the given channel views (advancing deltaX bytes per
// pixel) into tiles of tileSize, distributing bands of tile rows across up
// to GOMAXPROCS worker goroutines. Cancellation is observed through ctx.
func newTiledImage(ctx context.Context, tileSize int, rect image.Rectangle, r, g, b, a []byte, deltaX int) (tiledImage, error) {
	x0, x1 := (rect.Min.X/tileSize)*tileSize, rect.Max.X
	y0, y1 := (rect.Min.Y/tileSize)*tileSize, rect.Max.Y
	ylen := (y1 - y0) / tileSize
	// Use fewer workers when there are not enough tile rows to go around.
	n := runtime.GOMAXPROCS(0)
	for n > 1 && n<<1 > ylen {
		n--
	}
	t := tiledImage{}
	pc := &parallelContext{}
	pc.Wg.Add(n)
	step := (ylen / n) * tileSize
	for i := 1; i < n; i++ {
		go newTiledImageInner(pc, t, rect, tileSize, x0, x1, y0, y0+step, r, g, b, a, deltaX)
		y0 += step
	}
	// The final worker takes the remaining rows (including any remainder).
	go newTiledImageInner(pc, t, rect, tileSize, x0, x1, y0, y1, r, g, b, a, deltaX)
	if err := pc.Wait(ctx); err != nil {
		return nil, err
	}
	return t, nil
}
// newTiledImageInner copies one horizontal band of tile rows ([y0, y1)) out
// of the channel views r, g, b, a (advancing deltaX bytes per pixel) into
// freshly allocated tiles, registering only tiles that received at least one
// pixel. a == nil means fully opaque. Runs as one worker goroutine of
// newTiledImage; access to the shared map t is serialized with pc.M.
func newTiledImageInner(pc *parallelContext, t tiledImage, rect image.Rectangle, tileSize, x0, x1, y0, y1 int, r, g, b, a []byte, deltaX int) {
	defer pc.Done()
	rx0, ry0, rx1, ry1 := rect.Min.X, rect.Min.Y, rect.Max.X, rect.Max.Y
	sw := rect.Dx() * deltaX
	tw := tileSize << 2
	buf := make([]byte, tw*tileSize)
	for ty := y0; ty < y1; ty += tileSize {
		if pc.Aborted() {
			return
		}
		for tx := x0; tx < x1; tx += tileSize {
			// Clip the tile's x-range against the source rectangle.
			dxMin, dxMax := 0, tileSize
			sxMin := tx - rx0
			if sxMin < 0 {
				dxMin -= sxMin
				sxMin = 0
			}
			if rx0+sxMin+(dxMax-dxMin) > rx1 {
				dxMax -= rx0 + sxMin + (dxMax - dxMin) - rx1
			}
			// Clip the tile's y-range against the source rectangle.
			dyMin, dyMax := 0, tileSize
			syMin := ty - ry0
			if syMin < 0 {
				dyMin -= syMin
				syMin = 0
			}
			if ry0+syMin+(dyMax-dyMin) > ry1 {
				dyMax -= ry0 + syMin + (dyMax - dyMin) - ry1
			}
			used := false
			// Convert the clipped pixel ranges to byte offsets (4 bytes/pixel
			// in the destination tile, deltaX bytes/pixel in the source).
			sxMin = sxMin * deltaX
			dyMax = dyMax * tw
			dxMin, dxMax = dxMin<<2, dxMax<<2
			if a != nil {
				for dy, sy := dyMin*tw, syMin*sw; dy < dyMax; dy += tw {
					for dx, sx, dEnd := dy+dxMin, sy+sxMin, dy+dxMax; dx < dEnd; dx += 4 {
						// Fully transparent source pixels are skipped.
						if a[sx] > 0 {
							buf[dx+3] = a[sx]
							buf[dx+2] = b[sx]
							buf[dx+1] = g[sx]
							buf[dx+0] = r[sx]
							used = true
						}
						sx += deltaX
					}
					sy += sw
				}
			} else {
				for dy, sy := dyMin*tw, syMin*sw; dy < dyMax; dy += tw {
					for dx, sx, dEnd := dy+dxMin, sy+sxMin, dy+dxMax; dx < dEnd; dx += 4 {
						buf[dx+3] = 0xff
						buf[dx+2] = b[sx]
						buf[dx+1] = g[sx]
						buf[dx+0] = r[sx]
						used = true
						sx += deltaX
					}
					sy += sw
				}
			}
			if used {
				// Register the tile under the shared lock and start a fresh
				// buffer for the next tile.
				pc.M.Lock()
				t[image.Pt(tx, ty)] = &image.NRGBA{
					Pix:    buf,
					Stride: tileSize * 4,
					Rect:   image.Rect(tx, ty, tx+tileSize, ty+tileSize),
				}
				pc.M.Unlock()
				buf = make([]byte, tw*tileSize)
			}
		}
	}
}
// tiledMask stores an alpha mask as a sparse map from tile origin to the
// tile's alpha data.
type tiledMask map[image.Point]*image.Alpha
// Rect returns the union of the bounds of all stored mask tiles (the zero
// rectangle when the map is empty).
func (t tiledMask) Rect() image.Rectangle {
	var r image.Rectangle
	for _, img := range t {
		r = img.Rect.Union(r)
	}
	return r
}
// tileSize returns the edge length of the stored tiles, or 0 when empty.
func (t tiledMask) tileSize() int {
	for _, m := range t {
		return m.Rect.Dx()
	}
	return 0
}
func (t tiledMask) Render(ctx context.Context, img draw.Image) error {
tileSize := t.tileSize()
if tileSize == 0 {
return nil
}
rect := img.Bounds()
x0, x1 := (rect.Min.X/tileSize)*tileSize, rect.Max.X
y0, y1 := (rect.Min.Y/tileSize)*tileSize, rect.Max.Y
ylen := (y1 - y0) / tileSize
n := runtime.GOMAXPROCS(0)
for n > 1 && n<<1 > ylen {
n--
}
pc := ¶llelContext{}
pc.Wg.Add(n)
step := (ylen / n) * tileSize
for i := 1; i < n; i++ {
go t.renderInner(pc, img, tileSize, x0, x1, y0, y0+step)
y0 += step
}
go t.renderInner(pc, img, tileSize, x0, x1, y0, y1)
return pc.Wait(ctx)
}
// renderInner draws the tile rows spanning [y0, y1) into img: existing tiles
// are copied, missing tiles are cleared to transparent. It runs as one worker
// of Render and signals completion through pc.
func (t tiledMask) renderInner(pc *parallelContext, img draw.Image, tileSize, x0, x1, y0, y1 int) {
	defer pc.Done()
	for ty := y0; ty < y1; ty += tileSize {
		for tx := x0; tx < x1; tx += tileSize {
			if b, ok := t[image.Pt(tx, ty)]; ok {
				blend.Copy.Draw(img, b.Rect, b, b.Rect.Min)
			} else {
				blend.Clear.Draw(img, image.Rect(tx, ty, tx+tileSize, ty+tileSize), image.Transparent, image.Point{})
			}
		}
	}
}
func createMask(rect image.Rectangle, a []byte) *image.Alpha {
return &image.Alpha{
Pix: a,
Stride: rect.Dx(),
Rect: rect,
}
}
// Transform renders the whole mask into a single image.Alpha, applies the
// affine transform m to it, and re-tiles the result. An empty mask in or an
// empty bounding rectangle yields an empty mask out.
func (t tiledMask) Transform(ctx context.Context, m f64.Aff3) (tiledMask, error) {
	rect := t.Rect()
	if rect.Empty() {
		return tiledMask{}, nil
	}
	tileSize := t.tileSize()
	if tileSize == 0 {
		return tiledMask{}, nil
	}
	// Flatten the tiles into one contiguous alpha buffer before transforming.
	tmp := image.NewAlpha(rect)
	if err := t.Render(ctx, tmp); err != nil {
		return nil, err
	}
	return newScaledTiledMask(ctx, tileSize, rect, tmp.Pix, 0, m)
}
// newScaledTiledMask tiles the alpha buffer a (covering rect). When m is not
// the identity transform, the buffer is first resampled with a bi-linear
// affine transform into the transformed rectangle; otherwise it is tiled
// directly without any copy.
func newScaledTiledMask(ctx context.Context, tileSize int, rect image.Rectangle, a []byte, defaultColor int, m f64.Aff3) (tiledMask, error) {
	// Identity matrix: skip the resampling step entirely.
	if m[0] == 1 && m[1] == 0 && m[2] == 0 && m[3] == 0 && m[4] == 1 && m[5] == 0 {
		return newTiledMask(ctx, tileSize, rect, a, defaultColor)
	}
	tmp := createMask(rect, a)
	trRect := transformRect(rect, m)
	tmp2 := image.NewAlpha(trRect)
	// TODO: currently, it seems fallback path is used in image.Alpha.
	draw.BiLinear.Transform(tmp2, m, tmp, rect, draw.Src, nil)
	return newTiledMask(ctx, tileSize, trRect, tmp2.Pix, defaultColor)
}
func newTiledMask(ctx context.Context, tileSize int, rect image.Rectangle, a []byte, defaultColor int) (tiledMask, error) {
x0, x1 := (rect.Min.X/tileSize)*tileSize, rect.Max.X
y0, y1 := (rect.Min.Y/tileSize)*tileSize, rect.Max.Y
ylen := (y1 - y0) / tileSize
n := runtime.GOMAXPROCS(0)
for n > 1 && n<<1 > ylen {
n--
}
t := tiledMask{}
pc := ¶llelContext{}
pc.Wg.Add(n)
step := (ylen / n) * tileSize
for i := 1; i < n; i++ {
go newTiledMaskInner(pc, t, rect, tileSize, x0, x1, y0, y0+step, a, defaultColor)
y0 += step
}
go newTiledMaskInner(pc, t, rect, tileSize, x0, x1, y0, y1, a, defaultColor)
if err := pc.Wait(ctx); err != nil {
return nil, err
}
return t, nil
}
func newTiledMaskInner(pc *parallelContext, t tiledMask, rect image.Rectangle, tileSize, x0, x1, y0, y1 int, a []byte, defaultColor int) {
defer pc.Done()
rx0, ry0, rx1, ry1 := rect.Min.X, rect.Min.Y, rect.Max.X, rect.Max.Y
rw := rect.Dx()
buf := make([]byte, tileSize*tileSize)
for ty := y0; ty < y1; ty += tileSize {
if pc.Aborted() {
return
}
for tx := x0; tx < x1; tx += tileSize {
dxMin, dxMax := 0, tileSize
sxMin := tx - rx0
if sxMin < 0 {
dxMin -= sxMin
sxMin = 0
}
if rx0+sxMin+(dxMax-dxMin) > rx1 {
dxMax -= rx0 + sxMin + (dxMax - dxMin) - rx1
}
dyMin, dyMax := 0, tileSize
syMin := ty - ry0
if syMin < 0 {
dyMin -= syMin
syMin = 0
}
if ry0+syMin+(dyMax-dyMin) > ry1 {
dyMax -= ry0 + syMin + (dyMax - dyMin) - ry1
}
used := false
dyMax = dyMax * tileSize
if defaultColor == 0 {
for dy, sy := dyMin*tileSize, syMin*rw; dy < dyMax; dy += tileSize {
for dx, sx, dEnd := dy+dxMin, sy+sxMin, dy+dxMax; dx < dEnd; dx++ {
alpha := a[sx]
if alpha != 0 {
buf[dx] = alpha
used = true
}
sx++
}
sy += rw
}
} else {
for dy, sy := dyMin*tileSize, syMin*rw; dy < dyMax; dy += tileSize {
for dx, sx, dEnd := dy+dxMin, sy+sxMin, dy+dxMax; dx < dEnd; dx++ {
alpha := a[sx]
if alpha != 255 {
buf[dx] = 255 - alpha
used = true
}
sx++
}
sy += rw
}
}
if used {
pc.M.Lock()
t[image.Pt(tx, ty)] = &image.Alpha{
Pix: buf,
Stride: tileSize,
Rect: image.Rect(tx, ty, tx+tileSize, ty+tileSize),
}
pc.M.Unlock()
buf = make([]byte, tileSize*tileSize)
}
}
}
} | composite/layerimage.go | 0.532668 | 0.421016 | layerimage.go | starcoder |
package udwTime
import (
"time"
)
// ToDateString formats t as a MySQL-style date string in t's own location.
func ToDateString(t time.Time) string {
	s := t.Format(FormatDateMysql)
	return s
}
// ToDateStringInDefaultTz formats t as a MySQL-style date string after
// converting it to the package's default time zone.
func ToDateStringInDefaultTz(t time.Time) string {
	inTz := t.In(GetDefaultTimeZone())
	return inTz.Format(FormatDateMysql)
}
func ToDate(t time.Time) time.Time {
y, m, d := t.Date()
return time.Date(y, m, d, 0, 0, 0, 0, t.Location())
}
// ToDateDefault truncates t to midnight of its calendar day in the package's
// default time zone, and returns the result in that zone.
func ToDateDefault(t time.Time) time.Time {
	tz := GetDefaultTimeZone()
	year, month, day := t.In(tz).Date()
	return time.Date(year, month, day, 0, 0, 0, 0, tz)
}
// DateSub returns the duration between the midnight-truncated calendar dates
// of t1 and t2, both interpreted in loc.
func DateSub(t1 time.Time, t2 time.Time, loc *time.Location) time.Duration {
	d1 := ToDate(t1.In(loc))
	d2 := ToDate(t2.In(loc))
	return d1.Sub(d2)
}
// DateSubToDay returns the whole number of days between the
// midnight-truncated calendar dates of t1 and t2 in loc.
func DateSubToDay(t1 time.Time, t2 time.Time, loc *time.Location) int {
	diff := ToDate(t1.In(loc)).Sub(ToDate(t2.In(loc)))
	return int(diff.Hours() / 24)
}
// DateSubToHour returns the whole number of hours between the
// midnight-truncated calendar dates of t1 and t2 in loc.
// NOTE(review): because both operands are truncated to midnight before
// subtracting, the result is always a multiple of 24 — confirm this is the
// intended semantics (it ignores the time-of-day of both inputs).
func DateSubToHour(t1 time.Time, t2 time.Time, loc *time.Location) int {
	dur := ToDate(t1.In(loc)).Sub(ToDate(t2.In(loc)))
	return int(dur.Hours())
}
// DateSubLocal is DateSub evaluated in the process-local time zone.
func DateSubLocal(t1 time.Time, t2 time.Time) time.Duration {
	return DateSub(t1, t2, time.Local)
}
// IsSameDay reports whether t1 and t2 fall on the same calendar day in loc.
func IsSameDay(t1 time.Time, t2 time.Time, loc *time.Location) bool {
	diff := DateSub(t1, t2, loc)
	return diff == 0
}
// IsSameHour reports whether t1 and t2 fall within the same clock hour in
// loc, compared via their formatted date-and-hour strings.
func IsSameHour(t1 time.Time, t2 time.Time, loc *time.Location) bool {
	h1 := t1.In(loc).Format(FormatDateAndHour)
	h2 := t2.In(loc).Format(FormatDateAndHour)
	return h1 == h2
}
// IsSameMonth reports whether t1 and t2 fall within the same calendar month
// in loc.
func IsSameMonth(t1 time.Time, t2 time.Time, loc *time.Location) bool {
	m1 := ToMonth(t1, loc)
	m2 := ToMonth(t2, loc)
	return m1.Sub(m2) == 0
}
func ToMonth(t time.Time, loc *time.Location) time.Time {
y, m, _ := t.In(loc).Date()
return time.Date(y, m, 1, 0, 0, 0, 0, loc)
}
func ToMonthWithOffset(t time.Time, loc *time.Location, offset int) time.Time {
y, m, _ := t.In(loc).Date()
return time.Date(y, m+time.Month(offset), 1, 0, 0, 0, 0, loc)
}
// CountMonthLeftDay returns the number of days remaining in t's month in
// loc, counting t's own day, by stepping forward one Day at a time until the
// month changes.
// NOTE(review): stepping by a fixed Day duration may drift across DST
// transitions in loc — confirm this is acceptable for the intended use.
func CountMonthLeftDay(t time.Time, loc *time.Location) int {
	count := 0
	ot := t
	for {
		if !IsSameMonth(ot, t, loc) {
			break
		}
		count++
		ot = ot.Add(Day)
	}
	return count
}
func MonthLeftPercent(t time.Time, loc *time.Location) float64 {
count := CountMonthLeftDay(t, loc)
passed := t.In(loc).Day() - 1
return float64(count) / float64(count+passed)
} | udwTime/date.go | 0.747247 | 0.560493 | date.go | starcoder |
package vision
// InceptionV3
import (
"github.com/sugarme/gotch/nn"
ts "github.com/sugarme/gotch/tensor"
)
// convBn builds a square-kernel Conv2d (bias disabled) followed by
// BatchNorm2d (eps 0.001) and a ReLU activation, registered under the
// sub-paths "conv" and "bn" of p.
func convBn(p *nn.Path, cIn, cOut, ksize, pad, stride int64) ts.ModuleT {
	convConfig := nn.DefaultConv2DConfig()
	convConfig.Stride = []int64{stride, stride}
	convConfig.Padding = []int64{pad, pad}
	convConfig.Bias = false
	bnConfig := nn.DefaultBatchNormConfig()
	bnConfig.Eps = 0.001
	seq := nn.SeqT()
	convP := p.Sub("conv")
	seq.Add(nn.NewConv2D(convP, cIn, cOut, ksize, convConfig))
	seq.Add(nn.BatchNorm2D(p.Sub("bn"), cOut, bnConfig))
	seq.AddFn(nn.NewFunc(func(xs *ts.Tensor) *ts.Tensor {
		return xs.MustRelu(false)
	}))
	return seq
}
// convBn2 is the rectangular-kernel variant of convBn: Conv2d with an
// explicit [h, w] kernel and padding (default stride), bias disabled,
// followed by BatchNorm2d (eps 0.001) and a ReLU activation.
func convBn2(p *nn.Path, cIn, cOut int64, ksize []int64, pad []int64) ts.ModuleT {
	convConfig := nn.DefaultConv2DConfig()
	convConfig.Padding = pad
	convConfig.Bias = false
	bnConfig := nn.DefaultBatchNormConfig()
	bnConfig.Eps = 0.001
	seq := nn.SeqT()
	seq.Add(nn.NewConv(p.Sub("conv"), cIn, cOut, ksize, convConfig).(*nn.Conv2D))
	seq.Add(nn.BatchNorm2D(p.Sub("bn"), cOut, bnConfig))
	seq.AddFn(nn.NewFunc(func(xs *ts.Tensor) *ts.Tensor {
		return xs.MustRelu(false)
	}))
	return seq
}
// inMaxPool2D applies a ksize x ksize max-pool with the given stride,
// no padding, dilation 1, and ceil_mode disabled.
func inMaxPool2D(xs *ts.Tensor, ksize, stride int64) *ts.Tensor {
	return xs.MustMaxPool2d([]int64{ksize, ksize}, []int64{stride, stride}, []int64{0, 0}, []int64{1, 1}, false, false)
}
// inceptionA builds an Inception "A" block with four parallel branches:
// 1x1 conv; 1x1 -> 5x5 convs; 1x1 -> 3x3 -> 3x3 convs; and a 3x3 average
// pool followed by a 1x1 conv producing cPool channels. The branch outputs
// are concatenated along dimension 1 (channels).
func inceptionA(p *nn.Path, cIn, cPool int64) ts.ModuleT {
	b1 := convBn(p.Sub("branch1x1"), cIn, 64, 1, 0, 1)
	b21 := convBn(p.Sub("branch5x5_1"), cIn, 48, 1, 0, 1)
	b22 := convBn(p.Sub("branch5x5_2"), 48, 64, 5, 2, 1)
	b31 := convBn(p.Sub("branch3x3dbl_1"), cIn, 64, 1, 0, 1)
	b32 := convBn(p.Sub("branch3x3dbl_2"), 64, 96, 3, 1, 1)
	b33 := convBn(p.Sub("branch3x3dbl_3"), 96, 96, 3, 1, 1)
	bpool := convBn(p.Sub("branch_pool"), cIn, cPool, 1, 0, 1)
	return nn.NewFuncT(func(xs *ts.Tensor, train bool) *ts.Tensor {
		b1Ts := xs.ApplyT(b1, train)
		b2Tmp := xs.ApplyT(b21, train)
		b2Ts := b2Tmp.ApplyT(b22, train)
		// Free intermediate tensors eagerly.
		b2Tmp.MustDrop()
		b3Tmp1 := xs.ApplyT(b31, train)
		b3Tmp2 := b3Tmp1.ApplyT(b32, train)
		b3Tmp1.MustDrop()
		b3Ts := b3Tmp2.ApplyT(b33, train)
		b3Tmp2.MustDrop()
		bpoolTmp := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, []int64{9}, false)
		bpoolTs := bpoolTmp.ApplyT(bpool, train)
		res := ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *b3Ts, *bpoolTs}, 1)
		return res
	})
}
// inceptionB builds an Inception "B" (grid-reduction) block with three
// parallel branches: a stride-2 3x3 conv; 1x1 -> 3x3 -> stride-2 3x3 convs;
// and a stride-2 3x3 max-pool. The outputs are concatenated on dimension 1.
func inceptionB(p *nn.Path, cIn int64) ts.ModuleT {
	b1 := convBn(p.Sub("branch3x3"), cIn, 384, 3, 0, 2)
	b21 := convBn(p.Sub("branch3x3dbl_1"), cIn, 64, 1, 0, 1)
	b22 := convBn(p.Sub("branch3x3dbl_2"), 64, 96, 3, 1, 1)
	b23 := convBn(p.Sub("branch3x3dbl_3"), 96, 96, 3, 0, 2)
	return nn.NewFuncT(func(xs *ts.Tensor, train bool) *ts.Tensor {
		b1Ts := xs.ApplyT(b1, train)
		b2Tmp1 := xs.ApplyT(b21, train)
		b2Tmp2 := b2Tmp1.ApplyT(b22, train)
		// Free intermediate tensors eagerly.
		b2Tmp1.MustDrop()
		b2Ts := b2Tmp2.ApplyT(b23, train)
		b2Tmp2.MustDrop()
		bpoolTs := inMaxPool2D(xs, 3, 2)
		res := ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *bpoolTs}, 1)
		return res
	})
}
// inceptionC builds an Inception "C" block using factorized 7x7
// convolutions (1x7 and 7x1 pairs) with c7 intermediate channels, plus a
// 1x1 branch and an average-pool branch. The four branch outputs are
// concatenated on dimension 1.
func inceptionC(p *nn.Path, cIn int64, c7 int64) ts.ModuleT {
	b1 := convBn(p.Sub("branch1x1"), cIn, 192, 1, 0, 1)
	b21 := convBn(p.Sub("branch7x7_1"), cIn, c7, 1, 0, 1)
	b22 := convBn2(p.Sub("branch7x7_2"), c7, c7, []int64{1, 7}, []int64{0, 3})
	b23 := convBn2(p.Sub("branch7x7_3"), c7, 192, []int64{7, 1}, []int64{3, 0})
	b31 := convBn(p.Sub("branch7x7dbl_1"), cIn, c7, 1, 0, 1)
	b32 := convBn2(p.Sub("branch7x7dbl_2"), c7, c7, []int64{7, 1}, []int64{3, 0})
	b33 := convBn2(p.Sub("branch7x7dbl_3"), c7, c7, []int64{1, 7}, []int64{0, 3})
	b34 := convBn2(p.Sub("branch7x7dbl_4"), c7, c7, []int64{7, 1}, []int64{3, 0})
	b35 := convBn2(p.Sub("branch7x7dbl_5"), c7, 192, []int64{1, 7}, []int64{0, 3})
	bpool := convBn(p.Sub("branch_pool"), cIn, 192, 1, 0, 1)
	return nn.NewFuncT(func(xs *ts.Tensor, train bool) *ts.Tensor {
		b1Ts := xs.ApplyT(b1, train)
		b2Tmp1 := xs.ApplyT(b21, train)
		b2Tmp2 := b2Tmp1.ApplyT(b22, train)
		// Free intermediate tensors eagerly.
		b2Tmp1.MustDrop()
		b2Ts := b2Tmp2.ApplyT(b23, train)
		b2Tmp2.MustDrop()
		b3Tmp1 := xs.ApplyT(b31, train)
		b3Tmp2 := b3Tmp1.ApplyT(b32, train)
		b3Tmp1.MustDrop()
		b3Tmp3 := b3Tmp2.ApplyT(b33, train)
		b3Tmp2.MustDrop()
		b3Tmp4 := b3Tmp3.ApplyT(b34, train)
		b3Tmp3.MustDrop()
		b3Ts := b3Tmp4.ApplyT(b35, train)
		b3Tmp4.MustDrop()
		bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, []int64{9}, false)
		bpoolTs := bpTmp1.ApplyT(bpool, train)
		return ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *b3Ts, *bpoolTs}, 1)
	})
}
// inceptionD builds an Inception "D" (grid-reduction) block with three
// parallel branches: 1x1 -> stride-2 3x3 convs; 1x1 -> 1x7 -> 7x1 ->
// stride-2 3x3 convs; and a stride-2 3x3 max-pool. The outputs are
// concatenated on dimension 1.
func inceptionD(p *nn.Path, cIn int64) ts.ModuleT {
	b11 := convBn(p.Sub("branch3x3_1"), cIn, 192, 1, 0, 1)
	b12 := convBn(p.Sub("branch3x3_2"), 192, 320, 3, 0, 2)
	b21 := convBn(p.Sub("branch7x7x3_1"), cIn, 192, 1, 0, 1)
	b22 := convBn2(p.Sub("branch7x7x3_2"), 192, 192, []int64{1, 7}, []int64{0, 3})
	b23 := convBn2(p.Sub("branch7x7x3_3"), 192, 192, []int64{7, 1}, []int64{3, 0})
	b24 := convBn(p.Sub("branch7x7x3_4"), 192, 192, 3, 0, 2)
	return nn.NewFuncT(func(xs *ts.Tensor, train bool) *ts.Tensor {
		b1Tmp := xs.ApplyT(b11, train)
		b1Ts := b1Tmp.ApplyT(b12, train)
		// Free intermediate tensors eagerly.
		b1Tmp.MustDrop()
		b2Tmp1 := xs.ApplyT(b21, train)
		b2Tmp2 := b2Tmp1.ApplyT(b22, train)
		b2Tmp1.MustDrop()
		b2Tmp3 := b2Tmp2.ApplyT(b23, train)
		b2Tmp2.MustDrop()
		b2Ts := b2Tmp3.ApplyT(b24, train)
		b2Tmp3.MustDrop()
		bpoolTs := inMaxPool2D(xs, 3, 2)
		return ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *bpoolTs}, 1)
	})
}
// inceptionE builds an Inception "E" block: a 1x1 branch; a 1x1 conv that
// fans out into parallel 1x3 and 3x1 convs (concatenated); a 1x1 -> 3x3
// conv that likewise fans out into 1x3/3x1 halves; and an average-pool ->
// 1x1 branch. All results are concatenated on dimension 1.
// NOTE(review): unlike the other inception blocks, b2Tmp and b3Tmp2 (and
// the per-half outputs) are not MustDrop'ped here — confirm whether this
// leaks tensors.
func inceptionE(p *nn.Path, cIn int64) ts.ModuleT {
	b1 := convBn(p.Sub("branch1x1"), cIn, 320, 1, 0, 1)
	b21 := convBn(p.Sub("branch3x3_1"), cIn, 384, 1, 0, 1)
	b22a := convBn2(p.Sub("branch3x3_2a"), 384, 384, []int64{1, 3}, []int64{0, 1})
	b22b := convBn2(p.Sub("branch3x3_2b"), 384, 384, []int64{3, 1}, []int64{1, 0})
	b31 := convBn(p.Sub("branch3x3dbl_1"), cIn, 448, 1, 0, 1)
	b32 := convBn(p.Sub("branch3x3dbl_2"), 448, 384, 3, 1, 1)
	b33a := convBn2(p.Sub("branch3x3dbl_3a"), 384, 384, []int64{1, 3}, []int64{0, 1})
	b33b := convBn2(p.Sub("branch3x3dbl_3b"), 384, 384, []int64{3, 1}, []int64{1, 0})
	bpool := convBn(p.Sub("branch_pool"), cIn, 192, 1, 0, 1)
	return nn.NewFuncT(func(xs *ts.Tensor, train bool) *ts.Tensor {
		b1Ts := xs.ApplyT(b1, train)
		b2Tmp := xs.ApplyT(b21, train)
		b2aTs := b2Tmp.ApplyT(b22a, train)
		b2bTs := b2Tmp.ApplyT(b22b, train)
		b2Ts := ts.MustCat([]ts.Tensor{*b2aTs, *b2bTs}, 1)
		b3Tmp1 := xs.ApplyT(b31, train)
		b3Tmp2 := b3Tmp1.ApplyT(b32, train)
		b3Tmp1.MustDrop()
		b3aTs := b3Tmp2.ApplyT(b33a, train)
		b3bTs := b3Tmp2.ApplyT(b33b, train)
		b3Ts := ts.MustCat([]ts.Tensor{*b3aTs, *b3bTs}, 1)
		bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, []int64{9}, false)
		bpoolTs := bpTmp1.ApplyT(bpool, train)
		return ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *b3Ts, *bpoolTs}, 1)
	})
}
func InceptionV3(p *nn.Path, nclasses int64) ts.ModuleT {
seq := nn.SeqT()
seq.Add(convBn(p.Sub("Conv2d_1a_3x3"), 3, 32, 3, 0, 2))
seq.Add(convBn(p.Sub("Conv2d_2a_3x3"), 32, 32, 3, 0, 1))
seq.Add(convBn(p.Sub("Conv2d_2b_3x3"), 32, 64, 3, 1, 1))
seq.AddFn(nn.NewFunc(func(xs *ts.Tensor) *ts.Tensor {
tmp := xs.MustRelu(false)
res := inMaxPool2D(tmp, 3, 2)
tmp.MustDrop()
return res
}))
seq.Add(convBn(p.Sub("Conv2d_3b_1x1"), 64, 80, 1, 0, 1))
seq.Add(convBn(p.Sub("Conv2d_4a_3x3"), 80, 192, 3, 0, 1))
seq.AddFn(nn.NewFunc(func(xs *ts.Tensor) *ts.Tensor {
tmp := xs.MustRelu(false)
res := inMaxPool2D(tmp, 3, 2)
tmp.MustDrop()
return res
}))
seq.Add(inceptionA(p.Sub("Mixed_5b"), 192, 32))
seq.Add(inceptionA(p.Sub("Mixed_5c"), 256, 64))
seq.Add(inceptionA(p.Sub("Mixed_5d"), 288, 64))
seq.Add(inceptionB(p.Sub("Mixed_6a"), 288))
seq.Add(inceptionC(p.Sub("Mixed_6b"), 768, 128))
seq.Add(inceptionC(p.Sub("Mixed_6c"), 768, 160))
seq.Add(inceptionC(p.Sub("Mixed_6d"), 768, 160))
seq.Add(inceptionC(p.Sub("Mixed_6e"), 768, 192))
seq.Add(inceptionD(p.Sub("Mixed_7a"), 768))
seq.Add(inceptionE(p.Sub("Mixed_7b"), 1280))
seq.Add(inceptionE(p.Sub("Mixed_7c"), 2048))
seq.AddFnT(nn.NewFuncT(func(xs *ts.Tensor, train bool) *ts.Tensor {
tmp1 := xs.MustAdaptiveAvgPool2d([]int64{1, 1}, false)
tmp2 := ts.MustDropout(tmp1, 0.5, train)
tmp1.MustDrop()
res := tmp2.FlatView()
return res
}))
seq.Add(nn.NewLinear(p.Sub("fc"), 2048, nclasses, nn.DefaultLinearConfig()))
return seq
} | vision/inception.go | 0.816077 | 0.475423 | inception.go | starcoder |
package perfcounters
import (
"fmt"
"sync"
"time"
)
/*
AverageTimer32
An average counter that measures the time it takes, on average, to complete a process or operation. Counters of this type display a ratio of the total elapsed time of the sample interval to the
number of processes or operations completed during that time. This counter type measures time in ticks of the system clock.
Formula: ((N 1 -N 0)/F)/(B 1 -B 0), where N 1 and N 0 are performance counter readings, B 1 and B 0 are their corresponding AverageBase values, and F is the number of ticks per second.
The value of F is factored into the equation so that the result can be displayed in seconds. Thus, the numerator represents the numbers of ticks counted during the last sample interval, F
represents the frequency of the ticks, and the denominator represents the number of operations completed during the last sample interval.
Counters of this type include PhysicalDisk\ Avg. Disk sec/Transfer.
[[source: https://msdn.microsoft.com/en-us/library/system.diagnostics.performancecountertype(v=vs.90).aspx]]
*/
type AverageTimer32 struct {
	lastTime    time.Time  // snapshot taken at the previous CalculatedValue call
	lastBase    int32      // operation count at the previous snapshot
	currentTime time.Time  // creation time plus all durations recorded via Add
	currentBase int32      // total operations recorded via Add
	mu          sync.Mutex // guards all fields above
}
// NewAverageTimer32 returns a timer whose last and current snapshots are
// both initialized to the present moment, with zero operations recorded.
func NewAverageTimer32() *AverageTimer32 {
	now := time.Now()
	timer := &AverageTimer32{}
	timer.lastTime = now
	timer.currentTime = now
	return timer
}
// Add records one completed operation that took the given duration.
func (self *AverageTimer32) Add(duration time.Duration) {
	self.mu.Lock()
	defer self.mu.Unlock()
	self.currentBase++
	self.currentTime = self.currentTime.Add(duration)
}
// CalculatedValue returns the average time per operation recorded since the
// previous call (or since creation), then advances the snapshot so the next
// call measures a fresh interval. Returns 0 when no operations were
// recorded in the interval.
// NOTE(review): Nanoseconds()/1e6 is an integer division (milliseconds),
// and the leading minus sign makes the returned average negative when
// durations were added — confirm both are intended.
func (self *AverageTimer32) CalculatedValue() float64 {
	self.mu.Lock()
	defer self.mu.Unlock()
	lastTime := self.lastTime
	currentTime := self.currentTime
	lastBase := self.lastBase
	currentBase := self.currentBase
	if currentBase == 0 || currentBase-lastBase == 0 {
		return 0
	}
	calculatedValue := float64(-(currentTime.Sub(lastTime).Nanoseconds() / 1e6) / int64(currentBase-lastBase))
	// Advance the snapshot for the next sampling interval.
	self.lastTime = currentTime
	self.lastBase = currentBase
	return calculatedValue
}
func (self *AverageTimer32) String() string {
return fmt.Sprintf("%.3f", self.CalculatedValue())
} | perfcounters/averagetimer32.go | 0.81899 | 0.47792 | averagetimer32.go | starcoder |
package gubernator
// PERSISTENT STORE DETAILS
// The storage interfaces defined here allows the implementor flexibility in storage options. Depending on the
// use case an implementor can only implement the `Loader` interface and only support persistence of
// ratelimits at startup and shutdown or implement `Store` and gubernator will continuously call `OnChange()`
// and `Get()` to keep the in memory cache and persistent store up to date with the latest ratelimit data.
// Both interfaces can be implemented simultaneously to ensure data is always saved to persistent storage.
// LeakyBucketItem is the persisted state of a leaky-bucket rate limit.
type LeakyBucketItem struct {
	Limit     int64 // maximum number of requests in the bucket
	Duration  int64 // length of the rate limit window
	Remaining int64 // requests still available in the current window
	TimeStamp int64 // last update time of the bucket
}
// Store interface allows implementors to off load storage of all or a subset of ratelimits to
// some persistent store. Methods OnChange() and Get() should avoid blocking as much as possible as these
// methods are called on every rate limit request and will effect the performance of gubernator.
type Store interface {
	// OnChange is called by gubernator when a rate limit item is updated. It's up to the store to
	// decide if this rate limit item should be persisted in the store. It's up to the
	// store to expire old rate limit items.
	OnChange(r *RateLimitReq, item *CacheItem)
	// Get is called by gubernator when a rate limit is missing from the cache. It's up to the store
	// to decide if this request is fulfilled. Should return true if the request is fulfilled
	// and false if the request is not fulfilled or doesn't exist in the store.
	Get(r *RateLimitReq) (*CacheItem, bool)
	// Remove is called by gubernator when an existing rate limit should be removed from the store.
	// NOTE: This is NOT called when an rate limit expires from the cache, store implementors
	// must expire rate limits in the store.
	Remove(key string)
}
// Loader interface allows implementors to store all or a subset of ratelimits into a persistent
// store during startup and shutdown of the gubernator instance.
type Loader interface {
	// Load is called by gubernator just before the instance is ready to accept requests. The implementation
	// should return a channel gubernator can read to load all rate limits that should be loaded into the
	// instance cache. The implementation should close the channel to indicate no more rate limits left to load.
	Load() (chan *CacheItem, error)
	// Save is called by gubernator just before the instance is shutdown. The passed channel should be
	// read until the channel is closed.
	Save(chan *CacheItem) error
}
// NewMockStore returns a MockStore with zeroed call counters for
// OnChange(), Remove() and Get(), and an empty item map.
func NewMockStore() *MockStore {
	return &MockStore{
		Called: map[string]int{
			"OnChange()": 0,
			"Remove()":   0,
			"Get()":      0,
		},
		CacheItems: make(map[string]*CacheItem),
	}
}
// MockStore is an in-memory Store implementation for tests. It records how
// often each method was invoked in Called.
type MockStore struct {
	Called     map[string]int        // per-method invocation counters
	CacheItems map[string]*CacheItem // items keyed by CacheItem.Key / HashKey
}

// Compile-time check that MockStore satisfies Store.
var _ Store = &MockStore{}
// OnChange implements Store; it counts the call and stores item by its key.
func (ms *MockStore) OnChange(r *RateLimitReq, item *CacheItem) {
	ms.Called["OnChange()"]++
	ms.CacheItems[item.Key] = item
}
// Get implements Store; it counts the call and looks the item up by the
// request's hash key.
func (ms *MockStore) Get(r *RateLimitReq) (*CacheItem, bool) {
	ms.Called["Get()"]++
	item, ok := ms.CacheItems[r.HashKey()]
	return item, ok
}
// Remove implements Store; it counts the call and deletes the item.
func (ms *MockStore) Remove(key string) {
	ms.Called["Remove()"]++
	delete(ms.CacheItems, key)
}
// NewMockLoader returns a MockLoader with zeroed call counters for Load()
// and Save().
func NewMockLoader() *MockLoader {
	return &MockLoader{
		Called: map[string]int{
			"Load()": 0,
			"Save()": 0,
		},
	}
}
// MockLoader is an in-memory Loader implementation for tests. It records how
// often each method was invoked in Called.
type MockLoader struct {
	Called     map[string]int // per-method invocation counters
	CacheItems []*CacheItem   // items served by Load and collected by Save
}

// Compile-time check that MockLoader satisfies Loader.
var _ Loader = &MockLoader{}
// Load implements Loader; it counts the call and streams the stored items
// over a buffered channel, closing it once all items have been sent.
func (ml *MockLoader) Load() (chan *CacheItem, error) {
	ml.Called["Load()"]++
	out := make(chan *CacheItem, 10)
	go func() {
		defer close(out)
		for _, item := range ml.CacheItems {
			out <- item
		}
	}()
	return out, nil
}
func (ml *MockLoader) Save(in chan *CacheItem) error {
ml.Called["Save()"] += 1
for i := range in {
ml.CacheItems = append(ml.CacheItems, i)
}
return nil
} | store.go | 0.759136 | 0.437523 | store.go | starcoder |
package kdtree
import (
"geo"
"graph"
)
// Parameters for the encoding that is used for storing k-d trees space efficient
// Encoded vertex: vertex index + MaxEdgeOffset + MaxStepOffset
// Encoded step: vertex index + edge offset + step offset
const (
	VertexIndexBits = 18 // bits reserved for the vertex index
	EdgeOffsetBits = 5 // bits reserved for the edge offset
	StepOffsetBits = 11 // bits reserved for the step offset
	TotalBits = VertexIndexBits + EdgeOffsetBits + StepOffsetBits // 34 bits per encoded step
	TypeSize = 64 // bit width of the uint64 words backing EncodedSteps
	MaxVertexIndex = (1 << VertexIndexBits) - 1 // also the sentinel for "no edge/step offset"
	MaxEdgeOffset = (1 << EdgeOffsetBits) - 1
	MaxStepOffset = (1 << StepOffsetBits) - 1
)
// KdTree is a k-d tree over graph steps; each point pairs a coordinate with
// an encoded step packed into the bit-compressed EncodedSteps array.
type KdTree struct {
	Graph              *graph.GraphFile
	EncodedSteps       []uint64 // packed TotalBits-wide records, see EncodedStep
	Coordinates        []geo.Coordinate
	EncodedCoordinates []int32
	// It is inefficient to create a sub slice of EncodedSteps due to the used encoding.
	// Thus, we use start and end pointer instead.
	EncodedStepsStart int
	EncodedStepsEnd   int
}
// ClusterKdTree groups one k-d tree per cluster plus an overlay tree, with
// one bounding box per cluster.
type ClusterKdTree struct {
	Overlay *KdTree
	Cluster []*KdTree
	BBoxes  []geo.BBox
}
// Len returns the number of points stored in the tree (sort.Interface).
func (t *KdTree) Len() int {
	return len(t.Coordinates)
}
// Swap exchanges both the coordinates and the encoded steps at positions
// i and j (sort.Interface).
func (t *KdTree) Swap(i, j int) {
	t.Coordinates[i], t.Coordinates[j] = t.Coordinates[j], t.Coordinates[i]
	stepI := t.EncodedStep(i)
	stepJ := t.EncodedStep(j)
	t.SetEncodedStep(i, stepJ)
	t.SetEncodedStep(j, stepI)
}
// EncodedStepLen returns the number of encoded steps. When an explicit
// start/end window is set (EncodedStepsEnd > 0) the window size is used;
// otherwise the length is derived from the packed storage, trimming a
// trailing all-ones record, which marks an unused padding slot (see
// AppendEncodedStep).
func (t *KdTree) EncodedStepLen() int {
	if t.EncodedStepsEnd > 0 {
		return t.EncodedStepsEnd - t.EncodedStepsStart + 1
	}
	l := (len(t.EncodedSteps) * TypeSize) / TotalBits
	if l > 0 && t.EncodedStep(l-1) == (1<<TotalBits)-1 {
		return l - 1
	}
	return l
}
// EncodedStep extracts the i-th TotalBits-wide record (relative to
// EncodedStepsStart) from the packed uint64 array. A record may span the
// boundary between two adjacent words, in which case its high bits come
// from the first word and its low bits from the second.
func (t *KdTree) EncodedStep(i int) uint64 {
	index := (t.EncodedStepsStart + i) * TotalBits / TypeSize
	offset := (t.EncodedStepsStart + i) * TotalBits % TypeSize
	if offset+TotalBits <= TypeSize {
		// contained in one uint64
		mask := (uint64(1) << TotalBits) - 1
		return (t.EncodedSteps[index] >> uint(offset)) & mask
	}
	// split over two uint64
	first := uint(TypeSize - offset)  // bits taken from the first word
	second := uint(TotalBits - first) // bits taken from the second word
	fMask := ((uint64(1) << first) - 1)
	result := ((t.EncodedSteps[index] >> uint(offset)) & fMask) << second
	sMask := (uint64(1) << second) - 1
	result |= t.EncodedSteps[index+1] & sMask
	return result
}
// SetEncodedStep stores s as the i-th TotalBits-wide record (relative to
// EncodedStepsStart), clearing the record's previous bits first. Like
// EncodedStep, the record may be split over two adjacent uint64 words.
func (t *KdTree) SetEncodedStep(i int, s uint64) {
	index := (t.EncodedStepsStart + i) * TotalBits / TypeSize
	offset := (t.EncodedStepsStart + i) * TotalBits % TypeSize
	if offset+TotalBits <= TypeSize {
		// contained in one uint64
		mask := (uint64(1) << TotalBits) - 1
		t.EncodedSteps[index] ^= t.EncodedSteps[index] & (mask << uint(offset))
		t.EncodedSteps[index] |= s << uint(offset)
	} else {
		// split over two uint64
		first := uint(TypeSize - offset)  // bits stored in the first word
		second := uint(TotalBits - first) // bits stored in the second word
		fMask := (uint64(1) << first) - 1
		t.EncodedSteps[index] ^= t.EncodedSteps[index] & (fMask << uint(offset))
		t.EncodedSteps[index] |= (s >> second) << uint(offset)
		sMask := (uint64(1) << second) - 1
		t.EncodedSteps[index+1] ^= t.EncodedSteps[index+1] & sMask
		t.EncodedSteps[index+1] |= s & sMask
	}
}
func (t *KdTree) AppendEncodedStep(s uint64) {
l := t.EncodedStepLen()
index := l * TotalBits / TypeSize
offset := l * TotalBits % TypeSize
if index >= len(t.EncodedSteps) {
t.EncodedSteps = append(t.EncodedSteps, (1<<64)-1)
}
if offset+TotalBits >= TypeSize && index+1 >= len(t.EncodedSteps) {
t.EncodedSteps = append(t.EncodedSteps, (1<<64)-1)
}
t.SetEncodedStep(l, s)
} | src/kdtree/kdtree.go | 0.530236 | 0.47317 | kdtree.go | starcoder |
package raylib
import "math"
//https://github.com/raysan5/raylib/blob/master/src/raymath.h
//Quaternion A represntation of rotations that does not suffer from gimbal lock
//Quaternion A represntation of rotations that does not suffer from gimbal lock.
//X, Y and Z form the vector part; W is the scalar part.
type Quaternion struct {
	X float32
	Y float32
	Z float32
	W float32
}
//NewQuaternionIdentity creates a Quaternion Identity (a blank quaternion)
func NewQuaternionIdentity() Quaternion {
	return Quaternion{W: 1}
}
//NewQuaternionVector3ToVector3 creates a quaternion that is the angle between 2 vectors.
//The dot product supplies the scalar part and the cross product the vector
//part; the result is normalized before returning.
func NewQuaternionVector3ToVector3(from, too Vector3) Quaternion {
	cos2theta := from.DotProduct(too)
	cross := from.CrossProduct(too)
	return Quaternion{X: cross.X, Y: cross.Y, Z: cross.Z, W: 1 + cos2theta}.Normalize()
}
//NewQuaternionFromAxisAngle creates a quaternion from an axis and its rotation
//(in radians). The half-angle is used only when the axis is non-zero, the
//axis is normalized, and the resulting quaternion is normalized as well.
func NewQuaternionFromAxisAngle(axis Vector3, angle float32) Quaternion {
	if axis.Length() != 0 {
		angle *= 0.5
	}
	axis = axis.Normalize()
	sinres := float32(math.Sin(float64(angle)))
	cosres := float32(math.Cos(float64(angle)))
	return Quaternion{X: axis.X * sinres, Y: axis.Y * sinres, Z: axis.Z * sinres, W: cosres}.Normalize()
}
//NewQuaternionFromMatrix creates a Quaternion from a rotation matrix using
//the trace-based conversion: the branch is chosen from the largest of the
//trace and the three diagonal elements for numerical stability.
//NOTE(review): the off-diagonal element pairings (e.g. M6-M9) assume a
//specific matrix storage order — confirm against the package's Matrix layout.
func NewQuaternionFromMatrix(mat Matrix) Quaternion {
	var s float32
	var invS float32
	trace := mat.Trace()
	if trace > 0 {
		// Trace dominant: w is the largest component.
		s = float32(math.Sqrt(float64(trace+1)) * 2)
		invS = 1 / s
		return Quaternion{
			X: (mat.M6 - mat.M9) * invS,
			Y: (mat.M8 - mat.M2) * invS,
			Z: (mat.M1 - mat.M4) * invS,
			W: s * 0.25,
		}
	}
	m00 := mat.M0
	m11 := mat.M5
	m22 := mat.M10
	if m00 > m11 && m00 > m22 {
		// m00 dominant: x is the largest component.
		s = float32(math.Sqrt(float64(1+m00-m11-m22)) * 2)
		invS = 1 / s
		return Quaternion{
			X: s * 0.25,
			Y: (mat.M4 - mat.M1) * invS,
			Z: (mat.M8 - mat.M2) * invS,
			W: (mat.M6 - mat.M9) * invS,
		}
	} else if m11 > m22 {
		// m11 dominant: y is the largest component.
		s = float32(math.Sqrt(float64(1+m11-m00-m22)) * 2)
		invS = 1 / s
		return Quaternion{
			X: (mat.M4 - mat.M1) * invS,
			Y: s * 0.25,
			Z: (mat.M9 - mat.M6) * invS,
			W: (mat.M8 - mat.M2) * invS,
		}
	}
	// m22 dominant: z is the largest component.
	s = float32(math.Sqrt(float64(1+m22-m00-m11)) * 2)
	invS = 1 / s
	return Quaternion{
		X: (mat.M8 - mat.M2) * invS,
		Y: (mat.M9 - mat.M6) * invS,
		Z: s * 0.25,
		W: (mat.M1 - mat.M4) * invS,
	}
}
//NewQuaternionFromEuler creates a quaternion from euler angles (roll, yaw, pitch),
//given in radians, by composing half-angle sines and cosines per axis.
func NewQuaternionFromEuler(euler Vector3) Quaternion {
	x0 := float32(math.Cos(float64(euler.X * 0.5)))
	x1 := float32(math.Sin(float64(euler.X * 0.5)))
	y0 := float32(math.Cos(float64(euler.Y * 0.5)))
	y1 := float32(math.Sin(float64(euler.Y * 0.5)))
	z0 := float32(math.Cos(float64(euler.Z * 0.5)))
	z1 := float32(math.Sin(float64(euler.Z * 0.5)))
	return Quaternion{
		X: x1*y0*z0 - x0*y1*z1,
		Y: x0*y1*z0 + x1*y0*z1,
		Z: x0*y0*z1 - x1*y1*z0,
		W: x0*y0*z0 + x1*y1*z1,
	}
}
//Invert returns the multiplicative inverse of the quaternion: its conjugate
//divided by the squared length. A zero-length quaternion is returned unchanged.
func (q Quaternion) Invert() Quaternion {
	lengthSq := q.SqrLength()
	if lengthSq == 0 {
		return q
	}
	inv := 1 / lengthSq
	return Quaternion{
		X: q.X * -inv,
		Y: q.Y * -inv,
		Z: q.Z * -inv,
		W: q.W * inv,
	}
}
//Decompose returns the quaternion components as a 4-element slice [x, y, z, w].
func (q Quaternion) Decompose() []float32 {
	return []float32{q.X, q.Y, q.Z, q.W}
}
//Length of the quaternion: the Euclidean norm of its four components.
func (q Quaternion) Length() float32 {
	return float32(math.Sqrt(float64(q.X*q.X) + float64(q.Y*q.Y) + float64(q.Z*q.Z) + float64(q.W*q.W)))
}
//SqrLength is the squared length of the quaternion (no square root taken).
func (q Quaternion) SqrLength() float32 {
	return float32(float64(q.X*q.X) + float64(q.Y*q.Y) + float64(q.Z*q.Z) + float64(q.W*q.W))
}
//Scale multiplies every component of the quaternion by scale.
func (q Quaternion) Scale(scale float32) Quaternion {
	return Quaternion{
		X: q.X * scale,
		Y: q.Y * scale,
		Z: q.Z * scale,
		W: q.W * scale,
	}
}
//Normalize returns a unit-length copy of the quaternion. A zero quaternion
//is returned unchanged (its length is treated as 1 to avoid division by zero).
func (q Quaternion) Normalize() Quaternion {
	length := q.Length()
	if length == 0 {
		length = 1
	}
	ilength := 1 / length
	return q.Scale(ilength)
}
//Multiply two Quaternion together, computing the quaternion (Hamilton)
//product q * q2, which composes their rotations.
func (q Quaternion) Multiply(q2 Quaternion) Quaternion {
	return Quaternion{
		X: q.X*q2.W + q.W*q2.X + q.Y*q2.Z - q.Z*q2.Y,
		Y: q.Y*q2.W + q.W*q2.Y + q.Z*q2.X - q.X*q2.Z,
		Z: q.Z*q2.W + q.W*q2.Z + q.X*q2.Y - q.Y*q2.X,
		W: q.W*q2.W - q.X*q2.X - q.Y*q2.Y - q.Z*q2.Z,
	}
}
//Lerp linearly interpolates each component from q towards target by amount.
func (q Quaternion) Lerp(target Quaternion, amount float32) Quaternion {
	var out Quaternion
	out.X = q.X + amount*(target.X-q.X)
	out.Y = q.Y + amount*(target.Y-q.Y)
	out.Z = q.Z + amount*(target.Z-q.Z)
	out.W = q.W + amount*(target.W-q.W)
	return out
}
//Nlerp slerp-optimized interpolation between two quaternions: a linear
//interpolation followed by normalization.
func (q Quaternion) Nlerp(target Quaternion, amount float32) Quaternion {
	lerped := q.Lerp(target, amount)
	return lerped.Normalize()
}
//Slerp Spherically Lerped: interpolates from q towards q2 by amount along
//the shortest arc on the unit hypersphere.
//
//Fix: the original near-180-degree branch and the final ratio blend used
//q's components twice (q.X*0.5 + q.X*0.5 and q.X*ratioA + q.X*ratioB),
//so q2 was never blended in and the function returned a scaled q instead
//of an interpolation. The second operand of each sum now uses q2, matching
//raymath's QuaternionSlerp reference implementation.
func (q Quaternion) Slerp(q2 Quaternion, amount float32) Quaternion {
	cosHalfTheta := q.X*q2.X + q.Y*q2.Y + q.Z*q2.Z + q.W*q2.W
	if math.Abs((float64(cosHalfTheta))) >= 1 {
		// The quaternions are (anti)parallel; nothing to interpolate.
		return q
	}
	if cosHalfTheta > 0.95 {
		// Very close together: the cheaper normalized lerp is accurate enough.
		return q.Nlerp(q2, amount)
	}
	halfTheta := float32(math.Acos(float64(cosHalfTheta)))
	sinHalfTheta := float32(math.Sqrt(float64(1 - cosHalfTheta*cosHalfTheta)))
	if math.Abs(float64(sinHalfTheta)) < 0.001 {
		// theta is close to 180 degrees: the result is not fully defined;
		// average the two endpoints.
		return Quaternion{
			X: q.X*0.5 + q2.X*0.5,
			Y: q.Y*0.5 + q2.Y*0.5,
			Z: q.Z*0.5 + q2.Z*0.5,
			W: q.W*0.5 + q2.W*0.5,
		}
	}
	ratioA := float32(math.Sin(float64((1-amount)*halfTheta)) / float64(sinHalfTheta))
	ratioB := float32(math.Sin(float64(amount*halfTheta)) / float64(sinHalfTheta))
	return Quaternion{
		X: q.X*ratioA + q2.X*ratioB,
		Y: q.Y*ratioA + q2.Y*ratioB,
		Z: q.Z*ratioA + q2.Z*ratioB,
		W: q.W*ratioA + q2.W*ratioB,
	}
}
//ToMatrix converts the quaternion into a rotation matrix by delegating to
//NewMatrixFromQuaternion.
func (q Quaternion) ToMatrix() Matrix {
	return NewMatrixFromQuaternion(q)
}
//ToAxisAngle returns the rotation angle and axis for a given quaternion.
//The quaternion is normalized first when |w| > 1; when the denominator is
//near zero (rotation close to identity) the axis falls back to (1, 0, 0).
func (q Quaternion) ToAxisAngle() (Vector3, float32) {
	var den float32
	var resAngle float32
	var resAxis Vector3
	if math.Abs(float64(q.W)) > 1 {
		q = q.Normalize()
	}
	resAxis = Vector3{0, 0, 0}
	resAngle = 2 * float32(math.Atan(float64(q.W)))
	den = float32(math.Sqrt(float64(1 - q.W*q.W)))
	if den > 0.0001 {
		resAxis.X = q.X / den
		resAxis.Y = q.Y / den
		resAxis.Z = q.Z / den
	} else {
		// Degenerate case: pick an arbitrary axis.
		resAxis.X = 1
	}
	return resAxis, resAngle
}
//ToEuler turns the quaternion into equivalent euler angles (roll, putch, yaw). Values are returned in Degrees.
//The Y term is clamped to [-1, 1] to keep asin within its domain.
func (q Quaternion) ToEuler() Vector3 {
	x0 := 2 * (q.W*q.X + q.Y*q.Z)
	x1 := 1 - 2*(q.X*q.X+q.Y*q.Y)
	y0 := Clamp(float64(2*(q.W*q.Y-q.Z*q.X)), -1, 1)
	z0 := 2 * (q.W*q.Z + q.X*q.Y)
	z1 := 1 - 2*(q.Y*q.Y+q.Z*q.Z)
	return Vector3{
		X: float32(math.Atan2(float64(x0), float64(x1))) * Rad2Deg,
		Y: float32(math.Asin(y0)) * Rad2Deg,
		Z: float32(math.Atan2(float64(z0), float64(z1))) * Rad2Deg,
	}
}
//Transform a quaternion, given a transformation matrix
func (q Quaternion) Transform(mat Matrix) Quaternion {
return Quaternion{
X: mat.M0*q.X + mat.M4*q.Y + mat.M8*q.Z + mat.M12*q.W,
Y: mat.M1*q.X + mat.M5*q.Y + mat.M9*q.Z + mat.M13*q.W,
Z: mat.M2*q.X + mat.M6*q.Y + mat.M10*q.Z + mat.M14*q.W,
W: mat.M3*q.X + mat.M7*q.Y + mat.M11*q.Z + mat.M15*q.W,
}
} | raylib/quanterion.go | 0.932461 | 0.721363 | quanterion.go | starcoder |
package universe
import (
"github.com/apache/arrow/go/v7/arrow/memory"
"github.com/influxdata/flux"
"github.com/influxdata/flux/array"
)
// aggregateWindowSumInt accumulates windowed sums over int64 values across
// successive Aggregate calls.
type aggregateWindowSumInt struct {
	aggregateWindowBase
	vs *array.Int // running per-window sums, nil until the first Aggregate
}
// Aggregate sums vs for each window delimited by start/stop (located via
// ts) and merges the fresh per-window sums into the accumulated state held
// by the embedded aggregateWindowBase.
func (a *aggregateWindowSumInt) Aggregate(ts *array.Int, vs array.Array, start, stop *array.Int, mem memory.Allocator) {
	b := array.NewIntBuilder(mem)
	b.Resize(stop.Len())
	values := vs.(*array.Int)
	// Sum the values in each half-open index range [i, j).
	aggregateWindows(ts, start, stop, func(i, j int) {
		var sum int64
		for ; i < j; i++ {
			sum += values.Value(i)
		}
		b.Append(sum)
	})
	result := b.NewIntArray()
	// Merge with previously accumulated windows. In the callback, i indexes
	// the previous values and j the fresh ones; a negative index means the
	// window is absent on that side.
	a.mergeWindows(start, stop, mem, func(ts, prev, next *array.Int) {
		if a.vs == nil {
			// First call: adopt the fresh result directly.
			a.vs = result
			return
		}
		defer result.Release()
		merged := array.NewIntBuilder(mem)
		merged.Resize(ts.Len())
		mergeWindowValues(ts, prev, next, func(i, j int) {
			if i >= 0 && j >= 0 {
				merged.Append(a.vs.Value(i) + result.Value(j))
			} else if i >= 0 {
				merged.Append(a.vs.Value(i))
			} else {
				merged.Append(result.Value(j))
			}
		})
		a.vs.Release()
		a.vs = merged.NewIntArray()
	})
}
// Compute finalizes the aggregation: windows that received no data are
// filled with null values, then the timestamps, value column type (int)
// and the summed values are returned.
func (a *aggregateWindowSumInt) Compute(mem memory.Allocator) (*array.Int, flux.ColType, array.Array) {
	a.createEmptyWindows(mem, func(n int) (append func(i int), done func()) {
		b := array.NewIntBuilder(mem)
		b.Resize(n)
		// i < 0 marks an empty window; copy existing values otherwise.
		append = func(i int) {
			if i < 0 {
				b.AppendNull()
			} else {
				b.Append(a.vs.Value(i))
			}
		}
		done = func() {
			a.vs.Release()
			a.vs = b.NewIntArray()
		}
		return append, done
	})
	return a.ts, flux.TInt, a.vs
}
// aggregateWindowMeanInt accumulates windowed means over int64 values by
// tracking per-window counts and sums separately; the division happens in
// Compute.
type aggregateWindowMeanInt struct {
	aggregateWindowBase
	counts *array.Int // running per-window element counts
	sums   *array.Int // running per-window sums
}
// Aggregate computes the element count and sum of vs for each window
// delimited by start/stop, then merges both columns into the accumulated
// state so the mean can be derived later in Compute.
func (a *aggregateWindowMeanInt) Aggregate(ts *array.Int, vs array.Array, start, stop *array.Int, mem memory.Allocator) {
	countsB := array.NewIntBuilder(mem)
	countsB.Resize(stop.Len())
	sumsB := array.NewIntBuilder(mem)
	sumsB.Resize(stop.Len())
	values := vs.(*array.Int)
	// Count and sum the values in each half-open index range [i, j).
	aggregateWindows(ts, start, stop, func(i, j int) {
		countsB.Append(int64(j - i))
		var sum int64
		for ; i < j; i++ {
			sum += values.Value(i)
		}
		sumsB.Append(sum)
	})
	counts, sums := countsB.NewIntArray(), sumsB.NewIntArray()
	// Merge with previously accumulated windows. In the callback, i indexes
	// the previous values and j the fresh ones; a negative index means the
	// window is absent on that side.
	a.mergeWindows(start, stop, mem, func(ts, prev, next *array.Int) {
		if a.sums == nil {
			// First call: adopt the fresh results directly.
			a.counts, a.sums = counts, sums
			return
		}
		defer counts.Release()
		defer sums.Release()
		mergedCounts := array.NewIntBuilder(mem)
		mergedCounts.Resize(ts.Len())
		mergedSums := array.NewIntBuilder(mem)
		mergedSums.Resize(ts.Len())
		mergeWindowValues(ts, prev, next, func(i, j int) {
			if i >= 0 && j >= 0 {
				mergedCounts.Append(a.counts.Value(i) + counts.Value(j))
				mergedSums.Append(a.sums.Value(i) + sums.Value(j))
			} else if i >= 0 {
				mergedCounts.Append(a.counts.Value(i))
				mergedSums.Append(a.sums.Value(i))
			} else {
				mergedCounts.Append(counts.Value(j))
				mergedSums.Append(sums.Value(j))
			}
		})
		a.counts.Release()
		a.sums.Release()
		a.counts, a.sums = mergedCounts.NewIntArray(), mergedSums.NewIntArray()
	})
}
func (a *aggregateWindowMeanInt) Compute(mem memory.Allocator) (*array.Int, flux.ColType, array.Array) {
defer a.counts.Release()
defer a.sums.Release()
b := array.NewFloatBuilder(mem)
b.Resize(a.ts.Len())
for i, n := 0, a.sums.Len(); i < n; i++ {
v := float64(a.sums.Value(i)) / float64(a.counts.Value(i))
b.Append(v)
}
vs := b.NewFloatArray()
a.createEmptyWindows(mem, func(n int) (append func(i int), done func()) {
b := array.NewFloatBuilder(mem)
b.Resize(n)
append = func(i int) {
if i < 0 {
b.AppendNull()
} else {
b.Append(vs.Value(i))
}
}
done = func() {
vs.Release()
vs = b.NewFloatArray()
}
return append, done
})
return a.ts, flux.TFloat, vs
}
// aggregateWindowSumUint computes per-window sums over uint64 values.
// It mirrors aggregateWindowSumInt for the unsigned column type.
type aggregateWindowSumUint struct {
	aggregateWindowBase
	// vs holds the running per-window sums; nil until the first batch
	// has been aggregated.
	vs *array.Uint
}

// Aggregate sums the uint64 values in vs for each window delimited by
// start/stop and merges those sums into the running totals in a.vs.
// vs must be an *array.Uint or the type assertion panics.
func (a *aggregateWindowSumUint) Aggregate(ts *array.Int, vs array.Array, start, stop *array.Int, mem memory.Allocator) {
	b := array.NewUintBuilder(mem)
	b.Resize(stop.Len())
	values := vs.(*array.Uint)
	aggregateWindows(ts, start, stop, func(i, j int) {
		// Sum the half-open value index range [i, j) of one window.
		var sum uint64
		for ; i < j; i++ {
			sum += values.Value(i)
		}
		b.Append(sum)
	})
	result := b.NewUintArray()
	a.mergeWindows(start, stop, mem, func(ts, prev, next *array.Int) {
		if a.vs == nil {
			// First batch: adopt the freshly built sums without copying.
			a.vs = result
			return
		}
		defer result.Release()
		merged := array.NewUintBuilder(mem)
		merged.Resize(ts.Len())
		// i indexes the previously accumulated sums, j the new batch;
		// a negative index means that side has no value for the window.
		mergeWindowValues(ts, prev, next, func(i, j int) {
			if i >= 0 && j >= 0 {
				merged.Append(a.vs.Value(i) + result.Value(j))
			} else if i >= 0 {
				merged.Append(a.vs.Value(i))
			} else {
				merged.Append(result.Value(j))
			}
		})
		a.vs.Release()
		a.vs = merged.NewUintArray()
	})
}

// Compute finalizes the aggregation: windows that received no data are
// filled with nulls, then the window timestamps, the result column
// type, and the summed values are returned.
func (a *aggregateWindowSumUint) Compute(mem memory.Allocator) (*array.Int, flux.ColType, array.Array) {
	a.createEmptyWindows(mem, func(n int) (append func(i int), done func()) {
		b := array.NewUintBuilder(mem)
		b.Resize(n)
		append = func(i int) {
			// A negative index marks an empty window.
			if i < 0 {
				b.AppendNull()
			} else {
				b.Append(a.vs.Value(i))
			}
		}
		done = func() {
			a.vs.Release()
			a.vs = b.NewUintArray()
		}
		return append, done
	})
	return a.ts, flux.TUInt, a.vs
}
// aggregateWindowMeanUint computes per-window means over uint64 values.
// Counts are kept as int64 while sums stay unsigned; the division into
// a float64 mean happens in Compute.
type aggregateWindowMeanUint struct {
	aggregateWindowBase
	counts *array.Int  // per-window value counts; nil until first batch
	sums   *array.Uint // per-window value sums; nil until first batch
}

// Aggregate counts and sums the uint64 values in vs for each window
// delimited by start/stop and merges them into the running state.
// vs must be an *array.Uint or the type assertion panics.
func (a *aggregateWindowMeanUint) Aggregate(ts *array.Int, vs array.Array, start, stop *array.Int, mem memory.Allocator) {
	countsB := array.NewIntBuilder(mem)
	countsB.Resize(stop.Len())
	sumsB := array.NewUintBuilder(mem)
	sumsB.Resize(stop.Len())
	values := vs.(*array.Uint)
	aggregateWindows(ts, start, stop, func(i, j int) {
		// The window covers the half-open value index range [i, j).
		countsB.Append(int64(j - i))
		var sum uint64
		for ; i < j; i++ {
			sum += values.Value(i)
		}
		sumsB.Append(sum)
	})
	counts, sums := countsB.NewIntArray(), sumsB.NewUintArray()
	a.mergeWindows(start, stop, mem, func(ts, prev, next *array.Int) {
		if a.sums == nil {
			// First batch: adopt both arrays without copying.
			a.counts, a.sums = counts, sums
			return
		}
		defer counts.Release()
		defer sums.Release()
		mergedCounts := array.NewIntBuilder(mem)
		mergedCounts.Resize(ts.Len())
		mergedSums := array.NewUintBuilder(mem)
		mergedSums.Resize(ts.Len())
		// i indexes the accumulated state, j the new batch; negative
		// means that side has no value for the window.
		mergeWindowValues(ts, prev, next, func(i, j int) {
			if i >= 0 && j >= 0 {
				mergedCounts.Append(a.counts.Value(i) + counts.Value(j))
				mergedSums.Append(a.sums.Value(i) + sums.Value(j))
			} else if i >= 0 {
				mergedCounts.Append(a.counts.Value(i))
				mergedSums.Append(a.sums.Value(i))
			} else {
				mergedCounts.Append(counts.Value(j))
				mergedSums.Append(sums.Value(j))
			}
		})
		a.counts.Release()
		a.sums.Release()
		a.counts, a.sums = mergedCounts.NewIntArray(), mergedSums.NewUintArray()
	})
}

// Compute divides each window's sum by its count to produce float64
// means, fills windows that received no data with nulls, and returns
// the window timestamps, the result column type, and the means.
func (a *aggregateWindowMeanUint) Compute(mem memory.Allocator) (*array.Int, flux.ColType, array.Array) {
	defer a.counts.Release()
	defer a.sums.Release()
	b := array.NewFloatBuilder(mem)
	b.Resize(a.ts.Len())
	for i, n := 0, a.sums.Len(); i < n; i++ {
		v := float64(a.sums.Value(i)) / float64(a.counts.Value(i))
		b.Append(v)
	}
	vs := b.NewFloatArray()
	a.createEmptyWindows(mem, func(n int) (append func(i int), done func()) {
		b := array.NewFloatBuilder(mem)
		b.Resize(n)
		append = func(i int) {
			// A negative index marks an empty window.
			if i < 0 {
				b.AppendNull()
			} else {
				b.Append(vs.Value(i))
			}
		}
		done = func() {
			vs.Release()
			vs = b.NewFloatArray()
		}
		return append, done
	})
	return a.ts, flux.TFloat, vs
}
// aggregateWindowSumFloat computes per-window sums over float64 values.
// It mirrors aggregateWindowSumInt for the float column type.
type aggregateWindowSumFloat struct {
	aggregateWindowBase
	// vs holds the running per-window sums; nil until the first batch
	// has been aggregated.
	vs *array.Float
}

// Aggregate sums the float64 values in vs for each window delimited by
// start/stop and merges those sums into the running totals in a.vs.
// vs must be an *array.Float or the type assertion panics.
func (a *aggregateWindowSumFloat) Aggregate(ts *array.Int, vs array.Array, start, stop *array.Int, mem memory.Allocator) {
	b := array.NewFloatBuilder(mem)
	b.Resize(stop.Len())
	values := vs.(*array.Float)
	aggregateWindows(ts, start, stop, func(i, j int) {
		// Sum the half-open value index range [i, j) of one window.
		var sum float64
		for ; i < j; i++ {
			sum += values.Value(i)
		}
		b.Append(sum)
	})
	result := b.NewFloatArray()
	a.mergeWindows(start, stop, mem, func(ts, prev, next *array.Int) {
		if a.vs == nil {
			// First batch: adopt the freshly built sums without copying.
			a.vs = result
			return
		}
		defer result.Release()
		merged := array.NewFloatBuilder(mem)
		merged.Resize(ts.Len())
		// i indexes the previously accumulated sums, j the new batch;
		// a negative index means that side has no value for the window.
		mergeWindowValues(ts, prev, next, func(i, j int) {
			if i >= 0 && j >= 0 {
				merged.Append(a.vs.Value(i) + result.Value(j))
			} else if i >= 0 {
				merged.Append(a.vs.Value(i))
			} else {
				merged.Append(result.Value(j))
			}
		})
		a.vs.Release()
		a.vs = merged.NewFloatArray()
	})
}

// Compute finalizes the aggregation: windows that received no data are
// filled with nulls, then the window timestamps, the result column
// type, and the summed values are returned.
func (a *aggregateWindowSumFloat) Compute(mem memory.Allocator) (*array.Int, flux.ColType, array.Array) {
	a.createEmptyWindows(mem, func(n int) (append func(i int), done func()) {
		b := array.NewFloatBuilder(mem)
		b.Resize(n)
		append = func(i int) {
			// A negative index marks an empty window.
			if i < 0 {
				b.AppendNull()
			} else {
				b.Append(a.vs.Value(i))
			}
		}
		done = func() {
			a.vs.Release()
			a.vs = b.NewFloatArray()
		}
		return append, done
	})
	return a.ts, flux.TFloat, a.vs
}
// aggregateWindowMeanFloat computes per-window means over float64
// values, accumulating counts and sums separately until Compute.
type aggregateWindowMeanFloat struct {
	aggregateWindowBase
	counts *array.Int   // per-window value counts; nil until first batch
	sums   *array.Float // per-window value sums; nil until first batch
}

// Aggregate counts and sums the float64 values in vs for each window
// delimited by start/stop and merges them into the running state.
// vs must be an *array.Float or the type assertion panics.
func (a *aggregateWindowMeanFloat) Aggregate(ts *array.Int, vs array.Array, start, stop *array.Int, mem memory.Allocator) {
	countsB := array.NewIntBuilder(mem)
	countsB.Resize(stop.Len())
	sumsB := array.NewFloatBuilder(mem)
	sumsB.Resize(stop.Len())
	values := vs.(*array.Float)
	aggregateWindows(ts, start, stop, func(i, j int) {
		// The window covers the half-open value index range [i, j).
		countsB.Append(int64(j - i))
		var sum float64
		for ; i < j; i++ {
			sum += values.Value(i)
		}
		sumsB.Append(sum)
	})
	counts, sums := countsB.NewIntArray(), sumsB.NewFloatArray()
	a.mergeWindows(start, stop, mem, func(ts, prev, next *array.Int) {
		if a.sums == nil {
			// First batch: adopt both arrays without copying.
			a.counts, a.sums = counts, sums
			return
		}
		defer counts.Release()
		defer sums.Release()
		mergedCounts := array.NewIntBuilder(mem)
		mergedCounts.Resize(ts.Len())
		mergedSums := array.NewFloatBuilder(mem)
		mergedSums.Resize(ts.Len())
		// i indexes the accumulated state, j the new batch; negative
		// means that side has no value for the window.
		mergeWindowValues(ts, prev, next, func(i, j int) {
			if i >= 0 && j >= 0 {
				mergedCounts.Append(a.counts.Value(i) + counts.Value(j))
				mergedSums.Append(a.sums.Value(i) + sums.Value(j))
			} else if i >= 0 {
				mergedCounts.Append(a.counts.Value(i))
				mergedSums.Append(a.sums.Value(i))
			} else {
				mergedCounts.Append(counts.Value(j))
				mergedSums.Append(sums.Value(j))
			}
		})
		a.counts.Release()
		a.sums.Release()
		a.counts, a.sums = mergedCounts.NewIntArray(), mergedSums.NewFloatArray()
	})
}

// Compute divides each window's sum by its count to produce float64
// means, fills windows that received no data with nulls, and returns
// the window timestamps, the result column type, and the means.
func (a *aggregateWindowMeanFloat) Compute(mem memory.Allocator) (*array.Int, flux.ColType, array.Array) {
	defer a.counts.Release()
	defer a.sums.Release()
	b := array.NewFloatBuilder(mem)
	b.Resize(a.ts.Len())
	for i, n := 0, a.sums.Len(); i < n; i++ {
		v := float64(a.sums.Value(i)) / float64(a.counts.Value(i))
		b.Append(v)
	}
	vs := b.NewFloatArray()
	a.createEmptyWindows(mem, func(n int) (append func(i int), done func()) {
		b := array.NewFloatBuilder(mem)
		b.Resize(n)
		append = func(i int) {
			// A negative index marks an empty window.
			if i < 0 {
				b.AppendNull()
			} else {
				b.Append(vs.Value(i))
			}
		}
		done = func() {
			vs.Release()
			vs = b.NewFloatArray()
		}
		return append, done
	})
	return a.ts, flux.TFloat, vs
}
package evaluator
import (
"fmt"
"log"
"github.com/gmlewis/go-csg/ast"
"github.com/gmlewis/go-csg/object"
)
// Singleton object types.
// True, False, and Null are shared instances so results can be compared
// by pointer identity (see evalBangOperatorExpression and the
// ==/!= fallback in evalInfixExpression).
var (
	Null = &object.Null{}
	True = &object.Boolean{Value: true}
	False = &object.Boolean{Value: false}
)
// Eval evaluates the AST node and returns the evaluated object.
// It is the recursive entry point of the tree-walking evaluator:
// statements thread their environment through, expressions produce
// object values, and errors short-circuit evaluation as soon as they
// appear. An unrecognized node type terminates the process via
// log.Fatalf.
func Eval(node ast.Node, env *object.Environment) object.Object {
	switch node := node.(type) {
	// Statements
	case *ast.Program:
		return evalProgram(node, env)
	case *ast.ExpressionStatement:
		return Eval(node.Expression, env)
	// Expressions
	case *ast.BooleanLiteral:
		// Map to the shared singletons so booleans compare by identity.
		if node.Value {
			return True
		}
		return False
	case *ast.IntegerLiteral:
		return &object.Integer{Value: node.Value}
	case *ast.FloatLiteral:
		return &object.Float{Value: node.Value}
	case *ast.InfixExpression:
		// Evaluate operands left to right, propagating the first error.
		left := Eval(node.Left, env)
		if isError(left) {
			return left
		}
		right := Eval(node.Right, env)
		if isError(right) {
			return right
		}
		return evalInfixExpression(node.Operator, left, right)
	case *ast.PrefixExpression:
		right := Eval(node.Right, env)
		if isError(right) {
			return right
		}
		return evalPrefixExpression(node.Operator, right)
	case *ast.BlockStatement:
		return evalBlockStatement(node, env)
	case *ast.IfExpression:
		return evalIfExpression(node, env)
	case *ast.ReturnStatement:
		// Wrap the value so enclosing blocks know to stop executing.
		val := Eval(node.ReturnValue, env)
		if isError(val) {
			return val
		}
		return &object.ReturnValue{Value: val}
	case *ast.LetStatement:
		// Binds the name in the current environment; yields nil.
		val := Eval(node.Value, env)
		if isError(val) {
			return val
		}
		env.Set(node.Name.Value, val)
	case *ast.Identifier:
		return evalIdentifier(node, env)
	case *ast.FunctionLiteral:
		// Capture the defining environment to form a closure.
		params := node.Parameters
		body := node.Body
		return &object.Function{Parameters: params, Body: body, Env: env}
	case *ast.CallExpression:
		function := Eval(node.Function, env)
		if isError(function) {
			return function
		}
		// evalExpressions signals an error with a single-element slice.
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return applyFunction(function, args)
	case *ast.StringLiteral:
		return &object.String{Value: node.Value}
	case *ast.ArrayLiteral:
		elements := evalExpressions(node.Elements, env)
		if len(elements) == 1 && isError(elements[0]) {
			return elements[0]
		}
		return &object.Array{Elements: elements}
	case *ast.IndexExpression:
		left := Eval(node.Left, env)
		if isError(left) {
			return left
		}
		index := Eval(node.Index, env)
		if isError(index) {
			return index
		}
		return evalIndexExpression(left, index)
	case *ast.HashLiteral:
		return evalHashLiteral(node, env)
	// CSG primitives: each evaluates its arguments (and body, if any)
	// and wraps them in the corresponding object type.
	case *ast.CubePrimitive:
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return &object.CubePrimitive{Arguments: args}
	case *ast.CylinderPrimitive:
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return &object.CylinderPrimitive{Arguments: args}
	case *ast.GroupBlockPrimitive:
		body := Eval(node.Body, env)
		return &object.GroupBlockPrimitive{Body: body}
	case *ast.MultmatrixBlockPrimitive:
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		body := Eval(node.Body, env)
		return &object.MultmatrixBlockPrimitive{Arguments: args, Body: body}
	case *ast.NamedArgument:
		value := Eval(node.Value, env)
		return &object.NamedArgument{Name: node.Name.String(), Value: value}
	case *ast.PolygonPrimitive:
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return &object.PolygonPrimitive{Arguments: args}
	case *ast.SpherePrimitive:
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return &object.SpherePrimitive{Arguments: args}
	case *ast.SquarePrimitive:
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return &object.SquarePrimitive{Arguments: args}
	case *ast.UndefLiteral:
		return Null
	case *ast.UnionBlockPrimitive:
		body := Eval(node.Body, env)
		return &object.UnionBlockPrimitive{Body: body}
	default:
		// NOTE(review): log.Fatalf exits the whole process on an
		// unknown node type; returning a *object.Error might be
		// preferable for embedders — confirm intent.
		log.Fatalf("unhandled AST Node type %T (%+v)", node, node)
	}
	return nil
}
// evalBangOperatorExpression applies the logical-not operator "!".
// Only the shared False and Null singletons negate to True; every
// other object (including zero numbers) negates to False.
func evalBangOperatorExpression(right object.Object) object.Object {
	if right == False || right == Null {
		return True
	}
	return False
}
// evalMinusOperatorExpression applies unary minus to a numeric object,
// returning an error object for any non-numeric operand.
func evalMinusOperatorExpression(right object.Object) object.Object {
	switch right.Type() {
	case object.IntegerT:
		return &object.Integer{Value: -right.(*object.Integer).Value}
	case object.FloatT:
		return &object.Float{Value: -right.(*object.Float).Value}
	default:
		return newError("unknown operator: -%v", right.Type())
	}
}
// evalInfixExpression evaluates a binary operator applied to two
// already-evaluated operands. Numeric and string operand pairs are
// dispatched to type-specific helpers; for all other operand types,
// == and != fall back to pointer identity, which is correct for the
// shared True/False/Null singletons.
func evalInfixExpression(operator string, left, right object.Object) object.Object {
	switch {
	case left.Type() == object.IntegerT && right.Type() == object.IntegerT:
		return evalIntegerInfixExpression(operator, left, right)
	case left.Type() == object.FloatT && right.Type() == object.FloatT:
		// Bug fix: float operands previously fell through to the
		// "unknown operator" error (or pointer-identity == / !=) even
		// though float literals and unary minus on floats are
		// supported elsewhere in the evaluator.
		return evalFloatInfixExpression(operator, left, right)
	case left.Type() == object.StringT && right.Type() == object.StringT:
		return evalStringInfixExpression(operator, left, right)
	case operator == "==":
		if left == right {
			return True
		}
		return False
	case operator == "!=":
		if left != right {
			return True
		}
		return False
	case left.Type() != right.Type():
		return newError("type mismatch: %v %v %v", left.Type(), operator, right.Type())
	}
	return newError("unknown operator: %v %v %v", left.Type(), operator, right.Type())
}

// evalFloatInfixExpression evaluates arithmetic and comparison
// operators for two float operands, mirroring the integer version.
func evalFloatInfixExpression(operator string, left, right object.Object) object.Object {
	leftVal := left.(*object.Float).Value
	rightVal := right.(*object.Float).Value
	switch operator {
	case "+":
		return &object.Float{Value: leftVal + rightVal}
	case "-":
		return &object.Float{Value: leftVal - rightVal}
	case "*":
		return &object.Float{Value: leftVal * rightVal}
	case "/":
		return &object.Float{Value: leftVal / rightVal}
	case "<":
		if leftVal < rightVal {
			return True
		}
		return False
	case ">":
		if leftVal > rightVal {
			return True
		}
		return False
	case "==":
		if leftVal == rightVal {
			return True
		}
		return False
	case "!=":
		if leftVal != rightVal {
			return True
		}
		return False
	}
	return newError("unknown operator: %v %v %v", left.Type(), operator, right.Type())
}
// evalIntegerInfixExpression evaluates arithmetic and comparison
// operators for two integer operands. Division by zero returns an
// error object instead of panicking the interpreter (bug fix: the
// previous code let the Go runtime panic on integer divide by zero).
func evalIntegerInfixExpression(operator string, left, right object.Object) object.Object {
	leftVal := left.(*object.Integer).Value
	rightVal := right.(*object.Integer).Value
	switch operator {
	case "+":
		return &object.Integer{Value: leftVal + rightVal}
	case "-":
		return &object.Integer{Value: leftVal - rightVal}
	case "*":
		return &object.Integer{Value: leftVal * rightVal}
	case "/":
		if rightVal == 0 {
			return newError("division by zero: %v / %v", leftVal, rightVal)
		}
		return &object.Integer{Value: leftVal / rightVal}
	case "<":
		if leftVal < rightVal {
			return True
		}
		return False
	case ">":
		if leftVal > rightVal {
			return True
		}
		return False
	case "==":
		if leftVal == rightVal {
			return True
		}
		return False
	case "!=":
		if leftVal != rightVal {
			return True
		}
		return False
	}
	return newError("unknown operator: %v %v %v", left.Type(), operator, right.Type())
}
// evalStringInfixExpression evaluates concatenation and comparison
// operators for two string operands.
func evalStringInfixExpression(operator string, left, right object.Object) object.Object {
	l := left.(*object.String).Value
	r := right.(*object.String).Value
	var truth bool
	switch operator {
	case "+":
		return &object.String{Value: l + r}
	case "<":
		truth = l < r
	case ">":
		truth = l > r
	case "==":
		truth = l == r
	case "!=":
		truth = l != r
	default:
		return newError("unknown operator: %v %v %v", left.Type(), operator, right.Type())
	}
	if truth {
		return True
	}
	return False
}
// evalPrefixExpression dispatches a unary operator to its handler.
func evalPrefixExpression(operator string, right object.Object) object.Object {
	if operator == "!" {
		return evalBangOperatorExpression(right)
	}
	if operator == "-" {
		return evalMinusOperatorExpression(right)
	}
	return newError("unknown operator: %v%v", operator, right.Type())
}
// evalProgram evaluates top-level statements in order. A return value
// is unwrapped and ends execution; an error ends execution as-is.
func evalProgram(program *ast.Program, env *object.Environment) object.Object {
	var last object.Object
	for _, stmt := range program.Statements {
		last = Eval(stmt, env)
		if rv, ok := last.(*object.ReturnValue); ok {
			return rv.Value
		}
		if errObj, ok := last.(*object.Error); ok {
			return errObj
		}
	}
	return last
}
// evalBlockStatement evaluates the statements of a block in order.
// Return values and errors are bubbled up unwrapped so that enclosing
// blocks also stop executing.
func evalBlockStatement(block *ast.BlockStatement, env *object.Environment) object.Object {
	var last object.Object
	for _, stmt := range block.Statements {
		last = Eval(stmt, env)
		if last == nil {
			continue
		}
		switch last.Type() {
		case object.ReturnValueT, object.ErrorT:
			return last
		}
	}
	return last
}
// evalIfExpression evaluates a conditional. A missing alternative on a
// falsy condition yields Null.
func evalIfExpression(ie *ast.IfExpression, env *object.Environment) object.Object {
	condition := Eval(ie.Condition, env)
	if isError(condition) {
		return condition
	}
	if isTruthy(condition) {
		return Eval(ie.Consequence, env)
	}
	if ie.Alternative != nil {
		return Eval(ie.Alternative, env)
	}
	return Null
}
// evalIdentifier resolves a name, preferring environment bindings over
// builtins.
func evalIdentifier(node *ast.Identifier, env *object.Environment) object.Object {
	if val, ok := env.Get(node.Value); ok {
		return val
	}
	builtin, ok := builtins[node.Value]
	if !ok {
		return newError("identifier not found: " + node.Value)
	}
	return builtin
}
// evalExpressions evaluates a list of expressions left to right. On
// the first error it returns a single-element slice holding only that
// error, which callers detect via len(...) == 1 && isError(...).
func evalExpressions(exps []ast.Expression, env *object.Environment) []object.Object {
	var evaluated []object.Object
	for _, exp := range exps {
		obj := Eval(exp, env)
		if isError(obj) {
			return []object.Object{obj}
		}
		evaluated = append(evaluated, obj)
	}
	return evaluated
}
// evalIndexExpression dispatches an index operation to the array or
// hash handler based on the operand types.
func evalIndexExpression(left, index object.Object) object.Object {
	if left.Type() == object.ArrayT && index.Type() == object.IntegerT {
		return evalArrayIndexExpression(left, index)
	}
	if left.Type() == object.HashT {
		return evalHashIndexExpression(left, index)
	}
	return newError("index operator not supported: %v", left.Type())
}
// evalArrayIndexExpression indexes an array; out-of-range access
// yields Null rather than an error.
func evalArrayIndexExpression(array, index object.Object) object.Object {
	elements := array.(*object.Array).Elements
	idx := index.(*object.Integer).Value
	if idx < 0 || idx >= int64(len(elements)) {
		return Null
	}
	return elements[idx]
}
// evalHashIndexExpression looks up a key in a hash; a missing key
// yields Null, and a non-hashable key yields an error.
func evalHashIndexExpression(hash, index object.Object) object.Object {
	key, ok := index.(object.Hashable)
	if !ok {
		return newError("unusable as hash key: %v", index.Type())
	}
	pair, found := hash.(*object.Hash).Pairs[key.HashKey()]
	if !found {
		return Null
	}
	return pair.Value
}
// evalHashLiteral evaluates each key/value pair and builds a Hash
// object, propagating the first evaluation error and rejecting keys
// that do not implement object.Hashable.
func evalHashLiteral(node *ast.HashLiteral, env *object.Environment) object.Object {
	pairs := make(map[object.HashKey]object.HashPair, len(node.Pairs))
	for keyNode, valueNode := range node.Pairs {
		keyObj := Eval(keyNode, env)
		if isError(keyObj) {
			return keyObj
		}
		hashable, ok := keyObj.(object.Hashable)
		if !ok {
			return newError("unusable as hash key: %v", keyObj.Type())
		}
		valObj := Eval(valueNode, env)
		if isError(valObj) {
			return valObj
		}
		pairs[hashable.HashKey()] = object.HashPair{Key: keyObj, Value: valObj}
	}
	return &object.Hash{Pairs: pairs}
}
// applyFunction invokes a user-defined function or a builtin with the
// given arguments.
func applyFunction(fn object.Object, args []object.Object) object.Object {
	switch fn := fn.(type) {
	case *object.Function:
		// Bug fix: extendFunctionEnv indexes args by parameter
		// position, so fewer arguments than parameters previously
		// caused an index-out-of-range panic. Surplus arguments remain
		// accepted (and ignored) for backward compatibility.
		if len(args) < len(fn.Parameters) {
			return newError("wrong number of arguments: want=%v, got=%v",
				len(fn.Parameters), len(args))
		}
		extendedEnv := extendFunctionEnv(fn, args)
		evaluated := Eval(fn.Body, extendedEnv)
		return unwrapReturnValue(evaluated)
	case *object.Builtin:
		return fn.Fn(args...)
	default:
		return newError("not a function: %v", fn.Type())
	}
}
// extendFunctionEnv creates a new environment enclosed by the
// function's defining environment and binds each parameter name to the
// corresponding argument. args must have at least len(fn.Parameters)
// entries or indexing panics.
func extendFunctionEnv(fn *object.Function, args []object.Object) *object.Environment {
	scope := object.NewEnclosedEnvironment(fn.Env)
	for i, p := range fn.Parameters {
		scope.Set(p.Value, args[i])
	}
	return scope
}
// unwrapReturnValue strips a ReturnValue wrapper so that a return
// inside a function body does not terminate evaluation in the caller.
func unwrapReturnValue(obj object.Object) object.Object {
	ret, ok := obj.(*object.ReturnValue)
	if !ok {
		return obj
	}
	return ret.Value
}
// isTruthy reports whether obj counts as true in a condition. Only the
// shared Null and False singletons are falsy; everything else
// (including zero numbers) is truthy.
func isTruthy(obj object.Object) bool {
	return obj != Null && obj != False
}
// isError reports whether obj is a non-nil error object.
func isError(obj object.Object) bool {
	return obj != nil && obj.Type() == object.ErrorT
}
func newError(format string, args ...interface{}) *object.Error {
return &object.Error{Message: fmt.Sprintf(format, args...)}
} | evaluator/evaluator.go | 0.625438 | 0.506591 | evaluator.go | starcoder |
package gobaker
import (
"fmt"
"math"
)
// Vertex describes a 3D mesh vertex: its position, texture coordinate,
// normal, and vertex-color alpha.
type Vertex struct {
	v Vector // Position
	vt Vector // Texture coordinate
	vn Vector // Normal Vector
	va float64 // Vertex Color Alpha
}

// SetVertexAlpha sets the vertex color alpha.
func (v *Vertex) SetVertexAlpha(vAlpha float64) {
	v.va = vAlpha
}
// Triangle describes single triangle of a 3D mesh.
// Normal, Bar, and Distance are written by Intersect when a ray hits
// the triangle, so a Triangle carries the state of its most recent
// intersection test.
type Triangle struct {
	V0, V1, V2 Vertex // Each triangle vertex
	Tangent Vector
	Bitangent Vector
	Normal Vector // Face normal, computed in Intersect
	Bar Vector // Baricentric coordinates from intersection with a ray
	Material *Material
	Distance float64 // Ray distance of the last successful intersection
}
// String implements the Stringer interface.
// It lists the triangle's vertex positions, texture coordinates,
// normals, and vertex-color alphas, one line each.
func (t Triangle) String() string {
	return fmt.Sprintf(
		"Verticies: %.5f, %.5f, %.5f\n"+
			"Texture: %.5f, %.5f, %.5f\n"+
			"Normals: %.5f, %.5f, %.5f\n"+
			"Color: %.5f, %.5f, %.5f\n",
		t.V0.v, t.V1.v, t.V2.v,
		t.V0.vt, t.V1.vt, t.V2.vt,
		t.V0.vn, t.V1.vn, t.V2.vn,
		t.V0.va, t.V1.va, t.V2.va)
}
// Intersect checks if ray intersects with a triangle.
// On a hit it returns true and stores the barycentric coordinates in
// t.Bar and the hit distance in t.Distance; it also mutates t.Normal
// and r.Distance as side effects, even on some miss paths.
func (t *Triangle) Intersect(r *Ray) bool {
	a := t.V0.v
	b := t.V1.v
	c := t.V2.v
	// Face normal from the two edge vectors.
	t.Normal = (b.Sub(a)).Cross(c.Sub(a))
	// NOTE(review): this only normalizes when a component exceeds +1,
	// so normals with large negative components stay unnormalized —
	// confirm whether unconditional normalization was intended.
	if t.Normal.X > 1.0 || t.Normal.Y > 1.0 || t.Normal.Z > 1.0 {
		t.Normal = t.Normal.Normalize()
	}
	den := t.Normal.Dot(r.Direction)
	// NOTE(review): math.SmallestNonzeroFloat64 (~4.9e-324) makes this
	// effectively a den == 0 test; a larger epsilon may have been
	// intended for near-parallel rays.
	if math.Abs(den) < math.SmallestNonzeroFloat64 {
		return false
	}
	nom := a.Sub(r.Origin)
	// Distance along the ray to the triangle's supporting plane.
	d := nom.Dot(t.Normal) / den
	r.Distance = d
	if d >= math.SmallestNonzeroFloat64 {
		// Barycentric coordinates of the hit point relative to the
		// triangle (Cramer's-rule style solve on the edge vectors).
		v0 := b.Sub(a)
		v1 := c.Sub(a)
		v2 := r.HitPosition().Sub(a)
		d00 := v0.Dot(v0)
		d01 := v0.Dot(v1)
		d11 := v1.Dot(v1)
		d20 := v2.Dot(v0)
		d21 := v2.Dot(v1)
		denom := d00*d11 - d01*d01
		beta := (d11*d20 - d01*d21) / denom
		gamma := (d00*d21 - d01*d20) / denom
		alpha := 1.0 - beta - gamma
		// Any negative coordinate means the hit point lies outside.
		if beta < 0.0 || gamma < 0.0 || alpha < 0.0 {
			return false
		}
		t.Bar = Vector{alpha, beta, gamma}
		t.Distance = r.Distance
		return true
	}
	return false
}
// Barycentric return barycentric coorditnates based on UV position of a triangle.
func (t Triangle) Barycentric(u, v float64) Vector {
xa := t.V0.vt.X
ya := t.V0.vt.Y
xb := t.V1.vt.X
yb := t.V1.vt.Y
xc := t.V2.vt.X
yc := t.V2.vt.Y
xp := u
yp := v
d := (yb-yc)*(xa-xc) + (xc-xb)*(ya-yc)
d1 := ((yb-yc)*(xp-xc) + (xc-xb)*(yp-yc)) / d
d2 := ((yc-ya)*(xp-xc) + (xa-xc)*(yp-yc)) / d
d3 := 1 - d1 - d2
return Vector{d1, d2, d3}
} | gobaker/triangle.go | 0.831074 | 0.607372 | triangle.go | starcoder |
package wparams
// ParamStorer is a type that stores safe and unsafe parameters. Keys should be unique across both SafeParams and
// UnsafeParams (that is, if a key occurs in one map, it should not occur in the other). For performance reasons,
// the maps returned by SafeParams and UnsafeParams are references to the underlying storage and should not be modified
// by the caller.
type ParamStorer interface {
	SafeParams() map[string]interface{}
	UnsafeParams() map[string]interface{}
}

// NewParamStorer returns a new ParamStorer that merges the params of every provided storer, in order. For each storer
// the safe params are copied before the unsafe ones, and key uniqueness across safe/unsafe is maintained throughout:
// a later storer's key/value (and its safe/unsafe classification) wins over any earlier occurrence of the same key.
// Nil storers are skipped.
func NewParamStorer(paramStorers ...ParamStorer) ParamStorer {
	merged := &mapParamStorer{}
	for _, s := range paramStorers {
		merged.copyFrom(s)
	}
	return merged
}

// NewSafeParamStorer returns a new ParamStorer that stores the provided parameters as SafeParams.
func NewSafeParamStorer(safeParams map[string]interface{}) ParamStorer {
	return NewSafeAndUnsafeParamStorer(safeParams, nil)
}

// NewSafeParam returns a new ParamStorer that stores a single safe parameter.
func NewSafeParam(key string, value interface{}) ParamStorer {
	return singleParamStorer{safe: true, key: key, value: value}
}

// NewUnsafeParamStorer returns a new ParamStorer that stores the provided parameters as UnsafeParams.
func NewUnsafeParamStorer(unsafeParams map[string]interface{}) ParamStorer {
	return NewSafeAndUnsafeParamStorer(nil, unsafeParams)
}

// NewUnsafeParam returns a new ParamStorer that stores a single unsafe parameter.
func NewUnsafeParam(key string, value interface{}) ParamStorer {
	return singleParamStorer{safe: false, key: key, value: value}
}

// NewSafeAndUnsafeParamStorer returns a new ParamStorer holding the given safe and unsafe parameters. If the two maps
// share keys, the unsafe entries win (the conflicting safe entries are dropped) because unsafe params are applied last.
func NewSafeAndUnsafeParamStorer(safeParams, unsafeParams map[string]interface{}) ParamStorer {
	out := &mapParamStorer{}
	for k, v := range safeParams {
		out.putSafeParam(k, v)
	}
	for k, v := range unsafeParams {
		out.putUnsafeParam(k, v)
	}
	return out
}

// mapParamStorer is the map-backed ParamStorer used for merged/multi-param storage. Both maps are lazily allocated;
// the put methods keep the safe/unsafe key sets disjoint.
type mapParamStorer struct {
	safeParams   map[string]interface{}
	unsafeParams map[string]interface{}
}

// SafeParams returns a reference to the internal safe-param map (or a fresh empty map when none was allocated).
func (m *mapParamStorer) SafeParams() map[string]interface{} {
	if m.safeParams != nil {
		return m.safeParams
	}
	return map[string]interface{}{}
}

// UnsafeParams returns a reference to the internal unsafe-param map (or a fresh empty map when none was allocated).
func (m *mapParamStorer) UnsafeParams() map[string]interface{} {
	if m.unsafeParams != nil {
		return m.unsafeParams
	}
	return map[string]interface{}{}
}

// putSafeParam records k as a safe param, evicting any unsafe entry with the same key.
func (m *mapParamStorer) putSafeParam(k string, v interface{}) {
	if m.safeParams == nil {
		m.safeParams = make(map[string]interface{})
	}
	m.safeParams[k] = v
	delete(m.unsafeParams, k)
}

// putUnsafeParam records k as an unsafe param, evicting any safe entry with the same key.
func (m *mapParamStorer) putUnsafeParam(k string, v interface{}) {
	if m.unsafeParams == nil {
		m.unsafeParams = make(map[string]interface{})
	}
	m.unsafeParams[k] = v
	delete(m.safeParams, k)
}

// copyFrom merges the params of storer into m, safe params first. The concrete-type cases avoid the intermediate map
// allocations that the interface methods would otherwise perform.
func (m *mapParamStorer) copyFrom(storer ParamStorer) {
	if storer == nil {
		return
	}
	switch src := storer.(type) {
	case singleParamStorer:
		if src.safe {
			m.putSafeParam(src.key, src.value)
		} else {
			m.putUnsafeParam(src.key, src.value)
		}
	case *mapParamStorer:
		for k, v := range src.safeParams {
			m.putSafeParam(k, v)
		}
		for k, v := range src.unsafeParams {
			m.putUnsafeParam(k, v)
		}
	default:
		for k, v := range src.SafeParams() {
			m.putSafeParam(k, v)
		}
		for k, v := range src.UnsafeParams() {
			m.putUnsafeParam(k, v)
		}
	}
}

// singleParamStorer is an allocation-light ParamStorer holding exactly one key/value pair, classified by safe.
type singleParamStorer struct {
	key   string
	value interface{}
	safe  bool
}

// SafeParams returns the stored pair when it is safe, otherwise an empty map.
func (s singleParamStorer) SafeParams() map[string]interface{} {
	if s.safe {
		return map[string]interface{}{s.key: s.value}
	}
	return map[string]interface{}{}
}

// UnsafeParams returns the stored pair when it is unsafe, otherwise an empty map.
func (s singleParamStorer) UnsafeParams() map[string]interface{} {
	if !s.safe {
		return map[string]interface{}{s.key: s.value}
	}
	return map[string]interface{}{}
}
package budgeting
import (
"time"
"github.com/shopspring/decimal"
)
// Budget is the root type: it owns the categories, accounts, and the
// per-month budgeted amounts, plus the special "To Be Budgeted"
// category.
type Budget struct {
	Name string
	earliestMonth YearMonth // earliest month touched by SetBudgeted
	latestMonth YearMonth // latest month touched by SetBudgeted
	tbb *Category // the special "To Be Budgeted" category
	categories map[string]*Category // keyed by category uuid
	accounts []*Account
	budgeted map[YearMonth]monthBudget
}

// monthBudget records the amounts budgeted per category (keyed by
// category uuid) for a single month.
type monthBudget struct {
	Month YearMonth
	Budgeted map[string]decimal.Decimal
}
// NewBudget creates a fresh budget with only the built-in
// "To Be Budgeted" category.
func NewBudget(name string) *Budget {
	budget := &Budget{
		Name: name,
		// Sentinel bounds: earliest starts far in the future and latest
		// far in the past so the first SetBudgeted call replaces both.
		earliestMonth: YearMonth{999999, time.December},
		latestMonth:   YearMonth{0, time.January},
		categories:    map[string]*Category{},
		accounts:      []*Account{},
		budgeted:      map[YearMonth]monthBudget{},
	}
	budget.tbb = budget.AddCategory("To Be Budgeted")
	return budget
}
// AddAccount creates an account within the budget. The opening balance
// transaction is categorized under "To Be Budgeted".
func (b *Budget) AddAccount(name string, balance decimal.Decimal, date time.Time) *Account {
	acct, _ := newAccount(b, name, balance, date, b.tbb)
	b.accounts = append(b.accounts, acct)
	return acct
}
// TBBCategory returns a clone of the "To Be Budgeted" category so the
// caller cannot mutate the budget's internal instance.
func (b *Budget) TBBCategory() *Category {
	return b.tbb.clone()
}
// TBB returns the "To Be Budgeted" balance for the specified month.
// It is computed, not stored: all TBB-category transactions up to and
// including month are summed, everything budgeted up to and including
// month is subtracted, and — if the result is still positive — amounts
// budgeted in later months are subtracted too, clamped at zero.
func (b *Budget) TBB(month YearMonth) decimal.Decimal {
	tbb := zero
	var m YearMonth
	// Pass 1: sum TBB-category transactions from month backwards to
	// the earliest recorded month.
	m = month
	for {
		if b.earliestMonth.Earlier(m) {
			break
		}
		transactions := b.monthCategoryTransactions(m, b.tbb)
		for _, t := range transactions {
			tbb = tbb.Add(t.Amount)
		}
		m = m.LastMonth()
	}
	// Pass 2: subtract everything budgeted from month backwards.
	m = month
	for {
		if b.earliestMonth.Earlier(m) {
			break
		}
		budgeted := monthBudget{
			Month: m,
			Budgeted: map[string]decimal.Decimal{},
		}
		if _, ok := b.budgeted[m]; ok {
			budgeted = b.budgeted[m]
		}
		for _, v := range budgeted.Budgeted {
			tbb = tbb.Sub(v)
		}
		m = m.LastMonth()
	}
	// Pass 3: money already committed to future months reduces what is
	// still "to be budgeted" now, but never below zero.
	if tbb.GreaterThan(zero) {
		m = month.NextMonth()
		for {
			if b.latestMonth.Later(m) {
				break
			}
			budgeted := monthBudget{
				Month: m,
				Budgeted: map[string]decimal.Decimal{},
			}
			if _, ok := b.budgeted[m]; ok {
				budgeted = b.budgeted[m]
			}
			for _, v := range budgeted.Budgeted {
				tbb = tbb.Sub(v)
				if tbb.LessThan(zero) {
					tbb = zero
					break
				}
			}
			m = m.NextMonth()
		}
	}
	return tbb
}
// AddCategory creates a budgeting category and registers it by uuid.
func (b *Budget) AddCategory(name string) *Category {
	c := newCategory(name, b)
	b.categories[c.uuid] = c
	return c
}
// Activities returns how much money has been spent for the category on
// the specified month (the sum of its transactions for that month).
func (b *Budget) Activities(month YearMonth, category *Category) decimal.Decimal {
	total := zero
	for _, txn := range b.monthCategoryTransactions(month, category) {
		total = total.Add(txn.Amount)
	}
	return total
}
// Available returns the available budget balance for the category on
// the specified month: the previous month's carry-over plus this
// month's budgeted amount and activities.
func (b *Budget) Available(month YearMonth, category *Category) decimal.Decimal {
	carryOver := zero
	if !b.earliestMonth.Earlier(month) {
		// month is not before the earliest recorded month, so recurse
		// into the previous month for the carried-over balance.
		carryOver = b.Available(month.LastMonth(), category)
	}
	return carryOver.Add(b.Budgeted(month, category).Add(b.Activities(month, category)))
}
// Budgeted returns the budgeted amount for the category on the
// specified month. Months with no budget entry yield zero; the
// "To Be Budgeted" category's amount is derived via TBB rather than
// stored.
func (b *Budget) Budgeted(month YearMonth, category *Category) decimal.Decimal {
	mb, ok := b.budgeted[month]
	if !ok {
		return zero
	}
	if category.Equal(b.tbb) {
		return b.TBB(month)
	}
	return mb.Budgeted[category.uuid]
}
// SetBudgeted sets the budgeted amount for the category on the
// specified month. Setting the "To Be Budgeted" category is a no-op
// because its balance is derived (see TBB). The month's budget entry
// is created lazily, and the budget's earliest/latest month bounds are
// widened to include month.
func (b *Budget) SetBudgeted(month YearMonth, category *Category, amount decimal.Decimal) {
	if category.Equal(b.tbb) {
		return
	}
	if _, ok := b.budgeted[month]; !ok {
		b.budgeted[month] = monthBudget{
			Month: month,
			Budgeted: map[string]decimal.Decimal{},
		}
	}
	b.budgeted[month].Budgeted[category.uuid] = amount
	// Earlier/Later take the receiver as the reference point: true when
	// month lies outside the current bound.
	if b.earliestMonth.Earlier(month) {
		b.earliestMonth = month
	}
	if b.latestMonth.Later(month) {
		b.latestMonth = month
	}
}
// MoveBudgeted moves the budget balance from one category to another on
// the specified month. Both current amounts are read before either is
// written so the operation is well-defined even if from == to.
func (b *Budget) MoveBudgeted(month YearMonth, from *Category, to *Category, amount decimal.Decimal) {
	if _, ok := b.budgeted[month]; !ok {
		b.budgeted[month] = monthBudget{
			Month:    month,
			Budgeted: map[string]decimal.Decimal{},
		}
	}
	fromAmount := b.Budgeted(month, from).Sub(amount)
	toAmount := b.Budgeted(month, to).Add(amount)
	b.SetBudgeted(month, from, fromAmount)
	b.SetBudgeted(month, to, toAmount)
}
// setTransactionCategory reassigns transaction t to category c,
// keeping the account's per-category transaction index in sync.
// Transfers cannot carry a category. Passing c == nil clears the
// transaction's category.
func (b *Budget) setTransactionCategory(t *Transaction, c *Category) error {
	if t.Type() == TransactionTypeTransfer {
		return ErrCannotAssignCategoryToTransfer
	}
	// Remove t from its current category's index, if any.
	transactions := []*Transaction{}
	if t.Category() != nil {
		transactions = t.account.transactionCategory[t.Category().uuid]
		for i, tt := range transactions {
			if tt.uuid == t.uuid {
				transactions = append(transactions[:i], transactions[i+1:]...)
				break
			}
		}
		t.account.transactionCategory[t.Category().uuid] = transactions
	}
	// Add t to the new category's index.
	if c != nil {
		if _, ok := t.account.transactionCategory[c.uuid]; !ok {
			t.account.transactionCategory[c.uuid] = []*Transaction{}
		}
		t.account.transactionCategory[c.uuid] = append(t.account.transactionCategory[c.uuid], t)
	}
	t.category = c
	return nil
}
// monthCategoryTransactions collects, across all accounts, the
// transactions of category c that fall in the given month.
func (b *Budget) monthCategoryTransactions(month YearMonth, c *Category) []*Transaction {
	matched := []*Transaction{}
	// TODO: Optimize this by using lookup table
	for _, a := range b.accounts {
		// A missing map entry yields a nil slice, which ranges as empty.
		for _, txn := range a.transactionCategory[c.uuid] {
			if YearMonthFromTime(txn.Date).Equal(month) {
				matched = append(matched, txn)
			}
		}
	}
	return matched
}
// YearMonth is a helper struct for representing a month of a year
// (e.g. May 2018).
type YearMonth struct {
Year int
Month time.Month
}
// YearMonthFromTime creates a YearMonth from a time.Time.
func YearMonthFromTime(t time.Time) YearMonth {
return YearMonth{
Year: t.Year(),
Month: t.Month(),
}
}
// LastMonth returns the month immediately before m, rolling the year back
// when m is January.
func (m YearMonth) LastMonth() YearMonth {
	if m.Month == time.January {
		return YearMonth{Year: m.Year - 1, Month: time.December}
	}
	return YearMonth{Year: m.Year, Month: m.Month - 1}
}
// NextMonth returns the month immediately after m, rolling the year forward
// when m is December.
func (m YearMonth) NextMonth() YearMonth {
	if m.Month == time.December {
		return YearMonth{Year: m.Year + 1, Month: time.January}
	}
	return YearMonth{Year: m.Year, Month: m.Month + 1}
}
// Earlier reports whether other falls in an earlier month than m.
// NOTE(review): the old comment claimed the opposite ("m is earlier than
// other"), but the code compares other < m, and callers such as the
// earliestMonth tracking (b.earliestMonth.Earlier(month)) depend on this
// orientation — only the comment was wrong.
func (m YearMonth) Earlier(other YearMonth) bool {
	if other.Year < m.Year {
		return true
	}
	if other.Year > m.Year {
		return false
	}
	return int(other.Month) < int(m.Month)
}
// EarlierTime reports whether the time t falls in an earlier month than m
// (t < m — mirroring Earlier's orientation, not the old comment's claim).
func (m YearMonth) EarlierTime(t time.Time) bool {
	if t.Year() < m.Year {
		return true
	}
	if t.Year() > m.Year {
		return false
	}
	return int(t.Month()) < int(m.Month)
}
// Later reports whether other falls in a later month than m.
// NOTE(review): the old comment claimed the opposite ("m is later than
// other"), but the code compares other > m, and callers such as the
// latestMonth tracking (b.latestMonth.Later(month)) depend on this
// orientation — only the comment was wrong.
func (m YearMonth) Later(other YearMonth) bool {
	if other.Year > m.Year {
		return true
	}
	if other.Year < m.Year {
		return false
	}
	return int(other.Month) > int(m.Month)
}
// LaterTime reports whether the time t falls in a later month than m
// (t > m — mirroring Later's orientation, not the old comment's claim).
func (m YearMonth) LaterTime(t time.Time) bool {
	if t.Year() > m.Year {
		return true
	}
	if t.Year() < m.Year {
		return false
	}
	return int(t.Month()) > int(m.Month)
}
// Equal returns true if the both years and months are equal.
func (m YearMonth) Equal(other YearMonth) bool {
return m.Year == other.Year && m.Month == other.Month
} | budgeting/budget.go | 0.7413 | 0.532425 | budget.go | starcoder |
package router
import (
"math"
"sync"
"time"
)
// Limit defines the maximum frequency of router events, represented as number of events per second.
// A zero Limit allows no events (unless the limit is Inf).
type Limit float64

// Inf is the infinite rate limit; it allows all events (even if burst is zero).
const Inf = Limit(math.MaxFloat64)
// A Limiter controls how frequently events are allowed to happen.
// It implements a token bucket of size `burst`, refilled at `limit` tokens
// per second. All mutable fields are guarded by mu.
type Limiter struct {
	mu    sync.Mutex
	limit Limit
	burst int
	// tokens is the number of tokens currently available; reserveN may drive
	// it negative when a reservation borrows against future refill.
	tokens float64
	// last is the last time the limiter's tokens field was updated
	last time.Time
	// lastEvent is the latest time of a rate-limited event (past or future)
	lastEvent time.Time
}
// A Reservation holds information about events that are permitted by a Limiter to happen after a delay.
type Reservation struct {
	ok        bool      // whether the limiter could satisfy the request
	lim       *Limiter  // the limiter that issued this reservation
	tokens    int       // number of tokens consumed
	timeToAct time.Time // earliest time the reserved events may happen
	limit     Limit     // the limit in force when the reservation was made
}
// NewLimiter returns a new Limiter that allows events up to rate r and permits
// bursts of at most b tokens.
// The tokens field starts at zero, but because `last` is the zero time the
// first call to advance treats the elapsed time as maximal (capped by
// maxElapsed) and fills the bucket to b.
func NewLimiter(r Limit, b int) *Limiter {
	return &Limiter{
		limit: r,
		burst: b,
	}
}
// Allow is shorthand for AllowN(time.Now(), 1).
func (lim *Limiter) Allow() bool {
	return lim.AllowN(time.Now(), 1)
}

// AllowN reports whether n events may happen at time now.
// It never waits: maxFutureReserve is zero, so the request succeeds only if
// n tokens are available right now.
func (lim *Limiter) AllowN(now time.Time, n int) bool {
	return lim.reserveN(now, n, 0).ok
}
// reserveN is a helper method for AllowN, ReserveN, and WaitN.
// It attempts to take n tokens at time now, allowing the action to be delayed
// by at most maxFutureReserve. On success the limiter's state is committed;
// on failure only the time-advanced `last` is stored.
func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
	lim.mu.Lock()
	// An infinite limit admits everything immediately, regardless of burst.
	if lim.limit == Inf {
		lim.mu.Unlock()
		return Reservation{
			ok:        true,
			lim:       lim,
			tokens:    n,
			timeToAct: now,
		}
	}
	now, last, tokens := lim.advance(now)
	// Calculate the remaining number of tokens resulting from the request.
	tokens -= float64(n)
	// Calculate the wait duration
	var waitDuration time.Duration
	if tokens < 0 {
		waitDuration = lim.limit.durationFromTokens(-tokens)
	}
	// Decide result
	ok := n <= lim.burst && waitDuration <= maxFutureReserve
	// Prepare reservation
	r := Reservation{
		ok:    ok,
		lim:   lim,
		limit: lim.limit,
	}
	if ok {
		r.tokens = n
		r.timeToAct = now.Add(waitDuration)
	}
	// Update state only when the reservation succeeded; a failed request
	// must not consume tokens.
	if ok {
		lim.last = now
		lim.tokens = tokens
		lim.lastEvent = r.timeToAct
	} else {
		lim.last = last
	}
	lim.mu.Unlock()
	return r
}
// advance calculates and returns an updated state for lim resulting from the passage of time.
// It does not mutate lim; the caller decides whether to commit the result.
func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
	last := lim.last
	// Guard against clocks moving backwards (or a zero `last`).
	if now.Before(last) {
		last = now
	}
	// Avoid making delta overflow below when last is very old.
	maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
	elapsed := now.Sub(last)
	if elapsed > maxElapsed {
		elapsed = maxElapsed
	}
	// Calculate the new number of tokens, due to time that passed.
	delta := lim.limit.tokensFromDuration(elapsed)
	tokens := lim.tokens + delta
	// Never exceed the bucket capacity.
	if burst := float64(lim.burst); tokens > burst {
		tokens = burst
	}
	return now, last, tokens
}
// durationFromTokens is a unit conversion function from the number of tokens
// to the duration of time it takes to accumulate them at a rate of limit
// tokens per second.
func (limit Limit) durationFromTokens(tokens float64) time.Duration {
	return time.Duration(1e9 * (tokens / float64(limit)))
}
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
// which could be accumulated during that duration at a rate of limit tokens per second.
func (limit Limit) tokensFromDuration(d time.Duration) float64 {
// Split the integer and fractional parts ourself to minimize rounding errors.
sec := float64(d/time.Second) * float64(limit)
nsec := float64(d%time.Second) * float64(limit)
return sec + nsec/1e9
} | internal/service/router/rate.go | 0.797793 | 0.492615 | rate.go | starcoder |
package main
// DataType is a models data structure: anything that can describe the type of
// a database value (a primitive, a column reference, or an enum).
type DataType interface {
	Type() string
}
// Model is the full db model structure and configuration.
// The model contains all the necessary information for
// creating query builders.
type Model struct {
	Driver string     // database driver/dialect identifier
	Pkg    string     // target package for generated code
	Tables []*Table   // all tables in the model
	Types  []DataType // all known data types (columns, enums, primitives)
}
// Table is a definition of a database table, identified by its name.
type Table string

// String is the stringer implementation; it returns the bare table name.
func (x Table) String() string {
	return string(x)
}
// Columns returns the columns with a reference to this specific table
func (x *Table) Columns(types []DataType) (c []*Column) {
for _, typ := range types {
if col, ok := typ.(*Column); ok && col.Table == x {
c = append(c, col)
}
}
return
}
// Enum returns the enum values if the type is an enum.
// When no enum is associated with this table, the zero Enum (nil Table,
// no values) is returned.
func (x *Table) Enum(types []DataType) Enum {
	for _, typ := range types {
		if enu, ok := typ.(*Enum); ok && enu.Table == x {
			return *enu
		}
	}
	return Enum{}
}
// Column contains the table column properties and
// lies at the core of the query builder.
// Note that if the datatype is of type column that
// the column is a foreign key.
type Column struct {
	Table       *Table   // owning table
	Name        string   // column name
	DataType    DataType // resolved type; a *Column here denotes a foreign key
	Size        int
	Default     string
	Nullable    bool
	Unique      bool
	Primary     bool
	Constraints []string
	// rawType is the unresolved type string from the model definition;
	// Model.GetType resolves it into DataType.
	rawType string
}
// Type is the datatype implementation; it delegates to the resolved DataType
// (which must have been populated by Model.GetType first).
func (x Column) Type() string {
	return x.DataType.Type()
}

// String returns the qualified "table.column" form of the column.
func (x Column) String() string {
	return string(*x.Table) + `.` + x.Name
}
// Enum is an enumerated type which can be
// defined as a type in the database.
type Enum struct {
	Table  *Table   // the table/type this enum belongs to
	Values []string // the allowed values
}
// Type is the DataType implementation; enums are stored as varchar.
func (x Enum) Type() string {
	return getPrimitiveType(`varchar`).Type()
}
// PrimitiveType is a primitive database type such as varchar or int.
type PrimitiveType string

// Type is the datatype implementation; the type name is the value itself.
func (x PrimitiveType) Type() string {
	return string(x)
}
// GetType resolves c.rawType into a concrete DataType and stores it on the
// column. Resolution order: primitive types first, then foreign-key
// references to another column ("table.column" or "table(column)"), then
// enums (matched by table name). An unresolvable type is a fatal error.
func (x Model) GetType(c *Column) {
	if p := getPrimitiveType(c.rawType); p != nil {
		c.DataType = p
		return
	}
	for _, typ := range x.Types {
		if col, ok := typ.(*Column); ok && col != nil &&
			(c.rawType == col.Table.String()+`.`+col.Name ||
				c.rawType == col.Table.String()+`(`+col.Name+`)`) {
			c.DataType = col
			return
		} else if enum, ok := typ.(*Enum); ok && enum != nil &&
			c.rawType == string(*enum.Table) {
			c.DataType = enum
			return
		}
	}
	// Use an interpreted string literal so \n is a real newline; the original
	// raw (backquoted) string printed a literal backslash-n.
	fatal("Type not found: %s\n", c.rawType)
}
// getPrimativeType returns the primative type matching the
// given query
func getPrimitiveType(i string) DataType {
switch i {
case `string`, `char`, `character`, `charactering varying`:
return getPrimitiveType(`varchar`)
case `integer`:
return getPrimitiveType(`int`)
case `real`:
return getPrimitiveType(`float`)
case `time`, `datetime`:
return getPrimitiveType(`timestamp`)
case `bool`:
return getPrimitiveType(`boolean`)
case `varchar`, `text`, `int`, `tinyint`, `smallint`,
`bigint`, `double`, `float`, `date`, `timestamp`, `boolean`:
return PrimitiveType(i)
default:
return nil
}
} | datatype.go | 0.785925 | 0.537345 | datatype.go | starcoder |
package ast
import (
"strings"
)
// Format formats the spacing in the Grammar: the top pattern, each pattern
// declaration, and any trailing space are normalized in order.
func (this *Grammar) Format() {
	if this.TopPattern != nil {
		this.TopPattern.format(false)
	}
	for i, p := range this.PatternDecls {
		// Every declaration after the first (or after a top pattern) is
		// separated by a leading space.
		p.format(i != 0 || this.TopPattern != nil)
	}
	this.After.format(false)
}

// format normalizes spacing inside a single pattern declaration.
func (this *PatternDecl) format(space bool) {
	this.Hash.format(space)
	this.Before.format(false)
	this.Eq.format(true)
	this.Pattern.format(true)
}
// Format formats the spacing in the Pattern.
func (this *Pattern) Format() {
	this.format(false)
}

// format dispatches to the concrete pattern variant's format method; the
// space flag tells the variant whether it needs a leading space.
func (this *Pattern) format(space bool) {
	v := this.GetValue()
	v.(interface {
		format(bool)
	}).format(space)
}

// format normalizes spacing for the empty pattern.
func (this *Empty) format(space bool) {
	this.Empty.format(space)
}

// format normalizes spacing for a tree-node pattern; the colon is optional,
// and the child pattern only gets a leading space when there is no colon.
func (this *TreeNode) format(space bool) {
	this.Name.format(space)
	if this.Colon != nil {
		this.Colon.format(false)
	}
	this.Pattern.format(this.Colon == nil)
}

// format normalizes spacing for a contains pattern (dot-prefixed).
func (this *Contains) format(space bool) {
	this.Dot.format(space)
	this.Pattern.format(false)
}
// format normalizes spacing for a leaf-node expression.
func (this *LeafNode) format(space bool) {
	this.Expr.format(space)
}

// format normalizes spacing for a concat pattern [left, right]; when the
// optional trailing comma is present the closing bracket needs no space.
func (this *Concat) format(space bool) {
	this.OpenBracket.format(space)
	this.LeftPattern.format(false)
	this.Comma.format(false)
	this.RightPattern.format(true)
	if this.ExtraComma != nil {
		this.ExtraComma.format(false)
	}
	this.CloseBracket.format(this.ExtraComma == nil)
}

// format normalizes spacing for an or pattern (left | right).
func (this *Or) format(space bool) {
	this.OpenParen.format(space)
	this.LeftPattern.format(false)
	this.Pipe.format(true)
	this.RightPattern.format(true)
	this.CloseParen.format(false)
}

// format normalizes spacing for an and pattern (left & right).
func (this *And) format(space bool) {
	this.OpenParen.format(space)
	this.LeftPattern.format(false)
	this.Ampersand.format(true)
	this.RightPattern.format(true)
	this.CloseParen.format(false)
}

// format normalizes spacing for a zero-or-more pattern (p)*.
func (this *ZeroOrMore) format(space bool) {
	this.OpenParen.format(space)
	this.Pattern.format(false)
	this.CloseParen.format(false)
	this.Star.format(false)
}

// format normalizes spacing for a reference pattern (@name).
func (this *Reference) format(space bool) {
	this.At.format(space)
}

// format normalizes spacing for a negation pattern !(p).
func (this *Not) format(space bool) {
	this.Exclamation.format(space)
	this.OpenParen.format(false)
	this.Pattern.format(false)
	this.CloseParen.format(false)
}

// format normalizes spacing for the any pattern (*).
func (this *ZAny) format(space bool) {
	this.Star.format(space)
}

// format normalizes spacing for an optional pattern (p)?.
func (this *Optional) format(space bool) {
	this.OpenParen.format(space)
	this.Pattern.format(false)
	this.CloseParen.format(false)
	this.QuestionMark.format(false)
}

// format normalizes spacing for an interleave pattern {left; right}; when the
// optional trailing semicolon is present the closing brace needs no space.
func (this *Interleave) format(space bool) {
	this.OpenCurly.format(space)
	this.LeftPattern.format(false)
	this.SemiColon.format(false)
	this.RightPattern.format(true)
	if this.ExtraSemiColon != nil {
		this.ExtraSemiColon.format(false)
	}
	this.CloseCurly.format(this.ExtraSemiColon == nil)
}
// format normalizes spacing for an expression. Exactly one of the optional
// leading tokens (right arrow or comma) consumes the leading space, after
// which the single populated variant is formatted.
func (this *Expr) format(space bool) {
	if this.RightArrow != nil {
		this.RightArrow.format(space)
		space = false
	} else if this.Comma != nil {
		this.Comma.format(space)
		space = false
	}
	if this.Terminal != nil {
		this.Terminal.format(space)
	} else if this.List != nil {
		this.List.format(space)
	} else if this.Function != nil {
		this.Function.format(space)
	} else if this.BuiltIn != nil {
		this.BuiltIn.format(space)
	}
}

// format dispatches to the concrete name-expression variant.
func (this *NameExpr) format(space bool) {
	v := this.GetValue()
	v.(interface {
		format(bool)
	}).format(space)
}

// format normalizes spacing for a plain name.
func (this *Name) format(space bool) {
	this.Before.format(space)
}

// format normalizes spacing for the any-name pattern (_).
func (this *AnyName) format(space bool) {
	this.Underscore.format(space)
}

// format normalizes spacing for an any-name-except pattern !(except).
func (this *AnyNameExcept) format(space bool) {
	this.Exclamation.format(space)
	this.OpenParen.format(false)
	this.Except.format(false)
	this.CloseParen.format(false)
}

// format normalizes spacing for a name choice (left | right).
func (this *NameChoice) format(space bool) {
	this.OpenParen.format(space)
	this.Left.format(false)
	this.Pipe.format(false)
	this.Right.format(false)
	this.CloseParen.format(false)
}

// format normalizes spacing for a list expression; every element after the
// first gets a leading space.
func (this *List) format(space bool) {
	this.Before.format(space)
	this.OpenCurly.format(false)
	for i := range this.Elems {
		this.Elems[i].format(i != 0)
	}
	this.CloseCurly.format(false)
}

// format normalizes spacing for a function call; every parameter after the
// first gets a leading space.
func (this *Function) format(space bool) {
	this.Before.format(space)
	this.OpenParen.format(false)
	for i := range this.Params {
		this.Params[i].format(i != 0)
	}
	this.CloseParen.format(false)
}

// format normalizes spacing for a built-in operator and its operand.
func (this *BuiltIn) format(space bool) {
	this.Symbol.format(space)
	this.Expr.format(true)
}

// format normalizes spacing for a terminal.
func (this *Terminal) format(space bool) {
	this.Before.format(space)
}

// format normalizes spacing for a keyword; nil keywords are a no-op.
func (this *Keyword) format(space bool) {
	if this == nil {
		return
	}
	this.Before.format(space)
}
// Format formats the spacing in the Space.
func (this *Space) Format() {
	this.format(false)
}

// format rewrites the stored space strings via formatSpace and, when space is
// true, appends either a newline (if the original trailing chunk contained
// one) or a single blank.
func (this *Space) format(space bool) {
	// Optional space nodes are commonly nil; treat that as a no-op.
	if this == nil {
		return
	}
	newSpace := formatSpace(this.Space)
	if space {
		if len(this.Space) > 0 && strings.Contains(this.Space[len(this.Space)-1], "\n") {
			newSpace = append(newSpace, "\n")
		} else {
			newSpace = append(newSpace, " ")
		}
	}
	this.Space = newSpace
}
// state tracks what kind of token the space formatter last emitted while
// normalizing a run of whitespace/comment strings.
type state int

// The states are fixed enum values, so declare them as constants rather than
// mutable package-level variables.
const (
	startState        state = iota // nothing emitted yet
	lineCommentState               // last emitted a line comment
	blockCommentState              // last emitted a block comment
)
func formatSpace(spaces []string) []string {
newlines := 0
current := startState
formatted := []string{}
for _, space := range spaces {
for _, c := range space {
if c == '\n' {
newlines++
}
}
if isComment(space) {
comment := strings.TrimSpace(space)
if isLineComment(space) {
comment = comment + "\n"
}
switch current {
case startState:
formatted = append(formatted, comment)
case lineCommentState:
if newlines >= 2 {
formatted = append(formatted, "\n")
}
formatted = append(formatted, comment)
case blockCommentState:
if newlines >= 2 {
formatted = append(formatted, "\n\n")
}
formatted = append(formatted, comment)
}
if isLineComment(space) {
current = lineCommentState
newlines = 1
} else {
current = blockCommentState
newlines = 0
}
}
}
return formatted
} | relapse/ast/format.go | 0.578329 | 0.509032 | format.go | starcoder |
package iso20022
// Amount of money for which goods or services are offered, sold, or bought.
// All fields marked omitempty are optional and nil when absent.
type UnitPrice22 struct {
	// Type and information about a price.
	Type *TypeOfPrice46Choice `xml:"Tp"`
	// Value of the price, for example, as a currency and value.
	Value *PriceValue1 `xml:"Val"`
	// Type of pricing calculation method.
	PriceMethod *PriceMethod1Code `xml:"PricMtd,omitempty"`
	// Specifies the number of days used for calculating the accrued interest amount.
	NumberOfDaysAccrued *Number `xml:"NbOfDaysAcrd,omitempty"`
	// Amount included in the NAV that corresponds to gains directly or indirectly derived from interest payment in the scope of the European Directive on taxation of savings income in the form of interest payments.
	TaxableIncomePerShare *ActiveCurrencyAnd13DecimalAmount `xml:"TaxblIncmPerShr,omitempty"`
	// Specifies whether the fund calculates a taxable interest per share (TIS).
	TaxableIncomePerShareCalculated *TaxableIncomePerShareCalculated2Choice `xml:"TaxblIncmPerShrClctd,omitempty"`
	// Reason why the price is different from the current market price.
	PriceDifferenceReason *Max350Text `xml:"PricDiffRsn,omitempty"`
}
// AddType allocates the price-type choice and returns it for population.
func (u *UnitPrice22) AddType() *TypeOfPrice46Choice {
	u.Type = new(TypeOfPrice46Choice)
	return u.Type
}

// AddValue allocates the price value and returns it for population.
func (u *UnitPrice22) AddValue() *PriceValue1 {
	u.Value = new(PriceValue1)
	return u.Value
}

// SetPriceMethod sets the pricing calculation method code.
func (u *UnitPrice22) SetPriceMethod(value string) {
	u.PriceMethod = (*PriceMethod1Code)(&value)
}

// SetNumberOfDaysAccrued sets the number of days used for accrued interest.
func (u *UnitPrice22) SetNumberOfDaysAccrued(value string) {
	u.NumberOfDaysAccrued = (*Number)(&value)
}

// SetTaxableIncomePerShare sets the taxable income per share amount and its currency.
func (u *UnitPrice22) SetTaxableIncomePerShare(value, currency string) {
	u.TaxableIncomePerShare = NewActiveCurrencyAnd13DecimalAmount(value, currency)
}

// AddTaxableIncomePerShareCalculated allocates the TIS choice and returns it.
func (u *UnitPrice22) AddTaxableIncomePerShareCalculated() *TaxableIncomePerShareCalculated2Choice {
	u.TaxableIncomePerShareCalculated = new(TaxableIncomePerShareCalculated2Choice)
	return u.TaxableIncomePerShareCalculated
}
func (u *UnitPrice22) SetPriceDifferenceReason(value string) {
u.PriceDifferenceReason = (*Max350Text)(&value)
} | UnitPrice22.go | 0.84367 | 0.458046 | UnitPrice22.go | starcoder |
package htsformats
import (
"strings"
)
// SamRecord holds all fields and tags from a single alignment line of a SAM
// file. The eleven mandatory columns are stored individually as well as in
// the fields slice; optional tags are kept in insertion order via tagKeys.
type SamRecord struct {
	raw     string            // the unmodified input line
	qname   string            // query template name (column 1)
	flag    string            // bitwise flag (column 2)
	rname   string            // reference sequence name (column 3)
	pos     string            // mapping position (column 4)
	mapq    string            // mapping quality (column 5)
	cigar   string            // CIGAR string (column 6)
	rnext   string            // reference of mate/next read (column 7)
	pnext   string            // position of mate/next read (column 8)
	tlen    string            // template length (column 9)
	seq     string            // segment sequence (column 10)
	qual    string            // base qualities (column 11)
	fields  []string          // all tab-separated columns, including tags
	tagKeys []string          // tag keys in the order they were parsed
	tags    map[string]string // tag key -> full tag string
}

// NewSamRecord constructs a SamRecord from a single alignment line; the line
// must contain at least the eleven mandatory tab-separated columns.
func NewSamRecord(raw string) *SamRecord {
	cols := strings.Split(raw, "\t")
	rec := &SamRecord{
		raw:     raw,
		qname:   cols[0],
		flag:    cols[1],
		rname:   cols[2],
		pos:     cols[3],
		mapq:    cols[4],
		cigar:   cols[5],
		rnext:   cols[6],
		pnext:   cols[7],
		tlen:    cols[8],
		seq:     cols[9],
		qual:    cols[10],
		fields:  cols,
		tagKeys: []string{},
		tags:    map[string]string{},
	}
	// Everything beyond column 11 is an optional tag of the form KEY:TYPE:VALUE;
	// record keys in order and map each key to its full tag string.
	for _, tagval := range cols[11:] {
		key := strings.SplitN(tagval, ":", 2)[0]
		rec.tagKeys = append(rec.tagKeys, key)
		rec.tags[key] = tagval
	}
	return rec
}

// emitFields emits the eleven mandatory SAM columns without modification.
func (samRecord *SamRecord) emitFields() []string {
	out := make([]string, 0, 11)
	out = append(out, samRecord.fields[:11]...)
	return out
}

// getField retrieves the field value at the given column position
// (0-10 inclusive).
func (samRecord *SamRecord) getField(col int) string {
	return samRecord.fields[col]
}

// emitTags emits all tags without modification, in parse order.
func (samRecord *SamRecord) emitTags() []string {
	out := make([]string, 0, len(samRecord.tagKeys))
	for _, key := range samRecord.tagKeys {
		out = append(out, samRecord.tags[key])
	}
	return out
}

// getTag gets a parsed tag value by its two-letter tag key; missing keys
// yield the empty string.
func (samRecord *SamRecord) getTag(key string) string {
	return samRecord.tags[key]
}

// String gets a short string representation of the SamRecord.
func (samRecord *SamRecord) String() string {
	return "[SamRecord qname=" + samRecord.qname + "]"
}
package main
import (
"math"
)
// vec3 is a 3D vector with float64 components.
type vec3 struct {
	x float64
	y float64
	z float64
}

// vec3Scale returns vec with every component multiplied by factor.
func vec3Scale(vec vec3, factor float64) vec3 {
	return vec3{vec.x * factor, vec.y * factor, vec.z * factor}
}

// vec3Len returns the Euclidean length of vec.
func vec3Len(vec vec3) float64 {
	return math.Sqrt(vec3Dot(vec, vec))
}

// vec3Add returns the component-wise sum of the two vectors.
func vec3Add(first vec3, second vec3) vec3 {
	return vec3{first.x + second.x, first.y + second.y, first.z + second.z}
}

// vec3Sub returns the component-wise difference first - second.
func vec3Sub(first vec3, second vec3) vec3 {
	return vec3{first.x - second.x, first.y - second.y, first.z - second.z}
}

// vec3Dot returns the dot product of the two vectors.
func vec3Dot(first vec3, second vec3) float64 {
	return first.x*second.x + first.y*second.y + first.z*second.z
}

// vec3Neg returns the component-wise negation of vec.
func vec3Neg(vec vec3) vec3 {
	return vec3{-vec.x, -vec.y, -vec.z}
}

// directionMatrixMultiplication applies the 3x3 matrix mat to the direction
// vector dir (row-vector times column convention: result_i = mat[i] . dir).
func directionMatrixMultiplication(mat [3][3]float64, dir vec3) vec3 {
	return vec3{
		x: mat[0][0]*dir.x + mat[0][1]*dir.y + mat[0][2]*dir.z,
		y: mat[1][0]*dir.x + mat[1][1]*dir.y + mat[1][2]*dir.z,
		z: mat[2][0]*dir.x + mat[2][1]*dir.y + mat[2][2]*dir.z,
	}
}
// color is an 8-bit-per-channel RGB color.
type color struct {
	r byte
	g byte
	b byte
}

// scaleColor multiplies each channel of col by factor, saturating at 255.
func scaleColor(col color, factor float64) color {
	return color{
		r: clampChannel(float64(col.r) * factor),
		g: clampChannel(float64(col.g) * factor),
		b: clampChannel(float64(col.b) * factor),
	}
}

// sumColors adds the two colors channel-wise, saturating at 255.
func sumColors(col1 color, col2 color) color {
	return color{
		r: clampChannel(float64(col1.r) + float64(col2.r)),
		g: clampChannel(float64(col1.g) + float64(col2.g)),
		b: clampChannel(float64(col1.b) + float64(col2.b)),
	}
}

// clampChannel converts v to a byte, capping it at 255.
func clampChannel(v float64) byte {
	return byte(math.Min(255, v))
}
// sphere is a renderable sphere with its material properties.
type sphere struct {
	center vec3
	radius float64
	color color
	specular int // -1 represents matte object
	reflective float64 // 0 = not reflective, 1 = completely reflective
}

// solutions holds the two roots of the ray/sphere intersection quadratic.
type solutions struct {
	first float64
	second float64
}
// Cap rendering at roughly 60 fps
const msPerFrame = 16

// Canvas size in pixels.
const windowWidth int = 1000
const windowHeight int = 1000

// Viewport (projection plane) size in scene units and its distance from
// the camera.
const viewportWidth int = 1
const viewportHeight int = 1
const distCameraToProjectionPlane float64 = 1

// Scene definition: background color plus four spheres (the huge yellow
// sphere centered far below the origin approximates a ground plane).
var backgroundColor = color{0x00, 0x00, 0x00}
var sphere1 = sphere{vec3{0, -1, 3}, 1, color{255, 0, 0}, 500, 0.2}
var sphere2 = sphere{vec3{2, 0, 4}, 1, color{0, 0, 255}, 500, 0.3}
var sphere3 = sphere{vec3{-2, 0, 4}, 1, color{0, 255, 0}, 10, 0.4}
var sphere4 = sphere{vec3{0, -5001, 0}, 5000, color{255, 255, 0}, 1000, 0.5}
var shapes = [...]sphere{sphere1, sphere2, sphere3, sphere4}
// canvasToViewport maps a canvas pixel coordinate to the corresponding point
// on the projection plane in scene space.
func canvasToViewport(x int, y int) vec3 {
	return vec3{
		x: float64(x) * float64(viewportWidth) / float64(windowWidth),
		y: float64(y) * float64(viewportHeight) / float64(windowHeight),
		z: distCameraToProjectionPlane,
	}
}
// intersectRaySphere solves |origin + t*direction - center|^2 = r^2 for t and
// returns both roots; (+Inf, +Inf) means the ray misses the sphere.
func intersectRaySphere(origin vec3, direction vec3, sp sphere) solutions {
	r := sp.radius
	co := vec3Sub(origin, sp.center)
	// Quadratic coefficients of a*t^2 + b*t + c = 0.
	a := vec3Dot(direction, direction)
	b := 2 * vec3Dot(co, direction)
	c := vec3Dot(co, co) - r*r
	discriminant := b*b - 4*a*c
	if discriminant < 0 {
		return solutions{math.Inf(0), math.Inf(0)}
	}
	t1 := (-b + math.Sqrt(discriminant)) / (2 * a)
	t2 := (-b - math.Sqrt(discriminant)) / (2 * a)
	return solutions{t1, t2}
}
// closestIntersection finds the nearest sphere from the global shapes list
// hit by the ray within (tMin, tMax). It returns nil and +Inf when nothing
// is hit.
func closestIntersection(origin vec3, direction vec3, tMin float64, tMax float64) (*sphere, float64) {
	closestT := math.Inf(0)
	var closestSphere *sphere = nil
	for i := 0; i < len(shapes); i++ {
		// sp is a fresh per-iteration copy, so taking its address below is
		// safe across iterations.
		sp := shapes[i]
		sols := intersectRaySphere(origin, direction, sp)
		t1 := sols.first
		t2 := sols.second
		if t1 > tMin && t1 < tMax && t1 < closestT {
			closestT = t1
			closestSphere = &sp
		}
		if t2 > tMin && t2 < tMax && t2 < closestT {
			closestT = t2
			closestSphere = &sp
		}
	}
	return closestSphere, closestT
}
// calculateReflectedRay mirrors ray about normal: 2*(N.R)*N - R.
func calculateReflectedRay(ray vec3, normal vec3) vec3 {
	projection := vec3Scale(normal, 2*vec3Dot(normal, ray))
	return vec3Sub(projection, ray)
}
// traceRay returns the color seen along the ray origin + t*direction for
// t in (tMin, tMax), recursing up to recursionDepth levels for reflections.
func traceRay(origin vec3, direction vec3, tMin float64, tMax float64, recursionDepth int) color {
	closestSphere, closestT := closestIntersection(origin, direction, tMin, tMax)
	if closestSphere == nil {
		return backgroundColor
	}
	// Surface point and outward unit normal at the hit.
	position := vec3Add(origin, vec3Scale(direction, closestT))
	normal := vec3Sub(position, closestSphere.center)
	normal = vec3Scale(normal, 1/vec3Len(normal))
	localColor := scaleColor(closestSphere.color, computeLighting(position, normal, vec3Neg(direction), closestSphere.specular))
	// Handle reflections
	reflectiveness := closestSphere.reflective
	if reflectiveness <= 0 || recursionDepth <= 0 {
		return localColor
	}
	// The recursive tMin of 0.001 avoids self-intersection at the surface.
	reflectedRay := calculateReflectedRay(vec3Neg(direction), normal)
	reflectedColor := traceRay(position, reflectedRay, 0.001, math.Inf(0), recursionDepth-1)
	// Blend local and reflected contributions by the reflectiveness factor.
	localColorContribution := scaleColor(localColor, 1-reflectiveness)
	reflectedColorContribution := scaleColor(reflectedColor, reflectiveness)
	return sumColors(localColorContribution, reflectedColorContribution)
}
// putPixel writes color into the RGBA screen buffer at canvas coordinate
// (x, y), where (0,0) is the center of the canvas
// (-(Width/2), -(Height/2) is the bottom left).
func putPixel(screen *[windowWidth * windowHeight * 4]byte, color color, x int, y int) {
	screenX := (windowWidth / 2) + x
	screenY := (windowHeight / 2) - y - 1
	base := (screenY*windowWidth + screenX) * 4
	screen[base] = color.r
	screen[base+1] = color.g
	screen[base+2] = color.b
	screen[base+3] = 0xFF
	// NOTE(review): the original additionally executed `screen[0] = 0xFF`
	// here, unconditionally forcing the first pixel's red channel on every
	// call regardless of (x, y). That write is unrelated to the pixel being
	// drawn and looks like leftover debugging code, so it was removed.
}
func rayTraceFrame(screen *[windowWidth * windowHeight * 4]byte, recursionDepth int) {
// var origin = vec3{0, 0, 0}
var cameraPosition = vec3{3, 0, 1}
var cameraOrientation = [3][3]float64{
{0.7071, 0, -0.7071},
{0, 1, 0},
{0.7071, 0, 0.7071},
}
for x := -(windowWidth / 2); x < (windowWidth / 2); x++ {
for y := -(windowHeight / 2); y < (windowHeight / 2); y++ {
direction := directionMatrixMultiplication(cameraOrientation, canvasToViewport(x, y))
color := traceRay(cameraPosition, direction, 1, math.Inf(0), recursionDepth)
putPixel(screen, color, x, y)
}
}
} | ray/ray.go | 0.893298 | 0.486575 | ray.go | starcoder |
package gohome
import (
"github.com/PucklaMotzer09/mathgl/mgl32"
"image/color"
)
// Renderer handles every low level rendering operation.
type Renderer interface {
	// Initialises the renderer
	Init() error
	// Gets called after the initialisation of the engine
	AfterInit()
	// Cleans everything up
	Terminate()
	// Clears the screen with the given color
	ClearScreen(c color.Color)
	// Loads a shader given the contents of shaders
	LoadShader(name, vertex_contents, fragment_contents, geometry_contents, tesselletion_control_contents, eveluation_contents, compute_contents string) (Shader, error)
	// Creates a texture without data
	CreateTexture(name string, multiSampled bool) Texture
	// Creates a Mesh2D
	CreateMesh2D(name string) Mesh2D
	// Creates a Mesh3D
	CreateMesh3D(name string) Mesh3D
	// Creates a RenderTexture from the given parameters
	CreateRenderTexture(name string, width, height, textures int, depthBuffer, multiSampled, shadowMap, cubeMap bool) RenderTexture
	// Creates a cube map
	CreateCubeMap(name string) CubeMap
	// Creates an instanced mesh 3d
	CreateInstancedMesh3D(name string) InstancedMesh3D
	// Creates a shape 3d interface
	CreateShape3DInterface(name string) Shape3DInterface
	// Creates a shape 2d interface
	CreateShape2DInterface(name string) Shape2DInterface
	// Enables or disables wire frame render mode
	SetWireFrame(b bool)
	// Sets the current viewport for the GPU
	SetViewport(viewport Viewport)
	// Returns the current viewport of the GPU
	GetViewport() Viewport
	// Sets the resolution of the back buffer
	SetNativeResolution(width, height int)
	// Returns the resolution of the back buffer
	GetNativeResolution() mgl32.Vec2
	// Gets called when the window resizes
	OnResize(newWidth, newHeight int)
	// Gets called before rendering a RenderObject
	PreRender()
	// Gets called after rendering a RenderObject
	AfterRender()
	// Sets the clear color
	SetBackgroundColor(bgColor color.Color)
	// Returns the clear color
	GetBackgroundColor() color.Color
	// Returns the name of the renderer
	GetName() string
	// Calls the draw methods of the back buffer
	RenderBackBuffer()
	// Enable or disable back face culling.
	// NOTE(review): the method name is misspelled ("Bacck"); renaming it
	// would break every existing implementation, so only flagging it here.
	SetBacckFaceCulling(b bool)
	// Enable or disable depth testing
	SetDepthTesting(b bool)
	// Returns the maximum number of textures supported by the GPU
	GetMaxTextures() int
	// Increments the texture unit used for textures
	NextTextureUnit() uint32
	// Decrements the texture unit used for textures
	DecrementTextureUnit(amount uint32)
	// Returns whether the given function is supported by the hardware
	HasFunctionAvailable(name string) bool
	// Returns a InstancedMesh3D created from an already loaded Mesh3D
	InstancedMesh3DFromLoadedMesh3D(mesh Mesh3D) InstancedMesh3D
}

// Render is the Renderer that should be used for everything.
var Render Renderer
// An implementation of Renderer that does nothing
type NilRenderer struct {
}
func (*NilRenderer) Init() error {
return nil
}
func (*NilRenderer) AfterInit() {
}
func (*NilRenderer) Terminate() {
}
func (*NilRenderer) ClearScreen(c color.Color) {
}
func (*NilRenderer) LoadShader(name, vertex_contents, fragment_contents, geometry_contents, tesselletion_control_contents, eveluation_contents, compute_contents string) (Shader, error) {
return &NilShader{}, nil
}
func (*NilRenderer) CreateTexture(name string, multiSampled bool) Texture {
return &NilTexture{}
}
func (*NilRenderer) CreateMesh2D(name string) Mesh2D {
return &NilMesh2D{}
}
func (*NilRenderer) CreateMesh3D(name string) Mesh3D {
return &NilMesh3D{}
}
func (*NilRenderer) CreateRenderTexture(name string, width, height, textures int, depthBuffer, multiSampled, shadowMap, cubeMap bool) RenderTexture {
return &NilRenderTexture{}
}
func (*NilRenderer) CreateCubeMap(name string) CubeMap {
return &NilCubeMap{}
}
func (*NilRenderer) CreateInstancedMesh3D(name string) InstancedMesh3D {
return &NilInstancedMesh3D{}
}
func (*NilRenderer) CreateShape3DInterface(name string) Shape3DInterface {
return &NilShape3DInterface{}
}
func (*NilRenderer) CreateShape2DInterface(name string) Shape2DInterface {
return &NilShape2DInterface{}
}
func (*NilRenderer) SetWireFrame(b bool) {
}
func (*NilRenderer) SetViewport(viewport Viewport) {
}
func (*NilRenderer) GetViewport() Viewport {
return Viewport{
0, 0, 0, 0, 0, false,
}
}
func (*NilRenderer) SetNativeResolution(width, height int) {
}
func (*NilRenderer) GetNativeResolution() mgl32.Vec2 {
return [2]float32{0.0, 0.0}
}
func (*NilRenderer) OnResize(newWidth, newHeight int) {
}
func (*NilRenderer) PreRender() {
}
func (*NilRenderer) AfterRender() {
}
func (*NilRenderer) SetBackgroundColor(bgColor color.Color) {
}
func (*NilRenderer) GetBackgroundColor() color.Color {
return nil
}
func (*NilRenderer) GetName() string {
return ""
}
func (*NilRenderer) RenderBackBuffer() {
}
func (*NilRenderer) SetBacckFaceCulling(b bool) {
}
func (*NilRenderer) SetDepthTesting(b bool) {
}
func (*NilRenderer) GetMaxTextures() int32 {
return 0
}
func (*NilRenderer) NextTextureUnit() uint32 {
return 0
}
func (*NilRenderer) DecrementTextureUnit(amount uint32) {
}
func (*NilRenderer) FilterShaderFiles(name, file, shader_type string) string {
return file
}
func (*NilRenderer) FilterShaderSource(name, source, shader_type string) string {
return source
}
func (*NilRenderer) HasFunctionAvailable(name string) bool {
return false
}
func (*NilRenderer) InstancedMesh3DFromLoadedMesh3D(mesh Mesh3D) InstancedMesh3D {
return &NilInstancedMesh3D{}
} | src/gohome/renderer.go | 0.801742 | 0.489992 | renderer.go | starcoder |
/*
Package nanny implements logic to poll the k8s apiserver for cluster status,
and update a deployment based on that status.
*/
package nanny
import (
"time"
log "github.com/golang/glog"
api "k8s.io/kubernetes/pkg/api/v1"
inf "speter.net/go/exp/math/dec/inf"
)
// checkResource determines whether a specific resource needs to be over-written.
// It reports true when the resource is present in exactly one of the two
// lists, or when the actual/expected ratio falls outside the band
// [1 - threshold/100, 1 + threshold/100].
func checkResource(threshold int64, actual, expected api.ResourceList, res api.ResourceName) bool {
	val, ok := actual[res]
	expVal, expOk := expected[res]
	if ok != expOk {
		return true
	}
	if !ok && !expOk {
		return false
	}
	// q = actual/expected with two decimal places, rounded down.
	q := new(inf.Dec).QuoRound(val.Amount, expVal.Amount, 2, inf.RoundDown)
	// threshold is a percentage; lower/upper carry scale 2 to match q.
	lower := inf.NewDec(100-threshold, 2)
	upper := inf.NewDec(100+threshold, 2)
	if q.Cmp(lower) == -1 || q.Cmp(upper) == 1 {
		return true
	}
	return false
}
// shouldOverwriteResources determines if we should over-write the container's
// resource limits. We'll over-write the resource limits if any tracked
// resource (CPU, memory, storage) differs between limits/requests and their
// expected values, or violates the threshold.
func shouldOverwriteResources(threshold int64, limits, reqs, expLimits, expReqs api.ResourceList) bool {
	for _, res := range []api.ResourceName{api.ResourceCPU, api.ResourceMemory, api.ResourceStorage} {
		if checkResource(threshold, limits, expLimits, res) || checkResource(threshold, reqs, expReqs, res) {
			return true
		}
	}
	return false
}
// KubernetesClient is an object that performs the nanny's requisite interactions with Kubernetes.
type KubernetesClient interface {
	// CountNodes returns the current number of nodes in the cluster.
	CountNodes() (uint64, error)
	// ContainerResources returns the watched container's current resources.
	ContainerResources() (*api.ResourceRequirements, error)
	// UpdateDeployment rewrites the deployment with the given resources.
	UpdateDeployment(resources *api.ResourceRequirements) error
}
// ResourceEstimator estimates ResourceRequirements for a given criteria.
type ResourceEstimator interface {
	// scaleWithNodes returns the expected resources for a cluster of the
	// given node count.
	scaleWithNodes(numNodes uint64) *api.ResourceRequirements
}
// PollAPIServer periodically counts the number of nodes, estimates the expected
// ResourceRequirements, compares them to the actual ResourceRequirements, and
// updates the deployment with the expected ResourceRequirements if necessary.
func PollAPIServer(k8s KubernetesClient, est ResourceEstimator, contName string, pollPeriod time.Duration, threshold uint64) {
for i := 0; true; i++ {
if i != 0 {
// Sleep for the poll period.
time.Sleep(pollPeriod)
}
// Query the apiserver for the number of nodes.
num, err := k8s.CountNodes()
if err != nil {
log.Error(err)
continue
}
log.Infof("The number of nodes is %d", num)
// Query the apiserver for this pod's information.
resources, err := k8s.ContainerResources()
if err != nil {
log.Error(err)
continue
}
log.Infof("The container resources are %v", resources)
// Get the expected resource limits.
expResources := est.scaleWithNodes(num)
log.Infof("The expected resources are %v", expResources)
// If there's a difference, go ahead and set the new values.
if !shouldOverwriteResources(int64(threshold), resources.Limits, resources.Requests, expResources.Limits, expResources.Requests) {
log.Infof("Resources are within the expected limits.")
continue
}
log.Infof("Resources are not within the expected limits: updating the deployment.")
if err := k8s.UpdateDeployment(expResources); err != nil {
log.Error(err)
continue
}
}
} | addon-resizer/nanny/nanny_lib.go | 0.664431 | 0.437343 | nanny_lib.go | starcoder |
package chrono
import (
"database/sql/driver"
"errors"
"fmt"
"time"
)
// Date wraps time.Time and is used to save and output YYYY-MM-DD format
// date strings.
type Date struct {
	time.Time
}

// String renders the date in UTC using the SQLDate (YYYY-MM-DD) layout.
func (d Date) String() string {
	return d.In(time.UTC).Format(SQLDate)
}
// MarshalJSON encodes the date as a quoted YYYY-MM-DD (SQLDate) string.
// The zero value encodes as JSON null; years outside [0,9999] are rejected
// because the layout cannot represent them.
func (d Date) MarshalJSON() ([]byte, error) {
	if d.IsZero() {
		return []byte("null"), nil
	}
	if y := d.Year(); y < 0 || y >= 10000 {
		return nil, errors.New("Date.MarshalJSON: year outside of range [0,9999]")
	}
	out := make([]byte, 0, len(SQLDate)+2)
	out = append(out, '"')
	out = d.AppendFormat(out, SQLDate)
	out = append(out, '"')
	return out, nil
}
// UnmarshalJSON parses a quoted YYYY-MM-DD string into the Date.
// Both `null` and parse failure leave/set the time.Time zero value
// (time.Parse returns the zero value on error).
func (d *Date) UnmarshalJSON(data []byte) error {
	s := string(data)
	if s == "null" {
		d.Time = time.Time{}
		return nil
	}
	var err error
	d.Time, err = time.Parse(`"`+SQLDate+`"`, s)
	return err
}
// Scan implements the database/sql Scanner interface.
// SQL NULL becomes the time.Time zero value; time.Time is taken as-is;
// []byte and string are parsed with the SQLDate layout. Any other type
// yields an error.
func (d *Date) Scan(value interface{}) error {
	if value == nil {
		d.Time = time.Time{}
		return nil
	}
	var err error
	switch v := value.(type) {
	case time.Time:
		d.Time = v
	case []byte:
		d.Time, err = time.Parse(SQLDate, string(v))
	case string:
		d.Time, err = time.Parse(SQLDate, v)
	default:
		err = fmt.Errorf("can't convert %T to time.Time", value)
	}
	return err
}
// Value implements the database/sql/driver Valuer interface.
// The zero value is stored as SQL NULL; otherwise the UTC date is stored
// as a YYYY-MM-DD string.
func (d Date) Value() (driver.Value, error) {
	if !d.IsZero() {
		return d.In(time.UTC).Format(SQLDate), nil
	}
	return nil, nil
}
// DateNow creates a Date from the current time.
// NOTE(review): Truncate(24 * time.Hour) truncates the absolute time, which
// snaps to UTC day boundaries rather than local midnight — confirm that is
// the intended behavior.
func DateNow() Date {
	return Date{
		time.Now().Truncate(24 * time.Hour),
	}
}

// DateUTCNow creates a Date from the current time in UTC.
func DateUTCNow() Date {
	return Date{
		time.Now().UTC().Truncate(24 * time.Hour),
	}
}

// DateZero creates the zero value of Date.
func DateZero() Date {
	return Date{time.Time{}}
}

// DateFrom creates a new Date wrapping t, truncated to a day boundary.
func DateFrom(t time.Time) Date {
	return Date{t.Truncate(24 * time.Hour)}
}

// DateUTCFrom creates a new Date wrapping t converted to UTC and truncated
// to a day boundary.
func DateUTCFrom(t time.Time) Date {
	return Date{
		t.UTC().Truncate(24 * time.Hour),
	}
}
package sdlang
import (
	"bytes"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"strconv"
	"time"
)
// sdlValueTag discriminates which variant of the SdlValue tagged union is
// active.
type sdlValueTag int

const (
	tNull sdlValueTag = iota
	tString
	tInt
	tFloat
	tDateTime
	tTimeSpan
	tBool
	tBinary
)
// SdlDebugLocation records where in the source a tag, attribute, or value
// was parsed from, for error reporting and debugging.
type SdlDebugLocation struct {
	File       string // source file name
	Line       string // text of the source line
	Loc        int    // offset within the line
	LineNumber int    // 1-based line number
}

// SdlValue is a tagged union for every possible type representable in SDLang.
type SdlValue struct {
	tag           sdlValueTag // which v* field below is active
	vString       string
	vInt          int64
	vFloat        float64
	vDateTime     time.Time
	vTimeSpan     time.Duration
	vBool         bool
	vBinary       []byte
	DebugLocation SdlDebugLocation
}

// SdlAttribute is a Key-Value pair between a string and an SdlValue
type SdlAttribute struct {
	// Namespace is the namespace of this attribute.
	Namespace string
	// Name is the name of this attribute.
	Name string
	// QualifiedName is the fully qualified ("namespace:name") name of this attribute.
	QualifiedName string
	// Value is the attribute's value.
	Value         SdlValue
	DebugLocation SdlDebugLocation
}

// SdlTag is a container consisting of a name; child tags; attributes, and values.
type SdlTag struct {
	// Namespace is the namespace of this tag.
	Namespace string
	// Name is the name of this tag.
	Name string
	// QualifiedName is the fully qualified ("namespace:name") name of this tag.
	QualifiedName string
	// Children contains the children of this tag. It is safe (and expected) to modify this value.
	Children []SdlTag
	// Attributes contains the attributes of this tag, keyed by qualified
	// name. It is safe (and expected) to modify this value.
	Attributes map[string]SdlAttribute
	// Values contains the values of this tag. It is safe (and expected) to modify this value.
	Values        []SdlValue
	DebugLocation SdlDebugLocation
}
// Null creates a null SdlValue. The zero SdlValue is already null (tNull is
// the zero tag), so no fields need to be set.
func Null() SdlValue { return SdlValue{} }

// String creates a string SdlValue.
func String(value string) SdlValue { return SdlValue{tag: tString, vString: value} }

// Int creates an int SdlValue.
func Int(value int64) SdlValue { return SdlValue{tag: tInt, vInt: value} }

// Float creates a float SdlValue.
func Float(value float64) SdlValue { return SdlValue{tag: tFloat, vFloat: value} }

// DateTime creates a datetime SdlValue.
func DateTime(value time.Time) SdlValue { return SdlValue{tag: tDateTime, vDateTime: value} }

// TimeSpan creates a timespan SdlValue.
func TimeSpan(value time.Duration) SdlValue { return SdlValue{tag: tTimeSpan, vTimeSpan: value} }

// Bool creates a bool SdlValue.
func Bool(value bool) SdlValue { return SdlValue{tag: tBool, vBool: value} }

// Binary creates a binary SdlValue.
func Binary(value []byte) SdlValue { return SdlValue{tag: tBinary, vBinary: value} }
// IsNull reports whether the value is the null variant.
func (v SdlValue) IsNull() bool {
	return v.tag == tNull
}

// IsString reports whether the value is the string variant.
func (v SdlValue) IsString() bool {
	return v.tag == tString
}

// IsInt reports whether the value is the int variant.
func (v SdlValue) IsInt() bool {
	return v.tag == tInt
}

// IsFloat reports whether the value is the float variant.
func (v SdlValue) IsFloat() bool {
	return v.tag == tFloat
}

// IsDateTime reports whether the value is the datetime variant.
func (v SdlValue) IsDateTime() bool {
	return v.tag == tDateTime
}

// IsTimeSpan reports whether the value is the timespan variant.
func (v SdlValue) IsTimeSpan() bool {
	return v.tag == tTimeSpan
}

// IsBool reports whether the value is the bool variant.
func (v SdlValue) IsBool() bool {
	return v.tag == tBool
}

// IsBinary reports whether the value is the binary variant.
func (v SdlValue) IsBinary() bool {
	return v.tag == tBinary
}
// String returns the string value, or an error when the value is not the
// string variant.
func (v SdlValue) String() (string, error) {
	if v.IsString() {
		return v.vString, nil
	}
	return "", errors.New("this value is not a string")
}

// Int returns the integer value, or an error when the value is not the int
// variant.
func (v SdlValue) Int() (int64, error) {
	if v.IsInt() {
		return v.vInt, nil
	}
	return 0, errors.New("this value is not an integer")
}
// Float returns the float value, or an error when the value is not the
// float variant.
//
// Bug fix: this previously tested IsString(), so calling Float on an actual
// float value returned an error, while calling it on a string silently
// returned 0 with no error.
func (v SdlValue) Float() (float64, error) {
	if !v.IsFloat() {
		return 0, errors.New("this value is not a float")
	}
	return v.vFloat, nil
}
// DateTime returns the datetime value, or an error when the value is not
// the datetime variant.
// NOTE(review): on error this returns time.Now() rather than the zero
// time.Time — confirm callers expect that.
func (v SdlValue) DateTime() (time.Time, error) {
	if !v.IsDateTime() {
		return time.Now(), errors.New("this value is not a datetime")
	}
	return v.vDateTime, nil
}

// TimeSpan returns the duration value, or an error when the value is not
// the timespan variant.
func (v SdlValue) TimeSpan() (time.Duration, error) {
	if !v.IsTimeSpan() {
		return 0, errors.New("this value is not a timespan")
	}
	return v.vTimeSpan, nil
}

// Bool returns the boolean value, or an error when the value is not the
// bool variant.
func (v SdlValue) Bool() (bool, error) {
	if !v.IsBool() {
		return false, errors.New("this value is not a bool")
	}
	return v.vBool, nil
}

// Binary returns the binary value, or an error when the value is not the
// binary variant.
func (v SdlValue) Binary() ([]byte, error) {
	if !v.IsBinary() {
		return []byte{}, errors.New("this value is not a binary blob")
	}
	return v.vBinary, nil
}
// ForEachChild applies the function `f` onto each child of the tag. Children
// are passed by pointer so f may mutate them in place.
func (t SdlTag) ForEachChild(f func(child *SdlTag)) {
	for i := range t.Children {
		f(&t.Children[i])
	}
}
// ForEachChildFiltered applies the function `f` onto each child of the tag that passes the `filter` predicate.
func (t SdlTag) ForEachChildFiltered(filter func(child *SdlTag) bool, f func(child *SdlTag)) {
	t.ForEachChild(func(child *SdlTag) {
		if filter(child) {
			f(child)
		}
	})
}

// ForEachChildByName applies the function `f` onto each child of the tag that has the specified `name`.
func (t SdlTag) ForEachChildByName(name string, f func(child *SdlTag)) {
	t.ForEachChildFiltered(func(child *SdlTag) bool {
		return child.Name == name
	}, f)
}

// ForEachChildByNamespace applies the function `f` onto each child of the
// tag that has the specified `namespace`.
func (t SdlTag) ForEachChildByNamespace(namespace string, f func(child *SdlTag)) {
	t.ForEachChildFiltered(func(child *SdlTag) bool {
		return child.Namespace == namespace
	}, f)
}
// ParseIntoAst constructs an AST using the given SaxParser.
// The returned value contains the root tag, which is nameless and only
// contains children.
func (p SaxParser) ParseIntoAst() (SdlTag, error) {
	// currTagStack mirrors tag nesting: index 0 is the synthetic root;
	// the top of the stack is the tag currently being populated.
	var currTagStack []SdlTag
	currTagStack = append(currTagStack, SdlTag{})
	// Tag names may only appear at the start of a line; this tracks
	// whether the previous token ended a line.
	prevWasNewLine := true
	for {
		err := p.Next()
		if err != nil {
			return SdlTag{}, err
		}
		if p.IsEof() {
			break
		}
		// Record the source position of the current token for debugging.
		dline, dloc, dln := p.getLine(p.cursor)
		dbg := SdlDebugLocation{File: p.FileName, Line: dline, Loc: dloc, LineNumber: dln}
		if p.IsTagName() {
			if !prevWasNewLine {
				return SdlTag{}, p.NewError(0, "(probably a bug) Tag names can only appear at the start of new lines.")
			}
			// Start a new tag and push it; it is completed when the
			// line (or its brace block) ends.
			var tag SdlTag
			tag.Name = p.Text()
			tag.Namespace = p.AdditionalText()
			tag.QualifiedName = p.AdditionalText() + ":" + p.Text()
			tag.DebugLocation = dbg
			// Strip the leading ':' when there is no namespace.
			if tag.QualifiedName[0] == ':' {
				tag.QualifiedName = tag.QualifiedName[1:]
			}
			currTagStack = append(currTagStack, tag)
			prevWasNewLine = false
		} else if p.IsAttributeName() {
			var attr SdlAttribute
			attr.Name = p.Text()
			attr.Namespace = p.AdditionalText()
			attr.QualifiedName = p.AdditionalText() + ":" + p.Text()
			attr.DebugLocation = dbg
			// Strip the leading ':' when there is no namespace.
			if attr.QualifiedName[0] == ':' {
				attr.QualifiedName = attr.QualifiedName[1:]
			}
			// The attribute's value is the next token.
			err = p.Next()
			if err != nil {
				return SdlTag{}, err
			}
			handleValue(&attr.Value, &p)
			// Lazily allocate the attribute map on the current tag.
			if currTagStack[len(currTagStack)-1].Attributes == nil {
				currTagStack[len(currTagStack)-1].Attributes = map[string]SdlAttribute{}
			}
			currTagStack[len(currTagStack)-1].Attributes[attr.QualifiedName] = attr
			prevWasNewLine = false
		} else if p.IsNewLine() {
			// End of line closes the current tag (unless the line was
			// empty): pop it and attach it to its parent.
			if !prevWasNewLine {
				parent := &currTagStack[len(currTagStack)-2]
				child := currTagStack[len(currTagStack)-1]
				parent.Children = append(parent.Children, child)
				currTagStack = currTagStack[0 : len(currTagStack)-1]
			}
			prevWasNewLine = true
		} else if p.IsOpenTag() {
			// '{' keeps the current tag on the stack so following lines
			// become its children.
			if prevWasNewLine {
				return SdlTag{}, p.NewError(0, "Opening braces have to be on the same line as a tag.")
			}
			prevWasNewLine = true
			err = p.Next()
			if err != nil {
				return SdlTag{}, err
			}
			if !p.IsNewLine() {
				return SdlTag{}, p.NewError(0, "Expected a new line following opening brace.")
			}
		} else if p.IsCloseTag() {
			// '}' closes the brace block: pop the tag and attach it to
			// its parent.
			if !prevWasNewLine {
				return SdlTag{}, p.NewError(0, "Closing braces must be on their own line.")
			}
			err = p.Next()
			if err != nil {
				return SdlTag{}, err
			}
			if !p.IsNewLine() && !p.IsEof() {
				return SdlTag{}, p.NewError(0, "Expected a new line or end of file following closing brace.")
			}
			parent := &currTagStack[len(currTagStack)-2]
			child := currTagStack[len(currTagStack)-1]
			parent.Children = append(parent.Children, child)
			currTagStack = currTagStack[0 : len(currTagStack)-1]
			prevWasNewLine = true
		} else {
			// Anything else is a plain value belonging to the current tag.
			var val SdlValue
			val.DebugLocation = dbg
			handleValue(&val, &p)
			currTagStack[len(currTagStack)-1].Values = append(currTagStack[len(currTagStack)-1].Values, val)
			prevWasNewLine = false
		}
	}
	return currTagStack[0], nil
}
func handleValue(v *SdlValue, p *SaxParser) {
if p.IsBinary() {
v.tag = tBinary
base64.NewDecoder(base64.RawStdEncoding, bytes.NewBufferString(p.Text())).Read(v.vBinary)
} else if p.IsBool() {
v.tag = tBool
v.vBool = p.Bool()
} else if p.IsDate() || p.IsDateTime() {
v.tag = tDateTime
v.vDateTime = p.Time()
} else if p.IsDouble() || p.IsFloat() {
v.tag = tFloat
v.vFloat, _ = strconv.ParseFloat(p.Text(), 64)
} else if p.IsInteger() || p.IsLong() {
v.tag = tInt
v.vInt, _ = strconv.ParseInt(p.Text(), 10, 64)
} else if p.IsNull() {
v.tag = tNull
} else if p.IsString() {
v.tag = tString
v.vString = p.Text()
} else if p.IsTimeSpan() {
v.tag = tTimeSpan
v.vTimeSpan = p.TimeSpan()
} else {
fmt.Printf("p: %v\n", p.t)
panic("bug: this error should've been caught earlier on")
}
} | ast.go | 0.67971 | 0.425784 | ast.go | starcoder |
package stats
import (
"fmt"
"math"
"sort"
)
// New creates a Stats accumulator seeded with the given values.
func New(values ...float64) *Stats {
	s := &Stats{}
	s.Add(values...)
	return s
}

// Stats accumulates float64 samples and answers summary questions about
// them (min/max/mean/percentiles/variance). It is not safe for concurrent
// use.
type Stats struct {
	values []float64 // collected samples; sorted lazily in place
	sorted bool      // true while values is known to be sorted
	sum    float64   // running sum of all samples
}

// Values returns the underlying sample slice. The slice is shared with the
// accumulator and may be sorted in place by other methods.
func (stats *Stats) Values() []float64 {
	return stats.values
}

// Add appends values to the sample set and updates the running sum.
func (stats *Stats) Add(values ...float64) {
	for _, v := range values {
		stats.sum += v
		stats.values = append(stats.values, v)
	}
	stats.sorted = false
}

// Max returns the largest sample, or 0 when no samples were added.
func (stats *Stats) Max() float64 {
	stats.Sort()
	if len(stats.values) == 0 {
		return 0
	}
	return stats.values[len(stats.values)-1]
}

// Min returns the smallest sample, or 0 when no samples were added.
func (stats *Stats) Min() float64 {
	stats.Sort()
	if len(stats.values) == 0 {
		return 0
	}
	return stats.values[0]
}

// Sum returns the sum of all samples added so far.
func (stats *Stats) Sum() float64 {
	return stats.sum
}

// Sort sorts the sample slice in place; it is a no-op when the samples are
// already sorted.
func (stats *Stats) Sort() {
	if !stats.sorted {
		sort.Float64s(stats.values)
		stats.sorted = true
	}
}

// Avg returns the arithmetic mean. It is NaN when no samples were added.
func (stats *Stats) Avg() float64 {
	return stats.sum / float64(len(stats.values))
}

// Median returns the 50th percentile.
func (stats *Stats) Median() float64 {
	return stats.Perc(50)
}

// Percentile returns the value at the given percentile (0-100) of the
// already-sorted values slice, using the floor of the fractional rank.
// It returns NaN for an empty slice and panics when perc is large enough
// to index past the end (e.g. perc >= 100 for a non-empty slice).
func Percentile(values []float64, perc float64) float64 {
	if len(values) == 0 {
		return math.NaN()
	}
	middle := float64(len(values)) * perc / 100.0
	floor := int(math.Floor(middle))
	if len(values) <= floor {
		panic(fmt.Sprintf("unable to get idx %d of %v", floor, values))
	}
	return values[floor]
}

// PercentileFloat is Percentile with an integer percentile. It previously
// duplicated the computation (minus the empty-slice guard); it now
// delegates, so an empty slice yields NaN instead of a panic.
func PercentileFloat(values []float64, perc int) (o float64) {
	return Percentile(values, float64(perc))
}

// Perc calculates the percentile, use 50 for median.
func (stats *Stats) Perc(perc float64) float64 {
	stats.Sort()
	return Percentile(stats.values, perc)
}

// String summarizes the accumulator for human consumption.
func (stats *Stats) String() string {
	return fmt.Sprintf("len: %d, avg: %.1f, med: %.1f, perc_95: %.1f, perc_99: %.1f, max: %.1f, min: %.1f",
		len(stats.values), stats.Avg(), stats.Median(), stats.Perc(95), stats.Perc(99),
		stats.Max(),
		stats.Min(),
	)
}

// Len returns the number of samples.
func (stats *Stats) Len() int {
	return len(stats.values)
}

// Variance returns the sample variance (n-1 denominator). With fewer than
// two samples the result is not meaningful (division by zero).
func (stats *Stats) Variance() float64 {
	avg := stats.Avg()
	sum := 0.0
	for _, v := range stats.values {
		d := v - avg
		sum += d * d
	}
	return sum / float64(stats.Len()-1)
}

// StdDeviation returns the sample standard deviation.
func (s *Stats) StdDeviation() float64 {
	return math.Sqrt(s.Variance())
}

// Reset discards all samples. Bug fix: the running sum was previously left
// stale, corrupting Sum and Avg after a Reset/Add cycle.
func (stats *Stats) Reset() {
	stats.values = stats.values[:0]
	stats.sorted = false
	stats.sum = 0
}
package h3go
import "math"
// VertexNode is a single node in a vertex graph, part of a linked list.
// Each node stores one directed edge (from -> to).
type VertexNode struct {
	from GeoCoord
	to   GeoCoord
	next *VertexNode // next node in the same hash bucket, or nil
}

// VertexGraph is a data structure to store a graph of vertices, implemented
// as a hash table of singly linked edge lists.
type VertexGraph struct {
	buckets    []*VertexNode // hash buckets, each a linked list of edges
	numBuckets int           // number of buckets
	size       int           // total number of edges stored
	res        int           // resolution of the hexagons whose vertices are stored
}
// initVertexGraph initializes graph with numBuckets hash buckets for
// storing vertices of resolution-res hexagons. With numBuckets <= 0 no
// bucket slice is allocated.
func initVertexGraph(graph *VertexGraph, numBuckets int, res int) {
	graph.buckets = nil
	if numBuckets > 0 {
		graph.buckets = make([]*VertexNode, numBuckets)
	}
	graph.numBuckets = numBuckets
	graph.size = 0
	graph.res = res
}
// destroyVertexGraph removes every node from the graph and releases the
// bucket slice. The caller owns the VertexGraph struct itself.
func destroyVertexGraph(graph *VertexGraph) {
	for node := firstVertexNode(graph); node != nil; node = firstVertexNode(graph) {
		removeVertexNode(graph, node)
	}
	graph.buckets = nil
}
/**
 * Get an integer hash for a lat/lon point, at a precision determined
 * by the current hexagon resolution.
 * TODO: Light testing suggests this might not be sufficient at resolutions
 * finer than 10. Design a better hash function if performance and collisions
 * seem to be an issue here.
 * @param vertex Lat/lon vertex to hash
 * @param res Resolution of the hexagon the vertex belongs to
 * @param numBuckets Number of buckets in the graph
 * @return Integer hash in [0, numBuckets)
 */
func _hashVertex(vertex *GeoCoord, res int, numBuckets int) uint32 {
	// Simple hash: Take the sum of the lat and lon with a precision level
	// determined by the resolution, converted to int, modulo bucket count.
	return uint32(
		math.Mod(
			math.Abs(
				(vertex.lat+vertex.lon)*math.Pow(10, float64(15-res)),
			),
			float64(numBuckets),
		),
	)
}
// _initVertexNode allocates a list node holding the edge fromVtx -> toVtx.
// The coordinates are copied; the node's next pointer starts nil.
func _initVertexNode(fromVtx *GeoCoord, toVtx *GeoCoord) *VertexNode {
	node := new(VertexNode)
	node.from = *fromVtx
	node.to = *toVtx
	return node
}
/**
* Add a edge to the graph
* @param graph Graph to add node to
* @param fromVtx Start vertex
* @param toVtx End vertex
* @return Pointer to the new node
*/
func addVertexNode(graph *VertexGraph, fromVtx *GeoCoord, toVtx *GeoCoord) *VertexNode {
// Make the new node
node := _initVertexNode(fromVtx, toVtx)
// Determine location
index := _hashVertex(fromVtx, graph.res, graph.numBuckets)
// Check whether there's an existing node in that spot
currentNode := graph.buckets[index]
if currentNode == nil {
// Set bucket to the new node
graph.buckets[index] = node
} else {
// Find the end of the list
for {
// Check the the edge we're adding doesn't already exist
if geoAlmostEqual(¤tNode.from, fromVtx) &&
geoAlmostEqual(¤tNode.to, toVtx) {
// already exists, bail
return currentNode
}
if currentNode.next != nil {
currentNode = currentNode.next
}
if currentNode.next == nil {
break
}
}
// Add the new node to the end of the list
currentNode.next = node
}
graph.size++
return node
}
/**
 * Remove a node from the graph. The input node should not be used after
 * removal.
 * @param graph Graph to mutate
 * @param node Node to remove
 * @return false on success (node found and removed), true on failure
 *         (node not found). This keeps the C original's "0 on success,
 *         1 on failure" convention.
 */
func removeVertexNode(graph *VertexGraph, node *VertexNode) bool {
	// Determine which bucket the node must live in.
	index := _hashVertex(&node.from, graph.res, graph.numBuckets)
	currentNode := graph.buckets[index]
	found := false
	if currentNode != nil {
		if currentNode == node {
			// Node is the bucket head: unlink it directly.
			graph.buckets[index] = node.next
			found = true
		}
		// Look through the list
		for !found && currentNode.next != nil {
			if currentNode.next == node {
				// splice the node out
				currentNode.next = node.next
				found = true
			}
			currentNode = currentNode.next
		}
	}
	if found {
		// NOTE(review): assigning nil to the local parameter is a no-op
		// in Go — a leftover from the C original's free().
		node = nil
		graph.size--
		return false
	}
	// Failed to find the node
	return true
}
// findNodeForEdge returns the node for the edge fromVtx -> toVtx, or nil if
// no such edge is stored. When toVtx is nil, any edge starting at fromVtx
// matches.
func findNodeForEdge(graph *VertexGraph, fromVtx *GeoCoord, toVtx *GeoCoord) *VertexNode {
	// Walk the bucket this edge would hash into.
	index := _hashVertex(fromVtx, graph.res, graph.numBuckets)
	for node := graph.buckets[index]; node != nil; node = node.next {
		if !geoAlmostEqual(&node.from, fromVtx) {
			continue
		}
		if toVtx == nil || geoAlmostEqual(&node.to, toVtx) {
			return node
		}
	}
	return nil
}
/**
 * Find a Vertex node starting at the given vertex
 * @param graph Graph to look in
 * @param fromVtx Start vertex
 * @return Pointer to the vertex node, or nil if not found
 */
func findNodeForVertex(graph *VertexGraph, fromVtx *GeoCoord) *VertexNode {
	// A nil end vertex makes findNodeForEdge match any edge from fromVtx.
	return findNodeForEdge(graph, fromVtx, nil)
}

/**
 * Get the first vertex node in the graph, i.e. the head of the first
 * non-empty bucket.
 * @param graph Graph to iterate
 * @return Vertex node, or NULL if the graph is empty
 */
func firstVertexNode(graph *VertexGraph) *VertexNode {
	for _, node := range graph.buckets {
		if node != nil {
			return node
		}
	}
	return nil
}
package sqlp
/*
Notes on node representation
In a language with variant types (tagged unions), we would have represented
tokens/nodes as a variant. Go lacks variants, so the closest alternatives are:
1) Emulating a variant type by using a struct where every field is a pointer,
and only one field must be non-nil. Type detection is performed by checking
which of the fields is non-nil.
2) Using a single struct type, with an explicit type field. Type detection is
performed by comparing the type field to constants.
3) Using an interface and a collection of concrete types implementing it. Type
detection is performed by unwrapping the interface and checking the
underlying concrete type.
Problems with (1): it allows too many invalid representations; it makes it hard
or annoying to check the type.
Problem with (2): the single node type must have the fields to support every
possible node, but not every type uses every field. It makes it too hard to
express or undestand which representations are valid, and makes invalid
representations too likely.
Problem with (3): it may involve more individual heap allocations and
indirections. But unlike the other two, it allows extremely simple specialized
node types, which may ultimately use less memory, and avoids invalid
representations. Unlike the other two, (3) allows the set of possible values to
be open. The user may introduce additional AST nodes that the parser didn't
know about, though this only goes from AST to formatted code, not the other
direction.
Misc notes
The parser accumulates unrecognized content until it finds some "known" syntax,
at which point it emits the accumulation as `NodeWhitespace` and/or
`NodeText`.
The token stream or parsed AST must serialize back into EXACTLY the source
content.
*/
import (
"strconv"
)
// NodeText is arbitrary non-whitespace text that wasn't recognized by the
// parser. When generated by the parser, the node is always non-empty and
// consists entirely of non-whitespace characters.
type NodeText string

// Append implements Node by writing the text verbatim.
func (self NodeText) Append(buf []byte) []byte { return append(buf, self...) }

// String implements fmt.Stringer via the shared appender helper.
func (self NodeText) String() string { return appenderStr(&self) }

// NodeWhitespace is whitespace. When generated by the parser, the node is
// always non-empty and consists entirely of whitespace characters.
type NodeWhitespace string

// Append implements Node by writing the whitespace verbatim.
func (self NodeWhitespace) Append(buf []byte) []byte { return append(buf, self...) }

// String implements fmt.Stringer via the shared appender helper.
func (self NodeWhitespace) String() string { return appenderStr(&self) }

// Node returns a canonical node for this whitespace: the shared
// single-space instance when the content is exactly one space, otherwise
// the receiver itself.
func (self NodeWhitespace) Node() Node {
	if self == ` ` {
		return nodeWhitespaceSingle
	}
	return self
}
// NodeQuoteSingle is text inside single quotes: ''. Escape sequences are
// not supported yet.
type NodeQuoteSingle string

// Append implements Node, emitting the content wrapped in single quotes.
func (self NodeQuoteSingle) Append(buf []byte) []byte {
	return append(append(append(buf, quoteSingle), self...), quoteSingle)
}

// String implements fmt.Stringer for debug purposes.
func (self NodeQuoteSingle) String() string { return appenderStr(&self) }

// NodeQuoteDouble is text inside double quotes: "". Escape sequences are
// not supported yet.
type NodeQuoteDouble string

// Append implements Node, emitting the content wrapped in double quotes.
func (self NodeQuoteDouble) Append(buf []byte) []byte {
	return append(append(append(buf, quoteDouble), self...), quoteDouble)
}

// String implements fmt.Stringer for debug purposes.
func (self NodeQuoteDouble) String() string { return appenderStr(&self) }

// NodeQuoteGrave is text inside grave quotes: ``. Escape sequences are not
// supported yet.
type NodeQuoteGrave string

// Append implements Node, emitting the content wrapped in grave quotes.
func (self NodeQuoteGrave) Append(buf []byte) []byte {
	return append(append(append(buf, quoteGrave), self...), quoteGrave)
}

// String implements fmt.Stringer for debug purposes.
func (self NodeQuoteGrave) String() string { return appenderStr(&self) }
// NodeCommentLine is the content of a line comment: --, including the
// newline.
type NodeCommentLine string

// Append implements Node, emitting the line-comment prefix followed by the
// content.
func (self NodeCommentLine) Append(buf []byte) []byte {
	buf = append(buf, commentLinePrefix...)
	buf = append(buf, self...)
	return buf
}

// String implements fmt.Stringer via the shared appender helper.
func (self NodeCommentLine) String() string { return appenderStr(&self) }

// NodeCommentBlock is the content of a block comment: /* */.
type NodeCommentBlock string

// Append implements Node, wrapping the content in the block-comment
// prefix and suffix.
func (self NodeCommentBlock) Append(buf []byte) []byte {
	buf = append(buf, commentBlockPrefix...)
	buf = append(buf, self...)
	buf = append(buf, commentBlockSuffix...)
	return buf
}

// String implements fmt.Stringer via the shared appender helper.
func (self NodeCommentBlock) String() string { return appenderStr(&self) }

// NodeDoubleColon is the Postgres cast operator: ::. It allows
// disambiguating casts from named params.
type NodeDoubleColon struct{}

// Append implements Node, emitting "::".
func (self NodeDoubleColon) Append(buf []byte) []byte { return append(buf, castPrefix...) }

// String implements fmt.Stringer.
func (self NodeDoubleColon) String() string { return castPrefix }
// NodeOrdinalParam is a Postgres-style ordinal parameter placeholder:
// $1, $2, $3, ... The integer value is the 1-based ordinal.
type NodeOrdinalParam int

// Append implements Node, emitting '$' followed by the decimal ordinal.
func (self NodeOrdinalParam) Append(buf []byte) []byte {
	buf = append(buf, ordinalPrefix)
	buf = strconv.AppendInt(buf, int64(self), 10)
	return buf
}

// String implements fmt.Stringer via the shared appender helper.
func (self NodeOrdinalParam) String() string { return appenderStr(&self) }

// Index is a convenience method that returns the corresponding Go index
// (starts at zero).
func (self NodeOrdinalParam) Index() int { return int(self) - 1 }

// NodeNamedParam is a named parameter preceded by a colon: :identifier.
type NodeNamedParam string

// Append implements Node, emitting ':' followed by the identifier.
func (self NodeNamedParam) Append(buf []byte) []byte {
	buf = append(buf, namedPrefix)
	buf = append(buf, self...)
	return buf
}

// String implements fmt.Stringer via the shared appender helper.
func (self NodeNamedParam) String() string { return appenderStr(&self) }
/*
Arbitrary sequence of AST nodes. When serializing, doesn't print any start or
end delimiters.
*/
type Nodes []Node

/*
Implement the `Node` interface. Simply concatenates the stringified
representations of the inner nodes, skipping any nil nodes.
`Nodes` can be arbitrarily nested without affecting the output. For example,
both `Nodes{}` and `Nodes{Nodes{}}` will print "".
*/
func (self Nodes) Append(buf []byte) []byte {
	for _, node := range self {
		if node != nil {
			buf = node.Append(buf)
		}
	}
	return buf
}

// String implements fmt.Stringer via the shared appender helper.
func (self Nodes) String() string { return appenderStr(&self) }

// Nodes implements `Coll`; a Nodes value is its own node sequence.
func (self Nodes) Nodes() Nodes { return self }

// Implement `Walker`. Calls `fun` for each non-nil node in the sequence.
func (self Nodes) WalkNode(fun func(Node)) {
	if fun == nil {
		return
	}
	for _, val := range self {
		if val != nil {
			fun(val)
		}
	}
}

// Implement `PtrWalker`. Calls `fun` for each non-nil node in the sequence.
func (self Nodes) WalkNodePtr(fun func(*Node)) {
	if fun == nil {
		return
	}
	for i := range self {
		if self[i] != nil {
			fun(&self[i])
		}
	}
}

// Makes a deep copy whose mutations won't affect the original.
// A nil receiver yields nil.
func (self Nodes) CopyNodes() Nodes {
	if self == nil {
		return nil
	}
	out := make(Nodes, len(self))
	for i := range self {
		out[i] = CopyNode(self[i])
	}
	return out
}

// Implements `Copier` by calling `Nodes.CopyNodes`.
func (self Nodes) CopyNode() Node { return self.CopyNodes() }

// Procure applies `fun` to each node in order (including nil nodes) and
// returns the first non-nil result, or nil when there is none or fun is
// nil.
func (self Nodes) Procure(fun func(Node) Node) Node {
	if fun == nil {
		return nil
	}
	for _, val := range self {
		node := fun(val)
		if node != nil {
			return node
		}
	}
	return nil
}

// ProcureLast is like Procure but scans from the last node towards the
// first.
func (self Nodes) ProcureLast(fun func(Node) Node) Node {
	if fun == nil {
		return nil
	}
	for i := len(self) - 1; i >= 0; i-- {
		node := fun(self[i])
		if node != nil {
			return node
		}
	}
	return nil
}
// Nodes enclosed in parentheses: ().
type ParenNodes Nodes

// Implement `Node`: the inner nodes wrapped in "(" and ")".
func (self ParenNodes) Append(buf []byte) []byte {
	return appendNodesEnclosed(buf, parenOpen, Nodes(self), parenClose)
}

// Implement `Node`. Also implements `fmt.Stringer` for debug purposes.
func (self ParenNodes) String() string { return appenderStr(&self) }

// Implement `Coll`. Free cast with no allocation.
func (self ParenNodes) Nodes() Nodes { return Nodes(self) }

// Implement `Copier` by calling `Nodes.CopyNodes`.
func (self ParenNodes) CopyNode() Node { return ParenNodes(self.Nodes().CopyNodes()) }

// Implement `Walker` by calling `Nodes.WalkNode`.
func (self ParenNodes) WalkNode(fun func(Node)) { self.Nodes().WalkNode(fun) }

// Implement `PtrWalker` by calling `Nodes.WalkNodePtr`.
func (self ParenNodes) WalkNodePtr(fun func(*Node)) { self.Nodes().WalkNodePtr(fun) }

// Nodes enclosed in brackets: [].
type BracketNodes Nodes

// Implement `Node`: the inner nodes wrapped in "[" and "]".
func (self BracketNodes) Append(buf []byte) []byte {
	return appendNodesEnclosed(buf, bracketOpen, Nodes(self), bracketClose)
}

// Implement `Node`. Also implements `fmt.Stringer` for debug purposes.
func (self BracketNodes) String() string { return appenderStr(&self) }

// Implement `Coll`. Free cast with no allocation.
func (self BracketNodes) Nodes() Nodes { return Nodes(self) }

// Implement `Copier` by calling `Nodes.CopyNodes`.
func (self BracketNodes) CopyNode() Node { return BracketNodes(self.Nodes().CopyNodes()) }

// Implement `Walker` by calling `Nodes.WalkNode`.
func (self BracketNodes) WalkNode(fun func(Node)) { self.Nodes().WalkNode(fun) }

// Implement `PtrWalker` by calling `Nodes.WalkNodePtr`.
func (self BracketNodes) WalkNodePtr(fun func(*Node)) { self.Nodes().WalkNodePtr(fun) }

// Nodes enclosed in braces: {}.
type BraceNodes Nodes

// Implement `Node`: the inner nodes wrapped in "{" and "}".
func (self BraceNodes) Append(buf []byte) []byte {
	return appendNodesEnclosed(buf, braceOpen, Nodes(self), braceClose)
}

// Implement `Node`. Also implements `fmt.Stringer` for debug purposes.
func (self BraceNodes) String() string { return appenderStr(&self) }

// Implement `Coll`. Free cast with no allocation.
func (self BraceNodes) Nodes() Nodes { return Nodes(self) }

// Implement `Copier` by calling `Nodes.CopyNodes`.
func (self BraceNodes) CopyNode() Node { return BraceNodes(self.Nodes().CopyNodes()) }

// Implement `Walker` by calling `Nodes.WalkNode`.
func (self BraceNodes) WalkNode(fun func(Node)) { self.Nodes().WalkNode(fun) }

// Implement `PtrWalker` by calling `Nodes.WalkNodePtr`.
func (self BraceNodes) WalkNodePtr(fun func(*Node)) { self.Nodes().WalkNodePtr(fun) }
package shape
import (
"gioui.org/f32"
"gioui.org/op"
"gioui.org/op/clip"
"gioui.org/op/paint"
"github.com/rs/xid"
"image/color"
)
// Path is a sequence of points describing a polyline.
type Path []f32.Point

// rect is a quadrilateral given by its four corners in order (it may be
// rotated, so it is not an axis-aligned rectangle).
type rect [4]f32.Point

// hit reports whether p lies inside the quadrilateral, tested as the two
// triangles (0,1,2) and (0,3,2).
func (r rect) hit(p f32.Point) bool {
	return pointInTriangle(p, r[0], r[1], r[2]) || pointInTriangle(p, r[0], r[3], r[2])
}

// Line is a polyline shape with a stroke color and width.
type Line struct {
	ID     string      // unique identity of the shape
	Points Path        // vertices of the polyline
	Color  color.NRGBA // stroke color
	Width  float32     // stroke width; also used as padding in Bounds/Hit
	// offset is stored by Offset.
	// NOTE(review): offset is never read within this file (Draw ignores
	// it) — confirm whether it is consumed elsewhere.
	offset f32.Point
	rects  []rect          // cached thick-segment quads for hit testing (built by Bounds)
	boxes  []f32.Rectangle // cached per-point/per-segment bounding boxes (built by Bounds)
}
// NewPolyline constructs a polyline through points with the given stroke
// color and width, assigning it a fresh globally unique ID.
func NewPolyline(points []f32.Point, col color.NRGBA, width float32) *Line {
	line := &Line{
		Points: points,
		Color:  col,
		Width:  width,
	}
	line.ID = xid.New().String()
	return line
}
// Bounds returns the bounding rectangle of the line, padded by Width.
// On the first call it also builds and caches, per point, a padded box and,
// per segment, a rotated thick-segment rectangle (used by Hit) plus that
// rectangle's bounding box. The caches are invalidated by Move.
func (l *Line) Bounds() f32.Rectangle {
	r := l.Width
	if l.boxes == nil {
		length := len(l.Points)
		for i, p1 := range l.Points {
			// Axis-aligned box around the point itself, padded by Width.
			b := f32.Rect(p1.X-r, p1.Y-r, p1.X+r, p1.Y+r)
			l.boxes = append(l.boxes, b)
			if i < length-1 {
				// Build the quad covering the thick segment p1->p2 by
				// offsetting both endpoints perpendicular to the
				// segment by +/-Width.
				p2 := l.Points[i+1]
				tilt := angle(p1, p2) + rad90
				a := offsetPoint(p1, l.Width, tilt)
				b := offsetPoint(p2, l.Width, tilt)
				c := offsetPoint(p2, -l.Width, tilt)
				d := offsetPoint(p1, -l.Width, tilt)
				l.rects = append(l.rects, rect{a, b, c, d})
				box := boundingBox([]f32.Point{a, b, c, d})
				l.boxes = append(l.boxes, box)
			}
		}
	}
	if len(l.boxes) == 0 {
		return f32.Rectangle{}
	}
	// The overall bounds is the union of all cached boxes.
	box := l.boxes[0]
	for _, b := range l.boxes[1:] {
		box = box.Union(b)
	}
	return box
}

// Offset stores p as the line's offset and returns the shape for chaining.
// NOTE(review): the stored offset is not applied anywhere in this file
// (Draw ignores it) — confirm whether it is consumed elsewhere.
func (l *Line) Offset(p f32.Point) Shape {
	l.offset = p
	return l
}

// Draw strokes the polyline into gtx's operation list, scaling both the
// points and the stroke width from dp to px, and using round caps/joins.
func (l Line) Draw(gtx C) {
	scale := gtx.Metric.PxPerDp
	width := l.Width * scale
	defer op.Save(gtx.Ops).Load()
	var path clip.Path
	path.Begin(gtx.Ops)
	for i, p := range l.Points {
		if i == 0 {
			path.MoveTo(p.Mul(scale))
		} else {
			path.LineTo(p.Mul(scale))
		}
	}
	style := clip.StrokeStyle{Width: width, Miter: 10, Cap: clip.RoundCap, Join: clip.RoundJoin}
	paint.FillShape(gtx.Ops, l.Color, clip.Stroke{Path: path.End(), Style: style}.Op())
}
// Move translates every point of the line by delta and invalidates the
// cached hit-test rectangles and bounding boxes (rebuilt by Bounds).
func (l *Line) Move(delta f32.Point) {
	for i := range l.Points {
		l.Points[i] = l.Points[i].Add(delta)
	}
	l.boxes = nil
	l.rects = nil
}
// Hit reports whether p falls on the line, using the thick-segment
// rectangles cached by Bounds. Until Bounds has been called (or after
// Move), rects is nil and Hit reports false.
func (l *Line) Hit(p f32.Point) bool {
	for _, r := range l.rects {
		if r.hit(p) {
			return true
		}
	}
	return false
}

// Eq reports whether s2 equals this shape.
// NOTE(review): currently always false — confirm whether identity
// comparison via ID was intended here.
func (l *Line) Eq(s2 Shape) bool {
	return false
}

// Identity returns the line's unique ID.
func (l *Line) Identity() string {
	return l.ID
}
package value
import "strconv"
// Float holds a single float64 value behind a pointer, so copies of the
// Float share the same underlying storage.
type Float struct {
	valPtr *float64
}
// NewFloat makes a new Float backed by fresh storage holding val.
func NewFloat(val float64) *Float {
	v := val
	return &Float{valPtr: &v}
}
// NewFloatFromPtr makes a new Float with the given pointer to float64 value.
func NewFloatFromPtr(valPtr *float64) *Float {
return &Float{valPtr: valPtr}
}
// Set changes the stored float64 value in place.
func (v *Float) Set(val float64) { *v.valPtr = val }
// Type returns TypeFloat.
func (v *Float) Type() Type { return TypeFloat }
// IsSlice returns false; a Float is always a single value.
func (v *Float) IsSlice() bool { return false }
// Clone produces a copy holding the same value but with its own
// backing pointer, so later Sets on either do not affect the other.
func (v *Float) Clone() Value { return NewFloat(*v.valPtr) }
// Parse sets the value from the given string. On a parse failure the
// stored value is left unchanged and the strconv error is returned.
func (v *Float) Parse(str string) error {
	f, err := strconv.ParseFloat(str, 64)
	if err == nil {
		*v.valPtr = f
	}
	return err
}
// ValuePointer returns the backing pointer used for value storage,
// typed as interface{} (it is a *float64).
func (v *Float) ValuePointer() interface{} { return v.valPtr }
// Value returns the stored float64 value, typed as interface{}.
func (v *Float) Value() interface{} { return *v.valPtr }
// Equal reports whether v2 is also a float and holds the same value.
// It returns a non-nil error when the types do not match.
func (v *Float) Equal(v2 Single) (bool, error) {
	err := CheckType(TypeFloat, v2.Type())
	if err != nil {
		return false, err
	}
	other := v2.Value().(float64)
	return other == *v.valPtr, nil
}
// Greater reports whether the current value is strictly greater than
// the given one. It returns a non-nil error when the types do not match.
func (v *Float) Greater(v2 Single) (bool, error) {
	err := CheckType(TypeFloat, v2.Type())
	if err != nil {
		return false, err
	}
	other := v2.Value().(float64)
	return *v.valPtr > other, nil
}
// GreaterEqual reports whether the current value is greater than or
// equal to the given one. It returns a non-nil error when the types
// do not match.
func (v *Float) GreaterEqual(v2 Single) (bool, error) {
	err := CheckType(TypeFloat, v2.Type())
	if err != nil {
		return false, err
	}
	other := v2.Value().(float64)
	return *v.valPtr >= other, nil
}
// Less reports whether the current value is strictly less than the
// given one. It returns a non-nil error when the types do not match.
func (v *Float) Less(v2 Single) (bool, error) {
	err := CheckType(TypeFloat, v2.Type())
	if err != nil {
		return false, err
	}
	other := v2.Value().(float64)
	return *v.valPtr < other, nil
}
// LessEqual reports whether the current value is less than or equal
// to the given one. It returns a non-nil error when the types do not
// match.
func (v *Float) LessEqual(v2 Single) (bool, error) {
	err := CheckType(TypeFloat, v2.Type())
	if err != nil {
		return false, err
	}
	other := v2.Value().(float64)
	return *v.valPtr <= other, nil
}
// OneOf checks if the current value is one of the given.
// Returns non-nil error if types do not match.
func (v *Float) OneOf(v2 Slice) (bool, error) {
return v2.Contains(v)
} | value/float.go | 0.86342 | 0.607954 | float.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.