code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package v1 import ( "time" "encoding/json" ) type Time struct { time.Time `json:"-"` } func (t Time) DeepCopy() Time { return t } // String returns the representation of the time. func (t Time) String() string { return t.Time.String() } // NewTime returns a wrapped instance of the provided time func NewTime(time time.Time) Time { return Time{time} } // Date returns the Time corresponding to the supplied parameters // by wrapping time.Date. func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} } // Now returns the current local time. func Now() Time { return Time{time.Now()} } // IsZero returns true if the value is nil or time is zero. func (t *Time) IsZero() bool { if t == nil { return true } return t.Time.IsZero() } // Before reports whether the time instant t is before u. func (t Time) Before(u Time) bool { return t.Time.Before(u.Time) } // Equal reports whether the time instant t is equal to u. func (t Time) Equal(u Time) bool { return t.Time.Equal(u.Time) } // UnmarshalJSON implements the json.Unmarshaller interface. func (t *Time) UnmarshalJSON(b []byte) error { if len(b) == 4 && string(b) == "null" { t.Time = time.Time{} return nil } var str string json.Unmarshal(b, &str) pt, err := time.Parse(time.RFC3339, str) if err != nil { return err } t.Time = pt.Local() return nil } // MarshalJSON implements the json.Marshaler interface. func (t Time) MarshalJSON() ([]byte, error) { if t.IsZero() { // Encode unset/nil objects as JSON's "null". 
return []byte("null"), nil } return json.Marshal(t.UTC().Format(time.RFC3339)) } // UnmarshalQueryParameter converts from a URL query parameter value to an object func (t *Time) UnmarshalQueryParameter(str string) error { if len(str) == 0 { t.Time = time.Time{} return nil } // Tolerate requests from older clients that used JSON serialization to build query params if len(str) == 4 && str == "null" { t.Time = time.Time{} return nil } pt, err := time.Parse(time.RFC3339, str) if err != nil { return err } t.Time = pt.Local() return nil } // MarshalQueryParameter converts to a URL query parameter value func (t Time) MarshalQueryParameter() (string, error) { if t.IsZero() { // Encode unset/nil objects as an empty string return "", nil } return t.UTC().Format(time.RFC3339), nil }
apimachinery/pkg/apigroups/meta/v1/time.go
0.87266
0.464476
time.go
starcoder
package redis_rate import "github.com/go-redis/redis/v8" // Copyright (c) 2017 <NAME> // https://github.com/rwz/redis-gcra/blob/master/vendor/perform_gcra_ratelimit.lua var allowN = redis.NewScript(` -- this script has side-effects, so it requires replicate commands mode -- redis-cli --ldb --eval allown.lua mykey , 3 3 120 1 redis.replicate_commands() local rate_limit_key = KEYS[1] local burst = ARGV[1] local rate = ARGV[2] local period = ARGV[3] local cost = tonumber(ARGV[4]) local emission_interval = period / rate local increment = emission_interval * cost local burst_offset = emission_interval * burst -- redis returns time as an array containing two integers: seconds of the epoch -- time (10 digits) and microseconds (6 digits). for convenience we need to -- convert them to a floating point number. the resulting number is 16 digits, -- bordering on the limits of a 64-bit double-precision floating point number. -- adjust the epoch to be relative to Jan 1, 2017 00:00:00 GMT to avoid floating -- point problems. this approach is good until "now" is 2,483,228,799 (Wed, 09 -- Sep 2048 01:46:39 GMT), when the adjusted value is 16 digits. 
local jan_1_2017 = 1483228800 local now = redis.call("TIME") now = (now[1] - jan_1_2017) + (now[2] / 1000000) local tat = redis.call("GET", rate_limit_key) if not tat then tat = now else tat = tonumber(tat) end tat = math.max(tat, now) -- redis.debug("emission_interval",emission_interval) -- redis.debug("increment",increment) -- redis.debug("burst_offset",burst_offset) -- redis.debug("tat",tostring(tat)) local new_tat = tat + increment local allow_at = new_tat - burst_offset local diff = now - allow_at local remaining = diff / emission_interval -- redis.debug("new_tat", tostring(new_tat)) -- redis.debug("allow_at",tostring(allow_at)) -- redis.debug("diff",diff) -- redis.debug("remaining",remaining) if remaining < 0 then local reset_after = tat - now local retry_after = diff * -1 -- redis.debug("---A") -- redis.debug("allowed",0) -- redis.debug("remaining",0) -- redis.debug("retry_after",retry_after) -- redis.debug("reset_after",reset_after) return { 0, -- allowed 0, -- remaining tostring(retry_after), tostring(reset_after), } end local reset_after = new_tat - now if reset_after > 0 then redis.call("SET", rate_limit_key, new_tat, "EX", math.ceil(reset_after)) end local retry_after = emission_interval * (1 - remaining) if retry_after < 0 then retry_after = 0 end -- redis.debug("---B") -- redis.debug("allowed",cost) -- redis.debug("remaining",remaining) -- redis.debug("retry_after",tostring(retry_after)) -- redis.debug("reset_after",tostring(reset_after)) return {cost, remaining, tostring(retry_after), tostring(reset_after)} `) var allowAtMost = redis.NewScript(` -- this script has side-effects, so it requires replicate commands mode redis.replicate_commands() local rate_limit_key = KEYS[1] local burst = ARGV[1] local rate = ARGV[2] local period = ARGV[3] local cost = tonumber(ARGV[4]) local emission_interval = period / rate local burst_offset = emission_interval * burst -- redis returns time as an array containing two integers: seconds of the epoch -- time (10 
digits) and microseconds (6 digits). for convenience we need to -- convert them to a floating point number. the resulting number is 16 digits, -- bordering on the limits of a 64-bit double-precision floating point number. -- adjust the epoch to be relative to Jan 1, 2017 00:00:00 GMT to avoid floating -- point problems. this approach is good until "now" is 2,483,228,799 (Wed, 09 -- Sep 2048 01:46:39 GMT), when the adjusted value is 16 digits. local jan_1_2017 = 1483228800 local now = redis.call("TIME") now = (now[1] - jan_1_2017) + (now[2] / 1000000) local tat = redis.call("GET", rate_limit_key) if not tat then tat = now else tat = tonumber(tat) end tat = math.max(tat, now) local diff = now - (tat - burst_offset) local remaining = diff / emission_interval if remaining < 1 then local reset_after = tat - now local retry_after = emission_interval - diff return { 0, -- allowed 0, -- remaining tostring(retry_after), tostring(reset_after), } end if remaining < cost then cost = remaining remaining = 0 else remaining = remaining - cost end local increment = emission_interval * cost local new_tat = tat + increment local reset_after = new_tat - now if reset_after > 0 then redis.call("SET", rate_limit_key, new_tat, "EX", math.ceil(reset_after)) end return { cost, remaining, tostring(-1), tostring(reset_after), } `)
lua.go
0.726911
0.408041
lua.go
starcoder
package kernel /* A numeric-heavy and struct heavy bench where we get a sequence of triangle strips and a transform and find the geometric center of them all. Intentionally written with some unneeded complexity to include that in the optimization testing. */ import ( "math" "runtime" "sync" "testing" ) var shader = ` #version 450 layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; struct triangle { vec2 vertices[3]; }; struct polygon { triangle triangles[64]; }; struct cog_res { vec2 cog; float area; }; layout(std430) buffer In { mat3 transform; polygon polygons[]; }; layout(std430) buffer Out { vec2 cogs[]; }; shared cog_res shared_data[64]; float area_tri(triangle t) { float a = (t.vertices[1][0]-t.vertices[0][0])*(t.vertices[2][1]-t.vertices[0][1]) - (t.vertices[2][0]-t.vertices[0][0])*(t.vertices[1][1]-t.vertices[0][1]); if (a < 0.0f) { a *= -0.5f; } else { a *= 0.5f; } return a; } vec2 cog_tri(triangle t) { return vec2((t.vertices[1][0] + t.vertices[2][0] + t.vertices[0][0]) / 3.0f, (t.vertices[1][1] + t.vertices[2][1] + t.vertices[0][1]) / 3.0f); } cog_res tri(triangle t) { for(int i = 0; i < 3; i++) { t.vertices[i] = (transform*vec3(t.vertices[i], 1.0f)).xy; } cog_res r; r.area = area_tri(t); r.cog = cog_tri(t); return r; } cog_res cog_poly(polygon p) { float area = 0.0f; vec2 cog = vec2(0, 0); for(int i = 0; i < 64; i++) { cog_res tr = tri(p.triangles[i]); area += tr.area; cog += tr.area*tr.cog; } cog /= area; cog_res r; r.area = area; r.cog = cog; return r; } void main() { // where we should do our job uint base_index = gl_WorkGroupID.x*gl_WorkGroupSize.x; uint local_index = gl_LocalInvocationID.x; uint index = base_index + local_index; // actual calc of this invocation cog_res my_res = cog_poly(polygons[index]); // sync with others in our WG by using shared memory // and a barrier shared_data[local_index] = my_res; barrier(); // one in each WG should sum them up and return the // result. 
if (local_index == 0) { my_res.cog *= my_res.area; for( int i = 1; i < 64; i++) { cog_res fr = shared_data[i]; my_res.area += fr.area; my_res.cog += fr.area*fr.cog; } my_res.cog /= my_res.area; cogs[gl_WorkGroupID.x] = my_res.cog; } } ` func BenchmarkTransTri(b *testing.B) { noi := 128 data := d(noi) k, err := New(runtime.GOMAXPROCS(-1), 1024*1024) if err != nil { b.Error(err) b.FailNow() } defer k.Free() b.ResetTimer() for i := 0; i < b.N; i++ { err := k.Dispatch(data, noi, 1, 1) if err != nil { b.Error(err) } } b.StopTimer() // chec the cog's for i := 0; i < noi; i++ { cog := data.Cogs[i] if math.Abs(float64(cog[0]-1)) > 1e-4 || math.Abs(float64(cog[1]-1)) > 1e-4 { b.Error("bas cog data", i, cog) } } } func TestTransTri(t *testing.T) { noi := 2 ensureRun(t, 1, noi, 1, 1, func() Data { return d(noi) }, func(res Data) { // chec the cog's for i := 0; i < noi; i++ { cog := res.Cogs[i] if math.Abs(float64(cog[0]-1)) > 1e-4 || math.Abs(float64(cog[1]-1)) > 1e-4 { t.Error("bas cog data", i, cog) } } }) } func d(noi int) Data { d := Data{ Transform: &Mat3{Vec3{1, 0, 0}, Vec3{0, 1, 0}, Vec3{2.0 / 3, 2.0 / 3, 1}}, Polygons: make([]Polygon, noi*64), Cogs: make([]Vec2, noi), } // fill with polygons for i := 0; i < noi*64; i++ { d.Polygons[i] = p() } return d } func dgo(noi int) (tr [9]float32, ps []Polygon, cogs []float32) { tr = [9]float32{1, 0, 0, 0, 1, 0, 2.0 / 3, 2.0 / 3, 1} ps = make([]Polygon, noi*64) cogs = make([]float32, noi*2) for i := 0; i < noi*64; i++ { ps[i] = p() } return } func p() Polygon { p := Polygon{} for i := 0; i < 64; i++ { p.Triangles[i] = t() } return p } func t() Triangle { // area 0.5 and COG at (1/3, 1/3) return Triangle{ Vertices: [3]Vec2{{0, 0}, {0, 1}, {1, 0}}, } } func BenchmarkTransTriRef(b *testing.B) { noi := 128 tr, ps, cogs := dgo(noi) b.ResetTimer() for i := 0; i < b.N; i++ { refimpl(tr, ps, cogs) } b.StopTimer() // chec the cog's for i := 0; i < noi; i++ { cog := [2]float32{cogs[i*2], cogs[i*2+1]} if math.Abs(float64(cog[0]-1)) > 1e-4 
|| math.Abs(float64(cog[1]-1)) > 1e-4 { b.Error("bas cog data", i, cog) } } } func refimpl(tr [9]float32, ps []Polygon, cogs []float32) { // simply do it in parts of 64 wg := sync.WaitGroup{} si := 0 noe := 64 for i := 0; i < len(ps)/64; i++ { ei := si + noe wg.Add(1) go func(tr [9]float32, ps []Polygon, cogs []float32) { impl(tr, ps, cogs) wg.Done() }(tr, ps[si:ei], cogs[i*2:i*2+2]) si = ei } wg.Wait() } func impl(tr [9]float32, ps []Polygon, cogs []float32) { if len(cogs) != 2 { panic("should be") } area, x, y := float32(0), float32(0), float32(0) for i := 0; i < 64; i++ { a, b, c := cog_poly(ps[i], tr) area += a x += a * b y += a * c } x /= area y /= area cogs[0] = x cogs[1] = y } func area_tri(t Triangle) float32 { a := (t.Vertices[1][0]-t.Vertices[0][0])*(t.Vertices[2][1]-t.Vertices[0][1]) - (t.Vertices[2][0]-t.Vertices[0][0])*(t.Vertices[1][1]-t.Vertices[0][1]) if a < 0.0 { a *= -0.5 } else { a *= 0.5 } return a } func cog_tri(t Triangle) (float32, float32) { return (t.Vertices[1][0] + t.Vertices[2][0] + t.Vertices[0][0]) / 3.0, (t.Vertices[1][1] + t.Vertices[2][1] + t.Vertices[0][1]) / 3.0 } func tri(t *Triangle, transform [9]float32) (area, x, y float32) { for i := 0; i < 3; i++ { t.Vertices[i][0], t.Vertices[i][1], _ = matmul(transform, t.Vertices[i][0], t.Vertices[i][1], 1.0) } area = area_tri(*t) x, y = cog_tri(*t) return } func matmul(m [9]float32, x, y, z float32) (float32, float32, float32) { return m[0]*x + m[3]*y + m[6]*z, m[0+1]*x + m[3+1]*y + m[6+1]*z, m[0+1+1]*x + m[3+1+1]*y + m[6+1+1]*z } func cog_poly(p Polygon, t [9]float32) (area, x, y float32) { for i := 0; i < 64; i++ { a, b, c := tri(&p.Triangles[i], t) area += a x += b * a y += c * a } x /= area y /= area return }
test/benchmarks/bench_transtri.go
0.583441
0.52902
bench_transtri.go
starcoder
package main import ( "fmt" tor "github.com/NullHypothesis/zoossh" cluster "github.com/NullHypothesis/mlgo/cluster" levenshtein "github.com/arbovm/levenshtein" statistics "github.com/mcgrew/gostats" ) // RelayDistances contains a slice for relays and their corresponding distance // to another relay. type RelayDistances struct { Distances []float32 Relays []*tor.RouterStatus } // Len implements the Sorter interface. func (rd RelayDistances) Len() int { return len(rd.Distances) } // Swap implements the Sorter interface. func (rd RelayDistances) Swap(i, j int) { rd.Distances[i], rd.Distances[j] = rd.Distances[j], rd.Distances[i] rd.Relays[i], rd.Relays[j] = rd.Relays[j], rd.Relays[i] } // Less implements the Sorter interface. func (rd RelayDistances) Less(i, j int) bool { return rd.Distances[i] < rd.Distances[j] } // Add adds a new relay with its corresponding distance to the struct. func (rd *RelayDistances) Add(relay *tor.RouterStatus, dist float32) { rd.Distances = append(rd.Distances, dist) rd.Relays = append(rd.Relays, relay) } // Distance quantifies the distance between the two given "Tor objects" (e.g., // router statuses or descriptors) as 32-bit float. type Distance func(obj1, obj2 tor.Object) float32 // Levenshtein determines the Levenshtein distance, a string metric, between // the given router statuses and descriptors. In contrast to // LevenshteinVerbose, this function only returns the distance. func Levenshtein(stat1, stat2 *tor.RouterStatus, desc1, desc2 *tor.RouterDescriptor) float32 { distance, _ := LevenshteinVerbose(stat1, stat2, desc1, desc2) return distance } // PearsonWrapper is a wrapper around PearsonCorrelation. func PearsonWrapper(a, b cluster.Vector) float64 { return 1 - PearsonCorrelation(a, b) } // PearsonCorrelation determines the Pearson correlation coefficient. 
func PearsonCorrelation(a, b []float64) float64 { return statistics.PearsonCorrelation(a, b) } // LevenshteinVerbose determines the Levenshtein distance, a string metric, // between the given router statuses and descriptors. func LevenshteinVerbose(status1, status2 *tor.RouterStatus, desc1, desc2 *tor.RouterDescriptor) (float32, string) { var str1, str2 string if desc1 == nil { desc1 = new(tor.RouterDescriptor) } if desc2 == nil { desc2 = new(tor.RouterDescriptor) } str1 = fmt.Sprintf("%s%s%d%d%s%s%s%d%d%s%s%d%s", status1.Nickname, status1.Address, status1.Address.IPv4ORPort, status1.Address.IPv4DirPort, RouterFlagsToString(&status1.Flags), status1.TorVersion, status1.PortList, desc1.BandwidthAvg, desc1.BandwidthBurst, desc1.OperatingSystem, desc1.Published, desc1.Uptime, desc1.Contact) str2 = fmt.Sprintf("%s%s%d%d%s%s%s%d%d%s%s%d%s", status2.Nickname, status2.Address, status2.Address.IPv4ORPort, status2.Address.IPv4DirPort, RouterFlagsToString(&status2.Flags), status2.TorVersion, status2.PortList, desc2.BandwidthAvg, desc2.BandwidthBurst, desc2.OperatingSystem, desc2.Published, desc2.Uptime, desc2.Contact) verbose := fmt.Sprintf("%s: %s\n%s: %s", status1.Fingerprint[:8], str1, status2.Fingerprint[:8], str2) return float32(levenshtein.Distance(str1, str2)), verbose }
distance.go
0.778733
0.456168
distance.go
starcoder
package kv import ( "encoding/json" "reflect" ) //KV is a key/value entry and a struct which implements helper methods to help with retrial of data types from value. type KV struct { key string value interface{} } func New(key string, value interface{}) *KV { return &KV{ key: key, value: value, } } func (kv *KV) Bind(inf interface{}) error { data, err := json.Marshal(kv.Value) if err != nil { return err } return json.Unmarshal(data, inf) } var uintType = reflect.TypeOf(uint64(0)) var intType = reflect.TypeOf(int64(0)) var floatType = reflect.TypeOf(float64(0)) var stringType = reflect.TypeOf(string("")) var boolType = reflect.TypeOf(false) type converter func(in interface{}) (interface{}, bool) func untypedFloat(in interface{}) (interface{}, bool) { return toFloat(in) } func untypedUint(in interface{}) (interface{}, bool) { return toUint(in) } func untypedInt(in interface{}) (interface{}, bool) { return toInt(in) } func untypedBool(in interface{}) (res interface{}, ok bool) { return toBool(in) } func untypedString(in interface{}) (res interface{}, ok bool) { return toString(in) } //NewParam creates a new key value param from input func NewParam(key string, value interface{}) *KV { return &KV{key, value} } // Key returns the key of the key/value pair func (kv *KV) Key() string { return kv.key } // Value returns the value of the key/value pair func (kv *KV) Value() interface{} { return kv.value } // IsNil returns true if value is nil func (kv *KV) IsNil() bool { return kv.value == nil } // IsSlice returns true if value is a array func (kv *KV) IsSlice() bool { if kv.value == nil { return false } return reflect.TypeOf(kv.value).Kind() == reflect.Slice } // String returns value as a string, if possible func (kv *KV) String() (string, bool) { return toString(kv.value) } // StringOr returns value as a string, otherwise the provided default func (kv *KV) StringOr(defaultTo string) string { str, ok := kv.String() if ok { return str } return defaultTo } // StringSlice 
returns value as a []string, if possible func (kv *KV) StringSlice() ([]string, bool) { if kv.value == nil { return nil, false } var res []string res, ok := kv.value.([]string) if ok { return res, true } r, ok := toSliceOf(kv.value, stringType, untypedString) if !ok { return nil, false } res, ok = r.([]string) if ok { return res, true } return nil, false } // StringSliceOr returns value as a []string, otherwise the provided default func (kv *KV) StringSliceOr(defaultTo []string) []string { arr, ok := kv.StringSlice() if ok { return arr } return defaultTo } // Uint returns value as a uint64, if possible func (kv *KV) Uint() (uint64, bool) { return toUint(kv.value) } // UintOr returns value as a uint64, otherwise the provided default func (kv *KV) UintOr(def uint64) uint64 { i, ok := kv.Uint() if ok { return i } return def } // UintSlice returns value as a []uint64, if possible func (kv *KV) UintSlice() ([]uint64, bool) { if kv.value == nil { return nil, false } var res []uint64 res, ok := kv.value.([]uint64) if ok { return res, true } r, ok := toSliceOf(kv.value, uintType, untypedUint) if !ok { return nil, false } res, ok = r.([]uint64) if ok { return res, true } return nil, false } // UintSliceOr returns value as a []uint64, otherwise the provided default func (kv *KV) UintSliceOr(def []uint64) []uint64 { arr, ok := kv.UintSlice() if ok { return arr } return def } // Int returns value as a int64, if possible func (kv *KV) Int() (int64, bool) { return toInt(kv.value) } // IntOr returns value as a int64, otherwise the provided default func (kv *KV) IntOr(def int64) int64 { i, ok := kv.Int() if ok { return i } return def } // IntSlice returns value as a []int64, if possible func (kv *KV) IntSlice() ([]int64, bool) { if kv.value == nil { return nil, false } var res []int64 res, ok := kv.value.([]int64) if ok { return res, ok } r, ok := toSliceOf(kv.value, intType, untypedInt) if !ok { return nil, false } res, ok = r.([]int64) if ok { return res, true } return nil, 
false } // IntSliceOr returns value as a []int64, otherwise the provided default func (kv *KV) IntSliceOr(def []int64) []int64 { arr, ok := kv.IntSlice() if ok { return arr } return def } // Float returns value as a float64, if possible func (kv *KV) Float() (float64, bool) { if kv.value == nil { return 0.0, false } return toFloat(kv.value) } // FloatOr returns value as a float64, otherwise the provided default func (kv *KV) FloatOr(def float64) float64 { i, ok := kv.Float() if ok { return i } return def } // FloatSlice returns value as a []float64, if possible func (kv *KV) FloatSlice() ([]float64, bool) { if kv.value == nil { return nil, false } var res []float64 res, ok := kv.value.([]float64) if ok { return res, ok } r, ok := toSliceOf(kv.value, floatType, untypedFloat) if !ok { return nil, false } res, ok = r.([]float64) if ok { return res, true } return nil, false } // FloatSliceOr returns value as a []float64, otherwise the provided default func (kv *KV) FloatSliceOr(def []float64) []float64 { arr, ok := kv.FloatSlice() if ok { return arr } return def } // Bool returns value as a bool, if possible func (kv *KV) Bool() (bool, bool) { return toBool(kv.value) } // BoolOr returns value as a bool, otherwise the provided default func (kv *KV) BoolOr(def bool) bool { i, ok := kv.Bool() if ok { return i } return def } // BoolSlice returns value as a []bool, if possible func (kv *KV) BoolSlice() ([]bool, bool) { if kv.value == nil { return nil, false } var res []bool res, ok := kv.value.([]bool) if ok { return res, ok } r, ok := toSliceOf(kv.value, boolType, untypedBool) if !ok { return nil, false } res, ok = r.([]bool) if ok { return res, true } return nil, false } // BoolSliceOr returns value as a []bool, otherwise the provided default func (kv *KV) BoolSliceOr(def []bool) []bool { arr, ok := kv.BoolSlice() if ok { return arr } return def } func toString(in interface{}) (res string, ok bool) { if in == nil { return "", false } switch in.(type) { case string: res, 
ok = in.(string) case []byte: var b []byte b, ok = in.([]byte) res = string(b) case []rune: var r []rune r, ok = in.([]rune) res = string(r) } return } func toBool(in interface{}) (res bool, ok bool) { if in == nil { return false, false } switch in.(type) { case bool: res, ok = in.(bool) } return } func toUint(num interface{}) (uint64, bool) { if num == nil { return 0, false } var i uint64 var ok bool switch num.(type) { case int, int8, int16, int32, int64: a := reflect.ValueOf(num).Int() // a has type int64 return uint64(a), true case uint, uint8, uint16, uint32, uint64: a := reflect.ValueOf(num).Uint() // a has type uint64 return a, true case float64: f, ok := num.(float64) return uint64(f), ok case float32: f, ok := num.(float32) return uint64(f), ok } return i, ok } func toInt(num interface{}) (int64, bool) { if num == nil { return 0, false } var i int64 var ok bool switch num.(type) { case int, int8, int16, int32, int64: a := reflect.ValueOf(num).Int() // a has type int64 return a, true case uint, uint8, uint16, uint32, uint64: a := reflect.ValueOf(num).Uint() // a has type uint64 return int64(a), true case float64, float32: a := reflect.ValueOf(num).Float() return int64(a), true } return i, ok } func toFloat(num interface{}) (float64, bool) { if num == nil { return 0, false } var i float64 var ok bool switch num.(type) { case int, int8, int16, int32, int64: a := reflect.ValueOf(num).Int() // a has type int64 return float64(a), true case uint, uint8, uint16, uint32, uint64: a := reflect.ValueOf(num).Uint() // a has type uint64 return float64(a), true case float64: f, ok := num.(float64) return float64(f), ok case float32: f, ok := num.(float32) return float64(f), ok } return i, ok } func toSliceOf(value interface{}, typ reflect.Type, converter converter) (interface{}, bool) { if reflect.TypeOf(value).Kind() != reflect.Slice { return nil, false } slice := reflect.ValueOf(value) resSlice := reflect.MakeSlice(reflect.SliceOf(typ), slice.Len(), slice.Len()) for i 
:= 0; i < slice.Len(); i++ { val, ok := converter(slice.Index(i).Interface()) if !ok { return nil, false } resSlice.Index(i).Set(reflect.ValueOf(val)) } return resSlice.Interface(), true }
vendor/github.com/modfin/kv/kv.go
0.786254
0.441974
kv.go
starcoder
package orbits import ( "image" "image/color" "image/png" "math" "os" "github.com/Balise42/marzipango/params" ) type PointOrbit struct { X float64 Y float64 Translation float64 Factor float64 } type LineOrbit struct { A float64 B float64 C float64 Sqrtab float64 Translation float64 Factor float64 } type Coords struct { X int64 Y int64 } type coordsFloat struct { X float64 Y float64 } type ImageOrbit struct { Distances map[Coords]float64 Translation float64 Factor float64 Width int Height int } func CreatePointOrbit(x float64, y float64, maxvalue float64) PointOrbit { orbit := PointOrbit{X: x, Y: y} minDist := 0.0 maxDist := orbit.squaredDistance(-2 - 1i) maxDist = math.Max(maxDist, orbit.squaredDistance(-2+1i)) maxDist = math.Max(maxDist, orbit.squaredDistance(1+1i)) maxDist = math.Max(maxDist, orbit.squaredDistance(1-1i)) minDist = math.Min(minDist, orbit.squaredDistance(-2+1i)) minDist = math.Min(minDist, orbit.squaredDistance(1+1i)) minDist = math.Min(minDist, orbit.squaredDistance(1-1i)) maxDist = math.Sqrt(maxDist) minDist = math.Sqrt(minDist) orbit.Factor = (maxvalue - minDist) / (maxDist - minDist) orbit.Translation = minDist return orbit } func (p PointOrbit) GetOrbitFastValue(z complex128) float64 { return p.squaredDistance(z) } func (p PointOrbit) GetOrbitValue(v float64) float64 { return (math.Sqrt(v) - p.Translation) * p.Factor } func (p PointOrbit) squaredDistance(z complex128) float64 { return (real(z)-p.X)*(real(z)-p.X) + (imag(z)-p.Y)*(imag(z)-p.Y) } func CreateLineOrbit(a float64, b float64, c float64, maxvalue float64) LineOrbit { orbit := LineOrbit{A: a, B: b, C: c} orbit.Sqrtab = math.Sqrt(a*a + b*b) minDist := 0.0 maxDist := orbit.GetOrbitFastValue(-2 - 1i) maxDist = math.Max(maxDist, orbit.GetOrbitFastValue(-2+1i)) maxDist = math.Max(maxDist, orbit.GetOrbitFastValue(1+1i)) maxDist = math.Max(maxDist, orbit.GetOrbitFastValue(1-1i)) minDist = math.Min(minDist, orbit.GetOrbitFastValue(-2+1i)) minDist = math.Min(minDist, 
orbit.GetOrbitFastValue(1+1i)) minDist = math.Min(minDist, orbit.GetOrbitFastValue(1-1i)) maxDist = math.Sqrt(maxDist) / orbit.Sqrtab minDist = math.Sqrt(minDist) / orbit.Sqrtab orbit.Factor = (maxvalue - minDist) / (maxDist - minDist) orbit.Translation = minDist return orbit } func (l LineOrbit) GetOrbitFastValue(z complex128) float64 { lineCoeff := l.A*real(z) + l.B*imag(z) + l.C return lineCoeff * lineCoeff } func (l LineOrbit) GetOrbitValue(v float64) float64 { return (math.Sqrt(v)/l.Sqrtab - l.Translation) * l.Factor } func isBlack(c color.Color) bool { r, g, b, _ := c.RGBA() return r == 0 && g == 0 && b == 0 } // distance field computation from https://prideout.net/blog/distance_fields/ func findHullParabolas(row []float64) ([]int, []float64) { v := make([]int, len(row)) z := make([]float64, len(row)+1) k := 0 v[0] = 0 z[0] = -math.MaxInt16 z[1] = math.MaxInt16 for i := 1; i < len(row); i++ { q := i p := v[k] s := intersectParabolas(p, q, row) for s <= z[k] { k = k - 1 p = v[k] s = intersectParabolas(p, q, row) } k = k + 1 v[k] = q z[k] = s z[k+1] = math.MaxInt16 } return v, z } func intersectParabolas(p int, q int, row []float64) float64 { intersect := ((row[q] + float64(q*q)) - (row[p] + float64(p*p))) / (2*float64(q) - 2*float64(p)) return intersect } func marchParabolas(row []float64, vertices []int, intersections []float64) { k := 0 for q := range row { for intersections[k+1] < float64(q) { k = k + 1 } dx := q - vertices[k] row[q] = float64(dx*dx) + row[vertices[k]] } } func horizontalPass(row []float64) { vertices, intersections := findHullParabolas(row) marchParabolas(row, vertices, intersections) } func transpose(field [][]float64) [][]float64 { transposed := make([][]float64, len(field[0])) for x := range transposed { transposed[x] = make([]float64, len(field)) for y := range transposed[x] { transposed[x][y] = field[y][x] } } return transposed } func computeEdt(img image.Image, width int, height int, maxvalue int) map[Coords]float64 { field := 
make([][]float64, width+maxvalue*2) for x := range field { field[x] = make([]float64, height+maxvalue*2) for y := range field[x] { if x > maxvalue && x < img.Bounds().Dx()+maxvalue && y > maxvalue && y < img.Bounds().Dy()+maxvalue && isBlack(img.At(x-maxvalue, y-maxvalue)) { field[x][y] = 0 } else { field[x][y] = math.MaxInt16 } } } for _, row := range field { horizontalPass(row) } field = transpose(field) for _, row := range field { horizontalPass(row) } field = transpose(field) return convertField(field, maxvalue, maxvalue) } func doNothing(x int, y int) {} func convertField(field [][]float64, offsetX int, offsetY int) map[Coords]float64 { offsetField := make(map[Coords]float64) for x := range field { for y := range field[x] { offsetField[Coords{int64(x - offsetX), int64(y - offsetY)}] = math.Sqrt(field[x][y]) } } return offsetField } func CreateImageOrbit(params params.ImageParams, path string, maxvalue float64) (ImageOrbit, error) { f, err := os.Open(path) if err != nil { return ImageOrbit{}, err } img, err := png.Decode(f) if err != nil { return ImageOrbit{}, err } distances := computeEdt(img, params.Width, params.Height, int(maxvalue)) minDist := 0.0 maxDist := 0.0 for _, v := range distances { if maxDist < v { maxDist = v } } factor := (maxvalue - minDist) / (maxDist - minDist) translation := minDist return ImageOrbit{Distances: distances, Factor: factor, Translation: translation, Width: img.Bounds().Dx(), Height: img.Bounds().Dy()}, nil } func (im ImageOrbit) GetOrbitFastValue(z complex128) float64 { x := real(z) y := imag(z) hFloat := float64(im.Height) wFloat := float64(im.Width) xOffset := 2.0 yOffset := 1.0 xFactor := wFloat / 3 yFactor := hFloat / 2 xImg := x*xFactor + xOffset/xFactor yImg := y*yFactor + yOffset/yFactor dist, ok := im.Distances[Coords{int64(xImg), int64(yImg)}] if !ok { return math.MaxInt64 } return dist } func (im ImageOrbit) GetOrbitValue(v float64) float64 { if v == math.MaxInt64 { return math.MaxInt64 } return (v - im.Translation) 
* im.Factor }
fractales/orbits/orbit.go
0.774413
0.505188
orbit.go
starcoder
package toml // tomlType represents any Go type that corresponds to a TOML type. // While the first draft of the TOML spec has a simplistic type system that // probably doesn't need this level of sophistication, we seem to be militating // toward adding real composite types. type tomlType interface { typeString() string } // typeEqual accepts any two types and returns true if they are equal. func typeEqual(t1, t2 tomlType) bool { if t1 == nil || t2 == nil { return false } return t1.typeString() == t2.typeString() } func typeIsHash(t tomlType) bool { return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) } type tomlBaseType string func (btype tomlBaseType) typeString() string { return string(btype) } func (btype tomlBaseType) String() string { return btype.typeString() } var ( tomlInteger tomlBaseType = "Integer" tomlFloat tomlBaseType = "Float" tomlDatetime tomlBaseType = "Datetime" tomlString tomlBaseType = "String" tomlBool tomlBaseType = "Bool" tomlArray tomlBaseType = "Array" tomlHash tomlBaseType = "Hash" tomlArrayHash tomlBaseType = "ArrayHash" ) // typeOfPrimitive returns a tomlType of any primitive value in TOML. // Primitive values are: Integer, Float, Datetime, String and Bool. // // Passing a lexer item other than the following will cause a BUG message // to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. func (p *parser) typeOfPrimitive(lexItem item) tomlType { switch lexItem.typ { case itemInteger: return tomlInteger case itemFloat: return tomlFloat case itemDatetime: return tomlDatetime case itemString: return tomlString case itemBool: return tomlBool } p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) panic("unreachable") } // typeOfArray returns a tomlType for an array given a list of types of its // values. // // In the current spec, if an array is homogeneous, then its type is always // "Array". If the array is not homogeneous, an error is generated. 
func (p *parser) typeOfArray(types []tomlType) tomlType { // Empty arrays are cool. if len(types) == 0 { return tomlArray } theType := types[0] for _, t := range types[1:] { if !typeEqual(theType, t) { p.panicf("Array contains values of type '%s' and '%s', but arrays "+ "must be homogeneous.", theType, t) } } return tomlArray }
vendor/github.com/BurntSushi/toml/type_check.go
0.803598
0.403743
type_check.go
starcoder
package kyber import ( "golang.org/x/crypto/sha3" ) //Poly represents a polynomial of deg n with coefs in [0, Q) type Poly [n]int16 func add(a, b Poly) Poly { var c Poly for i := 0; i < n; i++ { c[i] = a[i] + b[i] } return c } //sub substracts b from a without normalization func sub(a, b Poly) Poly { var c Poly for i := 0; i < n; i++ { c[i] = a[i] - b[i] } return c } //reduce calls barretReduce on each coef func (p *Poly) reduce() { for i := 0; i < n; i++ { p[i] = barretReduce(p[i]) } } //freeze calls Freeze on each coef func (p *Poly) freeze() { for i := 0; i < n; i++ { p[i] = freeze(p[i]) } } //rej fills a with coefs in [0, Q) generated with buf using rejection sampling func rej(a []int16, buf []byte) int { ctr, buflen, alen := 0, len(buf), len(a) for pos := 0; pos+3 <= buflen && ctr < alen; pos += 3 { val0 := (uint16(buf[pos]) | (uint16(buf[pos+1]) << 8)) & 0xfff val1 := (uint16(buf[pos+1]>>4) | (uint16(buf[pos+2]) << 4)) & 0xfff if val0 < uint16(q) { a[ctr] = int16(val0) ctr++ } if val1 < uint16(q) && ctr != alen { a[ctr] = int16(val1) ctr++ } } return ctr } //polyUniform samples a polynomial with coefs in [0, Q] func polyUniform(rho []byte, nonce []byte) Poly { var outbuf [shake128Rate]byte state := sha3.NewShake128() state.Write(rho[:]) state.Write(nonce) state.Read(outbuf[:]) var a Poly ctr := rej(a[:], outbuf[:]) for ctr < n { state.Read(outbuf[:shake128Rate]) ctr += rej(a[ctr:], outbuf[:shake128Rate]) } return a } //polyGetNoise samples a polynomial with coefs in [Q-eta, Q+eta] func polyGetNoise(eta int, seed []byte, nonce byte) Poly { outbuf := make([]byte, eta*n/4) state := sha3.NewShake256() state.Write(seed[:]) state.Write([]byte{nonce}) state.Read(outbuf[:]) var p Poly if eta == 3 { p = polyCBD3(outbuf) } if eta == 2 { p = polyCBD2(outbuf) } return p } //polyCBD2 samples a poly using a centered binomial distribution func polyCBD2(outbuf []byte) Poly { var t, d uint32 var a, b int16 var p Poly for i := 0; i < n/8; i++ { t = load32LE(outbuf[4*i:]) d = t 
& 0x55555555 d += (t >> 1) & 0x55555555 for j := 0; j < 8; j++ { a = int16((d >> (4*j + 0)) & 0x3) b = int16((d >> (4*j + 2)) & 0x3) p[8*i+j] = a - b } } return p } //polyCBD3 samples a poly using a centered binomial distribution func polyCBD3(outbuf []byte) Poly { var t, d uint32 var a, b int16 var p Poly for i := 0; i < n/4; i++ { t = load24LE(outbuf[3*i:]) d = t & 0x00249249 d += (t >> 1) & 0x00249249 d += (t >> 2) & 0x00249249 for j := 0; j < 4; j++ { a = int16((d >> (6*j + 0)) & 0x7) b = int16((d >> (6*j + 3)) & 0x7) p[4*i+j] = a - b } } return p } //polyBaseMul multiplies two polynomials func polyBaseMul(a, b Poly) Poly { var r Poly for i := 0; i < n/4; i++ { copy(r[4*i:4*i+2], basemul(a[4*i:4*i+2], b[4*i:4*i+2], zetas[64+i])) copy(r[4*i+2:4*i+4], basemul(a[4*i+2:4*i+4], b[4*i+2:4*i+4], -zetas[64+i])) } return r } //tomont converts a poly to its montgomery representation func (p *Poly) toMont() { var f int16 = int16((uint64(1) << 32) % uint64(q)) for i := 0; i < n; i++ { p[i] = montgomeryReduce(int32(p[i]) * int32(f)) } } //polyFromMsg converts a msg into polynomial representation func polyFromMsg(msg []byte) Poly { var p Poly for i := 0; i < n/8; i++ { for j := 0; j < 8; j++ { mask := -int16((msg[i] >> j) & 1) p[8*i+j] = mask & int16((q+1)/2) } } return p } //polyToMsg converts a polynomial to a byte array func polyToMsg(p Poly) []byte { msg := make([]byte, 32) var t uint16 var tmp byte p.reduce() for i := 0; i < n/8; i++ { tmp = 0 for j := 0; j < 8; j++ { t = (((uint16(p[8*i+j]) << 1) + uint16(q/2)) / uint16(q)) & 1 tmp |= byte(t << j) } msg[i] = tmp } return msg } //compress packs a polynomial into a byte array using d bits per coefficient func (p *Poly) compress(d int) []byte { c := make([]byte, n*d/8) switch d { case 3: var t [8]uint16 id := 0 for i := 0; i < n/8; i++ { for j := 0; j < 8; j++ { t[j] = uint16(((uint32(p[8*i+j])<<3)+uint32(q)/2)/ uint32(q)) & ((1 << 3) - 1) } c[id] = byte(t[0]) | byte(t[1]<<3) | byte(t[2]<<6) c[id+1] = byte(t[2]>>2) | 
byte(t[3]<<1) | byte(t[4]<<4) | byte(t[5]<<7) c[id+2] = byte(t[5]>>1) | byte(t[6]<<2) | byte(t[7]<<5) id += 3 } case 4: var t [8]uint16 id := 0 for i := 0; i < n/8; i++ { for j := 0; j < 8; j++ { t[j] = uint16(((uint32(p[8*i+j])<<4)+uint32(q)/2)/ uint32(q)) & ((1 << 4) - 1) } c[id] = byte(t[0]) | byte(t[1]<<4) c[id+1] = byte(t[2]) | byte(t[3]<<4) c[id+2] = byte(t[4]) | byte(t[5]<<4) c[id+3] = byte(t[6]) | byte(t[7]<<4) id += 4 } case 5: var t [8]uint16 id := 0 for i := 0; i < n/8; i++ { for j := 0; j < 8; j++ { t[j] = uint16(((uint32(p[8*i+j])<<5)+uint32(q)/2)/ uint32(q)) & ((1 << 5) - 1) } c[id] = byte(t[0]) | byte(t[1]<<5) c[id+1] = byte(t[1]>>3) | byte(t[2]<<2) | byte(t[3]<<7) c[id+2] = byte(t[3]>>1) | byte(t[4]<<4) c[id+3] = byte(t[4]>>4) | byte(t[5]<<1) | byte(t[6]<<6) c[id+4] = byte(t[6]>>2) | byte(t[7]<<3) id += 5 } case 6: var t [4]uint16 id := 0 for i := 0; i < n/4; i++ { for j := 0; j < 4; j++ { t[j] = uint16(((uint32(p[4*i+j])<<6)+uint32(q)/2)/ uint32(q)) & ((1 << 6) - 1) } c[id] = byte(t[0]) | byte(t[1]<<6) c[id+1] = byte(t[1]>>2) | byte(t[2]<<4) c[id+2] = byte(t[2]>>2) | byte(t[3]<<2) id += 3 } case 10: var t [4]uint16 id := 0 for i := 0; i < n/4; i++ { for j := 0; j < 4; j++ { t[j] = uint16(((uint32(p[4*i+j])<<10)+uint32(q)/2)/ uint32(q)) & ((1 << 10) - 1) } c[id] = byte(t[0]) c[id+1] = byte(t[0]>>8) | byte(t[1]<<2) c[id+2] = byte(t[1]>>6) | byte(t[2]<<4) c[id+3] = byte(t[2]>>4) | byte(t[3]<<6) c[id+4] = byte(t[3] >> 2) id += 5 } case 11: var t [8]uint16 id := 0 for i := 0; i < n/8; i++ { for j := 0; j < 8; j++ { t[j] = uint16(((uint32(p[8*i+j])<<11)+uint32(q)/2)/ uint32(q)) & ((1 << 11) - 1) } c[id] = byte(t[0]) c[id+1] = byte(t[0]>>8) | byte(t[1]<<3) c[id+2] = byte(t[1]>>5) | byte(t[2]<<6) c[id+3] = byte(t[2] >> 2) c[id+4] = byte(t[2]>>10) | byte(t[3]<<1) c[id+5] = byte(t[3]>>7) | byte(t[4]<<4) c[id+6] = byte(t[4]>>4) | byte(t[5]<<7) c[id+7] = byte(t[5] >> 1) c[id+8] = byte(t[5]>>9) | byte(t[6]<<2) c[id+9] = byte(t[6]>>6) | byte(t[7]<<5) c[id+10] = 
byte(t[7] >> 3) id += 11 } default: panic("bad d value") } return c[:] } //decompressPoly creates a polynomial based on a compressed array, using d bits per coefficients func decompressPoly(c []byte, d int) Poly { var p Poly switch d { case 3: var t [8]uint16 id := 0 for i := 0; i < n/8; i++ { t[0] = uint16(c[id]) t[1] = uint16(c[id]) >> 3 t[2] = (uint16(c[id]) >> 6) | (uint16(c[id+1]) << 2) t[3] = uint16(c[id+1]) >> 1 t[4] = uint16(c[id+1]) >> 4 t[5] = (uint16(c[id+1]) >> 7) | (uint16(c[id+2]) << 1) t[6] = uint16(c[id+2]) >> 2 t[7] = uint16(c[id+2]) >> 5 for j := 0; j < 8; j++ { p[8*i+j] = int16(((1 << 2) + uint32(t[j]&((1<<3)-1))*uint32(q)) >> 3) } id += 3 } case 4: for i := 0; i < n/2; i++ { p[2*i] = int16(((1 << 3) + uint32(c[i]&15)*uint32(q)) >> 4) p[2*i+1] = int16(((1 << 3) + uint32(c[i]>>4)*uint32(q)) >> 4) } case 5: var t [8]uint16 id := 0 for i := 0; i < n/8; i++ { t[0] = uint16(c[id]) t[1] = (uint16(c[id]) >> 5) | (uint16(c[id+1] << 3)) t[2] = uint16(c[id+1]) >> 2 t[3] = (uint16(c[id+1]) >> 7) | (uint16(c[id+2] << 1)) t[4] = (uint16(c[id+2]) >> 4) | (uint16(c[id+3] << 4)) t[5] = uint16(c[id+3]) >> 1 t[6] = (uint16(c[id+3]) >> 6) | (uint16(c[id+4] << 2)) t[7] = uint16(c[id+4]) >> 3 for j := 0; j < 8; j++ { p[8*i+j] = int16(((1 << 4) + uint32(t[j]&((1<<5)-1))*uint32(q)) >> 5) } id += 5 } case 6: var t [4]uint16 id := 0 for i := 0; i < n/4; i++ { t[0] = uint16(c[id]) t[1] = (uint16(c[id]) >> 6) | (uint16(c[id+1] << 2)) t[2] = (uint16(c[id+1]) >> 4) | (uint16(c[id+2]) << 4) t[3] = uint16(c[id+2]) >> 2 for j := 0; j < 4; j++ { p[4*i+j] = int16(((1 << 5) + uint32(t[j]&((1<<6)-1))*uint32(q)) >> 6) } id += 3 } case 10: var t [4]uint16 id := 0 for i := 0; i < n/4; i++ { t[0] = uint16(c[id]) | (uint16(c[id+1]) << 8) t[1] = (uint16(c[id+1]) >> 2) | (uint16(c[id+2]) << 6) t[2] = (uint16(c[id+2]) >> 4) | (uint16(c[id+3]) << 4) t[3] = (uint16(c[id+3]) >> 6) | (uint16(c[id+4]) << 2) for j := 0; j < 4; j++ { p[4*i+j] = int16(((1 << 9) + 
uint32(t[j]&((1<<10)-1))*uint32(q)) >> 10) } id += 5 } case 11: var t [8]uint16 id := 0 for i := 0; i < n/8; i++ { t[0] = uint16(c[id]) | (uint16(c[id+1]) << 8) t[1] = (uint16(c[id+1]) >> 3) | (uint16(c[id+2]) << 5) t[2] = (uint16(c[id+2]) >> 6) | (uint16(c[id+3]) << 2) | (uint16(c[id+4]) << 10) t[3] = (uint16(c[id+4]) >> 1) | (uint16(c[id+5]) << 7) t[4] = (uint16(c[id+5]) >> 4) | (uint16(c[id+6]) << 4) t[5] = (uint16(c[id+6]) >> 7) | (uint16(c[id+7]) << 1) | (uint16(c[id+8]) << 9) t[6] = (uint16(c[id+8]) >> 2) | (uint16(c[id+9]) << 6) t[7] = (uint16(c[id+9]) >> 5) | (uint16(c[id+10]) << 3) for j := 0; j < 8; j++ { p[8*i+j] = int16(((1 << 10) + uint32(t[j]&((1<<11)-1))*uint32(q)) >> 11) } id += 11 } default: panic("bad d value") } return p }
crystals-kyber/poly.go
0.635222
0.417271
poly.go
starcoder
package main import "errors" import "fmt" import "math" type coordinate struct { x int y int } /* iterate through the 2-d int slice to find all the guard locations, and store them somewhere iterate through entire array, push all elements onto a queue pop elements off the queue and calcuate distance from any guard to the square store the lowest value return error if locked room through which guard cannot pass (implement later; mvp) - need a calculate distance function to calculate the distance from one square to another square example of why having "special" rooms be notified separately is useful: */ /*func analyzeAndScore(plan [][]int) error { guards := make(map[coordinate]bool) // first, iterate across all the locations and remember the locations of the guards for i := 0; i < len(plan); i++ { for j := 0; j < len(plan[i]); j++ { if plan[i][j] == 0 { var guard coordinate = coordinate{i, j} guards[guard] = true } else if plan[i][j] == -1 { return errors.New("Locked rooms not supported yet") } } } ... return nil }*/ // iterate through the rooms in the floor plan and update the distances; guards stores the location of the guards // note that since this is a slice, the underlying data structure is modified; this is sub-optimal and would need to later be improved to ensure that the data is either locked (eg with a mutex) or a serialized version is passed to the function. 
// the room is treated as an x-y coordinate system; buildings must be rectangular; locked rooms not supported yet func analyzeAndScore(plan [][]int, guards map[coordinate]bool) error { var ySize int for x := 0; x < len(plan); x++ { if ySize == 0 { ySize = len(plan[x]) } else if len(plan[x]) != ySize { return errors.New("room is not rectangular") // XXX move to type or support rectangular rooms } for y := 0; y < len(plan[x]); y++ { var distance int = math.MaxInt32 for k, _ := range guards { if x == k.x && y == y.x { // it's a guard distance = -1 break } t := math.Abs(k.x-x) + math.Abs(k.y-y) if t < distance { distance = t } } plan[x][y] = distance } } return nil } func main() { // build some example cases, supply to function }
floorPlan.go
0.687
0.405478
floorPlan.go
starcoder
package main import ( "AoC2021/aoc_fun" "io/ioutil" "log" "os" "strings" ) type Record struct { signal []string expect []string } type Data struct { states []Record } func parse(line string, data *Data) { if len(line) == 0 { return } parts := strings.Split(line, " | ") signal := strings.Fields(parts[0]) expect := strings.Fields(parts[1]) data.states = append(data.states, Record{signal, expect}) } func read_data() Data { input_file_name := aoc_fun.GetDefaultInputFilePath() if len(os.Args) == 2 { input_file_name = os.Args[1] } file, err := ioutil.ReadFile(input_file_name) if err != nil { log.Fatal(err) } input_n := string(file) var data Data for _, line := range strings.Split(input_n, "\n") { parse(line, &data) } return data } func d08_1(data Data) int { defer aoc_fun.Track(aoc_fun.Runningtime()) unique_numbers := 0 for _, record := range data.states { signals := record.expect for _, signal := range signals { switch len(signal) { case 2, 4, 3, 7: unique_numbers++ } } } return unique_numbers } func d08_2(data Data) int { defer aoc_fun.Track(aoc_fun.Runningtime()) // static line analisys: // a=>8 b=>6 c=>8 d=>7 e=>4 f=>9 g=>7 make_mapping := func(a, b, c, d, e, f, g rune) map[rune]rune { mapping := make(map[rune]rune) mapping[a] = 'a' mapping[b] = 'b' mapping[c] = 'c' mapping[d] = 'd' mapping[e] = 'e' mapping[f] = 'f' mapping[g] = 'g' return mapping } map_signal := func(mapping map[rune]rune, signal string) string { runeArray := []rune(signal) for idx, s := range runeArray { runeArray[idx] = mapping[s] } return aoc_fun.SortString(string(runeArray)) } get_number_from_display := func(signal string) int { switch signal { case "abcefg": return 0 case "cf": return 1 case "acdeg": return 2 case "acdfg": return 3 case "bcdf": return 4 case "abdfg": return 5 case "abdefg": return 6 case "acf": return 7 case "abcdefg": return 8 case "abcdfg": return 9 default: return -1 } } char_c_from_one := func(signal string, f rune) rune { if len(signal) != 2 { log.Panic("Wrong length for 
one") } for _, r := range signal { if r != f { return r } } return 'P' } char_a_from_seven := func(signal string, c, f rune) rune { if len(signal) != 3 { log.Panic("Wrong length for one") } for _, r := range signal { if r != c && r != f { return r } } return 'P' } char_d_from_four := func(signal string, b, c, f rune) rune { if len(signal) != 4 { log.Panic("Wrong length for one") } for _, r := range signal { if r != b && r != c && r != f { return r } } return 'P' } char_g_from_eight := func(signal string, a, b, c, d, e, f rune) rune { if len(signal) != 7 { log.Panic("Wrong length for one") } for _, r := range signal { if r != a && r != b && r != c && r != d && r != e && r != f { return r } } return 'P' } sum := 0 for _, record := range data.states { mapping := make(map[rune]int) s1, s4, s7, s8 := "", "", "", "" for _, signal := range record.signal { switch len(signal) { case 2: s1 = signal case 4: s4 = signal case 3: s7 = signal case 7: s8 = signal } for _, d := range signal { mapping[d]++ } } var a, b, c, d, e, f, g rune for key, val := range mapping { switch val { case 9: f = key case 4: e = key case 6: b = key default: continue } } c = char_c_from_one(s1, f) a = char_a_from_seven(s7, c, f) d = char_d_from_four(s4, b, c, f) g = char_g_from_eight(s8, a, b, c, d, e, f) final_mapping := make_mapping(a, b, c, d, e, f, g) result := get_number_from_display(map_signal(final_mapping, record.expect[3])) result += get_number_from_display(map_signal(final_mapping, record.expect[2])) * 10 result += get_number_from_display(map_signal(final_mapping, record.expect[1])) * 100 result += get_number_from_display(map_signal(final_mapping, record.expect[0])) * 1000 sum += result } return sum } func main() { defer aoc_fun.Unprofile(aoc_fun.ProfileCPU()) data := read_data() log.Printf("01: %d", d08_1(data)) log.Printf("02: %d", d08_2(data)) }
d08/d08.go
0.511473
0.418756
d08.go
starcoder
// This file implements type parameter inference given // a list of concrete arguments and a parameter list. package types import ( "go/token" "strings" ) // infer returns the list of actual type arguments for the given list of type parameters tparams // by inferring them from the actual arguments args for the parameters params. If infer fails to // determine all type arguments, an error is reported and the result is nil. func (check *Checker) infer(pos token.Pos, tparams []*TypeName, params *Tuple, args []*operand) []Type { assert(params.Len() == len(args)) u := check.newUnifier(false) u.x.init(tparams) errorf := func(kind string, tpar, targ Type, arg *operand) { // provide a better error message if we can targs, failed := u.x.types() if failed == 0 { // The first type parameter couldn't be inferred. // If none of them could be inferred, don't try // to provide the inferred type in the error msg. allFailed := true for _, targ := range targs { if targ != nil { allFailed = false break } } if allFailed { check.errorf(arg.pos(), "%s %s of %s does not match %s (cannot infer %s)", kind, targ, arg.expr, tpar, typeNamesString(tparams)) return } } smap := makeSubstMap(tparams, targs) inferred := check.subst(arg.pos(), tpar, smap) if inferred != tpar { check.errorf(arg.pos(), "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar) } else { check.errorf(arg.pos(), "%s %s of %s does not match %s", kind, targ, arg.expr, tpar) } } // Terminology: generic parameter = function parameter with a type-parameterized type // 1st pass: Unify parameter and argument types for generic parameters with typed arguments // and collect the indices of generic parameters with untyped arguments. 
var indices []int for i, arg := range args { par := params.At(i) // If we permit bidirectional unification, this conditional code needs to be // executed even if par.typ is not parameterized since the argument may be a // generic function (for which we want to infer // its type arguments). if IsParameterized(par.typ) { if arg.mode == invalid { // TODO(gri) we might still be able to infer all targs by // simply ignoring (continue) invalid args return nil // error was reported earlier } if targ := arg.typ; isTyped(targ) { // If we permit bidirectional unification, and targ is // a generic function, we need to initialize u.y with // the respectice type parameters of targ. if !u.unify(par.typ, targ) { errorf("type", par.typ, targ, arg) return nil } } else { indices = append(indices, i) } } } // Some generic parameters with untyped arguments may have been given a type // indirectly through another generic parameter with a typed argument; we can // ignore those now. (This only means that we know the types for those generic // parameters; it doesn't mean untyped arguments can be passed safely. We still // need to verify that assignment of those arguments is valid when we check // function parameter passing external to infer.) j := 0 for _, i := range indices { par := params.At(i) // Since untyped types are all basic (i.e., non-composite) types, an // untyped argument will never match a composite parameter type; the // only parameter type it can possibly match against is a *TypeParam. // Thus, only keep the indices of generic parameters that are not of // composite types and which don't have a type inferred yet. if tpar, _ := par.typ.(*TypeParam); tpar != nil && u.x.at(tpar.index) == nil { indices[j] = i j++ } } indices = indices[:j] // 2nd pass: Unify parameter and default argument types for remaining generic parameters. for _, i := range indices { par := params.At(i) arg := args[i] targ := Default(arg.typ) // The default type for an untyped nil is untyped nil. 
We must not // infer an untyped nil type as type parameter type. Ignore untyped // nil by making sure all default argument types are typed. if isTyped(targ) && !u.unify(par.typ, targ) { errorf("default type", par.typ, targ, arg) return nil } } // Collect type arguments and check if they all have been determined. // TODO(gri) consider moving this outside this function and then we won't need to pass in pos targs, failed := u.x.types() if failed >= 0 { tpar := tparams[failed] ppos := check.fset.Position(tpar.pos).String() check.errorf(pos, "cannot infer %s (%s)", tpar.name, ppos) return nil } return targs } // typeNamesString produces a string containing all the // type names in list suitable for human consumption. func typeNamesString(list []*TypeName) string { // common cases n := len(list) switch n { case 0: return "" case 1: return list[0].name case 2: return list[0].name + " and " + list[1].name } // general case (n > 2) var b strings.Builder for i, tname := range list[:n-1] { if i > 0 { b.WriteString(", ") } b.WriteString(tname.name) } b.WriteString(", and ") b.WriteString(list[n-1].name) return b.String() } // IsParameterized reports whether typ contains any type parameters. // TODO(gri) This is not strictly correct. We only want the free // type parameters for a given type. (At the moment, the only way // to mix free and bound type parameters is through method type parameters // on parameterized receiver types - need to investigate.) func IsParameterized(typ Type) bool { return isParameterized(typ, make(map[Type]bool)) } func isParameterized(typ Type, seen map[Type]bool) (res bool) { // detect cycles // TODO(gri) can/should this be a Checker map? if x, ok := seen[typ]; ok { return x } seen[typ] = false defer func() { seen[typ] = res }() switch t := typ.(type) { case nil, *Basic: // TODO(gri) should nil be handled here? 
break case *Array: return isParameterized(t.elem, seen) case *Slice: return isParameterized(t.elem, seen) case *Struct: for _, fld := range t.fields { if isParameterized(fld.typ, seen) { return true } } case *Pointer: return isParameterized(t.base, seen) case *Tuple: n := t.Len() for i := 0; i < n; i++ { if isParameterized(t.At(i).typ, seen) { return true } } case *Sum: return isParameterizedList(t.types, seen) case *Signature: assert(t.tparams == nil) // TODO(gri) is this correct? // TODO(gri) Rethink check below. //assert(t.recv == nil || !isParameterized(t.recv.typ)) return isParameterized(t.params, seen) || isParameterized(t.results, seen) case *Interface: if t.allMethods != nil { // interface is complete - quick test for _, m := range t.allMethods { if isParameterized(m.typ, seen) { return true } } return isParameterizedList(unpack(t.allTypes), seen) } return t.iterate(func(t *Interface) bool { for _, m := range t.methods { if isParameterized(m.typ, seen) { return true } } return isParameterizedList(unpack(t.types), seen) }, nil) case *Map: return isParameterized(t.key, seen) || isParameterized(t.elem, seen) case *Chan: return isParameterized(t.elem, seen) case *Named: return isParameterizedList(t.targs, seen) case *TypeParam: return true case *instance: return isParameterizedList(t.targs, seen) default: unreachable() } return false } // IsParameterizedList reports whether any type in list is parameterized. func IsParameterizedList(list []Type) bool { return isParameterizedList(list, make(map[Type]bool)) } func isParameterizedList(list []Type, seen map[Type]bool) bool { for _, t := range list { if isParameterized(t, seen) { return true } } return false }
src/go/types/infer.go
0.520253
0.466238
infer.go
starcoder
package typegraph

// ASSIGNABLE_OP_VALUE is the well-known parameter name under which an
// assignable operator (see `setindex` below) receives the value being assigned.
const ASSIGNABLE_OP_VALUE = "value"

// typerefGetter computes a type reference relative to the type that
// declares the operator.
type typerefGetter func(containingType TypeReference) TypeReference

// operatorParameter represents a single expected parameter on an operator.
type operatorParameter struct {
	Name             string        // The name of the parameter.
	getParameterType typerefGetter // The expected type.
}

// ExpectedType returns the type expected for this parameter.
func (op *operatorParameter) ExpectedType(containingType TypeReference) TypeReference {
	return op.getParameterType(containingType)
}

// operatorDefinition represents the definition of a supported operator on a Serulian type.
type operatorDefinition struct {
	Name          string              // The name of the operator.
	IsStatic      bool                // Whether the operator is static.
	IsAssignable  bool                // Whether the operator is assignable.
	getReturnType typerefGetter       // The expected return type.
	Parameters    []operatorParameter // The expected parameters.
}

// ExpectedReturnType returns the return type expected for this operator.
func (od *operatorDefinition) ExpectedReturnType(containingType TypeReference) TypeReference {
	return od.getReturnType(containingType)
}

// GetMemberType returns the member type for this operator definition.
func (od *operatorDefinition) GetMemberType(containingType TypeReference, declaredReturnType TypeReference) TypeReference {
	// The member type for an operator is a function that takes in the expected parameters
	// and returns the declared return type.
	typegraph := containingType.tdg

	// Add the operator's parameters.
	var funcType = typegraph.NewTypeReference(typegraph.FunctionType()).WithGeneric(declaredReturnType)
	for _, param := range od.Parameters {
		funcType = funcType.WithParameter(param.getParameterType(containingType))
	}

	return funcType
}

// buildOperatorDefinitions sets the defined operators supported in the type system.
// buildOperatorDefinitions registers every operator the type system supports
// into t.operators, keyed by operator name. Return and parameter types are
// expressed as closures (typerefGetter) so they can be resolved relative to
// the containing type at lookup time.
func (t *TypeGraph) buildOperatorDefinitions() {
	// Getter that yields the containing type itself.
	containingTypeGetter := func(containingType TypeReference) TypeReference {
		return containingType
	}

	// Getter that yields Stream<containingType>, used by the range operators.
	streamContainingTypeGetter := func(containingType TypeReference) TypeReference {
		return t.NewTypeReference(t.StreamType(), containingType)
	}

	// Getter factory for a fixed (static) type, independent of the container.
	staticTypeGetter := func(staticType TGTypeDecl) typerefGetter {
		return func(containingType TypeReference) TypeReference {
			return t.NewTypeReference(staticType)
		}
	}

	// Same as staticTypeGetter but marks the reference nullable
	// (used for optional slice indices).
	staticNullableTypeGetter := func(staticType TGTypeDecl) typerefGetter {
		return func(containingType TypeReference) TypeReference {
			return t.NewTypeReference(staticType).AsNullable()
		}
	}

	anyTypeGetter := func(containingType TypeReference) TypeReference {
		return t.AnyTypeReference()
	}

	voidTypeGetter := func(containingType TypeReference) TypeReference {
		return t.VoidTypeReference()
	}

	// Shared parameter shapes for unary and binary operators.
	unaryParameters := []operatorParameter{
		operatorParameter{"value", containingTypeGetter},
	}

	binaryParameters := []operatorParameter{
		operatorParameter{"left", containingTypeGetter},
		operatorParameter{"right", containingTypeGetter},
	}

	// Field order: Name, IsStatic, IsAssignable, getReturnType, Parameters.
	operators := []operatorDefinition{
		// Binary operators: +, -, *, /, %
		operatorDefinition{"plus", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"minus", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"times", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"div", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"mod", true, false, containingTypeGetter, binaryParameters},

		// Bitwise operators: ^, |, &, <<, >>, ~
		operatorDefinition{"xor", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"or", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"and", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"leftshift", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"rightshift", true, false, containingTypeGetter, binaryParameters},
		operatorDefinition{"not", true, false, containingTypeGetter, unaryParameters},

		operatorDefinition{"bool", true, false, staticTypeGetter(t.BoolType()), unaryParameters},

		// Equality.
		operatorDefinition{"equals", true, false, staticTypeGetter(t.BoolType()), binaryParameters},

		// Comparison.
		operatorDefinition{"compare", true, false, staticTypeGetter(t.IntType()), binaryParameters},

		// Ranges.
		operatorDefinition{"range", true, false, streamContainingTypeGetter, binaryParameters},
		operatorDefinition{"exclusiverange", true, false, streamContainingTypeGetter, binaryParameters},

		// Contains.
		operatorDefinition{"contains", false, false, staticTypeGetter(t.BoolType()), []operatorParameter{
			operatorParameter{"item", anyTypeGetter},
		}},

		// Slice.
		operatorDefinition{"slice", false, false, anyTypeGetter, []operatorParameter{
			operatorParameter{"startindex", staticNullableTypeGetter(t.IntType())},
			operatorParameter{"endindex", staticNullableTypeGetter(t.IntType())},
		}},

		// Index.
		operatorDefinition{"index", false, false, anyTypeGetter, []operatorParameter{
			operatorParameter{"index", anyTypeGetter},
		}},

		// SetIndex: the only assignable operator; its assigned value arrives
		// under the ASSIGNABLE_OP_VALUE parameter name.
		operatorDefinition{"setindex", false, true, voidTypeGetter, []operatorParameter{
			operatorParameter{"index", anyTypeGetter},
			operatorParameter{ASSIGNABLE_OP_VALUE, anyTypeGetter},
		}},
	}

	for _, operator := range operators {
		t.operators[operator.Name] = operator
	}
}
graphs/typegraph/operators.go
0.826887
0.633934
operators.go
starcoder
package giamesoft

import (
	"image"
	"image/color"
	"image/draw"

	"github.com/GUMI-golang/giame"
)

// rasterWith scales each source pixel by the workspace coverage value
// v = ws.Get(x, y) and writes the result into dst at min+(x, y).
// at supplies the source color for a workspace coordinate.
//
// This is the single loop body that the original file duplicated verbatim
// across RasterFiller, RasterFixed, RasterKernel and RasterRepeat.
func rasterWith(dst draw.Image, ws *Workspace, min image.Point, at func(x, y int) color.Color) {
	for x := 0; x < ws.Width; x++ {
		for y := 0; y < ws.Height; y++ {
			v := ws.Get(x, y)
			// RGBA() yields 16-bit channels; >>8 recovers the 8-bit value,
			// and the +.5 rounds to nearest after scaling by coverage.
			r, g, b, a := at(x, y).RGBA()
			dst.Set(min.X+x, min.Y+y, color.RGBA{
				R: uint8(float32(r>>8)*v + .5),
				G: uint8(float32(g>>8)*v + .5),
				B: uint8(float32(b>>8)*v + .5),
				A: uint8(float32(a>>8)*v + .5),
			})
		}
	}
}

// RasterFiller rasters a generic Filler, bounded to the workspace, into dst.
func RasterFiller(dst draw.Image, ws *Workspace, min image.Point, f giame.Filler) {
	f.ToBound(image.Rect(0, 0, ws.Width, ws.Height))
	rasterWith(dst, ws, min, func(x, y int) color.Color { return f.At(x, y) })
}

// RasterUniform rasters a single uniform color into dst.
// Kept as a direct loop: f.Color's 8-bit fields are used as-is, which is
// equivalent to the >>8 path the other rasters take through RGBA().
func RasterUniform(dst draw.Image, ws *Workspace, min image.Point, f *giame.UniformFiller) {
	for x := 0; x < ws.Width; x++ {
		for y := 0; y < ws.Height; y++ {
			v := ws.Get(x, y)
			dst.Set(min.X+x, min.Y+y, color.RGBA{
				R: uint8(float32(f.Color.R)*v + .5),
				G: uint8(float32(f.Color.G)*v + .5),
				B: uint8(float32(f.Color.B)*v + .5),
				A: uint8(float32(f.Color.A)*v + .5),
			})
		}
	}
}

// RasterFixed rasters a FixedFiller into dst. Note that, as in the original,
// no ToBound call is made for fixed fillers.
func RasterFixed(dst draw.Image, ws *Workspace, min image.Point, f *giame.FixedFiller) {
	rasterWith(dst, ws, min, func(x, y int) color.Color { return f.At(x, y) })
}

// RasterKernel rasters a KernelFiller, bounded to the workspace, into dst.
func RasterKernel(dst draw.Image, ws *Workspace, min image.Point, f *giame.KernelFiller) {
	f.ToBound(image.Rect(0, 0, ws.Width, ws.Height))
	rasterWith(dst, ws, min, func(x, y int) color.Color { return f.At(x, y) })
}

// RasterRepeat rasters a RepeatFiller, bounded to the workspace, into dst.
func RasterRepeat(dst draw.Image, ws *Workspace, min image.Point, f *giame.RepeatFiller) {
	f.ToBound(image.Rect(0, 0, ws.Width, ws.Height))
	rasterWith(dst, ws, min, func(x, y int) color.Color { return f.At(x, y) })
}
giamesoft/DST_Filler.go
0.54698
0.471953
DST_Filler.go
starcoder
package state import . "api" func (l *luaState) Compare(idx1, idx2 int, op CompareOp) bool { a := l.stack.get(idx1) b := l.stack.get(idx2) switch op { case LUA_OPEQ: return _eq(a, b) case LUA_OPLT: return _lt(a, b) case LUA_OPLE: return _le(a, b) case LUA_OPGT: return _gt(a, b) case LUA_OPGE: return _ge(a, b) default: panic("invalid compare op") } } func _eq(a, b luaValue) bool { switch x := a.(type) { case nil: return b == nil case bool: y, ok := b.(bool) return ok && x == y case string: y, ok := b.(string) return ok && x == y case int64: switch y := b.(type) { case int64: return x == y case float64: return float64(x) == y default: return false } case float64: switch y := b.(type) { case int64: return x == float64(y) case float64: return x == y default: return false } default: return a == b } } func _lt(a, b luaValue) bool { switch x := a.(type) { case string: if y, ok := b.(string); ok { return x < y } case int64: switch y := b.(type) { case int64: return x < y case float64: return float64(x) < y } case float64: switch y := b.(type) { case float64: return x < y case int64: return x < float64(y) } } panic("comparison error") } func _le(a, b luaValue) bool { switch x := a.(type) { case string: if y, ok := b.(string); ok { return x <= y } case int64: switch y := b.(type) { case int64: return x <= y case float64: return float64(x) <= y } case float64: switch y := b.(type) { case float64: return x <= y case int64: return x <= float64(y) } } panic("comparison error") } func _gt(a, b luaValue) bool { switch x := a.(type) { case string: if y, ok := b.(string); ok { return x > y } case int64: switch y := b.(type) { case int64: return x > y case float64: return float64(x) > y } case float64: switch y := b.(type) { case float64: return x > y case int64: return x > float64(y) } } panic("comparison error") } func _ge(a, b luaValue) bool { switch x := a.(type) { case string: if y, ok := b.(string); ok { return x >= y } case int64: switch y := b.(type) { case int64: return x >= 
y case float64: return float64(x) >= y } case float64: switch y := b.(type) { case float64: return x >= y case int64: return x >= float64(y) } } panic("comparison error") }
src/state/api_ compare.go
0.529507
0.510192
api_ compare.go
starcoder
package internal type expr interface { accept(exprVisitor) R } type exprVisitor interface { visitListExpr(expr *listExpr) R visitDictionaryExpr(expr *dictionaryExpr) R visitAssignExpr(expr *assignExpr) R visitAccessExpr(expr *accessExpr) R visitBinaryExpr(expr *binaryExpr) R visitCallExpr(expr *callExpr) R visitGetExpr(expr *getExpr) R visitSetExpr(expr *setExpr) R visitSuperExpr(expr *superExpr) R visitGroupingExpr(expr *groupingExpr) R visitLiteralExpr(expr *literalExpr) R visitLogicalExpr(expr *logicalExpr) R visitThisExpr(expr *thisExpr) R visitUnaryExpr(expr *unaryExpr) R visitVariableExpr(expr *variableExpr) R visitFunctionExpr(expr *functionExpr) R } type listExpr struct { elements []expr brace *token } func (s *listExpr) accept(visitor exprVisitor) R { return visitor.visitListExpr(s) } type dictionaryExpr struct { elements []expr curlyBrace *token } func (s *dictionaryExpr) accept(visitor exprVisitor) R { return visitor.visitDictionaryExpr(s) } type assignExpr struct { name *token value expr access expr } func (s *assignExpr) accept(visitor exprVisitor) R { return visitor.visitAssignExpr(s) } type accessExpr struct { object expr brace *token first expr firstColon *token second expr secondColon *token third expr } func (s *accessExpr) accept(visitor exprVisitor) R { return visitor.visitAccessExpr(s) } type binaryExpr struct { left expr operator *token right expr } func (s *binaryExpr) accept(visitor exprVisitor) R { return visitor.visitBinaryExpr(s) } type callExpr struct { callee expr paren *token arguments []expr } func (s *callExpr) accept(visitor exprVisitor) R { return visitor.visitCallExpr(s) } type getExpr struct { object expr name *token } func (s *getExpr) accept(visitor exprVisitor) R { return visitor.visitGetExpr(s) } type setExpr struct { object expr name *token value expr access expr } func (s *setExpr) accept(visitor exprVisitor) R { return visitor.visitSetExpr(s) } type superExpr struct { keyword *token method *token } func (s *superExpr) 
accept(visitor exprVisitor) R { return visitor.visitSuperExpr(s) } type groupingExpr struct { expression expr } func (s *groupingExpr) accept(visitor exprVisitor) R { return visitor.visitGroupingExpr(s) } type literalExpr struct { value interface{} } func (s *literalExpr) accept(visitor exprVisitor) R { return visitor.visitLiteralExpr(s) } type logicalExpr struct { left expr operator *token right expr } func (s *logicalExpr) accept(visitor exprVisitor) R { return visitor.visitLogicalExpr(s) } type thisExpr struct { keyword *token } func (s *thisExpr) accept(visitor exprVisitor) R { return visitor.visitThisExpr(s) } type unaryExpr struct { operator *token right expr } func (s *unaryExpr) accept(visitor exprVisitor) R { return visitor.visitUnaryExpr(s) } type variableExpr struct { name *token } func (s *variableExpr) accept(visitor exprVisitor) R { return visitor.visitVariableExpr(s) } type functionExpr struct { params []*token body []stmt } func (s *functionExpr) accept(visitor exprVisitor) R { return visitor.visitFunctionExpr(s) }
internal/expr.go
0.530723
0.435421
expr.go
starcoder
package templatecheck import ( "reflect" "text/template/parse" ) func checkLen(s *state, dot reflect.Type, args []parse.Node) reflect.Type { arg := args[0] argType, isLiteral := s.evalArg(dot, arg) if isLiteral { if argType == stringType { return intType } s.errorf("len of %s", arg) } argType = indirectType(argType) if argType == unknownType { return intType } switch argType.Kind() { case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: return intType case reflect.Interface: // We can't assume anything about an interface type. return intType default: s.errorf("len of type %s", typeString(argType)) } panic("not reached") } func checkIndex(s *state, dot reflect.Type, args []parse.Node) reflect.Type { item := args[0] itemType, _ := s.evalArg(dot, item) if itemType == nil { s.errorf("index of untyped nil") } for _, index := range args[1:] { itemType = indirectType(itemType) indexType, _ := s.evalArg(dot, index) switch itemType.Kind() { case reflect.Array, reflect.Slice, reflect.String: checkIndexArg(s, indexType) if itemType.Kind() == reflect.String { itemType = byteType } else { itemType = itemType.Elem() } case reflect.Map: checkMapArg(s, indexType, itemType.Key()) itemType = itemType.Elem() default: s.errorf("can't index item of type %s", typeString(itemType)) } } return itemType } func checkIndexArg(s *state, typ reflect.Type) { if typ == nil { s.errorf("cannot index slice/array with nil") } if !(typ == intType || typ == numberType) { s.errorf("cannot index slice/array with type %s", typ) } } func checkMapArg(s *state, indexType, keyType reflect.Type) { if indexType == nil { if !canBeNil(keyType) { s.errorf("value is nil; should be of type %s", typeString(keyType)) } return } if indexType.AssignableTo(keyType) { return } if intLike(indexType.Kind()) && intLike(keyType.Kind()) && indexType.ConvertibleTo(keyType) { return } s.errorf("index has type %s; should be %s", typeString(indexType), typeString(keyType)) } func intLike(typ reflect.Kind) 
bool { switch typ { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return true } return false } func checkSlice(s *state, dot reflect.Type, args []parse.Node) reflect.Type { item := args[0] indexes := args[1:] itemType, _ := s.evalArg(dot, item) if itemType == nil { s.errorf("index of untyped nil") } if len(indexes) > 3 { s.errorf("too many slice indexes: %d", len(indexes)) } var resultType reflect.Type switch itemType.Kind() { case reflect.String: if len(indexes) == 3 { s.errorf("cannot 3-index slice a string") } resultType = itemType case reflect.Array: resultType = reflect.SliceOf(itemType.Elem()) case reflect.Slice: resultType = itemType default: s.errorf("can't slice item of type %s", typeString(itemType)) } for _, index := range indexes { indexType, _ := s.evalArg(dot, index) checkIndexArg(s, indexType) } return resultType } func checkEq(s *state, dot reflect.Type, args []parse.Node) reflect.Type { if len(args) == 1 { s.errorf("missing argument for comparison") } for _, arg := range args { typ, _ := s.evalArg(dot, arg) if definitelyNotComparable(typ) { s.errorf("uncomparable type: %s", typeString(typ)) } } return boolType } // definitelyNotComparable returns true if values of type t can never be compared. // Only non-comparable struct types have that property. func definitelyNotComparable(t reflect.Type) bool { return t != nil && t.Kind() == reflect.Struct && !t.Comparable() } // check le, gt, etc. 
func checkOrderedComparison(s *state, dot reflect.Type, args []parse.Node) reflect.Type { for _, arg := range args { if t, _ := s.evalArg(dot, arg); !isOrderable(t) { s.errorf("cannot compare values of type %s", typeString(t)) } } return boolType } func isOrderable(t reflect.Type) bool { if t == nil { return false } if intLike(t.Kind()) { return true } switch t.Kind() { case reflect.Float32, reflect.Float64, reflect.String: return true default: return false } } func typeString(t reflect.Type) string { if t == nil { return "untyped nil" } return t.String() }
funcs.go
0.566978
0.430985
funcs.go
starcoder
package calc import "math" type binaryExpr struct { left Expr right Expr } type addExpr binaryExpr func (x addExpr) Eval(ctx EvalContext) Number { return x.left.Eval(ctx) + x.right.Eval(ctx) } type minusExpr binaryExpr func (x minusExpr) Eval(ctx EvalContext) Number { return x.left.Eval(ctx) - x.right.Eval(ctx) } type timesExpr binaryExpr func (x timesExpr) Eval(ctx EvalContext) Number { return x.left.Eval(ctx) * x.right.Eval(ctx) } type divideExpr binaryExpr func (x divideExpr) Eval(ctx EvalContext) Number { return x.left.Eval(ctx) / x.right.Eval(ctx) } type moduloExpr binaryExpr func (x moduloExpr) Eval(ctx EvalContext) Number { return Number(int(x.left.Eval(ctx)) % int(x.right.Eval(ctx))) } type powerExpr binaryExpr func (x powerExpr) Eval(ctx EvalContext) Number { left := float64(x.left.Eval(ctx)) right := float64(x.right.Eval(ctx)) return Number(math.Pow(left, right)) } type logExpr binaryExpr func (x logExpr) Eval(ctx EvalContext) Number { base := float64(x.left.Eval(ctx)) num := float64(x.right.Eval(ctx)) return Number(math.Log(num) / math.Log(base)) } type unaryExpr struct { inner Expr } type negateExpr unaryExpr func (x negateExpr) Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(-inner) } type sqrtExpr unaryExpr func (x sqrtExpr) Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(math.Sqrt(inner)) } type sinExpr unaryExpr func (x sinExpr) Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(math.Sin(inner)) } type cosExpr unaryExpr func (x cosExpr) Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(math.Cos(inner)) } type tanExpr unaryExpr func (x tanExpr) Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(math.Tan(inner)) } type asinExpr unaryExpr func (x asinExpr) Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(math.Asin(inner)) } type acosExpr unaryExpr func (x acosExpr) 
Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(math.Acos(inner)) } type atanExpr unaryExpr func (x atanExpr) Eval(ctx EvalContext) Number { inner := float64(x.inner.Eval(ctx)) return Number(math.Atan(inner)) } type identExpr struct { ident string } func (x identExpr) Eval(ctx EvalContext) Number { return ctx.Get(x.ident) } type numberExpr struct { number Number } func (x numberExpr) Eval(ctx EvalContext) Number { return x.number }
internal/calc/exprs.go
0.825167
0.524577
exprs.go
starcoder
package ent import ( "fmt" "strings" "entgo.io/ent/dialect/sql" "github.com/robinhuiser/fca-emu/ent/bank" "github.com/robinhuiser/fca-emu/ent/branch" ) // Branch is the model entity for the Branch schema. type Branch struct { config `json:"-"` // ID of the ent. ID int `json:"id,omitempty"` // BranchCode holds the value of the "branchCode" field. BranchCode string `json:"branchCode,omitempty"` // StreetNumber holds the value of the "streetNumber" field. StreetNumber string `json:"streetNumber,omitempty"` // StreetName holds the value of the "streetName" field. StreetName string `json:"streetName,omitempty"` // City holds the value of the "city" field. City string `json:"city,omitempty"` // State holds the value of the "state" field. State string `json:"state,omitempty"` // Zip holds the value of the "zip" field. Zip string `json:"zip,omitempty"` // Latitude holds the value of the "latitude" field. Latitude float64 `json:"latitude,omitempty"` // Longitude holds the value of the "longitude" field. Longitude float64 `json:"longitude,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the BranchQuery when eager-loading is set. Edges BranchEdges `json:"edges"` bank_branches *int } // BranchEdges holds the relations/edges for other nodes in the graph. type BranchEdges struct { // Owner holds the value of the owner edge. Owner *Bank `json:"owner,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. loadedTypes [1]bool } // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e BranchEdges) OwnerOrErr() (*Bank, error) { if e.loadedTypes[0] { if e.Owner == nil { // The edge owner was loaded in eager-loading, // but was not found. 
return nil, &NotFoundError{label: bank.Label} } return e.Owner, nil } return nil, &NotLoadedError{edge: "owner"} } // scanValues returns the types for scanning values from sql.Rows. func (*Branch) scanValues(columns []string) ([]interface{}, error) { values := make([]interface{}, len(columns)) for i := range columns { switch columns[i] { case branch.FieldLatitude, branch.FieldLongitude: values[i] = &sql.NullFloat64{} case branch.FieldID: values[i] = &sql.NullInt64{} case branch.FieldBranchCode, branch.FieldStreetNumber, branch.FieldStreetName, branch.FieldCity, branch.FieldState, branch.FieldZip: values[i] = &sql.NullString{} case branch.ForeignKeys[0]: // bank_branches values[i] = &sql.NullInt64{} default: return nil, fmt.Errorf("unexpected column %q for type Branch", columns[i]) } } return values, nil } // assignValues assigns the values that were returned from sql.Rows (after scanning) // to the Branch fields. func (b *Branch) assignValues(columns []string, values []interface{}) error { if m, n := len(values), len(columns); m < n { return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) } for i := range columns { switch columns[i] { case branch.FieldID: value, ok := values[i].(*sql.NullInt64) if !ok { return fmt.Errorf("unexpected type %T for field id", value) } b.ID = int(value.Int64) case branch.FieldBranchCode: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field branchCode", values[i]) } else if value.Valid { b.BranchCode = value.String } case branch.FieldStreetNumber: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field streetNumber", values[i]) } else if value.Valid { b.StreetNumber = value.String } case branch.FieldStreetName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field streetName", values[i]) } else if value.Valid { b.StreetName = value.String } case branch.FieldCity: if value, ok := 
values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field city", values[i]) } else if value.Valid { b.City = value.String } case branch.FieldState: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field state", values[i]) } else if value.Valid { b.State = value.String } case branch.FieldZip: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field zip", values[i]) } else if value.Valid { b.Zip = value.String } case branch.FieldLatitude: if value, ok := values[i].(*sql.NullFloat64); !ok { return fmt.Errorf("unexpected type %T for field latitude", values[i]) } else if value.Valid { b.Latitude = value.Float64 } case branch.FieldLongitude: if value, ok := values[i].(*sql.NullFloat64); !ok { return fmt.Errorf("unexpected type %T for field longitude", values[i]) } else if value.Valid { b.Longitude = value.Float64 } case branch.ForeignKeys[0]: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for edge-field bank_branches", value) } else if value.Valid { b.bank_branches = new(int) *b.bank_branches = int(value.Int64) } } } return nil } // QueryOwner queries the "owner" edge of the Branch entity. func (b *Branch) QueryOwner() *BankQuery { return (&BranchClient{config: b.config}).QueryOwner(b) } // Update returns a builder for updating this Branch. // Note that you need to call Branch.Unwrap() before calling this method if this Branch // was returned from a transaction, and the transaction was committed or rolled back. func (b *Branch) Update() *BranchUpdateOne { return (&BranchClient{config: b.config}).UpdateOne(b) } // Unwrap unwraps the Branch entity that was returned from a transaction after it was closed, // so that all future queries will be executed through the driver which created the transaction. 
func (b *Branch) Unwrap() *Branch { tx, ok := b.config.driver.(*txDriver) if !ok { panic("ent: Branch is not a transactional entity") } b.config.driver = tx.drv return b } // String implements the fmt.Stringer. func (b *Branch) String() string { var builder strings.Builder builder.WriteString("Branch(") builder.WriteString(fmt.Sprintf("id=%v", b.ID)) builder.WriteString(", branchCode=") builder.WriteString(b.BranchCode) builder.WriteString(", streetNumber=") builder.WriteString(b.StreetNumber) builder.WriteString(", streetName=") builder.WriteString(b.StreetName) builder.WriteString(", city=") builder.WriteString(b.City) builder.WriteString(", state=") builder.WriteString(b.State) builder.WriteString(", zip=") builder.WriteString(b.Zip) builder.WriteString(", latitude=") builder.WriteString(fmt.Sprintf("%v", b.Latitude)) builder.WriteString(", longitude=") builder.WriteString(fmt.Sprintf("%v", b.Longitude)) builder.WriteByte(')') return builder.String() } // Branches is a parsable slice of Branch. type Branches []*Branch func (b Branches) config(cfg config) { for _i := range b { b[_i].config = cfg } }
ent/branch.go
0.661923
0.416263
branch.go
starcoder
package core import ( "math" ) // Bounds3d represents a 3D bounding box type Bounds3d struct { MinPos *Vector3d // Minimum position MaxPos *Vector3d // Maximum position } // NewBounds3d returns a new Bounds3d pointer func NewBounds3d() *Bounds3d { b := new(Bounds3d) b.MinPos = NewVector3d(Infinity, Infinity, Infinity) b.MaxPos = NewVector3d(-Infinity, -Infinity, -Infinity) return b } // NewBounds3dMinMax returns a new Bounds3d pointer, // which has minPos and maxPos as its minimum and maximum positions. func NewBounds3dMinMax(minPos, maxPos *Vector3d) *Bounds3d { b := new(Bounds3d) b.MinPos = minPos b.MaxPos = maxPos return b } func (b1 *Bounds3d) Merge(b2 *Bounds3d) *Bounds3d { ret := &Bounds3d{} ret.MinPos = b1.MinPos.Minimum(b2.MinPos) ret.MaxPos = b1.MaxPos.Maximum(b2.MaxPos) return ret } func (b *Bounds3d) MergePoint(v *Vector3d) *Bounds3d { ret := &Bounds3d{} ret.MinPos = b.MinPos.Minimum(v) ret.MaxPos = b.MaxPos.Maximum(v) return ret } func (b *Bounds3d) Area() Float { diff := b.MaxPos.Subtract(b.MinPos) return math.Abs(diff.X * diff.Y * diff.Z) } func (b *Bounds3d) Centroid() *Vector3d { return b.MinPos.Add(b.MaxPos).Scale(0.5) } func (b *Bounds3d) MaxExtent() int { v := b.MaxPos.Subtract(b.MinPos) v = v.Abs() switch { case v.X > v.Y && v.X > v.Z: return 0 case v.Y > v.Z: return 1 default: return 2 } } func (b *Bounds3d) Intersect(ray *Ray, tNear *Float, tFar *Float) bool { t0 := 0.0 t1 := ray.MaxDist var tt0, tt1 Float // X tt0 = (b.MinPos.X - ray.Org.X) * ray.InvDir.X tt1 = (b.MaxPos.X - ray.Org.X) * ray.InvDir.X if tt0 > tt1 { tt0, tt1 = tt1, tt0 } t0 = math.Max(t0, tt0) t1 = math.Min(t1, tt1) if t0 > t1 { return false } // Y tt0 = (b.MinPos.Y - ray.Org.Y) * ray.InvDir.Y tt1 = (b.MaxPos.Y - ray.Org.Y) * ray.InvDir.Y if tt0 > tt1 { tt0, tt1 = tt1, tt0 } t0 = math.Max(t0, tt0) t1 = math.Min(t1, tt1) if t0 > t1 { return false } // Z tt0 = (b.MinPos.Z - ray.Org.Z) * ray.InvDir.Z tt1 = (b.MaxPos.Z - ray.Org.Z) * ray.InvDir.Z if tt0 > tt1 { tt0, tt1 = tt1, tt0 
} t0 = math.Max(t0, tt0) t1 = math.Min(t1, tt1) if t0 > t1 { return false } *tNear = t0 *tFar = t1 return true }
src/core/bounds3d.go
0.867162
0.661014
bounds3d.go
starcoder
package assert import ( "fmt" "regexp" "strconv" "strings" ) const ( Nil = iota Boolean Number String ) type Value struct { val interface{} vType uint8 } func NewValue(v interface{}) Value { res := Value{ val: v, vType: Nil, } if v == nil { return res } switch r := v.(type) { case bool: res.vType = Boolean case []byte: res.val = string(r) res.vType = String case string: res.vType = String case float64: res.vType = Number case float32: res.val = float64(r) res.vType = Number case int: res.val = float64(r) res.vType = Number case int8: res.val = float64(r) res.vType = Number case int16: res.val = float64(r) res.vType = Number case int32: res.val = float64(r) res.vType = Number case int64: res.val = float64(r) res.vType = Number case uint: res.val = float64(r) res.vType = Number case uint8: res.val = float64(r) res.vType = Number case uint16: res.val = float64(r) res.vType = Number case uint32: res.val = float64(r) res.vType = Number case uint64: res.val = float64(r) res.vType = Number default: s := fmt.Sprintf("%v", v) f, err := strconv.ParseFloat(s, 64) if err == nil { res.val = f res.vType = Number break } b, err := strconv.ParseBool(s) if err == nil { res.val = b res.vType = Boolean break } res.val = s res.vType = String } return res } func (v Value) Float() (float64, error) { switch v.vType { case Number: return v.val.(float64), nil case String: return strconv.ParseFloat(fmt.Sprintf("%v", v.val), 64) } return 0, fmt.Errorf("value '%v' not a number", v.val) } func (v Value) String() string { if v.vType == String { return v.val.(string) } return fmt.Sprintf("%v", v.val) } func (v Value) Boolean() bool { if v.vType == Boolean { return v.val.(bool) } s := strings.ToLower(fmt.Sprintf("%v", v.val)) return s != "" && s != "false" } func (v Value) Not() Value { return Value{ val: !v.Boolean(), vType: Boolean, } } func (v Value) And(v2 Value) Value { return Value{ val: v.Boolean() && v2.Boolean(), vType: Boolean, } } func (v Value) Or(v2 Value) Value { return Value{ val: 
v.Boolean() || v2.Boolean(), vType: Boolean, } } func (v Value) E(v2 Value) Value { return Value{ val: v.String() == v2.String(), vType: Boolean, } } func (v Value) RE(v2 Value) Value { exp, err := regexp.Compile(v2.String()) if err != nil { return Value{ val: false, vType: Boolean, } } return Value{ val: exp.MatchString(v.String()), vType: Boolean, } } func (v Value) NRE(v2 Value) Value { exp, err := regexp.Compile(v2.String()) if err != nil { return Value{ val: false, vType: Boolean, } } return Value{ val: !exp.MatchString(v.String()), vType: Boolean, } } func (v Value) NE(v2 Value) Value { return Value{ val: v.String() != v2.String(), vType: Boolean, } } func (v Value) GT(v2 Value) Value { left, err := v.Float() if err != nil { return Value{val: false, vType: Boolean} } right, err := v2.Float() if err != nil { return Value{val: false, vType: Boolean} } return Value{ val: left > right, vType: Boolean, } } func (v Value) GTE(v2 Value) Value { left, err := v.Float() if err != nil { return Value{val: false, vType: Boolean} } right, err := v2.Float() if err != nil { return Value{val: false, vType: Boolean} } return Value{ val: left >= right, vType: Boolean, } } func (v Value) LT(v2 Value) Value { left, err := v.Float() if err != nil { return Value{val: false, vType: Boolean} } right, err := v2.Float() if err != nil { return Value{val: false, vType: Boolean} } return Value{ val: left < right, vType: Boolean, } } func (v Value) LTE(v2 Value) Value { left, err := v.Float() if err != nil { return Value{val: false, vType: Boolean} } right, err := v2.Float() if err != nil { return Value{val: false, vType: Boolean} } return Value{ val: left <= right, vType: Boolean, } } func (v Value) Add(v2 Value) Value { f, err := v.Float() if err != nil { return Value{val: v.String() + v2.String(), vType: String} } f2, err := v2.Float() if err != nil { return Value{val: v.String() + v2.String(), vType: String} } return Value{ val: f + f2, vType: Number, } } func (v Value) Sub(v2 Value) 
(Value, error) { f, err := v.Float() if err != nil { return Value{}, err } f2, err := v2.Float() if err != nil { return Value{}, err } return Value{ val: f - f2, vType: Number, }, nil } func (v Value) Multi(v2 Value) (Value, error) { f, err := v.Float() if err != nil { return Value{}, err } f2, err := v2.Float() if err != nil { return Value{}, err } return Value{ val: f * f2, vType: Number, }, nil } func (v Value) Div(v2 Value) (Value, error) { f, err := v.Float() if err != nil { return Value{}, err } f2, err := v2.Float() if err != nil { return Value{}, err } return Value{ val: f / f2, vType: Number, }, nil } func (v Value) Mod(v2 Value) (Value, error) { f, err := v.Float() if err != nil { return Value{}, err } f2, err := v2.Float() if err != nil { return Value{}, err } return Value{ val: float64(int(f) % int(f2)), vType: Number, }, nil }
assert/value.go
0.579876
0.425486
value.go
starcoder
package ijson // Set sets the provide value to the path. It creates the structure if not present. // An error is returned if it fails to resolve the path OR encounters different type than expected by path. func Set(data, value interface{}, path ...string) (interface{}, error) { return set(data, value, false, path...) } // SetP is same as Set(). It just takes `"."` separated path. func SetP(data, value interface{}, path string) (interface{}, error) { return setP(data, value, false, path) } // SetF is same as Set(). It just forcefully replaces the structure if it is not same as expected by the path. func SetF(data, value interface{}, path ...string) (interface{}, error) { return set(data, value, true, path...) } // SetFP is same as SetF(). It just takes `"."` separated path. func SetFP(data, value interface{}, path string) (interface{}, error) { return setP(data, value, true, path) } func set(data interface{}, value interface{}, force bool, path ...string) (interface{}, error) { if len(path) == 0 { if data == nil { return value, nil } return data, nil } pathType := DetectSetPath(path[0]) switch pathType { case PSet_Obj: object, valid := data.(map[string]interface{}) if !valid { if data == nil || force { object = make(map[string]interface{}, 1) } else { return nil, errExpObj } // object = make(map[string]interface{}) } newData, err := set(object[path[0]], value, force, path[1:]...) if err != nil { return nil, err } object[path[0]] = newData return object, nil case PSet_ArrIdx: idx, err := index(path[0], PSet_ArrIdx) if err != nil { return nil, err } array, valid := data.([]interface{}) if !valid { if data == nil || force { array = make([]interface{}, idx+1) } else { return nil, errExpArr } } else { array = extend(array, idx) } newData, err := set(array[idx], value, force, path[1:]...) 
if err != nil { return nil, err } array[idx] = newData return array, nil case PSet_ArrAppend: array, valid := data.([]interface{}) if !valid { if data == nil || force { array = make([]interface{}, 0, 1) } else { return nil, errExpArr } } array = append(array, value) return array, nil default: return nil, errInvPth } } func setP(data interface{}, value interface{}, force bool, path string) (interface{}, error) { return set(data, value, force, split(path)...) } func extend(arr []interface{}, idx int) []interface{} { max := len(arr) - 1 if idx > max { arr = append(arr, make([]interface{}, idx-max)...) } return arr }
set.go
0.754553
0.566318
set.go
starcoder
package logr import ( "fmt" "time" ) // Any picks the best supported field type based on type of val. // For best performance when passing a struct (or struct pointer), // implement `logr.LogWriter` on the struct, otherwise reflection // will be used to generate a string representation. func Any(key string, val interface{}) Field { return fieldForAny(key, val) } // Int64 constructs a field containing a key and Int64 value. func Int64(key string, val int64) Field { return Field{Key: key, Type: Int64Type, Integer: val} } // Int32 constructs a field containing a key and Int32 value. func Int32(key string, val int32) Field { return Field{Key: key, Type: Int32Type, Integer: int64(val)} } // Int constructs a field containing a key and Int value. func Int(key string, val int) Field { return Field{Key: key, Type: IntType, Integer: int64(val)} } // Uint64 constructs a field containing a key and Uint64 value. func Uint64(key string, val uint64) Field { return Field{Key: key, Type: Uint64Type, Integer: int64(val)} } // Uint32 constructs a field containing a key and Uint32 value. func Uint32(key string, val uint32) Field { return Field{Key: key, Type: Uint32Type, Integer: int64(val)} } // Uint constructs a field containing a key and Uint value. func Uint(key string, val uint) Field { return Field{Key: key, Type: UintType, Integer: int64(val)} } // Float64 constructs a field containing a key and Float64 value. func Float64(key string, val float64) Field { return Field{Key: key, Type: Float64Type, Float: val} } // Float32 constructs a field containing a key and Float32 value. func Float32(key string, val float32) Field { return Field{Key: key, Type: Float32Type, Float: float64(val)} } // String constructs a field containing a key and String value. func String(key string, val string) Field { return Field{Key: key, Type: StringType, String: val} } // Stringer constructs a field containing a key and a `fmt.Stringer` value. // The `String` method will be called in lazy fashion. 
func Stringer(key string, val fmt.Stringer) Field { return Field{Key: key, Type: StringerType, Interface: val} } // Err constructs a field containing a default key ("error") and error value. func Err(err error) Field { return NamedErr("error", err) } // NamedErr constructs a field containing a key and error value. func NamedErr(key string, err error) Field { return Field{Key: key, Type: ErrorType, Interface: err} } // Bool constructs a field containing a key and bool value. func Bool(key string, val bool) Field { var b int64 if val { b = 1 } return Field{Key: key, Type: BoolType, Integer: b} } // Time constructs a field containing a key and time.Time value. func Time(key string, val time.Time) Field { return Field{Key: key, Type: TimeType, Interface: val} } // Duration constructs a field containing a key and time.Duration value. func Duration(key string, val time.Duration) Field { return Field{Key: key, Type: DurationType, Integer: int64(val)} } // Millis constructs a field containing a key and timestamp value. // The timestamp is expected to be milliseconds since Jan 1, 1970 UTC. func Millis(key string, val int64) Field { return Field{Key: key, Type: TimestampMillisType, Integer: val} } // Array constructs a field containing a key and array value. func Array(key string, val interface{}) Field { return Field{Key: key, Type: ArrayType, Interface: val} } // Map constructs a field containing a key and map value. func Map(key string, val interface{}) Field { return Field{Key: key, Type: MapType, Interface: val} }
vendor/github.com/mattermost/logr/v2/fieldapi.go
0.873956
0.465387
fieldapi.go
starcoder
package vector import ( "fmt" "math" "strings" ) type Algorithm interface { Compare(v1, v2 *Vector) (float64, error) } type simpsonComparator struct { } func (sc *simpsonComparator) Compare(v1, v2 *Vector) (float64, error) { intersect := v1.Intersect(v2) return intersect.Length() / math.Min(v1.Length(), v2.Length()), nil } type diceComparator struct { } func (dc *diceComparator) Compare(v1, v2 *Vector) (float64, error) { intersect := v1.Intersect(v2) return 2.0 * intersect.Length() / (v1.Length() + v2.Length()), nil } type jaccardComparator struct { } func (jc *jaccardComparator) Compare(v1, v2 *Vector) (float64, error) { intersect := v1.Intersect(v2) union := v1.Union(v2) return intersect.Length() / union.Length(), nil } type cosineComparator struct { } func (sc *cosineComparator) Compare(v1, v2 *Vector) (float64, error) { return v1.InnerProduct(v2), nil } type pearsonCorrelation struct { } func calcDeviation(v, union *Vector) float64 { sum := float64(0) average := v.average(union.Length()) for key, _ := range union.values { value := float64(v.values[key]) - average sum = sum + (value * value) } return math.Sqrt(sum) } func calcCovariance(v1, v2, union *Vector) float64 { covariance := float64(0) xAverage := v1.average(union.Length()) yAverage := v2.average(union.Length()) for key, _ := range union.values { x := float64(v1.values[key]) y := float64(v2.values[key]) covariance = covariance + ((x - xAverage) * (y - yAverage)) } return covariance } func (pc *pearsonCorrelation) Compare(v1, v2 *Vector) (float64, error) { union := v1.Union(v2) covariance := calcCovariance(v1, v2, union) deviation1 := calcDeviation(v1, union) deviation2 := calcDeviation(v2, union) return covariance / (deviation1 * deviation2), nil } type euclideanDistance struct { } func (ed *euclideanDistance) Compare(v1, v2 *Vector) (float64, error) { union := v1.Union(v2) sum := 0 for key := range union.values { value1 := v1.values[key] value2 := v2.values[key] sum = sum + ((value1 - value2) * (value1 
- value2)) } return math.Sqrt(float64(sum)), nil } type levenshteinDistance struct { } func (ld *levenshteinDistance) Compare(v1, v2 *Vector) (float64, error) { if v1.Source.Type() != "string" || v2.Source.Type() != "string" { return 0, fmt.Errorf("levenshtein distance: type of two vector must be string") } return ld.compareImpl(v1.Source.Value(), v2.Source.Value()) } func (ld *levenshteinDistance) compareImpl(s1, s2 string) (float64, error) { table := constructTable(s1, s2) calcLevenshtein(table, s1, s2) return float64(table[len(s1)][len(s2)]), nil } func calcLevenshtein(table [][]int, s1, s2 string) { for i := 1; i < len(table); i++ { for j := 1; j < len(table[i]); j++ { cost := 1 if s1[i-1] == s2[j-1] { cost = 0 } updateTable(table, i, j, cost) } } } func updateTable(table [][]int, i, j, cost int) { d1 := table[i-1][j] + 1 d2 := table[i][j-1] + 1 d3 := table[i-1][j-1] + cost table[i][j] = min(d1, d2, d3) } func constructTable(s1, s2 string) [][]int { table := [][]int{} for j := 0; j <= len(s1); j++ { values := []int{} for i := 0; i <= len(s2); i++ { values = append(values, 0) } table = append(table, values) } return initTable(table) } func initTable(table [][]int) [][]int { for i := 0; i < len(table); i++ { table[i][0] = i } for j := 0; j < len(table[0]); j++ { table[0][j] = j } return table } func min(values ...int) int { min := values[0] for _, value := range values { if min > value { min = value } } return min } /* func printTable(table [][]int) { for i := 0; i < len(table); i++ { for j := 0; j < len(table[i]); j++ { fmt.Printf("%2d ", table[i][j]) } fmt.Println() } } */ func NewAlgorithm(comparatorType string) (Algorithm, error) { switch strings.ToLower(comparatorType) { case "simpson": return &simpsonComparator{}, nil case "dice": return &diceComparator{}, nil case "jaccard": return &jaccardComparator{}, nil case "cosine": return &cosineComparator{}, nil case "pearson": return &pearsonCorrelation{}, nil case "euclidean": return &euclideanDistance{}, nil 
case "levenshtein": return &levenshteinDistance{}, nil } return nil, fmt.Errorf("%s: unknown algorithm", comparatorType) }
vector/compare.go
0.665845
0.521532
compare.go
starcoder
package packp /* A nice way to trace the real data transmitted and received by git, use: GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git Here follows a copy of the current protocol specification at the time of this writing. (Please notice that most http git servers will add a flush-pkt after the first pkt-line when using HTTP smart.) Documentation Common to Pack and Http Protocols =============================================== ABNF Notation ------------- ABNF notation as described by RFC 5234 is used within the protocol documents, except the following replacement core rules are used: ---- HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" ---- We also define the following common rules: ---- NUL = %x00 zero-id = 40*"0" obj-id = 40*(HEXDIGIT) refname = "HEAD" refname /= "refs/" <see discussion below> ---- A refname is a hierarchical octet string beginning with "refs/" and not violating the 'git-check-ref-format' command's validation rules. More specifically, they: . They can include slash `/` for hierarchical (directory) grouping, but no slash-separated component can begin with a dot `.`. . They must contain at least one `/`. This enforces the presence of a category like `heads/`, `tags/` etc. but the actual names are not restricted. . They cannot have two consecutive dots `..` anywhere. . They cannot have ASCII control characters (i.e. bytes whose values are lower than \040, or \177 `DEL`), space, tilde `~`, caret `^`, colon `:`, question-mark `?`, asterisk `*`, or open bracket `[` anywhere. . They cannot end with a slash `/` or a dot `.`. . They cannot end with the sequence `.lock`. . They cannot contain a sequence `@{`. . They cannot contain a `\\`. pkt-line Format --------------- Much (but not all) of the payload is described around pkt-lines. A pkt-line is a variable length binary string. 
The first four bytes of the line, the pkt-len, indicates the total length of the line, in hexadecimal. The pkt-len includes the 4 bytes used to contain the length's hexadecimal representation. A pkt-line MAY contain binary data, so implementors MUST ensure pkt-line parsing/formatting routines are 8-bit clean. A non-binary line SHOULD BE terminated by an LF, which if present MUST be included in the total length. Receivers MUST treat pkt-lines with non-binary data the same whether or not they contain the trailing LF (stripping the LF if present, and not complaining when it is missing). The maximum length of a pkt-line's data component is 65516 bytes. Implementations MUST NOT send pkt-line whose length exceeds 65520 (65516 bytes of payload + 4 bytes of length data). Implementations SHOULD NOT send an empty pkt-line ("0004"). A pkt-line with a length field of 0 ("0000"), called a flush-pkt, is a special case and MUST be handled differently than an empty pkt-line ("0004"). ---- pkt-line = data-pkt / flush-pkt data-pkt = pkt-len pkt-payload pkt-len = 4*(HEXDIG) pkt-payload = (pkt-len - 4)*(OCTET) flush-pkt = "0000" ---- Examples (as C-style strings): ---- pkt-line actual value --------------------------------- "0006a\n" "a\n" "0005a" "a" "000bfoobar\n" "foobar\n" "0004" "" ---- Packfile transfer protocols =========================== Git supports transferring data in packfiles over the ssh://, git://, http:// and file:// transports. There exist two sets of protocols, one for pushing data from a client to a server and another for fetching data from a server to a client. The three transports (ssh, git, file) use the same protocol to transfer data. http is documented in http-protocol.txt. The processes invoked in the canonical Git implementation are 'upload-pack' on the server side and 'fetch-pack' on the client side for fetching data; then 'receive-pack' on the server and 'send-pack' on the client for pushing data. 
The protocol functions to have a server tell a client what is currently on the server, then for the two to negotiate the smallest amount of data to send in order to fully update one or the other. pkt-line Format --------------- The descriptions below build on the pkt-line format described in protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless otherwise noted the usual pkt-line LF rules apply: the sender SHOULD include a LF, but the receiver MUST NOT complain if it is not present. Transports ---------- There are three transports over which the packfile protocol is initiated. The Git transport is a simple, unauthenticated server that takes the command (almost always 'upload-pack', though Git servers can be configured to be globally writable, in which 'receive- pack' initiation is also allowed) with which the client wishes to communicate and executes it and connects it to the requesting process. In the SSH transport, the client just runs the 'upload-pack' or 'receive-pack' process on the server over the SSH protocol and then communicates with that invoked process over the SSH connection. The file:// transport runs the 'upload-pack' or 'receive-pack' process locally and communicates with it over a pipe. Git Transport ------------- The Git transport starts off by sending the command and repository on the wire using the pkt-line format, followed by a NUL byte and a hostname parameter, terminated by a NUL byte. 0032git-upload-pack /project.git\0host=myserver.com\0 -- git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] request-command = "git-upload-pack" / "git-receive-pack" / "git-upload-archive" ; case sensitive pathname = *( %x01-ff ) ; exclude NUL host-parameter = "host=" hostname [ ":" port ] -- Only host-parameter is allowed in the git-proto-request. Clients MUST NOT attempt to send additional parameters. It is used for the git-daemon name based virtual hosting. 
See --interpolated-path option to git daemon, with the %H/%CH format characters. Basically what the Git client is doing to connect to an 'upload-pack' process on the server side over the Git protocol is this: $ echo -e -n \ "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | nc -v example.com 9418 If the server refuses the request for some reasons, it could abort gracefully with an error message. ---- error-line = PKT-LINE("ERR" SP explanation-text) ---- SSH Transport ------------- Initiating the upload-pack or receive-pack processes over SSH is executing the binary on the server via SSH remote execution. It is basically equivalent to running this: $ ssh git.example.com "git-upload-pack '/project.git'" For a server to support Git pushing and pulling for a given user over SSH, that user needs to be able to execute one or both of those commands via the SSH shell that they are provided on login. On some systems, that shell access is limited to only being able to run those two commands, or even just one of them. In an ssh:// format URI, it's absolute in the URI, so the '/' after the host name (or port number) is sent as an argument, which is then read by the remote git-upload-pack exactly as is, so it's effectively an absolute path in the remote filesystem. git clone ssh://user@example.com/project.git | v ssh user@example.com "git-upload-pack '/project.git'" In a "user@host:path" format URI, it's relative to the user's home directory, because the Git client will run: git clone user@example.com:project.git | v ssh user@example.com "git-upload-pack 'project.git'" The exception is if a '~' is used, in which case we execute it without the leading '/'. ssh://user@example.com/~alice/project.git, | v ssh user@example.com "git-upload-pack '~alice/project.git'" A few things to remember here: - The "command name" is spelled with dash (e.g. git-upload-pack), but this can be overridden by the client; - The repository path is always quoted with single quotes.
Fetching Data From a Server --------------------------- When one Git repository wants to get data that a second repository has, the first can 'fetch' from the second. This operation determines what data the server has that the client does not then streams that data down to the client in packfile format. Reference Discovery ------------------- When the client initially connects the server will immediately respond with a listing of each reference it has (all branches and tags) along with the object name that each reference currently points to. $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | nc -v example.com 9418 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack side-band side-band-64k ofs-delta shallow no-progress include-tag 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} 0000 The returned response is a pkt-line stream describing each ref and its current value. The stream MUST be sorted by name according to the C locale ordering. If HEAD is a valid ref, HEAD MUST appear as the first advertised ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the advertisement list at all, but other refs may still appear. The stream MUST include capability declarations behind a NUL on the first ref. The peeled value of a ref (that is "ref^{}") MUST be immediately after the ref itself, if presented. A conforming server MUST peel the ref if it's an annotated tag. 
---- advertised-refs = (no-refs / list-of-refs) *shallow flush-pkt no-refs = PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list) list-of-refs = first-ref *other-ref first-ref = PKT-LINE(obj-id SP refname NUL capability-list) other-ref = PKT-LINE(other-tip / other-peeled) other-tip = obj-id SP refname other-peeled = obj-id SP refname "^{}" shallow = PKT-LINE("shallow" SP obj-id) capability-list = capability *(SP capability) capability = 1*(LC_ALPHA / DIGIT / "-" / "_") LC_ALPHA = %x61-7A ---- Server and client MUST use lowercase for obj-id, both MUST treat obj-id as case-insensitive. See protocol-capabilities.txt for a list of allowed server capabilities and descriptions. Packfile Negotiation -------------------- After reference and capabilities discovery, the client can decide to terminate the connection by sending a flush-pkt, telling the server it can now gracefully terminate, and disconnect, when it does not need any pack data. This can happen with the ls-remote command, and also can happen when the client already is up-to-date. Otherwise, it enters the negotiation phase, where the client and server determine what the minimal packfile necessary for transport is, by telling the server what objects it wants, its shallow objects (if any), and the maximum commit depth it wants (if any). The client will also send a list of the capabilities it wants to be in effect, out of what the server said it could do with the first 'want' line. ---- upload-request = want-list *shallow-line *1depth-request flush-pkt want-list = first-want *additional-want shallow-line = PKT-LINE("shallow" SP obj-id) depth-request = PKT-LINE("deepen" SP depth) / PKT-LINE("deepen-since" SP timestamp) / PKT-LINE("deepen-not" SP ref) first-want = PKT-LINE("want" SP obj-id SP capability-list) additional-want = PKT-LINE("want" SP obj-id) depth = 1*DIGIT ---- Clients MUST send all the obj-ids it wants from the reference discovery phase as 'want' lines. 
Clients MUST send at least one 'want' command in the request body. Clients MUST NOT mention an obj-id in a 'want' command which did not appear in the response obtained through ref discovery. The client MUST write all obj-ids which it only has shallow copies of (meaning that it does not have the parents of a commit) as 'shallow' lines so that the server is aware of the limitations of the client's history. The client now sends the maximum commit history depth it wants for this transaction, which is the number of commits it wants from the tip of the history, if any, as a 'deepen' line. A depth of 0 is the same as not making a depth request. The client does not want to receive any commits beyond this depth, nor does it want objects needed only to complete those commits. Commits whose parents are not received as a result are defined as shallow and marked as such in the server. This information is sent back to the client in the next step. Once all the 'want's and 'shallow's (and optional 'deepen') are transferred, clients MUST send a flush-pkt, to tell the server side that it is done sending the list. Otherwise, if the client sent a positive depth request, the server will determine which commits will and will not be shallow and send this information to the client. If the client did not request a positive depth, this step is skipped. ---- shallow-update = *shallow-line *unshallow-line flush-pkt shallow-line = PKT-LINE("shallow" SP obj-id) unshallow-line = PKT-LINE("unshallow" SP obj-id) ---- If the client has requested a positive depth, the server will compute the set of commits which are no deeper than the desired depth. The set of commits start at the client's wants. The server writes 'shallow' lines for each commit whose parents will not be sent as a result. The server writes an 'unshallow' line for each commit which the client has indicated is shallow, but is no longer shallow at the currently requested depth (that is, its parents will now be sent). 
The server MUST NOT mark as unshallow anything which the client has not indicated was shallow. Now the client will send a list of the obj-ids it has using 'have' lines, so the server can make a packfile that only contains the objects that the client needs. In multi_ack mode, the canonical implementation will send up to 32 of these at a time, then will send a flush-pkt. The canonical implementation will skip ahead and send the next 32 immediately, so that there is always a block of 32 "in-flight on the wire" at a time. ---- upload-haves = have-list compute-end have-list = *have-line have-line = PKT-LINE("have" SP obj-id) compute-end = flush-pkt / PKT-LINE("done") ---- If the server reads 'have' lines, it then will respond by ACKing any of the obj-ids the client said it had that the server also has. The server will ACK obj-ids differently depending on which ack mode is chosen by the client. In multi_ack mode: * the server will respond with 'ACK obj-id continue' for any common commits. * once the server has found an acceptable common base commit and is ready to make a packfile, it will blindly ACK all 'have' obj-ids back to the client. * the server will then send a 'NAK' and then wait for another response from the client - either a 'done' or another list of 'have' lines. In multi_ack_detailed mode: * the server will differentiate the ACKs where it is signaling that it is ready to send data with 'ACK obj-id ready' lines, and signals the identified common commits with 'ACK obj-id common' lines. Without either multi_ack or multi_ack_detailed: * upload-pack sends "ACK obj-id" on the first common object it finds. After that it says nothing until the client gives it a "done". * upload-pack sends "NAK" on a flush-pkt if no common object has been found yet. If one has been found, and thus an ACK was already sent, it's silent on the flush-pkt. 
After the client has gotten enough ACK responses that it can determine that the server has enough information to send an efficient packfile (in the canonical implementation, this is determined when it has received enough ACKs that it can color everything left in the --date-order queue as common with the server, or the --date-order queue is empty), or the client determines that it wants to give up (in the canonical implementation, this is determined when the client sends 256 'have' lines without getting any of them ACKed by the server - meaning there is nothing in common and the server should just send all of its objects), then the client will send a 'done' command. The 'done' command signals to the server that the client is ready to receive its packfile data. However, the 256 limit *only* turns on in the canonical client implementation if we have received at least one "ACK %s continue" during a prior round. This helps to ensure that at least one common ancestor is found before we give up entirely. Once the 'done' line is read from the client, the server will either send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object name of the last commit determined to be common. The server only sends ACK after 'done' if there is at least one common base and multi_ack or multi_ack_detailed is enabled. The server always sends NAK after 'done' if there is no common base found. Then the server will start sending its packfile data. 
---- server-response = *ack_multi ack / nak ack_multi = PKT-LINE("ACK" SP obj-id ack_status) ack_status = "continue" / "common" / "ready" ack = PKT-LINE("ACK" SP obj-id) nak = PKT-LINE("NAK") ---- A simple clone may look like this (with no 'have' lines): ---- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ side-band-64k ofs-delta\n C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n C: 0000 C: 0009done\n S: 0008NAK\n S: [PACKFILE] ---- An incremental update (fetch) response might look like this: ---- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ side-band-64k ofs-delta\n C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n C: 0000 C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n C: [30 more have lines] C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n C: 0000 S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n S: 0008NAK\n C: 0009done\n S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n S: [PACKFILE] ---- Packfile Data ------------- Now that the client and server have finished negotiation about what the minimal amount of data that needs to be sent to the client is, the server will construct and send the required data in packfile format. See pack-format.txt for what the packfile itself actually looks like. If 'side-band' or 'side-band-64k' capabilities have been specified by the client, the server will send the packfile data multiplexed. Each packet starting with the packet-line length of the amount of data that follows, followed by a single byte specifying the sideband the following data is coming in on. 
In 'side-band' mode, it will send up to 999 data bytes plus 1 control code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' mode it will send up to 65519 data bytes plus 1 control code, for a total of up to 65520 bytes in a pkt-line. The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain packfile data, sideband '2' will be used for progress information that the client will generally print to stderr and sideband '3' is used for error information. If no 'side-band' capability was specified, the server will stream the entire packfile without multiplexing. Pushing Data To a Server ------------------------ Pushing data to a server will invoke the 'receive-pack' process on the server, which will allow the client to tell it which references it should update and then send all the data the server will need for those new references to be complete. Once all the data is received and validated, the server will then update its references to what the client specified. Authentication -------------- The protocol itself contains no authentication mechanisms. That is to be handled by the transport, such as SSH, before the 'receive-pack' process is invoked. If 'receive-pack' is configured over the Git transport, those repositories will be writable by anyone who can access that port (9418) as that transport is unauthenticated. Reference Discovery ------------------- The reference discovery phase is done nearly the same way as it is in the fetching protocol. Each reference obj-id and name on the server is sent in packet-line format to the client, followed by a flush-pkt. The only real difference is that the capability listing is different - the only possible values are 'report-status', 'delete-refs', 'ofs-delta' and 'push-options'. Reference Update Request and Packfile Transfer ---------------------------------------------- Once the client knows what references the server is at, it can send a list of reference update requests. 
For each reference on the server that it wants to update, it sends a line listing the obj-id currently on the server, the obj-id the client would like to update it to and the name of the reference. This list is followed by a flush-pkt. Then the push options are transmitted one per packet followed by another flush-pkt. After that the packfile that should contain all the objects that the server will need to complete the new references will be sent. ---- update-request = *shallow ( command-list | push-cert ) [packfile] shallow = PKT-LINE("shallow" SP obj-id) command-list = PKT-LINE(command NUL capability-list) *PKT-LINE(command) flush-pkt command = create / delete / update create = zero-id SP new-id SP name delete = old-id SP zero-id SP name update = old-id SP new-id SP name old-id = obj-id new-id = obj-id push-cert = PKT-LINE("push-cert" NUL capability-list LF) PKT-LINE("certificate version 0.1" LF) PKT-LINE("pusher" SP ident LF) PKT-LINE("pushee" SP url LF) PKT-LINE("nonce" SP nonce LF) PKT-LINE(LF) *PKT-LINE(command LF) *PKT-LINE(gpg-signature-lines LF) PKT-LINE("push-cert-end" LF) packfile = "PACK" 28*(OCTET) ---- If the receiving end does not support delete-refs, the sending end MUST NOT ask for delete command. If the receiving end does not support push-cert, the sending end MUST NOT send a push-cert command. When a push-cert command is sent, command-list MUST NOT be sent; the commands recorded in the push certificate is used instead. The packfile MUST NOT be sent if the only command used is 'delete'. A packfile MUST be sent if either create or update command is used, even if the server already has all the necessary objects. In this case the client MUST send an empty packfile. The only time this is likely to happen is if the client is creating a new branch or a tag that points to an existing obj-id. 
The server will receive the packfile, unpack it, then validate each reference that is being updated that it hasn't changed while the request was being processed (the obj-id is still the same as the old-id), and it will run any update hooks to make sure that the update is acceptable. If all of that is fine, the server will then update the references. Push Certificate ---------------- A push certificate begins with a set of header lines. After the header and an empty line, the protocol commands follow, one per line. Note that the trailing LF in push-cert PKT-LINEs is _not_ optional; it must be present. Currently, the following header fields are defined: `pusher` ident:: Identify the GPG key in "Human Readable Name <email@address>" format. `pushee` url:: The repository URL (anonymized, if the URL contains authentication material) the user who ran `git push` intended to push into. `nonce` nonce:: The 'nonce' string the receiving repository asked the pushing user to include in the certificate, to prevent replay attacks. The GPG signature lines are a detached signature for the contents recorded in the push certificate before the signature block begins. The detached signature is used to certify that the commands were given by the pusher, who must be the signer. Report Status ------------- After receiving the pack data from the sender, the receiver sends a report if 'report-status' capability is in effect. It is a short listing of what happened in that update. It will first list the status of the packfile unpacking as either 'unpack ok' or 'unpack [error]'. Then it will list the status for each of the references that it tried to update. Each line is either 'ok [refname]' if the update was successful, or 'ng [refname] [error]' if the update was not. 
---- report-status = unpack-status 1*(command-status) flush-pkt unpack-status = PKT-LINE("unpack" SP unpack-result) unpack-result = "ok" / error-msg command-status = command-ok / command-fail command-ok = PKT-LINE("ok" SP refname) command-fail = PKT-LINE("ng" SP refname SP error-msg) error-msg = 1*(OCTECT) ; where not "ok" ---- Updates can be unsuccessful for a number of reasons. The reference can have changed since the reference discovery phase was originally sent, meaning someone pushed in the meantime. The reference being pushed could be a non-fast-forward reference and the update hooks or configuration could be set to not allow that, etc. Also, some references can be updated while others can be rejected. An example client/server communication might look like this: ---- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n S: 0000 C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n C: 0000 C: [PACKDATA] S: 000eunpack ok\n S: 0018ok refs/heads/debug\n S: 002ang refs/heads/master non-fast-forward\n ---- */
vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go
0.725746
0.415788
doc.go
starcoder
package goheaps import ( "errors" "math" ) // The Heap type that holds a list of Nodes and a Type type Heap struct { Nodes []Node Type string } // Create a new Heap strucutre with the given nodes and heapType (type is a reserved word) func NewHeap(nodes []Node, heapType string) (*Heap, error) { // 'Validation', there might be a sanity checker in the core api but I've yet to find it if heapType != "min" || heapType != "max" { return nil, errors.New("heapType must be on of 'min' or 'max'") } return &Heap{Nodes: nodes, Type: heapType}, nil; } // Set the type of the Heap func (h *Heap) SetType(heapType string) error { if heapType != "min" || heapType != "max" { return errors.New("heapType must be on of 'min' or 'max'") } h.Type = heapType return nil; } // Clear the Heap of all its Nodes func (h *Heap) Clear() { h.Nodes = []Node{} } // Pop the Node from the head of the Heap, returning its weight and payload func (h *Heap) Pop() (int, int) { if h.IsEmpty() { return -1, -1 } node := h.Nodes[0] // TODO: I am confident that (with tests) this can be // changed to just assign [1:] and get the end of the list h.Nodes = append(h.Nodes[:0], h.Nodes[1:]...) 
if h.IsEmpty() { h.percolateDown(1) } return node.Weight, node.Payload } // Calculate the left child index for the given index func (h *Heap) LeftChildIndex(index int) int { return 2 * index } // Calculate the right child index for the given index func (h *Heap) RightChildIndex(index int) int { return 2 * index + 1 } // Get the Nodes Weight and Payload at the given index func (h *Heap) Fetch(index int) (int, int) { node := &h.Nodes[index] if node != nil { return node.Weight, node.Payload } else { return -1, -1 } } // Get the Payload and Weight of the first Node func (h *Heap) First() (int, int) { return h.Fetch(0) } // Get the size of the Heap func (h *Heap) Size() int { return len(h.Nodes) } // Return whether the Heap is empty or not func (h *Heap) IsEmpty() bool { return h.Size() == 0 } // Return whether the heap is valid, that is whether Nodes are sorted correctly func (h *Heap) IsValid() bool { if h.IsEmpty() { return true } for index, _ := range(h.Nodes) { leftChildIndex := h.LeftChildIndex(index) rightChildIndex := h.RightChildIndex(index) if &h.Nodes[leftChildIndex] != nil { sorted, err := h.sort(h.Nodes[index].Weight, h.Nodes[leftChildIndex].Weight) if err == nil || !sorted { return false } } if &h.Nodes[rightChildIndex] != nil { sorted, err := h.sort(h.Nodes[index].Weight, h.Nodes[rightChildIndex].Weight) if err == nil || !sorted { return false } } } return true } // Reset the Heap by percolating down the entire Heap (starting at index 0) func (h *Heap) Reset() { h.percolateDown(0) } // Insert a Node into the Heap with the given weight and payload func (h *Heap) Insert(weight, payload int) { h.Nodes = append(h.Nodes, Node{weight, payload}) h.percolateUp(h.Size()) } // Percolate up the Heap, starting at the given index func (h *Heap) percolateUp(index int) { parentIndex := int(h.parentIndex(index)) sorted, err := h.sort(h.Nodes[parentIndex].Weight, h.Nodes[index].Weight) if &h.Nodes[parentIndex] != nil && err != nil && sorted { h.Nodes[parentIndex], 
h.Nodes[index] = h.Nodes[index], h.Nodes[parentIndex] h.percolateUp(parentIndex) } } // Get the parent index func (h *Heap) parentIndex(index int) float64 { return math.Floor(float64(index) / 2) } func (h *Heap) sort(a, b int) (bool, error) { if h.Type == "min" { return a < b, nil } else if h.Type == "max" { return a > b, nil } return false, errors.New("type must be one of 'min' or 'max'") } // Percolate down the Heap, starting at the given index func (h *Heap) percolateDown(index int) { leftChildIndex := h.LeftChildIndex(index) rightChildIndex := h.LeftChildIndex(index) minIndex := 0 if &h.Nodes[leftChildIndex] == nil && &h.Nodes[rightChildIndex] == nil { return } if &h.Nodes[rightChildIndex] != nil { minIndex = leftChildIndex } else { _, err := h.sort(h.Nodes[leftChildIndex].Weight, h.Nodes[rightChildIndex].Weight) if err == nil { minIndex = leftChildIndex } else { minIndex = rightChildIndex } } _, err := h.sort(h.Nodes[minIndex].Weight, h.Nodes[index].Weight) if err == nil { h.Nodes[index], h.Nodes[minIndex] = h.Nodes[minIndex], h.Nodes[index] h.percolateDown(minIndex) } }
heap.go
0.635449
0.507446
heap.go
starcoder
package cryptotrader import ( "fmt" "strings" ) // TradeVolumeType represents the way to calculate the trade volume type TradeVolumeType int const ( // TVTFixed use a fixed volume (quote asset) for trading TVTFixed TradeVolumeType = iota // TVTPercent use a percentage of the available quote asset for trading TVTPercent ) const maxPctVolume = 0.998 // TradeVolumeTypeFromString creates a new TradeVolumeType from it's string representation func TradeVolumeTypeFromString(in string) (out TradeVolumeType, err error) { switch strings.ToLower(in) { case "fixed": out = TVTFixed case "pct", "percent": out = TVTPercent default: err = fmt.Errorf("Invalid tradevolume type: %s", in) } return } // String return the string representation fo the TradeVolumeType func (tvt TradeVolumeType) String() string { if tvt == TVTFixed { return "fixed" } return "percent" } // TradeConfig represents the config for the trades to place type TradeConfig struct { // TradeVolumeType how to calculate the volume of the buy/sell orders TradeVolumeType TradeVolumeType // The volume to trade (depends on TradeVolumeType) // If TradeVolumeType == TVTFixed the Volume value is the actual quote asset quantity to trade // If TradeVolumeType == TVTPercent the Volume value represents the percentage of the available quote asset quantity to trade (max Volume value = 1.0) Volume float64 // Reduce if true reduces the Volume to the available quantity if the TradeVolumeType == TVTFixed and the available asset quantity is insufficient Reduce bool // Paper perform paper trading only, do not issue any orer on the exchange Paper bool // Max slippage in percent // 0.1% = 0.001 MaxSlippage float64 // Stop loss un percent StopLoss float64 } // NewTradeConfigFromFlags creates a new TradeConfig insance from the cmdline argument values func NewTradeConfigFromFlags(tvt string, volume float64, reduce bool, paper bool, maxSlippage float64, stopLoss float64) (tc TradeConfig, err error) { if tc.TradeVolumeType, err = 
TradeVolumeTypeFromString(tvt); err != nil { return } if tc.TradeVolumeType == TVTPercent { if volume > maxPctVolume { volume = maxPctVolume } } tc.Volume = volume tc.Reduce = reduce tc.Paper = paper tc.MaxSlippage = maxSlippage tc.StopLoss = stopLoss return }
tradeconfig.go
0.680454
0.54958
tradeconfig.go
starcoder
package aiplatform import ( context "context" cmpopts "github.com/google/go-cmp/cmp/cmpopts" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" proto "google.golang.org/protobuf/proto" protocmp "google.golang.org/protobuf/testing/protocmp" fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" assert "gotest.tools/v3/assert" strings "strings" testing "testing" ) type FeaturestoreServiceTestSuite struct { T *testing.T // Server to test. Server FeaturestoreServiceServer } func (fx FeaturestoreServiceTestSuite) TestEntityType(ctx context.Context, options EntityTypeTestSuiteConfig) { fx.T.Run("EntityType", func(t *testing.T) { options.ctx = ctx options.service = fx.Server options.test(t) }) } func (fx FeaturestoreServiceTestSuite) TestFeature(ctx context.Context, options FeatureTestSuiteConfig) { fx.T.Run("Feature", func(t *testing.T) { options.ctx = ctx options.service = fx.Server options.test(t) }) } func (fx FeaturestoreServiceTestSuite) TestFeaturestore(ctx context.Context, options FeaturestoreTestSuiteConfig) { fx.T.Run("Featurestore", func(t *testing.T) { options.ctx = ctx options.service = fx.Server options.test(t) }) } type EntityTypeTestSuiteConfig struct { ctx context.Context service FeaturestoreServiceServer currParent int // The parents to use when creating resources. // At least one parent needs to be set. Depending on methods available on the resource, // more may be required. If insufficient number of parents are // provided the test will fail. Parents []string // Create should return a resource which is valid to create, i.e. // all required fields set. Create func(parent string) *EntityType // Update should return a resource which is valid to update, i.e. // all required fields set. Update func(parent string) *EntityType // Patterns of tests to skip. // For example if a service has a Get method: // Skip: ["Get"] will skip all tests for Get. 
// Skip: ["Get/persisted"] will only skip the subtest called "persisted" of Get. Skip []string } func (fx *EntityTypeTestSuiteConfig) test(t *testing.T) { t.Run("Create", fx.testCreate) t.Run("Get", fx.testGet) t.Run("Update", fx.testUpdate) t.Run("List", fx.testList) } func (fx *EntityTypeTestSuiteConfig) testCreate(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no parent is provided. t.Run("missing parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.CreateEntityType(fx.ctx, &CreateEntityTypeRequest{ Parent: "", EntityType: fx.Create(fx.nextParent(t, false)), }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if provided parent is invalid. t.Run("invalid parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.CreateEntityType(fx.ctx, &CreateEntityTypeRequest{ Parent: "invalid resource name", EntityType: fx.Create(fx.nextParent(t, false)), }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) } func (fx *EntityTypeTestSuiteConfig) testGet(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no name is provided. t.Run("missing name", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetEntityType(fx.ctx, &GetEntityTypeRequest{ Name: "", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if the provided name is not valid. t.Run("invalid name", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetEntityType(fx.ctx, &GetEntityTypeRequest{ Name: "invalid resource name", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Resource should be returned without errors if it exists. 
t.Run("exists", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) msg, err := fx.service.GetEntityType(fx.ctx, &GetEntityTypeRequest{ Name: created.Name, }) assert.NilError(t, err) assert.DeepEqual(t, msg, created, protocmp.Transform()) }) // Method should fail with NotFound if the resource does not exist. t.Run("not found", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) _, err := fx.service.GetEntityType(fx.ctx, &GetEntityTypeRequest{ Name: created.Name + "notfound", }) assert.Equal(t, codes.NotFound, status.Code(err), err) }) // Method should fail with InvalidArgument if the provided name only contains wildcards ('-') t.Run("only wildcards", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetEntityType(fx.ctx, &GetEntityTypeRequest{ Name: "projects/-/locations/-/featurestores/-/entityTypes/-", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) } func (fx *EntityTypeTestSuiteConfig) testUpdate(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no name is provided. t.Run("missing name", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Update(parent) msg.Name = "" _, err := fx.service.UpdateEntityType(fx.ctx, &UpdateEntityTypeRequest{ EntityType: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if provided name is not valid. t.Run("invalid name", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Update(parent) msg.Name = "invalid resource name" _, err := fx.service.UpdateEntityType(fx.ctx, &UpdateEntityTypeRequest{ EntityType: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // The updated resource should be persisted and reachable with Get. 
t.Run("persisted", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) updated, err := fx.service.UpdateEntityType(fx.ctx, &UpdateEntityTypeRequest{ EntityType: created, }) assert.NilError(t, err) persisted, err := fx.service.GetEntityType(fx.ctx, &GetEntityTypeRequest{ Name: updated.Name, }) assert.NilError(t, err) assert.DeepEqual(t, updated, persisted, protocmp.Transform()) }) parent := fx.nextParent(t, false) created := fx.create(t, parent) // Method should fail with NotFound if the resource does not exist. t.Run("not found", func(t *testing.T) { fx.maybeSkip(t) msg := fx.Update(parent) msg.Name = created.Name + "notfound" _, err := fx.service.UpdateEntityType(fx.ctx, &UpdateEntityTypeRequest{ EntityType: msg, }) assert.Equal(t, codes.NotFound, status.Code(err), err) }) // The method should fail with InvalidArgument if the update_mask is invalid. t.Run("invalid update mask", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.UpdateEntityType(fx.ctx, &UpdateEntityTypeRequest{ EntityType: created, UpdateMask: &fieldmaskpb.FieldMask{ Paths: []string{ "invalid_field_xyz", }, }, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) } func (fx *EntityTypeTestSuiteConfig) testList(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if provided parent is invalid. t.Run("invalid parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: "invalid resource name", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument is provided page token is not valid. 
t.Run("invalid page token", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) _, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: parent, PageToken: "invalid page token", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument is provided page size is negative. t.Run("negative page size", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) _, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: parent, PageSize: -10, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) const resourcesCount = 15 parent := fx.nextParent(t, true) parentMsgs := make([]*EntityType, resourcesCount) for i := 0; i < resourcesCount; i++ { parentMsgs[i] = fx.create(t, parent) } // If parent is provided the method must only return resources // under that parent. t.Run("isolation", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: parent, PageSize: 999, }) assert.NilError(t, err) assert.DeepEqual( t, parentMsgs, response.EntityTypes, cmpopts.SortSlices(func(a, b *EntityType) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) // If there are no more resources, next_page_token should not be set. t.Run("last page", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: parent, PageSize: resourcesCount, }) assert.NilError(t, err) assert.Equal(t, "", response.NextPageToken) }) // If there are more resources, next_page_token should be set. t.Run("more pages", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: parent, PageSize: resourcesCount - 1, }) assert.NilError(t, err) assert.Check(t, response.NextPageToken != "") }) // Listing resource one by one should eventually return all resources. 
t.Run("one by one", func(t *testing.T) { fx.maybeSkip(t) msgs := make([]*EntityType, 0, resourcesCount) var nextPageToken string for { response, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: parent, PageSize: 1, PageToken: nextPageToken, }) assert.NilError(t, err) assert.Equal(t, 1, len(response.EntityTypes)) msgs = append(msgs, response.EntityTypes...) nextPageToken = response.NextPageToken if nextPageToken == "" { break } } assert.DeepEqual( t, parentMsgs, msgs, cmpopts.SortSlices(func(a, b *EntityType) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) // Method should not return deleted resources. t.Run("deleted", func(t *testing.T) { fx.maybeSkip(t) const deleteCount = 5 for i := 0; i < deleteCount; i++ { _, err := fx.service.DeleteEntityType(fx.ctx, &DeleteEntityTypeRequest{ Name: parentMsgs[i].Name, }) assert.NilError(t, err) } response, err := fx.service.ListEntityTypes(fx.ctx, &ListEntityTypesRequest{ Parent: parent, PageSize: 9999, }) assert.NilError(t, err) assert.DeepEqual( t, parentMsgs[deleteCount:], response.EntityTypes, cmpopts.SortSlices(func(a, b *EntityType) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) } func (fx *EntityTypeTestSuiteConfig) nextParent(t *testing.T, pristine bool) string { if pristine { fx.currParent++ } if fx.currParent >= len(fx.Parents) { t.Fatal("need at least", fx.currParent+1, "parents") } return fx.Parents[fx.currParent] } func (fx *EntityTypeTestSuiteConfig) peekNextParent(t *testing.T) string { next := fx.currParent + 1 if next >= len(fx.Parents) { t.Fatal("need at least", next+1, "parents") } return fx.Parents[next] } func (fx *EntityTypeTestSuiteConfig) maybeSkip(t *testing.T) { for _, skip := range fx.Skip { if strings.Contains(t.Name(), skip) { t.Skip("skipped because of .Skip") } } } func (fx *EntityTypeTestSuiteConfig) create(t *testing.T, parent string) *EntityType { t.Helper() t.Skip("Long running create method not supported") return nil } type 
FeatureTestSuiteConfig struct { ctx context.Context service FeaturestoreServiceServer currParent int // The parents to use when creating resources. // At least one parent needs to be set. Depending on methods available on the resource, // more may be required. If insufficient number of parents are // provided the test will fail. Parents []string // Create should return a resource which is valid to create, i.e. // all required fields set. Create func(parent string) *Feature // Update should return a resource which is valid to update, i.e. // all required fields set. Update func(parent string) *Feature // Patterns of tests to skip. // For example if a service has a Get method: // Skip: ["Get"] will skip all tests for Get. // Skip: ["Get/persisted"] will only skip the subtest called "persisted" of Get. Skip []string } func (fx *FeatureTestSuiteConfig) test(t *testing.T) { t.Run("Create", fx.testCreate) t.Run("Get", fx.testGet) t.Run("Update", fx.testUpdate) t.Run("List", fx.testList) } func (fx *FeatureTestSuiteConfig) testCreate(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no parent is provided. t.Run("missing parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.CreateFeature(fx.ctx, &CreateFeatureRequest{ Parent: "", Feature: fx.Create(fx.nextParent(t, false)), }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if provided parent is invalid. t.Run("invalid parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.CreateFeature(fx.ctx, &CreateFeatureRequest{ Parent: "invalid resource name", Feature: fx.Create(fx.nextParent(t, false)), }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // The method should fail with InvalidArgument if the resource has any // required fields and they are not provided. 
t.Run("required fields", func(t *testing.T) { fx.maybeSkip(t) t.Run(".value_type", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Create(parent) container := msg if container == nil { t.Skip("not reachable") } fd := container.ProtoReflect().Descriptor().Fields().ByName("value_type") container.ProtoReflect().Clear(fd) _, err := fx.service.CreateFeature(fx.ctx, &CreateFeatureRequest{ Parent: parent, Feature: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) }) } func (fx *FeatureTestSuiteConfig) testGet(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no name is provided. t.Run("missing name", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetFeature(fx.ctx, &GetFeatureRequest{ Name: "", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if the provided name is not valid. t.Run("invalid name", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetFeature(fx.ctx, &GetFeatureRequest{ Name: "invalid resource name", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Resource should be returned without errors if it exists. t.Run("exists", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) msg, err := fx.service.GetFeature(fx.ctx, &GetFeatureRequest{ Name: created.Name, }) assert.NilError(t, err) assert.DeepEqual(t, msg, created, protocmp.Transform()) }) // Method should fail with NotFound if the resource does not exist. 
t.Run("not found", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) _, err := fx.service.GetFeature(fx.ctx, &GetFeatureRequest{ Name: created.Name + "notfound", }) assert.Equal(t, codes.NotFound, status.Code(err), err) }) // Method should fail with InvalidArgument if the provided name only contains wildcards ('-') t.Run("only wildcards", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetFeature(fx.ctx, &GetFeatureRequest{ Name: "projects/-/locations/-/featurestores/-/entityTypes/-/features/-", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) } func (fx *FeatureTestSuiteConfig) testUpdate(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no name is provided. t.Run("missing name", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Update(parent) msg.Name = "" _, err := fx.service.UpdateFeature(fx.ctx, &UpdateFeatureRequest{ Feature: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if provided name is not valid. t.Run("invalid name", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Update(parent) msg.Name = "invalid resource name" _, err := fx.service.UpdateFeature(fx.ctx, &UpdateFeatureRequest{ Feature: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // The updated resource should be persisted and reachable with Get. t.Run("persisted", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) updated, err := fx.service.UpdateFeature(fx.ctx, &UpdateFeatureRequest{ Feature: created, }) assert.NilError(t, err) persisted, err := fx.service.GetFeature(fx.ctx, &GetFeatureRequest{ Name: updated.Name, }) assert.NilError(t, err) assert.DeepEqual(t, updated, persisted, protocmp.Transform()) }) // The field create_time should be preserved when a '*'-update mask is used. 
t.Run("preserve create_time", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) originalCreateTime := created.CreateTime updated, err := fx.service.UpdateFeature(fx.ctx, &UpdateFeatureRequest{ Feature: created, UpdateMask: &fieldmaskpb.FieldMask{ Paths: []string{ "*", }, }, }) assert.NilError(t, err) assert.DeepEqual(t, originalCreateTime, updated.CreateTime, protocmp.Transform()) }) parent := fx.nextParent(t, false) created := fx.create(t, parent) // Method should fail with NotFound if the resource does not exist. t.Run("not found", func(t *testing.T) { fx.maybeSkip(t) msg := fx.Update(parent) msg.Name = created.Name + "notfound" _, err := fx.service.UpdateFeature(fx.ctx, &UpdateFeatureRequest{ Feature: msg, }) assert.Equal(t, codes.NotFound, status.Code(err), err) }) // The method should fail with InvalidArgument if the update_mask is invalid. t.Run("invalid update mask", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.UpdateFeature(fx.ctx, &UpdateFeatureRequest{ Feature: created, UpdateMask: &fieldmaskpb.FieldMask{ Paths: []string{ "invalid_field_xyz", }, }, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if any required field is missing // when called with '*' update_mask. 
t.Run("required fields", func(t *testing.T) { fx.maybeSkip(t) t.Run(".value_type", func(t *testing.T) { fx.maybeSkip(t) msg := proto.Clone(created).(*Feature) container := msg if container == nil { t.Skip("not reachable") } fd := container.ProtoReflect().Descriptor().Fields().ByName("value_type") container.ProtoReflect().Clear(fd) _, err := fx.service.UpdateFeature(fx.ctx, &UpdateFeatureRequest{ Feature: msg, UpdateMask: &fieldmaskpb.FieldMask{ Paths: []string{ "*", }, }, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) }) } func (fx *FeatureTestSuiteConfig) testList(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if provided parent is invalid. t.Run("invalid parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: "invalid resource name", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument is provided page token is not valid. t.Run("invalid page token", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) _, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: parent, PageToken: "invalid page token", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument is provided page size is negative. t.Run("negative page size", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) _, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: parent, PageSize: -10, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) const resourcesCount = 15 parent := fx.nextParent(t, true) parentMsgs := make([]*Feature, resourcesCount) for i := 0; i < resourcesCount; i++ { parentMsgs[i] = fx.create(t, parent) } // If parent is provided the method must only return resources // under that parent. 
t.Run("isolation", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: parent, PageSize: 999, }) assert.NilError(t, err) assert.DeepEqual( t, parentMsgs, response.Features, cmpopts.SortSlices(func(a, b *Feature) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) // If there are no more resources, next_page_token should not be set. t.Run("last page", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: parent, PageSize: resourcesCount, }) assert.NilError(t, err) assert.Equal(t, "", response.NextPageToken) }) // If there are more resources, next_page_token should be set. t.Run("more pages", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: parent, PageSize: resourcesCount - 1, }) assert.NilError(t, err) assert.Check(t, response.NextPageToken != "") }) // Listing resource one by one should eventually return all resources. t.Run("one by one", func(t *testing.T) { fx.maybeSkip(t) msgs := make([]*Feature, 0, resourcesCount) var nextPageToken string for { response, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: parent, PageSize: 1, PageToken: nextPageToken, }) assert.NilError(t, err) assert.Equal(t, 1, len(response.Features)) msgs = append(msgs, response.Features...) nextPageToken = response.NextPageToken if nextPageToken == "" { break } } assert.DeepEqual( t, parentMsgs, msgs, cmpopts.SortSlices(func(a, b *Feature) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) // Method should not return deleted resources. 
t.Run("deleted", func(t *testing.T) { fx.maybeSkip(t) const deleteCount = 5 for i := 0; i < deleteCount; i++ { _, err := fx.service.DeleteFeature(fx.ctx, &DeleteFeatureRequest{ Name: parentMsgs[i].Name, }) assert.NilError(t, err) } response, err := fx.service.ListFeatures(fx.ctx, &ListFeaturesRequest{ Parent: parent, PageSize: 9999, }) assert.NilError(t, err) assert.DeepEqual( t, parentMsgs[deleteCount:], response.Features, cmpopts.SortSlices(func(a, b *Feature) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) } func (fx *FeatureTestSuiteConfig) nextParent(t *testing.T, pristine bool) string { if pristine { fx.currParent++ } if fx.currParent >= len(fx.Parents) { t.Fatal("need at least", fx.currParent+1, "parents") } return fx.Parents[fx.currParent] } func (fx *FeatureTestSuiteConfig) peekNextParent(t *testing.T) string { next := fx.currParent + 1 if next >= len(fx.Parents) { t.Fatal("need at least", next+1, "parents") } return fx.Parents[next] } func (fx *FeatureTestSuiteConfig) maybeSkip(t *testing.T) { for _, skip := range fx.Skip { if strings.Contains(t.Name(), skip) { t.Skip("skipped because of .Skip") } } } func (fx *FeatureTestSuiteConfig) create(t *testing.T, parent string) *Feature { t.Helper() t.Skip("Long running create method not supported") return nil } type FeaturestoreTestSuiteConfig struct { ctx context.Context service FeaturestoreServiceServer currParent int // The parents to use when creating resources. // At least one parent needs to be set. Depending on methods available on the resource, // more may be required. If insufficient number of parents are // provided the test will fail. Parents []string // Create should return a resource which is valid to create, i.e. // all required fields set. Create func(parent string) *Featurestore // Update should return a resource which is valid to update, i.e. // all required fields set. Update func(parent string) *Featurestore // Patterns of tests to skip. 
// For example if a service has a Get method: // Skip: ["Get"] will skip all tests for Get. // Skip: ["Get/persisted"] will only skip the subtest called "persisted" of Get. Skip []string } func (fx *FeaturestoreTestSuiteConfig) test(t *testing.T) { t.Run("Create", fx.testCreate) t.Run("Get", fx.testGet) t.Run("Update", fx.testUpdate) t.Run("List", fx.testList) } func (fx *FeaturestoreTestSuiteConfig) testCreate(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no parent is provided. t.Run("missing parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.CreateFeaturestore(fx.ctx, &CreateFeaturestoreRequest{ Parent: "", Featurestore: fx.Create(fx.nextParent(t, false)), }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if provided parent is invalid. t.Run("invalid parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.CreateFeaturestore(fx.ctx, &CreateFeaturestoreRequest{ Parent: "invalid resource name", Featurestore: fx.Create(fx.nextParent(t, false)), }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // The method should fail with InvalidArgument if the resource has any // required fields and they are not provided. t.Run("required fields", func(t *testing.T) { fx.maybeSkip(t) t.Run(".encryption_spec.kms_key_name", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Create(parent) container := msg.GetEncryptionSpec() if container == nil { t.Skip("not reachable") } fd := container.ProtoReflect().Descriptor().Fields().ByName("kms_key_name") container.ProtoReflect().Clear(fd) _, err := fx.service.CreateFeaturestore(fx.ctx, &CreateFeaturestoreRequest{ Parent: parent, Featurestore: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) }) } func (fx *FeaturestoreTestSuiteConfig) testGet(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no name is provided. 
t.Run("missing name", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetFeaturestore(fx.ctx, &GetFeaturestoreRequest{ Name: "", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if the provided name is not valid. t.Run("invalid name", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetFeaturestore(fx.ctx, &GetFeaturestoreRequest{ Name: "invalid resource name", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Resource should be returned without errors if it exists. t.Run("exists", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) msg, err := fx.service.GetFeaturestore(fx.ctx, &GetFeaturestoreRequest{ Name: created.Name, }) assert.NilError(t, err) assert.DeepEqual(t, msg, created, protocmp.Transform()) }) // Method should fail with NotFound if the resource does not exist. t.Run("not found", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) created := fx.create(t, parent) _, err := fx.service.GetFeaturestore(fx.ctx, &GetFeaturestoreRequest{ Name: created.Name + "notfound", }) assert.Equal(t, codes.NotFound, status.Code(err), err) }) // Method should fail with InvalidArgument if the provided name only contains wildcards ('-') t.Run("only wildcards", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.GetFeaturestore(fx.ctx, &GetFeaturestoreRequest{ Name: "projects/-/locations/-/featurestores/-", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) } func (fx *FeaturestoreTestSuiteConfig) testUpdate(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if no name is provided. 
t.Run("missing name", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Update(parent) msg.Name = "" _, err := fx.service.UpdateFeaturestore(fx.ctx, &UpdateFeaturestoreRequest{ Featurestore: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if provided name is not valid. t.Run("invalid name", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) msg := fx.Update(parent) msg.Name = "invalid resource name" _, err := fx.service.UpdateFeaturestore(fx.ctx, &UpdateFeaturestoreRequest{ Featurestore: msg, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) parent := fx.nextParent(t, false) created := fx.create(t, parent) // Method should fail with NotFound if the resource does not exist. t.Run("not found", func(t *testing.T) { fx.maybeSkip(t) msg := fx.Update(parent) msg.Name = created.Name + "notfound" _, err := fx.service.UpdateFeaturestore(fx.ctx, &UpdateFeaturestoreRequest{ Featurestore: msg, }) assert.Equal(t, codes.NotFound, status.Code(err), err) }) // The method should fail with InvalidArgument if the update_mask is invalid. t.Run("invalid update mask", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.UpdateFeaturestore(fx.ctx, &UpdateFeaturestoreRequest{ Featurestore: created, UpdateMask: &fieldmaskpb.FieldMask{ Paths: []string{ "invalid_field_xyz", }, }, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument if any required field is missing // when called with '*' update_mask. 
t.Run("required fields", func(t *testing.T) { fx.maybeSkip(t) t.Run(".encryption_spec.kms_key_name", func(t *testing.T) { fx.maybeSkip(t) msg := proto.Clone(created).(*Featurestore) container := msg.GetEncryptionSpec() if container == nil { t.Skip("not reachable") } fd := container.ProtoReflect().Descriptor().Fields().ByName("kms_key_name") container.ProtoReflect().Clear(fd) _, err := fx.service.UpdateFeaturestore(fx.ctx, &UpdateFeaturestoreRequest{ Featurestore: msg, UpdateMask: &fieldmaskpb.FieldMask{ Paths: []string{ "*", }, }, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) }) } func (fx *FeaturestoreTestSuiteConfig) testList(t *testing.T) { fx.maybeSkip(t) // Method should fail with InvalidArgument if provided parent is invalid. t.Run("invalid parent", func(t *testing.T) { fx.maybeSkip(t) _, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: "invalid resource name", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument is provided page token is not valid. t.Run("invalid page token", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) _, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: parent, PageToken: "invalid page token", }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) // Method should fail with InvalidArgument is provided page size is negative. t.Run("negative page size", func(t *testing.T) { fx.maybeSkip(t) parent := fx.nextParent(t, false) _, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: parent, PageSize: -10, }) assert.Equal(t, codes.InvalidArgument, status.Code(err), err) }) const resourcesCount = 15 parent := fx.nextParent(t, true) parentMsgs := make([]*Featurestore, resourcesCount) for i := 0; i < resourcesCount; i++ { parentMsgs[i] = fx.create(t, parent) } // If parent is provided the method must only return resources // under that parent. 
t.Run("isolation", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: parent, PageSize: 999, }) assert.NilError(t, err) assert.DeepEqual( t, parentMsgs, response.Featurestores, cmpopts.SortSlices(func(a, b *Featurestore) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) // If there are no more resources, next_page_token should not be set. t.Run("last page", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: parent, PageSize: resourcesCount, }) assert.NilError(t, err) assert.Equal(t, "", response.NextPageToken) }) // If there are more resources, next_page_token should be set. t.Run("more pages", func(t *testing.T) { fx.maybeSkip(t) response, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: parent, PageSize: resourcesCount - 1, }) assert.NilError(t, err) assert.Check(t, response.NextPageToken != "") }) // Listing resource one by one should eventually return all resources. t.Run("one by one", func(t *testing.T) { fx.maybeSkip(t) msgs := make([]*Featurestore, 0, resourcesCount) var nextPageToken string for { response, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: parent, PageSize: 1, PageToken: nextPageToken, }) assert.NilError(t, err) assert.Equal(t, 1, len(response.Featurestores)) msgs = append(msgs, response.Featurestores...) nextPageToken = response.NextPageToken if nextPageToken == "" { break } } assert.DeepEqual( t, parentMsgs, msgs, cmpopts.SortSlices(func(a, b *Featurestore) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) // Method should not return deleted resources. 
t.Run("deleted", func(t *testing.T) { fx.maybeSkip(t) const deleteCount = 5 for i := 0; i < deleteCount; i++ { _, err := fx.service.DeleteFeaturestore(fx.ctx, &DeleteFeaturestoreRequest{ Name: parentMsgs[i].Name, }) assert.NilError(t, err) } response, err := fx.service.ListFeaturestores(fx.ctx, &ListFeaturestoresRequest{ Parent: parent, PageSize: 9999, }) assert.NilError(t, err) assert.DeepEqual( t, parentMsgs[deleteCount:], response.Featurestores, cmpopts.SortSlices(func(a, b *Featurestore) bool { return a.Name < b.Name }), protocmp.Transform(), ) }) } func (fx *FeaturestoreTestSuiteConfig) nextParent(t *testing.T, pristine bool) string { if pristine { fx.currParent++ } if fx.currParent >= len(fx.Parents) { t.Fatal("need at least", fx.currParent+1, "parents") } return fx.Parents[fx.currParent] } func (fx *FeaturestoreTestSuiteConfig) peekNextParent(t *testing.T) string { next := fx.currParent + 1 if next >= len(fx.Parents) { t.Fatal("need at least", next+1, "parents") } return fx.Parents[next] } func (fx *FeaturestoreTestSuiteConfig) maybeSkip(t *testing.T) { for _, skip := range fx.Skip { if strings.Contains(t.Name(), skip) { t.Skip("skipped because of .Skip") } } } func (fx *FeaturestoreTestSuiteConfig) create(t *testing.T, parent string) *Featurestore { t.Helper() t.Skip("Long running create method not supported") return nil }
proto/gen/googleapis/cloud/aiplatform/v1/featurestore_service_aiptest.pb.go
0.610105
0.48182
featurestore_service_aiptest.pb.go
starcoder
package solver const ( nbMaxRecent = 50 // How many recent LBD values we consider; "X" in papers about LBD. triggerRestartK = 0.8 nbMaxTrail = 5000 // How many elements in queueTrail we consider; "Y" in papers about LBD. postponeRestartT = 1.4 ) type queueData struct { totalNb int // Current total nb of values considered totalSum int // Sum of all values so far nbRecent int // NB of values used in the array ptr int // current index of oldest value in the array recentAvg float64 // Average value } // lbdStats is a structure dealing with recent LBD evolutions. type lbdStats struct { lbdData queueData trailData queueData recentVals [nbMaxRecent]int // Last LBD values recentTrails [nbMaxTrail]int // Last trail lengths } // mustRestart is true iff recent LBDs are much smaller on average than average of all LBDs. func (l *lbdStats) mustRestart() bool { if l.lbdData.nbRecent < nbMaxRecent { return false } return l.lbdData.recentAvg*triggerRestartK > float64(l.lbdData.totalSum)/float64(l.lbdData.totalNb) } // addConflict adds information about a conflict that just happened. func (l *lbdStats) addConflict(trailSz int) { td := &l.trailData td.totalNb++ td.totalSum += trailSz if td.nbRecent < nbMaxTrail { l.recentTrails[td.nbRecent] = trailSz old := float64(td.nbRecent) new := old + 1 td.recentAvg = (td.recentAvg*old)/new + float64(trailSz)/new td.nbRecent++ } else { old := l.recentTrails[td.ptr] l.recentTrails[td.ptr] = trailSz td.ptr++ if td.ptr == nbMaxTrail { td.ptr = 0 } td.recentAvg = td.recentAvg - float64(old)/nbMaxTrail + float64(trailSz)/nbMaxTrail } if td.nbRecent == nbMaxTrail && l.lbdData.nbRecent == nbMaxRecent && trailSz > int(postponeRestartT*td.recentAvg) { // Too many good assignments: postpone restart l.clear() } } // addLbd adds information about a recent learned clause's LBD. // TODO: this is very close to addConflicts's code, this should probably be rewritten/merged. 
func (l *lbdStats) addLbd(lbd int) { ld := &l.lbdData ld.totalNb++ ld.totalSum += lbd if ld.nbRecent < nbMaxRecent { l.recentVals[ld.nbRecent] = lbd old := float64(ld.nbRecent) new := old + 1 ld.recentAvg = (ld.recentAvg*old)/new + float64(lbd)/new ld.nbRecent++ } else { old := l.recentVals[ld.ptr] l.recentVals[ld.ptr] = lbd ld.ptr++ if ld.ptr == nbMaxRecent { ld.ptr = 0 } ld.recentAvg = ld.recentAvg - float64(old)/nbMaxRecent + float64(lbd)/nbMaxRecent } } // clear clears last values. It should be called after a restart. func (l *lbdStats) clear() { l.lbdData.ptr = 0 l.lbdData.nbRecent = 0 l.lbdData.recentAvg = 0.0 }
vendor/github.com/crillab/gophersat/solver/lbd.go
0.501953
0.437042
lbd.go
starcoder
package assert import ( "encoding/json" "math/big" "reflect" "github.com/zoncoen/scenarigo/errors" ) type compareType int const ( compareGreater compareType = iota compareGreaterOrEqual compareLess compareLessOrEqual ) // compareNumber compares expected with actual based on compareType. // If the comparison fails, an error will be returned. func compareNumber(expected, actual interface{}, typ compareType) error { if !reflect.ValueOf(expected).IsValid() { return errors.Errorf("expected value %v is invalid", expected) } if !reflect.ValueOf(actual).IsValid() { return errors.Errorf("actual value %v is invalid", actual) } n1, err := toNumber(expected) if err != nil { return err } n2, err := toNumber(actual) if err != nil { return err } if isKindOfInt(n1) && isKindOfInt(n2) { i1, err := convertToBigInt(n1) if err != nil { return err } i2, err := convertToBigInt(n2) if err != nil { return err } return compareByType(i1.Cmp(i2), i2.String(), typ) } f1, err := convertToBigFloat(n1) if err != nil { return err } f2, err := convertToBigFloat(n2) if err != nil { return err } return compareByType(f1.Cmp(f2), f2.String(), typ) } func toNumber(v interface{}) (interface{}, error) { if n, ok := v.(json.Number); ok { if i, err := n.Int64(); err == nil { return i, nil } if f, err := n.Float64(); err == nil { return f, nil } return nil, errors.Errorf("failed to convert %v to number", n) } if !isKindOfNumber(v) { return nil, errors.Errorf("failed to convert %T to number", v) } return v, nil } func isKindOfInt(v interface{}) bool { switch reflect.TypeOf(v).Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return true default: return false } } func isKindOfFloat(v interface{}) bool { switch reflect.TypeOf(v).Kind() { case reflect.Float32, reflect.Float64: return true default: return false } } func isKindOfNumber(v interface{}) bool { return isKindOfInt(v) || 
isKindOfFloat(v) } func compareByType(result int, expValue string, typ compareType) error { switch typ { case compareGreater: if result > 0 { return nil } return errors.Errorf("must be greater than %s", expValue) case compareGreaterOrEqual: if result >= 0 { return nil } return errors.Errorf("must be equal or greater than %s", expValue) case compareLess: if result < 0 { return nil } return errors.Errorf("must be less than %s", expValue) case compareLessOrEqual: if result <= 0 { return nil } return errors.Errorf("must be equal or less than %s", expValue) default: return errors.Errorf("unknown compare type %v", typ) } } func convert(v interface{}, t reflect.Type) (interface{}, error) { rv := reflect.ValueOf(v) if !rv.IsValid() { return nil, errors.Errorf("value is invalid") } if rv.Type().ConvertibleTo(t) { return rv.Convert(t).Interface(), nil } return nil, errors.Errorf("%T is not convertible to %s", v, t) } func convertToInt64(v interface{}) (int64, error) { vv, err := convert(v, reflect.TypeOf(int64(0))) if err != nil { return 0, err } return vv.(int64), nil } func convertToUint64(v interface{}) (uint64, error) { vv, err := convert(v, reflect.TypeOf(uint64(0))) if err != nil { return 0, err } return vv.(uint64), nil } func convertToFloat64(v interface{}) (float64, error) { vv, err := convert(v, reflect.TypeOf(float64(0))) if err != nil { return 0, err } return vv.(float64), nil } func convertToBigInt(v interface{}) (*big.Int, error) { switch reflect.TypeOf(v).Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: i64, err := convertToInt64(v) if err != nil { return nil, err } return big.NewInt(i64), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: u64, err := convertToUint64(v) if err != nil { return nil, err } return big.NewInt(0).SetUint64(u64), nil default: return nil, errors.Errorf("%T is not convertible to *big.Int", v) } } func convertToBigFloat(v interface{}) (*big.Float, 
error) { switch reflect.TypeOf(v).Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: i64, err := convertToInt64(v) if err != nil { return nil, err } return big.NewFloat(0).SetInt64(i64), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: u64, err := convertToUint64(v) if err != nil { return nil, err } return big.NewFloat(0).SetUint64(u64), nil case reflect.Float32, reflect.Float64: f64, err := convertToFloat64(v) if err != nil { return nil, err } return big.NewFloat(f64), nil default: return nil, errors.Errorf("%T is not convertible to *big.Float", v) } }
assert/compare.go
0.627723
0.492798
compare.go
starcoder
package core import ( "bytes" "io/ioutil" "log" "strconv" "strings" "github.com/Knetic/govaluate" ) // CompositeEstimator returns a similarity function based on // compositions of simpler similarity expressions. The user needs to provide a // formula containing the expression of the similarity function, e.g.: // 0.8 x BHATTACHARRYA + 0.2 CORRELATION // Note that it is the user's responsibility to guarantee that the overall // expression remains within the limits of the similarity expression [0,1]. type CompositeEstimator struct { AbstractDatasetSimilarityEstimator datasetIndexes map[string]int // the slice of estimators to use for the similarity evaluation estimators map[string]DatasetSimilarityEstimator // the math expression used for the estimation expression string } // Compute method constructs the Similarity Matrix func (e *CompositeEstimator) Compute() error { return datasetSimilarityEstimatorCompute(e) } // Similarity returns the similarity between two datasets func (e *CompositeEstimator) Similarity(a, b *Dataset) float64 { expression, err := govaluate.NewEvaluableExpression(e.expression) if err != nil { log.Println(err) return 0.0 } params := make(map[string]interface{}) for k, est := range e.estimators { if est.SimilarityMatrix() != nil { params[k] = est.SimilarityMatrix().Get(e.datasetIndexes[a.Path()], e.datasetIndexes[b.Path()]) } else { params[k] = est.Similarity(a, b) } } result, err := expression.Evaluate(params) if err != nil { log.Println(err) return 0.0 } if val, ok := result.(float64); ok { return val } return -1.0 } // Serialize returns an array of bytes representing the Estimator. 
func (e *CompositeEstimator) Serialize() []byte { buffer := new(bytes.Buffer) buffer.Write(getBytesInt(int(SimilarityTypeComposite))) buffer.Write(datasetSimilarityEstimatorSerialize( e.AbstractDatasetSimilarityEstimator)) // serialize expression buffer.WriteString(e.expression + "\n") // serialize estimators buffer.Write(getBytesInt(len(e.estimators))) for k, est := range e.estimators { buffer.WriteString(k + "\n") temp := est.Serialize() buffer.Write(getBytesInt(len(temp))) buffer.Write(temp) } return buffer.Bytes() } // Deserialize constructs an Estimator object based on the byte array provided. func (e *CompositeEstimator) Deserialize(b []byte) { buffer := bytes.NewBuffer(b) tempInt := make([]byte, 4) buffer.Read(tempInt) // consume estimator type buffer.Read(tempInt) absEstBytes := make([]byte, getIntBytes(tempInt)) buffer.Read(absEstBytes) e.AbstractDatasetSimilarityEstimator = *datasetSimilarityEstimatorDeserialize(absEstBytes) // parse expression line, _ := buffer.ReadString('\n') e.expression = strings.TrimSpace(line) // parse the estimators buffer.Read(tempInt) count := getIntBytes(tempInt) e.estimators = make(map[string]DatasetSimilarityEstimator) for i := 0; i < count; i++ { line, _ := buffer.ReadString('\n') key := strings.TrimSpace(line) buffer.Read(tempInt) length := getIntBytes(tempInt) tempBuff := make([]byte, length) buffer.Read(tempBuff) est := DeserializeSimilarityEstimator(tempBuff) e.estimators[key] = est } } // Configure provides the configuration parameters needed by the Estimator func (e *CompositeEstimator) Configure(conf map[string]string) { e.estimators = make(map[string]DatasetSimilarityEstimator) e.concurrency = 1 for k, v := range conf { if k == "concurrency" { val, err := strconv.ParseInt(v, 10, 32) if err != nil { log.Println(err) } e.concurrency = int(val) } else if k == "expression" { e.expression = v } else { // one of x1,x2,... etc. 
cnt, err := ioutil.ReadFile(v) if err != nil { log.Println(err) } else { est := DeserializeSimilarityEstimator(cnt) e.estimators[k] = est } } } log.Println("Creating inverse dataset index") e.datasetIndexes = make(map[string]int) for i, d := range e.datasets { e.datasetIndexes[d.Path()] = i } } // Options returns the applicable parameters needed by the Estimator. func (e *CompositeEstimator) Options() map[string]string { return map[string]string{ "concurrency": "max number of threads to be run in parallel", "expression": "the math expression that combines the estimators " + "e.g.: 0.2*x + 0.8*y " + "(x and y must be later defined)", "x": "the path of a a similarity estimator", "y": "the path of another similarity estimator", } }
core/similaritycomposite.go
0.698638
0.560493
similaritycomposite.go
starcoder
package unicode // Bit masks for each code point under U+0100, for fast lookup. const ( pC = 1 << iota // a control character. pP // a punctuation character. pN // a numeral. pS // a symbolic character. pZ // a spacing character. pLu // an upper-case letter. pLl // a lower-case letter. pp // a printable character according to Go's definition. pg = pp | pZ // a graphical character according to the Unicode definition. pLo = pLl | pLu // a letter that is neither upper nor lower case. pLmask = pLo ) // GraphicRanges defines the set of graphic characters according to Unicode. var GraphicRanges = []*RangeTable{ L, M, N, P, S, Zs, } // PrintRanges defines the set of printable characters according to Go. // ASCII space, U+0020, is handled separately. var PrintRanges = []*RangeTable{ L, M, N, P, S, } // IsGraphic reports whether the rune is defined as a Graphic by Unicode. // Such characters include letters, marks, numbers, punctuation, symbols, and // spaces, from categories L, M, N, P, S, Zs. func IsGraphic(r rune) bool { // We convert to uint32 to avoid the extra test for negative, // and in the index we convert to uint8 to avoid the range check. if uint32(r) <= MaxLatin1 { return properties[uint8(r)]&pg != 0 } return In(r, GraphicRanges...) } // IsPrint reports whether the rune is defined as printable by Go. Such // characters include letters, marks, numbers, punctuation, symbols, and the // ASCII space character, from categories L, M, N, P, S and the ASCII space // character. This categorization is the same as IsGraphic except that the // only spacing character is ASCII space, U+0020. func IsPrint(r rune) bool { if uint32(r) <= MaxLatin1 { return properties[uint8(r)]&pp != 0 } return In(r, PrintRanges...) } // IsOneOf reports whether the rune is a member of one of the ranges. // The function "In" provides a nicer signature and should be used in preference to IsOneOf. 
func IsOneOf(ranges []*RangeTable, r rune) bool { for _, inside := range ranges { if Is(inside, r) { return true } } return false } // In reports whether the rune is a member of one of the ranges. func In(r rune, ranges ...*RangeTable) bool { for _, inside := range ranges { if Is(inside, r) { return true } } return false } // IsControl reports whether the rune is a control character. // The C (Other) Unicode category includes more code points // such as surrogates; use Is(C, r) to test for them. func IsControl(r rune) bool { if uint32(r) <= MaxLatin1 { return properties[uint8(r)]&pC != 0 } // All control characters are < MaxLatin1. return false } // IsLetter reports whether the rune is a letter (category L). func IsLetter(r rune) bool { if uint32(r) <= MaxLatin1 { return properties[uint8(r)]&(pLmask) != 0 } return isExcludingLatin(Letter, r) } // IsMark reports whether the rune is a mark character (category M). func IsMark(r rune) bool { // There are no mark characters in Latin-1. return isExcludingLatin(Mark, r) } // IsNumber reports whether the rune is a number (category N). func IsNumber(r rune) bool { if uint32(r) <= MaxLatin1 { return properties[uint8(r)]&pN != 0 } return isExcludingLatin(Number, r) } // IsPunct reports whether the rune is a Unicode punctuation character // (category P). func IsPunct(r rune) bool { if uint32(r) <= MaxLatin1 { return properties[uint8(r)]&pP != 0 } return Is(Punct, r) } // IsSpace reports whether the rune is a space character as defined // by Unicode's White Space property; in the Latin-1 space // this is // '\t', '\n', '\v', '\f', '\r', ' ', U+0085 (NEL), U+00A0 (NBSP). // Other definitions of spacing characters are set by category // Z and property Pattern_White_Space. func IsSpace(r rune) bool { // This property isn't the same as Z; special-case it. 
if uint32(r) <= MaxLatin1 { switch r { case '\t', '\n', '\v', '\f', '\r', ' ', 0x85, 0xA0: return true } return false } return isExcludingLatin(White_Space, r) } // IsSymbol reports whether the rune is a symbolic character. func IsSymbol(r rune) bool { if uint32(r) <= MaxLatin1 { return properties[uint8(r)]&pS != 0 } return isExcludingLatin(Symbol, r) }
src/unicode/graphic.go
0.650911
0.4856
graphic.go
starcoder
package docs import ( "bytes" "fmt" "strings" "text/template" "github.com/Jeffail/benthos/v3/internal/bloblang" "github.com/Jeffail/benthos/v3/internal/bloblang/parser" "github.com/Jeffail/benthos/v3/internal/bloblang/query" ) // LintBloblangMapping is function for linting a config field expected to be a // bloblang mapping. func LintBloblangMapping(ctx LintContext, line, col int, v interface{}) []Lint { str, ok := v.(string) if !ok { return []Lint{NewLintWarning(line, fmt.Sprintf("expected string value, got %T", v))} } if str == "" { return nil } _, err := bloblang.NewMapping("", str) if err == nil { return nil } if mErr, ok := err.(*parser.Error); ok { bline, bcol := parser.LineAndColOf([]rune(str), mErr.Input) lint := NewLintError(line+bline, mErr.ErrorAtPositionStructured("", []rune(str))) lint.Column = col + bcol return []Lint{lint} } return []Lint{NewLintError(line, err.Error())} } // LintBloblangField is function for linting a config field expected to be an // interpolation string. 
// NOTE(review): the body below duplicates LintBloblangMapping's error
// handling; consider extracting a shared helper that maps a *parser.Error
// to a positioned Lint. The raw-string templates further down are runtime
// content and must not be reflowed.
func LintBloblangField(ctx LintContext, line, col int, v interface{}) []Lint { str, ok := v.(string) if !ok { return []Lint{NewLintWarning(line, fmt.Sprintf("expected string value, got %T", v))} } if str == "" { return nil } _, err := bloblang.NewField(str) if err == nil { return nil } if mErr, ok := err.(*parser.Error); ok { bline, bcol := parser.LineAndColOf([]rune(str), mErr.Input) lint := NewLintError(line+bline, mErr.ErrorAtPositionStructured("", []rune(str))) lint.Column = col + bcol return []Lint{lint} } return []Lint{NewLintError(line, err.Error())} } type functionCategory struct { Name string Specs []query.FunctionSpec } type functionsContext struct { Categories []functionCategory } var bloblangFunctionsTemplate = `{{define "function_example" -}} {{if gt (len .Summary) 0 -}} {{.Summary}} {{end -}} ` + "```coffee" + ` {{.Mapping}} {{range $i, $result := .Results}} # In: {{index $result 0}} # Out: {{index $result 1}} {{end -}} ` + "```" + ` {{end -}} {{define "function_spec" -}} ### ` + "`{{.Name}}`" + ` {{if eq .Status "beta" -}} BETA: This function is mostly stable but breaking changes could still be made outside of major version releases if a fundamental problem with it is found. {{end -}} {{.Description}} {{range $i, $example := .Examples}} {{template "function_example" $example -}} {{end -}} {{end -}} --- title: Bloblang Functions sidebar_label: Functions description: A list of Bloblang functions --- <!-- THIS FILE IS AUTOGENERATED! 
To make changes please edit the contents of: internal/bloblang/query/functions.go internal/docs/bloblang.go --> import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; Functions can be placed anywhere and allow you to extract information from your environment, generate values, or access data from the underlying message being mapped: ` + "```coffee" + ` root.doc.id = uuid_v4() root.doc.received_at = now() root.doc.host = hostname() ` + "```" + ` {{range $i, $cat := .Categories -}} ## {{$cat.Name}} {{range $i, $spec := $cat.Specs -}} {{template "function_spec" $spec}} {{end -}} {{end -}} [error_handling]: /docs/configuration/error_handling [field_paths]: /docs/configuration/field_paths [meta_proc]: /docs/components/processors/metadata [methods.encode]: /docs/guides/bloblang/methods#encode [methods.string]: /docs/guides/bloblang/methods#string ` // BloblangFunctionsMarkdown returns a markdown document for all Bloblang // functions. func BloblangFunctionsMarkdown() ([]byte, error) { ctx := functionsContext{} specs := query.FunctionDocs() for _, cat := range []query.FunctionCategory{ query.FunctionCategoryGeneral, query.FunctionCategoryMessage, query.FunctionCategoryEnvironment, query.FunctionCategoryDeprecated, } { functions := functionCategory{ Name: string(cat), } for _, spec := range specs { if spec.Category == cat { functions.Specs = append(functions.Specs, spec) } } if len(functions.Specs) > 0 { ctx.Categories = append(ctx.Categories, functions) } } var buf bytes.Buffer err := template.Must(template.New("functions").Parse(bloblangFunctionsTemplate)).Execute(&buf, ctx) return buf.Bytes(), err } //------------------------------------------------------------------------------ type methodCategory struct { Name string Specs []query.MethodSpec } type methodsContext struct { Categories []methodCategory General []query.MethodSpec } var bloblangMethodsTemplate = `{{define "method_example" -}} {{if gt (len .Summary) 0 -}} {{.Summary}} {{end -}} ` + "```coffee" 
+ ` {{.Mapping}} {{range $i, $result := .Results}} # In: {{index $result 0}} # Out: {{index $result 1}} {{end -}} ` + "```" + ` {{end -}} {{define "method_spec" -}} ### ` + "`{{.Name}}`" + ` {{if eq .Status "beta" -}} BETA: This method is mostly stable but breaking changes could still be made outside of major version releases if a fundamental problem with it is found. {{end -}} {{.Description}} {{range $i, $example := .Examples}} {{template "method_example" $example -}} {{end -}} {{end -}} --- title: Bloblang Methods sidebar_label: Methods description: A list of Bloblang methods --- <!-- THIS FILE IS AUTOGENERATED! To make changes please edit the contents of: internal/bloblang/query/methods.go internal/bloblang/query/methods_strings.go internal/docs/bloblang.go --> import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; Methods provide most of the power in Bloblang as they allow you to augment values and can be added to any expression (including other methods): ` + "```coffee" + ` root.doc.id = this.thing.id.string().catch(uuid_v4()) root.doc.reduced_nums = this.thing.nums.map_each(num -> if num < 10 { deleted() } else { num - 10 }) root.has_good_taste = ["pikachu","mewtwo","magmar"].contains(this.user.fav_pokemon) ` + "```" + ` {{if gt (len .General) 0 -}} ## General {{range $i, $spec := .General -}} {{template "method_spec" $spec}} {{end -}} {{end -}} {{range $i, $cat := .Categories -}} ## {{$cat.Name}} {{range $i, $spec := $cat.Specs -}} {{template "method_spec" $spec}} {{end -}} {{end -}} [field_paths]: /docs/configuration/field_paths [methods.encode]: #encode [methods.string]: #string ` func methodForCat(s query.MethodSpec, cat query.MethodCategory) (query.MethodSpec, bool) { for _, c := range s.Categories { if c.Category == cat { spec := s if len(c.Description) > 0 { spec.Description = strings.TrimSpace(c.Description) } if len(c.Examples) > 0 { spec.Examples = c.Examples } return spec, true } } return s, false } // BloblangMethodsMarkdown 
returns a markdown document for all Bloblang methods. func BloblangMethodsMarkdown() ([]byte, error) { ctx := methodsContext{} specs := query.MethodDocs() for _, cat := range []query.MethodCategory{ query.MethodCategoryStrings, query.MethodCategoryNumbers, query.MethodCategoryRegexp, query.MethodCategoryTime, query.MethodCategoryCoercion, query.MethodCategoryObjectAndArray, query.MethodCategoryParsing, query.MethodCategoryEncoding, query.MethodCategoryDeprecated, } { methods := methodCategory{ Name: string(cat), } for _, spec := range specs { var ok bool if spec, ok = methodForCat(spec, cat); ok { methods.Specs = append(methods.Specs, spec) } } if len(methods.Specs) > 0 { ctx.Categories = append(ctx.Categories, methods) } } for _, spec := range specs { if len(spec.Categories) == 0 && spec.Status != query.StatusHidden { spec.Description = strings.TrimSpace(spec.Description) ctx.General = append(ctx.General, spec) } } var buf bytes.Buffer err := template.Must(template.New("methods").Parse(bloblangMethodsTemplate)).Execute(&buf, ctx) return buf.Bytes(), err }
internal/docs/bloblang.go
0.657428
0.470433
bloblang.go
starcoder
// Package modbus provides a field-based Builder for composing Modbus register
// read requests and for extracting typed values (bits, integers, floats,
// strings) from register responses.
//
// NOTE(review): Field's `Length` and `Name` JSON tags are capitalized while
// every other tag is snake_case — presumably unintentional, but renaming the
// tags would break existing serialized configs; confirm before changing.
package modbus import ( "errors" "fmt" "github.com/aldas/go-modbus-client/packet" ) const ( // FieldTypeBit represents single bit out 16 bit register. Use `Field.Bit` (0-15) to indicate which bit is meant. FieldTypeBit FieldType = 1 // FieldTypeByte represents single byte of 16 bit, 2 byte, single register. Use `Field.FromHighByte` to indicate is high or low byte is meant. FieldTypeByte FieldType = 2 // FieldTypeUint8 represents uint8 value of 2 byte, single register. Use `Field.FromHighByte` to indicate is high or low byte value is meant. FieldTypeUint8 FieldType = 3 // FieldTypeInt8 represents int8 value of 2 byte, single register. Use `Field.FromHighByte` to indicate is high or low byte value is meant. FieldTypeInt8 FieldType = 4 // FieldTypeUint16 represents single register (16 bit) as uint16 value FieldTypeUint16 FieldType = 5 // FieldTypeInt16 represents single register (16 bit) as int16 value FieldTypeInt16 FieldType = 6 // FieldTypeUint32 represents 2 registers (32 bit) as uint32 value. Use `Field.ByteOrder` to indicate byte and word order of register data. FieldTypeUint32 FieldType = 7 // FieldTypeInt32 represents 2 registers (32 bit) as int32 value. Use `Field.ByteOrder` to indicate byte and word order of register data. FieldTypeInt32 FieldType = 8 // FieldTypeUint64 represents 4 registers (64 bit) as uint64 value. Use `Field.ByteOrder` to indicate byte and word order of register data. FieldTypeUint64 FieldType = 9 // FieldTypeInt64 represents 4 registers (64 bit) as int64 value. Use `Field.ByteOrder` to indicate byte and word order of register data. FieldTypeInt64 FieldType = 10 // FieldTypeFloat32 represents 2 registers (32 bit) as float32 value. Use `Field.ByteOrder` to indicate byte and word order of register data. FieldTypeFloat32 FieldType = 11 // FieldTypeFloat64 represents 4 registers (64 bit) as float64 value. Use `Field.ByteOrder` to indicate byte and word order of register data. 
FieldTypeFloat64 FieldType = 12 // FieldTypeString represents N registers as string value. Use `Field.Length` to length of string. FieldTypeString FieldType = 13 maxFieldTypeValue = uint8(13) ) // FieldType is enum type for data types that Field can represent type FieldType uint8 // Fields is slice of Field instances type Fields []Field // Field is distinct field be requested and extracted from response // Tag `mapstructure` allows you to marshal https://github.com/spf13/viper supported configuration format to the Field type Field struct { ServerAddress string `json:"server_address" mapstructure:"server_address"` // [network://]host:port UnitID uint8 `json:"unit_id" mapstructure:"unit_id"` RegisterAddress uint16 `json:"register_address" mapstructure:"register_address"` Type FieldType `json:"type" mapstructure:"type"` Bit uint8 `json:"bit" mapstructure:"bit"` FromHighByte bool `json:"from_high_byte" mapstructure:"from_high_byte"` Length uint8 `json:"Length" mapstructure:"Length"` ByteOrder packet.ByteOrder `json:"byte_order" mapstructure:"byte_order"` Name string `json:"Name" mapstructure:"Name"` } // registerSize returns how many register/words does this field would take in modbus response func (f *Field) registerSize() uint16 { switch f.Type { case FieldTypeFloat64, FieldTypeInt64, FieldTypeUint64: return 4 case FieldTypeFloat32, FieldTypeInt32, FieldTypeUint32: return 2 case FieldTypeString: if f.Length%2 == 0 { // even return uint16(f.Length) / 2 } return (uint16(f.Length) / 2) + 1 // odd default: return 1 } } // Validate checks if Field is values are correctly filled func (f Field) Validate() error { if f.ServerAddress == "" { return errors.New("field server address can not be empty") } if f.Type == 0 { return errors.New("field type must be set") } if uint8(f.Type) > maxFieldTypeValue { return errors.New("field type has invalid value") } if f.Bit > 15 { return errors.New("field bit value must be in range (0-15)") } if f.Type == FieldTypeString && f.Length == 0 
{ return errors.New("field with type string must have length set") } return nil } // ExtractFrom extracts field value from given registers data func (f Field) ExtractFrom(registers *packet.Registers) (interface{}, error) { switch f.Type { case FieldTypeBit: return registers.Bit(f.RegisterAddress, f.Bit) case FieldTypeByte: return registers.Byte(f.RegisterAddress, f.FromHighByte) case FieldTypeUint8: return registers.Uint8(f.RegisterAddress, f.FromHighByte) case FieldTypeInt8: return registers.Int8(f.RegisterAddress, f.FromHighByte) case FieldTypeUint16: return registers.Uint16(f.RegisterAddress) case FieldTypeInt16: return registers.Int16(f.RegisterAddress) case FieldTypeUint32: return registers.Uint32WithByteOrder(f.RegisterAddress, f.ByteOrder) case FieldTypeInt32: return registers.Int32WithByteOrder(f.RegisterAddress, f.ByteOrder) case FieldTypeUint64: return registers.Uint64WithByteOrder(f.RegisterAddress, f.ByteOrder) case FieldTypeInt64: return registers.Int64WithByteOrder(f.RegisterAddress, f.ByteOrder) case FieldTypeFloat32: return registers.Float32WithByteOrder(f.RegisterAddress, f.ByteOrder) case FieldTypeFloat64: return registers.Float64WithByteOrder(f.RegisterAddress, f.ByteOrder) case FieldTypeString: return registers.StringWithByteOrder(f.RegisterAddress, f.Length, f.ByteOrder) } return nil, errors.New("extraction failure due unknown field type") } // BField is distinct field be requested and extracted from response type BField struct { Field } // ServerAddress sets modbus server address for Field. 
Usage `[network://]host:port` func (f *BField) ServerAddress(serverAddress string) *BField { f.Field.ServerAddress = serverAddress return f } // UnitID sets unitID for Field func (f *BField) UnitID(unitID uint8) *BField { f.Field.UnitID = unitID return f } // ByteOrder sets word and byte order for Field to be used when extracting values from response func (f *BField) ByteOrder(byteOrder packet.ByteOrder) *BField { f.Field.ByteOrder = byteOrder return f } // Name sets name/identifier for Field to be used to uniquely identify value when extracting values from response func (f *BField) Name(name string) *BField { f.Field.Name = name return f } // Builder helps to group extractable field values of different types into modbus requests with minimal amount of separate requests produced type Builder struct { fields Fields serverAddress string // [network://]host:port unitID uint8 } // NewRequestBuilder creates new instance of Builder with given defaults. // Arguments can be left empty and ServerAddress+UnitID provided for each field separately func NewRequestBuilder(serverAddress string, unitID uint8) *Builder { return &Builder{ serverAddress: serverAddress, unitID: unitID, fields: make(Fields, 0, 5), } } // AddAll adds field into Builder. AddAll does not set ServerAddress and UnitID values. func (b *Builder) AddAll(fields Fields) *Builder { b.fields = append(b.fields, fields...) 
return b } // Add adds field into Builder func (b *Builder) Add(field *BField) *Builder { b.fields = append(b.fields, field.Field) return b } // Bit add bit (0-15) field to Builder to be requested and extracted func (b *Builder) Bit(registerAddress uint16, bit uint8) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeBit, RegisterAddress: registerAddress, Bit: bit, }, } } // Byte add byte field to Builder to be requested and extracted func (b *Builder) Byte(registerAddress uint16, fromHighByte bool) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeByte, RegisterAddress: registerAddress, FromHighByte: fromHighByte, }, } } // Uint8 add uint8 field to Builder to be requested and extracted func (b *Builder) Uint8(registerAddress uint16, fromHighByte bool) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeUint8, RegisterAddress: registerAddress, FromHighByte: fromHighByte, }, } } // Int8 add int8 field to Builder to be requested and extracted func (b *Builder) Int8(registerAddress uint16, fromHighByte bool) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeInt8, RegisterAddress: registerAddress, FromHighByte: fromHighByte, }, } } // Uint16 add uint16 field to Builder to be requested and extracted func (b *Builder) Uint16(registerAddress uint16) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeUint16, RegisterAddress: registerAddress, }, } } // Int16 add int16 field to Builder to be requested and extracted func (b *Builder) Int16(registerAddress uint16) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeInt16, RegisterAddress: registerAddress, }, } } // Uint32 add uint32 field to Builder to be requested and extracted func (b *Builder) Uint32(registerAddress uint16) *BField { return 
&BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeUint32, RegisterAddress: registerAddress, }, } } // Int32 add int32 field to Builder to be requested and extracted func (b *Builder) Int32(registerAddress uint16) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeInt32, RegisterAddress: registerAddress, }, } } // Uint64 add uint64 field to Builder to be requested and extracted func (b *Builder) Uint64(registerAddress uint16) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeUint64, RegisterAddress: registerAddress, }, } } // Int64 add int64 field to Builder to be requested and extracted func (b *Builder) Int64(registerAddress uint16) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeInt64, RegisterAddress: registerAddress, }, } } // Float32 add float32 field to Builder to be requested and extracted func (b *Builder) Float32(registerAddress uint16) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeFloat32, RegisterAddress: registerAddress, }, } } // Float64 add float64 field to Builder to be requested and extracted func (b *Builder) Float64(registerAddress uint16) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeFloat64, RegisterAddress: registerAddress, }, } } // String add string field to Builder to be requested and extracted func (b *Builder) String(registerAddress uint16, length uint8) *BField { return &BField{ Field{ ServerAddress: b.serverAddress, UnitID: b.unitID, Type: FieldTypeString, Length: length, RegisterAddress: registerAddress, }, } } // RegisterRequest helps to connect requested fields to responses type RegisterRequest struct { packet.Request // ServerAddress is modbus server address where request should be sent ServerAddress string // UnitID is unit identifier of modbus slave device 
UnitID uint8 // StartAddress is start register address for request StartAddress uint16 // Fields is slice of field use to construct the request and to be extracted from response Fields Fields } // RegistersResponse is marker interface for responses returning register data type RegistersResponse interface { packet.Response AsRegisters(requestStartAddress uint16) (*packet.Registers, error) } // AsRegisters returns response data as Register to more convenient access func (r RegisterRequest) AsRegisters(response RegistersResponse) (*packet.Registers, error) { return response.AsRegisters(r.StartAddress) } // FieldValue is concrete value extracted from register data using field data type and byte order type FieldValue struct { Field Field Value interface{} Error error } // ErrorFieldExtractHadError is returned when ExtractFields could not extract value from Field var ErrorFieldExtractHadError = errors.New("field extraction had an error. check FieldValue.Error for details") // ExtractFields extracts Field values from given response. When continueOnExtractionErrors is true and error occurs // during extraction, this method does not end but continues to extract all Fields and returns ErrorFieldExtractHadError // at the end. To distinguish errors check FieldValue.Error field. func (r RegisterRequest) ExtractFields(response RegistersResponse, continueOnExtractionErrors bool) ([]FieldValue, error) { regs, err := response.AsRegisters(r.StartAddress) if err != nil { return nil, err } hadErrors := false capacity := 0 if continueOnExtractionErrors { capacity = len(r.Fields) } result := make([]FieldValue, 0, capacity) for _, f := range r.Fields { vTmp, err := f.ExtractFrom(regs) if err != nil && !continueOnExtractionErrors { return nil, fmt.Errorf("field extraction failed. 
name: %v err: %w", f.Name, err) } if !hadErrors && err != nil { hadErrors = true } tmp := FieldValue{ Field: f, Value: vTmp, Error: err, } result = append(result, tmp) } if hadErrors { return result, ErrorFieldExtractHadError } return result, nil } // ReadHoldingRegistersTCP combines fields into TCP Read Holding Registers (FC3) requests func (b *Builder) ReadHoldingRegistersTCP() ([]RegisterRequest, error) { return split(b.fields, "fc3_tcp") } // ReadHoldingRegistersRTU combines fields into RTU Read Holding Registers (FC3) requests func (b *Builder) ReadHoldingRegistersRTU() ([]RegisterRequest, error) { return split(b.fields, "fc3_rtu") } // ReadInputRegistersTCP combines fields into TCP Read Input Registers (FC4) requests func (b *Builder) ReadInputRegistersTCP() ([]RegisterRequest, error) { return split(b.fields, "fc4_tcp") } // ReadInputRegistersRTU combines fields into RTU Read Input Registers (FC4) requests func (b *Builder) ReadInputRegistersRTU() ([]RegisterRequest, error) { return split(b.fields, "fc4_rtu") }
builder.go
0.811003
0.520618
builder.go
starcoder
package main import ( utils "github.com/Jordi-Jaspers/AdventOfCode2021/Util" "log" "math" ) type Vector struct { start Coordinate end Coordinate slope float64 } type Coordinate struct { x int y int } type Space struct { width int height int overlap [][]int } const MINIMUM_OVERLAP = 2 func main() { // read the input of the file log.Println("Reading the input file...") input := utils.ReadInput("../input.txt") // Setup Environment log.Println("Setting up the environment...") space, vectors := setup(input) log.Printf("Created space-matrix of '%d' * '%d' with '%d' vectors.\n", space.width, space.height, len(vectors)) // Check if vectors overlap in the space log.Println("Checking if vectors overlap...") space = checkOverlap(space, vectors) // Find the coordinate with the most amount of overlap coordinates := getCoordinatesWithMinimumOverlap(space, MINIMUM_OVERLAP) log.Printf("There are '%d' coordinates with at least '%d' overlap.\n", len(coordinates), MINIMUM_OVERLAP) //log.Printf("space %v", space.overlap) } func getCoordinatesWithMinimumOverlap(space Space, maxOverlap int) []Coordinate { coordinates := make([]Coordinate, 0) for x := 0; x < space.width; x++ { for y := 0; y < space.height; y++ { if space.overlap[x][y] >= maxOverlap { coordinates = append(coordinates, Coordinate{x, y}) } } } return coordinates } func checkOverlap(space Space, vectors []Vector) Space { matrix := make([][]int, space.width) for i := range matrix { matrix[i] = make([]int, space.height) } for _, vector := range vectors { deltaX := float64(vector.end.x - vector.start.x) deltaY := float64(vector.end.y - vector.start.y) if deltaX == float64(0){ log.Println("X-coordinate is constant.") for i := 0; float64(i) <= math.Abs(deltaY); i++ { if deltaY < float64(0) { matrix[vector.start.x-1][vector.start.y-1-i]++ } else { matrix[vector.start.x-1][vector.start.y-1+i]++ } } } else if deltaY == float64(0) { log.Println("Y-coordinate is constant.") for i := 0; float64(i) <= math.Abs(deltaX); i++ { if deltaX < 
float64(0) { matrix[vector.start.x-1-i][vector.start.y-1]++ } else { matrix[vector.start.x-1+i][vector.start.y-1]++ } } } else if vector.slope == float64(1) { log.Println("The Slope is 45 degrees.") for i := 0; float64(i) <= math.Abs(deltaX); i++ { switch { case deltaX < float64(0) && deltaY < float64(0): matrix[vector.start.x-1-i][vector.start.y-1-i]++ case deltaX < float64(0) && deltaY > float64(0): matrix[vector.start.x-1-i][vector.start.y-1+i]++ case deltaX > float64(0) && deltaY < float64(0): matrix[vector.start.x-1+i][vector.start.y-1-i]++ case deltaX > float64(0) && deltaY > float64(0): matrix[vector.start.x-1+i][vector.start.y-1+i]++ } } } } space.overlap = matrix return space } func setup(input []string) (Space, []Vector) { maxX := 0 maxY := 0 vectors := make([]Vector, 0) for _, line := range input { // get the fuel required for the module coordinates := utils.SplitDigitsFromSeperatedString(line) // create the vector vector := Vector{ start: Coordinate{ x: coordinates[0], y: coordinates[1], }, end: Coordinate{ x: coordinates[2], y: coordinates[3], }, slope: math.Abs(float64(coordinates[3] - coordinates[1])) / math.Abs(float64(coordinates[2] - coordinates[0])), } vectors = append(vectors, vector) // Find maximum X & Y coordinate. if vector.start.x > maxX { maxX = vector.start.x } if vector.end.x > maxX { maxX = vector.end.x } if vector.start.y > maxY { maxY = vector.start.y } if vector.end.y > maxY { maxY = vector.end.y } } return Space{ width: maxX, height: maxY, }, vectors }
Day 5 - Hydrothermal Venture/5.2/main.go
0.610918
0.442938
main.go
starcoder
// NOTE(review): in Build, "for _ = range a.TalkHandlers" uses a redundant
// blank assignment; idiomatic Go is "for range a.TalkHandlers". The Build
// method is otherwise a strictly order-dependent chain of builder calls,
// so the code below is left byte-identical.
package tick import ( "sort" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/tick/ast" ) // AlertNode converts the Alert pipeline node into the TICKScript AST type AlertNode struct { Function } // NewAlert creates an Alert function builder func NewAlert(parents []ast.Node) *AlertNode { return &AlertNode{ Function{ Parents: parents, }, } } // Build creates a Alert ast.Node func (n *AlertNode) Build(a *pipeline.AlertNode) (ast.Node, error) { n.Pipe("alert"). Dot("topic", a.Topic). Dot("id", a.Id). Dot("message", a.Message). Dot("details", a.Details). Dot("info", a.Info). Dot("warn", a.Warn). Dot("crit", a.Crit). Dot("infoReset", a.InfoReset). Dot("warnReset", a.WarnReset). Dot("critReset", a.CritReset). Dot("history", a.History). Dot("levelTag", a.LevelTag). Dot("levelField", a.LevelField). Dot("messageField", a.MessageField). Dot("durationField", a.DurationField). Dot("idTag", a.IdTag). Dot("idField", a.IdField). DotIf("all", a.AllFlag). DotIf("noRecoveries", a.NoRecoveriesFlag) for _, in := range a.Inhibitors { args := make([]interface{}, len(in.EqualTags)+1) args[0] = in.Category for i, t := range in.EqualTags { args[i+1] = t } n.Dot("inhibit", args...) } if a.IsStateChangesOnly { if a.StateChangesOnlyDuration == 0 { n.Dot("stateChangesOnly") } else { n.Dot("stateChangesOnly", a.StateChangesOnlyDuration) } } if a.UseFlapping { n.DotZeroValueOK("flapping", a.FlapLow, a.FlapHigh) } for _, h := range a.HTTPPostHandlers { n.DotRemoveZeroValue("post", h.URL). Dot("endpoint", h.Endpoint). DotIf("captureResponse", h.CaptureResponseFlag). Dot("timeout", h.Timeout). 
DotIf("skipSSLVerification", h.SkipSSLVerificationFlag) var headers []string for k := range h.Headers { headers = append(headers, k) } sort.Strings(headers) for _, k := range headers { n.Dot("header", k, h.Headers[k]) } } for _, h := range a.TcpHandlers { n.DotRemoveZeroValue("tcp", h.Address) } for _, h := range a.EmailHandlers { n.Dot("email") for _, to := range h.ToList { n.Dot("to", to) } } for _, h := range a.ExecHandlers { n.DotRemoveZeroValue("exec", args(h.Command)...) } for _, h := range a.LogHandlers { n.DotRemoveZeroValue("log", h.FilePath) if h.Mode != 0 { mode := &ast.NumberNode{ IsInt: true, Int64: h.Mode, Base: 8, } n.Dot("mode", mode) } } for _, h := range a.VictorOpsHandlers { n.Dot("victorOps"). Dot("routingKey", h.RoutingKey) } for _, h := range a.PagerDutyHandlers { n.Dot("pagerDuty"). Dot("serviceKey", h.ServiceKey) } for _, h := range a.PagerDuty2Handlers { n.Dot("pagerDuty2"). Dot("routingKey", h.RoutingKey) for _, l := range h.Links { if len(l.Text) > 0 { n.Dot("link", l.Href, l.Text) } else { n.Dot("link", l.Href) } } } for _, h := range a.PushoverHandlers { n.Dot("pushover"). Dot("userKey", h.UserKey). Dot("device", h.Device). Dot("title", h.Title). Dot("uRL", h.URL). Dot("uRLTitle", h.URLTitle). Dot("sound", h.Sound) } for _, h := range a.SensuHandlers { n.Dot("sensu"). Dot("source", h.Source). Dot("handlers", args(h.HandlersList)...) // Use stable key order keys := make([]string, 0, len(h.MetadataMap)) for k := range h.MetadataMap { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { n.Dot("metadata", k, h.MetadataMap[k]) } } for _, h := range a.ServiceNowHandlers { n.Dot("servicenow"). Dot("source", h.Source). Dot("node", h.Node). Dot("type", h.Type). Dot("resource", h.Resource). Dot("metricName", h.MetricName). 
Dot("messageKey", h.MessageKey) // Use stable key order keys := make([]string, 0, len(h.AdditionalInfoMap)) for k := range h.AdditionalInfoMap { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { n.Dot("additionalInfo", k, h.AdditionalInfoMap[k]) } } for _, h := range a.SlackHandlers { n.Dot("slack"). Dot("workspace", h.Workspace). Dot("channel", h.Channel). Dot("username", h.Username). Dot("iconEmoji", h.IconEmoji) } for _, h := range a.TelegramHandlers { n.Dot("telegram"). Dot("chatId", h.ChatId). Dot("parseMode", h.ParseMode). DotIf("disableWebPagePreview", h.IsDisableWebPagePreview). DotIf("disableNotification", h.IsDisableNotification) } for _, h := range a.HipChatHandlers { n.Dot("hipChat"). Dot("room", h.Room). Dot("token", h.Token) } for _, h := range a.KafkaHandlers { n.Dot("kafka"). Dot("cluster", h.Cluster). Dot("kafkaTopic", h.KafkaTopic). Dot("template", h.Template) } for _, h := range a.AlertaHandlers { n.Dot("alerta"). Dot("token", h.Token). Dot("resource", h.Resource). Dot("event", h.Event). Dot("environment", h.Environment). Dot("group", h.Group). Dot("value", h.Value). Dot("origin", h.Origin). Dot("services", args(h.Service)...). Dot("correlated", args(h.Correlate)...). Dot("timeout", h.Timeout) } for _, h := range a.OpsGenieHandlers { n.Dot("opsGenie"). Dot("teams", args(h.TeamsList)...). Dot("recipients", args(h.RecipientsList)...) } for _, h := range a.OpsGenie2Handlers { n.Dot("opsGenie2"). Dot("teams", args(h.TeamsList)...). Dot("recipients", args(h.RecipientsList)...) } for _ = range a.TalkHandlers { n.Dot("talk") } for _, h := range a.MQTTHandlers { n.DotRemoveZeroValue("mqtt", h.Topic). Dot("brokerName", h.BrokerName). Dot("qos", h.Qos). Dot("retained", h.Retained) } for _, h := range a.SNMPTrapHandlers { n.DotRemoveZeroValue("snmpTrap", h.TrapOid) for _, d := range h.DataList { n.Dot("data", d.Oid, d.Type, d.Value) } } return n.prev, n.err }
pipeline/tick/alert.go
0.531209
0.408513
alert.go
starcoder
package simplemerkle import ( "math" "github.com/zarbchain/zarb-go/crypto/hash" ) var hasher func([]byte) hash.Hash func init() { hasher = hash.CalcHash } type Tree struct { merkles []*hash.Hash } // nextPowerOfTwo returns the next highest power of two from a given number if // it is not already a power of two. This is a helper function used during the // calculation of a merkle tree. func nextPowerOfTwo(n int) int { // Return the number if it's already a power of 2. if n&(n-1) == 0 { return n } // Figure out and return the next power of two. exponent := uint(math.Log2(float64(n))) + 1 return 1 << exponent // 2^exponent } // HashMerkleBranches takes two hashes, treated as the left and right tree // nodes, and returns the hash of their concatenation. This is a helper // function used to aid in the generation of a merkle tree. func HashMerkleBranches(left *hash.Hash, right *hash.Hash) *hash.Hash { // Concatenate the left and right nodes. var h [hash.HashSize * 2]byte copy(h[:hash.HashSize], left.RawBytes()) copy(h[hash.HashSize:], right.RawBytes()) newHash := hasher(h[:]) return &newHash } func NewTreeFromSlices(slices [][]byte) *Tree { hashes := make([]hash.Hash, len(slices)) for i, b := range slices { hashes[i] = hasher(b) } return NewTreeFromHashes(hashes) } func NewTreeFromHashes(hashes []hash.Hash) *Tree { if len(hashes) == 0 { return nil } // abcdww Calculate how many entries are required to hold the binary merkle // tree as a linear array and create an array of that size. nextPoT := nextPowerOfTwo(len(hashes)) arraySize := nextPoT*2 - 1 merkles := make([]*hash.Hash, arraySize) for i := range hashes { merkles[i] = &hashes[i] } // Start the array offset after the last transaction and adjusted to the // next power of two. offset := nextPoT for i := 0; i < arraySize-1; i += 2 { switch { // When there is no left child node, the parent is nil too. 
case merkles[i] == nil: merkles[offset] = nil // When there is no right child, the parent is generated by // hashing the concatenation of the left child with itself. case merkles[i+1] == nil: newHash := HashMerkleBranches(merkles[i], merkles[i]) merkles[offset] = newHash // The normal case sets the parent node to the double sha256 // of the concatenation of the left and right children. default: newHash := HashMerkleBranches(merkles[i], merkles[i+1]) merkles[offset] = newHash } offset++ } return &Tree{merkles: merkles} } func (tree *Tree) Root() hash.Hash { if tree == nil { return hash.UndefHash } h := tree.merkles[len(tree.merkles)-1] if h != nil { return *h } return hash.UndefHash } func (tree *Tree) Depth() int { if tree == nil { return 0 } return int(math.Log2(float64(len(tree.merkles)))) }
libs/merkle/merkle.go
0.768646
0.562357
merkle.go
starcoder
// Package day12 solves AoC 2020 day 12. package day12 import ( "fmt" "github.com/fis/aoc/glue" "github.com/fis/aoc/util" ) func init() { glue.RegisterSolver(2020, 12, glue.LineSolver(solve)) } func solve(lines []string) ([]string, error) { actions := parseInput(lines) t := newTurtle() t.move(actions) p1 := t.distance() s := newShip() s.move(actions) p2 := s.distance() return glue.Ints(p1, p2), nil } type action struct { command byte arg int } func parseInput(lines []string) (out []action) { for _, line := range lines { c := action{} if _, err := fmt.Sscanf(line, "%c%d", &c.command, &c.arg); err == nil { out = append(out, c) } } return out } type turtle struct { pos util.P dir util.P } func newTurtle() turtle { return turtle{pos: util.P{0, 0}, dir: util.P{1, 0}} } func (t *turtle) move(actions []action) { for _, a := range actions { switch a.command { case 'N': t.pos.Y -= a.arg case 'S': t.pos.Y += a.arg case 'E': t.pos.X += a.arg case 'W': t.pos.X -= a.arg case 'L': t.dir = rotate(t.dir, a.arg) case 'R': t.dir = rotate(t.dir, 360-a.arg) case 'F': t.pos.X += a.arg * t.dir.X t.pos.Y += a.arg * t.dir.Y } } } func (t turtle) distance() int { return abs(t.pos.X) + abs(t.pos.Y) } type ship struct { pos util.P wp util.P } func newShip() ship { return ship{pos: util.P{0, 0}, wp: util.P{10, -1}} } func (s *ship) move(actions []action) { for _, a := range actions { switch a.command { case 'N': s.wp.Y -= a.arg case 'S': s.wp.Y += a.arg case 'E': s.wp.X += a.arg case 'W': s.wp.X -= a.arg case 'L': s.wp = rotate(s.wp, a.arg) case 'R': s.wp = rotate(s.wp, 360-a.arg) case 'F': s.pos.X += a.arg * s.wp.X s.pos.Y += a.arg * s.wp.Y } } } func (s ship) distance() int { return abs(s.pos.X) + abs(s.pos.Y) } func rotate(p util.P, deg int) util.P { switch deg { case 90: return util.P{p.Y, -p.X} case 180: return util.P{-p.X, -p.Y} case 270: return util.P{-p.Y, p.X} } return p } func abs(x int) int { if x < 0 { return -x } return x }
2020/day12/day12.go
0.688887
0.409752
day12.go
starcoder
package collection import ( "encoding/json" "errors" "math" "math/rand" "time" "github.com/shopspring/decimal" ) type NumberArrayCollection struct { value []decimal.Decimal BaseCollection } // Sum returns the sum of all items in the collection. func (c NumberArrayCollection) Sum(key ...string) decimal.Decimal { var sum = decimal.New(0, 0) for i := 0; i < len(c.value); i++ { sum = sum.Add(c.value[i]) } return sum } // Length return the length of the collection. func (c NumberArrayCollection) Length() int { return len(c.value) } // Avg returns the average value of a given key. func (c NumberArrayCollection) Avg(key ...string) decimal.Decimal { var sum = decimal.New(0, 0) for i := 0; i < len(c.value); i++ { sum = sum.Add(c.value[i]) } return sum.Div(nd(len(c.value))) } // Min returns the minimum value of a given key. func (c NumberArrayCollection) Min(key ...string) decimal.Decimal { var smallest = decimal.New(0, 0) for i := 0; i < len(c.value); i++ { if i == 0 { smallest = c.value[i] continue } if smallest.GreaterThan(c.value[i]) { smallest = c.value[i] } } return smallest } // Max returns the maximum value of a given key. func (c NumberArrayCollection) Max(key ...string) decimal.Decimal { var biggest = decimal.New(0, 0) for i := 0; i < len(c.value); i++ { if i == 0 { biggest = c.value[i] continue } if biggest.LessThan(c.value[i]) { biggest = c.value[i] } } return biggest } // Prepend adds an item to the beginning of the collection. func (c NumberArrayCollection) Prepend(values ...interface{}) Collection { var d NumberArrayCollection var n = make([]decimal.Decimal, len(c.value)) copy(n, c.value) d.value = append([]decimal.Decimal{newDecimalFromInterface(values[0])}, n...) d.length = len(d.value) return d } // Splice removes and returns a slice of items starting at the specified index. 
func (c NumberArrayCollection) Splice(index ...int) Collection { if len(index) == 1 { var n = make([]decimal.Decimal, len(c.value)) copy(n, c.value) n = n[index[0]:] return NumberArrayCollection{n, BaseCollection{length: len(n)}} } else if len(index) > 1 { var n = make([]decimal.Decimal, len(c.value)) copy(n, c.value) n = n[index[0] : index[0]+index[1]] return NumberArrayCollection{n, BaseCollection{length: len(n)}} } else { return BaseCollection{err: errors.New("invalid argument")} } } // Take returns a new collection with the specified number of items. func (c NumberArrayCollection) Take(num int) Collection { var d NumberArrayCollection if num > c.length { return BaseCollection{err: errors.New("not enough elements to take")} } if num >= 0 { d.value = c.value[:num] d.length = num } else { d.value = c.value[len(c.value)+num:] d.length = 0 - num } return d } // All returns the underlying array represented by the collection. func (c NumberArrayCollection) All() []interface{} { s := make([]interface{}, len(c.value)) for i := 0; i < len(c.value); i++ { s[i] = c.value[i] } return s } func (c NumberArrayCollection) AllE() ([]interface{}, error) { return c.All(), c.err } // Mode returns the mode value of a given key. func (c NumberArrayCollection) Mode(key ...string) []interface{} { valueCount := c.CountBy() maxCount := 0 maxValue := make([]interface{}, len(valueCount)) for v, c := range valueCount { switch { case c < maxCount: continue case c == maxCount: maxValue = append(maxValue, newDecimalFromInterface(v)) case c > maxCount: maxValue = append([]interface{}{}, newDecimalFromInterface(v)) maxCount = c } } return maxValue } func (c NumberArrayCollection) ModeE(key ...string) ([]interface{}, error) { return c.Mode(key...), c.err } // ToNumberArray converts the collection into a plain golang slice which contains decimal.Decimal. 
func (c NumberArrayCollection) ToNumberArray() []decimal.Decimal { return c.value } func (c NumberArrayCollection) ToNumberArrayE() ([]decimal.Decimal, error) { return c.value, c.err } // ToIntArray converts the collection into a plain golang slice which contains int. func (c NumberArrayCollection) ToIntArray() []int { var v = make([]int, len(c.value)) for i, value := range c.value { v[i] = int(value.IntPart()) } return v } func (c NumberArrayCollection) ToIntArrayE() ([]int, error) { return c.ToIntArray(), c.err } // ToInt64Array converts the collection into a plain golang slice which contains int64. func (c NumberArrayCollection) ToInt64Array() []int64 { var v = make([]int64, len(c.value)) for i, value := range c.value { v[i] = value.IntPart() } return v } func (c NumberArrayCollection) ToInt64ArrayE() ([]int64, error) { return c.ToInt64Array(), c.err } // Chunk breaks the collection into multiple, smaller collections of a given size. func (c NumberArrayCollection) Chunk(num int) MultiDimensionalArrayCollection { var d MultiDimensionalArrayCollection d.length = c.length/num + 1 d.value = make([][]interface{}, d.length) count := 0 for i := 1; i <= c.length; i++ { switch { case i == c.length: if i%num == 0 { d.value[count] = c.All()[i-num:] d.value = d.value[:d.length-1] } else { d.value[count] = c.All()[i-i%num:] } case i%num != 0 || i < num: continue default: d.value[count] = c.All()[i-num : i] count++ } } return d } // Concat appends the given array or collection values onto the end of the collection. func (c NumberArrayCollection) Concat(value interface{}) Collection { return NumberArrayCollection{ value: append(c.value, value.([]decimal.Decimal)...), BaseCollection: BaseCollection{length: c.length + len(value.([]decimal.Decimal))}, } } // Contains determines whether the collection contains a given item. 
func (c NumberArrayCollection) Contains(value ...interface{}) bool { if callback, ok := value[0].(CB); ok { for k, v := range c.value { if callback(k, v) { return true } } return false } for _, v := range c.value { if v.Equal(nd(value[0])) { return true } } return false } func (c NumberArrayCollection) ContainsE(value ...interface{}) (bool, error) { return c.Contains(value...), c.err } // CountBy counts the occurrences of values in the collection. By default, the method counts the occurrences of every element. func (c NumberArrayCollection) CountBy(callback ...interface{}) map[interface{}]int { valueCount := make(map[interface{}]int) if len(callback) > 0 { if cb, ok := callback[0].(FilterFun); ok { for _, v := range c.value { valueCount[cb(v)]++ } } } else { for _, v := range c.value { vv, _ := v.Float64() valueCount[vv]++ } } return valueCount } func (c NumberArrayCollection) CountByE(callback ...interface{}) (map[interface{}]int, error) { return c.CountBy(callback...), c.err } // CrossJoin cross joins the collection's values among the given arrays or collections, returning a Cartesian product with all possible permutations. func (c NumberArrayCollection) CrossJoin(array ...[]interface{}) MultiDimensionalArrayCollection { var d MultiDimensionalArrayCollection // A two-dimensional-slice's initial length := len(c.value) for _, s := range array { length *= len(s) } value := make([][]interface{}, length) for i := range value { value[i] = make([]interface{}, len(array)+1) } offset := length / c.length for i := 0; i < length; i++ { value[i][0] = c.value[i/offset] } assignmentToValue(value, array, length, 1, 0, offset) d.value = value d.length = length return d } // Dd dumps the collection's items and ends execution of the script. func (c NumberArrayCollection) Dd() { dd(c) } func (c NumberArrayCollection) DdE() error { dd(c) return c.err } // Dump dumps the collection's items. 
func (c NumberArrayCollection) Dump() { dump(c) } func (c NumberArrayCollection) DumpE() error { dump(c) return c.err } // Diff compares the collection against another collection or a plain PHP array based on its values. // This method will return the values in the original collection that are not present in the given collection. func (c NumberArrayCollection) Diff(m interface{}) Collection { ms := newDecimalArray(m) var d = make([]decimal.Decimal, 0) for _, value := range c.value { exist := false for i := 0; i < len(ms); i++ { if ms[i].Equal(value) { exist = true break } } if !exist { d = append(d, value) } } return NumberArrayCollection{ value: d, } } // Each iterates over the items in the collection and passes each item to a callback. func (c NumberArrayCollection) Each(cb func(item, value interface{}) (interface{}, bool)) Collection { var d = make([]decimal.Decimal, 0) var ( newValue interface{} stop = false ) for key, value := range c.value { if !stop { newValue, stop = cb(key, value) d = append(d, newDecimalFromInterface(newValue)) } else { d = append(d, value) } } return NumberArrayCollection{ value: d, } } // Every may be used to verify that all elements of a collection pass a given truth test. func (c NumberArrayCollection) Every(cb CB) bool { for key, value := range c.value { if !cb(key, value) { return false } } return true } func (c NumberArrayCollection) EveryE(cb CB) (bool, error) { return c.Every(cb), c.err } // Filter filters the collection using the given callback, keeping only those items that pass a given truth test. func (c NumberArrayCollection) Filter(cb CB) Collection { var d = make([]decimal.Decimal, 0) for key, value := range c.value { if cb(key, value) { d = append(d, value) } } return NumberArrayCollection{ value: d, } } // First returns the first element in the collection that passes a given truth test. 
func (c NumberArrayCollection) First(cbs ...CB) interface{} { if len(cbs) > 0 { for key, value := range c.value { if cbs[0](key, value) { return value } } return nil } else { if len(c.value) > 0 { return c.value[0] } else { return nil } } } func (c NumberArrayCollection) FirstE(cbs ...CB) (interface{}, error) { return c.First(cbs...), c.err } // ForPage returns a new collection containing the items that would be present on a given page number. func (c NumberArrayCollection) ForPage(page, size int) Collection { var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) if size > len(d) || size*(page-1) > len(d) { return NumberArrayCollection{ value: d, } } if (page+1)*size > len(d) { return NumberArrayCollection{ value: d[(page-1)*size:], } } else { return NumberArrayCollection{ value: d[(page-1)*size : (page)*size], } } } // IsEmpty returns true if the collection is empty; otherwise, false is returned. func (c NumberArrayCollection) IsEmpty() bool { return len(c.value) == 0 } func (c NumberArrayCollection) IsEmptyE() (bool, error) { return c.IsEmpty(), c.err } // IsNotEmpty returns true if the collection is not empty; otherwise, false is returned. func (c NumberArrayCollection) IsNotEmpty() bool { return len(c.value) != 0 } func (c NumberArrayCollection) IsNotEmptyE() (bool, error) { return c.IsNotEmpty(), c.err } // Last returns the last element in the collection that passes a given truth test. func (c NumberArrayCollection) Last(cbs ...CB) interface{} { if len(cbs) > 0 { var last interface{} for key, value := range c.value { if cbs[0](key, value) { last = value } } return last } else { if len(c.value) > 0 { return c.value[len(c.value)-1] } else { return nil } } } func (c NumberArrayCollection) LastE(cbs ...CB) (interface{}, error) { return c.Last(cbs...), c.err } // Median returns the median value of a given key. 
func (c NumberArrayCollection) Median(key ...string) decimal.Decimal { if len(c.value) < 2 { return c.value[0] } var f = make([]decimal.Decimal, len(c.value)) copy(f, c.value) f = qsort(f, true) return f[len(f)/2].Add(f[len(f)/2-1]).Div(nd(2)) } // Merge merges the given array or collection with the original collection. If a string key in the given items // matches a string key in the original collection, the given items's value will overwrite the value in the // original collection. func (c NumberArrayCollection) Merge(i interface{}) Collection { m := newDecimalArray(i) var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) d = append(d, m...) return NumberArrayCollection{ value: d, } } // Pad will fill the array with the given value until the array reaches the specified size. func (c NumberArrayCollection) Pad(num int, value interface{}) Collection { if len(c.value) > num { d := make([]decimal.Decimal, len(c.value)) copy(d, c.value) return NumberArrayCollection{ value: d, } } if num > 0 { d := make([]decimal.Decimal, num) for i := 0; i < num; i++ { if i < len(c.value) { d[i] = c.value[i] } else { d[i] = nd(value) } } return NumberArrayCollection{ value: d, } } else { d := make([]decimal.Decimal, -num) for i := 0; i < -num; i++ { if i < -num-len(c.value) { d[i] = nd(value) } else { d[i] = c.value[i] } } return NumberArrayCollection{ value: d, } } } // Partition separate elements that pass a given truth test from those that do not. func (c NumberArrayCollection) Partition(cb PartCB) (Collection, Collection) { var d1 = make([]decimal.Decimal, 0) var d2 = make([]decimal.Decimal, 0) for i := 0; i < len(c.value); i++ { if cb(i) { d1 = append(d1, c.value[i]) } else { d2 = append(d2, c.value[i]) } } return NumberArrayCollection{ value: d1, }, NumberArrayCollection{ value: d2, } } // Pop removes and returns the last item from the collection. 
func (c NumberArrayCollection) Pop() interface{} { last := c.value[len(c.value)-1] c.value = c.value[:len(c.value)-1] return last } func (c NumberArrayCollection) PopE() (interface{}, error) { return c.Pop(), c.err } // Push appends an item to the end of the collection. func (c NumberArrayCollection) Push(v interface{}) Collection { var d = make([]decimal.Decimal, len(c.value)+1) for i := 0; i < len(d); i++ { if i < len(c.value) { d[i] = c.value[i] } else { d[i] = nd(v) } } return NumberArrayCollection{ value: d, } } // Random returns a random item from the collection. func (c NumberArrayCollection) Random(num ...int) Collection { if len(num) == 0 { return BaseCollection{ value: c.value[rand.Intn(len(c.value))], } } else { if num[0] > len(c.value) { return BaseCollection{err: errors.New("wrong num")} } var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) for i := 0; i < len(c.value)-num[0]; i++ { index := rand.Intn(len(d)) d = append(d[:index], d[index+1:]...) } return NumberArrayCollection{ value: d, } } } // Reduce reduces the collection to a single value, passing the result of each iteration into the subsequent iteration. func (c NumberArrayCollection) Reduce(cb ReduceCB) interface{} { var res interface{} for i := 0; i < len(c.value); i++ { res = cb(res, c.value[i]) } return res } func (c NumberArrayCollection) ReduceE(cb ReduceCB) (interface{}, error) { return c.Reduce(cb), c.err } // Reject filters the collection using the given callback. func (c NumberArrayCollection) Reject(cb CB) Collection { var d = make([]decimal.Decimal, 0) for key, value := range c.value { if !cb(key, value) { d = append(d, value) } } return NumberArrayCollection{ value: d, } } // Reverse reverses the order of the collection's items, preserving the original keys. 
func (c NumberArrayCollection) Reverse() Collection { var d = make([]decimal.Decimal, len(c.value)) j := 0 for i := len(c.value) - 1; i > -1; i-- { d[j] = c.value[i] j++ } return NumberArrayCollection{ value: d, } } // Search searches the collection for the given value and returns its key if found. If the item is not found, // -1 is returned. func (c NumberArrayCollection) Search(v interface{}) int { if cb, ok := v.(CB); ok { for i := 0; i < len(c.value); i++ { if cb(i, c.value[i]) { return i } } } else { n := nd(v) for i := 0; i < len(c.value); i++ { if n.Equal(c.value[i]) { return i } } } return -1 } func (c NumberArrayCollection) SearchE(v interface{}) (int, error) { return -1, c.err } // Shift removes and returns the first item from the collection. func (c NumberArrayCollection) Shift() Collection { var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) d = d[1:] return NumberArrayCollection{ value: d, } } // Shuffle randomly shuffles the items in the collection. func (c NumberArrayCollection) Shuffle() Collection { var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) rand.Seed(time.Now().UnixNano()) rand.Shuffle(len(c.value), func(i, j int) { d[i], d[j] = d[j], d[i] }) return NumberArrayCollection{ value: d, } } // Slice returns a slice of the collection starting at the given index. func (c NumberArrayCollection) Slice(keys ...int) Collection { var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) if len(keys) == 1 { return NumberArrayCollection{ value: d[keys[0]:], } } else { return NumberArrayCollection{ value: d[keys[0] : keys[0]+keys[1]], } } } // Sort sorts the collection. func (c NumberArrayCollection) Sort() Collection { var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) d = qsort(d, true) return NumberArrayCollection{ value: d, } } // SortByDesc has the same signature as the sortBy method, but will sort the collection in the opposite order. 
func (c NumberArrayCollection) SortByDesc() Collection { var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) d = qsort(d, false) return NumberArrayCollection{ value: d, } } // Split breaks a collection into the given number of groups. func (c NumberArrayCollection) Split(num int) Collection { var d = make([][]interface{}, int(math.Ceil(float64(len(c.value))/float64(num)))) j := -1 for i := 0; i < len(c.value); i++ { if i%num == 0 { j++ if i+num <= len(c.value) { d[j] = make([]interface{}, num) } else { d[j] = make([]interface{}, len(c.value)-i) } d[j][i%num] = c.value[i] } else { d[j][i%num] = c.value[i] } } return MultiDimensionalArrayCollection{ value: d, } } // Unique returns all of the unique items in the collection. func (c NumberArrayCollection) Unique() Collection { var d = make([]decimal.Decimal, len(c.value)) copy(d, c.value) x := make([]decimal.Decimal, 0) for _, i := range d { if len(x) == 0 { x = append(x, i) } else { for k, v := range x { if i.Equal(v) { break } if k == len(x)-1 { x = append(x, i) } } } } return NumberArrayCollection{ value: x, } } // ToJson converts the collection into a json string. func (c NumberArrayCollection) ToJson() string { s, err := json.Marshal(c.value) if err != nil { return "" } return string(s) } func (c NumberArrayCollection) ToJsonE() (string, error) { s, err := json.Marshal(c.value) if err != nil { c.errorHandle(err.Error()) return "", err } return string(s), c.err }
number_array_collection.go
0.763043
0.417034
number_array_collection.go
starcoder
package binary import ( "github.com/matrixorigin/matrixone/pkg/container/nulls" "github.com/matrixorigin/matrixone/pkg/container/types" "github.com/matrixorigin/matrixone/pkg/container/vector" "github.com/matrixorigin/matrixone/pkg/encoding" "github.com/matrixorigin/matrixone/pkg/vectorize/power" "github.com/matrixorigin/matrixone/pkg/vm/process" ) func Power(vectors []*vector.Vector, proc *process.Process) (*vector.Vector, error) { left, right := vectors[0], vectors[1] resultType := types.Type{Oid: types.T_float64, Size: 8} resultElementSize := int(resultType.Size) leftValues, rightValues := vector.MustTCols[float64](left), vector.MustTCols[float64](right) switch { case left.IsScalar() && right.IsScalar(): if left.ConstVectorIsNull() || right.ConstVectorIsNull() { return proc.AllocScalarNullVector(resultType), nil } resultVector := vector.NewConst(resultType) resultValues := make([]float64, 1) vector.SetCol(resultVector, power.Power(leftValues, rightValues, resultValues)) return resultVector, nil case left.IsScalar() && !right.IsScalar(): if left.ConstVectorIsNull() { return proc.AllocScalarNullVector(resultType), nil } resultVector, err := proc.AllocVector(resultType, int64(resultElementSize*len(rightValues))) if err != nil { return nil, err } resultValues := encoding.DecodeFloat64Slice(resultVector.Data) resultValues = resultValues[:len(rightValues)] nulls.Set(resultVector.Nsp, right.Nsp) vector.SetCol(resultVector, power.PowerScalarLeftConst(leftValues[0], rightValues, resultValues)) return resultVector, nil case !left.IsScalar() && right.IsScalar(): if right.ConstVectorIsNull() { return proc.AllocScalarNullVector(resultType), nil } resultVector, err := proc.AllocVector(resultType, int64(resultElementSize*len(leftValues))) if err != nil { return nil, err } resultValues := encoding.DecodeFloat64Slice(resultVector.Data) resultValues = resultValues[:len(leftValues)] nulls.Set(resultVector.Nsp, left.Nsp) vector.SetCol(resultVector, 
power.PowerScalarRightConst(rightValues[0], leftValues, resultValues)) return resultVector, nil } resultVector, err := proc.AllocVector(resultType, int64(resultElementSize*len(rightValues))) if err != nil { return nil, err } resultValues := encoding.DecodeFloat64Slice(resultVector.Data) resultValues = resultValues[:len(leftValues)] nulls.Or(left.Nsp, right.Nsp, resultVector.Nsp) vector.SetCol(resultVector, power.Power(leftValues, rightValues, resultValues)) return resultVector, nil }
pkg/sql/plan2/function/builtin/binary/power.go
0.5564
0.51013
power.go
starcoder
package zkbpp //This file contains declaration for all fast sha gates for Z2 import "math/big" //================================================================ // UTILS //================================================================ func bit(x uint32, i int) uint32 { return x >> i & 1 } func setBit(x uint32, i int, b uint32) uint32 { if b == 1 { return x | (b << i) } return x & ^(1 << i) } //================================================================ // Fast gates on uint32 //================================================================ func mpcZ2XorFast(x, y []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} z[0] = x[0] ^ y[0] z[1] = x[1] ^ y[1] z[2] = x[2] ^ y[2] return } func mpcZ2XorFastVerif(x, y []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} z[0] = x[0] ^ y[0] z[1] = x[1] ^ y[1] return } func mpcZ2OrFast(x, y []uint32, c *Circuit) (z []uint32) { w := []uint32{^x[0], ^x[1], ^x[2]} v := []uint32{^y[0], ^y[1], ^y[2]} z = mpcZ2AndFast(w, v, c) z[0] = ^z[0] z[1] = ^z[1] z[2] = ^z[2] return } func mpcZ2OrFastVerif(x, y []uint32, c *Circuit) (z []uint32) { w := []uint32{^x[0], ^x[1], 0} v := []uint32{^y[0], ^y[1], 0} z = mpcZ2AndFastVerif(w, v, c) z[0] = ^z[0] z[1] = ^z[1] return } func mpcZ2AndFast(x, y []uint32, circ *Circuit) (z []uint32) { z = []uint32{0, 0, 0} a := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1), circ.generateRandomUint32(2)} b := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1), circ.generateRandomUint32(2)} c := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1), circ.generateRandomUint32(2)} delta := uint32(circ.preprocessing.deltas[circ.preprocessing.deltasIndex].Uint64()) c = mpcZ2XorFast(c, []uint32{delta, delta, delta}, circ) circ.preprocessing.deltasIndex++ //compute and reconstruct alpha = (x-a), betas = y-b alphas := mpcZ2XorFast(x, a, circ) betas := mpcZ2XorFast(y, b, circ) alpha := alphas[0] ^ alphas[1] ^ alphas[2] beta := betas[0] ^ betas[1] ^ 
betas[2] for i := 0; i < 3; i++ { tmp := y[i] & alpha tmp1 := x[i] & beta tmp2 := alpha & beta z[i] = c[i] ^ tmp tmp3 := tmp1 ^ tmp2 z[i] = z[i] ^ tmp3 } circ.views.player1 = append(circ.views.player1, big.NewInt(int64(alphas[1])), big.NewInt(int64(betas[1]))) circ.views.player2 = append(circ.views.player2, big.NewInt(int64(alphas[2])), big.NewInt(int64(betas[2]))) circ.views.player3 = append(circ.views.player3, big.NewInt(int64(alphas[0])), big.NewInt(int64(betas[0]))) return } func mpcZ2AndFastVerif(x, y []uint32, circ *Circuit) (z []uint32) { z = []uint32{0, 0, 0} a := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1)} b := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1)} c := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1)} delta := uint32(circ.preprocessing.deltas[circ.preprocessing.deltasIndex].Uint64()) c = mpcZ2XorFastVerif(c, []uint32{delta, delta, delta}, circ) circ.preprocessing.deltasIndex++ //compute and reconstruct alpha = (x-a), betas = y-b alphas := mpcZ2XorFastVerif(x, a, circ) betas := mpcZ2XorFastVerif(y, b, circ) alphas[2] = uint32(circ.views.player2[circ.views.currentIndex].Uint64()) circ.views.currentIndex++ betas[2] = uint32(circ.views.player2[circ.views.currentIndex].Uint64()) circ.views.currentIndex++ alpha := alphas[0] ^ alphas[1] ^ alphas[2] beta := betas[0] ^ betas[1] ^ betas[2] for i := 0; i < 2; i++ { tmp := y[i] & alpha tmp1 := x[i] & beta tmp2 := alpha & beta z[i] = c[i] ^ tmp tmp3 := tmp1 ^ tmp2 z[i] = z[i] ^ tmp3 } circ.views.player1 = append(circ.views.player1, big.NewInt(int64(alphas[1])), big.NewInt(int64(betas[1]))) return } func mpcZ2RightShiftFast(x []uint32, i []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} z[0] = x[0] >> i[0] z[1] = x[1] >> i[0] z[2] = x[2] >> i[0] return } func mpcZ2RightShiftFastVerif(x []uint32, i []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} z[0] = x[0] >> i[0] z[1] = x[1] >> i[0] return } func mpcZ2RightRotate32Fast(x 
[]uint32, i []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} z[0] = x[0]>>i[0] | x[0]<<(32-i[0]) z[1] = x[1]>>i[0] | x[1]<<(32-i[0]) z[2] = x[2]>>i[0] | x[2]<<(32-i[0]) return } func mpcZ2RightRotate32FastVerif(x []uint32, i []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} z[0] = x[0]>>i[0] | x[0]<<(32-i[0]) z[1] = x[1]>>i[0] | x[1]<<(32-i[0]) return } func mpcZ2AddFast(x, y []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} carry := []uint32{0, 0, 0} tmp := mpcZ2AndFast(x, y, c) tmp2 := mpcZ2XorFast(x, y, c) for i := 0; i < 31; i++ { tmp3 := mpcZ2AndFast(carry, tmp2, c) t := mpcZ2OrFast(tmp3, tmp, c) carry[0] = setBit(carry[0], i+1, bit(t[0], i)) carry[1] = setBit(carry[1], i+1, bit(t[1], i)) carry[2] = setBit(carry[2], i+1, bit(t[2], i)) } z[0] = x[0] ^ y[0] ^ carry[0] z[1] = x[1] ^ y[1] ^ carry[1] z[2] = x[2] ^ y[2] ^ carry[2] return } func mpcZ2AddFastVerif(x, y []uint32, c *Circuit) (z []uint32) { z = []uint32{0, 0, 0} carry := []uint32{0, 0, 0} tmp := mpcZ2AndFastVerif(x, y, c) tmp2 := mpcZ2XorFastVerif(x, y, c) for i := 0; i < 31; i++ { tmp3 := mpcZ2AndFastVerif(carry, tmp2, c) t := mpcZ2OrFastVerif(tmp3, tmp, c) carry[0] = setBit(carry[0], i+1, bit(t[0], i)) carry[1] = setBit(carry[1], i+1, bit(t[1], i)) } z[0] = x[0] ^ y[0] ^ carry[0] z[1] = x[1] ^ y[1] ^ carry[1] return } func mpcZ2AddKFast(x, k []uint32, c *Circuit) (z []uint32) { //since k^k^k = k , we can simply call add with the second var being (k,k,k) z = mpcZ2AddFast(x, []uint32{k[0], k[0], k[0]}, c) return } func mpcZ2AddKFastVerif(x, k []uint32, c *Circuit) (z []uint32) { //since k^k^k = k , we can simply call add with the second var being (k,k,k) z = mpcZ2AddFastVerif(x, []uint32{k[0], k[0], k[0]}, c) return } //================================================================ // PREPROCESS GATES //================================================================ func mpcZ2NoOpFast(x, y []uint32, c *Circuit) (z []uint32) { return } func mpcZ2PreprocessFast(x []uint32, y 
[]uint32, circ *Circuit) (z []uint32) { a_shares := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1), circ.generateRandomUint32(2)} b_shares := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1), circ.generateRandomUint32(2)} c_shares := []uint32{circ.generateRandomUint32(0), circ.generateRandomUint32(1), circ.generateRandomUint32(2)} a := a_shares[0] ^ a_shares[1] ^ a_shares[2] b := b_shares[0] ^ b_shares[1] ^ b_shares[2] c := a & b delta := c ^ (c_shares[0] ^ c_shares[1] ^ c_shares[2]) circ.preprocessing.deltas = append(circ.preprocessing.deltas, big.NewInt(int64(delta))) return } func mpcZ2AddPreprocessFast(x, y []uint32, c *Circuit) (z []uint32) { mpcZ2PreprocessFast(nil, nil, c) for i := 0; i < 31; i++ { mpcZ2PreprocessFast(nil, nil, c) mpcZ2PreprocessFast(nil, nil, c) } return }
CRISP_go/zkbpp/gates_z2_shaFast.go
0.534612
0.431045
gates_z2_shaFast.go
starcoder
package models import ( i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) // WorkbookChartPoint type WorkbookChartPoint struct { Entity // Encapsulates the format properties chart point. Read-only. format WorkbookChartPointFormatable // Returns the value of a chart point. Read-only. value Jsonable } // NewWorkbookChartPoint instantiates a new workbookChartPoint and sets the default values. func NewWorkbookChartPoint()(*WorkbookChartPoint) { m := &WorkbookChartPoint{ Entity: *NewEntity(), } return m } // CreateWorkbookChartPointFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value func CreateWorkbookChartPointFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { return NewWorkbookChartPoint(), nil } // GetFieldDeserializers the deserialization information for the current model func (m *WorkbookChartPoint) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { res := m.Entity.GetFieldDeserializers() res["format"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetObjectValue(CreateWorkbookChartPointFormatFromDiscriminatorValue) if err != nil { return err } if val != nil { m.SetFormat(val.(WorkbookChartPointFormatable)) } return nil } res["value"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetObjectValue(CreateJsonFromDiscriminatorValue) if err != nil { return err } if val != nil { m.SetValue(val.(Jsonable)) } return nil } return res } // GetFormat gets the format property value. Encapsulates the format properties chart point. Read-only. 
func (m *WorkbookChartPoint) GetFormat()(WorkbookChartPointFormatable) { if m == nil { return nil } else { return m.format } } // GetValue gets the value property value. Returns the value of a chart point. Read-only. func (m *WorkbookChartPoint) GetValue()(Jsonable) { if m == nil { return nil } else { return m.value } } // Serialize serializes information the current object func (m *WorkbookChartPoint) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { err := m.Entity.Serialize(writer) if err != nil { return err } { err = writer.WriteObjectValue("format", m.GetFormat()) if err != nil { return err } } { err = writer.WriteObjectValue("value", m.GetValue()) if err != nil { return err } } return nil } // SetFormat sets the format property value. Encapsulates the format properties chart point. Read-only. func (m *WorkbookChartPoint) SetFormat(value WorkbookChartPointFormatable)() { if m != nil { m.format = value } } // SetValue sets the value property value. Returns the value of a chart point. Read-only. func (m *WorkbookChartPoint) SetValue(value Jsonable)() { if m != nil { m.value = value } }
models/workbook_chart_point.go
0.742141
0.429908
workbook_chart_point.go
starcoder
package block import ( "github.com/df-mc/dragonfly/server/block/cube" "github.com/df-mc/dragonfly/server/entity" "github.com/df-mc/dragonfly/server/entity/damage" "github.com/df-mc/dragonfly/server/event" "github.com/df-mc/dragonfly/server/world" "github.com/df-mc/dragonfly/server/world/sound" "math/rand" "time" ) // Lava is a light-emitting fluid block that causes fire damage. type Lava struct { empty replaceable // Still makes the lava not spread whenever it is updated. Still lava cannot be acquired in the game // without world editing. Still bool // Depth is the depth of the water. This is a number from 1-8, where 8 is a source block and 1 is the // smallest possible lava block. Depth int // Falling specifies if the lava is falling. Falling lava will always appear as a source block, but its // behaviour differs when it starts spreading. Falling bool } // neighboursLavaFlammable returns true if one a block adjacent to the passed position is flammable. func neighboursLavaFlammable(pos cube.Pos, w *world.World) bool { for i := cube.Face(0); i < 6; i++ { if flammable, ok := w.Block(pos.Side(i)).(Flammable); ok && flammable.FlammabilityInfo().LavaFlammable { return true } } return false } // EntityInside ... func (l Lava) EntityInside(_ cube.Pos, _ *world.World, e world.Entity) { if fallEntity, ok := e.(fallDistanceEntity); ok { fallEntity.ResetFallDistance() } if flammable, ok := e.(entity.Flammable); ok { if l, ok := e.(entity.Living); ok && !l.AttackImmune() { l.Hurt(4, damage.SourceLava{}) } flammable.SetOnFire(15 * time.Second) } } // RandomTick ... 
func (l Lava) RandomTick(pos cube.Pos, w *world.World, r *rand.Rand) { i := r.Intn(3) if i > 0 { for j := 0; j < i; j++ { pos = pos.Add(cube.Pos{r.Intn(3) - 1, 1, r.Intn(3) - 1}) if _, ok := w.Block(pos).(Air); ok { if neighboursLavaFlammable(pos, w) { w.SetBlock(pos, Fire{}, nil) } } } } else { for j := 0; j < 3; j++ { pos = pos.Add(cube.Pos{r.Intn(3) - 1, 0, r.Intn(3) - 1}) if _, ok := w.Block(pos.Side(cube.FaceUp)).(Air); ok { if flammable, ok := w.Block(pos).(Flammable); ok && flammable.FlammabilityInfo().LavaFlammable && flammable.FlammabilityInfo().Encouragement > 0 { w.SetBlock(pos, Fire{}, nil) } } } } } // HasLiquidDrops ... func (Lava) HasLiquidDrops() bool { return false } // LightDiffusionLevel always returns 2. func (Lava) LightDiffusionLevel() uint8 { return 2 } // LightEmissionLevel returns 15. func (Lava) LightEmissionLevel() uint8 { return 15 } // NeighbourUpdateTick ... func (l Lava) NeighbourUpdateTick(pos, _ cube.Pos, w *world.World) { if !l.Harden(pos, w, nil) { w.ScheduleBlockUpdate(pos, w.Dimension().LavaSpreadDuration()) } } // ScheduledTick ... func (l Lava) ScheduledTick(pos cube.Pos, w *world.World, _ *rand.Rand) { if !l.Harden(pos, w, nil) { tickLiquid(l, pos, w) } } // LiquidDepth returns the depth of the lava. func (l Lava) LiquidDepth() int { return l.Depth } // SpreadDecay always returns 2. func (Lava) SpreadDecay() int { return 2 } // WithDepth returns a new Lava block with the depth passed and falling if set to true. func (l Lava) WithDepth(depth int, falling bool) world.Liquid { l.Depth = depth l.Falling = falling l.Still = false return l } // LiquidFalling checks if the lava is falling. func (l Lava) LiquidFalling() bool { return l.Falling } // LiquidType returns 10 as a unique identifier for the lava liquid. func (Lava) LiquidType() string { return "lava" } // Harden handles the hardening logic of lava. 
func (l Lava) Harden(pos cube.Pos, w *world.World, flownIntoBy *cube.Pos) bool { var ok bool var water, b world.Block if flownIntoBy == nil { var water, b world.Block _, soulSoilFound := w.Block(pos.Side(cube.FaceDown)).(SoulSoil) pos.Neighbours(func(neighbour cube.Pos) { if b != nil || neighbour[1] == pos[1]-1 { return } if _, ok := w.Block(neighbour).(BlueIce); ok { if soulSoilFound { b = Basalt{} } return } if waterBlock, ok := w.Block(neighbour).(Water); ok { water = waterBlock if l.Depth == 8 && !l.Falling { b = Obsidian{} return } b = Cobblestone{} } }, w.Range()) if b != nil { ctx := event.C() if w.Handler().HandleLiquidHarden(ctx, pos, l, water, b); ctx.Cancelled() { return false } w.PlaySound(pos.Vec3Centre(), sound.Fizz{}) w.SetBlock(pos, b, nil) return true } return false } water, ok = w.Block(*flownIntoBy).(Water) if !ok { return false } if l.Depth == 8 && !l.Falling { b = Obsidian{} } else { b = Cobblestone{} } ctx := event.C() if w.Handler().HandleLiquidHarden(ctx, pos, l, water, b); ctx.Cancelled() { return false } w.SetBlock(pos, b, nil) w.PlaySound(pos.Vec3Centre(), sound.Fizz{}) return true } // EncodeBlock ... func (l Lava) EncodeBlock() (name string, properties map[string]any) { if l.Depth < 1 || l.Depth > 8 { panic("invalid lava depth, must be between 1 and 8") } v := 8 - l.Depth if l.Falling { v += 8 } if l.Still { return "minecraft:lava", map[string]any{"liquid_depth": int32(v)} } return "minecraft:flowing_lava", map[string]any{"liquid_depth": int32(v)} } // allLava returns a list of all lava states. 
func allLava() (b []world.Block) { f := func(still, falling bool) { b = append(b, Lava{Still: still, Falling: falling, Depth: 8}) b = append(b, Lava{Still: still, Falling: falling, Depth: 7}) b = append(b, Lava{Still: still, Falling: falling, Depth: 6}) b = append(b, Lava{Still: still, Falling: falling, Depth: 5}) b = append(b, Lava{Still: still, Falling: falling, Depth: 4}) b = append(b, Lava{Still: still, Falling: falling, Depth: 3}) b = append(b, Lava{Still: still, Falling: falling, Depth: 2}) b = append(b, Lava{Still: still, Falling: falling, Depth: 1}) } f(true, true) f(true, false) f(false, false) f(false, true) return }
server/block/lava.go
0.64791
0.414306
lava.go
starcoder
package main import ( "bufio" "fmt" "log" "os" "strconv" "strings" ) func main() { lines := readFile("input.txt") validPolicyOne := 0 validPolicyTwo := 0 for _, line := range lines { if IsValidPasswordPolicyOne(line) { validPolicyOne++ } if IsValidPasswordPolicyTwo(line) { validPolicyTwo++ } } fmt.Printf("validPolicyOne: %v\n", validPolicyOne) fmt.Printf("validPolicyTwo: %v\n", validPolicyTwo) } // IsValidPasswordPolicyOne determines if a line is a valid password based on // the policy: Each line gives the password policy and then the password. The // password policy indicates the lowest and highest number of times a given // letter must appear for the password to be valid. For example, 1-3 a means // that the password must contain a at least 1 time and at most 3 times. func IsValidPasswordPolicyOne(line string) bool { password, character, min, max := ParseLine(line) count := strings.Count(password, character) return min <= count && count <= max } // IsValidPasswordPolicyTwo determines if a line is a valid password based on // the policy: Each policy actually describes two positions in the password, // where 1 means the first character, 2 means the second character, and so on. // (Be careful; Toboggan Corporate Policies have no concept of "index zero"!) // Exactly one of these positions must contain the given letter. Other // occurrences of the letter are irrelevant for the purposes of policy // enforcement. Given the same example list from above: // 1-3 a: abcde is valid: position 1 contains a and position 3 does not. // 1-3 b: cdefg is invalid: neither position 1 nor position 3 contains b. // 2-9 c: ccccccccc is invalid: both position 2 and position 9 contain c. func IsValidPasswordPolicyTwo(line string) bool { password, character, pos1, pos2 := ParseLine(line) // positions are 1 indexed not 0 indexed. 
Therefore subtract one from all positions posOneMatchAndPosTwoNoMatch := character == string(password[pos1-1]) && character != string(password[pos2-1]) posTwoMatchAndPosOneNoMatch := character != string(password[pos1-1]) && character == string(password[pos2-1]) return posOneMatchAndPosTwoNoMatch || posTwoMatchAndPosOneNoMatch } // ParseLine extracts the following fields from a line of the format "1-3 a: // abcde": password, character, min, and max // ParseLine("1-3 a: abcde") => "abcde", "a", 1, 3 func ParseLine(line string) (password string, character string, min int, max int) { words := strings.Fields(line) min, max = parseCounts(words[0]) character = strings.Trim(words[1], ":") password = words[2] return } // parseCounts extracts the min and max from a string that represents a range // parseCounts("1-3") => 1, 3 func parseCounts(word string) (min int, max int) { counts := strings.Split(word, "-") min, err := strconv.Atoi(counts[0]) if err != nil { log.Fatal(err) } max, err = strconv.Atoi(counts[1]) if err != nil { log.Fatal(err) } return } func readFile(filename string) (passwords []string) { file, err := os.Open(filename) defer file.Close() if err != nil { log.Fatal(err) } scanner := bufio.NewScanner(file) scanner.Split(bufio.ScanLines) for scanner.Scan() { text := scanner.Text() if text == "" { // Reached end of file file.Close() return passwords } passwords = append(passwords, text) } return passwords }
2020/day2/day2.go
0.725551
0.437944
day2.go
starcoder
package opc // Spatial Stripes // Creates spatial sine wave stripes: x in the red channel, y--green, z--blue // Also makes a white dot which moves down the strip non-spatially in the order // that the LEDs are indexed. import ( "github.com/longears/pixelslinger/colorutils" "github.com/longears/pixelslinger/config" "github.com/longears/pixelslinger/midi" "math" "time" ) func MakePatternDiamond(locations []float64) ByteThread { // get bounding box n_pixels := len(locations) / 3 var max_coord_x, max_coord_y, max_coord_z float64 var min_coord_x, min_coord_y, min_coord_z float64 for ii := 0; ii < n_pixels; ii++ { x := locations[ii*3+0] y := locations[ii*3+1] z := locations[ii*3+2] if ii == 0 || x > max_coord_x { max_coord_x = x } if ii == 0 || y > max_coord_y { max_coord_y = y } if ii == 0 || z > max_coord_z { max_coord_z = z } if ii == 0 || x < min_coord_x { min_coord_x = x } if ii == 0 || y < min_coord_y { min_coord_y = y } if ii == 0 || z < min_coord_z { min_coord_z = z } } return func(bytesIn chan []byte, bytesOut chan []byte, midiState *midi.MidiState) { last_t := 0.0 t := 0.0 for bytes := range bytesIn { var ( // 0 to 1. 0 is large blend, 1 is tiny blend MORPH = float64(midiState.ControllerValues[config.MORPH_KNOB]) / 127.0 HUE = float64(midiState.ControllerValues[config.HUE_KNOB]) / 127.0 SPEED = 0.83 // Overall speed. This is applied in addition to the speed knob. SIDE_SCALE = 1.0 // Horizontal scale (x and y). Smaller numbers compress things horizontally. 
DISPERSAL = 0.2 // how much of a chromatic aberration effect WHITE_WAVE_PERIOD = 0.4 WHITE_WAVE_SPEED = 0.58 // positive is down WHITE_WAVE_THRESH = 0.9 RED_WAVE_PERIOD = 0.4 RED_WAVE_SPEED = 0.2 // positive is down RED_WAVE_THRESH = 0.9 BLEND_PERIOD = 0.3 BLEND_SPEED = -0.33 // positive is down BLEND_THRESH = 0.5*(1-MORPH) + 0.99*MORPH // 1 is red, 0 is white BLEND_THRESH_AMT = 2.0*(1-MORPH) + 5.0*MORPH // contrast amount ) if MORPH < 0.1 { BLEND_SPEED = -BLEND_SPEED } n_pixels := len(bytes) / 3 // time and speed knob bookkeeping this_t := float64(time.Now().UnixNano())/1.0e9 - 9.4e8 speedKnob := float64(midiState.ControllerValues[config.SPEED_KNOB]) / 127.0 if speedKnob < 0.5 { speedKnob = colorutils.RemapAndClamp(speedKnob, 0, 0.4, 0, 1) } else { speedKnob = colorutils.RemapAndClamp(speedKnob, 0.6, 1, 1, 4) } if midiState.KeyVolumes[config.SLOWMO_PAD] > 0 { speedKnob *= 0.25 } if last_t != 0 { t += (this_t - last_t) * speedKnob * SPEED } last_t = this_t // red (secondary) color rBRaw, gBRaw, bBRaw := colorutils.HslToRgb(HUE, 1.0, 0.75) // fill in bytes slice for ii := 0; ii < n_pixels; ii++ { //-------------------------------------------------------------------------------- // make moving stripes for x, y, and z x := locations[ii*3+0] //y := locations[ii*3+1] z := locations[ii*3+2] // scale the height (z) of the layout to fit in the range 0-1 // and scale x and y accordingly z_scale := max_coord_z - min_coord_z if z_scale == 0 { // avoid divide by zero z_scale = 0.05 } xp := x / z_scale / SIDE_SCALE //yp := y / z_scale / SIDE_SCALE zp := (z - min_coord_z) / z_scale // bend space so that things seem to accelerate upwards zp1 := math.Pow(zp+0.02, 2-DISPERSAL) zp2 := math.Pow(zp+0.02, 2) zp3 := math.Pow(zp+0.02, 2+DISPERSAL) if xp < 0 { xp = -xp } // cos: offset, period, min, max // white wave rA := 0.8 * colorutils.ContrastAndClamp(colorutils.Cos2(xp-zp1, t*WHITE_WAVE_SPEED, WHITE_WAVE_PERIOD, 0, 1), WHITE_WAVE_THRESH, 2, 0, 1) gA := 1.0 * 
colorutils.ContrastAndClamp(colorutils.Cos2(xp-zp2, t*WHITE_WAVE_SPEED, WHITE_WAVE_PERIOD, 0, 1), WHITE_WAVE_THRESH, 2, 0, 1) bA := 1.0 * colorutils.ContrastAndClamp(colorutils.Cos2(xp-zp3, t*WHITE_WAVE_SPEED, WHITE_WAVE_PERIOD, 0, 1), WHITE_WAVE_THRESH, 2, 0, 1) // red wave rB := rBRaw * colorutils.ContrastAndClamp(colorutils.Cos2(xp-zp3, t*RED_WAVE_SPEED, RED_WAVE_PERIOD, 0, 1), RED_WAVE_THRESH, 2, 0, 1) gB := gBRaw * colorutils.ContrastAndClamp(colorutils.Cos2(xp-zp2, t*RED_WAVE_SPEED, RED_WAVE_PERIOD, 0, 1), RED_WAVE_THRESH, 2, 0, 1) bB := bBRaw * colorutils.ContrastAndClamp(colorutils.Cos2(xp-zp1, t*RED_WAVE_SPEED, RED_WAVE_PERIOD, 0, 1), RED_WAVE_THRESH, 2, 0, 1) // // accent color // rB = 0.3 //+ colorutils.Cos2(t, 0, 7.30, -0.1, 0.3) // gB = 0.4 //+ colorutils.Cos2(t, 0, 7.37, -0.1, 0.3) // bB = 0.5 //+ colorutils.Cos2(t, 0, 7.43, -0.1, 0.3) blendOffset := t * BLEND_SPEED //blendOffset := colorutils.Cos2(t, 0, 6, -0.8, 0.8) blend := colorutils.ContrastAndClamp(colorutils.Cos2(xp/3-zp, blendOffset, BLEND_PERIOD, 0, 1), BLEND_THRESH, BLEND_THRESH_AMT, 0, 1) bytes[ii*3+0] = colorutils.FloatToByte(rA*blend + rB*(1-blend)) bytes[ii*3+1] = colorutils.FloatToByte(gA*blend + gB*(1-blend)) bytes[ii*3+2] = colorutils.FloatToByte(bA*blend + bB*(1-blend)) //-------------------------------------------------------------------------------- } bytesOut <- bytes } } }
opc/pattern-diamond.go
0.627152
0.576661
pattern-diamond.go
starcoder
package kea import "github.com/MaxSlyugrov/cldr" var calendar = cldr.Calendar{ Formats: cldr.CalendarFormats{ Date: cldr.CalendarDateFormat{Full: "EEEE, d 'di' MMMM 'di' y", Long: "d 'di' MMMM 'di' y", Medium: "d MMM y", Short: "d/M/y"}, Time: cldr.CalendarDateFormat{Full: "HH:mm:ss zzzz", Long: "HH:mm:ss z", Medium: "HH:mm:ss", Short: "HH:mm"}, DateTime: cldr.CalendarDateFormat{Full: "{1} {0}", Long: "{1} {0}", Medium: "{1} {0}", Short: "{1} {0}"}, }, FormatNames: cldr.CalendarFormatNames{ Months: cldr.CalendarMonthFormatNames{ Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "Jan", Feb: "Feb", Mar: "Mar", Apr: "Abr", May: "Mai", Jun: "Jun", Jul: "Jul", Aug: "Ago", Sep: "Set", Oct: "Otu", Nov: "Nuv", Dec: "Diz"}, Narrow: cldr.CalendarMonthFormatNameValue{Jan: "J", Feb: "F", Mar: "M", Apr: "A", May: "M", Jun: "J", Jul: "J", Aug: "A", Sep: "S", Oct: "O", Nov: "N", Dec: "D"}, Short: cldr.CalendarMonthFormatNameValue{}, Wide: cldr.CalendarMonthFormatNameValue{Jan: "Janeru", Feb: "Febreru", Mar: "Marsu", Apr: "Abril", May: "Maiu", Jun: "Junhu", Jul: "Julhu", Aug: "Agostu", Sep: "Setenbru", Oct: "Otubru", Nov: "Nuvenbru", Dec: "Dizenbru"}, }, Days: cldr.CalendarDayFormatNames{ Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "dum", Mon: "sig", Tue: "ter", Wed: "kua", Thu: "kin", Fri: "ses", Sat: "sab"}, Narrow: cldr.CalendarDayFormatNameValue{Sun: "d", Mon: "s", Tue: "t", Wed: "k", Thu: "k", Fri: "s", Sat: "s"}, Short: cldr.CalendarDayFormatNameValue{Sun: "du", Mon: "si", Tue: "te", Wed: "ku", Thu: "ki", Fri: "se", Sat: "sa"}, Wide: cldr.CalendarDayFormatNameValue{Sun: "dumingu", Mon: "sigunda-fera", Tue: "tersa-fera", Wed: "kuarta-fera", Thu: "kinta-fera", Fri: "sesta-fera", Sat: "sabadu"}, }, Periods: cldr.CalendarPeriodFormatNames{ Abbreviated: cldr.CalendarPeriodFormatNameValue{}, Narrow: cldr.CalendarPeriodFormatNameValue{AM: "a", PM: "p"}, Short: cldr.CalendarPeriodFormatNameValue{}, Wide: cldr.CalendarPeriodFormatNameValue{AM: "am", PM: "pm"}, }, }, }
resources/locales/kea/calendar.go
0.510252
0.403626
calendar.go
starcoder
package easing import "math" const pi2 = math.Pi / 2 /** * Easing equation function for a simple linear tweening, with no easing. * * @param t Current time (in frames or seconds). * @return The correct value. */ //func None(t float64) float64 //{ // return t; //} /** * Easing equation function for a quadratic (t^2) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InQuad(t float64) float64 { return t * t } /** * Easing equation function for a quadratic (t^2) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutQuad(t float64) float64 { return -t * (t - 2) } /** * Easing equation function for a quadratic (t^2) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InOutQuad(t float64) float64 { t *= 2.0 if t < 1 { return t * t / 2 } else { t -= 1 return -0.5 * (t*(t-2) - 1) } } /** * Easing equation function for a quadratic (t^2) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutInQuad(t float64) float64 { if t < 0.5 { return OutQuad(t*2) / 2 } return InQuad((2*t)-1)/2 + 0.5 } /** * Easing equation function for a cubic (t^3) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InCubic(t float64) float64 { return t * t * t } /** * Easing equation function for a cubic (t^3) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutCubic(t float64) float64 { t -= 1.0 return t*t*t + 1 } /** * Easing equation function for a cubic (t^3) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). 
* @return The correct value. */ func InOutCubic(t float64) float64 { t *= 2.0 if t < 1 { return 0.5 * t * t * t } else { t -= 2.0 return 0.5 * (t*t*t + 2) } } /** * Easing equation function for a cubic (t^3) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutInCubic(t float64) float64 { if t < 0.5 { return OutCubic(2*t) / 2 } return InCubic(2*t-1)/2 + 0.5 } /** * Easing equation function for a quartic (t^4) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InQuart(t float64) float64 { return t * t * t * t } /** * Easing equation function for a quartic (t^4) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutQuart(t float64) float64 { t -= 1.0 return -(t*t*t*t - 1) } /** * Easing equation function for a quartic (t^4) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InOutQuart(t float64) float64 { t *= 2 if t < 1 { return 0.5 * t * t * t * t } else { t -= 2.0 return -0.5 * (t*t*t*t - 2) } } /** * Easing equation function for a quartic (t^4) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutInQuart(t float64) float64 { if t < 0.5 { return OutQuart(2*t) / 2 } return InQuart(2*t-1)/2 + 0.5 } /** * Easing equation function for a quintic (t^5) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InQuint(t float64) float64 { return t * t * t * t * t } /** * Easing equation function for a quintic (t^5) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. 
*/ func OutQuint(t float64) float64 { t -= 1.0 return t*t*t*t*t + 1 } /** * Easing equation function for a quintic (t^5) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InOutQuint(t float64) float64 { t *= 2.0 if t < 1 { return 0.5 * t * t * t * t * t } else { t -= 2.0 return 0.5 * (t*t*t*t*t + 2) } } /** * Easing equation function for a quintic (t^5) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutInQuint(t float64) float64 { if t < 0.5 { return OutQuint(2*t) / 2 } return InQuint(2*t-1)/2 + 0.5 } /** * Easing equation function for a sinusoidal (sin(t)) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InSine(t float64) float64 { if t == 1.0 { return 1.0 } return -math.Cos(t*pi2) + 1.0 } /** * Easing equation function for a sinusoidal (sin(t)) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutSine(t float64) float64 { return math.Sin(t * pi2) } /** * Easing equation function for a sinusoidal (sin(t)) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InOutSine(t float64) float64 { return -0.5 * (math.Cos(math.Pi*t) - 1) } /** * Easing equation function for a sinusoidal (sin(t)) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutInSine(t float64) float64 { if t < 0.5 { return OutSine(2*t) / 2 } return InSine(2*t-1)/2 + 0.5 } /** * Easing equation function for an exponential (2^t) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. 
*/ func InExpo(t float64) float64 { if t == 0 || t == 1.0 { return t } return math.Pow(2.0, 10*(t-1)) - 0.001 } /** * Easing equation function for an exponential (2^t) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutExpo(t float64) float64 { if t == 1.0 { return 1.0 } return 1.001 * (-math.Pow(2.0, -10*t) + 1) } /** * Easing equation function for an exponential (2^t) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InOutExpo(t float64) float64 { if t == 0.0 { return 0.0 } if t == 1.0 { return 1.0 } t *= 2.0 if t < 1 { return 0.5*math.Pow(2.0, 10*(t-1)) - 0.0005 } return 0.5 * 1.0005 * (-math.Pow(2.0, -10*(t-1)) + 2) } /** * Easing equation function for an exponential (2^t) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutInExpo(t float64) float64 { if t < 0.5 { return OutExpo(2*t) / 2 } return InExpo(2*t-1)/2 + 0.5 } /** * Easing equation function for a circular (sqrt(1-t^2)) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func InCirc(t float64) float64 { return -(math.Sqrt(1-t*t) - 1) } /** * Easing equation function for a circular (sqrt(1-t^2)) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutCirc(t float64) float64 { t -= 1.0 return math.Sqrt(1 - t*t) } /** * Easing equation function for a circular (sqrt(1-t^2)) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @return The correct value. 
*/ func InOutCirc(t float64) float64 { t *= 2.0 if t < 1 { return -0.5 * (math.Sqrt(1-t*t) - 1) } else { t -= 2.0 return 0.5 * (math.Sqrt(1-t*t) + 1) } } /** * Easing equation function for a circular (sqrt(1-t^2)) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @return The correct value. */ func OutInCirc(t float64) float64 { if t < 0.5 { return OutCirc(2*t) / 2 } return InCirc(2*t-1)/2 + 0.5 } func easeInElastic_helper(t, b, c, d, a, p float64) float64 { if t == 0 { return b } t_adj := t / d if t_adj == 1 { return b + c } s := 0.0 if a < math.Abs(c) { a = c s = p / 4.0 } else { s = p / (2 * math.Pi) * math.Asin(c/a) } t_adj -= 1.0 return -(a * math.Pow(2.0, 10*t_adj) * math.Sin((t_adj*d-s)*(2*math.Pi)/p)) + b } /** * Easing equation function for an elastic (exponentially decaying sine wave) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @param p Period. * @return The correct value. */ func InElastic(t, a, p float64) float64 { return easeInElastic_helper(t, 0, 1, 1, a, p) } func easeOutElastic_helper(t, _ /*b*/, c, _ /*d*/, a, p float64) float64 { if t == 0 { return 0 } if t == 1 { return c } s := 0.0 if a < c { a = c s = p / 4.0 } else { s = p / (2 * math.Pi) * math.Asin(c/a) } return (a*math.Pow(2.0, -10*t)*math.Sin((t-s)*(2*math.Pi)/p) + c) } /** * Easing equation function for an elastic (exponentially decaying sine wave) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @param p Period. * @return The correct value. */ func OutElastic(t, a, p float64) float64 { return easeOutElastic_helper(t, 0, 1, 1, a, p) } /** * Easing equation function for an elastic (exponentially decaying sine wave) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @param p Period. 
* @return The correct value. */ func InOutElastic(t, a, p float64) float64 { if t == 0 { return 0.0 } t *= 2.0 if t == 2 { return 1.0 } s := 0.0 if a < 1.0 { a = 1.0 s = p / 4.0 } else { s = p / (2 * math.Pi) * math.Asin(1.0/a) } if t < 1 { return -.5 * (a * math.Pow(2.0, 10*(t-1)) * math.Sin((t-1-s)*(2*math.Pi)/p)) } return a*math.Pow(2.0, -10*(t-1))*math.Sin((t-1-s)*(2*math.Pi)/p)*.5 + 1.0 } /** * Easing equation function for an elastic (exponentially decaying sine wave) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @param p Period. * @return The correct value. */ func OutInElastic(t, a, p float64) float64 { if t < 0.5 { return easeOutElastic_helper(t*2, 0, 0.5, 1.0, a, p) } return easeInElastic_helper(2*t-1.0, 0.5, 0.5, 1.0, a, p) } /** * Easing equation function for a back (overshooting cubic easing: (s+1)*t^3 - s*t^2) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @param s Overshoot ammount: higher s means greater overshoot (0 produces cubic easing with no overshoot, and the default value of 1.70158 produces an overshoot of 10 percent). * @return The correct value. */ func InBack(t, s float64) float64 { return t * t * ((s+1)*t - s) } /** * Easing equation function for a back (overshooting cubic easing: (s+1)*t^3 - s*t^2) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @param s Overshoot ammount: higher s means greater overshoot (0 produces cubic easing with no overshoot, and the default value of 1.70158 produces an overshoot of 10 percent). * @return The correct value. */ func OutBack(t, s float64) float64 { t -= 1.0 return t*t*((s+1)*t+s) + 1 } /** * Easing equation function for a back (overshooting cubic easing: (s+1)*t^3 - s*t^2) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). 
* @param s Overshoot ammount: higher s means greater overshoot (0 produces cubic easing with no overshoot, and the default value of 1.70158 produces an overshoot of 10 percent). * @return The correct value. */ func InOutBack(t, s float64) float64 { t *= 2.0 if t < 1 { s *= 1.525 return 0.5 * (t * t * ((s+1)*t - s)) } else { t -= 2 s *= 1.525 return 0.5 * (t*t*((s+1)*t+s) + 2) } } /** * Easing equation function for a back (overshooting cubic easing: (s+1)*t^3 - s*t^2) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @param s Overshoot ammount: higher s means greater overshoot (0 produces cubic easing with no overshoot, and the default value of 1.70158 produces an overshoot of 10 percent). * @return The correct value. */ func OutInBack(t, s float64) float64 { if t < 0.5 { return OutBack(2*t, s) / 2 } return InBack(2*t-1, s)/2 + 0.5 } func easeOutBounce_helper(t, c, a float64) float64 { if t == 1.0 { return c } else if t < (4 / 11.0) { return c * (7.5625 * t * t) } else if t < (8 / 11.0) { t -= (6 / 11.0) return -a*(1.-(7.5625*t*t+.75)) + c } else if t < (10 / 11.0) { t -= (9 / 11.0) return -a*(1.-(7.5625*t*t+.9375)) + c } else { t -= (21 / 22.0) return -a*(1.-(7.5625*t*t+.984375)) + c } } /** * Easing equation function for a bounce (exponentially decaying parabolic bounce) easing out: decelerating to zero velocity. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @return The correct value. */ func OutBounce(t, a float64) float64 { return easeOutBounce_helper(t, 1, a) } /** * Easing equation function for a bounce (exponentially decaying parabolic bounce) easing in: accelerating from zero velocity. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @return The correct value. 
*/ func InBounce(t, a float64) float64 { return 1.0 - easeOutBounce_helper(1.0-t, 1.0, a) } /** * Easing equation function for a bounce (exponentially decaying parabolic bounce) easing in/out: acceleration until halfway, then deceleration. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @return The correct value. */ func InOutBounce(t, a float64) float64 { if t < 0.5 { return InBounce(2*t, a) / 2 } else if t == 1.0 { return 1.0 } return OutBounce(2*t-1, a)/2 + 0.5 } /** * Easing equation function for a bounce (exponentially decaying parabolic bounce) easing out/in: deceleration until halfway, then acceleration. * * @param t Current time (in frames or seconds). * @param a Amplitude. * @return The correct value. */ func OutInBounce(t, a float64) float64 { if t < 0.5 { return easeOutBounce_helper(t*2, 0.5, a) } return 1.0 - easeOutBounce_helper(2.0-2*t, 0.5, a) } func sinProgress(value float64) float64 { return math.Sin((value*math.Pi)-pi2)/2 + 0.5 } func smoothBeginEndMixFactor(value float64) float64 { return math.Min(math.Max(1-value*2+0.3, 0.0), 1.0) } // SmoothBegin blends Smooth and Linear Interpolation. // Progress 0 - 0.3 : Smooth only // Progress 0.3 - ~ 0.5 : Mix of Smooth and Linear // Progress ~ 0.5 - 1 : Linear only /** * Easing function that starts growing slowly, then incrEases in speed. At the end of the curve the speed will be constant. */ func InCurve(t float64) float64 { sinProgress := sinProgress(t) mix := smoothBeginEndMixFactor(t) return sinProgress*mix + t*(1-mix) } /** * Easing function that starts growing steadily, then ends slowly. The speed will be constant at the beginning of the curve. */ func OutCurve(t float64) float64 { sinProgress := sinProgress(t) mix := smoothBeginEndMixFactor(1 - t) return sinProgress*mix + t*(1-mix) } /** * Easing function where the value grows sinusoidally. Note that the calculated end value will be 0 rather than 1. 
*/ func SineCurve(t float64) float64 { return (math.Sin((t*math.Pi*2)-pi2) + 1) / 2 } /** * Easing function where the value grows cosinusoidally. Note that the calculated start value will be 0.5 and the end value will be 0.5 * contrary to the usual 0 to 1 easing curve. */ func CosineCurve(t float64) float64 { return (math.Cos((t*math.Pi*2)-pi2) + 1) / 2 }
animation/easing/easing.go
0.970933
0.735689
easing.go
starcoder
package matutil import ( "fmt" "gonum.org/v1/gonum/mat" ) // New safely creates a new dense matrix. func New(r, c int, data []float64) (*mat.Dense, error) { var o *mat.Dense if err := safe(func() error { o = mat.NewDense(r, c, data) return nil }); err != nil { return nil, err } return o, nil } // Dot product of 2 matrices. func Dot(m, n mat.Matrix) (d *mat.Dense, err error) { var o mat.Dense if err := safe(func() error { o.Product(m, n) return nil }); err != nil { return nil, err } return &o, nil } // Apply a function to all elements of a matrix. func Apply(fn func(i, j int, v float64) float64, m mat.Matrix) (d *mat.Dense, err error) { var o mat.Dense if err := safe(func() error { o.Apply(fn, m) return nil }); err != nil { return nil, err } return &o, nil } // Scale each element of a matrix. func Scale(s float64, m mat.Matrix) (*mat.Dense, error) { var o mat.Dense if err := safe(func() error { o.Scale(s, m) return nil }); err != nil { return nil, err } return &o, nil } // MulElem each corresponding element of the matrices together. func MulElem(m, n mat.Matrix) (*mat.Dense, error) { var o mat.Dense if err := safe(func() error { o.MulElem(m, n) return nil }); err != nil { return nil, err } return &o, nil } // Add each corresponding element of the matrices together. func Add(m, n mat.Matrix) (*mat.Dense, error) { var o mat.Dense if err := safe(func() error { o.Add(m, n) return nil }); err != nil { return nil, err } return &o, nil } // Sub the corresponding elements of the second matrix from the first. func Sub(m, n mat.Matrix) (*mat.Dense, error) { var o mat.Dense if err := safe(func() error { o.Sub(m, n) return nil }); err != nil { return nil, err } return &o, nil } // FromVector creates a single-column matrix from a vector. func FromVector(v []float64) (*mat.Dense, error) { l := len(v) if l == 0 { return nil, fmt.Errorf("vector length is zero, cannot create matrix") } return New(l, 1, v) } // ToVector creates a vector from a single-column matrix. 
func ToVector(m mat.Matrix) (v []float64, err error) { if m == nil { return nil, fmt.Errorf("matrix cannot be nil") } if err := safe(func() error { r, c := m.Dims() if c != 1 { return fmt.Errorf("matrix must have a single column to convert to a vector, but has %d", c) } v = make([]float64, r) for i := r - 1; i >= 0; i-- { v[i] = m.At(i, 0) } return nil }); err != nil { return nil, err } return v, nil } func safe(f func() error) (err error) { defer func() { if r := recover(); r != nil { err = fmt.Errorf("%s", r) } }() return f() }
matutil/dense.go
0.777975
0.511839
dense.go
starcoder
package set import ( "fmt" "reflect" ) const minSize = 8 // Set is set collection of general type. // The zero value of Set is an empty instance ready to use. A zero Set // value shall not be copied, or it may result incorrect behavior. type Set struct { m map[interface{}]struct{} } // NewSet creates a Set instance and add the given values into the set. // If given only one param which is a slice, the elements of the slice // will be added into the set using reflection. func NewSet(vals ...interface{}) Set { size := max(len(vals), minSize) set := Set{ m: make(map[interface{}]struct{}, size), } if len(vals) == 1 && reflect.TypeOf(vals[0]).Kind() == reflect.Slice { values := reflect.ValueOf(vals[0]) for i := 0; i < values.Len(); i++ { set.m[values.Index(i).Interface()] = struct{}{} } } else { set.Add(vals...) } return set } // NewSetWithSize creates Set instance with given initial size. func NewSetWithSize(size int) Set { set := Set{ m: make(map[interface{}]struct{}, size), } return set } // Size returns the size of the set. func (s Set) Size() int { return len(s.m) } // Add adds the given values into the set. // If given only one param and which is a slice, the elements of the slice // will be added into the set using reflection. func (s *Set) Add(vals ...interface{}) { if s.m == nil { size := max(len(vals), minSize) s.m = make(map[interface{}]struct{}, size) } if len(vals) == 1 && reflect.TypeOf(vals[0]).Kind() == reflect.Slice { values := reflect.ValueOf(vals[0]) for i := 0; i < values.Len(); i++ { s.m[values.Index(i).Interface()] = struct{}{} } return } for idx := range vals { s.m[vals[idx]] = struct{}{} } } // Del deletes values from the set. 
func (s *Set) Del(vals ...interface{}) { if len(vals) == 1 && reflect.TypeOf(vals[0]).Kind() == reflect.Slice { values := reflect.ValueOf(vals[0]) for i := 0; i < values.Len(); i++ { delete(s.m, values.Index(i).Interface()) } return } for idx := range vals { delete(s.m, vals[idx]) } } // Pop pops an element from the set, in no particular order. func (s *Set) Pop() interface{} { for val := range s.m { delete(s.m, val) return val } return nil } // Iterate iterates the set in no particular order and call the given // function for each set element. func (s Set) Iterate(fn func(interface{})) { for val := range s.m { fn(val) } } // Contains returns true if the set contains all the values. func (s Set) Contains(vals ...interface{}) bool { if len(vals) == 0 { return false } for _, v := range vals { if _, ok := s.m[v]; !ok { return false } } return true } // ContainsAny returns true if the set contains any of the values. func (s Set) ContainsAny(vals ...interface{}) bool { for _, v := range vals { if _, ok := s.m[v]; ok { return true } } return false } // Diff returns new Set about the values which other sets don't contain. func (s Set) Diff(other Set) Set { res := NewSetWithSize(s.Size()) for val := range s.m { if _, ok := other.m[val]; !ok { res.m[val] = struct{}{} } } return res } // DiffSlice is similar to Diff, but takes a slice as parameter. // Param other must be a slice of []interface{} or slice of the concrete // element type, else it panics. 
func (s Set) DiffSlice(other interface{}) Set { otherTyp := reflect.TypeOf(other) if otherTyp == nil || otherTyp.Kind() != reflect.Slice { panic(fmt.Sprintf("invalid other type %T", other)) } otherVal := reflect.ValueOf(other) otherLen := otherVal.Len() if len(s.m) > otherLen { tmp := NewSetWithSize(otherLen) dup := 0 for i := 0; i < otherLen; i++ { val := otherVal.Index(i).Interface() if _, ok := s.m[val]; ok { dup++ } tmp.m[val] = struct{}{} } res := NewSetWithSize(max(s.Size()-dup, 0)) for val := range s.m { if _, ok := tmp.m[val]; !ok { res.m[val] = struct{}{} } } return res } else { res := NewSetWithSize(s.Size()) for val := range s.m { res.m[val] = struct{}{} } for i := 0; i < otherLen; i++ { val := otherVal.Index(i).Interface() if _, ok := res.m[val]; ok { delete(res.m, val) } } return res } } // FilterInclude returns a new slice which contains values that present in // the provided slice and also present in the Set. // Param slice must be a slice of []interface{} or slice of the concrete // element type, else it panics. func (s Set) FilterInclude(slice interface{}) interface{} { sliceTyp := reflect.TypeOf(slice) if sliceTyp == nil || sliceTyp.Kind() != reflect.Slice { panic(fmt.Sprintf("invalid slice type %T", slice)) } sliceVal := reflect.ValueOf(slice) sliceLen := sliceVal.Len() res := reflect.MakeSlice(sliceTyp, 0, min(s.Size(), sliceLen)) for i := 0; i < sliceLen; i++ { val := sliceVal.Index(i) if _, ok := s.m[val.Interface()]; ok { res = reflect.Append(res, val) } } return res.Interface() } // FilterExclude returns a new slice which contains values that present in // the provided slice but don't present in the Set. // Param slice must be a slice of []interface{} or slice of the concrete // element type, else it panics. 
func (s Set) FilterExclude(slice interface{}) interface{} { sliceTyp := reflect.TypeOf(slice) if sliceTyp == nil || sliceTyp.Kind() != reflect.Slice { panic(fmt.Sprintf("invalid slice type %T", slice)) } sliceVal := reflect.ValueOf(slice) sliceLen := sliceVal.Len() res := reflect.MakeSlice(sliceTyp, 0, sliceLen) for i := 0; i < sliceLen; i++ { val := sliceVal.Index(i) if _, ok := s.m[val.Interface()]; !ok { res = reflect.Append(res, val) } } return res.Interface() } // Intersect returns new Set about values which other set also contains. func (s Set) Intersect(other Set) Set { res := NewSetWithSize(min(s.Size(), other.Size())) // loop over the smaller set if len(s.m) <= len(other.m) { for val := range s.m { if _, ok := other.m[val]; ok { res.m[val] = struct{}{} } } } else { for val := range other.m { if _, ok := s.m[val]; ok { res.m[val] = struct{}{} } } } return res } // IntersectSlice is similar to Intersect, but takes a slice as parameter. // Param other must be a slice of []interface{} or slice of the concrete // element type, else it panics. func (s Set) IntersectSlice(other interface{}) Set { otherTyp := reflect.TypeOf(other) if otherTyp == nil || otherTyp.Kind() != reflect.Slice { panic(fmt.Sprintf("invalid other type %T", other)) } otherVal := reflect.ValueOf(other) otherLen := otherVal.Len() res := NewSetWithSize(min(s.Size(), otherLen)) for i := 0; i < otherLen; i++ { val := otherVal.Index(i).Interface() if _, ok := s.m[val]; ok { res.m[val] = struct{}{} } } return res } // Union returns new Set about values either in the set or the other set. func (s Set) Union(other Set) Set { res := NewSetWithSize(s.Size() + other.Size()) for val := range s.m { res.m[val] = struct{}{} } for val := range other.m { res.m[val] = struct{}{} } return res } // UnionSlice is similar to Union, but takes a slice as parameter. // Param other must be a slice of []interface{} or slice of the concrete // element type, else it panics. 
func (s Set) UnionSlice(other interface{}) Set { otherTyp := reflect.TypeOf(other) if otherTyp == nil || otherTyp.Kind() != reflect.Slice { panic(fmt.Sprintf("invalid other type %T", other)) } otherVal := reflect.ValueOf(other) otherLen := otherVal.Len() res := NewSetWithSize(s.Size() + otherLen) for val := range s.m { res.m[val] = struct{}{} } for i := 0; i < otherLen; i++ { val := otherVal.Index(i).Interface() res.m[val] = struct{}{} } return res } // Slice converts set into a []interface{} slice. func (s Set) Slice() []interface{} { res := make([]interface{}, 0, len(s.m)) for val := range s.m { res = append(res, val) } return res } // Map converts set into map[interface{}]bool. func (s Set) Map() map[interface{}]bool { res := make(map[interface{}]bool, len(s.m)) for val := range s.m { res[val] = true } return res } func min(a, b int) int { if a < b { return a } return b } func max(a, b int) int { if a > b { return a } return b }
set/set.go
0.728362
0.562657
set.go
starcoder
package holtwinters // This holt-winters code copied from graphite's functions.py) // It's "mostly" the same as a standard HW forecast import ( "math" ) func holtWintersIntercept(alpha, actual, lastSeason, lastIntercept, lastSlope float64) float64 { return alpha*(actual-lastSeason) + (1-alpha)*(lastIntercept+lastSlope) } func holtWintersSlope(beta, intercept, lastIntercept, lastSlope float64) float64 { return beta*(intercept-lastIntercept) + (1-beta)*lastSlope } func holtWintersSeasonal(gamma, actual, intercept, lastSeason float64) float64 { return gamma*(actual-intercept) + (1-gamma)*lastSeason } func holtWintersDeviation(gamma, actual, prediction, lastSeasonalDev float64) float64 { if math.IsNaN(prediction) { prediction = 0 } return gamma*math.Abs(actual-prediction) + (1-gamma)*lastSeasonalDev } // HoltWintersAnalysis do Holt-Winters Analysis func HoltWintersAnalysis(series []float64, step int64) ([]float64, []float64) { const ( alpha = 0.1 beta = 0.0035 gamma = 0.1 ) // season is currently one day seasonLength := 24 * 60 * 60 / int(step) var ( intercepts []float64 slopes []float64 seasonals []float64 predictions []float64 deviations []float64 ) getLastSeasonal := func(i int) float64 { j := i - seasonLength if j >= 0 { return seasonals[j] } return 0 } getLastDeviation := func(i int) float64 { j := i - seasonLength if j >= 0 { return deviations[j] } return 0 } var nextPred = math.NaN() for i, actual := range series { if math.IsNaN(actual) { // missing input values break all the math // do the best we can and move on intercepts = append(intercepts, math.NaN()) slopes = append(slopes, 0) seasonals = append(seasonals, 0) predictions = append(predictions, nextPred) deviations = append(deviations, 0) nextPred = math.NaN() continue } var ( lastSlope float64 lastIntercept float64 prediction float64 ) if i == 0 { lastIntercept = actual lastSlope = 0 // seed the first prediction as the first actual prediction = actual } else { lastIntercept = intercepts[len(intercepts)-1] 
lastSlope = slopes[len(slopes)-1] if math.IsNaN(lastIntercept) { lastIntercept = actual } prediction = nextPred } lastSeasonal := getLastSeasonal(i) nextLastSeasonal := getLastSeasonal(i + 1) lastSeasonalDev := getLastDeviation(i) intercept := holtWintersIntercept(alpha, actual, lastSeasonal, lastIntercept, lastSlope) slope := holtWintersSlope(beta, intercept, lastIntercept, lastSlope) seasonal := holtWintersSeasonal(gamma, actual, intercept, lastSeasonal) nextPred = intercept + slope + nextLastSeasonal deviation := holtWintersDeviation(gamma, actual, prediction, lastSeasonalDev) intercepts = append(intercepts, intercept) slopes = append(slopes, slope) seasonals = append(seasonals, seasonal) predictions = append(predictions, prediction) deviations = append(deviations, deviation) } return predictions, deviations } // HoltWintersConfidenceBands do Holt-Winters Confidence Bands func HoltWintersConfidenceBands(series []float64, step int64, delta float64, days int64) ([]float64, []float64) { var lowerBand, upperBand []float64 predictions, deviations := HoltWintersAnalysis(series, step) windowPoints := int(days * 86400 / step) var ( predictionsOfInterest []float64 deviationsOfInterest []float64 ) if len(predictions) < windowPoints || len(deviations) < windowPoints { predictionsOfInterest = predictions deviationsOfInterest = deviations } else { predictionsOfInterest = predictions[windowPoints:] deviationsOfInterest = deviations[windowPoints:] } for i := range predictionsOfInterest { if math.IsNaN(predictionsOfInterest[i]) || math.IsNaN(deviationsOfInterest[i]) { lowerBand = append(lowerBand, math.NaN()) upperBand = append(upperBand, math.NaN()) } else { scaledDeviation := delta * deviationsOfInterest[i] lowerBand = append(lowerBand, predictionsOfInterest[i]-scaledDeviation) upperBand = append(upperBand, predictionsOfInterest[i]+scaledDeviation) } } return lowerBand, upperBand }
expr/holtwinters/hw.go
0.757525
0.561515
hw.go
starcoder
package optimus import ( "fmt" "math" "go.uber.org/zap" ) type GreedyLinearRegressionModelConfig struct { WeightLimit float64 `yaml:"weight_limit" default:"1e-3"` ExhaustionLimit int `yaml:"exhaustion_limit" default:"128"` Model regressionModelFactory `yaml:"regression"` } type GreedyLinearRegressionModelFactory struct { GreedyLinearRegressionModelConfig } func (m *GreedyLinearRegressionModelFactory) Config() interface{} { return &m.GreedyLinearRegressionModelConfig } func (m *GreedyLinearRegressionModelFactory) Create(orders, matchedOrders []*MarketOrder, log *zap.SugaredLogger) OptimizationMethod { return &GreedyLinearRegressionModel{ orders: orders, regression: &regressionClassifier{ model: m.Model.Create(log), }, exhaustionLimit: m.ExhaustionLimit, log: log.With(zap.String("model", "LLS")), } } // GreedyLinearRegressionModel implements greedy knapsack optimization // algorithm. // The basic idea is to train the model using BID orders from the marketplace // by optimizing multidimensional linear regression over order benchmarks to // reduce the number of parameters to a single one - predicted price. This // price can be used to assign weights to orders to be able to determine which // orders are better to buy than others. type GreedyLinearRegressionModel struct { orders []*MarketOrder regression OrderClassifier exhaustionLimit int log *zap.SugaredLogger } func (m *GreedyLinearRegressionModel) Optimize(knapsack *Knapsack, orders []*MarketOrder) error { if len(m.orders) <= minNumOrders { return fmt.Errorf("not enough orders to perform optimization") } weightedOrders, err := m.regression.Classify(m.orders) if err != nil { return fmt.Errorf("failed to classify orders: %v", err) } // Here we create an index of matching orders to be able to filter // the entire training set for only interesting features. 
filter := map[string]struct{}{} for _, order := range orders { filter[order.GetOrder().GetId().Unwrap().String()] = struct{}{} } exhaustedCounter := 0 for _, weightedOrder := range weightedOrders { // Ignore orders with too low relative weight, i.e. orders that have // quotient of its price to predicted price less than 1%. // It may be, for example, when an order has 0 price. // TODO: For now not sure where to perform this filtering. Let it be here. if math.Abs(weightedOrder.Weight) < 0.01 { m.log.Debugf("ignore `%s` order - weight too low: %.6f", weightedOrder.ID().String(), weightedOrder.Weight) continue } if _, ok := filter[weightedOrder.ID().String()]; !ok { continue } if exhaustedCounter >= m.exhaustionLimit { break } order := weightedOrder.Order.Order m.log.Debugw("trying to put an order into resources pool", zap.Any("order", *weightedOrder.Order), zap.Float64("weight", weightedOrder.Weight), zap.String("price", order.Price.ToPriceString()), zap.Float64("predictedPrice", weightedOrder.PredictedPrice), ) switch err := knapsack.Put(order); err { case nil: case errExhausted: exhaustedCounter += 1 continue default: return fmt.Errorf("failed to consume order: %v", err) } } return nil }
optimus/engine_greedy.go
0.765769
0.415492
engine_greedy.go
starcoder
package tensor type maskedReduceFn func(Tensor) interface{} // MaskedReduce applies a reduction function of type maskedReduceFn to mask, and returns // either an int, or another array func MaskedReduce(t *Dense, retType Dtype, fn maskedReduceFn, axis ...int) interface{} { if len(axis) == 0 || t.IsVector() { return fn(t) } ax := axis[0] if ax >= t.Dims() { return -1 } // create object to be used for slicing slices := make([]Slice, t.Dims()) // calculate shape of tensor to be returned slices[ax] = makeRS(0, 0) tt, _ := t.Slice(slices...) ts := tt.(*Dense) retVal := NewDense(retType, ts.shape) //retVal is array to be returned it := NewIterator(retVal.Info()) // iterate through retVal slices[ax] = makeRS(0, t.shape[ax]) for _, err := it.Next(); err == nil; _, err = it.Next() { coord := it.Coord() k := 0 for d := range slices { if d != ax { slices[d] = makeRS(coord[k], coord[k]+1) k++ } else { slices[d] = nil } } tt, _ = t.Slice(slices...) ts = tt.(*Dense) retVal.SetAt(fn(ts), coord...) } return retVal } // MaskedAny returns True if any mask elements evaluate to True. // If object is not masked, returns false // !!! Not the same as numpy's, which looks at data elements and not at mask // Instead, equivalent to numpy ma.getmask(t).any(axis) func (t *Dense) MaskedAny(axis ...int) interface{} { return MaskedReduce(t, Bool, doMaskAny, axis...) } // MaskedAll returns True if all mask elements evaluate to True. // If object is not masked, returns false // !!! Not the same as numpy's, which looks at data elements and not at mask // Instead, equivalent to numpy ma.getmask(t).all(axis) func (t *Dense) MaskedAll(axis ...int) interface{} { return MaskedReduce(t, Bool, doMaskAll, axis...) } // MaskedCount counts the masked elements of the array (optionally along the given axis) // returns -1 if axis out of bounds func (t *Dense) MaskedCount(axis ...int) interface{} { return MaskedReduce(t, Int, doMaskCt, axis...) 
} // NonMaskedCount counts the non-masked elements of the array (optionally along the given axis) // returns -1 if axis out of bounds // MaskedCount counts the masked elements of the array (optionally along the given axis) // returns -1 if axis out of bounds func (t *Dense) NonMaskedCount(axis ...int) interface{} { return MaskedReduce(t, Int, doNonMaskCt, axis...) } func doMaskAll(T Tensor) interface{} { switch t := T.(type) { case *Dense: if !t.IsMasked() { return false } m := t.mask if len(t.mask) == t.Size() { for _, v := range m { if !v { return false } } } else { it := IteratorFromDense(t) i, _, _ := it.NextValid() if i != -1 { return false } } return true default: panic("Incompatible type") } } func doMaskAny(T Tensor) interface{} { switch t := T.(type) { case *Dense: if !t.IsMasked() { return false } m := t.mask if len(t.mask) == t.Size() { for _, v := range m { if v { return true } } } else { it := IteratorFromDense(t) i, _, _ := it.NextInvalid() if i != -1 { return true } } return false default: panic("Incompatible type") } } func doMaskCt(T Tensor) interface{} { switch t := T.(type) { case *Dense: // non masked case if !t.IsMasked() { return 0 } count := 0 m := t.mask if len(t.mask) == t.Size() { for _, v := range m { if v { count++ } } } else { it := IteratorFromDense(t) for _, _, err := it.NextInvalid(); err == nil; _, _, err = it.NextInvalid() { count++ } } return count default: panic("Incompatible type") } } func doNonMaskCt(T Tensor) interface{} { switch t := T.(type) { case *Dense: if !t.IsMasked() { return t.Size() } return t.Size() - doMaskCt(t).(int) default: panic("Incompatible type") } } /* ----------- ************ Finding masked data ----------*/ // FlatNotMaskedContiguous is used to find contiguous unmasked data in a masked array. // Applies to a flattened version of the array. // Returns:A sorted sequence of slices (start index, end index). 
func (t *Dense) FlatNotMaskedContiguous() []Slice { sliceList := make([]Slice, 0, 4) it := IteratorFromDense(t) for start, _, err := it.NextValid(); err == nil; start, _, err = it.NextValid() { end, _, _ := it.NextInvalid() if end == -1 { end = t.Size() } sliceList = append(sliceList, makeRS(start, end)) } return sliceList } // FlatMaskedContiguous is used to find contiguous masked data in a masked array. // Applies to a flattened version of the array. // Returns:A sorted sequence of slices (start index, end index). func (t *Dense) FlatMaskedContiguous() []Slice { sliceList := make([]Slice, 0, 4) it := IteratorFromDense(t) for start, _, err := it.NextInvalid(); err == nil; start, _, err = it.NextInvalid() { end, _, _ := it.NextValid() if end == -1 { end = t.Size() } sliceList = append(sliceList, makeRS(start, end)) } return sliceList } // FlatNotMaskedEdges is used to find the indices of the first and last unmasked values // Applies to a flattened version of the array. // Returns: A pair of ints. -1 if all values are masked. func (t *Dense) FlatNotMaskedEdges() (int, int) { if !t.IsMasked() { return 0, t.Size() - 1 } var start, end int it := IteratorFromDense(t) it.SetForward() start, _, err := it.NextValid() if err != nil { return -1, -1 } it.SetReverse() end, _, _ = it.NextValid() return start, end } // FlatMaskedEdges is used to find the indices of the first and last masked values // Applies to a flattened version of the array. // Returns: A pair of ints. -1 if all values are unmasked. 
func (t *Dense) FlatMaskedEdges() (int, int) { if !t.IsMasked() { return 0, t.Size() - 1 } var start, end int it := IteratorFromDense(t) it.SetForward() start, _, err := it.NextInvalid() if err != nil { return -1, -1 } it.SetReverse() end, _, _ = it.NextInvalid() return start, end } // ClumpMasked returns a list of slices corresponding to the masked clumps of a 1-D array // Added to match numpy function names func (t *Dense) ClumpMasked() []Slice { return t.FlatMaskedContiguous() } // ClumpUnmasked returns a list of slices corresponding to the unmasked clumps of a 1-D array // Added to match numpy function names func (t *Dense) ClumpUnmasked() []Slice { return t.FlatNotMaskedContiguous() }
dense_mask_inspection.go
0.876291
0.544438
dense_mask_inspection.go
starcoder
package cflag import ( "flag" "time" "github.com/goaltools/xflag/cflag/types" ) // Strings is an equivalent of flag.String but for []string value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of a string // slice variable that stores the value of the flag. func Strings(name string, value []string, usage string) *[]string { p := &types.Strings{Value: value} flag.Var(p, name, usage) return &p.Value } // Ints is an equivalent of flag.Int but for []int value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of an int // slice variable that stores the value of the flag. func Ints(name string, value []int, usage string) *[]int { p := &types.Ints{Value: value} flag.Var(p, name, usage) return &p.Value } // Int64s is an equivalent of flag.Int64 but for []int64 value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of an int64 // slice variable that stores the value of the flag. func Int64s(name string, value []int64, usage string) *[]int64 { p := &types.Int64s{Value: value} flag.Var(p, name, usage) return &p.Value } // Uints is an equivalent of flag.Uint but for []uint value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of a uint // slice variable that stores the value of the flag. func Uints(name string, value []uint, usage string) *[]uint { p := &types.Uints{Value: value} flag.Var(p, name, usage) return &p.Value } // Uint64s is an equivalent of flag.Uint64 but for []uint64 value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of a uint64 // slice variable that stores the value of the flag. 
func Uint64s(name string, value []uint64, usage string) *[]uint64 { p := &types.Uint64s{Value: value} flag.Var(p, name, usage) return &p.Value } // Float64s is an equivalent of flag.Float64 but for []float64 value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of a float64 // slice variable that stores the value of the flag. func Float64s(name string, value []float64, usage string) *[]float64 { p := &types.Float64s{Value: value} flag.Var(p, name, usage) return &p.Value } // Bools is an equivalent of flag.Bool but for []bool value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of a bool // slice variable that stores the value of the flag. func Bools(name string, value []bool, usage string) *[]bool { p := &types.Bools{Value: value} flag.Var(p, name, usage) return &p.Value } // Durations is an equivalent of flag.Duration but for []time.Duration value. // It defines a slice flag with the specified name, default value, // and usage string. The returned value is the address of a time.Duration // slice variable that stores the value of the flag. func Durations(name string, value []time.Duration, usage string) *[]time.Duration { p := &types.Durations{Value: value} flag.Var(p, name, usage) return &p.Value }
cflag/slices.go
0.728748
0.454775
slices.go
starcoder
package transcoder import ( "fmt" "v.io/v23/vdl" ) // an array where the index corresponds to the bit index in the allocation // and the value is the tag (here representing vdlIndex+1) type structBitAllocation []int // allocateStructBits performs the naive allocation of fields in the struct, // literally laying out all bits in an array - each associated with a tag // and finding the first spot where a block of the given size fits (given // alignment constraints) func allocateStructBits(a structBitAllocation, tag, size int) structBitAllocation { lenActive := 0 // Scan the given array for a run of |size| empty locations. // If found, fill that section with tag. for i, v := range a { if v == 0 { lenActive++ } else { lenActive = 0 } if i%size == size-1 && lenActive >= size { for j := i - size + 1; j <= i; j++ { a[j] = tag } return a } } // If there isn't a sufficiently large empty location, allocate a new aligned block. paddingAmt := size - len(a)%size if paddingAmt == size { paddingAmt = 0 } for i := 0; i < paddingAmt; i++ { a = append(a, 0) } // Now len(a) % size == 0 for i := 0; i < size; i++ { a = append(a, tag) } return a } type structLayoutField struct { vdlStructIndex int // the index in the vdl value of this field byteOffset uint32 // byte offset from the beginning of the mojom byte array bitOffset uint8 // bit offset [0,8) } // computeStructLayout computes a representation of the fields in a struct, as // a list ordered by mojom byte field order. 
func computeStructLayout(t *vdl.Type) (layout structLayout) { a := structBitAllocation{} for i := 0; i < t.NumField(); i++ { bits := baseTypeSizeBits(t.Field(i).Type) a = allocateStructBits(a, i+1, int(bits)) } lastVal := 0 for i, v := range a { if v != lastVal && v != 0 { layout = append(layout, structLayoutField{ vdlStructIndex: v - 1, byteOffset: uint32(i / 8), bitOffset: uint8(i % 8), }) lastVal = v } } return } type structLayout []structLayoutField func (s structLayout) MojoOffsetsFromVdlIndex(vdlIndex int) (byteOffset uint32, bitOffset uint8) { for _, alloc := range s { if alloc.vdlStructIndex == vdlIndex { return alloc.byteOffset, alloc.bitOffset } } panic(fmt.Sprintf("unknown vdl index %d (layout %v) -- this should never happen", vdlIndex, s)) }
go/src/v.io/x/mojo/transcoder/struct_layout.go
0.664214
0.460956
struct_layout.go
starcoder
package ciphertools type Cipher struct { p [18]uint32 s0, s1, s2, s3 [256]uint32 Buffer []byte } func (c *Cipher) initCipher() { copy(c.p[0:], p[0:]) copy(c.s0[0:], s0[0:]) copy(c.s1[0:], s1[0:]) copy(c.s2[0:], s2[0:]) copy(c.s3[0:], s3[0:]) c.Buffer = make([]byte, 8) } func NewCipher(key []byte) (*Cipher, error) { var result Cipher if k := len(key); k < 1 || k > 56 { return nil, KeySizeError(k) } result.initCipher() result.expandKey(key) return &result, nil } func (c *Cipher) f(x uint32, p uint32) uint32 { return ((c.s0[byte(x >> 24)] + c.s1[byte(x >> 16)]) ^ c.s2[byte(x >> 8)]) + c.s3[byte(x)] ^ p } func (c Cipher) encryptBlock(left, right uint32) (uint32, uint32) { resultLeft, resultRight := left, right resultLeft ^= c.p[0] for i := 1; i < 17; i += 2 { resultRight ^= c.f(resultLeft, c.p[i]) resultLeft ^= c.f(resultRight, c.p[i + 1]) } resultRight ^= c.p[17] return resultRight, resultLeft } func (c Cipher) decryptBlock(left, right uint32) (uint32, uint32) { resultLeft, resultRight := left, right resultLeft ^= c.p[17] for i := 16; i >= 1; i -= 2 { resultRight ^= c.f(resultLeft, c.p[i]) resultLeft ^= c.f(resultRight, c.p[i - 1]) } resultRight ^= c.p[0] return resultRight, resultLeft } func slicesToArray(left, right uint32) []byte { result := make([]byte, 8) result[0], result[1], result[2], result[3] = byte(left >> 24), byte(left >> 16), byte(left >> 8), byte(left) result[4], result[5], result[6], result[7] = byte(right >> 24), byte(right >> 16), byte(right >> 8), byte(right) return result } func (c Cipher) Encrypt(block []byte) []byte { left := uint32(block[0]) << 24 | uint32(block[1]) << 16 | uint32(block[2]) << 8 | uint32(block[3]) right := uint32(block[4]) << 24 | uint32(block[5]) << 16 | uint32(block[6]) << 8 | uint32(block[7]) left, right = c.encryptBlock(left, right) return slicesToArray(c.encryptBlock(left, right)) } func (c Cipher) Decrypt(block []byte) []byte { left := uint32(block[0]) << 24 | uint32(block[1]) << 16 | uint32(block[2]) << 8 | 
uint32(block[3]) right := uint32(block[4]) << 24 | uint32(block[5]) << 16 | uint32(block[6]) << 8 | uint32(block[7]) left, right = c.decryptBlock(left, right) return slicesToArray(c.decryptBlock(left, right)) }
blowfish/ciphertools/cipher.go
0.796015
0.449695
cipher.go
starcoder
package util import ( "fmt" "math" ) // IntersectionTValsP obtains values of t for each line for where they intersect. Actual intersection => // both are in [0,1] func IntersectionTValsP(p1, p2, p3, p4 []float64) ([]float64, error) { return IntersectionTVals(p1[0], p1[1], p2[0], p2[1], p3[0], p3[1], p4[0], p4[1]) } // IntersectionTVals obtains values of t for each line for where they intersect. Actual intersection => // both are in [0,1] func IntersectionTVals(x1, y1, x2, y2, x3, y3, x4, y4 float64) ([]float64, error) { x21 := x2 - x1 x43 := x4 - x3 y21 := y2 - y1 y43 := y4 - y3 d := (y43 * x21) - (x43 * y21) if Equals(d, 0) { return nil, fmt.Errorf("parallel or coincident") } x13 := x1 - x3 y13 := y1 - y3 t12 := ((x43 * y13) - (y43 * x13)) / d t34 := ((x21 * y13) - (y21 * x13)) / d return []float64{t12, t34}, nil } // %f formats to 6dp by default const ( Epsilon float64 = 0.000001 // 1:1,000,000 ) // EqualsP returns true if two points are equal. func EqualsP(v1, v2 []float64) bool { v1l := len(v1) if v1l != len(v2) { return false } for i := 0; i < v1l; i++ { if !Equals(v1[i], v2[i]) { return false } } return true } // Equals returns true if two values are within Epsilon of each other. func Equals(d1, d2 float64) bool { return Within(d1, d2, Epsilon) } // Equals32 is the float32 version of Equals. func Equals32(d1, d2 float32) bool { return Within(float64(d1), float64(d2), Epsilon) } // Within returns true if the two values are within e of each other. func Within(d1, d2, e float64) bool { d := d1 - d2 if d < 0.0 { d = -d } return d < e } // DistanceESquared returns the squared Euclidean distance between two points. func DistanceESquared(p1, p2 []float64) float64 { dx, dy := p2[0]-p1[0], p2[1]-p1[1] return dx*dx + dy*dy } // DistanceESquaredN returns the squared Euclidean distance between two points. 
func DistanceESquaredN(p1, p2 []float64) float64 { var sum float64 for i := 0; i < MinD(p1, p2); i++ { diff := p2[i] - p1[i] sum += diff * diff } return sum } // DistanceE returns the Euclidean distance between two points. func DistanceE(p1, p2 []float64) float64 { return math.Sqrt(DistanceESquared(p1, p2)) } // DistanceToLineSquared calculates the squared Euclidean length of the normal from a point to // the line. func DistanceToLineSquared(lp1, lp2, p []float64) float64 { dx := lp2[0] - lp1[0] dy := lp2[1] - lp1[1] // Check for line degeneracy if Equals(0, dx) && Equals(0, dy) { return DistanceESquared(lp1, p) } qx := p[0] + dy qy := p[1] - dx ts, err := IntersectionTVals(lp1[0], lp1[1], lp2[0], lp2[1], p[0], p[1], qx, qy) if err != nil { return DistanceESquared(lp1, p) } dx = Lerp(ts[1], p[0], qx) - p[0] dy = Lerp(ts[1], p[1], qy) - p[1] return dx*dx + dy*dy } // SideOfLine calculates which side of a line a point is one by calculating the dot product of the // vector from the line start to the point with the line's normal. If +ve then one side, -ve the other, // 0 - on the line. func SideOfLine(lp1, lp2, p []float64) float64 { return (p[0]-lp1[0])*(lp2[1]-lp1[1]) - (p[1]-lp1[1])*(lp2[0]-lp1[0]) } // ToF64 casts a slice of float32 to float64. func ToF64(pts ...float32) []float64 { res := make([]float64, len(pts)) for i, v := range pts { res[i] = float64(v) } return res } // ToF32 casts a slice of float64 to float32. Possible loss of resolution. func ToF32(pts ...float64) []float32 { res := make([]float32, len(pts)) for i, v := range pts { res[i] = float32(v) } return res } // Centroid returns the centroid of a set of points. func Centroid(pts ...[]float64) []float64 { n := len(pts) if n == 0 { return nil } d := MinD(pts...) 
res := make([]float64, d) // Sum for _, pt := range pts { for i, v := range pt { if i > d-1 { break } res[i] += v } } // Scale for i := 0; i < d; i++ { res[i] /= float64(n) } return res } // CrossProduct returns the cross product of the three points. func CrossProduct(p1, p2, p3 []float64) float64 { return (p3[0]-p1[0])*(p2[1]-p1[1]) - (p3[1]-p1[1])*(p2[0]-p1[0]) } // DotProduct returns the dot product of the two lines, p1-p2 and p3-p4. func DotProduct(p1, p2, p3, p4 []float64) float64 { return (p2[0]-p1[0])*(p4[0]-p3[0]) + (p2[1]-p1[1])*(p4[1]-p3[1]) } // Vec returns the vector joining two points. func Vec(p1, p2 []float64) []float64 { return []float64{p2[0] - p1[0], p2[1] - p1[1]} } // VecMag returns the magnitude of the vector. func VecMag(v []float64) float64 { return math.Sqrt(v[0]*v[0] + v[1]*v[1]) } // VecNormalize scales a vector to unit length. func VecNormalize(v []float64) []float64 { d := VecMag(v) return []float64{v[0] / d, v[1] / d} } // LineAngle returns the angle of a line. func LineAngle(p1, p2 []float64) float64 { return math.Atan2(p2[1]-p1[1], p2[0]-p1[0]) } // AngleBetweenLines using Atan2 vs calculating the dot product (2xSqrt+Acos). // Retains the directionality of the rotation from l1 to l2, unlike dot product. func AngleBetweenLines(p1, p2, p3, p4 []float64) float64 { a1 := LineAngle(p1, p2) a2 := LineAngle(p3, p4) da := a2 - a1 if da < -math.Pi { da += 2 * math.Pi } else if da > math.Pi { da -= 2 * math.Pi } return da } // MinD calculates the minimum dimensionality of point set func MinD(pts ...[]float64) int { d := len(pts[0]) for i := 1; i < len(pts); i++ { n := len(pts[i]) if d > n { d = n } } return d } // Circumcircle returns the circle (center and radius) that passes through the three points. func Circumcircle(p1, p2, p3 []float64) []float64 { // Translate p1, p2 and p3 s.t. 
p1 is at the origin b := []float64{p2[0] - p1[0], p2[1] - p1[1]} b2 := b[0]*b[0] + b[1]*b[1] c := []float64{p3[0] - p1[0], p3[1] - p1[1]} c2 := c[0]*c[0] + c[1]*c[1] d := 2 * (b[0]*c[1] - c[0]*b[1]) d = 1 / d u := []float64{(c[1]*b2 - b[1]*c2) * d, (b[0]*c2 - c[0]*b2) * d} r2 := u[0]*u[0] + u[1]*u[1] return []float64{u[0] + p1[0], u[1] + p1[1], math.Sqrt(r2)} }
util/math.go
0.827898
0.57069
math.go
starcoder
package ln import ( "math" "math/rand" ) type Sphere struct { Center Vector Radius float64 Box Box } func NewSphere(center Vector, radius float64) *Sphere { min := Vector{center.X - radius, center.Y - radius, center.Z - radius} max := Vector{center.X + radius, center.Y + radius, center.Z + radius} box := Box{min, max} return &Sphere{center, radius, box} } func (s *Sphere) Compile() { } func (s *Sphere) BoundingBox() Box { return s.Box } func (s *Sphere) Contains(v Vector, f float64) bool { return v.Sub(s.Center).Length() <= s.Radius+f } func (s *Sphere) Intersect(r Ray) Hit { radius := s.Radius to := r.Origin.Sub(s.Center) b := to.Dot(r.Direction) c := to.Dot(to) - radius*radius d := b*b - c if d > 0 { d = math.Sqrt(d) t1 := -b - d if t1 > 1e-2 { return Hit{s, t1} } t2 := -b + d if t2 > 1e-2 { return Hit{s, t2} } } return NoHit } func (s *Sphere) Paths4() Paths { var paths Paths var seen []Vector var radii []float64 for i := 0; i < 140; i++ { var v Vector var m float64 for { v = RandomUnitVector() m = rand.Float64()*0.25 + 0.05 ok := true for i, other := range seen { threshold := m + radii[i] + 0.02 if other.Sub(v).Length() < threshold { ok = false break } } if ok { seen = append(seen, v) radii = append(radii, m) break } } p := v.Cross(RandomUnitVector()).Normalize() q := p.Cross(v).Normalize() n := rand.Intn(4) + 1 for k := 0; k < n; k++ { var path Path for j := 0; j <= 360; j += 5 { a := Radians(float64(j)) x := v x = x.Add(p.MulScalar(math.Cos(a) * m)) x = x.Add(q.MulScalar(math.Sin(a) * m)) x = x.Normalize() x = x.MulScalar(s.Radius).Add(s.Center) path = append(path, x) } paths = append(paths, path) m *= 0.75 } } return paths } func (s *Sphere) Paths3() Paths { var paths Paths for i := 0; i < 20000; i++ { v := RandomUnitVector() v = v.MulScalar(s.Radius).Add(s.Center) paths = append(paths, Path{v, v}) } return paths } func (s *Sphere) Paths2() Paths { var equator Path for lng := 0; lng <= 360; lng++ { v := LatLngToXYZ(0, float64(lng), s.Radius) equator = 
append(equator, v) } var paths Paths for i := 0; i < 100; i++ { m := Identity() for j := 0; j < 3; j++ { v := RandomUnitVector() m = m.Rotate(v, rand.Float64()*2*math.Pi) } m = m.Translate(s.Center) paths = append(paths, equator.Transform(m)) } return paths } func (s *Sphere) Paths() Paths { var paths Paths n := 10 o := 10 for lat := -90 + o; lat <= 90-o; lat += n { var path Path for lng := 0; lng <= 360; lng++ { v := LatLngToXYZ(float64(lat), float64(lng), s.Radius).Add(s.Center) path = append(path, v) } paths = append(paths, path) } for lng := 0; lng <= 360; lng += n { var path Path for lat := -90 + o; lat <= 90-o; lat++ { v := LatLngToXYZ(float64(lat), float64(lng), s.Radius).Add(s.Center) path = append(path, v) } paths = append(paths, path) } return paths } func LatLngToXYZ(lat, lng, radius float64) Vector { lat, lng = Radians(lat), Radians(lng) x := radius * math.Cos(lat) * math.Cos(lng) y := radius * math.Cos(lat) * math.Sin(lng) z := radius * math.Sin(lat) return Vector{x, y, z} } type OutlineSphere struct { Sphere Eye Vector Up Vector } func NewOutlineSphere(eye, up, center Vector, radius float64) *OutlineSphere { sphere := NewSphere(center, radius) return &OutlineSphere{*sphere, eye, up} } func (s *OutlineSphere) Paths() Paths { var path Path center := s.Sphere.Center radius := s.Sphere.Radius hyp := center.Sub(s.Eye).Length() opp := radius theta := math.Asin(opp / hyp) adj := opp / math.Tan(theta) d := math.Cos(theta) * adj r := math.Sin(theta) * adj w := center.Sub(s.Eye).Normalize() u := w.Cross(s.Up).Normalize() v := w.Cross(u).Normalize() c := s.Eye.Add(w.MulScalar(d)) for i := 0; i <= 360; i++ { a := Radians(float64(i)) p := c p = p.Add(u.MulScalar(math.Cos(a) * r)) p = p.Add(v.MulScalar(math.Sin(a) * r)) path = append(path, p) } return Paths{path} }
ln/sphere.go
0.710226
0.469824
sphere.go
starcoder
package main import ( "fmt" "math/rand" "time" ) func degrees() int { return rand.Intn((450 - 300) + 400) } func preheat() string { firstStep := fmt.Sprintf("Preheat oven to %d degrees \n", degrees()) return firstStep } func cookTime() int { rand.Seed(time.Now().Unix()) return rand.Intn((30 - 20) + 30) } func coolTime() int { rand.Seed(time.Now().Unix()) return rand.Intn((30 - 5) + 5) } func getEggs() int { return rand.Intn(4) + 1 } func beatEggs() string { eggStep := fmt.Sprintf("Beat %d eggs in a %s sized bowl \n", getEggs(), bowlSize()) return eggStep } func numMuff() int { n := rand.Intn(13) + 1 return n } func getMuffinTins() string { muffinStep := fmt.Sprintf("Grease %d cup muffin pan or use %d Muffin Liners \n", numMuff(), numMuff()) return muffinStep } func pourMuffin() string { pourMuffin := fmt.Sprintf("Pour mixture into %d muffin tins \n", numMuff()) return pourMuffin } func bowlSize() string { bowls := [4]string{"small", "medium", "large", "giant"} return bowls[rand.Intn(4)] } func combineAll() string { combineStep := fmt.Sprintf("Mix all your ingredients in a %s size bowl\n", bowlSize()) return combineStep } func combineMain() string { combineStep := fmt.Sprintf("Mix your flour, baking powder, and eggs in a %s sized bowl", bowlSize()) return combineStep } func fillMuff() string { fillStep := fmt.Sprintf("Whisk together all your remaining ingredients. Pour mixture into %s squeeze bottle and fill an equal amount into each muffin\n", bowlSize()) return fillStep } func muffLayer(muffArray []string) string { layerStep := fmt.Sprintf("Chop the %s into %s pieces. 
Roast it on the stovetop, Set aside for later", muffArray[2], bowlSize()) return layerStep } func layerMuff() string { layeringStep := fmt.Sprintf("Using the ingredients you chopped earlier sprinkle them onto cooked muffins") return layeringStep } func combineRemain() string { combineStep := fmt.Sprintf("Mix your remaining ingredients in a %s size bowl\n", bowlSize()) return combineStep } func cookMuff() string { cookStep := fmt.Sprintf("Bake for %d minutes\n", cookTime()) return cookStep } func coolLocation() string { bowls := [2]string{"refrigerator", "freezer"} return bowls[rand.Intn(2)] } func coolAll() string { coolStep := fmt.Sprintf("Cool for %d minutes in the %s\n", coolTime(), coolLocation()) return coolStep } func cakeContents() string { combineStep := fmt.Sprintf("put contents of bowl into cake pan. Put directly into oven %d minutes.\n", cookTime()) return combineStep } func CookCake() []string { steps := make([]string, 5) steps[0] = preheat() steps[1] = beatEggs() steps[2] = combineAll() steps[3] = cakeContents() steps[4] = coolAll() return steps } func CookMuffin(muffArray []string) []string { rand.Seed(time.Now().Unix()) randSelect := rand.Intn(3) steps := make([]string, 9) // 0 Generic Muffins // 1 Filling Muffins // 2 Crumb Muffins if randSelect == 0 { steps[0] = preheat() steps[1] = getMuffinTins() steps[2] = beatEggs() steps[3] = combineAll() steps[4] = pourMuffin() steps[5] = cookMuff() steps[6] = coolAll() return steps[0:6] } else if randSelect == 1 { steps[0] = preheat() steps[1] = getMuffinTins() steps[2] = beatEggs() steps[3] = combineMain() steps[4] = pourMuffin() steps[5] = cookMuff() steps[6] = fillMuff() steps[7] = coolAll() return steps[0:7] } else if randSelect == 2 { steps[0] = preheat() steps[1] = getMuffinTins() steps[2] = beatEggs() steps[3] = muffLayer(muffArray) steps[4] = combineRemain() steps[5] = pourMuffin() steps[6] = cookMuff() steps[7] = layerMuff() steps[8] = coolAll() return steps[0:8] } return nil } func cookieContents() 
string { combineStep := fmt.Sprintf("Mix eggs with melted butter, flour,baking soda, and salt\n") return combineStep } func cookieAddIngridients() string { combineStep := fmt.Sprintf(" add finely chopped -ingridients here- into the bowl. Mix thourghly\n") return combineStep } func cookieCookInstruc() string { combineStep := fmt.Sprintf(" For each cookie, drop 1/4 a cup onto the baking tray\n") return combineStep } func CookCookies() []string { steps := make([]string, 6) steps[0] = preheat() steps[1] = beatEggs() steps[2] = cookieContents() steps[3] = cookieAddIngridients() steps[4] = cookieCookInstruc() steps[5] = coolAll() return steps } func GetSteps(recipe Recipe) []string { bakeType := recipe.BakeType if bakeType == "Muffins" { return CookMuffin(recipe.Ingredients) } else if bakeType == "Cake" { return CookCake() } else if bakeType == "Cookies" { return CookCake() } return nil }
steps.go
0.54359
0.466785
steps.go
starcoder
package stringfuncs // (c) <NAME> 2022 import ( "strings" ) func In(LookingIn string, LookingFor rune) (int) { for Index, Element := range LookingIn { if Element == LookingFor { return Index } } return -1 } func In_string(LookingIn []string, LookingFor string) (int) { for Index, Element := range LookingIn { if Element == LookingFor { return Index } } return -1 } func In_int(LookingIn []int, LookingFor int) (int) { for Index, Element := range LookingIn { if Element == LookingFor { return Index } } return -1 } func Usage_string(LookingIn []string, LookingFor string) (int) { Counter := 0 for _, Element := range LookingIn { if Element == LookingFor { Counter ++ } } return Counter } func Usage(LookingIn string, LookingFor rune) (int) { Counter := 0 for _, Element := range LookingIn { if Element == LookingFor { Counter ++ } } return Counter } func Find(LookingIn string, LookingFor rune, Time int) (int) { Counter := 0 for Index, Element := range LookingIn { if Element == LookingFor { Counter ++ if Counter == Time { return Index } } } return -1 } func Outliers(Source string, Input string) (int) { Counter := 0 for _, Element := range Input { if In(Source, Element) == -1 { Counter ++ } } return Counter } func RemoveSpace(Source string) (string) { return strings.Replace(Source, " ", "", -1) } func StripSpace(Source string) (string) { return strings.TrimSpace(Source) } func Split(Source string, Input string) ([]string) { return strings.Split(Source, Input) } func IsInt(Source string) (bool) { return Outliers("0123456789", Source) == 0 } func IsFloat(Source string) (bool) { return (Outliers("0123456789.", Source) == 0 && Usage(Source, '.') <= 1) } func Contains(Source string, LookingFor string) (bool) { return strings.Contains(Source, LookingFor) } func Flatten(Source []string) (string) { Output := "" for _, Item := range Source { Output = Output + Item } return Output }
StringFuncs.go
0.682256
0.430327
StringFuncs.go
starcoder
package ffnn import ( "gonum.org/v1/gonum/mat" "../utils/matrices/ops" ) type FFNetwork struct { // The layers, in strict order. layers []*FFLayer // ******************************** // Training-related fields start here. // ******************************** // The default learning rate, needed for training. defaultLearningRate float64 // The cost (error) function for the training. c ErrorMetric // These hold the gradient costs for the layers; these ones go // for all the layer(s). rDcDa []*mat.Dense // Per-layer f-derivative over weighted i. Will have // the sizes of corresponding layers' weighted i. rDaDz []*mat.Dense // Will have the sizes of corresponding layers' weighted i. // It is = dc/dz = dc/da (*) da/dz. delta []*mat.Dense } func (network *FFNetwork) Layer(index int) *FFLayer { return network.layers[index] } func (network *FFNetwork) DefaultLearningRate() float64 { return network.defaultLearningRate } func (network *FFNetwork) Forward(input *mat.Dense) *mat.Dense { for _, layer := range network.layers { layer.Forward(input) input = layer.a } // After this, all the data will be available inside each layer // And the paradoxical part is that `i` will hold the outputs // in the end return input } // Gradient(network.c)(layer.a, expected) -> stored in networks' output a cost gradient func (network *FFNetwork) opDcDaInLastLayer(layer *FFLayer, layerIndex int, t *mat.Dense) *mat.Dense { // op1 Matrix size: (layer.outputSize rows, 1 column) // op2 Matrix size: (layer.outputSize rows, 1 column) // Result Matrix size: (layer.outputSize rows, 1 column) return network.c.Gradient(layer.a, t, network.rDcDa[layerIndex]) } // Recursive error calculation func (network *FFNetwork) opDcDaInNonLastLayer(layerIndex int, nextLayerErrors *mat.Dense) *mat.Dense { // Op1 Matrix Size: (nextLayer.inputSize = layer.outputSize rows, nextLayer.outputSize columns) // Op2 Matrix Size: (nextLayer.outputSize rows, 1 column) // Result Matrix Size: (nextLayer.inputSize = layer.outputSize 
rows, 1 column) return ops.Mul(network.layers[layerIndex + 1].w.T(), network.delta[layerIndex + 1], network.rDcDa[layerIndex]) } // Derivative(layer.Activation)(layer.z) -> stored in corresponding f's derivative result func (network *FFNetwork) opDaDz(layer *FFLayer, layerIndex int) *mat.Dense { // Op1 Matrix Size: (layer.outputSize rows, 1 column) // Result Matrix Size: (layer.outputSize rows, 1 column) return layer.f.Derivative(layer.z, network.rDaDz[layerIndex]) } // This is the first differential error being calculated. It will imply the gradient function over the costs. func (network *FFNetwork) opDeltaInLastLayer( lastLayerIndex int, expectedOutputActivations *mat.Dense, ) *mat.Dense { // Consider z = weighted i // a = final output a // C = the cost function // differential error on the output = gradient of C with respect to a // differential error on the weighted i = element-wise differential error on the output * ( // derivative of Activation function over the weighted input for that output // ) lastLayer := network.layers[lastLayerIndex] // First, we calculate the gradient of C by the a using our particular final output a // Fetched Matrix size: (layer.outputSize rows, 1 column) rDcDa := network.opDcDaInLastLayer(lastLayer, lastLayerIndex, expectedOutputActivations) // Then we calculate the sigmoid prime over the last weighted i (which will have the same dimensions of the // a, and so the result will) // Fetched Matrix Size: (layer.outputSize rows, 1 column) rDaDz := network.opDaDz(lastLayer, lastLayerIndex) // And finally we element-wise multiply the gradient with the derivative // Op1 Matrix Size: (layer.outputSize rows, 1 column) // Op2 Matrix Size: (layer.outputSize rows, 1 column) // Result Matrix Size: (layer.outputSize rows, 1 column) return ops.H(rDcDa, rDaDz, network.delta[lastLayerIndex]) } // This is the second, and more, differential error(s) being calculated. 
It will imply the w of the following // layer, and the errors from the following layer. func (network *FFNetwork) opDeltaInNonLastLayer( layerIndex int, ) *mat.Dense { layer := network.layers[layerIndex] // First, we calculate the propagated gradient by using the next layer errors and transposing the next layer w // Op2 Matrix Size: (nextLayer.outputSize rows, 1 column) // Fetched Matrix Size: (nextLayer.inputSize = layer.outputSize rows, 1 column) rDcDa := network.opDcDaInNonLastLayer(layerIndex, network.delta[layerIndex + 1]) // Then, we have a matching matrix of propagated gradients. Just calculate the derivative // Fetched Matrix Size: (layer.outputSize rows, 1 column) rDaDz := network.opDaDz(layer, layerIndex) // And finally we element-wise multiply the propagated gradient with the derivative // Op1 Matrix Size: (nextLayer.inputSize = layer.outputSize rows, 1 column) // Op2 Matrix Size: (layer.outputSize rows, 1 column) // Result Matrix Size: (layer.outputSize rows, 1 column) return ops.H(rDcDa, rDaDz, network.delta[layerIndex]) } // Now, to fix the layers! func (network *FFNetwork) fixLayer(layerIndex int, learningRate float64) { layer := network.layers[layerIndex] weights := layer.w biases := layer.b // Cartesian product of i and delta iT := layer.i.T() delta := network.delta[layerIndex] rows, _ := delta.Dims() // rows = n. of errors (neurons) _, columns := iT.Dims() // columns = n. of inputs (or former a) deltaXiT := mat.NewDense(rows, columns, nil) // size = n. of errors x n. of inputs delta_ := mat.NewDense(rows, 1, nil) // size = n. of errors x n. 
of inputs // Op1 Matrix Size: (layer.outputSize rows, 1 column) // Op2 Matrix Size: (1 row, layer.inputSize columns) // Result Matrix Size: (layer.outputSize rows, layer.inputSize column) // Finally, modify the widths and bias by subtracting the scaled delta weights.Sub(weights, ops.Scale(learningRate, ops.Mul(delta, iT, deltaXiT), deltaXiT)) biases.Sub(biases, ops.Scale(learningRate, delta, delta_)) } func (network *FFNetwork) Test(input *mat.Dense, expectedOutput *mat.Dense) (*mat.Dense, float64) { // Get the outputs by running a normal forward, and the cost (absolute error) output := network.Forward(input) return output, network.c.Base(output, expectedOutput) } func (network *FFNetwork) adjust(expectedOutput *mat.Dense, learningRate float64) { layersCount := len(network.layers) network.opDeltaInLastLayer(layersCount - 1, expectedOutput) for index := layersCount - 2; index >= 0; index-- { network.opDeltaInNonLastLayer(index) } // And finally, after we know all the errors (which are vertical rows), fix the layers for index := 0; index < layersCount; index++ { network.fixLayer(index, learningRate) } } func (network *FFNetwork) TrainWithRate(input *mat.Dense, expectedOutput *mat.Dense, learningRate float64) (*mat.Dense, float64) { // Get the outputs by running a normal forward, and the cost (absolute error) output, cost := network.Test(input, expectedOutput) // Now compute the errors backward, and adjust using a learning rate network.adjust(expectedOutput, learningRate) return output, cost } func (network *FFNetwork) Train(input *mat.Dense, expectedOutput *mat.Dense) (*mat.Dense, float64) { return network.TrainWithRate(input, expectedOutput, network.defaultLearningRate) }
ffnn/network.go
0.717606
0.620507
network.go
starcoder
package main import ( "fmt" "math" "os" "time" "github.com/ChristopherRabotin/gokalman" "github.com/ChristopherRabotin/smd" "github.com/gonum/matrix/mat64" ) func main() { // Define the times startDT := time.Now() endDT := startDT.Add(time.Duration(24) * time.Hour) // Define the orbits leo := smd.NewOrbitFromOE(7000, 0.001, 30, 80, 40, 0, smd.Earth) stateVector := mat64.NewVector(6, nil) // Define the stations σρ := math.Pow(1e-3, 2) // m , but all measurements in km. σρDot := math.Pow(1e-3, 2) // m/s , but all measurements in km/s. st1 := NewStation("st1", 0, -35.398333, 148.981944, σρ, σρDot) st2 := NewStation("st2", 0, 40.427222, 355.749444, σρ, σρDot) st3 := NewStation("st3", 0, 35.247164, 243.205, σρ, σρDot) stations := []Station{st1, st2, st3} // Vector of measurements measurements := []Measurement{} // Define the special export functions export := smd.ExportConfig{Filename: "LEO", Cosmo: true, AsCSV: true, Timestamp: false} export.CSVAppendHdr = func() string { hdr := "secondsSinceEpoch," for _, st := range stations { hdr += fmt.Sprintf("%sRange,%sRangeRate,%sNoisyRange,%sNoisyRangeRate,", st.name, st.name, st.name, st.name) } return hdr[:len(hdr)-1] // Remove trailing comma } export.CSVAppend = func(state smd.State) string { Δt := state.DT.Sub(startDT).Seconds() str := fmt.Sprintf("%f,", Δt) θgst := Δt * smd.EarthRotationRate // Compute visibility for each station. 
for _, st := range stations { _, measurement := st.PerformMeasurement(θgst, state) if measurement.Visible { measurements = append(measurements, measurement) str += measurement.CSV() } else { str += ",,,," } } return str[:len(str)-1] // Remove trailing comma } timeStep := 2 * time.Second // Generate the perturbed orbit scName := "LEO" smd.NewPreciseMission(smd.NewEmptySC(scName, 0), leo, startDT, endDT, smd.Perturbations{Jn: 3}, timeStep, false, export).Propagate() // Take care of the measurements: fmt.Printf("\n[INFO] Generated %d measurements\n", len(measurements)) // Perturbations in the estimate estPerts := smd.Perturbations{Jn: 2} // Initialize the KF noise noiseQ := mat64.NewSymDense(3, nil) noiseR := mat64.NewSymDense(2, []float64{σρ, 0, 0, σρDot}) noiseKF := gokalman.NewNoiseless(noiseQ, noiseR) visibilityErrors := 0 var orbitEstimate *smd.OrbitEstimate kf := gokalman.NewBatchKF(len(measurements), noiseKF) var prevStationName = "" var prevΦ *mat64.Dense for measNo, measurement := range measurements { if !measurement.Visible { panic("why is there a non visible measurement?!") } if measNo == 0 { orbitEstimate = smd.NewOrbitEstimate("estimator", measurement.State.Orbit, estPerts, measurement.State.DT, time.Second) // Create the initial state vector to fix initR, initV := measurement.State.Orbit.RV() for i := 0; i < 3; i++ { stateVector.SetVec(i, initR[i]) stateVector.SetVec(i+3, initV[i]) } } prevΦ = orbitEstimate.Φ // Propagate the reference trajectory until the next measurement time. 
orbitEstimate.PropagateUntil(measurement.State.DT) // This leads to Φ(ti+1, ti) // Compute Φ(ti+1, t0) var prevΦinv mat64.Dense if err := prevΦinv.Inverse(prevΦ); err != nil { panic(fmt.Errorf("the following Φ is singular:\n%+v", mat64.Formatted(prevΦ))) } var Φtit0 mat64.Dense Φtit0.Mul(orbitEstimate.Φ, &prevΦinv) if measurement.Station.name != prevStationName { fmt.Printf("[INFO] #%04d %s in visibility of %s (T+%s)\n", measNo, scName, measurement.Station.name, measurement.State.DT.Sub(startDT)) prevStationName = measurement.Station.name } // Compute "real" measurement vis, computed := measurement.Station.PerformMeasurement(measurement.θgst, orbitEstimate.State()) if !vis { fmt.Printf("[WARNING] station %s should see the SC but does not\n", measurement.Station.name) visibilityErrors++ } // Compute H var H mat64.Dense H.Mul(computed.HTilde(), &Φtit0) kf.SetNextMeasurement(measurement.Observation(), computed.Observation(), orbitEstimate.Φ, &H) } severity := "INFO" if visibilityErrors > 0 { severity = "WARNING" } fmt.Printf("[%s] %d visibility errors\n", severity, visibilityErrors) // Solve Batch xHat0, P0, err := kf.Solve() if err != nil { panic(fmt.Errorf("could not solve BatchKF: %s", err)) } fmt.Printf("Batch P0:\n%+v\n", mat64.Formatted(P0)) fmt.Printf("Batch xHat0:\n%+v\n", mat64.Formatted(xHat0)) // Let's perform the correction on the reference trajectory, and propagate it. stateVector.SubVec(stateVector, xHat0) // Generate the new orbit via Mission. 
correctedOrbit := *smd.NewOrbitFromRV([]float64{stateVector.At(0, 0), stateVector.At(1, 0), stateVector.At(2, 0)}, []float64{stateVector.At(3, 0), stateVector.At(4, 0), stateVector.At(5, 0)}, smd.Earth) fmt.Printf("%s\n\n", correctedOrbit) residuals := make([]*mat64.Vector, len(measurements)) Δstate := make([]*mat64.Vector, len(measurements)) for measNo, measurement := range measurements { if measNo == 0 { orbitEstimate = smd.NewOrbitEstimate("estimator", correctedOrbit, estPerts, measurement.State.DT, time.Second) } // Propagate the reference trajectory until the next measurement time. orbitEstimate.PropagateUntil(measurement.State.DT) // This leads to Φ(ti+1, ti) // Compute the residuals stateError := mat64.NewVector(6, nil) R, V := orbitEstimate.State().Orbit.RV() iR, iV := measurement.State.Orbit.RV() for i := 0; i < 3; i++ { stateError.SetVec(i, R[i]-iR[i]) stateError.SetVec(i+3, V[i]-iV[i]) } Δstate[measNo] = stateError // Compute residual residual := mat64.NewVector(2, nil) residual.MulVec(measurement.HTilde(), stateError) residual.AddScaledVec(residual, -1, kf.Measurements[measNo].ObservationDev) residual.ScaleVec(-1, residual) residuals[measNo] = residual } // Export state error f, err := os.Create("./batch-state-errors.csv") if err != nil { panic(err) } defer f.Close() f.WriteString("\\Delta X,\\Delta Y,\\Delta Z,\\Delta X_{dot},\\Delta Y_{dot},\\Delta Z_{dot}\n") for _, delta := range Δstate { csv := fmt.Sprintf("%f,%f,%f,%f,%f,%f\n", delta.At(0, 0), delta.At(1, 0), delta.At(2, 0), delta.At(3, 0), delta.At(4, 0), delta.At(5, 0)) if _, errF := f.WriteString(csv); err != nil { panic(errF) } } // Export residuals f, err = os.Create("./batch-residuals.csv") if err != nil { panic(err) } defer f.Close() f.WriteString("rho,rhoDot\n") for _, residual := range residuals { csv := fmt.Sprintf("%f,%f\n", residual.At(0, 0), residual.At(1, 0)) if _, err := f.WriteString(csv); err != nil { panic(err) } } }
examples/statOD/batch/main.go
0.660063
0.461563
main.go
starcoder
Discrete Fourier Transform See: https://en.wikipedia.org/wiki/Discrete_Fourier_transform https://github.com/takatoh/fft */ //----------------------------------------------------------------------------- package core import ( "math" "math/bits" "math/cmplx" ) //----------------------------------------------------------------------------- // toComplex128 converts a slice of float values to complex values. // The imaginary part is set to zero. func toComplex128(in []float64) []complex128 { out := make([]complex128, len(in)) for i := range out { out[i] = complex(in[i], 0) } return out } // toFloat64 converts a slice of complex values to float values by taking the real part. func toFloat64(in []complex128) []float64 { out := make([]float64, len(in)) for i := range out { out[i] = real(in[i]) } return out } //----------------------------------------------------------------------------- // isPowerOf2 return true if n is a power of 2. func isPowerOf2(x int) bool { return x != 0 && (x&-x) == x } // bitReverse reverses the first n bits of x. func bitReverse(x, n int) int { return int(bits.Reverse(uint(x)) >> (bits.UintSize - uint(n))) } // log2 returns log base 2 of x (assumes x is a power of 2). func log2(x int) int { return bits.TrailingZeros(uint(x)) } //----------------------------------------------------------------------------- // DFT returns the discrete fourier transform of the complex input. // This is the definition based, n*n (slow) algorithm. // It's mainly used for generating test vectors for FFT validation. func DFT(in []complex128) []complex128 { n := len(in) nInv := 1.0 / float64(n) out := make([]complex128, n) for k := 0; k < n; k++ { for i := 0; i < n; i++ { p := -Tau * float64(k*i) * nInv s, c := math.Sincos(p) out[k] += in[i] * complex(c, s) } } return out } // InverseDFT returns the inverse discrete fourier transform of the complex input. // This is the definition based, n*n (slow) algorithm. 
// It's mainly used for generating test vectors for FFT validation. func InverseDFT(in []complex128) []complex128 { n := len(in) nInv := 1.0 / float64(n) out := make([]complex128, n) for k := 0; k < n; k++ { for i := 0; i < n; i++ { p := Tau * float64(k*i) * nInv s, c := math.Sincos(p) out[k] += in[i] * complex(c, s) } out[k] *= complex(nInv, 0) } return out } //----------------------------------------------------------------------------- // fftConst contains pre-calculated fft constants. type fftConst struct { hn int // half length hmask int // half mask for mod n/2 stages int // number of butterfly stages rev []int // input reversing indices w []complex128 // twiddle factors } // fftCache is a cache of pre-calculated fft constants. var fftCache map[int]*fftConst // fftLookup returns the fft constants for a given input length. func fftLookup(n int) *fftConst { // has the cache been created? if fftCache == nil { fftCache = make(map[int]*fftConst) } // do we have the entry in the cache? if k, ok := fftCache[n]; ok { return k } // check length if !isPowerOf2(n) { panic("input length is not a power of 2") } if n < 4 { panic("input length has to be >= 4") } // create the entry k := &fftConst{} // create the reverse indices k.rev = make([]int, n) nbits := log2(n) for i := range k.rev { k.rev[i] = bitReverse(i, nbits) } // create the half variables k.hn = n >> 1 k.hmask = k.hn - 1 // number of butterfly stages k.stages = nbits // create the twiddle factors k.w = make([]complex128, k.hn) nInv := 1.0 / float64(n) for i := range k.w { theta := -Tau * float64(i) * nInv s, c := math.Sincos(theta) k.w[i] = complex(c, s) } // add it to the cache and return fftCache[n] = k return k } // InverseFFT returns the (fast) inverse discrete fourier transform of the complex input. 
func InverseFFT(in []complex128) []complex128 { n := len(in) nInv := complex(1.0/float64(n), 0) out := make([]complex128, n) for i := range out { out[i] = cmplx.Conj(in[i]) } out = FFT(out) for i := range out { out[i] = cmplx.Conj(out[i]) out[i] *= nInv } return out } // FFT returns the (fast) discrete fourier transform of the complex input. func FFT(in []complex128) []complex128 { n := len(in) fk := fftLookup(n) // reverse the input order out := make([]complex128, n) for i := range out { out[i] = in[fk.rev[i]] } // run the butterflies kmax := 1 mul := fk.hn for { if kmax >= n { break } istep := kmax * 2 for k := 0; k < kmax; k++ { w := fk.w[k*mul] for i := k; i < n; i += istep { j := i + kmax tmp := out[j] * w out[j] = out[i] - tmp out[i] += tmp } } mul >>= 1 kmax = istep } return out } //----------------------------------------------------------------------------- // test code // FFTx returns the (fast) discrete fourier transform of the complex input. func FFTx(in []complex128) []complex128 { n := len(in) fk := fftLookup(n) // reverse the input order out := make([]complex128, n) for i := range out { out[i] = in[fk.rev[i]] } // run the butterflies oneMask := 1 hiMask := -1 loMask := 0 shift := uint(fk.stages - 1) for s := 0; s < fk.stages; s++ { for i := 0; i < fk.hn; i++ { j := (i&hiMask)<<1 | (i & loMask) k := j | oneMask tmp := out[k] * fk.w[(i<<shift)&fk.hmask] out[k] = out[j] - tmp out[j] += tmp } shift-- oneMask <<= 1 hiMask <<= 1 loMask = (loMask << 1) | 1 } return out } //-----------------------------------------------------------------------------
core/dft.go
0.832849
0.54468
dft.go
starcoder
package finnhub import ( "encoding/json" ) // FinancialStatements struct for FinancialStatements type FinancialStatements struct { // Symbol of the company. Symbol *string `json:"symbol,omitempty"` // An array of map of key, value pairs containing the data for each period. Financials *[]map[string]interface{} `json:"financials,omitempty"` } // NewFinancialStatements instantiates a new FinancialStatements object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewFinancialStatements() *FinancialStatements { this := FinancialStatements{} return &this } // NewFinancialStatementsWithDefaults instantiates a new FinancialStatements object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewFinancialStatementsWithDefaults() *FinancialStatements { this := FinancialStatements{} return &this } // GetSymbol returns the Symbol field value if set, zero value otherwise. func (o *FinancialStatements) GetSymbol() string { if o == nil || o.Symbol == nil { var ret string return ret } return *o.Symbol } // GetSymbolOk returns a tuple with the Symbol field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *FinancialStatements) GetSymbolOk() (*string, bool) { if o == nil || o.Symbol == nil { return nil, false } return o.Symbol, true } // HasSymbol returns a boolean if a field has been set. func (o *FinancialStatements) HasSymbol() bool { if o != nil && o.Symbol != nil { return true } return false } // SetSymbol gets a reference to the given string and assigns it to the Symbol field. func (o *FinancialStatements) SetSymbol(v string) { o.Symbol = &v } // GetFinancials returns the Financials field value if set, zero value otherwise. 
func (o *FinancialStatements) GetFinancials() []map[string]interface{} { if o == nil || o.Financials == nil { var ret []map[string]interface{} return ret } return *o.Financials } // GetFinancialsOk returns a tuple with the Financials field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *FinancialStatements) GetFinancialsOk() (*[]map[string]interface{}, bool) { if o == nil || o.Financials == nil { return nil, false } return o.Financials, true } // HasFinancials returns a boolean if a field has been set. func (o *FinancialStatements) HasFinancials() bool { if o != nil && o.Financials != nil { return true } return false } // SetFinancials gets a reference to the given []map[string]interface{} and assigns it to the Financials field. func (o *FinancialStatements) SetFinancials(v []map[string]interface{}) { o.Financials = &v } func (o FinancialStatements) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.Symbol != nil { toSerialize["symbol"] = o.Symbol } if o.Financials != nil { toSerialize["financials"] = o.Financials } return json.Marshal(toSerialize) } type NullableFinancialStatements struct { value *FinancialStatements isSet bool } func (v NullableFinancialStatements) Get() *FinancialStatements { return v.value } func (v *NullableFinancialStatements) Set(val *FinancialStatements) { v.value = val v.isSet = true } func (v NullableFinancialStatements) IsSet() bool { return v.isSet } func (v *NullableFinancialStatements) Unset() { v.value = nil v.isSet = false } func NewNullableFinancialStatements(val *FinancialStatements) *NullableFinancialStatements { return &NullableFinancialStatements{value: val, isSet: true} } func (v NullableFinancialStatements) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableFinancialStatements) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
model_financial_statements.go
0.683314
0.431405
model_financial_statements.go
starcoder
package processor import ( "github.com/Jeffail/benthos/lib/log" "github.com/Jeffail/benthos/lib/message" "github.com/Jeffail/benthos/lib/metrics" "github.com/Jeffail/benthos/lib/response" "github.com/Jeffail/benthos/lib/types" ) //------------------------------------------------------------------------------ func init() { Constructors[TypeSplit] = TypeSpec{ constructor: NewSplit, description: ` Breaks message batches (synonymous with multiple part messages) into smaller batches, targeting a specific batch size of discrete message parts (default size is 1 message.) For each batch, if there is a remainder of parts after splitting a batch, the remainder is also sent as a single batch. For example, if your target size was 10, and the processor received a batch of 95 message parts, the result would be 9 batches of 10 messages followed by a batch of 5 messages. The split processor should *always* be positioned at the end of a list of processors.`, } } //------------------------------------------------------------------------------ // SplitConfig is a configuration struct containing fields for the Split // processor, which breaks message batches down into batches of a smaller size. type SplitConfig struct { Size int `json:"size" yaml:"size"` } // NewSplitConfig returns a SplitConfig with default values. func NewSplitConfig() SplitConfig { return SplitConfig{ Size: 1, } } //------------------------------------------------------------------------------ // Split is a processor that splits messages into a message per part. type Split struct { log log.Modular stats metrics.Type size int mCount metrics.StatCounter mDropped metrics.StatCounter mSent metrics.StatCounter mSentParts metrics.StatCounter } // NewSplit returns a Split processor. 
func NewSplit( conf Config, mgr types.Manager, log log.Modular, stats metrics.Type, ) (Type, error) { return &Split{ log: log.NewModule(".processor.split"), stats: stats, size: conf.Split.Size, mCount: stats.GetCounter("processor.split.count"), mDropped: stats.GetCounter("processor.split.dropped"), mSent: stats.GetCounter("processor.split.sent"), mSentParts: stats.GetCounter("processor.split.parts.sent"), }, nil } //------------------------------------------------------------------------------ // ProcessMessage applies the processor to a message, either creating >0 // resulting messages or a response to be sent back to the message source. func (s *Split) ProcessMessage(msg types.Message) ([]types.Message, types.Response) { s.mCount.Incr(1) if msg.Len() == 0 { s.mDropped.Incr(1) return nil, response.NewAck() } msgs := []types.Message{} for i := 0; i < msg.Len(); i += s.size { batchSize := s.size if msg.Len() < (i + batchSize) { batchSize = msg.Len() - i } parts := make([]types.Part, batchSize) for j := range parts { parts[j] = msg.Get(i + j).Copy() } newMsg := message.New(nil) newMsg.SetAll(parts) msgs = append(msgs, newMsg) } s.mSent.Incr(int64(len(msgs))) s.mSentParts.Incr(int64(msg.Len())) return msgs, nil } //------------------------------------------------------------------------------
lib/processor/split.go
0.765374
0.418756
split.go
starcoder
package staticarray import ( "github.com/influxdata/flux/array" "github.com/influxdata/flux/memory" "github.com/influxdata/flux/semantic" ) type booleans struct { data []bool alloc *memory.Allocator } func Boolean(data []bool) array.Boolean { return &booleans{data: data} } func (a *booleans) Type() semantic.Type { return semantic.Bool } func (a *booleans) IsNull(i int) bool { return false } func (a *booleans) IsValid(i int) bool { return i >= 0 && i < len(a.data) } func (a *booleans) Len() int { return len(a.data) } func (a *booleans) NullN() int { return 0 } func (a *booleans) Value(i int) bool { return a.data[i] } func (a *booleans) Copy() array.Base { panic("implement me") } func (a *booleans) Free() { if a.alloc != nil { a.alloc.Free(cap(a.data) * boolSize) } a.data = nil } func (a *booleans) Slice(start, stop int) array.BaseRef { return a.BooleanSlice(start, stop) } func (a *booleans) BooleanSlice(start, stop int) array.BooleanRef { return &booleans{data: a.data[start:stop]} } func (a *booleans) BoolValues() []bool { return a.data } func BooleanBuilder(a *memory.Allocator) array.BooleanBuilder { return &booleanBuilder{alloc: a} } type booleanBuilder struct { data []bool alloc *memory.Allocator } func (b *booleanBuilder) Type() semantic.Type { return semantic.Bool } func (b *booleanBuilder) Len() int { return len(b.data) } func (b *booleanBuilder) Cap() int { return cap(b.data) } func (b *booleanBuilder) Reserve(n int) { newCap := len(b.data) + n if newCap := len(b.data) + n; newCap <= cap(b.data) { return } if err := b.alloc.Allocate(newCap * boolSize); err != nil { panic(err) } data := make([]bool, len(b.data), newCap) copy(data, b.data) b.alloc.Free(cap(b.data) * boolSize) b.data = data } func (b *booleanBuilder) BuildArray() array.Base { return b.BuildBooleanArray() } func (b *booleanBuilder) Free() { panic("implement me") } func (b *booleanBuilder) Append(v bool) { if len(b.data) == cap(b.data) { // Grow the slice in the same way as built-in append. 
n := len(b.data) if n == 0 { n = 2 } b.Reserve(n) } b.data = append(b.data, v) } func (b *booleanBuilder) AppendNull() { // The staticarray does not support nulls so it will do the current behavior of just appending // the zero value. b.Append(false) } func (b *booleanBuilder) AppendValues(v []bool, valid ...[]bool) { if newCap := len(b.data) + len(v); newCap > cap(b.data) { b.Reserve(newCap - cap(b.data)) } b.data = append(b.data, v...) } func (b *booleanBuilder) BuildBooleanArray() array.Boolean { return &booleans{ data: b.data, alloc: b.alloc, } }
internal/staticarray/bool.go
0.64969
0.466542
bool.go
starcoder
package image2d import ( "errors" "image" "image/color" "image/draw" "github.com/adrianderstroff/pbr/pkg/cgm" gl "github.com/adrianderstroff/pbr/pkg/core/gl" "github.com/mdouchement/hdr" "github.com/mdouchement/hdr/hdrcolor" ) // ConvertToPowerOfTwo subsamples an image to be quadratic and be a power of two. func (img *Image2D) ConvertToPowerOfTwo() { width := img.GetWidth() height := img.GetHeight() channels := img.GetChannels() bytedepth := img.GetByteDepth() // determine appropriate power of two dimensions and take the smaller one nwidth := closestPowerOfTwoSmallerThanDimension(width) nheight := closestPowerOfTwoSmallerThanDimension(height) dim := cgm.Mini(nwidth, nheight) // determine the skip for sampling the image skipX := float64(width) / float64(dim) skipY := float64(height) / float64(dim) var data []uint8 for y := 0; y < dim; y++ { ny := int(float64(y) * skipY) for x := 0; x < dim; x++ { nx := int(float64(x) * skipX) // get the start of the pixel idx := img.getIdx(nx, ny) // iterate over all color channels for c := 0; c < channels; c++ { // iterate over byte depth for d := 0; d < bytedepth; d++ { // get the color and depth offset off := c*bytedepth + d data = append(data, img.data[idx+off]) } } } } // overwrite data img.data = data img.width = dim img.height = dim } // IsPowerOfTwo returns true if width and height are both powers or two func (img *Image2D) IsPowerOfTwo() bool { return img.width == closestPowerOfTwoSmallerThanDimension(img.width) && img.height == closestPowerOfTwoSmallerThanDimension(img.height) } // IsQuadratic returns true if width equals height. func (img *Image2D) IsQuadratic() bool { return img.width == img.height } func closestPowerOfTwoSmallerThanDimension(dim int) int { powerOfTwo := 1 for (powerOfTwo * 2) <= dim { powerOfTwo *= 2 } return powerOfTwo } // getIdx turns the x and y indices into a 1D index with respect to the channels // and byte depth. 
func (img *Image2D) getIdx(x, y int) int {
	return (x + y*img.width) * img.channels * img.bytedepth
}

// getOIdx turns the x and y indices into a 1D index (channels only, no byte depth).
func (img *Image2D) getOIdx(x, y int) int {
	return (x + y*img.width) * img.channels
}

// extractData grabs the image data from the image.Image.
// it returns the image data, channels, bytedepth and an error.
func extractData(img image.Image, rect image.Rectangle, fname string, channels int) ([]uint8, int, int, error) {
	// extract data values
	var (
		data      []uint8
		bytedepth int
	)
	if fname == "hdr" {
		// HDR images are always decoded as 3-channel float32 data.
		bytedepth = 4
		channels = 3
		colormodel := img.ColorModel()
		// Go switch cases do not fall through, so the original per-case
		// break statements were redundant and have been removed.
		switch colormodel {
		case hdrcolor.RGBModel:
			rgb := hdr.NewRGB(rect)
			draw.Draw(rgb, rect, img, image.Pt(0, 0), draw.Src)
			data = float32SliceToUint8Slice(rgb.Pix)
		case hdrcolor.XYZModel:
			rgb := hdr.NewXYZ(rect)
			draw.Draw(rgb, rect, img, image.Pt(0, 0), draw.Src)
			data = float32SliceToUint8Slice(rgb.Pix)
		default:
			return data, channels, bytedepth, errors.New("hdr color model is not supported")
		}
	} else {
		bytedepth = 1
		// determine number of channels if not already provided
		if channels == -1 {
			colormodel := img.ColorModel()
			channels = 4
			if colormodel == color.AlphaModel || colormodel == color.Alpha16Model ||
				colormodel == color.GrayModel || colormodel == color.Gray16Model {
				channels = 1
			}
		}
		switch channels {
		case 1:
			gray := image.NewGray(rect)
			draw.Draw(gray, rect, img, image.Pt(0, 0), draw.Src)
			data = gray.Pix
		case 4:
			rgba := image.NewRGBA(rect)
			draw.Draw(rgba, rect, img, image.Pt(0, 0), draw.Src)
			data = rgba.Pix
		}
	}
	return data, channels, bytedepth, nil
}

// checkDimensions checks if width, height and number of channels is in an
// appropriate range.
func checkDimensions(width, height, channels int) error { if width < 1 || height < 1 { return errors.New("width and height must be bigger than 0") } if channels < 1 || channels > 4 { return errors.New("number of channels must be between 1 and 4") } return nil } // getColorModel returns the name of the respective color model. func getColorModel(model color.Model) string { colorname := "undefined" switch model { case color.RGBAModel: colorname = "RGBA" break case color.RGBA64Model: colorname = "RGBA64" break case color.NRGBAModel: colorname = "NRGBA" break case color.NRGBA64Model: colorname = "NRGBA64" break case color.AlphaModel: colorname = "Alpha" break case color.Alpha16Model: colorname = "Alpha16" break case color.GrayModel: colorname = "Gray" break case color.Gray16Model: colorname = "Gray16" break } return colorname } // getChannelsName returns the name of the channel. func getChannelsName(channels int) string { c := "Unknown Channel Number" switch channels { case 1: c = "RED" break case 2: c = "RG" break case 3: c = "RGB" break case 4: c = "RGBA" break } return c } // getPixelTypeFromByteDepth returns the appropriate pixel type for the given // bytedepth. So far online a bytedepth of 1 and 4 is supported. func getPixelTypeFromByteDepth(bytedepth int) (int, error) { switch bytedepth { case 1: return gl.UNSIGNED_BYTE, nil case 4: return gl.FLOAT, nil } return 0, errors.New("bytedepth not supported") }
pkg/view/image/image2d/util.go
0.81928
0.436982
util.go
starcoder
// Package kunstruct provides unstructured from api machinery and factory for creating unstructured package kunstruct import ( "fmt" "strconv" "strings" ) // A PathSection contains a list of nested fields, which may end with an // indexable value. For instance, foo.bar resolves to a PathSection with 2 // fields and no index, while foo[0].bar resolves to two path sections, the // first containing the field foo and the index 0, and the second containing // the field bar, with no index. The latter PathSection references the bar // field of the first item in the foo list type PathSection struct { fields []string idx int searchName string searchValue string } func newPathSection() PathSection { return PathSection{idx: -1, searchName: "", searchValue: ""} } func (ps *PathSection) appendNonEmpty(field string) { if len(field) != 0 { ps.fields = append(ps.fields, field) } } func (ps *PathSection) NotIndexed() bool { return ps.idx == -1 && ps.searchName == "" } func (ps *PathSection) ResolveIndex(s []interface{}) (int, bool, error) { if ps.idx >= len(s) { return ps.idx, false, fmt.Errorf("index %d is out of bounds", ps.idx) } if ps.idx != -1 { return ps.idx, true, nil } for curId, subField := range s { subMap, ok1 := subField.(map[string]interface{}) if !ok1 { return ps.idx, false, fmt.Errorf("%v is of the type %T, expected map[string]interface{}", subField, subField) } if foundValue, ok2 := subMap[ps.searchName]; ok2 { if stringValue, ok3 := foundValue.(string); ok3 { if stringValue == ps.searchValue { return curId, true, nil } } } } return ps.idx, false, nil } func (ps *PathSection) parseIndex(pathElement string) { // Assign this index to the current // PathSection, save it to the result, then begin // a new PathSection tmpIdx, err := strconv.Atoi(pathElement) if err == nil { // We have detected an integer so an array. 
ps.idx = tmpIdx ps.searchName = "" ps.searchValue = "" return } if strings.Contains(pathElement, "=") { // We have detected an searchKey so an array keyPart := strings.Split(pathElement, "=") ps.searchName = keyPart[0] ps.searchValue = keyPart[1] return } // We have detected the downwardapi syntax ps.appendNonEmpty(pathElement) } func parseFields(path string) (result []PathSection, err error) { section := newPathSection() if !strings.Contains(path, "[") { section.fields = strings.Split(path, ".") result = append(result, section) return result, nil } start := 0 insideParentheses := false for i, c := range path { switch c { case '.': if !insideParentheses { section.appendNonEmpty(path[start:i]) start = i + 1 } case '[': if !insideParentheses { section.appendNonEmpty(path[start:i]) start = i + 1 insideParentheses = true } else { return nil, fmt.Errorf("nested parentheses are not allowed: %s", path) } case ']': if insideParentheses { section.parseIndex(path[start:i]) result = append(result, section) section = newPathSection() start = i + 1 insideParentheses = false } else { return nil, fmt.Errorf("invalid field path %s", path) } } } if start < len(path)-1 { section.appendNonEmpty(path[start:]) result = append(result, section) } for _, section := range result { for i, f := range section.fields { if strings.HasPrefix(f, "\"") || strings.HasPrefix(f, "'") { section.fields[i] = strings.Trim(f, "\"'") } } } return result, nil }
api/k8sdeps/kunstruct/helper.go
0.551332
0.591251
helper.go
starcoder
package gm import ( "github.com/cpmech/gosl/chk" "github.com/cpmech/gosl/utl" ) // ExtractSurfaces returns a new NURBS representing a boundary of this NURBS func (o *Nurbs) ExtractSurfaces() (surfs []*Nurbs) { if o.gnd == 1 { return } nsurf := o.gnd * 2 surfs = make([]*Nurbs, nsurf) var ords [][]int var knots [][][]float64 if o.gnd == 2 { ords = [][]int{ {o.p[1]}, // perpendicular to x {o.p[0]}, // perpendicular to y } knots = [][][]float64{ {o.b[1].T}, // perpendicular to x {o.b[0].T}, // perpendicular to y } } else { ords = [][]int{ {o.p[1], o.p[2]}, // perpendicular to x {o.p[2], o.p[0]}, // perpendicular to y {o.p[0], o.p[1]}, // perpendicular to z } knots = [][][]float64{ {o.b[1].T, o.b[2].T}, // perpendicular to x {o.b[2].T, o.b[0].T}, // perpendicular to y {o.b[0].T, o.b[1].T}, // perpendicular to z } } for i := 0; i < o.gnd; i++ { a, b := i*o.gnd, i*o.gnd+1 surfs[a] = NewNurbs(o.gnd-1, ords[i], knots[i]) // surface perpendicular to i surfs[b] = NewNurbs(o.gnd-1, ords[i], knots[i]) // opposite surface perpendicular to i if o.gnd == 2 { // boundary is curve j := (i + 1) % o.gnd // direction perpendicular to i surfs[a].Q = o.CloneCtrlsAlongCurve(j, 0) surfs[b].Q = o.CloneCtrlsAlongCurve(j, o.n[i]-1) } else { // boundary is surface j := (i + 1) % o.gnd // direction perpendicular to i k := (i + 2) % o.gnd // other direction perpendicular to i surfs[a].Q = o.CloneCtrlsAlongSurface(j, k, 0) surfs[b].Q = o.CloneCtrlsAlongSurface(j, k, o.n[i]-1) } } return } // CloneCtrlsAlongCurve returns a copy of control points @ 2D boundary func (o *Nurbs) CloneCtrlsAlongCurve(iAlong, jAt int) (Qnew [][][][]float64) { Qnew = utl.Deep4alloc(o.n[iAlong], 1, 1, 4) var i, j int for m := 0; m < o.n[iAlong]; m++ { i, j = m, jAt if iAlong == 1 { i, j = jAt, m } for e := 0; e < 4; e++ { Qnew[m][0][0][e] = o.Q[i][j][0][e] } } return } // CloneCtrlsAlongSurface returns a copy of control points @ 3D boundary func (o *Nurbs) CloneCtrlsAlongSurface(iAlong, jAlong, kat int) (Qnew 
[][][][]float64) { Qnew = utl.Deep4alloc(o.n[iAlong], o.n[jAlong], 1, 4) var i, j, k int for m := 0; m < o.n[iAlong]; m++ { for n := 0; n < o.n[jAlong]; n++ { switch { case iAlong == 0 && jAlong == 1: i, j, k = m, n, kat case iAlong == 1 && jAlong == 2: i, j, k = kat, m, n case iAlong == 2 && jAlong == 0: i, j, k = n, kat, m default: chk.Panic("clone Q surface is specified by 'along' indices in (0,1) or (1,2) or (2,0). (%d,%d) is incorrect", iAlong, jAlong) } for e := 0; e < 4; e++ { Qnew[m][n][0][e] = o.Q[i][j][k][e] } } } return } // IndsAlongCurve returns the control points indices along curve func (o *Nurbs) IndsAlongCurve(iAlong, iSpan0, jAt int) (L []int) { nb := o.p[iAlong] + 1 // number of basis along i L = make([]int, nb) var i, j int for m := 0; m < nb; m++ { if iAlong == 0 { i = iSpan0 - o.p[0] + m j = jAt } else { i = jAt j = iSpan0 - o.p[1] + m } L[m] = i + j*o.n[0] } return } // IndsAlongSurface return the control points indices along surface func (o *Nurbs) IndsAlongSurface(iAlong, jAlong, iSpan0, jSpan0, kat int) (L []int) { nbu := o.p[iAlong] + 1 // number of basis functions along i nbv := o.p[jAlong] + 1 // number of basis functions along j L = make([]int, nbu*nbv) var c, i, j, k int for m := 0; m < nbu; m++ { for n := 0; n < nbv; n++ { switch { case iAlong == 0 && jAlong == 1: i = iSpan0 - o.p[0] + m j = jSpan0 - o.p[1] + n k = kat case iAlong == 1 && jAlong == 2: i = kat j = iSpan0 - o.p[1] + m k = jSpan0 - o.p[2] + n case iAlong == 2 && jAlong == 0: i = jSpan0 - o.p[0] + n j = kat k = iSpan0 - o.p[2] + m } L[c] = i + j*o.n[0] + k*o.n[1]*o.n[2] c++ } } return } // ElemBryLocalInds returns the local (element) indices of control points @ boundaries // (if element would have all surfaces @ boundaries) func (o *Nurbs) ElemBryLocalInds() (I [][]int) { switch o.gnd { case 1: return case 2: I = make([][]int, 2*o.gnd) nx, ny := o.p[0]+1, o.p[1]+1 I[3] = utl.IntRange3(0, nx*ny, nx) I[1] = utl.IntAddScalar(I[3], nx-1) I[0] = utl.IntRange(nx) I[2] = 
utl.IntAddScalar(I[0], (ny-1)*nx) case 3: I = make([][]int, 2*o.gnd) chk.Panic("3D NURBS: ElemBryLocalInds: TODO") // TODO } return }
gm/topologynurbs.go
0.655115
0.420957
topologynurbs.go
starcoder
package wordchain

import (
	"errors"
)

// Errors that can be returned from the Build function.
var (
	// ErrEmpty is returned when both of the words are empty strings.
	ErrEmpty = errors.New("both words must not be empty")
	// ErrNotInDictionary is returned when either of the words could not be found in the provided dictionary.
	ErrNotInDictionary = errors.New("both words must be present in the dictionary")
	// ErrNotEqualLength is returned when the word lengths differ. Since a Dictionary can only contain words of
	// a specific length, both words must be of this length as well.
	ErrNotEqualLength = errors.New("both words must be the same length")
	// ErrNotFound is returned if a chain cannot be built as there is no possible path from origin to target word using
	// words in the dictionary.
	ErrNotFound = errors.New("chain not found")
)

// Build finds the shortest chain of words from origin to target by changing only one character in a word each time.
// All words in the chain must be included in the passed dictionary.
func Build(dict *Dictionary, origin, target string) ([]string, error) {
	// length is validated first so that a pair of unequal words where only
	// one is empty reports ErrNotEqualLength, not ErrEmpty
	if len(origin) != len(target) {
		return nil, ErrNotEqualLength
	}
	if len(origin) == 0 {
		return nil, ErrEmpty
	}
	if !dict.HasWord(origin) || !dict.HasWord(target) {
		return nil, ErrNotInDictionary
	}

	b := newBuilder(dict, origin, target)
	// BFS from origin fills b.nodes; the chain is then reconstructed backwards
	b.populateNodes()
	return b.shortestChain()
}

// builder is a helper struct used to find the word chain, holding the dictionary, origin and target words, and
// a map of visited nodes
type builder struct {
	dict           *Dictionary
	origin, target string
	// nodes maps each visited word to its BFS node (word, distance from
	// origin, and similar-word adjacency)
	nodes map[string]*node
}

// newBuilder creates a builder whose nodes map is seeded with the origin word
// at distance 0 (the zero value of node.distance).
func newBuilder(dict *Dictionary, origin, target string) *builder {
	b := &builder{
		dict:   dict,
		origin: origin,
		target: target,
		nodes:  make(map[string]*node),
	}
	b.nodes[origin] = &node{word: origin}
	return b
}

// populateNodes uses breadth-first search to find the target word, adding all the visited nodes to the nodes map.
func (b *builder) populateNodes() {
	// standard BFS: distance increases by 1 per layer, so the first time the
	// target is discovered its distance is minimal
	q := newQueue()
	q.enqueue(b.nodes[b.origin])
	for q.len() > 0 {
		current := q.dequeue()
		if current == nil {
			break
		}
		current.similar = b.similarWords(current.word)
		for _, word := range current.similar {
			// only record a word the first time it is seen; later sightings
			// would have an equal or larger distance
			if _, ok := b.nodes[word]; !ok {
				b.nodes[word] = &node{word: word, distance: current.distance + 1}
				if word == b.target {
					// store only the current word in the target's similar slice, since other paths might not have been
					// constructed at this depth in the tree
					b.nodes[word].similar = []string{current.word}
					// NOTE(review): reset presumably empties the queue so the
					// BFS loop terminates early — confirm against the queue type
					q.reset()
				} else {
					q.enqueue(b.nodes[word])
				}
			}
		}
	}
}

// similarWords finds words from the dictionary that are similar (e.g. with only one different character) to the passed
// word. It does this by exchanging each character with all possible characters from the alphabet and saving the
// combinations that are present in the dictionary.
func (b *builder) similarWords(word string) []string {
	// a map is used as a set: substituting a character with itself re-creates
	// the original word, and duplicates must be collapsed
	similarMap := make(map[string]bool)
	runes := []rune(word)
	for i := range runes {
		orig := runes[i]
		for _, letter := range b.dict.Alphabet() {
			runes[i] = letter
			s := string(runes)
			if b.dict.HasWord(s) {
				similarMap[s] = true
			}
			// restore the original rune before moving to the next position
			runes[i] = orig
		}
	}
	similar := make([]string, 0, len(similarMap))
	for w := range similarMap {
		similar = append(similar, w)
	}
	return similar
}

// shortestChain returns the shortest possible chain from the origin word to the target word by using the previously
// populated nodes map. It does this by first retrieving the target word, finding a similar word that has the lowest
// distance to origin and iterating this way until reaching the origin itself.
func (b *builder) shortestChain() ([]string, error) { current, ok := b.nodes[b.target] if !ok { return nil, ErrNotFound } path := make([]string, current.distance+1) path[current.distance] = current.word for current.word != b.origin { var closest *node for _, w := range current.similar { n := b.nodes[w] if n != nil && (closest == nil || n.distance < closest.distance) { closest = n } } if closest == nil { return nil, ErrNotFound } current = closest path[current.distance] = current.word } return path, nil }
wordchain/build.go
0.737253
0.470615
build.go
starcoder
package list

import (
	"fmt"

	"github.com/flowonyx/functional"
	"github.com/flowonyx/functional/errors"
	"golang.org/x/exp/constraints"
)

// Min finds the minimum value in values.
// It returns a wrapped BadArgumentErr when values is empty.
func Min[T constraints.Ordered](values ...T) (T, error) {
	if len(values) == 0 {
		return *(new(T)), fmt.Errorf("%w: Min cannot operate on an empty values", errors.BadArgumentErr)
	}
	return minMax(func(t T, min T) bool { return t < min }, values), nil
}

// MustMin is the same as Min but panics instead of returning an error.
func MustMin[T constraints.Ordered](values ...T) T {
	return functional.Must(Min(values...))
}

// Max finds the maximum value in values.
// It returns a wrapped BadArgumentErr when values is empty.
func Max[T constraints.Ordered](values ...T) (T, error) {
	if len(values) == 0 {
		return *(new(T)), fmt.Errorf("%w: Max cannot operate on an empty values", errors.BadArgumentErr)
	}
	return minMax(func(t T, max T) bool { return t > max }, values), nil
}

// MustMax is the same as Max but panics instead of returning an error.
func MustMax[T constraints.Ordered](values ...T) T {
	return functional.Must(Max(values...))
}

// minMax scans values (which callers guarantee to be non-empty) and returns
// the element selected by test: with a less-than test this is the minimum,
// with a greater-than test the maximum.
func minMax[T any](test func(T, T) bool, values []T) T {
	if len(values) == 1 {
		return values[0]
	}
	current := values[0]
	// Iter visits every element; comparing the first element against itself
	// is harmless for strict < / > tests
	Iter(func(t T) {
		if test(t, current) {
			current = t
		}
	}, values)
	return current
}

// MaxBy returns the maximum projection(value) in values.
func MaxBy[T any, T2 constraints.Ordered](projection func(T) T2, values ...T) (T, error) {
	if len(values) == 0 {
		return *(new(T)), fmt.Errorf("%w: MaxBy cannot operate on an empty values", errors.BadArgumentErr)
	}
	return minMax(func(t1, t2 T) bool { return projection(t1) > projection(t2) }, values), nil
}

// MustMaxBy is the same as MaxBy but panics instead of returning an error.
func MustMaxBy[T any, T2 constraints.Ordered](projection func(T) T2, values ...T) T {
	return functional.Must(MaxBy(projection, values...))
}

// MinBy returns the minimum projection(value) in values.
func MinBy[T any, T2 constraints.Ordered](projection func(T) T2, values ...T) (T, error) { if len(values) == 0 { return *(new(T)), fmt.Errorf("%w: MinBy cannot operate on an empty values", errors.BadArgumentErr) } return minMax(func(t1, t2 T) bool { return projection(t1) < projection(t2) }, values), nil } // MustMinBy is the same as MinBy but panics instead of returning an error. func MustMinBy[T any, T2 constraints.Ordered](projection func(T) T2, values ...T) T { return functional.Must(MinBy(projection, values...)) }
list/compare.go
0.826991
0.620938
compare.go
starcoder
package iso20022

// Specifies corporate action dates.
type CorporateActionDate4 struct {

	// Date/time at which the coupons are to be/were submitted for payment of interest.
	CouponClippingDate *DateFormat4Choice `xml:"CpnClpngDt,omitempty"`

	// Last date/time at which a holder can consent to the changes sought by the corporation.
	ConsentExpirationDate *DateFormat4Choice `xml:"CnsntXprtnDt,omitempty"`

	// Date/time used by the offeror to determine the beneficiary eligible to participate in a consent based on the registered owner of the securities, eg, beneficial owner of consent record. The consent record date qualifier is used to indicate that a record date only applies to a certain part of the offer, not the entire offer.
	ConsentRecordDate *DateFormat4Choice `xml:"CnsntRcrdDt,omitempty"`

	// Date/time at which the distribution is due to take place (cash and/or securities).
	PaymentDate *DateFormat4Choice `xml:"PmtDt,omitempty"`

	// Date/time at which a payment can be made, eg, if payment date is a non-business day or to indicate the first payment date of an offer.
	EarliestPaymentDate *DateFormat4Choice `xml:"EarlstPmtDt,omitempty"`

	// Issuer or issuer's agent deadline to respond, with an instruction, to an outstanding offer or privilege.
	MarketDeadline *DateFormat4Choice `xml:"MktDdln,omitempty"`

	// Date/time at which the account servicer has set as the deadline to respond, with instructions, to an outstanding event. This time is dependent on the reference time zone of the account servicer as specified in an SLA.
	ResponseDeadline *DateFormat4Choice `xml:"RspnDdln,omitempty"`

	// Deadline by which instructions must be received to split securities, eg, of physical certificates.
	DeadlineToSplit *DateFormat4Choice `xml:"DdlnToSplt,omitempty"`

	// Date/time at which an order expires or on which a privilege or offer terminates.
	ExpiryDate *DateFormat4Choice `xml:"XpryDt,omitempty"`

	// Date/time at which the price of a security is determined.
	QuotationSettingDate *DateFormat4Choice `xml:"QtnSetngDt,omitempty"`

	// Date/time by which cash must be in place in order to take part in the event.
	SubscriptionCostDebitDate *DateFormat4Choice `xml:"SbcptCostDbtDt,omitempty"`
}

// Each Add* method below allocates the corresponding optional element and
// returns it so the caller can populate it (builder-style API).

// AddCouponClippingDate allocates and returns the CouponClippingDate element.
func (c *CorporateActionDate4) AddCouponClippingDate() *DateFormat4Choice {
	c.CouponClippingDate = new(DateFormat4Choice)
	return c.CouponClippingDate
}

// AddConsentExpirationDate allocates and returns the ConsentExpirationDate element.
func (c *CorporateActionDate4) AddConsentExpirationDate() *DateFormat4Choice {
	c.ConsentExpirationDate = new(DateFormat4Choice)
	return c.ConsentExpirationDate
}

// AddConsentRecordDate allocates and returns the ConsentRecordDate element.
func (c *CorporateActionDate4) AddConsentRecordDate() *DateFormat4Choice {
	c.ConsentRecordDate = new(DateFormat4Choice)
	return c.ConsentRecordDate
}

// AddPaymentDate allocates and returns the PaymentDate element.
func (c *CorporateActionDate4) AddPaymentDate() *DateFormat4Choice {
	c.PaymentDate = new(DateFormat4Choice)
	return c.PaymentDate
}

// AddEarliestPaymentDate allocates and returns the EarliestPaymentDate element.
func (c *CorporateActionDate4) AddEarliestPaymentDate() *DateFormat4Choice {
	c.EarliestPaymentDate = new(DateFormat4Choice)
	return c.EarliestPaymentDate
}

// AddMarketDeadline allocates and returns the MarketDeadline element.
func (c *CorporateActionDate4) AddMarketDeadline() *DateFormat4Choice {
	c.MarketDeadline = new(DateFormat4Choice)
	return c.MarketDeadline
}

// AddResponseDeadline allocates and returns the ResponseDeadline element.
func (c *CorporateActionDate4) AddResponseDeadline() *DateFormat4Choice {
	c.ResponseDeadline = new(DateFormat4Choice)
	return c.ResponseDeadline
}

// AddDeadlineToSplit allocates and returns the DeadlineToSplit element.
func (c *CorporateActionDate4) AddDeadlineToSplit() *DateFormat4Choice {
	c.DeadlineToSplit = new(DateFormat4Choice)
	return c.DeadlineToSplit
}

// AddExpiryDate allocates and returns the ExpiryDate element.
func (c *CorporateActionDate4) AddExpiryDate() *DateFormat4Choice {
	c.ExpiryDate = new(DateFormat4Choice)
	return c.ExpiryDate
}

// AddQuotationSettingDate allocates and returns the QuotationSettingDate element.
func (c *CorporateActionDate4) AddQuotationSettingDate() *DateFormat4Choice {
	c.QuotationSettingDate = new(DateFormat4Choice)
	return c.QuotationSettingDate
}

// AddSubscriptionCostDebitDate allocates and returns the SubscriptionCostDebitDate element.
func (c *CorporateActionDate4) AddSubscriptionCostDebitDate() *DateFormat4Choice {
	c.SubscriptionCostDebitDate = new(DateFormat4Choice)
	return c.SubscriptionCostDebitDate
}
CorporateActionDate4.go
0.758332
0.469885
CorporateActionDate4.go
starcoder
package mandelbrot

import (
	"image"
	"image/color"
	"image/jpeg"
	"image/png"
	"io"
)

// ColorFucn is what is used to generate the colorscheme
// It takes the number of iterations from the mandelbrot
// calculation and returns a color DefaultColorize is an
// extremely basic example
type ColorFunc func(int) color.RGBA

// DefaultColorze is a very simple Greyscale ColorFunc.
// An iteration count of -1 (the bailout sentinel) maps to opaque black;
// any other count maps to a grey level of iter modulo 255.
func DefaultColorize(iter int) color.RGBA {
	if iter == -1 {
		return color.RGBA{0, 0, 0, 0xff}
	}
	return color.RGBA{
		uint8(iter % 255),
		uint8(iter % 255),
		uint8(iter % 255),
		0xff,
	}
}

// Generator is the used to generate the fractal
type Generator struct {
	// Width and Height specify the resolution to use
	Width  int
	Height int
	// X and Y specify what point on the fractal to center on
	X float64
	Y float64
	// Zoom specifies how much to zoom in
	Zoom float64
	// Limit specifies when the mandelbrot calculation
	// should bail out and return -1 instead of
	// the number of iterations
	Limit int
	// AntiAlias specifies what level of antialiasing to use
	// An AntiAlias of 2 will average 4 points for each pixel
	// 3 will average 9 points. The increase is exponential
	AntiAlias int
	// Colorize is the ColorFunc used to generate the colorscheme
	Colorize ColorFunc
	// img is the underlying image; its bounds must match Width x Height
	// when Generate is called
	img *image.RGBA
}

// NewGenerator creates a new *Generator
// it should be used to ensure all fields
// are filled.
func NewGenerator(width, height int, x, y float64) *Generator { return &Generator{ Width: width, Height: height, X: x, Y: y, Zoom: 1, Limit: 1000, AntiAlias: 1, Colorize: DefaultColorize, img: image.NewRGBA(image.Rect(0, 0, width, height)), } } // Sets the AntiAliasing level func (g *Generator) WithAntiAlias(aa int) *Generator { if aa < 1 { aa = 1 } g.AntiAlias = aa return g } // Sets the zoom level func (g *Generator) WithZoom(z float64) *Generator { if z < 1 { z = 1 } g.Zoom = z return g } // Sets the bailout limit for fractal func (g *Generator) WithLimit(l int) *Generator { g.Limit = l return g } // Sets the Colorize function used to generate colorscheme func (g *Generator) WithColorizeFunc(f ColorFunc) *Generator { if f == nil { return g } g.Colorize = f return g } func (g *Generator) SetWidth(width int) { g.Width = width } func (g *Generator) SetHeight(height int) { g.Height = height } func (g *Generator) SetX(x float64) { g.X = x } func (g *Generator) SetY(y float64) { g.Y = y } func (g *Generator) SetZoom(zoom float64) { g.Zoom = zoom } func (g *Generator) SetLimit(limit int) { g.Limit = limit } func (g *Generator) SetColorize(cf ColorFunc) { g.Colorize = cf } // Generate does the mandelbrot calculation // and stores the fractal into an image func (g *Generator) Generate() { type pixel struct { X, Y int Color color.RGBA } ch := make(chan pixel, 0) inc := 4.0 / (float64(g.Height) * g.Zoom) x0 := g.X - inc*float64(g.Width/2) y0 := g.Y - inc*float64(g.Height/2) for x, a := 0, x0; x < g.Width; x, a = x+1, a+inc { go func(a, inc float64, x int) { for y, b := 0, y0; y < g.Height; y, b = y+1, b+inc { col := g.AntiAliasedColor(a, b, inc) ch <- pixel{x, y, col} } }(a, inc, x) } for c := 0; c < g.Width*g.Height; c++ { p := <-ch g.img.Set(p.X, p.Y, p.Color) } return } // AntiAliasedColor breaks a pixel down into parts // and gets the color for each point, then averages // them out for the pixel color func (g *Generator) AntiAliasedColor(x, y, inc float64) color.RGBA { 
colors := []color.RGBA{} smallInc := inc / float64(g.AntiAlias) for i := x + smallInc/2; i < x+inc; i += smallInc { for j := y + smallInc/2; j < y+inc; j += smallInc { colors = append(colors, g.GetColor(i, j)) } } return Average(colors...) } // GetColor gets the mandelbrot calculation iterations // and Uses the defined Colorize function turn into a color func (g *Generator) GetColor(x, y float64) color.RGBA { iter := Calculate(x, y, g.Limit) return g.Colorize(iter) } // WritePNG writes the underlying image to a an io.Writer // as a PNG func (g *Generator) WritePNG(w io.Writer) error { return png.Encode(w, g.img) } // WriteJPG writes the underlying image to a an io.Writer // as a JPG func (g *Generator) WriteJPG(w io.Writer) error { return jpeg.Encode(w, g.img, nil) }
generator.go
0.748904
0.51623
generator.go
starcoder
package iterator

import (
	"context"

	"github.com/cayleygraph/cayley/graph/refs"
	"github.com/cayleygraph/quad"
)

// Count iterator returns one element with size of underlying iterator.
type Count struct {
	it Shape
	qs refs.Namer
}

// NewCount creates a new iterator to count a number of results from a provided subiterator.
// qs may be nil - it's used to check if count Contains (is) a given value.
func NewCount(it Shape, qs refs.Namer) *Count {
	return &Count{
		it: it, qs: qs,
	}
}

// Iterate returns a scanner that yields the single count value.
func (it *Count) Iterate() Scanner {
	return newCountNext(it.it)
}

// Lookup returns an index that checks whether a value equals the count.
func (it *Count) Lookup() Index {
	return newCountContains(it.it, it.qs)
}

// SubIterators returns a slice of the sub iterators.
func (it *Count) SubIterators() []Shape {
	return []Shape{it.it}
}

// Optimize optimizes the subiterator in place and reports whether anything changed.
func (it *Count) Optimize(ctx context.Context) (Shape, bool) {
	sub, optimized := it.it.Optimize(ctx)
	it.it = sub
	return it, optimized
}

// Stats reports costs for producing the single count result. When the
// subiterator's size is not exact, producing the count requires a full scan,
// so NextCost is scaled by the estimated size.
func (it *Count) Stats(ctx context.Context) (Costs, error) {
	stats := Costs{
		NextCost: 1,
		Size: refs.Size{
			Value: 1,
			Exact: true,
		},
	}
	if sub, err := it.it.Stats(ctx); err == nil && !sub.Size.Exact {
		stats.NextCost = sub.NextCost * sub.Size.Value
	}
	stats.ContainsCost = stats.NextCost
	return stats, nil
}

func (it *Count) String() string {
	return "Count"
}

// countNext is the Scanner for Count: it emits exactly one result, the size
// of the underlying iterator, then reports exhaustion.
type countNext struct {
	it     Shape
	done   bool       // set once the single result has been produced
	result quad.Value // the computed count, as a quad.Int
	err    error
}

// newCountNext creates the scanner counting results of the given subiterator.
func newCountNext(it Shape) *countNext {
	return &countNext{
		it: it,
	}
}

// TagResults is a no-op: a count carries no tags.
func (it *countNext) TagResults(dst map[string]refs.Ref) {}

// Next counts a number of results in underlying iterator.
// Next produces the count exactly once; subsequent calls return false.
// If the subiterator's Stats size is exact it is used directly, otherwise
// the subiterator is fully scanned (including NextPath branches).
func (it *countNext) Next(ctx context.Context) bool {
	if it.done {
		return false
	}
	// TODO(dennwc): this most likely won't include the NextPath
	st, err := it.it.Stats(ctx)
	if err != nil {
		it.err = err
		return false
	}
	if !st.Size.Exact {
		// inexact estimate: count by exhaustive iteration instead
		sit := it.it.Iterate()
		defer sit.Close()
		for st.Size.Value = 0; sit.Next(ctx); st.Size.Value++ {
			// TODO(dennwc): it's unclear if we should call it here or not
			for ; sit.NextPath(ctx); st.Size.Value++ {
			}
		}
		it.err = sit.Err()
	}
	it.result = quad.Int(st.Size.Value)
	it.done = true
	return true
}

func (it *countNext) Err() error {
	return it.err
}

// Result returns the count wrapped as a prefetched reference, or nil if
// Next has not produced a result yet.
func (it *countNext) Result() refs.Ref {
	if it.result == nil {
		return nil
	}
	return refs.PreFetched(it.result)
}

// NextPath always reports false: a count has no alternative paths.
func (it *countNext) NextPath(ctx context.Context) bool {
	return false
}

func (it *countNext) Close() error {
	return nil
}

func (it *countNext) String() string {
	return "CountNext"
}

// countContains is the Index for Count: it checks whether a given value
// equals the computed count.
type countContains struct {
	it *countNext
	qs refs.Namer // may be nil; used to resolve non-prefetched refs
}

// newCountContains creates the contains-checker over the given subiterator.
// qs may be nil - it's used to check if count Contains (is) a given value.
func newCountContains(it Shape, qs refs.Namer) *countContains {
	return &countContains{
		it: newCountNext(it),
		qs: qs,
	}
}

// TagResults is a no-op: a count carries no tags.
func (it *countContains) TagResults(dst map[string]refs.Ref) {}

func (it *countContains) Err() error {
	return it.it.Err()
}

func (it *countContains) Result() refs.Ref {
	return it.it.Result()
}

// Contains lazily computes the count on first use, then compares val against
// it, resolving val through qs when it is not prefetched.
func (it *countContains) Contains(ctx context.Context, val refs.Ref) bool {
	if !it.it.done {
		it.it.Next(ctx)
	}
	if v, ok := val.(refs.PreFetchedValue); ok {
		return v.NameOf() == it.it.result
	}
	if it.qs != nil {
		return it.qs.NameOf(val) == it.it.result
	}
	return false
}

// NextPath always reports false: a count has no alternative paths.
func (it *countContains) NextPath(ctx context.Context) bool {
	return false
}

func (it *countContains) Close() error {
	return it.it.Close()
}

func (it *countContains) String() string {
	return "CountContains"
}
graph/iterator/count.go
0.727492
0.452899
count.go
starcoder
package input import ( "errors" "fmt" "strconv" "github.com/benthosdev/benthos/v4/internal/batch/policy" "github.com/benthosdev/benthos/v4/internal/component/input" "github.com/benthosdev/benthos/v4/internal/component/metrics" iprocessor "github.com/benthosdev/benthos/v4/internal/component/processor" "github.com/benthosdev/benthos/v4/internal/docs" "github.com/benthosdev/benthos/v4/internal/interop" "github.com/benthosdev/benthos/v4/internal/log" "github.com/benthosdev/benthos/v4/internal/old/broker" ) //------------------------------------------------------------------------------ var ( // ErrBrokerNoInputs is returned when creating a broker with zero inputs. ErrBrokerNoInputs = errors.New("attempting to create broker input type with no inputs") ) //------------------------------------------------------------------------------ func init() { Constructors[TypeBroker] = TypeSpec{ constructor: NewBroker, Summary: ` Allows you to combine multiple inputs into a single stream of data, where each input will be read in parallel.`, Description: ` A broker type is configured with its own list of input configurations and a field to specify how many copies of the list of inputs should be created. Adding more input types allows you to combine streams from multiple sources into one. For example, reading from both RabbitMQ and Kafka: ` + "```yaml" + ` input: broker: copies: 1 inputs: - amqp_0_9: urls: - amqp://guest:guest@localhost:5672/ consumer_tag: benthos-consumer queue: benthos-queue # Optional list of input specific processing steps processors: - bloblang: | root.message = this root.meta.link_count = this.links.length() root.user.age = this.user.age.number() - kafka: addresses: - localhost:9092 client_id: benthos_kafka_input consumer_group: benthos_consumer_group topics: [ benthos_stream:0 ] ` + "```" + ` If the number of copies is greater than zero the list will be copied that number of times. 
For example, if your inputs were of type foo and bar, with 'copies' set to '2', you would end up with two 'foo' inputs and two 'bar' inputs. ### Batching It's possible to configure a [batch policy](/docs/configuration/batching#batch-policy) with a broker using the ` + "`batching`" + ` fields. When doing this the feeds from all child inputs are combined. Some inputs do not support broker based batching and specify this in their documentation. ### Processors It is possible to configure [processors](/docs/components/processors/about) at the broker level, where they will be applied to _all_ child inputs, as well as on the individual child inputs. If you have processors at both the broker level _and_ on child inputs then the broker processors will be applied _after_ the child nodes processors.`, Categories: []Category{ CategoryUtility, }, FieldSpecs: docs.FieldSpecs{ docs.FieldAdvanced("copies", "Whatever is specified within `inputs` will be created this many times."), docs.FieldCommon("inputs", "A list of inputs to create.").Array().HasType(docs.FieldTypeInput), policy.FieldSpec(), }, } } //------------------------------------------------------------------------------ // BrokerConfig contains configuration fields for the Broker input type. type BrokerConfig struct { Copies int `json:"copies" yaml:"copies"` Inputs []Config `json:"inputs" yaml:"inputs"` Batching policy.Config `json:"batching" yaml:"batching"` } // NewBrokerConfig creates a new BrokerConfig with default values. func NewBrokerConfig() BrokerConfig { return BrokerConfig{ Copies: 1, Inputs: []Config{}, Batching: policy.NewConfig(), } } //------------------------------------------------------------------------------ // NewBroker creates a new Broker input type. func NewBroker( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, pipelines ...iprocessor.PipelineConstructorFunc, ) (input.Streamed, error) { pipelines = AppendProcessorsFromConfig(conf, mgr, pipelines...) 
lInputs := len(conf.Broker.Inputs) * conf.Broker.Copies if lInputs <= 0 { return nil, ErrBrokerNoInputs } var err error var b input.Streamed if lInputs == 1 { if b, err = New(conf.Broker.Inputs[0], mgr, log, stats, pipelines...); err != nil { return nil, err } } else { inputs := make([]input.Streamed, lInputs) for j := 0; j < conf.Broker.Copies; j++ { for i, iConf := range conf.Broker.Inputs { iMgr := mgr.IntoPath("broker", "inputs", strconv.Itoa(i)) inputs[len(conf.Broker.Inputs)*j+i], err = New(iConf, iMgr, iMgr.Logger(), iMgr.Metrics(), pipelines...) if err != nil { return nil, fmt.Errorf("failed to create input '%v' type '%v': %v", i, iConf.Type, err) } } } if b, err = broker.NewFanIn(inputs, stats); err != nil { return nil, err } } if conf.Broker.Batching.IsNoop() { return b, nil } bMgr := mgr.IntoPath("broker", "batching") policy, err := policy.New(conf.Broker.Batching, bMgr) if err != nil { return nil, fmt.Errorf("failed to construct batch policy: %v", err) } return NewBatcher(policy, b, log, stats), nil } //------------------------------------------------------------------------------
internal/old/input/broker.go
0.734024
0.662169
broker.go
starcoder
package poly import "github.com/adamcolton/geom/calc/fbuf" // Coefficients wraps the concept of a list of float64. It can express the order // of the polynomial and return any coeffcient. type Coefficients interface { Coefficient(idx int) float64 Len() int } // Slice fulfills Coefficients with a []float64. type Slice []float64 // Buf creates an instance of Poly with c capacity and a value of 1. This is // useful when taking the product of several polynomials. func Buf(c int, buf []float64) Slice { return append(fbuf.Empty(c, buf), 1) } // Coefficient at idx. If the idx is greater than the length of the // polynomial, then a 0 is returned. func (s Slice) Coefficient(idx int) float64 { if idx >= len(s) { return 0 } return s[idx] } // Len of the polynomial is equal to the length of the slice. func (s Slice) Len() int { return len(s) } // Empty constructs an empty polynomial. type Empty struct{} // Coefficient always returns 0 func (Empty) Coefficient(idx int) float64 { return 0 } // Len always returns 0 func (Empty) Len() int { return 0 } // D0 is a degree 0 polynomial - a constant. type D0 float64 // Coefficient returns underlying float64 if the idx is 0, otherwise it returns // 0. func (d D0) Coefficient(idx int) float64 { if idx == 0 { return float64(d) } return 0 } // Len is always 1 func (D0) Len() int { return 1 } // D1 is a degree 1 polynomial with the first coefficient equal to 1. type D1 float64 // Coefficient returns the underlying float64 if idx is 0 and returns 1 if the // idx is 1. func (d D1) Coefficient(idx int) float64 { if idx == 0 { return float64(d) } if idx == 1 { return 1 } return 0 } // Len is always equal to 2 func (D1) Len() int { return 2 } // Sum of 2 Coefficients type Sum [2]Coefficients // Coefficient at idx is the sum of the underlying Coefficients at idx. func (s Sum) Coefficient(idx int) float64 { return s[0].Coefficient(idx) + s[1].Coefficient(idx) } // Len is the greater len of the 2 Coefficients. 
func (s Sum) Len() int { ln := s[0].Len() if ln2 := s[1].Len(); ln2 > ln { return ln2 } return ln } // Scale Coefficients by a constant value type Scale struct { By float64 Coefficients } // Coefficient is product of scale factor and the underlying Coefficient at idx. func (s Scale) Coefficient(idx int) float64 { return s.Coefficients.Coefficient(idx) * s.By } // Product of two Coefficients type Product [2]Coefficients // Coefficient at idx is the sum of all p[i]*p2[j] where i+j == idx func (p Product) Coefficient(idx int) float64 { l0 := p[0].Len() l1 := p[1].Len() var sum float64 i := idx - l1 if i < 0 { i = 0 } for j := 0; i < l0 && i <= idx; i++ { j = idx - i sum += p[0].Coefficient(i) * p[1].Coefficient(j) } return sum } // Len is one less than the sum of the lengths. func (p Product) Len() int { return p[0].Len() + p[1].Len() - 1 } // Derivative of the Coefficients type Derivative struct { Coefficients } // Coefficient at idx is (idx+1)*Coefficient(idx+1). func (d Derivative) Coefficient(idx int) float64 { idx++ return d.Coefficients.Coefficient(idx) * float64(idx) } // Len is always one less than the underlying Coefficients. func (d Derivative) Len() int { return d.Coefficients.Len() - 1 } // Integral of the underlying Coefficients. type Integral struct { Coefficients C float64 } // Coefficient at idx is Coefficient(idx-1)/idx. Except at 0 where it is C. func (i Integral) Coefficient(idx int) float64 { if idx == 0 { return i.C } return i.Coefficients.Coefficient(idx-1) / float64(idx) } // Len is always one more than the underlying Coefficients. func (i Integral) Len() int { return i.Coefficients.Len() + 1 } // RemoveLeadingZero simplifies a Polynomial where the leading Coefficient is // zero. Note that this does no verification, it is only intended as a wrapper. type RemoveLeadingZero struct{ Coefficients } // Len is always one less than the underlying Coefficients. func (r RemoveLeadingZero) Len() int { return r.Coefficients.Len() - 1 }
calc/poly/coefficients.go
0.881608
0.79909
coefficients.go
starcoder
package tallytest import ( "fmt" "sort" "testing" "github.com/stretchr/testify/assert" "github.com/uber-go/tally" ) // AssertCounterValue asserts that the given counter has the expected value. func AssertCounterValue(t *testing.T, expected int64, s tally.Snapshot, name string, tags map[string]string) bool { index := flattenMetricIndex(name, tags) counter := s.Counters()[index] notFound := fmt.Sprintf("not found: key=%s, actual=%v", index, counterKeys(s.Counters())) if !assert.NotNil(t, counter, notFound) { return false } mismatch := fmt.Sprintf("current values: %v", counterMap(s.Counters())) return assert.Equal(t, expected, counter.Value(), mismatch) } // AssertGaugeValue asserts that the given gauge has the expected value. func AssertGaugeValue(t *testing.T, expected float64, s tally.Snapshot, name string, tags map[string]string) bool { index := flattenMetricIndex(name, tags) gauge := s.Gauges()[index] notFound := fmt.Sprintf("not found: key=%s, actual=%v", index, gaugeKeys(s.Gauges())) if !assert.NotNil(t, gauge, notFound) { return false } mismatch := fmt.Sprintf("current values: %v", gaugeMap(s.Gauges())) return assert.InDelta(t, expected, gauge.Value(), 0.0001, mismatch) } // AssertGaugeNil asserts that the given gauge does not exist. 
func AssertGaugeNil(t *testing.T, s tally.Snapshot, name string, tags map[string]string) bool { index := flattenMetricIndex(name, tags) gauge := s.Gauges()[index] found := fmt.Sprintf("found: key=%s, actual=%v", index, gaugeKeys(s.Gauges())) return assert.Nil(t, gauge, found) } func flattenMetricIndex(name string, tags map[string]string) string { keys := make([]string, 0, len(tags)) for k := range tags { keys = append(keys, k) } sort.Strings(keys) index := name + "+" for i, k := range keys { sep := "" if i != 0 { sep = "," } index += fmt.Sprintf("%s%s=%s", sep, k, tags[k]) } return index } func counterMap(m map[string]tally.CounterSnapshot) map[string]int64 { result := make(map[string]int64, len(m)) for k, v := range m { result[k] = v.Value() } return result } func gaugeMap(m map[string]tally.GaugeSnapshot) map[string]float64 { result := make(map[string]float64, len(m)) for k, v := range m { result[k] = v.Value() } return result } func counterKeys(m map[string]tally.CounterSnapshot) []string { r := make([]string, 0, len(m)) for k := range m { r = append(r, k) } return r } func gaugeKeys(m map[string]tally.GaugeSnapshot) []string { r := make([]string, 0, len(m)) for k := range m { r = append(r, k) } return r }
src/x/tallytest/tallytest.go
0.732113
0.569613
tallytest.go
starcoder
package models import ( i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) // PlannerAssignedToTaskBoardTaskFormat type PlannerAssignedToTaskBoardTaskFormat struct { Entity // Dictionary of hints used to order tasks on the AssignedTo view of the Task Board. The key of each entry is one of the users the task is assigned to and the value is the order hint. The format of each value is defined as outlined here. orderHintsByAssignee PlannerOrderHintsByAssigneeable // Hint value used to order the task on the AssignedTo view of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee dictionary does not provide an order hint for the user the task is assigned to. The format is defined as outlined here. unassignedOrderHint *string } // NewPlannerAssignedToTaskBoardTaskFormat instantiates a new plannerAssignedToTaskBoardTaskFormat and sets the default values. func NewPlannerAssignedToTaskBoardTaskFormat()(*PlannerAssignedToTaskBoardTaskFormat) { m := &PlannerAssignedToTaskBoardTaskFormat{ Entity: *NewEntity(), } return m } // CreatePlannerAssignedToTaskBoardTaskFormatFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value func CreatePlannerAssignedToTaskBoardTaskFormatFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { return NewPlannerAssignedToTaskBoardTaskFormat(), nil } // GetFieldDeserializers the deserialization information for the current model func (m *PlannerAssignedToTaskBoardTaskFormat) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { res := m.Entity.GetFieldDeserializers() res["orderHintsByAssignee"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, 
err := n.GetObjectValue(CreatePlannerOrderHintsByAssigneeFromDiscriminatorValue) if err != nil { return err } if val != nil { m.SetOrderHintsByAssignee(val.(PlannerOrderHintsByAssigneeable)) } return nil } res["unassignedOrderHint"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetStringValue() if err != nil { return err } if val != nil { m.SetUnassignedOrderHint(val) } return nil } return res } // GetOrderHintsByAssignee gets the orderHintsByAssignee property value. Dictionary of hints used to order tasks on the AssignedTo view of the Task Board. The key of each entry is one of the users the task is assigned to and the value is the order hint. The format of each value is defined as outlined here. func (m *PlannerAssignedToTaskBoardTaskFormat) GetOrderHintsByAssignee()(PlannerOrderHintsByAssigneeable) { if m == nil { return nil } else { return m.orderHintsByAssignee } } // GetUnassignedOrderHint gets the unassignedOrderHint property value. Hint value used to order the task on the AssignedTo view of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee dictionary does not provide an order hint for the user the task is assigned to. The format is defined as outlined here. 
func (m *PlannerAssignedToTaskBoardTaskFormat) GetUnassignedOrderHint()(*string) { if m == nil { return nil } else { return m.unassignedOrderHint } } // Serialize serializes information the current object func (m *PlannerAssignedToTaskBoardTaskFormat) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { err := m.Entity.Serialize(writer) if err != nil { return err } { err = writer.WriteObjectValue("orderHintsByAssignee", m.GetOrderHintsByAssignee()) if err != nil { return err } } { err = writer.WriteStringValue("unassignedOrderHint", m.GetUnassignedOrderHint()) if err != nil { return err } } return nil } // SetOrderHintsByAssignee sets the orderHintsByAssignee property value. Dictionary of hints used to order tasks on the AssignedTo view of the Task Board. The key of each entry is one of the users the task is assigned to and the value is the order hint. The format of each value is defined as outlined here. func (m *PlannerAssignedToTaskBoardTaskFormat) SetOrderHintsByAssignee(value PlannerOrderHintsByAssigneeable)() { if m != nil { m.orderHintsByAssignee = value } } // SetUnassignedOrderHint sets the unassignedOrderHint property value. Hint value used to order the task on the AssignedTo view of the Task Board when the task is not assigned to anyone, or if the orderHintsByAssignee dictionary does not provide an order hint for the user the task is assigned to. The format is defined as outlined here. func (m *PlannerAssignedToTaskBoardTaskFormat) SetUnassignedOrderHint(value *string)() { if m != nil { m.unassignedOrderHint = value } }
models/planner_assigned_to_task_board_task_format.go
0.602763
0.417925
planner_assigned_to_task_board_task_format.go
starcoder
package interpreter import ( "strconv" ) /* * This file contains the defnition of tokens in the system */ //Token is the parsed word and possible nodes a word associated to type Token struct { //Pos is the position of the token in the root sentence Pos int //Word is the match word corresponding to the token Word []rune //Node has the list of nodes applicable to a token Nodes []Node } //FastToken is used to store the token with nodes converted into their concrete type so //processing become becomes easy type FastToken struct { //Pos is the position of the token in the root sentence Pos int //Word is the match word corresponding to the token Word []rune //Tables is the list of table nodes in the token Tables []TableNode //Columns is the list of column nodes in the token Columns []ColumnNode //Values is the list of value nodes in the token Values []ValueNode //Operators is the list of operators nodes in the token Operators []OperatorNode //Unknowns is the list of unknows nodes in the token Unknowns []UnknownNode //Times is the list of time nodes in the token Times []TimeNode } //FastToken returns the converted fast token of the token func (t Token) FastToken() FastToken { result := FastToken{Pos: t.Pos, Word: t.Word} for _, n := range t.Nodes { switch n.Type() { case Table: tab, ok := n.(*TableNode) if ok { if result.Tables == nil { result.Tables = []TableNode{} } result.Tables = append(result.Tables, *tab) } case Column: col, ok := n.(*ColumnNode) if ok { if result.Columns == nil { result.Columns = []ColumnNode{} } result.Columns = append(result.Columns, *col) } case Value: val, ok := n.(*ValueNode) if ok { if result.Values == nil { result.Values = []ValueNode{} } result.Values = append(result.Values, *val) } case Operator: op, ok := n.(*OperatorNode) if ok { if result.Operators == nil { result.Operators = []OperatorNode{} } result.Operators = append(result.Operators, *op) } case Unknown: un, ok := n.(*UnknownNode) if ok { if result.Unknowns == nil { result.Unknowns 
= []UnknownNode{} } result.Unknowns = append(result.Unknowns, *un) } case Time: tn, ok := n.(*TimeNode) if ok { if result.Times == nil { result.Times = []TimeNode{} } result.Times = append(result.Times, *tn) } } } return result } //Copy makes a deep copy of the token func (t Token) Copy() Token { res := Token{Pos: t.Pos, Word: t.Word, Nodes: []Node{}} for _, v := range t.Nodes { res.Nodes = append(res.Nodes, v.Copy()) } return res } //String is the stringer implementation of the fast token func (f FastToken) String() string { return string(f.Word) + "-" + strconv.Itoa(f.Pos) }
interpreter/token.go
0.542136
0.477615
token.go
starcoder
package detect import ( "context" "errors" "github.com/rs/zerolog/log" ) // DetectOptions contains the options for detecting the environment. type DetectOptions struct { // Platform limits the detection to find only environments for the given platform. Platform string // TargetType limits the detection to find only environments for the given target type (pull request or commit SHA) TargetType string } // DetectResult contains the result of a detection. // It contains the platform, project (repo), target type and target ref (pull request number/commit SHA). // It also contains any extra values the detector can detect, e.g. a token or API URL. type DetectResult struct { Platform string Project string TargetType string TargetRef string Extra interface{} } // DetectError is an error that is returned when the environment could not be detected // by the current detector. type DetectError struct { err error } // Error returns the string message of the error. func (e *DetectError) Error() string { return e.err.Error() } // Detector is the interface that must be implemented by a detector. type Detector interface { DisplayName() string Detect(ctx context.Context, opts DetectOptions) (DetectResult, error) } // DetectorRegistryItem represents an item in the detector registry. // It maps the detector to the platforms it detects. type detectorRegistryItem struct { supportedPlatforms []string detector Detector } // detectorRegistry contains the list of all detectors. // Detectors are registered using the registerDetector function in the init() // function of the files containing them. var detectorRegistry = []detectorRegistryItem{} // registerDetector registers a new detector in the detector registry, // mapping it to the platforms it detects. 
func registerDetector(supportedPlatforms []string, detector Detector) { detectorRegistry = append(detectorRegistry, detectorRegistryItem{ supportedPlatforms: supportedPlatforms, detector: detector, }) } // DetectEnvironment detects the environment for a given platform and target type. // It iterates through the detectors and returns the first one that detects the // environment. func DetectEnvironment(ctx context.Context, opts DetectOptions) (DetectResult, error) { for _, detectorRegistryItem := range detectorRegistry { if opts.Platform != "" && !contains(detectorRegistryItem.supportedPlatforms, opts.Platform) { continue } detector := detectorRegistryItem.detector log.Ctx(ctx).Debug().Msgf("Checking for %s", detector.DisplayName()) result, err := detector.Detect(ctx, opts) if err != nil { if e, ok := err.(*DetectError); ok { log.Ctx(ctx).Debug().Err(e).Msgf("Could not detect %s environment", detector.DisplayName()) continue } else { return DetectResult{}, err } } else { log.Ctx(ctx).Info().Msgf("Detected %s (platform: %s, target type: %s, target ref: %s)", detector.DisplayName(), result.Platform, result.TargetType, result.TargetRef) return result, nil } } return DetectResult{}, &DetectError{errors.New("Could not to detect environment")} } // contains returns true if the given string slice contains the given string. func contains(a []string, s string) bool { for _, e := range a { if e == s { return true } } return false }
internal/detect/autodetect.go
0.798423
0.418459
autodetect.go
starcoder
package godouble import "fmt" // An Expectation verifies a count against an expected Value type Expectation interface { // Is the expectation met, complete with count? Met(count int) bool } //A Completion is an expectation that can indicate that further calls will fail to meet the expectation //Expectations that are not also Completions are never considered complete type Completion interface { Expectation Complete(count int) bool } type calledExactly int func (times calledExactly) Met(count int) bool { return count == int(times) } func (times calledExactly) Complete(count int) bool { return count >= int(times) } func (times calledExactly) String() string { return fmt.Sprintf("exactly %d", int(times)) } type calledNever struct{} func (n *calledNever) Met(count int) bool { return count == 0 } func (n *calledNever) String() string { return "never" } type calledAtLeast int func (times calledAtLeast) Met(count int) bool { return count >= int(times) } func (times calledAtLeast) String() string { return fmt.Sprintf("at least %d", int(times)) } type calledBetween struct { atLeast int atMost int } func (c calledBetween) Met(count int) bool { return count >= c.atLeast && count <= c.atMost } func (c calledBetween) Complete(count int) bool { return count >= c.atMost } func (c calledBetween) String() string { if c.atLeast <= 0 { return fmt.Sprintf("at most %d", c.atMost) } return fmt.Sprintf("between %d and %d", c.atLeast, c.atMost) } // Exactly returns an expectation to be called exactly n times // This expectation is considered complete after being exercised n times func Exactly(n int) Completion { return calledExactly(n) } // Once is shorthand for Exactly(1) func Once() Completion { return Exactly(1) } // Twice is shorthand for Exactly(2) func Twice() Completion { return Exactly(2) } var calledNeverSingleton = &calledNever{} // Never returns an expectation to never be called func Never() Expectation { return calledNeverSingleton } // AtLeast returns an expectation to be 
called at least n times func AtLeast(n int) Expectation { return calledAtLeast(n) } // AtMost returns an expectation to be called at most n times // This expectation is considered complete after being exercised n times func AtMost(n int) Completion { return Between(0, n) } // Between returns a new expectation that a method is exercised at least min times and at most max times // The expectation is considered complete after being exercised max times func Between(min int, max int) Completion { return &calledBetween{min, max} }
godouble/expectation.go
0.739046
0.650398
expectation.go
starcoder
package digit import ( "math/big" "sort" "strconv" "github.com/jackytck/gowboy/common" ) // SliceInt returns the individual digits as a slice of int. func SliceInt(n int) []int { if n == 0 { return []int{0} } var d []int for n > 0 { d = append([]int{n % 10}, d...) n /= 10 } return d } // SliceIntBig returns the individual digits of a big.Int as a slice of int. func SliceIntBig(n *big.Int) []int { x := big.NewInt(0) if n.Cmp(x) == 0 { return []int{0} } var d []int x.Set(n) zero := big.NewInt(0) ten := big.NewInt(10) for x.Cmp(zero) > 0 { m := big.NewInt(1) x.DivMod(x, ten, m) d = append([]int{int(m.Int64())}, d...) } return d } // ReverseSliceInts reverses a slice of ints, and return the new slice. func ReverseSliceInts(a []int) []int { s := len(a) r := make([]int, s) for i, v := range a { r[s-1-i] = v } return r } // JoinInts joins slice of single digit ints and return it as an int. func JoinInts(slice []int) int { var sum int len := len(slice) - 1 for i, v := range slice { p := 1 for j := 0; j < len-i; j++ { p *= 10 } sum += p * v } return sum } // JoinIntsBig joins slice of single digit ints into a big.Int. func JoinIntsBig(slice []int) *big.Int { sum := big.NewInt(0) ten := big.NewInt(10) p := common.Exp(10, len(slice)-1) for _, v := range slice { z := big.NewInt(1) z.Mul(p, big.NewInt(int64(v))) sum.Add(sum, z) p.Div(p, ten) } return sum } // Sum sums the digit^p of a given number. func Sum(n, p int) *big.Int { s := big.NewInt(0) for _, c := range strconv.Itoa(n) { i, _ := strconv.Atoi(string(c)) s.Add(s, common.Exp(i, p)) } return s } // SumBig sums the digits of a big number. func SumBig(n *big.Int) *big.Int { s := big.NewInt(0) for _, d := range SliceIntBig(n) { z := big.NewInt(int64(d)) s.Add(s, z) } return s } // GetIth returns the i-th digit of a number n. func GetIth(n, i int) int { d := SliceInt(n) if i < 0 || i >= len(d) { return -1 } return d[i] } // ReverseInt reverses a non-negative int. 
func ReverseInt(n int) int { return JoinInts(ReverseSliceInts(SliceInt(n))) } // ReverseIntBig reverses a given big.Int. func ReverseIntBig(n *big.Int) *big.Int { return JoinIntsBig(ReverseSliceInts(SliceIntBig(n))) } // IsPalindromeInt tells if a number is a palindrome, // i.e. reads the same backward as forward. func IsPalindromeInt(n int) bool { return IsPalindromeString(strconv.Itoa(n)) } // IsPalindromeIntBig tells if a big.Int number is a palindrome. func IsPalindromeIntBig(n *big.Int) bool { return IsPalindromeString(n.String()) } // IsPalindromeString tells if a given string is a palindrome. func IsPalindromeString(s string) bool { return s == common.ReverseString(s) } // IsPermuted determines if digits in the two given ints are permuted. func IsPermuted(a, b int) bool { da := SliceInt(a) db := SliceInt(b) sort.Ints(da) sort.Ints(db) return len(da) == len(db) && JoinInts(da) == JoinInts(db) } // IsPandigital determines if a number is pandigital. func IsPandigital(n int) bool { d := SliceInt(n) sort.Ints(d) ans := true for i, v := range d { if i+1 != v { ans = false break } } return ans } // IsBouncy determines if a number is bouncy. // Bouncy number is a positive integer whose digits neither increase nor // decrease. func IsBouncy(n int) bool { if n <= 0 { return false } ds := SliceInt(n) var up, down bool for i := 1; i < len(ds); i++ { if ds[i] > ds[i-1] { up = true } else if ds[i] < ds[i-1] { down = true } if up && down { break } } return up && down }
digit/digit.go
0.735737
0.407687
digit.go
starcoder
package graphql // listTypeCreator is given to newTypeImpl for creating a List. type listTypeCreator struct { typeDef ListTypeDefinition } // listTypeCreator implements typeCreator. var _ typeCreator = (*listTypeCreator)(nil) // TypeDefinition implements typeCreator. func (creator *listTypeCreator) TypeDefinition() TypeDefinition { return creator.typeDef } // LoadDataAndNew implements typeCreator. func (creator *listTypeCreator) LoadDataAndNew() (Type, error) { return &list{}, nil } // Finalize implements typeCreator. func (creator *listTypeCreator) Finalize(t Type, typeDefResolver typeDefinitionResolver) error { // Resolve element type. elementType, err := typeDefResolver(creator.typeDef.ElementType()) if err != nil { return err } else if elementType == nil { return NewError("Must provide an non-nil element type for List.") } list := t.(*list) list.elementType = elementType return nil } // listTypeDefinitionOf wraps a TypeDefinition of the element type and implements // ListTypeDefinition. type listTypeDefinitionOf struct { ThisIsTypeDefinition elementTypeDef TypeDefinition } var _ ListTypeDefinition = listTypeDefinitionOf{} // ElementType implements ListTypeDefinition. func (typeDef listTypeDefinitionOf) ElementType() TypeDefinition { return typeDef.elementTypeDef } // ListOf returns a ListTypeDefinition with the given TypeDefinition of element type. func ListOf(elementTypeDef TypeDefinition) ListTypeDefinition { return listTypeDefinitionOf{ elementTypeDef: elementTypeDef, } } // listTypeDefinitionOfType wraps a Type of the element type and implements // ListTypeDefinition. type listTypeDefinitionOfType struct { ThisIsTypeDefinition elementType Type } var _ ListTypeDefinition = listTypeDefinitionOfType{} // ElementType implements ListTypeDefinition. func (typeDef listTypeDefinitionOfType) ElementType() TypeDefinition { return T(typeDef.elementType) } // ListOfType returns a ListTypeDefinition with the given Type of element type. 
func ListOfType(elementType Type) ListTypeDefinition { return listTypeDefinitionOfType{ elementType: elementType, } } // list is our built-in implementation for List. It is configured with and built from // ListTypeDefinition. type list struct { ThisIsListType elementType Type } var _ List = (*list)(nil) // NewListOfType defines a List type from a given Type of element type. func NewListOfType(elementType Type) (List, error) { return NewList(ListOfType(elementType)) } // MustNewListOfType is a panic-on-fail version of NewListOfType. func MustNewListOfType(elementType Type) List { return MustNewList(ListOfType(elementType)) } // NewListOf defines a List type from a given TypeDefinition of element type. func NewListOf(elementTypeDef TypeDefinition) (List, error) { return NewList(ListOf(elementTypeDef)) } // MustNewListOf is a panic-on-fail version of NewListOf. func MustNewListOf(elementTypeDef TypeDefinition) List { return MustNewList(ListOf(elementTypeDef)) } // NewList defines a List type from a ListTypeDefinition. func NewList(typeDef ListTypeDefinition) (List, error) { t, err := newTypeImpl(&listTypeCreator{ typeDef: typeDef, }) if err != nil { return nil, err } return t.(List), nil } // MustNewList is a convenience function equivalent to NewList but panics on failure instead of // returning an error. func MustNewList(typeDef ListTypeDefinition) List { l, err := NewList(typeDef) if err != nil { panic(err) } return l } // UnwrappedType implements WrappingType. func (l *list) UnwrappedType() Type { return l.ElementType() } // ElementType implements List. func (l *list) ElementType() Type { return l.elementType }
graphql/list.go
0.789356
0.530419
list.go
starcoder
package cart // NewMBC3 returns a new MBC3 memory controller. func NewMBC3(data []byte) BankingController { return &MBC3{ rom: data, romBank: 1, ram: make([]byte, 0x8000), rtc: make([]byte, 0x10), latchedRtc: make([]byte, 0x10), } } // MBC3 is a GameBoy cartridge that supports rom and ram banking and possibly // a real time clock (RTC). type MBC3 struct { rom []byte romBank uint32 ram []byte ramBank uint32 ramEnabled bool rtc []byte latchedRtc []byte latched bool } // Read returns a value at a memory address in the ROM. func (r *MBC3) Read(address uint16) byte { switch { case address < 0x4000: return r.rom[address] // Bank 0 is fixed case address < 0x8000: return r.rom[uint32(address-0x4000)+(r.romBank*0x4000)] // Use selected rom bank default: if r.ramBank >= 0x4 { if r.latched { return r.latchedRtc[r.ramBank] } return r.rtc[r.ramBank] } return r.ram[(0x2000*r.ramBank)+uint32(address-0xA000)] // Use selected ram bank } } // WriteROM attempts to switch the ROM or RAM bank. func (r *MBC3) WriteROM(address uint16, value byte) { switch { case address < 0x2000: // RAM enable r.ramEnabled = (value & 0xA) != 0 case address < 0x4000: // ROM bank number (lower 5) r.romBank = uint32(value & 0x7F) if r.romBank == 0x00 { r.romBank++ } case address < 0x6000: r.ramBank = uint32(value) case address < 0x8000: if value == 0x1 { r.latched = false } else if value == 0x0 { r.latched = true copy(r.rtc, r.latchedRtc) } } } // WriteRAM writes data to the ram or RTC if it is enabled. func (r *MBC3) WriteRAM(address uint16, value byte) { if r.ramEnabled { if r.ramBank >= 0x4 { r.rtc[r.ramBank] = value } else { r.ram[(0x2000*r.ramBank)+uint32(address-0xA000)] = value } } } // GetSaveData returns the save data for this banking controller. func (r *MBC3) GetSaveData() []byte { data := make([]byte, len(r.ram)) copy(data, r.ram) return data } // LoadSaveData loads the save data into the cartridge. func (r *MBC3) LoadSaveData(data []byte) { r.ram = data }
pkg/cart/mbc3.go
0.724091
0.505737
mbc3.go
starcoder
package world import ( "gonum.org/v1/gonum/graph" "gonum.org/v1/gonum/graph/simple" "maze/common" ) // 1 - 5 - 9 // | X | | // 2 - 6 10 // | | | // 3 7 11 // | | | // 4 - 8 - 12 //CreateWorld generates a network of 12 nodes func CreateWorld(tm common.TaskManager) common.World { w := simpleWorld{} var g = simple.NewWeightedUndirectedGraph(1, 10000000) for i := 1; i < 13; i++ { g.AddNode(simple.Node(i)) } g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(1), simple.Node(2), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(1), simple.Node(5), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(1), simple.Node(6), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(2), simple.Node(5), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(2), simple.Node(3), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(2), simple.Node(6), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(3), simple.Node(4), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(4), simple.Node(8), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(8), simple.Node(7), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(7), simple.Node(6), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(6), simple.Node(5), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(5), simple.Node(9), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(9), simple.Node(10), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(10), simple.Node(11), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(11), simple.Node(12), 1)) g.SetWeightedEdge(g.NewWeightedEdge(simple.Node(12), simple.Node(8), 1)) w.grid = g w.tm = tm return &w } // simpleWorld is the base implementation of a fully visible world, backed with Gonum Simple Graph type simpleWorld struct { robots []common.Robot tm common.TaskManager grid *simple.WeightedUndirectedGraph } func (s *simpleWorld) TaskUpdate(taskID common.TaskID, status common.TaskStatus) error { return s.tm.TaskUpdate(taskID, status) } // SetTasks allows the new tasks to be added to the world func 
(s *simpleWorld) AddTasks(tasks []common.Task) bool { s.tm.AddTasks(tasks) return true } // GetAllTasks allows the retrieval of tasks (available only) func (s *simpleWorld) GetAllTasks() []common.Task { return s.tm.GetAllTasks() } // GetAllTasks allows the retrieval of tasks (available only) func (s *simpleWorld) GetTasks(n int) []common.Task { return s.tm.GetAllTasks()[:n] } // GetGraph allows the retrieval of world state. The current implementation returns the full world. This is where visibility can be implemented func (s *simpleWorld) GetGraph() graph.Graph { return s.grid } // ClaimTask defines the mechanism that a Robot can claim a given task from the world func (s *simpleWorld) ClaimTask(tid common.TaskID, rid common.RobotID) (success bool, err error) { err = s.tm.TaskUpdate(tid, common.Assigned) if err != nil { return false, err } else { return true, err } } func (s *simpleWorld) AddTask(t common.Task) bool { s.tm.AddTask(t) return true } // GetRobots implements the functionality for retrieval of list of robots func (s *simpleWorld) GetRobots() []common.Robot { return s.robots } // AddRobots add more robots to the stack func (s *simpleWorld) AddRobots(robots []common.Robot) bool { s.robots = append(s.robots, robots...) return true } func (s *simpleWorld) GetBroadcastInfo() interface{} { return struct{}{} } func (s *simpleWorld) GetNextTask() common.Task { return s.tm.GetNextTask() } // AddRobot function add individual robot to tracking on the world map func (s *simpleWorld) AddRobot(robot common.Robot) bool { s.robots = append(s.robots, robot) return true } func (s *simpleWorld) UpdateRobot(that common.Robot) bool { for i, this := range s.robots { if this.ID() == that.ID() { s.robots[i] = that return true } } return false } func (s *simpleWorld) HasTasks() bool { return s.tm.HasTasks() }
common/world/world.go
0.520984
0.413181
world.go
starcoder
package intset

import (
	"bytes"
	"fmt"
)

// bitsize is the width of uint in bits (32 or 64, depending on platform).
const bitsize = 32 << (^uint(0) >> 63)

// An IntSet is a set of small non-negative integers.
// Its zero value represents the empty set.
type IntSet struct {
	words []uint
}

// Has reports whether the set contains the non-negative value x.
func (s *IntSet) Has(x int) bool {
	word, bit := x/bitsize, uint(x%bitsize)
	return word < len(s.words) && s.words[word]&(1<<bit) != 0
}

// Add adds the non-negative value x to the set.
func (s *IntSet) Add(x int) {
	word, bit := x/bitsize, uint(x%bitsize)
	for word >= len(s.words) {
		s.words = append(s.words, 0)
	}
	s.words[word] |= 1 << bit
}

// AddAll adds the given elements to the set.
func (s *IntSet) AddAll(xs ...int) {
	for i := range xs {
		s.Add(xs[i])
	}
}

// UnionWith sets s to the union of s and t.
func (s *IntSet) UnionWith(t *IntSet) {
	for i, tword := range t.words {
		if i < len(s.words) { // OR logic
			s.words[i] |= tword
		} else {
			s.words = append(s.words, tword)
		}
	}
}

// IntersectWith sets s to the intersect of s and t.
func (s *IntSet) IntersectWith(t *IntSet) {
	for i, tword := range t.words {
		if i < len(s.words) { // AND logic
			s.words[i] &= tword
		}
	}
}

// DifferenceWith sets s to the difference of s and t.
func (s *IntSet) DifferenceWith(t *IntSet) {
	for i, tword := range t.words {
		if i < len(s.words) { // NAND logic
			s.words[i] &^= tword
		}
	}
}

// SymmetricDifference sets s to the symmetric difference of s and t.
func (s *IntSet) SymmetricDifference(t *IntSet) {
	for i, tword := range t.words {
		if i < len(s.words) { // XOR logic
			s.words[i] ^= tword
		} else {
			s.words = append(s.words, tword)
		}
	}
}

// String returns the set as a string of the form "{1 2 3}".
func (s *IntSet) String() string {
	var buf bytes.Buffer
	buf.WriteByte('{')
	for i, word := range s.words {
		if word == 0 {
			continue
		}
		for j := 0; j < bitsize; j++ {
			if word&(1<<uint(j)) != 0 {
				if buf.Len() > len("{") {
					buf.WriteByte(' ')
				}
				fmt.Fprintf(&buf, "%d", bitsize*i+j)
			}
		}
	}
	buf.WriteByte('}')
	return buf.String()
}

// Len returns the number of elements.
func (s *IntSet) Len() int {
	var n int
	for _, word := range s.words {
		for i := 0; i < bitsize; i++ {
			if word&(1<<uint(i)) != 0 {
				n++
			}
		}
	}
	return n
}

// Remove removes x from the set. Removing a value that is not in the set is a
// no-op; previously an x beyond the allocated words caused an index-out-of-range
// panic.
func (s *IntSet) Remove(x int) {
	word, bit := x/bitsize, uint(x%bitsize)
	if word >= len(s.words) {
		return
	}
	s.words[word] &^= 1 << bit
}

// Clear removes all the elements from the set, keeping the allocated storage.
func (s *IntSet) Clear() {
	for i := range s.words {
		s.words[i] = 0 // plain assignment; the old `&= 0` was an obscure spelling of this
	}
}

// Copy returns a copy of the set.
func (s *IntSet) Copy() *IntSet {
	cpy := &IntSet{words: make([]uint, len(s.words))}
	copy(cpy.words, s.words) // one bulk copy instead of per-word appends
	return cpy
}

// Elem returns a slice containing the elements of s.
func (s *IntSet) Elem() []int {
	var elems = make([]int, 0)
	for i := range s.words {
		for j := 0; j < bitsize; j++ {
			if val := s.words[i] & (1 << uint(j)); val != 0 {
				elems = append(elems, i*bitsize+j)
			}
		}
	}
	return elems
}
ch06/ex5/intset.go
0.620737
0.404213
intset.go
starcoder
package threefish import ( "crypto/cipher" ) const ( // Size of a 512-bit block in bytes blockSize512 = 64 // Number of 64-bit words per 512-bit block numWords512 = blockSize512 / 8 // Number of rounds when using a 512-bit cipher numRounds512 = 72 ) type cipher512 struct { t [(tweakSize / 8) + 1]uint64 ks [(numRounds512 / 4) + 1][numWords512]uint64 } // New512 creates a new Threefish cipher with a block size of 512 bits. // The key argument must be 64 bytes and the tweak argument must be 16 bytes. func New512(key, tweak []byte) (cipher.Block, error) { // Length check the provided key if len(key) != blockSize512 { return nil, KeySizeError(blockSize512) } c := new(cipher512) // Load and extend the tweak value if err := calculateTweak(&c.t, tweak); err != nil { return nil, err } // Load and extend the key k := new([numWords512 + 1]uint64) k[numWords512] = c240 for i := 0; i < numWords512; i++ { k[i] = loadWord(key[i*8 : (i+1)*8]) k[numWords512] ^= k[i] } // Calculate the key schedule for s := 0; s <= numRounds512/4; s++ { for i := 0; i < numWords512; i++ { c.ks[s][i] = k[(s+i)%(numWords512+1)] switch i { case numWords512 - 3: c.ks[s][i] += c.t[s%3] case numWords512 - 2: c.ks[s][i] += c.t[(s+1)%3] case numWords512 - 1: c.ks[s][i] += uint64(s) } } } return c, nil } // BlockSize returns the block size of a 512-bit cipher. func (c *cipher512) BlockSize() int { return blockSize512 } // Encrypt loads plaintext from src, encrypts it, and stores it in dst. 
func (c *cipher512) Encrypt(dst, src []byte) { // Load the input in := new([numWords512]uint64) in[0] = loadWord(src[0:8]) in[1] = loadWord(src[8:16]) in[2] = loadWord(src[16:24]) in[3] = loadWord(src[24:32]) in[4] = loadWord(src[32:40]) in[5] = loadWord(src[40:48]) in[6] = loadWord(src[48:56]) in[7] = loadWord(src[56:64]) // Perform encryption rounds for d := 0; d < numRounds512; d += 8 { // Add round key in[0] += c.ks[d/4][0] in[1] += c.ks[d/4][1] in[2] += c.ks[d/4][2] in[3] += c.ks[d/4][3] in[4] += c.ks[d/4][4] in[5] += c.ks[d/4][5] in[6] += c.ks[d/4][6] in[7] += c.ks[d/4][7] // Four rounds of mix and permute in[0] += in[1] in[1] = ((in[1] << 46) | (in[1] >> (64 - 46))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 36) | (in[3] >> (64 - 36))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 19) | (in[5] >> (64 - 19))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 37) | (in[7] >> (64 - 37))) ^ in[6] in[0], in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] in[0] += in[1] in[1] = ((in[1] << 33) | (in[1] >> (64 - 33))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 27) | (in[3] >> (64 - 27))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 14) | (in[5] >> (64 - 14))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 42) | (in[7] >> (64 - 42))) ^ in[6] in[0], in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] in[0] += in[1] in[1] = ((in[1] << 17) | (in[1] >> (64 - 17))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 49) | (in[3] >> (64 - 49))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 36) | (in[5] >> (64 - 36))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 39) | (in[7] >> (64 - 39))) ^ in[6] in[0], in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] in[0] += in[1] in[1] = ((in[1] << 44) | (in[1] >> (64 - 44))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 9) | (in[3] >> (64 - 9))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 54) | (in[5] >> (64 - 54))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 56) | (in[7] >> (64 - 56))) ^ in[6] in[0], 
in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] // Add round key in[0] += c.ks[(d/4)+1][0] in[1] += c.ks[(d/4)+1][1] in[2] += c.ks[(d/4)+1][2] in[3] += c.ks[(d/4)+1][3] in[4] += c.ks[(d/4)+1][4] in[5] += c.ks[(d/4)+1][5] in[6] += c.ks[(d/4)+1][6] in[7] += c.ks[(d/4)+1][7] // Four rounds of mix and permute in[0] += in[1] in[1] = ((in[1] << 39) | (in[1] >> (64 - 39))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 30) | (in[3] >> (64 - 30))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 34) | (in[5] >> (64 - 34))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 24) | (in[7] >> (64 - 24))) ^ in[6] in[0], in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] in[0] += in[1] in[1] = ((in[1] << 13) | (in[1] >> (64 - 13))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 50) | (in[3] >> (64 - 50))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 10) | (in[5] >> (64 - 10))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 17) | (in[7] >> (64 - 17))) ^ in[6] in[0], in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] in[0] += in[1] in[1] = ((in[1] << 25) | (in[1] >> (64 - 25))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 29) | (in[3] >> (64 - 29))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 39) | (in[5] >> (64 - 39))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 43) | (in[7] >> (64 - 43))) ^ in[6] in[0], in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] in[0] += in[1] in[1] = ((in[1] << 8) | (in[1] >> (64 - 8))) ^ in[0] in[2] += in[3] in[3] = ((in[3] << 35) | (in[3] >> (64 - 35))) ^ in[2] in[4] += in[5] in[5] = ((in[5] << 56) | (in[5] >> (64 - 56))) ^ in[4] in[6] += in[7] in[7] = ((in[7] << 22) | (in[7] >> (64 - 22))) ^ in[6] in[0], in[2], in[3], in[4], in[6], in[7] = in[2], in[4], in[7], in[6], in[0], in[3] } // Add the final round key in[0] += c.ks[numRounds512/4][0] in[1] += c.ks[numRounds512/4][1] in[2] += c.ks[numRounds512/4][2] in[3] += c.ks[numRounds512/4][3] in[4] += c.ks[numRounds512/4][4] in[5] += 
c.ks[numRounds512/4][5] in[6] += c.ks[numRounds512/4][6] in[7] += c.ks[numRounds512/4][7] // Store the ciphertext in destination storeWord(dst[0:8], in[0]) storeWord(dst[8:16], in[1]) storeWord(dst[16:24], in[2]) storeWord(dst[24:32], in[3]) storeWord(dst[32:40], in[4]) storeWord(dst[40:48], in[5]) storeWord(dst[48:56], in[6]) storeWord(dst[56:64], in[7]) } // Decrypt loads ciphertext from src, decrypts it, and stores it in dst. func (c *cipher512) Decrypt(dst, src []byte) { // Load the ciphertext ct := new([numWords512]uint64) ct[0] = loadWord(src[0:8]) ct[1] = loadWord(src[8:16]) ct[2] = loadWord(src[16:24]) ct[3] = loadWord(src[24:32]) ct[4] = loadWord(src[32:40]) ct[5] = loadWord(src[40:48]) ct[6] = loadWord(src[48:56]) ct[7] = loadWord(src[56:64]) // Subtract the final round key ct[0] -= c.ks[numRounds512/4][0] ct[1] -= c.ks[numRounds512/4][1] ct[2] -= c.ks[numRounds512/4][2] ct[3] -= c.ks[numRounds512/4][3] ct[4] -= c.ks[numRounds512/4][4] ct[5] -= c.ks[numRounds512/4][5] ct[6] -= c.ks[numRounds512/4][6] ct[7] -= c.ks[numRounds512/4][7] // Perform decryption rounds for d := numRounds512 - 1; d >= 0; d -= 8 { // Four rounds of permute and unmix ct[0], ct[2], ct[3], ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 22)) | ((ct[7] ^ ct[6]) >> 22) ct[6] -= ct[7] ct[5] = ((ct[5] ^ ct[4]) << (64 - 56)) | ((ct[5] ^ ct[4]) >> 56) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 35)) | ((ct[3] ^ ct[2]) >> 35) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 8)) | ((ct[1] ^ ct[0]) >> 8) ct[0] -= ct[1] ct[0], ct[2], ct[3], ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 43)) | ((ct[7] ^ ct[6]) >> 43) ct[6] -= ct[7] ct[5] = ((ct[5] ^ ct[4]) << (64 - 39)) | ((ct[5] ^ ct[4]) >> 39) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 29)) | ((ct[3] ^ ct[2]) >> 29) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 25)) | ((ct[1] ^ ct[0]) >> 25) ct[0] -= ct[1] ct[0], ct[2], ct[3], 
ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 17)) | ((ct[7] ^ ct[6]) >> 17) ct[6] -= ct[7] ct[5] = ((ct[5] ^ ct[4]) << (64 - 10)) | ((ct[5] ^ ct[4]) >> 10) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 50)) | ((ct[3] ^ ct[2]) >> 50) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 13)) | ((ct[1] ^ ct[0]) >> 13) ct[0] -= ct[1] ct[0], ct[2], ct[3], ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 24)) | ((ct[7] ^ ct[6]) >> 24) ct[6] -= ct[7] ct[5] = ((ct[5] ^ ct[4]) << (64 - 34)) | ((ct[5] ^ ct[4]) >> 34) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 30)) | ((ct[3] ^ ct[2]) >> 30) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 39)) | ((ct[1] ^ ct[0]) >> 39) ct[0] -= ct[1] // Subtract round key ct[0] -= c.ks[d/4][0] ct[1] -= c.ks[d/4][1] ct[2] -= c.ks[d/4][2] ct[3] -= c.ks[d/4][3] ct[4] -= c.ks[d/4][4] ct[5] -= c.ks[d/4][5] ct[6] -= c.ks[d/4][6] ct[7] -= c.ks[d/4][7] // Four rounds of permute and unmix ct[0], ct[2], ct[3], ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 56)) | ((ct[7] ^ ct[6]) >> 56) ct[6] -= ct[7] ct[5] = ((ct[5] ^ ct[4]) << (64 - 54)) | ((ct[5] ^ ct[4]) >> 54) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 9)) | ((ct[3] ^ ct[2]) >> 9) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 44)) | ((ct[1] ^ ct[0]) >> 44) ct[0] -= ct[1] ct[0], ct[2], ct[3], ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 39)) | ((ct[7] ^ ct[6]) >> 39) ct[6] -= ct[7] ct[5] = ((ct[5] ^ ct[4]) << (64 - 36)) | ((ct[5] ^ ct[4]) >> 36) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 49)) | ((ct[3] ^ ct[2]) >> 49) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 17)) | ((ct[1] ^ ct[0]) >> 17) ct[0] -= ct[1] ct[0], ct[2], ct[3], ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 42)) | ((ct[7] ^ ct[6]) >> 42) ct[6] -= ct[7] ct[5] = 
((ct[5] ^ ct[4]) << (64 - 14)) | ((ct[5] ^ ct[4]) >> 14) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 27)) | ((ct[3] ^ ct[2]) >> 27) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 33)) | ((ct[1] ^ ct[0]) >> 33) ct[0] -= ct[1] ct[0], ct[2], ct[3], ct[4], ct[6], ct[7] = ct[6], ct[0], ct[7], ct[2], ct[4], ct[3] ct[7] = ((ct[7] ^ ct[6]) << (64 - 37)) | ((ct[7] ^ ct[6]) >> 37) ct[6] -= ct[7] ct[5] = ((ct[5] ^ ct[4]) << (64 - 19)) | ((ct[5] ^ ct[4]) >> 19) ct[4] -= ct[5] ct[3] = ((ct[3] ^ ct[2]) << (64 - 36)) | ((ct[3] ^ ct[2]) >> 36) ct[2] -= ct[3] ct[1] = ((ct[1] ^ ct[0]) << (64 - 46)) | ((ct[1] ^ ct[0]) >> 46) ct[0] -= ct[1] // Subtract round key ct[0] -= c.ks[(d/4)-1][0] ct[1] -= c.ks[(d/4)-1][1] ct[2] -= c.ks[(d/4)-1][2] ct[3] -= c.ks[(d/4)-1][3] ct[4] -= c.ks[(d/4)-1][4] ct[5] -= c.ks[(d/4)-1][5] ct[6] -= c.ks[(d/4)-1][6] ct[7] -= c.ks[(d/4)-1][7] } // Store decrypted value in destination storeWord(dst[0:8], ct[0]) storeWord(dst[8:16], ct[1]) storeWord(dst[16:24], ct[2]) storeWord(dst[24:32], ct[3]) storeWord(dst[32:40], ct[4]) storeWord(dst[40:48], ct[5]) storeWord(dst[48:56], ct[6]) storeWord(dst[56:64], ct[7]) }
threefish/threefish512.go
0.593963
0.469459
threefish512.go
starcoder
// encapsulates standard host entities into a simple interface package wasmlib import ( "encoding/binary" "strconv" ) // used to retrieve any information that is related to colored token balances type ScBalances struct { balances ScImmutableMap } // retrieve the balance for the specified token color func (ctx ScBalances) Balance(color ScColor) int64 { return ctx.balances.GetInt64(color).Value() } // retrieve a list of all token colors that have a non-zero balance func (ctx ScBalances) Colors() ScImmutableColorArray { return ctx.balances.GetColorArray(KeyColor) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScTransfers struct { transfers ScMutableMap } // create a new transfers object ready to add token transfers func NewScTransfers() ScTransfers { return ScTransfers{transfers: *NewScMutableMap()} } // create a new transfers object from a balances object func NewScTransfersFromBalances(balances ScBalances) ScTransfers { transfers := NewScTransfers() colors := balances.Colors() length := colors.Length() for i := int32(0); i < length; i++ { color := colors.GetColor(i).Value() transfers.Set(color, balances.Balance(color)) } return transfers } // create a new transfers object and initialize it with the specified amount of iotas func NewScTransferIotas(amount int64) ScTransfers { return NewScTransfer(IOTA, amount) } // create a new transfers object and initialize it with the specified token transfer func NewScTransfer(color ScColor, amount int64) ScTransfers { transfer := NewScTransfers() transfer.Set(color, amount) return transfer } // set the specified colored token transfer in the transfers object // note that this will overwrite any previous amount for the specified color func (ctx ScTransfers) Set(color ScColor, amount int64) { ctx.transfers.GetInt64(color).SetValue(amount) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScUtility struct { utility ScMutableMap } // decodes the specified 
base58-encoded string value to its original bytes func (ctx ScUtility) Base58Decode(value string) []byte { return ctx.utility.CallFunc(KeyBase58Decode, []byte(value)) } // encodes the specified bytes to a base-58-encoded string func (ctx ScUtility) Base58Encode(value []byte) string { return string(ctx.utility.CallFunc(KeyBase58Encode, value)) } func (ctx ScUtility) BlsAddressFromPubKey(pubKey []byte) ScAddress { result := ctx.utility.CallFunc(KeyBlsAddress, pubKey) return NewScAddressFromBytes(result) } func (ctx ScUtility) BlsAggregateSignatures(pubKeys, sigs [][]byte) ([]byte, []byte) { encode := NewBytesEncoder() encode.Int32(int32(len(pubKeys))) for _, pubKey := range pubKeys { encode.Bytes(pubKey) } encode.Int32(int32(len(sigs))) for _, sig := range sigs { encode.Bytes(sig) } result := ctx.utility.CallFunc(KeyBlsAggregate, encode.Data()) decode := NewBytesDecoder(result) return decode.Bytes(), decode.Bytes() } func (ctx ScUtility) BlsValidSignature(data, pubKey, signature []byte) bool { encode := NewBytesEncoder().Bytes(data).Bytes(pubKey).Bytes(signature) result := ctx.utility.CallFunc(KeyBlsValid, encode.Data()) return len(result) != 0 } func (ctx ScUtility) Ed25519AddressFromPubKey(pubKey []byte) ScAddress { result := ctx.utility.CallFunc(KeyEd25519Address, pubKey) return NewScAddressFromBytes(result) } func (ctx ScUtility) Ed25519ValidSignature(data, pubKey, signature []byte) bool { encode := NewBytesEncoder().Bytes(data).Bytes(pubKey).Bytes(signature) result := ctx.utility.CallFunc(KeyEd25519Valid, encode.Data()) return len(result) != 0 } // hashes the specified value bytes using blake2b hashing and returns the resulting 32-byte hash func (ctx ScUtility) HashBlake2b(value []byte) ScHash { result := ctx.utility.CallFunc(KeyHashBlake2b, value) return NewScHashFromBytes(result) } // hashes the specified value bytes using sha3 hashing and returns the resulting 32-byte hash func (ctx ScUtility) HashSha3(value []byte) ScHash { result := 
ctx.utility.CallFunc(KeyHashSha3, value) return NewScHashFromBytes(result) } // hashes the specified value bytes using blake2b hashing and returns the resulting 32-byte hash func (ctx ScUtility) Hname(value string) ScHname { result := ctx.utility.CallFunc(KeyHname, []byte(value)) return NewScHnameFromBytes(result) } // converts an integer to its string representation func (ctx ScUtility) String(value int64) string { return strconv.FormatInt(value, 10) } // wrapper for simplified use by hashtypes func base58Encode(bytes []byte) string { return ScFuncContext{}.Utility().Base58Encode(bytes) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // shared interface part of ScFuncContext and ScViewContext type ScBaseContext struct{} // retrieve the agent id of this contract account func (ctx ScBaseContext) AccountID() ScAgentID { return Root.GetAgentID(KeyAccountID).Value() } // access the current balances for all token colors func (ctx ScBaseContext) Balances() ScBalances { return ScBalances{Root.GetMap(KeyBalances).Immutable()} } // retrieve the chain id of the chain this contract lives on func (ctx ScBaseContext) ChainID() ScChainID { return Root.GetChainID(KeyChainID).Value() } // retrieve the agent id of the owner of the chain this contract lives on func (ctx ScBaseContext) ChainOwnerID() ScAgentID { return Root.GetAgentID(KeyChainOwnerID).Value() } // retrieve the hname of this contract func (ctx ScBaseContext) Contract() ScHname { return Root.GetHname(KeyContract).Value() } // retrieve the agent id of the creator of this contract func (ctx ScBaseContext) ContractCreator() ScAgentID { return Root.GetAgentID(KeyContractCreator).Value() } // logs informational text message func (ctx ScBaseContext) Log(text string) { Log(text) } // logs error text message and then panics func (ctx ScBaseContext) Panic(text string) { Panic(text) } // retrieve parameters passed to the smart contract function that was called func (ctx ScBaseContext) Params() 
ScImmutableMap { return Root.GetMap(KeyParams).Immutable() } // panics if condition is not satisfied func (ctx ScBaseContext) Require(cond bool, msg string) { if !cond { Panic(msg) } } // any results returned by the smart contract function call are returned here func (ctx ScBaseContext) Results() ScMutableMap { return Root.GetMap(KeyResults) } // deterministic time stamp fixed at the moment of calling the smart contract func (ctx ScBaseContext) Timestamp() int64 { return Root.GetInt64(KeyTimestamp).Value() } // logs debugging trace text message func (ctx ScBaseContext) Trace(text string) { Trace(text) } // access diverse utility functions func (ctx ScBaseContext) Utility() ScUtility { return ScUtility{Root.GetMap(KeyUtility)} } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // smart contract interface with mutable access to state type ScFuncContext struct { ScBaseContext } var _ ScFuncCallContext = &ScFuncContext{} // calls a smart contract function func (ctx ScFuncContext) Call(hContract, hFunction ScHname, params *ScMutableMap, transfer *ScTransfers) ScImmutableMap { encode := NewBytesEncoder() encode.Hname(hContract) encode.Hname(hFunction) if params != nil { encode.Int32(params.objID) } else { encode.Int32(0) } if transfer != nil { encode.Int32(transfer.transfers.objID) } else { encode.Int32(0) } Root.GetBytes(KeyCall).SetValue(encode.Data()) return Root.GetMap(KeyReturn).Immutable() } // retrieve the agent id of the caller of the smart contract func (ctx ScFuncContext) Caller() ScAgentID { return Root.GetAgentID(KeyCaller).Value() } // deploys a smart contract func (ctx ScFuncContext) Deploy(programHash ScHash, name, description string, params *ScMutableMap) { encode := NewBytesEncoder() encode.Hash(programHash) encode.String(name) encode.String(description) if params != nil { encode.Int32(params.objID) } else { encode.Int32(0) } Root.GetBytes(KeyDeploy).SetValue(encode.Data()) } // signals an event on the node that external 
entities can subscribe to func (ctx ScFuncContext) Event(text string) { Root.GetString(KeyEvent).SetValue(text) } func (ctx ScFuncContext) Host() ScHost { return nil } // access the incoming balances for all token colors func (ctx ScFuncContext) Incoming() ScBalances { return ScBalances{Root.GetMap(KeyIncoming).Immutable()} } func (ctx ScFuncContext) InitFuncCallContext() { } func (ctx ScFuncContext) InitViewCallContext() { } // retrieve the tokens that were minted in this transaction func (ctx ScFuncContext) Minted() ScBalances { return ScBalances{Root.GetMap(KeyMinted).Immutable()} } // (delayed) posts a smart contract function func (ctx ScFuncContext) Post(chainID ScChainID, hContract, hFunction ScHname, params *ScMutableMap, transfer ScTransfers, delay int32) { encode := NewBytesEncoder() encode.ChainID(chainID) encode.Hname(hContract) encode.Hname(hFunction) if params != nil { encode.Int32(params.objID) } else { encode.Int32(0) } encode.Int32(transfer.transfers.objID) encode.Int32(delay) Root.GetBytes(KeyPost).SetValue(encode.Data()) } // TODO expose Entropy function // generates a random value from 0 to max (exclusive max) using a deterministic RNG func (ctx ScFuncContext) Random(max int64) int64 { if max == 0 { ctx.Panic("random: max parameter should be non-zero") } state := ScMutableMap{objID: OBJ_ID_STATE} rnd := state.GetBytes(KeyRandom) seed := rnd.Value() if len(seed) == 0 { seed = Root.GetBytes(KeyRandom).Value() } rnd.SetValue(ctx.Utility().HashSha3(seed).Bytes()) return int64(binary.LittleEndian.Uint64(seed[:8]) % uint64(max)) } // retrieve the request id of this transaction func (ctx ScFuncContext) RequestID() ScRequestID { return Root.GetRequestID(KeyRequestID).Value() } // access to mutable state storage func (ctx ScFuncContext) State() ScMutableMap { return Root.GetMap(KeyState) } // transfer colored token amounts to the specified Tangle ledger address func (ctx ScFuncContext) TransferToAddress(address ScAddress, transfer ScTransfers) { transfers 
:= Root.GetMapArray(KeyTransfers) tx := transfers.GetMap(transfers.Length()) tx.GetAddress(KeyAddress).SetValue(address) tx.GetInt32(KeyBalances).SetValue(transfer.transfers.objID) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // smart contract interface with immutable access to state type ScViewContext struct { ScBaseContext } var _ ScViewCallContext = &ScViewContext{} // calls a smart contract function func (ctx ScViewContext) Call(contract, function ScHname, params *ScMutableMap) ScImmutableMap { encode := NewBytesEncoder() encode.Hname(contract) encode.Hname(function) if params != nil { encode.Int32(params.objID) } else { encode.Int32(0) } encode.Int32(0) Root.GetBytes(KeyCall).SetValue(encode.Data()) return Root.GetMap(KeyReturn).Immutable() } func (ctx ScViewContext) InitViewCallContext() { } // access to immutable state storage func (ctx ScViewContext) State() ScImmutableMap { return Root.GetMap(KeyState).Immutable() }
packages/vm/wasmlib/go/wasmlib/context.go
0.832203
0.541954
context.go
starcoder
package quantity import ( "errors" "fmt" "math" "strconv" "strings" "time" ) // Quantity represents a physical quantity: a value and a unit. // The units have to be registered in the unit table with DefineUnit. type Quantity struct { value float64 *Unit } // String returns a default string representation of the Quantity func (m Quantity) String() string { return m.Format(DefaultFormat) } // Inspect returns a string representation of the Quantity for debugging func (m Quantity) Inspect() string { return fmt.Sprintf("%f %s -> %f %s %v", m.value, m.symbol, m.factor, makeSymbol(m.exponents), m.exponents) } // Format returns a string representation of the Quantity according to the // format string passed in. The first argument of the format string is the value, // the second one is the unit. The unit and value can be swapped by using // format string indexes such as in "%[2]s %.2[1]f". If only one argument is to be // used, then an index must be used as well, e.g. "%[1]e radians". // A better way to format quantities is by using a Context. func (m Quantity) Format(format string) string { var a, b interface{} if m.Unit == nil { a, b = m.value, "?" } else { a, b = m.value, m.symbol } return fmt.Sprintf(format, a, b) } // Split returns the value and the unit symbol of the Quantity func (m Quantity) Split() (float64, string) { return m.value, m.symbol } // Value returns only the value part of the Quantity. func (m Quantity) Value() float64 { return m.value } // Convert a quantity to another compatible unit. func (m Quantity) Convert(u *Unit) Quantity { return Quantity{m.value * m.factor / u.factor, u} } // ConvertTo creates and returns a new Quantity that has undergone conversion to the given unit. // It also returns true/false to indicate success/failure. The conversion fails if the given unit // cannot be found or calculated, or if that unit is not compatible. 
func (m Quantity) ConvertTo(u string) (Quantity, bool) { target := UnitFor(u) compatible := haveSameExponents(m.exponents, target.exponents) if target == nil || !compatible { return Quantity{}, false } f := target.factor / m.factor return Quantity{m.value / f, target}, true } // In returns a Quantity converted to the given unit. No unit compatibility check is // performed. If the target unit is not compatible the function will return garbage. func (m Quantity) In(u string) Quantity { target := UnitFor(u) return Quantity{m.value * m.factor / target.factor, target} } // Q returns a Quantity with the given value and unit. func Q(value float64, symbol string) Quantity { u := UnitFor(symbol) if u == &UndefinedUnit { panic(fmt.Sprintf("undefined unit: %s", symbol)) } return Quantity{value, u} } // Parse can be used to parse text input. The input is expected to contain a number // followed by a unit string. Whitespace between number and unit string is optional. // The number can have a negative sign and optional group separators (,). // The unit string has to be a registered unit symbol using the dot and slash to connect // factors, numbers for exponents and optional minus signs, e.g. "-1,500 N.m/s2" = // -1500 newton meter per square second. This function returns the Quantity and an // error which is nil in case the string has been correctly parsed into a Quantity. 
func Parse(s string) (Quantity, error) { undef := Quantity{0, &UndefinedUnit} match := muRx.FindStringSubmatch(s) if len(match) != 3 { return undef, errors.New("invalid quantity format [" + s + "]") } f := match[1] if strings.Count(f, ".") > 1 { return undef, errors.New("more than one decimal point in [" + s + "]") } f = strings.Replace(f, ",", "", -1) value, err := strconv.ParseFloat(f, 64) if err != nil { return undef, err } sym := strings.Trim(match[2], " \r\n\t") mu, err := ParseSymbol(sym) if err != nil { return undef, err } return Quantity{value, mu.Unit}, nil } // Invalid checks if the Quantity is valid, i.e. if it has a unit. func (m Quantity) Invalid() bool { return m.Unit == nil } // AreCompatible checks if two quantities are compatible. Compatibility means the exponents // of the SI base units are the same. A return value of true means the quantities // have compatible units. func AreCompatible(a, b Quantity) bool { return haveSameExponents(a.exponents, b.exponents) } // HasCompatibleUnit check whether the Quantity can be converted to the given unit. func (m Quantity) HasCompatibleUnit(symbol string) bool { return haveSameExponents(m.exponents, UnitFor(symbol).exponents) } func check(a, b Quantity) { if PanicOnIncompatibleUnits && !haveSameExponents(a.exponents, b.exponents) { panic(fmt.Sprintf("units not compatible: %q <> %q", a, b)) } } // Add adds 2 Quantities that should have compatible units. If not compatible // a panic happens or garbage is returned, depending on the setting of GOUNITSPANIC environment // variable: 1 = panic, else no panic. // The returned Quantity will be represented in SI units. This can be converted // to the desired units with methods In or ConvertTo. func Add(a, b Quantity) Quantity { check(a, b) u := &Unit{"", 1, a.exponents} u.setSymbol() return Quantity{a.value*a.factor + b.value*b.factor, u} } // Sum adds one or more Quantities. The Quantities should have compatible units. 
// If not compatible a panic happens or garbage is returned, depending on the setting // of GOUNITSPANIC environment variable: 1 = panic, else no panic. func Sum(a Quantity, more ...Quantity) Quantity { return multi(a, func(m *float64, b Quantity) { *m += b.value * b.factor }, more) } // Subtract subtracts the second argument from the first one. Compatible units are required. func Subtract(a, b Quantity) Quantity { return Add(a, Neg(b)) } // Diff can be used to do multiple subtractions from the first argument. Compatible units are // required. func Diff(a Quantity, more ...Quantity) Quantity { return multi(a, func(m *float64, b Quantity) { *m -= b.value * b.factor }, more) } func multi( a Quantity, op func(*float64, Quantity), more []Quantity) Quantity { result := a.value * a.factor for _, b := range more { check(a, b) op(&result, b) } u := &Unit{"", 1, a.exponents} u.setSymbol() return Quantity{result, u} } // Neg negates a Quantity value. The unit does not change. func Neg(a Quantity) Quantity { return Quantity{-a.value, a.Unit} } // Mult multiplies 2 Quantities. A new unit will be calculated. The returned Quantity will // have SI units. Use In or ConvertTo to convert it to the desired unit. func Mult(a, b Quantity) Quantity { return Quantity{a.value * a.factor * b.value * b.factor, addu(a.Unit, b.Unit)} } // Div divides the first argument by the second. A new unit will be calculated. // The returned Quantity will have SI units. Use In or ConvertTo to convert it to the desired unit. func Div(a, b Quantity) Quantity { return Quantity{(a.value * a.factor) / (b.value * b.factor), subu(a.Unit, b.Unit)} } // Reciprocal calculates 1 divided by the given Quantity. The unit changes accordingly but // will be represented in SI units. func Reciprocal(a Quantity) Quantity { u := &Unit{"", 1, negx(a.exponents)} u.setSymbol() return Quantity{1 / (a.value * a.factor), u} } // MultFac multiplies a Quantity with a factor and returns the new Quantity. The unit // does not change. 
func MultFac(m Quantity, f float64) Quantity {
	return Quantity{m.value * f, m.Unit}
}

// DivFac divides a Quantity by a factor and returns the new Quantity. The unit does not
// change.
func DivFac(m Quantity, f float64) Quantity {
	return Quantity{m.value / f, m.Unit}
}

// Power raises the Quantity to the given power n. The exponents of the resulting unit must
// be in the range -128..127.
func Power(a Quantity, n int8) Quantity {
	u := &Unit{"", 1, mapexp(a.exponents, func(e int8) int8 { return e * n })}
	u.setSymbol()
	return Quantity{math.Pow(a.value*a.factor, float64(n)), u}
}

// Abs returns the absolute of Quantity: the result is always >= 0.
func Abs(a Quantity) Quantity {
	if a.value >= 0 {
		return a
	}
	return Neg(a)
}

// Equal checks if two Quantities are equal. A tolerance epsilon is allowed, this value should
// be much smaller compared to the two Quantities being compared. All arguments must have
// compatible units.
func Equal(a, b, epsilon Quantity) bool {
	check(a, b)
	check(a, epsilon)
	return Abs(Subtract(a, b)).value < epsilon.value*epsilon.factor
}

// More checks if the first argument is greater than the second.
func More(a, b Quantity) bool {
	check(a, b)
	return a.ToSI().Value() > b.ToSI().Value()
}

// Less checks if the first argument is less than the second.
func Less(a, b Quantity) bool {
	check(a, b)
	return a.ToSI().Value() < b.ToSI().Value()
}

// ToSI returns a converted Quantity represented in SI units.
func (m Quantity) ToSI() Quantity {
	factor, u := m.toSI()
	return Quantity{m.value * factor, &u}
}

// Normalize changes the Quantity in place to SI units.
func (m *Quantity) Normalize() {
	m.value *= m.factor
	m.Unit = &Unit{makeSymbol(m.exponents), 1, m.exponents}
}

// Duration converts a Quantity with a duration unit to a time.Duration.
// An error or nil is provided as second return value.
func Duration(m Quantity) (time.Duration, error) { if si, ok := m.ConvertTo("s"); ok { return time.Duration(si.Value()) * time.Second, nil } return time.Duration(0), errors.New("not a Duration: " + m.String()) } // Quantities is a slice of Quantity values. Useful for sorting. type Quantities []Quantity // Len is used by Sort func (a Quantities) Len() int { return len(a) } // Swap is used by Sort func (a Quantities) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Less is used by Sort func (a Quantities) Less(i, j int) bool { return Less(a[i], a[j]) }
quantity/quantity.go
0.912787
0.613381
quantity.go
starcoder
package dungeongen import ( "errors" "log" ) //RoomData ... type RoomData struct { X int Y int Width int Height int IsConnected bool Visited bool Section uint8 doors []RoomDoor } //NewRoomData creates a new room data instance func NewRoomData(x int, y int, width int, height int) *RoomData { return &RoomData{ X: x, Y: y, Width: width, Height: height, IsConnected: false, Visited: false, doors: nil, } } //RoomDoor... room door type RoomDoor struct { Direction int Position Vec2D } //NewRoomDoor ... creates a new room door func NewRoomDoor(direction int, pos Vec2D) RoomDoor { if direction < 0 || direction > 3 { log.Fatal("room direction not between 0 and 4") } return RoomDoor{ Direction: direction, Position: pos, } } //GetWallForPosition ... func (r *RoomData) GetWallForPosition(x, y int) (int, error) { // west wall if x == r.X && y >= r.Y && y <= (r.Y+r.Height) { return DirectionWest, nil } // north wall if y == r.Y && x >= r.X && x <= (r.X+r.Width) { return DirectionNorth, nil } // east wall if x == (r.X+r.Width) && y >= r.Y && y <= (r.Y+r.Height) { return DirectionEast, nil } // south wall if y == (r.Y+r.Height) && x >= r.X && x <= (r.X+r.Width) { return DirectionSouth, nil } return -1, errors.New("Position not on wall") } // HasDoor ...returns if room has at least one door func (r *RoomData) HasDoor(direction int) bool { for _, door := range r.doors { if door.Direction == direction { return true } } return false } // AddDoor ... func (r *RoomData) AddDoor(direction int, pos Vec2D) { r.doors = append(r.doors, NewRoomDoor(direction, pos)) } // Doors ... 
func (r *RoomData) Doors() []RoomDoor {
	return r.doors
}

// IsCorner reports whether (x, y) is one of the four corner cells of the
// room; doors must not be placed there.
func (r *RoomData) IsCorner(x, y int) bool {
	onVerticalEdge := x == r.X || x == r.X+r.Width
	onHorizontalEdge := y == r.Y || y == r.Y+r.Height
	return onVerticalEdge && onHorizontalEdge
}

// Collides returns true if the two rooms overlap.
func (r *RoomData) Collides(r2 RoomData) bool {
	return r.X < r2.X+r2.Width &&
		r.X+r.Width > r2.X &&
		r.Y < r2.Y+r2.Height &&
		r.Y+r.Height > r2.Y
}

// IsInside returns true if the point lies within the bounds of the room,
// edges included.
func (r *RoomData) IsInside(x, y int) bool {
	inX := x >= r.X && x <= r.X+r.Width
	inY := y >= r.Y && y <= r.Y+r.Height
	return inX && inY
}

// Extrude returns a copy of the room grown (or shrunk, for a negative
// factor) by factor cells on every side.
// NOTE(review): Visited and Section are not carried over to the extruded
// copy — confirm that is intentional.
func (r *RoomData) Extrude(factor int) *RoomData {
	return &RoomData{
		X:           r.X - factor,
		Y:           r.Y - factor,
		Width:       r.Width + 2*factor,
		Height:      r.Height + 2*factor,
		IsConnected: r.IsConnected,
		doors:       r.Doors(),
	}
}
pkg/dungeongen/roomdata.go
0.661923
0.415492
roomdata.go
starcoder
package b1t8 import ( "errors" "fmt" "github.com/iotaledger/iota.go/trinary" ) const ( tritsPerByte = 8 ) // EncodedLen returns the trit-length of an encoding of n source bytes. func EncodedLen(n int) int { return n * tritsPerByte } // Encode encodes src into EncodedLen(len(src)) trits of dst. As a convenience, it returns the number of trits written, // but this value is always EncodedLen(len(src)). // Encode implements the b1t8 encoding converting a bit string into ternary. func Encode(dst trinary.Trits, src []byte) int { for _, b := range src { _ = dst[7] // early bounds check to guarantee safety of writes below dst[0] = int8(b & 0x01 >> 0) dst[1] = int8(b & 0x02 >> 1) dst[2] = int8(b & 0x04 >> 2) dst[3] = int8(b & 0x08 >> 3) dst[4] = int8(b & 0x10 >> 4) dst[5] = int8(b & 0x20 >> 5) dst[6] = int8(b & 0x40 >> 6) dst[7] = int8(b & 0x80 >> 7) dst = dst[8:] } return EncodedLen(len(src)) } var ( // ErrInvalidLength reports an attempt to decode an input which has a trit-length that is not a multiple of 8. ErrInvalidLength = errors.New("length must be a multiple of 8 trits") // ErrInvalidTrit reports an attempt to decode an input that contains an invalid trit sequence. ErrInvalidTrit = errors.New("invalid trits") ) // DecodedLen returns the byte-length of a decoding of n source trits. func DecodedLen(n int) int { return n / tritsPerByte } // Decode decodes src into DecodedLen(len(src)) bytes of dst and returns the actual number of bytes written. // Decode expects that src contains a valid b1t8 encoding and that src has a length that is a multiple of 8, // it returns an error otherwise. // If the input is malformed, Decode returns the number of bytes decoded before the error. 
func Decode(dst []byte, src trinary.Trits) (int, error) { i := 0 for len(src) >= tritsPerByte { var b byte for j := 0; j < tritsPerByte; j++ { trit := uint(src[j]) if trit > 1 { return i, fmt.Errorf("%w: %d", ErrInvalidTrit, src[j]) } b |= byte(trit << j) } dst[i] = b src = src[tritsPerByte:] i++ } if len(src) > 0 { // Check for invalid char before reporting bad length, // since the invalid trit (if present) is an earlier problem. for _, t := range src { if byte(t) > 1 { return i, fmt.Errorf("%w: %d", ErrInvalidTrit, t) } } return i, ErrInvalidLength } return i, nil }
pkg/encoding/b1t8/b1t8.go
0.743168
0.437163
b1t8.go
starcoder
// Command custommetric creates a custom metric and writes TimeSeries value // to it. It writes a GAUGE measurement, which is a measure of value at a // specific point in time. This means the startTime and endTime of the interval // are the same. To make it easier to see the output, a random value is written. // When reading the TimeSeries back, a window of the last 5 minutes is used. package main import ( "context" "encoding/json" "fmt" "log" "math/rand" "os" "time" "cloud.google.com/go/monitoring/apiv3" timestamp "github.com/golang/protobuf/ptypes/timestamp" "google.golang.org/api/iterator" "google.golang.org/genproto/googleapis/api/label" "google.golang.org/genproto/googleapis/api/metric" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredres "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) const metricType = "custom.googleapis.com/custom_measurement" func projectResource(projectID string) string { return "projects/" + projectID } // [START monitoring_create_metric] // createCustomMetric creates a custom metric specified by the metric type. 
func createCustomMetric(projectID, metricType string) error { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { return err } md := &metric.MetricDescriptor{ Name: "Custom Metric", Type: metricType, Labels: []*label.LabelDescriptor{{ Key: "environment", ValueType: label.LabelDescriptor_STRING, Description: "An arbitrary measurement", }}, MetricKind: metric.MetricDescriptor_GAUGE, ValueType: metric.MetricDescriptor_INT64, Unit: "s", Description: "An arbitrary measurement", DisplayName: "Custom Metric", } req := &monitoringpb.CreateMetricDescriptorRequest{ Name: "projects/" + projectID, MetricDescriptor: md, } resp, err := c.CreateMetricDescriptor(ctx, req) if err != nil { return fmt.Errorf("could not create custom metric: %v", err) } log.Printf("createCustomMetric: %s\n", formatResource(resp)) return nil } // [END monitoring_create_metric] // [START monitoring_list_descriptors] // getCustomMetric reads the custom metric created. func getCustomMetric(projectID, metricType string) (*metric.MetricDescriptor, error) { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { return nil, err } req := &monitoringpb.GetMetricDescriptorRequest{ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", projectID, metricType), } resp, err := c.GetMetricDescriptor(ctx, req) if err != nil { return nil, fmt.Errorf("could not get custom metric: %v", err) } log.Printf("getCustomMetric: %s\n", formatResource(resp)) return resp, nil } // [END monitoring_list_descriptors] // [START monitoring_delete_metric] // deleteMetric deletes the given metric. 
func deleteMetric(projectID, metricType string) error { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { return err } metricResource := "projects/" + projectID + "/metricDescriptors/" + metricType req := &monitoringpb.DeleteMetricDescriptorRequest{ Name: metricResource, } err = c.DeleteMetricDescriptor(ctx, req) if err != nil { return fmt.Errorf("could not delete metric: %v", err) } log.Printf("Deleted metric: %q\n", metricType) return nil } // [END monitoring_delete_metric] // [START monitoring_write_timeseries] // writeTimeSeriesValue writes a value for the custom metric created func writeTimeSeriesValue(projectID, metricType string) error { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { return err } now := &timestamp.Timestamp{ Seconds: time.Now().Unix(), } req := &monitoringpb.CreateTimeSeriesRequest{ Name: projectResource(projectID), TimeSeries: []*monitoringpb.TimeSeries{{ Metric: &metricpb.Metric{ Type: metricType, Labels: map[string]string{ "environment": "STAGING", }, }, Resource: &monitoredres.MonitoredResource{ Type: "gce_instance", Labels: map[string]string{ "instance_id": "test-instance", "zone": "us-central1-f", }, }, Points: []*monitoringpb.Point{{ Interval: &monitoringpb.TimeInterval{ StartTime: now, EndTime: now, }, Value: &monitoringpb.TypedValue{ Value: &monitoringpb.TypedValue_Int64Value{ Int64Value: rand.Int63n(10), }, }, }}, }}, } log.Printf("writeTimeseriesRequest: %s\n", formatResource(req)) err = c.CreateTimeSeries(ctx, req) if err != nil { return fmt.Errorf("could not write time series value, %v ", err) } return nil } // [END monitoring_write_timeseries] // [START monitoring_read_timeseries_simple] // readTimeSeriesValue reads the TimeSeries for the value specified by metric type in a time window from the last 5 minutes. 
func readTimeSeriesValue(projectID, metricType string) error { ctx := context.Background() c, err := monitoring.NewMetricClient(ctx) if err != nil { return err } startTime := time.Now().UTC().Add(time.Minute * -5).Unix() endTime := time.Now().UTC().Unix() req := &monitoringpb.ListTimeSeriesRequest{ Name: projectResource(projectID), Filter: fmt.Sprintf("metric.type=\"%s\"", metricType), Interval: &monitoringpb.TimeInterval{ StartTime: &timestamp.Timestamp{Seconds: startTime}, EndTime: &timestamp.Timestamp{Seconds: endTime}, }, } iter := c.ListTimeSeries(ctx, req) var series []*monitoringpb.TimeSeries for { resp, err := iter.Next() if err == iterator.Done { break } if err != nil { return fmt.Errorf("could not read time series value, %v ", err) } series = append(series, resp) } log.Printf("readTimeseriesValue: %s\n", formatResource(series)) return nil } // [END monitoring_read_timeseries_simple] func main() { rand.Seed(time.Now().UTC().UnixNano()) if len(os.Args) < 2 { fmt.Println("Usage: custommetric <project_id>") return } projectID := os.Args[1] // Create the metric. if err := createCustomMetric(projectID, metricType); err != nil { log.Fatal(err) } // Wait until the new metric can be read back. for { resp, err := getCustomMetric(projectID, metricType) if err != nil { log.Fatal(err) } if resp != nil { break } time.Sleep(2 * time.Second) } // Write a TimeSeries value for that metric if err := writeTimeSeriesValue(projectID, metricType); err != nil { log.Fatal(err) } time.Sleep(2 * time.Second) // Read the TimeSeries for the last 5 minutes for that metric. if err := readTimeSeriesValue(projectID, metricType); err != nil { log.Fatal(err) } if err := deleteMetric(projectID, metricType); err != nil { log.Fatal(err) } } // formatResource marshals a response object as JSON. func formatResource(resource interface{}) []byte { b, err := json.MarshalIndent(resource, "", " ") if err != nil { panic(err) } return b }
monitoring/custommetric/custommetric.go
0.801509
0.412057
custommetric.go
starcoder
package aduket import ( "encoding/json" "encoding/xml" "io" "io/ioutil" "net/http" "net/url" "testing" "github.com/clbanning/mxj" "github.com/labstack/echo" "github.com/stretchr/testify/assert" ) type RequestRecorder struct { Body Body Header http.Header Data []byte Params map[string]string QueryParams url.Values FormParams url.Values } type Body map[string]interface{} func NewRequestRecorder() *RequestRecorder { requestRecorder := &RequestRecorder{} requestRecorder.Body = make(Body) requestRecorder.Params = make(map[string]string) return requestRecorder } func (r RequestRecorder) AssertStringBodyEqual(t *testing.T, expectedBody string) bool { return assert.Equal(t, expectedBody, string(r.Data)) } func (r RequestRecorder) AssertJSONBodyEqual(t *testing.T, expectedBody interface{}) bool { isEqual, err := isJSONEqual(expectedBody, r.Body) if err != nil { assert.Fail(t, err.Error()) } return assert.True(t, isEqual) } func (r RequestRecorder) AssertXMLBodyEqual(t *testing.T, expectedXMLBody interface{}) bool { isEqual, err := isXMLEqual(expectedXMLBody, r.Body) if err != nil { assert.Fail(t, err.Error()) } return assert.True(t, isEqual) } func (r RequestRecorder) AssertParamEqual(t *testing.T, paramName, paramValue string) bool { return assert.Equal(t, r.Params[paramName], paramValue) } func (r RequestRecorder) AssertQueryParamEqual(t *testing.T, queryParamName string, queryParamValues []string) bool { return assert.Equal(t, r.QueryParams[queryParamName], queryParamValues) } func (r RequestRecorder) AssertFormParamEqual(t *testing.T, formParamName string, formValues []string) bool { return assert.Equal(t, r.FormParams[formParamName], formValues) } func (r RequestRecorder) AssertHeaderEqual(t *testing.T, expectedHeader http.Header) bool { return assert.True(t, isHeaderContains(expectedHeader, r.Header)) } func isHeaderContains(expectedHeader, actualHeader http.Header) bool { for key, value := range expectedHeader { actualValue, contains := actualHeader[key] if !contains 
{ return false } if !assert.ObjectsAreEqualValues(value, actualValue) { return false } } return true } func isJSONEqual(expectedBody interface{}, actualBody Body) (bool, error) { bodyJSON, err := json.Marshal(expectedBody) if err != nil { return false, err } expectedRecorderBody := Body{} if err := json.Unmarshal(bodyJSON, &expectedRecorderBody); err != nil { return false, err } return assert.ObjectsAreEqualValues(expectedRecorderBody, actualBody), nil } func isXMLEqual(expectedBody interface{}, actualBody Body) (bool, error) { bodyXML, err := xml.Marshal(expectedBody) if err != nil { return false, err } mv, err := mxj.NewMapXml(bodyXML) if err != nil { return false, err } expectedRecorderBody := mv.Old() return assert.ObjectsAreEqualValues(expectedRecorderBody, actualBody), nil } func (r *RequestRecorder) saveContext(ctx echo.Context) error { if ctx.Request().Header.Get(echo.HeaderContentType) == echo.MIMEApplicationXML { r.bindXML(ctx.Request().Body) } else if err := ctx.Bind(&r.Body); err != nil { data, err := ioutil.ReadAll(ctx.Request().Body) if err != nil { return err } r.setData(data) } r.setParams(ctx.ParamNames(), ctx.ParamValues()) r.setQueryParams(ctx.QueryParams()) r.setFormParams(ctx.Request().Form) r.setHeader(ctx.Request().Header) return nil } func (r *RequestRecorder) setQueryParams(queryParams url.Values) { r.QueryParams = queryParams } func (r *RequestRecorder) setParams(paramNames, paramValues []string) { for index, name := range paramNames { r.Params[name] = paramValues[index] } } func (r *RequestRecorder) setFormParams(formParams url.Values) { r.FormParams = formParams } func (r *RequestRecorder) setData(b []byte) { r.Data = b } func (r *RequestRecorder) setHeader(header http.Header) { r.Header = header.Clone() } func (r *RequestRecorder) bindXML(from io.ReadCloser) error { body, err := ioutil.ReadAll(from) if err != nil { return err } mv, err := mxj.NewMapXml(body) if err != nil { return err } r.Body = mv.Old() return nil }
requestrecorder.go
0.586286
0.471527
requestrecorder.go
starcoder
package gen_mmo import ( "math" ) type Vector struct { x float64 y float64 z float64 } const ( TOLERANCE float64 = 0.000001 ) func NewVector2(x, y float64) *Vector { v := new(Vector) v.x = x v.y = y v.z = 0 return v } func NewVector(x, y, z float64) *Vector { v := new(Vector) v.x = x v.y = y v.z = z return v } func (v *Vector) X() float64 { return v.x } func (v *Vector) Y() float64 { return v.y } func (v *Vector) Z() float64 { return v.z } func (v *Vector) SetX(x float64) { v.x = x } func (v *Vector) SetY(y float64) { v.y = y } func (v *Vector) SetZ(z float64) { v.z = z } func (v *Vector) AddWith(a *Vector) { v.x += a.X() v.y += a.Y() v.z += a.Z() } func (v *Vector) SubtractWith(a *Vector) { v.x -= a.X() v.y -= a.Y() v.z -= a.Z() } func (v *Vector) MultiplyWith(a *Vector) { v.x *= a.X() v.y *= a.Y() v.z *= a.Z() } func (v *Vector) DivideWith(a *Vector) { v.x /= a.X() v.y /= a.Y() v.z /= a.Z() } func (v *Vector) IsZero() bool { return math.Abs(v.x) < TOLERANCE && math.Abs(v.y) < TOLERANCE && math.Abs(v.z) < TOLERANCE } func (v *Vector) Len2() float64 { return v.X()*v.X() + v.Y()*v.Y() + v.Z()*v.Z() } func (a *Vector) Add(b *Vector) *Vector { return NewVector(a.X()+b.X(), a.Y()+b.Y(), a.Z()+b.Z()) } func (a *Vector) Subtract(b *Vector) *Vector { return NewVector(a.X()-b.X(), a.Y()-b.Y(), a.Z()-b.Z()) } func (a *Vector) Multiply(b *Vector) *Vector { return NewVector(a.X()*b.X(), a.Y()*b.Y(), a.Z()*b.Z()) } func (a *Vector) Divide(b *Vector) *Vector { return NewVector(a.X()/b.X(), a.Y()/b.Y(), a.Z()/b.Z()) } func (a *Vector) Max(b *Vector) *Vector { return NewVector(math.Max(a.X(), b.X()), math.Max(a.Y(), b.Y()), math.Max(a.Z(), b.Z())) } func (a *Vector) Min(b *Vector) *Vector { return NewVector(math.Min(a.X(), b.X()), math.Min(a.Y(), b.Y()), math.Min(a.Z(), b.Z())) } func VAdd(a *Vector, b *Vector) *Vector { return a.Add(b) } func VSubtract(a *Vector, b *Vector) *Vector { return a.Subtract(b) } func VMultiply(a *Vector, b *Vector) *Vector { return a.Multiply(b) } 
func VDivide(a *Vector, b *Vector) *Vector { return a.Divide(b) } func VMax(a *Vector, b *Vector) *Vector { return a.Max(b) } func VMin(a *Vector, b *Vector) *Vector { return a.Min(b) }
toolkit/gen_mmo/vector.go
0.840521
0.742888
vector.go
starcoder
package main import "fmt" type position struct { X int Y int } func (pos position) E() position { return position{pos.X + 1, pos.Y} } func (pos position) SE() position { return position{pos.X + 1, pos.Y + 1} } func (pos position) NE() position { return position{pos.X + 1, pos.Y - 1} } func (pos position) N() position { return position{pos.X, pos.Y - 1} } func (pos position) S() position { return position{pos.X, pos.Y + 1} } func (pos position) W() position { return position{pos.X - 1, pos.Y} } func (pos position) SW() position { return position{pos.X - 1, pos.Y + 1} } func (pos position) NW() position { return position{pos.X - 1, pos.Y - 1} } func (pos position) Distance(to position) int { deltaX := Abs(to.X - pos.X) deltaY := Abs(to.Y - pos.Y) if deltaX > deltaY { return deltaX } return deltaY } func (pos position) DistanceX(to position) int { deltaX := Abs(to.X - pos.X) return deltaX } func (pos position) DistanceY(to position) int { deltaY := Abs(to.Y - pos.Y) return deltaY } type direction int const ( NoDir direction = iota E ENE NE NNE N NNW NW WNW W WSW SW SSW S SSE SE ESE ) func KeyToDir(k keyAction) (dir direction) { switch k { case KeyW, KeyRunW: dir = W case KeyE, KeyRunE: dir = E case KeyS, KeyRunS: dir = S case KeyN, KeyRunN: dir = N case KeyNW, KeyRunNW: dir = NW case KeySW, KeyRunSW: dir = SW case KeyNE, KeyRunNE: dir = NE case KeySE, KeyRunSE: dir = SE } return dir } func (pos position) To(dir direction) position { to := pos switch dir { case E, ENE, ESE: to = pos.E() case NE: to = pos.NE() case NNE, N, NNW: to = pos.N() case NW: to = pos.NW() case WNW, W, WSW: to = pos.W() case SW: to = pos.SW() case SSW, S, SSE: to = pos.S() case SE: to = pos.SE() } return to } func (pos position) Dir(from position) direction { deltaX := Abs(pos.X - from.X) deltaY := Abs(pos.Y - from.Y) switch { case pos.X > from.X && pos.Y == from.Y: return E case pos.X > from.X && pos.Y < from.Y: switch { case deltaX > deltaY: return ENE case deltaX == deltaY: return NE default: 
return NNE } case pos.X == from.X && pos.Y < from.Y: return N case pos.X < from.X && pos.Y < from.Y: switch { case deltaY > deltaX: return NNW case deltaX == deltaY: return NW default: return WNW } case pos.X < from.X && pos.Y == from.Y: return W case pos.X < from.X && pos.Y > from.Y: switch { case deltaX > deltaY: return WSW case deltaX == deltaY: return SW default: return SSW } case pos.X == from.X && pos.Y > from.Y: return S case pos.X > from.X && pos.Y > from.Y: switch { case deltaY > deltaX: return SSE case deltaX == deltaY: return SE default: return ESE } default: panic(fmt.Sprintf("internal error: invalid position:%+v-%+v", pos, from)) } } func (pos position) Parents(from position) []position { p := []position{} switch pos.Dir(from) { case E: p = append(p, pos.W()) case ENE: p = append(p, pos.W(), pos.SW()) case NE: p = append(p, pos.SW()) case NNE: p = append(p, pos.S(), pos.SW()) case N: p = append(p, pos.S()) case NNW: p = append(p, pos.S(), pos.SE()) case NW: p = append(p, pos.SE()) case WNW: p = append(p, pos.E(), pos.SE()) case W: p = append(p, pos.E()) case WSW: p = append(p, pos.E(), pos.NE()) case SW: p = append(p, pos.NE()) case SSW: p = append(p, pos.N(), pos.NE()) case S: p = append(p, pos.N()) case SSE: p = append(p, pos.N(), pos.NW()) case SE: p = append(p, pos.NW()) case ESE: p = append(p, pos.W(), pos.NW()) } return p } func (pos position) RandomNeighbor(diag bool) position { if diag { return pos.RandomNeighborDiagonals() } return pos.RandomNeighborCardinal() } func (pos position) RandomNeighborDiagonals() position { neighbors := [8]position{pos.E(), pos.W(), pos.N(), pos.S(), pos.NE(), pos.NW(), pos.SE(), pos.SW()} var r int switch RandInt(8) { case 0: r = RandInt(len(neighbors[0:4])) case 1: r = RandInt(len(neighbors[0:2])) default: r = RandInt(len(neighbors[4:])) } return neighbors[r] } func (pos position) RandomNeighborCardinal() position { neighbors := [8]position{pos.E(), pos.W(), pos.N(), pos.S(), pos.NE(), pos.NW(), pos.SE(), 
pos.SW()} var r int switch RandInt(6) { case 0: r = RandInt(len(neighbors[0:4])) case 1: r = RandInt(len(neighbors)) default: r = RandInt(len(neighbors[0:2])) } return neighbors[r] } func idxtopos(i int) position { return position{i % DungeonWidth, i / DungeonWidth} } func (pos position) idx() int { return pos.Y*DungeonWidth + pos.X } func (pos position) valid() bool { return pos.Y >= 0 && pos.Y < DungeonHeight && pos.X >= 0 && pos.X < DungeonWidth } func (pos position) Laterals(dir direction) []position { switch dir { case E, ENE, ESE: return []position{pos.NE(), pos.SE()} case NE: return []position{pos.E(), pos.N()} case N, NNE, NNW: return []position{pos.NW(), pos.NE()} case NW: return []position{pos.W(), pos.N()} case W, WNW, WSW: return []position{pos.SW(), pos.NW()} case SW: return []position{pos.W(), pos.S()} case S, SSW, SSE: return []position{pos.SW(), pos.SE()} case SE: return []position{pos.S(), pos.E()} default: // should not happen return []position{} } }
pos.go
0.608478
0.737442
pos.go
starcoder
package volume import ( vector3 "github.com/louis030195/protometry/api/vector3" ) func cuboidTris() []int32 { return []int32{ 0, 2, 1, //face front 0, 3, 2, 2, 3, 4, //face top 2, 4, 5, 1, 2, 5, //face right 1, 5, 6, 0, 7, 4, //face left 0, 4, 3, 5, 4, 7, //face back 5, 7, 6, 0, 6, 7, //face bottom 0, 1, 6, } } // NewMeshSquareCuboid return a mesh forming a square cuboid // Based on http://ilkinulas.github.io/development/unity/2016/04/30/cube-mesh-in-unity3d.html func NewMeshSquareCuboid(sideLength float64, centerBased bool) *Mesh { var vertices []*vector3.Vector3 if centerBased { halfSide := sideLength / 2 vertices = []*vector3.Vector3{ vector3.NewVector3(-halfSide, -halfSide, -halfSide), vector3.NewVector3(halfSide, -halfSide, -halfSide), vector3.NewVector3(halfSide, halfSide, -halfSide), vector3.NewVector3(-halfSide, halfSide, -halfSide), vector3.NewVector3(-halfSide, halfSide, halfSide), vector3.NewVector3(halfSide, halfSide, halfSide), vector3.NewVector3(halfSide, -halfSide, halfSide), vector3.NewVector3(-halfSide, -halfSide, halfSide), } } else { vertices = []*vector3.Vector3{ vector3.NewVector3(0, 0, 0), vector3.NewVector3(sideLength, 0, 0), vector3.NewVector3(sideLength, sideLength, 0), vector3.NewVector3(0, sideLength, 0), vector3.NewVector3(0, sideLength, sideLength), vector3.NewVector3(sideLength, sideLength, sideLength), vector3.NewVector3(sideLength, 0, sideLength), vector3.NewVector3(0, 0, sideLength), } } return &Mesh{Vertices: vertices, Tris: cuboidTris()} } // NewMeshRectangularCuboid return a mesh forming a rectangular cuboid func NewMeshRectangularCuboid(center, size vector3.Vector3) *Mesh { var vertices []*vector3.Vector3 halfSize := size.Times(0.5) vertices = []*vector3.Vector3{ vector3.NewVector3(-halfSize.X, -halfSize.Y, -halfSize.Z), vector3.NewVector3(halfSize.X, -halfSize.Y, -halfSize.Z), vector3.NewVector3(halfSize.X, halfSize.Y, -halfSize.Z), vector3.NewVector3(-halfSize.X, halfSize.Y, -halfSize.Z), vector3.NewVector3(-halfSize.X, 
halfSize.Y, halfSize.Z), vector3.NewVector3(halfSize.X, halfSize.Y, halfSize.Z), vector3.NewVector3(halfSize.X, -halfSize.Y, halfSize.Z), vector3.NewVector3(-halfSize.X, -halfSize.Y, halfSize.Z), } return &Mesh{Vertices: vertices, Tris: cuboidTris()} } // Fit create a new mesh averaged on 2 meshes func (m *Mesh) Fit(other Volume) bool { return false } // Intersects create a new mesh averaged on 2 meshes func (m *Mesh) Intersects(other Volume) bool { return false } // Average create a new mesh averaged on 2 meshes func (m *Mesh) Average(other Volume) Volume { return nil } // Mutate create a new mesh with random mutations // Not in-place func (m *Mesh) Mutate(rate float64) Volume { newMesh := m.Clone() for i := range newMesh.Vertices { v := newMesh.Vertices[i].Mutate(rate) newMesh.Vertices = append(newMesh.Vertices, &v) } return newMesh } func (m *Mesh) Clone() *Mesh { c := m.Center v := m.Vertices t := m.Tris n := m.Normals u := m.Uvs return &Mesh{ Center: c, Vertices: v, Tris: t, Normals: n, Uvs: u, } }
api/volume/mesh.go
0.789274
0.663049
mesh.go
starcoder
package cp /* #include "chipmunk/include/chipmunk/chipmunk.h" */ import "C" // Chipmunk's axis-aligned 2D bounding box type. (left, bottom, right, top) type BB struct { L float64 B float64 R float64 T float64 } // c converts a BB to a C.cpBB. func (b BB) c() C.cpBB { var cp C.cpBB cp.l = C.cpFloat(b.L) cp.b = C.cpFloat(b.B) cp.r = C.cpFloat(b.R) cp.t = C.cpFloat(b.T) return cp } // goBB converts C.cpBB to a Go BB. func goBB(b C.cpBB) BB { return BB{ L: float64(b.l), B: float64(b.b), R: float64(b.r), T: float64(b.t), } } // Convenience constructor for BB structs. func BBNew(l, b, r, t float64) BB { return BB{ L: l, B: b, R: r, T: t, } } // Constructs a BB centered on a point with the given extents (half sizes). func BBNewForExtents(c Vect, hw, hh float64) BB { return goBB(C.cpBBNewForExtents(c.c(), C.cpFloat(hw), C.cpFloat(hh))) } // Constructs a BB for a circle with the given position and radius. func BBNewForCircle(p Vect, r float64) BB { return goBB(C.cpBBNewForCircle(p.c(), C.cpFloat(r))) } // Returns true if a and b intersect. func (a BB) Intersects(b BB) bool { return goBool(C.cpBBIntersects(a.c(), b.c())) } // Returns true if other lies completely within bb. func (bb BB) ContainsBB(other BB) bool { return goBool(C.cpBBContainsBB(bb.c(), other.c())) } // Returns true if bb contains v. func (bb BB) ContainsVect(v Vect) bool { return goBool(C.cpBBContainsVect(bb.c(), v.c())) } // Returns a bounding box that holds both bounding boxes. func (a BB) Merge(b BB) BB { return goBB(C.cpBBMerge(a.c(), b.c())) } // Returns a bounding box that holds both bb and v. func (bb BB) Expand(v Vect) BB { return goBB(C.cpBBExpand(bb.c(), v.c())) } // Returns the center of a bounding box. func (bb BB) Center() Vect { return goVect(C.cpBBCenter(bb.c())) } // Returns the area of the bounding box. func (bb BB) Area() float64 { return float64(C.cpBBArea(bb.c())) } // Merges a and b and returns the area of the merged bounding box. 
func (a BB) MergedArea(b BB) float64 { return float64(C.cpBBMergedArea(a.c(), b.c())) } // Returns the fraction along the segment query the BB is hit. Returns // INFINITY if it doesn't hit. func (bb BB) SegmentQuery(a, b Vect) float64 { return float64(C.cpBBSegmentQuery(bb.c(), a.c(), b.c())) } // Return true if the bounding box intersects the line segment with ends a and b. func (bb BB) IntersectsSegment(a, b Vect) bool { return goBool(C.cpBBIntersectsSegment(bb.c(), a.c(), b.c())) } // Clamp a vector to a bounding box. func (bb BB) ClampVect(v Vect) Vect { return goVect(C.cpBBClampVect(bb.c(), v.c())) } // Wrap a vector to a bounding box. func (bb BB) WrapVect(v Vect) Vect { return goVect(C.cpBBWrapVect(bb.c(), v.c())) }
native/cp/bb.go
0.88785
0.530784
bb.go
starcoder
package gm64 import ( "fmt" "math" "strings" "text/tabwriter" ) type Mat struct { M, N int Data []float64 } func NewMat(m, n int) func(data ...float64) *Mat { if m <= 0 || n <= 0 { err := fmt.Errorf("the m and n parameters must be positive (got %d and %d)", m, n) panic(err) } ctor := func(data ...float64) *Mat { if len(data) > m*n { err := fmt.Errorf("the number of input values must not be greater than m * n (%d * %d)", m, n) panic(err) } o := &Mat{ M: m, N: n, Data: make([]float64, m*n), } copy(o.Data, data) return o } return ctor } func (m *Mat) Copy() *Mat { cp := &Mat{ M: m.M, N: m.N, Data: make([]float64, m.M*m.N), } copy(cp.Data, m.Data) return cp } func (m1 *Mat) Add(m2 *Mat) *Mat { if m1.M != m2.M || m1.N != m2.N { err := fmt.Errorf( "the first and second matrices have different dimensions (got (%dx%d) and (%dx%d))", m1.M, m1.N, m2.M, m2.N, ) panic(err) } o := &Mat{ M: m1.M, N: m1.N, Data: make([]float64, m1.M*m1.N), } for i := 0; i < o.M; i++ { for j := 0; j < o.N; j++ { o.Data[j+i*o.N] = m1.Data[j+i*m1.N] + m2.Data[j+i*m2.N] } } return o } func (m1 *Mat) Sub(m2 *Mat) *Mat { if m1.M != m2.M || m1.N != m2.N { err := fmt.Errorf( "the first and second matrices have different dimensions (got (%dx%d) and (%dx%d))", m1.M, m1.N, m2.M, m2.N, ) panic(err) } o := &Mat{ M: m1.M, N: m1.N, Data: make([]float64, m1.M*m1.N), } for i := 0; i < o.M; i++ { for j := 0; j < o.N; j++ { o.Data[j+i*o.N] = m1.Data[j+i*m1.N] - m2.Data[j+i*m2.N] } } return o } func (m *Mat) Mul(c float64) *Mat { o := &Mat{ M: m.M, N: m.N, Data: make([]float64, m.M*m.N), } for i := 0; i < o.M; i++ { for j := 0; j < o.N; j++ { o.Data[j+i*o.N] = m.Data[j+i*o.N] * c } } return o } func (m1 *Mat) MulMat(m2 *Mat) *Mat { if m1.N != m2.M { err := fmt.Errorf( "trying to multiply matrices with different number of columns and rows (got (%dx%d) and (%dx%d))", m1.M, m1.N, m2.M, m2.N, ) panic(err) } o := &Mat{ M: m1.M, N: m2.N, Data: make([]float64, m1.M*m2.N), } for i := 0; i < o.M; i++ { for j := 0; j < o.N; 
j++ { for k := 0; k < m1.N; k++ { o.Data[j+i*o.N] += m1.Data[k+i*m1.N] * m2.Data[j+k*m2.N] } } } return o } func (m *Mat) At(i, j int) float64 { if i < 0 || j < 0 { err := fmt.Errorf("the i and j parameters must be non-negative (got %d and %d)", i, j) panic(err) } if i >= m.M || j >= m.N { err := fmt.Errorf( "trying to get a value out of matrix bounds (got position (%d, %d) while matrix size is (%dx%d))", i, j, m.M, m.N, ) panic(err) } return m.Data[j+i*m.N] } func (m *Mat) Set(i, j int, value float64) { if i < 0 || j < 0 { err := fmt.Errorf("the i and j parameters must be non-negative (got %d and %d)", i, j) panic(err) } if i >= m.M || j >= m.N { err := fmt.Errorf( "trying to set a value out of matrix bounds (got position (%d, %d) while matrix size is (%dx%d))", i, j, m.M, m.N, ) panic(err) } m.Data[j+i*m.N] = value } func (m *Mat) Det() float64 { if m.M != m.N { err := fmt.Errorf( "trying to get a determinant of a non-square matrix (matrix size is (%dx%d))", m.M, m.N, ) panic(err) } switch m.M { case 1: return m.Data[0] case 2: return m.Data[0]*m.Data[3] - m.Data[1]*m.Data[2] case 3: return m.Data[0]*m.Data[4]*m.Data[8] - m.Data[0]*m.Data[5]*m.Data[7] - m.Data[1]*m.Data[3]*m.Data[8] + m.Data[1]*m.Data[5]*m.Data[6] + m.Data[2]*m.Data[3]*m.Data[7] - m.Data[2]*m.Data[4]*m.Data[6] case 4: return m.Data[0]*m.Data[5]*m.Data[10]*m.Data[15] - m.Data[0]*m.Data[5]*m.Data[11]*m.Data[14] - m.Data[0]*m.Data[6]*m.Data[9]*m.Data[15] + m.Data[0]*m.Data[6]*m.Data[11]*m.Data[13] + m.Data[0]*m.Data[7]*m.Data[9]*m.Data[14] - m.Data[0]*m.Data[7]*m.Data[10]*m.Data[13] - m.Data[1]*m.Data[4]*m.Data[10]*m.Data[15] + m.Data[1]*m.Data[4]*m.Data[11]*m.Data[14] + m.Data[1]*m.Data[6]*m.Data[8]*m.Data[15] - m.Data[1]*m.Data[6]*m.Data[11]*m.Data[12] - m.Data[1]*m.Data[7]*m.Data[8]*m.Data[14] + m.Data[1]*m.Data[7]*m.Data[10]*m.Data[12] + m.Data[2]*m.Data[4]*m.Data[9]*m.Data[15] - m.Data[2]*m.Data[4]*m.Data[11]*m.Data[13] - m.Data[2]*m.Data[5]*m.Data[8]*m.Data[15] + 
m.Data[2]*m.Data[5]*m.Data[11]*m.Data[12] + m.Data[2]*m.Data[7]*m.Data[8]*m.Data[13] - m.Data[2]*m.Data[7]*m.Data[9]*m.Data[12] - m.Data[3]*m.Data[4]*m.Data[9]*m.Data[14] + m.Data[3]*m.Data[4]*m.Data[10]*m.Data[13] + m.Data[3]*m.Data[5]*m.Data[8]*m.Data[14] - m.Data[3]*m.Data[5]*m.Data[10]*m.Data[12] - m.Data[3]*m.Data[6]*m.Data[8]*m.Data[13] + m.Data[3]*m.Data[6]*m.Data[9]*m.Data[12] default: const EPS = 1e-12 cp := m.Copy() det := float64(1) for i := 0; i < cp.M; i++ { k := i for j := i + 1; j < cp.M; j++ { a1 := math.Abs(cp.Data[i+j*cp.N]) a2 := math.Abs(cp.Data[i+k*cp.N]) if a1 > a2 { k = j } } if math.Abs(cp.Data[i+k*cp.N]) < EPS { return 0 } if i != k { for j := 0; j < cp.N; j++ { tmp := cp.Data[j+i*cp.N] cp.Data[j+i*cp.N] = cp.Data[j+k*cp.N] cp.Data[j+k*cp.N] = tmp } det = -det } det *= cp.Data[i+i*cp.N] for j := i + 1; j < cp.M; j++ { cp.Data[j+i*cp.N] /= cp.Data[i+i*cp.N] } cp.Data[i+i*cp.N] = 1 for j := i + 1; j < cp.M; j++ { if math.Abs(cp.Data[i+j*cp.N]) < EPS { continue } tmp := cp.Data[i+j*cp.N] for l := i; l < cp.M; l++ { cp.Data[l+j*cp.N] -= tmp * cp.Data[l+i*cp.N] } } } return det } } func (m *Mat) Trace() float64 { if m.M != m.N { err := fmt.Errorf( "trying to get a trace of a non-square matrix (matrix size is (%dx%d))", m.M, m.N, ) panic(err) } trace := float64(0) for i := 0; i < m.M; i++ { trace += m.Data[i+i*m.N] } return trace } func (m *Mat) Transpose() *Mat { o := &Mat{ M: m.N, N: m.M, Data: make([]float64, m.N*m.M), } for i := 0; i < o.M; i++ { for j := 0; j < o.N; j++ { o.Data[i+j*o.N] = m.Data[j+i*m.N] } } return o } func (m *Mat) String() string { sb := &strings.Builder{} w := tabwriter.NewWriter(sb, 4, 4, 1, ' ', 0) for i := 0; i < m.M; i++ { for j := 0; j < m.N; j++ { fmt.Fprintf(w, "%f\t", m.Data[j+i*m.N]) } if i != m.M-1 { fmt.Fprintf(w, "\n") } } w.Flush() return sb.String() }
gm64/mat.go
0.525612
0.535463
mat.go
starcoder
package gozxing type ResultMetadataType int const ( /** * Unspecified, application-specific metadata. Maps to an unspecified {@link Object}. */ ResultMetadataType_OTHER = ResultMetadataType(iota) /** * Denotes the likely approximate orientation of the barcode in the image. This value * is given as degrees rotated clockwise from the normal, upright orientation. * For example a 1D barcode which was found by reading top-to-bottom would be * said to have orientation "90". This key maps to an {@link Integer} whose * value is in the range [0,360). */ ResultMetadataType_ORIENTATION /** * <p>2D barcode formats typically encode text, but allow for a sort of 'byte mode' * which is sometimes used to encode binary data. While {@link Result} makes available * the complete raw bytes in the barcode for these formats, it does not offer the bytes * from the byte segments alone.</p> * * <p>This maps to a {@link java.util.List} of byte arrays corresponding to the * raw bytes in the byte segments in the barcode, in order.</p> */ ResultMetadataType_BYTE_SEGMENTS /** * Error correction level used, if applicable. The value type depends on the * format, but is typically a String. */ ResultMetadataType_ERROR_CORRECTION_LEVEL /** * For some periodicals, indicates the issue number as an {@link Integer}. */ ResultMetadataType_ISSUE_NUMBER /** * For some products, indicates the suggested retail price in the barcode as a * formatted {@link String}. */ ResultMetadataType_SUGGESTED_PRICE /** * For some products, the possible country of manufacture as a {@link String} denoting the * ISO country code. Some map to multiple possible countries, like "US/CA". */ ResultMetadataType_POSSIBLE_COUNTRY /** * For some products, the extension text */ ResultMetadataType_UPC_EAN_EXTENSION /** * PDF417-specific metadata */ ResultMetadataType_PDF417_EXTRA_METADATA /** * If the code format supports structured append and the current scanned code is part of one then the * sequence number is given with it. 
*/ ResultMetadataType_STRUCTURED_APPEND_SEQUENCE /** * If the code format supports structured append and the current scanned code is part of one then the * parity is given with it. */ ResultMetadataType_STRUCTURED_APPEND_PARITY /** * Barcode Symbology Identifier. * Note: According to the GS1 specification the identifier may have to replace a leading FNC1/GS character when prepending to the barcode content. */ ResultMetadataType_SYMBOLOGY_IDENTIFIER ) func (t ResultMetadataType) String() string { switch t { case ResultMetadataType_OTHER: return "OTHER" case ResultMetadataType_ORIENTATION: return "ORIENTATION" case ResultMetadataType_BYTE_SEGMENTS: return "BYTE_SEGMENTS" case ResultMetadataType_ERROR_CORRECTION_LEVEL: return "ERROR_CORRECTION_LEVEL" case ResultMetadataType_ISSUE_NUMBER: return "ISSUE_NUMBER" case ResultMetadataType_SUGGESTED_PRICE: return "SUGGESTED_PRICE" case ResultMetadataType_POSSIBLE_COUNTRY: return "POSSIBLE_COUNTRY" case ResultMetadataType_UPC_EAN_EXTENSION: return "UPC_EAN_EXTENSION" case ResultMetadataType_PDF417_EXTRA_METADATA: return "PDF417_EXTRA_METADATA" case ResultMetadataType_STRUCTURED_APPEND_SEQUENCE: return "STRUCTURED_APPEND_SEQUENCE" case ResultMetadataType_STRUCTURED_APPEND_PARITY: return "STRUCTURED_APPEND_PARITY" case ResultMetadataType_SYMBOLOGY_IDENTIFIER: return "SYMBOLOGY_IDENTIFIER" default: return "unknown metadata type" } }
result_metadata_type.go
0.844665
0.510252
result_metadata_type.go
starcoder
package consistenthash import ( "hash/crc32" "math/bits" "sort" "strconv" ) type Hash func(data []byte) uint32 const defaultHashExpansion = 6 type Map struct { // Inputs // hash is the hash function that will be applied to both added // keys and fetched keys hash Hash // replicas is the number of virtual nodes that will be inserted // into the consistent hash ring for each key added replicas int // prefixTableExpansion is the multiple of virtual nodes that // will be inserted into the internal hash table for O(1) lookups. prefixTableExpansion int // Internal data // keys is the hash of the virtual nodes, sorted by hash value keys []int // Sorted // hashMap maps the hashed keys back to the input strings. // Note that all virtual nodes will map back to the same input // string hashMap map[int]string // prefixShift is the number of bits an input hash should // be right-shifted to act as a lookup in the prefixTable prefixShift uint32 // prefixTable is a map of the most significant bits of // a hash value to output all hashes with that prefix // map to. If the result is ambiguous (i.e. there is a // hash range split within this prefix) the value will // be blank and we should fall back to a binary search // through keys to find the exact output prefixTable []string } // New returns a blank consistent hash ring that will return // the key whose hash comes next after the hash of the input to // Map.Get. // Increasing the number of replicas will improve the smoothness // of the hash ring and reduce the data moved when adding/removing // nodes, at the cost of more memory. func New(replicas int, fn Hash) *Map { return NewConsistentHash(replicas, defaultHashExpansion, fn) } // NewConsistentHash returns a blank consistent hash ring that will return // the key whose hash comes next after the hash of the input to // Map.Get. // Increasing the number of replicas will improve the smoothness // of the hash ring and reduce the data moved when adding/removing // nodes. 
// Increasing the tableExpansion will allocate more entries in the // internal hash table, reducing the frequency of lg(n) binary // searches during calls to the Map.Get method. func NewConsistentHash(replicas int, tableExpansion int, fn Hash) *Map { m := &Map{ replicas: replicas, hash: fn, hashMap: make(map[int]string), prefixTableExpansion: tableExpansion, } if m.hash == nil { m.hash = crc32.ChecksumIEEE } return m } // Returns true if there are no items available. func (m *Map) IsEmpty() bool { return len(m.keys) == 0 } // Adds some keys to the hash. func (m *Map) Add(keys ...string) { for _, key := range keys { for i := 0; i < m.replicas; i++ { hash := int(m.hash([]byte(strconv.Itoa(i) + key))) m.keys = append(m.keys, hash) m.hashMap[hash] = key } } sort.Ints(m.keys) // Find minimum number of bits to hold |keys| * prefixTableExpansion prefixBits := uint32(bits.Len32(uint32(len(m.keys) * m.prefixTableExpansion))) m.prefixShift = 32 - prefixBits prefixTableSize := 1 << prefixBits m.prefixTable = make([]string, prefixTableSize) previousKeyPrefix := -1 // Effectively -Inf currentKeyIdx := 0 currentKeyPrefix := m.keys[currentKeyIdx] >> m.prefixShift for i := range m.prefixTable { if previousKeyPrefix < i && currentKeyPrefix > i { // All keys with this prefix will map to a single value m.prefixTable[i] = m.hashMap[m.keys[currentKeyIdx]] } else { // Several keys might have the same prefix. Walk // over them until it changes previousKeyPrefix = currentKeyPrefix for currentKeyPrefix == previousKeyPrefix { currentKeyIdx++ if currentKeyIdx < len(m.keys) { currentKeyPrefix = m.keys[currentKeyIdx] >> m.prefixShift } else { currentKeyIdx = 0 currentKeyPrefix = prefixTableSize + 1 // Effectively +Inf } } } } } // Gets the closest item in the hash to the provided key. 
func (m *Map) Get(key string) string { if m.IsEmpty() { return "" } hash := int(m.hash([]byte(key))) // Look for the hash prefix in the prefix table prefixSlot := hash >> m.prefixShift tableResult := m.prefixTable[prefixSlot] if len(tableResult) > 0 { return tableResult } // Binary search for appropriate replica. idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash }) // Means we have cycled back to the first replica. if idx == len(m.keys) { idx = 0 } return m.hashMap[m.keys[idx]] }
consistenthash/consistenthash.go
0.706393
0.449574
consistenthash.go
starcoder
package raster import ( "math" "sort" ) type Statistic struct { Mean float64 `bson:"mean"` Median float64 `bson:"median"` Cells int `bson:"cells"` Sum float64 `bson:"sum"` Min float64 `bson:"min"` Max float64 `bson:"max"` Most float64 `bson:"most"` Histogram map[float64]int `bson:"histogram"` } func calculateBandStatistic(band *Band, precision int) Statistic { var values []float64 var mostValue float64 min := math.MaxFloat64 max := -math.MaxFloat64 median := 0.0 sum := 0.0 cells := 0 most := make(map[float64]int) hist := make(map[float64]int) generateBasicStats(band, &min, &max, &sum, &cells, &values, &most) generateMost(&mostValue, &most, &hist, precision) if cells < 1 { return Statistic{ Mean: 0, Median: 0, Cells: cells, Sum: sum, Min: 0, Max: 0, Most: mostValue, Histogram: hist, } } generateMedian(&median, &cells, &values) return Statistic{ Min: min, Max: max, Sum: sum, Cells: cells, Mean: sum / float64(cells), Median: median, Most: mostValue, Histogram: hist, } } func generateBasicStats(band *Band, min, max, sum *float64, cells *int, values *[]float64, most *map[float64]int) { for _, row := range band.Data { for _, value := range row { if value == band.NoData { continue } if value < *min { *min = value } if value > *max { *max = value } *sum = *sum + value *cells = *cells + 1 *values = append(*values, value) (*most)[value] = (*most)[value] + 1 } } } func generateMedian(median *float64, cells *int, values *[]float64) { sort.Float64s(*values) mid := (*cells - 1) / 2 if mid%2 != 0 { *median = (*values)[mid] } else { *median = ((*values)[mid-1] + (*values)[mid]) / 2 } } func generateMost(mostValue *float64, most *map[float64]int, hist *map[float64]int, precision int) { mostCounter := 0 mult := 1 for v := 0; v < precision; v++ { mult = mult * 10 } for key, value := range *most { rounded := float64(int(key*float64(mult))) / float64(mult) (*hist)[rounded] = (*hist)[rounded] + 1 if value > mostCounter { *mostValue = key mostCounter = value } } }
pkg/raster/statistic.go
0.709623
0.42668
statistic.go
starcoder
package vis import ( "image" "image/color" "math" "gitlab.cs.fau.de/since/radolan" ) // A ColorFunc can be used to assign colors to data values for image creation. type ColorFunc func(val float64) color.RGBA // Sample color and grayscale gradients for visualization with the image method. var ( // HeatmapReflectivityShort is a color gradient for cloud reflectivity // composites between 5dBZ and 75dBZ. HeatmapReflectivityShort = Heatmap(5.0, 75.0, Id) // HeatmapReflectivity is a color gradient for cloud reflectivity // composites between 5dBZ and 75dBZ. HeatmapReflectivity = Heatmap(1.0, 75.0, Id) // HeatmapReflectivityWide is a color gradient for cloud reflectivity // composites between -32.5dBZ and 75dBZ. HeatmapReflectivityWide = Heatmap(-32.5, 75.0, Id) // HeatmapAccumulatedHour is a color gradient for accumulated rainfall // composites (e.g RW) between 0.1mm/h and 100 mm/h using logarithmic // compression. HeatmapAccumulatedHour = Heatmap(0.1, 100, Log) // HeatmapAccumulatedDay is a color gradient for accumulated rainfall // composites (e.g. SF) between 0.1mm and 200mm using logarithmic // compression. HeatmapAccumulatedDay = Heatmap(0.1, 200, Log) HeatmapRadialVelocity = Radialmap(-31.5, 31.5, Log) // GraymapLinear is a linear grayscale gradient between the (raw) rvp-6 // values 0 and 409.5. GraymapLinear = Graymap(0, 409.5, Id) // GraymapLinearWide is a linear grayscale gradient between the (raw) // rvp-6 values 0 and 4095. GraymapLinearWide = Graymap(0, 4095, Id) ) // Id is the identity (no compression) func Id(x float64) float64 { return x } // Log is the natural logarithm (logarithmic compression) func Log(x float64) float64 { return math.Log(x) } // Image creates an image by evaluating the color function fn for each data // value in the given z-layer. 
func Image(fn ColorFunc, c *radolan.Composite, layer int) *image.RGBA { rec := image.Rect(0, 0, c.Dx, c.Dy) img := image.NewRGBA(rec) if layer < 0 || layer >= c.Dz { return img } for y := 0; y < c.Dy; y++ { for x := 0; x < c.Dx; x++ { img.Set(x, y, fn(float64(c.DataZ[layer][y][x]))) } } return img } // Graymap returns a grayscale gradient between min and max. A compression function is used to // make logarithmic scales possible. func Graymap(min, max float64, compression func(float64) float64) ColorFunc { min = compression(min) max = compression(max) return func(val float64) color.RGBA { val = compression(val) if val < min { return color.RGBA{0x00, 0x00, 0x00, 0xFF} // black } p := (val - min) / (max - min) if p > 1 { p = 1 } l := uint8(0xFF * p) return color.RGBA{l, l, l, 0xFF} } } // Radialmap returns a dichromatic gradient from min to 0 to max which can // be used for doppler radar radial velocity products. func Radialmap(min, max float64, compression func(float64) float64) ColorFunc { return func(val float64) color.RGBA { if val != val { return color.RGBA{0x00, 0x00, 0x00, 0xFF} // black } base := math.Max(math.Abs(min), math.Abs(max)) p := compression(math.Abs(val)) / compression(base) if p > 1 { p = 1 } lev := uint8(0xFF * p) var non byte = 0x00 if math.Abs(val) <= 1 { lev = 0xFF non = 0xCC } if val < 0 { return color.RGBA{non, lev, lev, 0xFF} } return color.RGBA{lev, non, non, 0xFF} } } // Heatmap returns a colour gradient between min and max. A compression function is used to // make logarithmic scales possible. 
func Heatmap(min, max float64, compression func(float64) float64) ColorFunc { min = compression(min) max = compression(max) return func(val float64) color.RGBA { val = compression(val) if val < min { return color.RGBA{0x00, 0x00, 0x00, 0xFF} // black } p := (val - min) / (max - min) if p > 1 { // limit p = 1 } h := math.Mod(360-(330*p)+240, 360) s := 1.0 // saturation l := 0.5*p + 0.25 // lightness // adapted from https://en.wikipedia.org/wiki/HSL_and_HSV#From_HSL c := (1 - math.Abs(2*l-1)) * s // calculate chroma hh := h / 60 x := c * (1 - math.Abs(math.Mod(hh, 2)-1)) if math.IsNaN(hh) { hh = -1 } var rr, gg, bb float64 switch int(hh) { case 0: rr, gg, bb = c, x, 0 case 1: rr, gg, bb = x, c, 0 case 2: rr, gg, bb = 0, c, x case 3: rr, gg, bb = 0, x, c case 4: rr, gg, bb = x, 0, c case 5: rr, gg, bb = c, 0, x } m := l - c/2 r, g, b := uint8(0xFF*(rr+m)), uint8(0xFF*(gg+m)), uint8(0xFF*(bb+m)) return color.RGBA{r, g, b, 0xFF} } }
radolan2png/vis/vis.go
0.825801
0.625867
vis.go
starcoder