// NOTE: dataset metadata header removed; the Go source starts below.
package onshape
import (
"encoding/json"
)
// BTVector2d1812 struct for BTVector2d1812.
// It models a 2D vector from the Onshape API. All fields are optional
// pointers so that an unset field can be distinguished from its zero value.
type BTVector2d1812 struct {
	// BtType is the Onshape type discriminator for this object.
	BtType *string `json:"btType,omitempty"`
	// X is the x component of the vector.
	X *float64 `json:"x,omitempty"`
	// Y is the y component of the vector.
	Y *float64 `json:"y,omitempty"`
}
// NewBTVector2d1812 instantiates a new BTVector2d1812 object.
// This constructor will assign default values to properties that have them
// defined, and makes sure properties required by the API are set, but the
// set of arguments will change when the set of required properties changes.
func NewBTVector2d1812() *BTVector2d1812 {
	return &BTVector2d1812{}
}
// NewBTVector2d1812WithDefaults instantiates a new BTVector2d1812 object.
// This constructor will only assign default values to properties that have
// them defined, but it doesn't guarantee that properties required by the
// API are set.
func NewBTVector2d1812WithDefaults() *BTVector2d1812 {
	return &BTVector2d1812{}
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTVector2d1812) GetBtType() string {
	if v, ok := o.GetBtTypeOk(); ok {
		return *v
	}
	return ""
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil
// otherwise, and a boolean to check if the value has been set.
func (o *BTVector2d1812) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}

// HasBtType returns a boolean if a field has been set.
func (o *BTVector2d1812) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType gets a reference to the given string and assigns it to the
// BtType field.
func (o *BTVector2d1812) SetBtType(v string) {
	o.BtType = &v
}
// GetX returns the X field value if set, zero value otherwise.
func (o *BTVector2d1812) GetX() float64 {
	if v, ok := o.GetXOk(); ok {
		return *v
	}
	return 0
}

// GetXOk returns a tuple with the X field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *BTVector2d1812) GetXOk() (*float64, bool) {
	if o != nil && o.X != nil {
		return o.X, true
	}
	return nil, false
}

// HasX returns a boolean if a field has been set.
func (o *BTVector2d1812) HasX() bool {
	return o != nil && o.X != nil
}

// SetX gets a reference to the given float64 and assigns it to the X field.
func (o *BTVector2d1812) SetX(v float64) {
	o.X = &v
}
// GetY returns the Y field value if set, zero value otherwise.
func (o *BTVector2d1812) GetY() float64 {
	if v, ok := o.GetYOk(); ok {
		return *v
	}
	return 0
}

// GetYOk returns a tuple with the Y field value if set, nil otherwise,
// and a boolean to check if the value has been set.
func (o *BTVector2d1812) GetYOk() (*float64, bool) {
	if o != nil && o.Y != nil {
		return o.Y, true
	}
	return nil, false
}

// HasY returns a boolean if a field has been set.
func (o *BTVector2d1812) HasY() bool {
	return o != nil && o.Y != nil
}

// SetY gets a reference to the given float64 and assigns it to the Y field.
func (o *BTVector2d1812) SetY(v float64) {
	o.Y = &v
}
// MarshalJSON serializes only the fields that have been set; unset
// (nil) fields are omitted entirely from the JSON object.
func (o BTVector2d1812) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{})
	if o.BtType != nil {
		out["btType"] = o.BtType
	}
	if o.X != nil {
		out["x"] = o.X
	}
	if o.Y != nil {
		out["y"] = o.Y
	}
	return json.Marshal(out)
}
// NullableBTVector2d1812 wraps a BTVector2d1812 pointer together with a
// flag recording whether the value has been explicitly set, so a JSON
// null can be represented and round-tripped.
type NullableBTVector2d1812 struct {
	value *BTVector2d1812 // the wrapped value; may be nil
	isSet bool            // true once Set or UnmarshalJSON has been called
}
// Get returns the wrapped value (possibly nil).
func (v NullableBTVector2d1812) Get() *BTVector2d1812 {
	return v.value
}

// Set stores val and marks the wrapper as explicitly set.
func (v *NullableBTVector2d1812) Set(val *BTVector2d1812) {
	v.isSet = true
	v.value = val
}

// IsSet reports whether a value (possibly nil) has been stored.
func (v NullableBTVector2d1812) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableBTVector2d1812) Unset() {
	v.isSet = false
	v.value = nil
}

// NewNullableBTVector2d1812 returns a wrapper already marked as set.
func NewNullableBTVector2d1812(val *BTVector2d1812) *NullableBTVector2d1812 {
	return &NullableBTVector2d1812{value: val, isSet: true}
}

// MarshalJSON serializes the wrapped value; nil encodes as JSON null.
func (v NullableBTVector2d1812) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableBTVector2d1812) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_bt_vector2d_1812.go | 0.740174 | 0.545165 | model_bt_vector2d_1812.go | starcoder |
package text
//------------------------------------------------------------------------------
// InterpolatedString holds a string that potentially has interpolation
// functions. Each time Get is called any functions are replaced with their
// evaluated results in the string.
type InterpolatedString struct {
	str         string // the original string, returned verbatim when interpolate is false
	strBytes    []byte // byte copy of str, populated only when interpolation is needed
	interpolate bool   // true when str contains function variables
}
// Get evaluates functions within the original string and returns the result.
func (i *InterpolatedString) Get(msg Message) string {
	return i.GetFor(msg, 0)
}

// GetFor evaluates functions within the original string and returns the
// result, evaluated for a specific message part of the message.
func (i *InterpolatedString) GetFor(msg Message, index int) string {
	if i.interpolate {
		return string(ReplaceFunctionVariablesFor(msg, index, i.strBytes))
	}
	// No interpolation functions: return the original string untouched.
	return i.str
}
// NewInterpolatedString returns a type that evaluates function
// interpolations on a provided string each time Get is called.
func NewInterpolatedString(str string) *InterpolatedString {
	res := &InterpolatedString{str: str}
	if b := []byte(str); ContainsFunctionVariables(b) {
		res.strBytes = b
		res.interpolate = true
	}
	return res
}
//------------------------------------------------------------------------------

// InterpolatedBytes holds a byte slice that potentially has interpolation
// functions. Each time Get is called any functions are replaced with their
// evaluated results in the byte slice.
type InterpolatedBytes struct {
	v           []byte // the original payload
	interpolate bool   // true when v contains function variables
}
// Get evaluates functions within the byte slice and returns the result.
func (i *InterpolatedBytes) Get(msg Message) []byte {
	return i.GetFor(msg, 0)
}

// GetFor evaluates functions within the byte slice and returns the result,
// evaluated for a specific message part of the message.
func (i *InterpolatedBytes) GetFor(msg Message, index int) []byte {
	if i.interpolate {
		return ReplaceFunctionVariablesFor(msg, index, i.v)
	}
	// No interpolation functions: return the original bytes untouched.
	return i.v
}
// NewInterpolatedBytes returns a type that evaluates function interpolations
// on a provided byte slice each time Get is called.
func NewInterpolatedBytes(v []byte) *InterpolatedBytes {
	ib := &InterpolatedBytes{v: v}
	ib.interpolate = ContainsFunctionVariables(v)
	return ib
}
//------------------------------------------------------------------------------
package build
import "sort"
// VertexSet represents a set of vertices in a graph.
// The zero value of a VertexSet is the universe;
// the set containing all vertices.
type VertexSet struct {
	// A set is an immutable sorted list of non-empty disjoint intervals.
	// The zero value VertexSet{nil} represents the universe.
	// An empty (but non-nil) slice represents the empty set.
	set []interval
}
const (
	maxInt = int(^uint(0) >> 1) // largest representable int
	minInt = -maxInt - 1        // smallest representable int
)
// An interval represents the numbers [a, b).
type interval struct {
	a, b  int
	index int // index of a in the whole set, i.e. the number of set elements before this interval
}
// update recomputes the cumulative index value of every interval so that
// set[i].index equals the number of elements in all earlier intervals.
func (s VertexSet) update() {
	offset := 0
	for i := range s.set {
		s.set[i].index = offset
		offset += s.set[i].b - s.set[i].a
	}
}
// empty returns the empty set (a non-nil, zero-length interval list).
func empty() VertexSet {
	return VertexSet{set: []interval{}}
}
// Range returns a set containing all vertices v, a ≤ v < b.
func Range(a, b int) VertexSet {
	if b <= a {
		return empty()
	}
	return VertexSet{set: []interval{{a: a, b: b}}}
}

// Vertex returns a set containing the single vertex v.
func Vertex(v int) VertexSet {
	return VertexSet{set: []interval{{a: v, b: v + 1}}}
}
// size returns the number of elements in this set, or -1 for the universe.
func (s VertexSet) size() int {
	if s.set == nil {
		return -1 // universe
	}
	if len(s.set) == 0 {
		return 0
	}
	// Total size = elements before the last interval + its own length.
	last := s.set[len(s.set)-1]
	return last.index + last.b - last.a
}
// get returns the i:th element in the set, or -1 if not available.
// It binary-searches using the cached interval.index values, so it runs in
// O(log n). For the universe (nil set) size() is -1, so every i is rejected.
func (s VertexSet) get(i int) int {
	if i < 0 || i >= s.size() {
		return -1
	}
	// The smallest index j such that i < set[j].index.
	j := sort.Search(len(s.set), func(j int) bool {
		return i < s.set[j].index
	})
	// j-1 is the interval containing the i:th element.
	in := s.set[j-1]
	return in.a + i - in.index
}
// rank returns the position of n in the set, or -1 if not available.
// Note that the universe is represented by a nil slice, so rank on the
// universe always returns -1 (mirroring get's behavior).
func (s VertexSet) rank(n int) int {
	if len(s.set) == 0 || n < s.set[0].a {
		return -1
	}
	// The smallest index i such that n < set[i].a
	i := sort.Search(len(s.set), func(i int) bool {
		return n < s.set[i].a
	})
	// i-1 is the only interval that can contain n.
	in := s.set[i-1]
	if n >= in.b {
		return -1
	}
	return in.index + n - in.a
}
// Contains tells if v is a member of the set.
func (s VertexSet) Contains(v int) bool {
	if s.set == nil {
		return true // the universe contains everything
	}
	if len(s.set) == 0 || v < s.set[0].a {
		return false
	}
	// The smallest index i such that v < set[i].a; the candidate interval
	// is then set[i-1], and v is a member iff it lies before that
	// interval's upper bound.
	i := sort.Search(len(s.set), func(i int) bool {
		return v < s.set[i].a
	})
	return v < s.set[i-1].b
}
// AndNot returns the set of all vertices belonging to s1 but not to s2,
// computed as the intersection of s1 with the complement of s2.
func (s1 VertexSet) AndNot(s2 VertexSet) VertexSet {
	return s1.And(s2.complement())
}
// complement returns the set of all vertices not belonging to s.
// It emits the gaps between consecutive intervals, plus the two open ends.
// NOTE(review): the top interval ends at maxInt, so maxInt itself is never
// a member of any complement — confirm this bound is intended.
func (s VertexSet) complement() VertexSet {
	switch {
	case s.set == nil:
		// Complement of the universe is the empty set.
		return empty()
	case len(s.set) == 0:
		// Complement of the empty set is the universe.
		return VertexSet{}
	}
	t := empty()
	prev := minInt
	for _, in := range s.set {
		if prev != in.a {
			// Gap [prev, in.a) belongs to the complement.
			t.set = append(t.set, interval{prev, in.a, 0})
		}
		prev = in.b
	}
	if prev < maxInt {
		t.set = append(t.set, interval{prev, maxInt, 0})
	}
	t.update()
	return t
}
// And returns the set of all vertices belonging to both s1 and s2.
// It sweeps over all interval endpoints; a region covered by two open
// intervals at once belongs to the intersection.
func (s1 VertexSet) And(s2 VertexSet) VertexSet {
	switch {
	case s1.set == nil:
		// universe ∩ s2 == s2
		return s2
	case s2.set == nil:
		return s1
	}
	type point struct {
		x int
		a bool // Tells if x is a of [a, b).
	}
	points := make([]point, 0, 2*(len(s1.set)+len(s2.set)))
	for _, in := range s1.set {
		points = append(points, point{in.a, true})
		points = append(points, point{in.b, false})
	}
	for _, in := range s2.set {
		points = append(points, point{in.a, true})
		points = append(points, point{in.b, false})
	}
	sort.Slice(points, func(i, j int) bool {
		if points[i].x == points[j].x {
			// Ends sort before starts, so touching intervals such as
			// [a,b) and [b,c) do not yield a spurious intersection at b.
			return !points[i].a
		}
		return points[i].x < points[j].x
	})
	s := empty()
	// count tracks how many intervals are currently open (0, 1 or 2);
	// a region with count == 2 is part of the intersection.
	start, count := 0, 0
	for _, p := range points {
		switch count {
		case 0:
			// Sorted order guarantees the first event is a start.
			count++
		case 1:
			if p.a {
				start = p.x
				count++
			} else {
				count--
			}
		case 2:
			s.set = append(s.set, interval{start, p.x, 0})
			count--
		}
	}
	s.update()
	return s
}
// Or returns the set of all vertices belonging to either s1 or s2.
func (s1 VertexSet) Or(s2 VertexSet) VertexSet {
if s1.set == nil || s2.set == nil {
return VertexSet{nil}
}
type point struct {
x int
a bool // Tells if x is a of [a, b).
}
points := make([]point, 0, 2*(len(s1.set)+len(s2.set)))
for _, in := range s1.set {
points = append(points, point{in.a, true})
points = append(points, point{in.b, false})
}
for _, in := range s2.set {
points = append(points, point{in.a, true})
points = append(points, point{in.b, false})
}
sort.Slice(points, func(i, j int) bool {
if points[i].x == points[j].x {
return points[i].a
}
return points[i].x < points[j].x
})
s := empty()
start, count := 0, 0
for _, p := range points {
switch count {
case 0:
start = p.x
count++
case 1:
if p.a {
count++
} else {
s.set = append(s.set, interval{start, p.x, 0})
count--
}
case 2:
count--
}
}
s.update()
return s
} | build/vertexset.go | 0.772616 | 0.545951 | vertexset.go | starcoder |
package main
/**
Given a string, find the longest substring that contains no repeated
characters and return its length.

Example 1:
Input: "abcabcbb"
Output: 3
Explanation: the longest substring without repeating characters is "abc",
so its length is 3.

Example 2:
Input: "bbbbb"
Output: 1
Explanation: the longest substring without repeating characters is "b",
so its length is 1.

Example 3:
Input: "pwwkew"
Output: 3
Explanation: the longest substring without repeating characters is "wke",
so its length is 3.
Note that the answer must be the length of a substring; "pwke" is a
subsequence, not a substring.

Constraints:
s.length <= 40000
*/
// lengthOfLongestSubstring returns the length of the longest substring of
// s without repeating bytes, using a two-pointer sliding window: the
// window [left, right] always contains distinct bytes.
func lengthOfLongestSubstring(s string) int {
	seen := make(map[byte]bool)
	best := 0
	left := 0
	for right := 0; right < len(s); right++ {
		// Shrink the window from the left until s[right] is unique in it.
		for seen[s[right]] {
			delete(seen, s[left])
			left++
		}
		seen[s[right]] = true
		if width := right - left + 1; width > best {
			best = width
		}
	}
	return best
}
/**
class Solution {
public int lengthOfLongestSubstring(String s) {
int res = 0;
Set<Character> set = new HashSet<>();
for (int l = 0, r = 0; r < s.length(); r++) {
char c = s.charAt(r);
while (set.contains(c)) {
set.remove(s.charAt(l++));
}
set.add(c);
res = Math.max(res, r - l + 1);
}
return res;
}
public static void main(String[] args) {
new Solution().lengthOfLongestSubstring("pwwkew");
}
}
class Solution {
public int lengthOfLongestSubstring(String s) {
// 记录字符上一次出现的位置
int[] last = new int[128];
for (int i = 0; i < 128; i++) {
last[i] = -1;
}
int n = s.length();
int res = 0;
int start = 0; // 窗口开始位置
for (int i = 0; i < n; i++) {
int index = s.charAt(i);
start = Math.max(start, last[index] + 1);
res = Math.max(res, i - start + 1);
last[index] = i;
}
return res;
}
}
class Solution {
public int lengthOfLongestSubstring2(String s) {
if (s.length() == 0) return 0;
HashMap<Character, Integer> map = new HashMap<>();
int max = 0;
int left = 0;
for (int i = 0; i < s.length(); i++) {
if (map.containsKey(s.charAt(i))) {
left = Math.max(left, map.get(s.charAt(i)) + 1);
}
map.put(s.charAt(i), i);
max = Math.max(max, i - left + 1);
}
return max;
}
}
*/
func main() {
lengthOfLongestSubstring("pwwkew")
} | lcof/lengthOfLongestSubstring/lengthOfLongestSubstring.go | 0.587943 | 0.446796 | lengthOfLongestSubstring.go | starcoder |
package geographiclibgo
import "math"
// DirectAndInverse is the set of direct and inverse geodesic computations
// implemented by Geodesic. The Direct* methods solve the direct problem
// (start point, azimuth, distance -> end point); the Inverse* methods
// solve the inverse problem (two points -> distance/azimuths).
type DirectAndInverse interface {
	DirectCalcAll(lat1_deg, lon1_deg, azi1_deg, s12_m float64) AllDirectResults
	DirectCalcLatLon(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLon
	DirectCalcLatLonAzi(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLonAzi
	DirectCalcLatLonAziGeodesicScales(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLonAziGeodesicScales
	DirectCalcLatLonAziReducedLength(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLonAziReducedLength
	DirectCalcLatLonAziReducedLengthGeodesicScales(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLonAziReducedLengthGeodesicScales
	DirectCalcWithCapabilities(lat1_deg, lon1_deg, azi1_deg, s12_m float64, capabilities uint64) AllDirectResults
	DirectLineWithCapabilities(lat1_deg, lon1_deg, azi1_deg, s12_m float64, capabilities uint64) GeodesicLine
	EqualtorialRadius() float64
	Flattening() float64
	InverseCalcAll(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) AllInverseResults
	InverseCalcAzimuthsArcLength(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) AzimuthsArcLength
	InverseCalcDistance(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) float64
	InverseCalcDistanceArcLength(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) DistanceArcLength
	InverseCalcDistanceAzimuths(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) DistanceAzimuths
	InverseCalcDistanceAzimuthsArcLength(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) DistanceAzimuthsArcLength
	InverseCalcDistanceAzimuthsArcLengthReducedLength(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) DistanceAzimuthsArcLengthReducedLength
	InverseCalcDistanceAzimuthsArcLengthReducedLengthScales(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) DistanceAzimuthsArcLengthReducedLengthScales
	InverseCalcWithCapabilities(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64, capabilities uint64) AllInverseResults
	InverseLineWithCapabilities(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64, capabilities uint64) GeodesicLine
	LineWithCapabilities(lat1_deg, lon1_deg, azi1_deg float64, capabilities uint64) GeodesicLine
}
// WGS84_A is the WGS84 equatorial radius in meters.
const WGS84_A float64 = 6378137.0

// WGS84_F is the WGS84 flattening.
// Evaluating this as 1000000000.0 / (298257223563f_64) reduces the
// round-off error by about 10%. However, expressing the flattening as
// 1/298.257223563 is well ingrained.
const WGS84_F float64 = 1.0 / ((298257223563.0) / 1000000000.0)

// _GEODESIC_ORDER is the order of the geodesic series expansions.
const _GEODESIC_ORDER int64 = 6

// nC3x_ and nC4x_ are the sizes of the C3 and C4 coefficient arrays.
const nC3x_ int64 = 15
const nC4x_ int64 = 21
// coeff_A3 returns the fixed polynomial coefficients used to build the A3
// series (consumed pairwise as numerator terms plus a denominator by
// NewGeodesic). Do not reorder: the layout is positional.
func coeff_A3() [18]float64 {
	return [18]float64{
		-3.0, 128.0, -2.0, -3.0, 64.0, -1.0, -3.0, -1.0, 16.0, 3.0, -1.0, -2.0, 8.0, 1.0, -1.0, 2.0,
		1.0, 1.0,
	}
}

// coeff_C3 returns the fixed coefficients for the C3 series. Positional;
// do not reorder.
func coeff_C3() [45]float64 {
	return [45]float64{
		3.0, 128.0, 2.0, 5.0, 128.0, -1.0, 3.0, 3.0, 64.0, -1.0, 0.0, 1.0, 8.0, -1.0, 1.0, 4.0, 5.0,
		256.0, 1.0, 3.0, 128.0, -3.0, -2.0, 3.0, 64.0, 1.0, -3.0, 2.0, 32.0, 7.0, 512.0, -10.0, 9.0,
		384.0, 5.0, -9.0, 5.0, 192.0, 7.0, 512.0, -14.0, 7.0, 512.0, 21.0, 2560.0,
	}
}

// coeff_C4 returns the fixed coefficients for the C4 (area) series.
// Positional; do not reorder.
func coeff_C4() [77]float64 {
	return [77]float64{
		97.0, 15015.0, 1088.0, 156.0, 45045.0, -224.0, -4784.0, 1573.0, 45045.0, -10656.0, 14144.0,
		-4576.0, -858.0, 45045.0, 64.0, 624.0, -4576.0, 6864.0, -3003.0, 15015.0, 100.0, 208.0, 572.0,
		3432.0, -12012.0, 30030.0, 45045.0, 1.0, 9009.0, -2944.0, 468.0, 135135.0, 5792.0, 1040.0,
		-1287.0, 135135.0, 5952.0, -11648.0, 9152.0, -2574.0, 135135.0, -64.0, -624.0, 4576.0, -6864.0,
		3003.0, 135135.0, 8.0, 10725.0, 1856.0, -936.0, 225225.0, -8448.0, 4992.0, -1144.0, 225225.0,
		-1440.0, 4160.0, -4576.0, 1716.0, 225225.0, -136.0, 63063.0, 1024.0, -208.0, 105105.0, 3584.0,
		-3328.0, 1144.0, 315315.0, -128.0, 135135.0, -2560.0, 832.0, 405405.0, 128.0, 99099.0,
	}
}
// Geodesic carries the ellipsoid parameters and precomputed series
// coefficients used by the direct and inverse geodesic computations.
type Geodesic struct {
	a     float64 // equatorial radius (meters)
	f     float64 // flattening
	f1    float64 // 1 - f
	e2    float64 // first eccentricity squared: f*(2-f)
	ep2   float64 // second eccentricity squared: e2/(1-f)^2
	n     float64 // third flattening: f/(2-f)
	b     float64 // polar semi-axis: a*(1-f)
	c2    float64 // (a^2 + b^2*atanh-term)/2; normalization constant for areas
	etol2 float64 // tolerance used by the short-line test in _InverseStart

	GEODESIC_ORDER int64
	nC3x_          int64
	nC4x_          int64
	maxit1_        uint64 // Newton iterations before falling back to bisection aids
	maxit2_        uint64 // hard cap on inverse-problem iterations

	_A3x [_GEODESIC_ORDER]float64 // evaluated A3 series coefficients
	_C3x [nC3x_]float64           // evaluated C3 series coefficients
	_C4x [nC4x_]float64           // evaluated C4 series coefficients

	tiny_    float64 // sqrt of the smallest positive float
	tol0_    float64 // machine epsilon
	tol1_    float64 // 200 * tol0_
	tol2_    float64 // sqrt(tol0_)
	tolb_    float64 // tol0_ * tol2_
	xthresh_ float64 // 1000 * tol2_
}
// NewGeodesic constructs a Geodesic for an ellipsoid with equatorial
// radius a (meters) and flattening f, precomputing the numerical
// tolerances and the A3/C3/C4 series coefficients (functions of the third
// flattening n) used by the direct and inverse solvers.
func NewGeodesic(a, f float64) Geodesic {
	var maxit1_ uint64 = 20
	maxit2_ := maxit1_ + _DIGITS + 10
	// Tolerances derived from machine precision.
	tiny_ := math.Sqrt(get_min_val())
	tol0_ := get_epsilon()
	tol1_ := 200.0 * tol0_
	tol2_ := math.Sqrt(tol0_)
	tolb_ := tol0_ * tol2_
	xthresh_ := 1000.0 * tol2_
	// Derived ellipsoid parameters.
	_f1 := 1.0 - f
	_e2 := f * (2.0 - f)
	_ep2 := _e2 / sq(_f1)
	_n := f / (2.0 - f)
	_b := a * _f1
	var is_f_neg float64
	if f < 0.0 {
		is_f_neg = -1.0
	} else {
		is_f_neg = 1.0
	}
	// atanh(e)/e term for the area constant; the sphere (e2 == 0) limit is 1.
	to_mul := eatanhe(1.0, is_f_neg*math.Sqrt(math.Abs(_e2))) / _e2
	if _e2 == 0.0 {
		to_mul = 1.0
	}
	_c2 := (sq(a) + sq(_b)*to_mul) / 2.0
	_etol2 := 0.1 * tol2_ / math.Sqrt(math.Max(math.Abs(f), 0.001)*math.Min((1.0-f/2.0), 1.0)/2.0)
	_A3x := [_GEODESIC_ORDER]float64{}
	_C3x := [nC3x_]float64{}
	_C4x := [nC4x_]float64{}
	// Evaluate the A3 coefficients as polynomials in n (a3coeff).
	var o int64 = 0
	k := 0
	coefa3 := coeff_A3()
	for j := _GEODESIC_ORDER - 1; j >= 0; j-- {
		m := int64(math.Min(float64(j), float64(_GEODESIC_ORDER-j-1)))
		_A3x[k] = polyval(m, coefa3[o:], _n) / coefa3[o+m+1]
		k += 1
		o += m + 2
	}
	// Evaluate the C3 coefficients (c3coeff).
	o = 0
	k = 0
	coefc3 := coeff_C3()
	for l := 1; l < int(_GEODESIC_ORDER); l++ {
		for j := int(_GEODESIC_ORDER) - 1; j >= l; j-- {
			m := int64(math.Min(float64(j), float64(int(_GEODESIC_ORDER)-j-1)))
			_C3x[k] = polyval(m, coefc3[o:], _n) / coefc3[o+m+1]
			k += 1
			o += m + 2
		}
	}
	// Evaluate the C4 coefficients (c4coeff).
	o = 0
	k = 0
	coefc4 := coeff_C4()
	for l := 0; l < int(_GEODESIC_ORDER); l++ {
		for j := int(_GEODESIC_ORDER) - 1; j >= l; j-- {
			m := int64(int(_GEODESIC_ORDER) - j - 1)
			_C4x[k] = polyval(m, coefc4[o:], _n) / coefc4[(o+m+1)]
			k += 1
			o += m + 2
		}
	}
	return Geodesic{
		a,
		f,
		_f1,
		_e2,
		_ep2,
		_n,
		_b,
		_c2,
		_etol2,
		_GEODESIC_ORDER,
		nC3x_,
		nC4x_,
		maxit1_,
		maxit2_,
		_A3x,
		_C3x,
		_C4x,
		tiny_,
		tol0_,
		tol1_,
		tol2_,
		tolb_,
		xthresh_,
	}
}
// Wgs84 returns a Geodesic configured for the WGS84 ellipsoid.
func Wgs84() Geodesic {
	return NewGeodesic(WGS84_A, WGS84_F)
}

// EqualtorialRadius returns the equatorial radius a in meters.
// (The misspelling is preserved: it is part of the DirectAndInverse
// interface and renaming it would break callers.)
func (g *Geodesic) EqualtorialRadius() float64 {
	return g.a
}

// Flattening returns the flattening f of the ellipsoid.
func (g *Geodesic) Flattening() float64 {
	return g.f
}
// _A3f evaluates the A3 series as a polynomial in eps using the
// precomputed _A3x coefficients.
func (g *Geodesic) _A3f(eps float64) float64 {
	return polyval(int64(_GEODESIC_ORDER-1), g._A3x[:], eps)
}

// _C3f fills c[1..GEODESIC_ORDER-1] with the C3 coefficients evaluated at
// eps; c[0] is left untouched. o walks the packed _C3x layout.
func (g *Geodesic) _C3f(eps float64, c []float64) {
	mult := 1.0
	o := 0
	for l := 1; l < int(_GEODESIC_ORDER); l++ {
		m := int(_GEODESIC_ORDER) - l - 1
		mult *= eps
		c[l] = mult * polyval(int64(m), g._C3x[o:], eps)
		o += m + 1
	}
}

// _C4f fills c[0..GEODESIC_ORDER-1] with the C4 (area) coefficients
// evaluated at eps. Unlike _C3f, the eps multiplier starts at 1 so c[0]
// is populated too.
func (g *Geodesic) _C4f(eps float64, c []float64) {
	mult := 1.0
	o := 0
	for l := 0; l < int(_GEODESIC_ORDER); l++ {
		m := int(_GEODESIC_ORDER) - l - 1
		c[l] = mult * polyval(int64(m), g._C4x[o:], eps)
		o += m + 1
		mult *= eps
	}
}
// _Lengths returns various length quantities for the geodesic segment
// between points 1 and 2 on the auxiliary sphere:
//
//	s12b - distance divided by b (if DISTANCE requested)
//	m12b - reduced length divided by b (if REDUCEDLENGTH requested)
//	m0   - a series constant used with the reduced length
//	M12, M21 - geodesic scales (if GEODESICSCALE requested)
//
// eps is the series parameter, sig12 the spherical arc length, (ssig*,
// csig*) the sine/cosine of the arc positions, dn* = sqrt(1+ep2*sbet^2)
// and cbet* the cosines of the reduced latitudes. c1a and c2a are scratch
// arrays for the C1/C2 series coefficients. Quantities not requested in
// outmask are returned as NaN.
func (g *Geodesic) _Lengths(
	eps, sig12, ssig1, csig1, dn1, ssig2, csig2, dn2, cbet1, cbet2 float64,
	outmask uint64,
	c1a []float64,
	c2a []float64,
) (float64, float64, float64, float64, float64) {
	outmask &= OUT_MASK
	s12b := math.NaN()
	m12b := math.NaN()
	m0 := math.NaN()
	M12 := math.NaN()
	M21 := math.NaN()
	A1 := 0.0
	A2 := 0.0
	m0x := 0.0
	J12 := 0.0
	if outmask&(DISTANCE|REDUCEDLENGTH|GEODESICSCALE) != 0 {
		// Evaluate the A1/C1 (and, when needed, A2/C2) series.
		A1 = a1m1f(eps, _GEODESIC_ORDER)
		c1f(eps, c1a, int(_GEODESIC_ORDER))
		if outmask&(REDUCEDLENGTH|GEODESICSCALE) != 0 {
			A2 = a2m1f(eps, _GEODESIC_ORDER)
			c2f(eps, c2a, int(_GEODESIC_ORDER))
			m0x = A1 - A2
			A2 = 1.0 + A2
		}
		A1 = 1.0 + A1
	}
	if outmask&DISTANCE != 0 {
		B1 := sin_cos_series(true, ssig2, csig2, c1a) - sin_cos_series(true, ssig1, csig1, c1a)
		s12b = A1 * (sig12 + B1)
		if outmask&(REDUCEDLENGTH|GEODESICSCALE) != 0 {
			B2 := sin_cos_series(true, ssig2, csig2, c2a) - sin_cos_series(true, ssig1, csig1, c2a)
			J12 = m0x*sig12 + (A1*B1 - A2*B2)
		}
	} else if outmask&(REDUCEDLENGTH|GEODESICSCALE) != 0 {
		// DISTANCE not requested: fold the C1 and C2 series together.
		for l := 1; l <= int(_GEODESIC_ORDER); l++ {
			c2a[l] = A1*c1a[l] - A2*c2a[l]
		}
		J12 = m0x*sig12 + (sin_cos_series(true, ssig2, csig2, c2a) - sin_cos_series(true, ssig1, csig1, c2a))
	}
	if outmask&REDUCEDLENGTH != 0 {
		m0 = m0x
		// J12 is wrong
		m12b = dn2*(csig1*ssig2) - dn1*(ssig1*csig2) - csig1*csig2*J12
	}
	if outmask&GEODESICSCALE != 0 {
		csig12 := csig1*csig2 + ssig1*ssig2
		t := g.ep2 * (cbet1 - cbet2) * (cbet1 + cbet2) / (dn1 + dn2)
		M12 = csig12 + (t*ssig2-csig2*J12)*ssig1/dn1
		M21 = csig12 - (t*ssig1-csig1*J12)*ssig2/dn2
	}
	return s12b, m12b, m0, M12, M21
}
// _InverseStart returns a starting guess for Newton's method in the
// inverse problem. The returned sig12 is >= 0 when a short-line shortcut
// gives the answer directly (then salp2, calp2 and dnm are also valid);
// otherwise sig12 is -1 and (salp1, calp1) is the starting azimuth guess.
//
// Fixes relative to the original port:
//   - In the prolate (f < 0) branch, bet12a is now computed from sbet12a
//     (sin(bet1+bet2)) as in GeographicLib's InverseStart; the port used
//     sbet12 (sin(bet2-bet1)) by mistake.
//   - The inner `var betscale` that shadowed the outer variable has been
//     removed (behavior unchanged; the outer value was unused afterwards).
func (g *Geodesic) _InverseStart(
	sbet1, cbet1, dn1, sbet2, cbet2, dn2, lam12, slam12, clam12 float64,
	c1a []float64,
	c2a []float64,
) (float64, float64, float64, float64, float64, float64) {
	sig12 := -1.0 // negative means: no shortcut, run Newton's method
	salp2 := math.NaN()
	calp2 := math.NaN()
	dnm := math.NaN()
	var somg12 float64
	var comg12 float64
	// sin/cos of bet2 - bet1 and sin of bet1 + bet2.
	sbet12 := sbet2*cbet1 - cbet2*sbet1
	cbet12 := cbet2*cbet1 + sbet2*sbet1
	sbet12a := sbet2*cbet1 + cbet2*sbet1
	shortline := cbet12 >= 0.0 && sbet12 < 0.5 && cbet2*lam12 < 0.5
	if shortline {
		// Use a spherical approximation with an effective radius.
		sbetm2 := sq(sbet1 + sbet2)
		sbetm2 /= sbetm2 + sq(cbet1+cbet2)
		dnm = math.Sqrt(1.0 + g.ep2*sbetm2)
		omg12 := lam12 / (g.f1 * dnm)
		somg12 = math.Sin(omg12)
		comg12 = math.Cos(omg12)
	} else {
		somg12 = slam12
		comg12 = clam12
	}
	salp1 := cbet2 * somg12
	var calp1 float64
	if comg12 >= 0.0 {
		calp1 = sbet12 + cbet2*sbet1*sq(somg12)/(1.0+comg12)
	} else {
		calp1 = sbet12a - cbet2*sbet1*sq(somg12)/(1.0-comg12)
	}
	ssig12 := math.Hypot(salp1, calp1)
	csig12 := sbet1*sbet2 + cbet1*cbet2*comg12
	if shortline && ssig12 < g.etol2 {
		// Really short lines: solve directly on the sphere.
		salp2 = cbet1 * somg12
		var to_mul float64
		if comg12 >= 0.0 {
			to_mul = sq(somg12) / (1.0 + comg12)
		} else {
			to_mul = 1.0 - comg12
		}
		calp2 = sbet12 - cbet1*sbet2*to_mul
		salp2, calp2 = norm(salp2, calp2)
		sig12 = math.Atan2(ssig12, csig12)
	} else if math.Abs(g.n) > 0.1 || csig12 >= 0.0 || ssig12 >= 6.0*math.Abs(g.n)*math.Pi*sq(cbet1) {
		// The initial (salp1, calp1) estimate above is good enough.
	} else {
		// Nearly antipodal points: scale the problem to the astroid
		// coordinates (x, y) and solve for the starting azimuth.
		var x float64
		var y float64
		var betscale float64
		var lamscale float64
		lam12x := math.Atan2(-slam12, -clam12)
		if g.f >= 0.0 {
			// Oblate case.
			k2 := sq(sbet1) * g.ep2
			eps := k2 / (2.0*(1.0+math.Sqrt(1.0+k2)) + k2)
			lamscale = g.f * cbet1 * g._A3f(eps) * math.Pi
			betscale = lamscale * cbet1
			x = lam12x / lamscale
			y = sbet12a / betscale
		} else {
			// Prolate case.
			cbet12a := cbet2*cbet1 - sbet2*sbet1
			// FIX: use sbet12a (sin(bet1+bet2)) per GeographicLib; the
			// port previously used sbet12 here.
			bet12a := math.Atan2(sbet12a, cbet12a)
			_, m12b, m0, _, _ := g._Lengths(
				g.n,
				math.Pi+bet12a,
				sbet1,
				-cbet1,
				dn1,
				sbet2,
				cbet2,
				dn2,
				cbet1,
				cbet2,
				REDUCEDLENGTH,
				c1a,
				c2a,
			)
			x = -1.0 + m12b/(cbet1*cbet2*m0*math.Pi)
			if x < -0.01 {
				betscale = sbet12a / x
			} else {
				betscale = -g.f * sq(cbet1) * math.Pi
			}
			lamscale = betscale / cbet1
			y = lam12x / lamscale
		}
		if y > -g.tol1_ && x > -1.0-g.xthresh_ {
			// Strip near the cut: pick the azimuth directly.
			if g.f >= 0.0 {
				salp1 = math.Min(-x, 1.0)
				calp1 = -math.Sqrt(1.0 - sq(salp1))
			} else {
				var to_compare float64
				if x > -g.tol1_ {
					to_compare = 0.0
				} else {
					to_compare = -1.0
				}
				calp1 = math.Max(x, to_compare)
				salp1 = math.Sqrt(1.0 - sq(calp1))
			}
		} else {
			// Solve the astroid equation for the starting point.
			k := astroid(x, y)
			var to_mul float64
			if g.f >= 0.0 {
				to_mul = -x * k / (1.0 + k)
			} else {
				to_mul = -y * (1.0 + k) / k
			}
			omg12a := lamscale * to_mul
			somg12 = math.Sin(omg12a)
			comg12 = -math.Cos(omg12a)
			salp1 = cbet2 * somg12
			calp1 = sbet12a - cbet2*sbet1*sq(somg12)/(1.0-comg12)
		}
	}
	// Sanitize the starting azimuth. Written as !(salp1 <= 0) so that a
	// NaN salp1 also takes the normalization path's else branch safely.
	if !(salp1 <= 0.0) {
		salp1, calp1 = norm(salp1, calp1)
	} else {
		salp1 = 1.0
		calp1 = 0.0
	}
	return sig12, salp1, calp1, salp2, calp2, dnm
}
// _Lambda12 computes, for a trial azimuth (salp1, calp1) at point 1, the
// resulting longitude difference lam12 together with the quantities needed
// to continue Newton's method in _gen_inverse:
//
//	(lam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps,
//	 domg12, dlam12)
//
// dlam12 is d(lam12)/d(alp1), computed only when diffp is true (NaN
// otherwise). c1a, c2a, c3a are scratch coefficient arrays.
func (g *Geodesic) _Lambda12(
	sbet1, cbet1, dn1, sbet2, cbet2, dn2, salp1, calp1, slam120, clam120 float64,
	diffp bool,
	c1a []float64,
	c2a []float64,
	c3a []float64,
) (float64, float64, float64, float64, float64, float64, float64, float64, float64, float64, float64) {
	if sbet1 == 0.0 && calp1 == 0.0 {
		// Break the degeneracy for equatorial westward lines.
		calp1 = -g.tiny_
	}
	// salp0/calp0: azimuth of the geodesic at the equator crossing.
	salp0 := salp1 * cbet1
	calp0 := math.Hypot(calp1, salp1*sbet1)
	ssig1 := sbet1
	somg1 := salp0 * sbet1
	csig1 := calp1 * cbet1
	comg1 := calp1 * cbet1
	ssig1, csig1 = norm(ssig1, csig1)
	// Forward the azimuth to point 2; the guards avoid cancellation.
	var salp2 float64
	if cbet2 != cbet1 {
		salp2 = salp0 / cbet2
	} else {
		salp2 = salp1
	}
	var to_add float64
	if cbet1 < -sbet1 {
		to_add = (cbet2 - cbet1) * (cbet1 + cbet2)
	} else {
		to_add = (sbet1 - sbet2) * (sbet1 + sbet2)
	}
	calp2 := math.Abs(calp1)
	if cbet2 != cbet1 || math.Abs(sbet2) != -sbet1 {
		calp2 = math.Sqrt(sq(calp1*cbet1)+to_add) / cbet2
	}
	ssig2 := sbet2
	somg2 := salp0 * sbet2
	csig2 := calp2 * cbet2
	comg2 := calp2 * cbet2
	ssig2, csig2 = norm(ssig2, csig2)
	// Spherical arc length and longitude difference on the aux sphere.
	sig12 := math.Atan2(math.Max(csig1*ssig2-ssig1*csig2, 0.0), csig1*csig2+ssig1*ssig2)
	somg12 := math.Max((comg1*somg2 - somg1*comg2), 0.0)
	comg12 := comg1*comg2 + somg1*somg2
	eta := math.Atan2(somg12*clam120-comg12*slam120, comg12*clam120+somg12*slam120)
	k2 := sq(calp0) * g.ep2
	eps := k2 / (2.0*(1.0+math.Sqrt(1.0+k2)) + k2)
	// Correct the longitude using the C3 series.
	g._C3f(eps, c3a)
	B312 := sin_cos_series(true, ssig2, csig2, c3a) - sin_cos_series(true, ssig1, csig1, c3a)
	domg12 := -g.f * g._A3f(eps) * salp0 * (sig12 + B312)
	lam12 := eta + domg12
	var dlam12 float64
	if diffp {
		if calp2 == 0.0 {
			// Degenerate derivative at calp2 == 0.
			dlam12 = -2.0 * g.f1 * dn1 / sbet1
		} else {
			// Derivative via the reduced length m12.
			_, res, _, _, _ := g._Lengths(
				eps,
				sig12,
				ssig1,
				csig1,
				dn1,
				ssig2,
				csig2,
				dn2,
				cbet1,
				cbet2,
				REDUCEDLENGTH,
				c1a,
				c2a,
			)
			dlam12 = res
			dlam12 *= g.f1 / (calp2 * cbet2)
		}
	} else {
		dlam12 = math.NaN()
	}
	return lam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps, domg12, dlam12
}
// _gen_inverse_azi solves the inverse problem like _gen_inverse but
// converts the azimuth sines/cosines into degrees (azi1, azi2) when the
// AZIMUTH capability is requested; otherwise those are NaN.
func (g *Geodesic) _gen_inverse_azi(
	lat1, lon1, lat2, lon2 float64,
	outmask uint64,
) (
	a12 float64,
	s12 float64,
	azi1 float64,
	azi2 float64,
	m12 float64,
	M12 float64,
	M21 float64,
	S12 float64,
) {
	azi1 = math.NaN()
	azi2 = math.NaN()
	outmask &= OUT_MASK
	a12, s12, salp1, calp1, salp2, calp2, m12, M12, M21, S12 := g._gen_inverse(
		lat1, lon1, lat2, lon2, outmask,
	)
	if outmask&AZIMUTH != 0 {
		azi1 = atan2_deg(salp1, calp1)
		azi2 = atan2_deg(salp2, calp2)
	}
	return a12, s12, azi1, azi2, m12, M12, M21, S12
}
// _gen_inverse is the core inverse-problem solver (Karney's algorithm):
// given two points it returns the arc length a12, distance s12, the
// sines/cosines of the two azimuths, the reduced length m12, geodesic
// scales M12/M21 and the area S12. Quantities not requested via outmask
// are NaN.
//
// The computation: canonicalize the points (so lat1 is the southernmost
// and |lat1| >= |lat2|), handle the meridional and equatorial special
// cases directly, otherwise run Newton's method on the azimuth at point 1
// via _Lambda12, and finally undo the canonicalization swaps.
func (g *Geodesic) _gen_inverse(lat1, lon1, lat2, lon2 float64, outmask uint64) (
	a12 float64,
	s12 float64,
	salp1 float64,
	calp1 float64,
	salp2 float64,
	calp2 float64,
	m12 float64,
	M12 float64,
	M21 float64,
	S12 float64,
) {
	a12 = math.NaN()
	s12 = math.NaN()
	m12 = math.NaN()
	M12 = math.NaN()
	M21 = math.NaN()
	S12 = math.NaN()
	outmask &= OUT_MASK
	// Reduce the longitude difference to [0, 180] and remember its sign.
	lon12, lon12s := ang_diff(lon1, lon2)
	var lonsign float64
	if lon12 >= 0.0 {
		lonsign = 1.0
	} else {
		lonsign = -1.0
	}
	lon12 = lonsign * ang_round(lon12)
	lon12s = ang_round((180.0 - lon12) - lonsign*lon12s)
	lam12 := lon12 * DEG2RAD
	var slam12 float64
	var clam12 float64
	if lon12 > 90.0 {
		// Use the supplement for better accuracy near 180 degrees.
		slam12, clam12 = sincosd(lon12s)
		clam12 = -clam12
	} else {
		slam12, clam12 = sincosd(lon12)
	}
	lat1 = ang_round(lat_fix(lat1))
	lat2 = ang_round(lat_fix(lat2))
	// Swap points if necessary so that |lat1| >= |lat2|.
	var swapp float64
	if math.Abs(lat1) < math.Abs(lat2) {
		swapp = -1.0
	} else {
		swapp = 1.0
	}
	if swapp < 0.0 {
		lonsign *= -1.0
		lat2, lat1 = lat1, lat2
	}
	// Make lat1 <= 0 and remember the sign flip.
	var latsign float64
	if lat1 < 0.0 {
		latsign = 1.0
	} else {
		latsign = -1.0
	}
	lat1 *= latsign
	lat2 *= latsign
	// Reduced latitudes beta1, beta2.
	sbet1, cbet1 := sincosd(lat1)
	sbet1 *= g.f1
	sbet1, cbet1 = norm(sbet1, cbet1)
	cbet1 = math.Max(cbet1, g.tiny_)
	sbet2, cbet2 := sincosd(lat2)
	sbet2 *= g.f1
	sbet2, cbet2 = norm(sbet2, cbet2)
	cbet2 = math.Max(cbet2, g.tiny_)
	// If the points are symmetric about the equator, force exact equality
	// of the betas to avoid ill-conditioning.
	if cbet1 < -sbet1 {
		if cbet2 == cbet1 {
			if sbet2 < 0.0 {
				sbet2 = sbet1
			} else {
				sbet2 = -sbet1
			}
		}
	} else if math.Abs(sbet2) == -sbet1 {
		cbet2 = cbet1
	}
	dn1 := math.Sqrt(1.0 + g.ep2*sq(sbet1))
	dn2 := math.Sqrt(1.0 + g.ep2*sq(sbet2))
	const CARR_SIZE uint64 = uint64(_GEODESIC_ORDER) + 1
	C1a := [CARR_SIZE]float64{}
	C2a := [CARR_SIZE]float64{}
	C3a := [_GEODESIC_ORDER]float64{}
	// Meridional geodesic: both points on the same meridian (or through a pole).
	meridian := lat1 == -90.0 || slam12 == 0.0
	calp1 = 0.0
	salp1 = 0.0
	calp2 = 0.0
	salp2 = 0.0
	ssig1 := 0.0
	csig1 := 0.0
	ssig2 := 0.0
	csig2 := 0.0
	var sig12 float64
	s12x := 0.0
	m12x := 0.0
	if meridian {
		calp1 = clam12
		salp1 = slam12
		calp2 = 1.0
		salp2 = 0.0
		ssig1 = sbet1
		csig1 = calp1 * cbet1
		ssig2 = sbet2
		csig2 = calp2 * cbet2
		sig12 = math.Atan2(math.Max((csig1*ssig2-ssig1*csig2), 0.0), csig1*csig2+ssig1*ssig2)
		res1, res2, _, res4, res5 := g._Lengths(
			g.n,
			sig12,
			ssig1,
			csig1,
			dn1,
			ssig2,
			csig2,
			dn2,
			cbet1,
			cbet2,
			outmask|DISTANCE|REDUCEDLENGTH,
			C1a[:],
			C2a[:],
		)
		s12x = res1
		m12x = res2
		M12 = res4
		M21 = res5
		if sig12 < 1.0 || m12x >= 0.0 {
			if sig12 < 3.0*g.tiny_ {
				sig12 = 0.0
				m12x = 0.0
				s12x = 0.0
			}
			m12x *= g.b
			s12x *= g.b
			a12 = sig12 * RAD2DEG
		} else {
			// m12 < 0: the geodesic wraps past a pole; fall through to
			// the general solver.
			meridian = false
		}
	}
	somg12 := 2.0
	comg12 := 0.0
	omg12 := 0.0
	var dnm float64
	eps := 0.0
	if !meridian && sbet1 == 0.0 && (g.f <= 0.0 || lon12s >= g.f*180.0) {
		// Equatorial geodesic: both points on the equator and short enough.
		calp1 = 0.0
		calp2 = 0.0
		salp1 = 1.0
		salp2 = 1.0
		s12x = g.a * lam12
		sig12 = lam12 / g.f1
		omg12 = lam12 / g.f1
		m12x = g.b * math.Sin(sig12)
		if outmask&GEODESICSCALE != 0 {
			M12 = math.Cos(sig12)
			M21 = math.Cos(sig12)
		}
		a12 = lon12 / g.f1
	} else if !meridian {
		// General case: get a starting guess, then Newton's method.
		res1, res2, res3, res4, res5, res6 := g._InverseStart(
			sbet1, cbet1, dn1, sbet2, cbet2, dn2, lam12, slam12, clam12, C1a[:], C2a[:],
		)
		sig12 = res1
		salp1 = res2
		calp1 = res3
		salp2 = res4
		calp2 = res5
		dnm = res6
		if sig12 >= 0.0 {
			// Short-line shortcut from _InverseStart succeeded.
			s12x = sig12 * g.b * dnm
			m12x = sq(dnm) * g.b * math.Sin(sig12/dnm)
			if outmask&GEODESICSCALE != 0 {
				M12 = math.Cos(sig12 / dnm)
				M21 = math.Cos(sig12 / dnm)
			}
			a12 = sig12 * RAD2DEG
			omg12 = lam12 / (g.f1 * dnm)
		} else {
			// Newton's method on alp1, bracketed by [salp1a/calp1a,
			// salp1b/calp1b] with bisection as a fallback.
			tripn := false
			tripb := false
			salp1a := g.tiny_
			calp1a := 1.0
			salp1b := g.tiny_
			calp1b := -1.0
			domg12 := 0.0
			for numit := uint64(0); numit < g.maxit2_; numit++ {
				res1, res2, res3, res4, res5, res6, res7, res8, res9, res10, res11 := g._Lambda12(
					sbet1,
					cbet1,
					dn1,
					sbet2,
					cbet2,
					dn2,
					salp1,
					calp1,
					slam12,
					clam12,
					numit < g.maxit1_,
					C1a[:],
					C2a[:],
					C3a[:],
				)
				v := res1
				salp2 = res2
				calp2 = res3
				sig12 = res4
				ssig1 = res5
				csig1 = res6
				ssig2 = res7
				csig2 = res8
				eps = res9
				domg12 = res10
				dv := res11
				var to_mul float64
				if tripn {
					to_mul = 8.0
				} else {
					to_mul = 1.0
				}
				// Converged (or bisection bracket collapsed).
				if tripb || !(math.Abs(v) >= to_mul*g.tol0_) {
					break
				}
				// Tighten the bracket.
				if v > 0.0 && (numit > g.maxit1_ || calp1/salp1 > calp1b/salp1b) {
					salp1b = salp1
					calp1b = calp1
				} else if v < 0.0 && (numit > g.maxit1_ || calp1/salp1 < calp1a/salp1a) {
					salp1a = salp1
					calp1a = calp1
				}
				if numit < g.maxit1_ && dv > 0.0 {
					// Newton step.
					dalp1 := -v / dv
					sdalp1 := math.Sin(dalp1)
					cdalp1 := math.Cos(dalp1)
					nsalp1 := salp1*cdalp1 + calp1*sdalp1
					if nsalp1 > 0.0 && math.Abs(dalp1) < math.Pi {
						calp1 = calp1*cdalp1 - salp1*sdalp1
						salp1 = nsalp1
						salp1, calp1 = norm(salp1, calp1)
						tripn = math.Abs(v) <= 16.0*g.tol0_
						continue
					}
				}
				// Newton failed: bisect the bracket.
				salp1 = (salp1a + salp1b) / 2.0
				calp1 = (calp1a + calp1b) / 2.0
				salp1, calp1 = norm(salp1, calp1)
				tripn = false
				tripb = math.Abs(salp1a-salp1)+(calp1a-calp1) < g.tolb_ || math.Abs(salp1-salp1b)+(calp1-calp1b) < g.tolb_
			}
			var to_cmp uint64
			if outmask&(REDUCEDLENGTH|GEODESICSCALE) != 0 {
				to_cmp = DISTANCE
			} else {
				to_cmp = EMPTY
			}
			lengthmask := outmask | to_cmp
			res1, res2, _, res4, res5 = g._Lengths(
				eps, sig12, ssig1, csig1, dn1, ssig2, csig2, dn2, cbet1, cbet2, lengthmask,
				C1a[:], C2a[:],
			)
			s12x = res1
			m12x = res2
			M12 = res4
			M21 = res5
			m12x *= g.b
			s12x *= g.b
			a12 = sig12 * RAD2DEG
			if outmask&AREA != 0 {
				sdomg12 := math.Sin(domg12)
				cdomg12 := math.Cos(domg12)
				somg12 = slam12*cdomg12 - clam12*sdomg12
				comg12 = clam12*cdomg12 + slam12*sdomg12
			}
		}
	}
	if outmask&DISTANCE != 0 {
		s12 = 0.0 + s12x
	}
	if outmask&REDUCEDLENGTH != 0 {
		m12 = 0.0 + m12x
	}
	if outmask&AREA != 0 {
		// Area via the C4 series and the spherical excess term.
		salp0 := salp1 * cbet1
		calp0 := math.Hypot(calp1, salp1*sbet1)
		if calp0 != 0.0 && salp0 != 0.0 {
			ssig1 = sbet1
			csig1 = calp1 * cbet1
			ssig2 = sbet2
			csig2 = calp2 * cbet2
			k2 := sq(calp0) * g.ep2
			eps = k2 / (2.0*(1.0+math.Sqrt(1.0+k2)) + k2)
			A4 := sq(g.a) * calp0 * salp0 * g.e2
			ssig1, csig1 = norm(ssig1, csig1)
			ssig2, csig2 = norm(ssig2, csig2)
			C4a := [_GEODESIC_ORDER]float64{}
			g._C4f(eps, C4a[:])
			B41 := sin_cos_series(false, ssig1, csig1, C4a[:])
			B42 := sin_cos_series(false, ssig2, csig2, C4a[:])
			S12 = A4 * (B42 - B41)
		} else {
			S12 = 0.0
		}
		if !meridian && somg12 > 1.0 {
			somg12, comg12 = math.Sincos(omg12)
		}
		var alp12 float64
		if !meridian && comg12 > -0.7071 && sbet2-sbet1 < 1.75 {
			// Use a stable formula for the spherical excess when the
			// points are not nearly antipodal.
			domg12 := 1.0 + comg12
			dbet1 := 1.0 + cbet1
			dbet2 := 1.0 + cbet2
			alp12 = 2.0 * math.Atan2(somg12*(sbet1*dbet2+sbet2*dbet1), domg12*(sbet1*sbet2+dbet1*dbet2))
		} else {
			// alp12 = alp2 - alp1.
			salp12 := salp2*calp1 - calp2*salp1
			calp12 := calp2*calp1 + salp2*salp1
			if salp12 == 0.0 && calp12 < 0.0 {
				salp12 = g.tiny_ * calp1
				calp12 = -1.0
			}
			alp12 = math.Atan2(salp12, calp12)
		}
		S12 += g.c2 * alp12
		S12 *= swapp * lonsign * latsign
		S12 += 0.0
	}
	// Undo the canonicalization swaps and sign flips.
	if swapp < 0.0 {
		salp2, salp1 = salp1, salp2
		calp2, calp1 = calp1, calp2
		if outmask&GEODESICSCALE != 0 {
			M21, M12 = M12, M21
		}
	}
	salp1 *= swapp * lonsign
	calp1 *= swapp * latsign
	salp2 *= swapp * lonsign
	calp2 *= swapp * latsign
	return a12, s12, salp1, calp1, salp2, calp2, m12, M12, M21, S12
}
// _gen_direct solves the general direct geodesic problem by constructing a
// temporary GeodesicLine from point 1 and evaluating it either at a distance
// (s12_a12 in meters) or, when arcmode is true, at a spherical arc length
// (s12_a12 in degrees).
// It returns (a12, lat2, lon2, azi2, s12, m12, M12, M21, S12, outmask),
// where outmask is the capability mask actually used to build the line.
func (g *Geodesic) _gen_direct(
	lat1 float64,
	lon1 float64,
	azi1 float64,
	arcmode bool,
	s12_a12 float64,
	outmask uint64,
) (float64, float64, float64, float64, float64, float64, float64, float64, float64, uint64) {
	// A distance-parameterized line needs the DISTANCE_IN capability so it
	// can convert the requested distance into an arc length internally.
	if !arcmode {
		outmask |= DISTANCE_IN
	}
	gl := NewGeodesicLineWithCapability(*g, lat1, lon1, azi1, outmask)
	a12, lat2, lon2, azi2, s12, m12, M12, M21, S12 := gl._gen_position(arcmode, s12_a12, outmask)
	return a12, lat2, lon2, azi2, s12, m12, M12, M21, S12, outmask
}
// LatLon holds the geographic position of a point.
// Both coordinates are expressed in degrees.
type LatLon struct {
	LatDeg, LonDeg float64
}

// DirectCalcLatLon solves the direct geodesic problem, reporting only the
// position of the destination point.
//   - lat1_deg - Latitude of 1st point [degrees] [-90.,90.]
//   - lon1_deg - Longitude of 1st point [degrees] [-180., 180.]
//   - azi1_deg - Azimuth at 1st point [degrees] [-180., 180.]
//   - s12_m - Distance from 1st to 2nd point [meters] Value may be negative
func (g *Geodesic) DirectCalcLatLon(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLon {
	_, lat2, lon2, _, _, _, _, _, _, _ := g._gen_direct(
		lat1_deg, lon1_deg, azi1_deg, false, s12_m, LATITUDE|LONGITUDE,
	)
	return LatLon{LatDeg: lat2, LonDeg: lon2}
}
// LatLonAzi holds the position and forward azimuth of a point.
// All values are expressed in degrees.
type LatLonAzi struct {
	LatDeg, LonDeg, AziDeg float64
}

// DirectCalcLatLonAzi solves the direct geodesic problem, reporting the
// position and forward azimuth at the destination point.
//   - lat1_deg - Latitude of 1st point [degrees] [-90.,90.]
//   - lon1_deg - Longitude of 1st point [degrees] [-180., 180.]
//   - azi1_deg - Azimuth at 1st point [degrees] [-180., 180.]
//   - s12_m - Distance from 1st to 2nd point [meters] Value may be negative
func (g *Geodesic) DirectCalcLatLonAzi(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLonAzi {
	_, lat2, lon2, azi2, _, _, _, _, _, _ := g._gen_direct(
		lat1_deg, lon1_deg, azi1_deg, false, s12_m, LATITUDE|LONGITUDE|AZIMUTH,
	)
	return LatLonAzi{LatDeg: lat2, LonDeg: lon2, AziDeg: azi2}
}
// LatLonAziReducedLength holds a destination position, azimuth, and the
// reduced length of the geodesic. Angles in degrees, lengths in meters.
type LatLonAziReducedLength struct {
	LatDeg         float64 // Latitude [degrees]
	LonDeg         float64 // Longitude [degrees]
	AziDeg         float64 // Azimuth [degrees]
	ReducedLengthM float64 // Reduced length of the geodesic [meters]
}

// DirectCalcLatLonAziReducedLength solves the direct geodesic problem,
// reporting position, azimuth, and the reduced length of the geodesic.
//   - lat1_deg - Latitude of 1st point [degrees] [-90.,90.]
//   - lon1_deg - Longitude of 1st point [degrees] [-180., 180.]
//   - azi1_deg - Azimuth at 1st point [degrees] [-180., 180.]
//   - s12_m - Distance from 1st to 2nd point [meters] Value may be negative
func (g *Geodesic) DirectCalcLatLonAziReducedLength(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLonAziReducedLength {
	_, lat2, lon2, azi2, _, m12, _, _, _, _ := g._gen_direct(
		lat1_deg, lon1_deg, azi1_deg, false, s12_m, LATITUDE|LONGITUDE|AZIMUTH|REDUCEDLENGTH,
	)
	return LatLonAziReducedLength{LatDeg: lat2, LonDeg: lon2, AziDeg: azi2, ReducedLengthM: m12}
}
// LatLonAziGeodesicScales holds a destination position, azimuth, and the two
// geodesic scales. Angles in degrees; scales are dimensionless.
type LatLonAziGeodesicScales struct {
	LatDeg float64 // Latitude [degrees]
	LonDeg float64 // Longitude [degrees]
	AziDeg float64 // Azimuth [degrees]
	M12    float64 // Geodesic scale of point 2 relative to point 1 [dimensionless]
	M21    float64 // Geodesic scale of point 1 relative to point 2 [dimensionless]
}

// DirectCalcLatLonAziGeodesicScales solves the direct geodesic problem,
// reporting position, azimuth, and both geodesic scales.
//   - lat1_deg - Latitude of 1st point [degrees] [-90.,90.]
//   - lon1_deg - Longitude of 1st point [degrees] [-180., 180.]
//   - azi1_deg - Azimuth at 1st point [degrees] [-180., 180.]
//   - s12_m - Distance from 1st to 2nd point [meters] Value may be negative
func (g *Geodesic) DirectCalcLatLonAziGeodesicScales(lat1_deg, lon1_deg, azi1_deg, s12_m float64) LatLonAziGeodesicScales {
	_, lat2, lon2, azi2, _, _, M12, M21, _, _ := g._gen_direct(
		lat1_deg, lon1_deg, azi1_deg, false, s12_m, LATITUDE|LONGITUDE|AZIMUTH|GEODESICSCALE,
	)
	return LatLonAziGeodesicScales{LatDeg: lat2, LonDeg: lon2, AziDeg: azi2, M12: M12, M21: M21}
}
// LatLonAziReducedLengthGeodesicScales holds a destination position, azimuth,
// reduced length, and both geodesic scales. Angles in degrees, lengths in
// meters; scales are dimensionless.
type LatLonAziReducedLengthGeodesicScales struct {
	LatDeg         float64 // Latitude [degrees]
	LonDeg         float64 // Longitude [degrees]
	AziDeg         float64 // Azimuth [degrees]
	ReducedLengthM float64 // Reduced length of the geodesic [meters]
	M12            float64 // Geodesic scale of point 2 relative to point 1 [dimensionless]
	M21            float64 // Geodesic scale of point 1 relative to point 2 [dimensionless]
}

// DirectCalcLatLonAziReducedLengthGeodesicScales solves the direct geodesic
// problem, reporting position, azimuth, reduced length, and geodesic scales.
//   - lat1_deg - Latitude of 1st point [degrees] [-90.,90.]
//   - lon1_deg - Longitude of 1st point [degrees] [-180., 180.]
//   - azi1_deg - Azimuth at 1st point [degrees] [-180., 180.]
//   - s12_m - Distance from 1st to 2nd point [meters] Value may be negative
func (g *Geodesic) DirectCalcLatLonAziReducedLengthGeodesicScales(
	lat1_deg, lon1_deg, azi1_deg, s12_m float64,
) LatLonAziReducedLengthGeodesicScales {
	caps := LATITUDE | LONGITUDE | AZIMUTH | REDUCEDLENGTH | GEODESICSCALE
	_, lat2, lon2, azi2, _, m12, M12, M21, _, _ := g._gen_direct(
		lat1_deg, lon1_deg, azi1_deg, false, s12_m, caps,
	)
	return LatLonAziReducedLengthGeodesicScales{
		LatDeg:         lat2,
		LonDeg:         lon2,
		AziDeg:         azi2,
		ReducedLengthM: m12,
		M12:            M12,
		M21:            M21,
	}
}
// AllDirectResults bundles everything the direct method can compute:
// position, azimuth, reduced length, geodesic scales, area under the
// geodesic, and the arc length between point 1 and point 2.
type AllDirectResults struct {
	LatDeg         float64 // Latitude [degrees]
	LonDeg         float64 // Longitude [degrees]
	AziDeg         float64 // Azimuth [degrees]
	ReducedLengthM float64 // Reduced length of the geodesic [meters]
	M12            float64 // Geodesic scale of point 2 relative to point 1 [dimensionless]
	M21            float64 // Geodesic scale of point 1 relative to point 2 [dimensionless]
	S12M2          float64 // Area under the geodesic [meters^2]
	A12Deg         float64 // Arc length between point 1 and point 2 [degrees]
}

// DirectCalcAll solves the direct geodesic problem with every capability
// enabled.
//   - lat1_deg - Latitude of 1st point [degrees] [-90.,90.]
//   - lon1_deg - Longitude of 1st point [degrees] [-180., 180.]
//   - azi1_deg - Azimuth at 1st point [degrees] [-180., 180.]
//   - s12_m - Distance from 1st to 2nd point [meters] Value may be negative
func (g *Geodesic) DirectCalcAll(lat1_deg, lon1_deg, azi1_deg, s12_m float64) AllDirectResults {
	caps := LATITUDE | LONGITUDE | AZIMUTH | REDUCEDLENGTH | GEODESICSCALE | AREA
	a12, lat2, lon2, azi2, _, m12, M12, M21, S12, _ := g._gen_direct(
		lat1_deg, lon1_deg, azi1_deg, false, s12_m, caps,
	)
	return AllDirectResults{
		LatDeg:         lat2,
		LonDeg:         lon2,
		AziDeg:         azi2,
		ReducedLengthM: m12,
		M12:            M12,
		M21:            M21,
		S12M2:          S12,
		A12Deg:         a12,
	}
}
// DirectCalcWithCapabilities allows the user to specify which capabilities they wish to use.
// This function is useful if you want some other subset of capabilities than those offered
// by the other DirectCalc...() methods.
// Takes inputs
//   - lat1_deg - Latitude of 1st point [degrees] [-90.,90.]
//   - lon1_deg - Longitude of 1st point [degrees] [-180., 180.]
//   - azi1_deg - Azimuth at 1st point [degrees] [-180., 180.]
//   - s12_m - Distance from 1st to 2nd point [meters] Value may be negative
//   - capabilities - One or more of the capabilities constants as defined in the file
//     geodesiccapability.go. Usually, they are OR'd together, e.g. LATITUDE | LONGITUDE
func (g *Geodesic) DirectCalcWithCapabilities(
	lat1_deg, lon1_deg, azi1_deg, s12_m float64,
	capabilities uint64,
) AllDirectResults {
	a12, lat2, lon2, azi2, _, m12, M12, M21, S12, _ := g._gen_direct(
		lat1_deg, lon1_deg, azi1_deg, false, s12_m, capabilities,
	)
	return AllDirectResults{
		LatDeg:         lat2,
		LonDeg:         lon2,
		AziDeg:         azi2,
		ReducedLengthM: m12,
		M12:            M12,
		M21:            M21,
		S12M2:          S12,
		A12Deg:         a12,
	}
}
// InverseCalcDistance returns the distance from point 1 to point 2 in meters. Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcDistance(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) float64 {
	_, s12, _, _, _, _, _, _ := g._gen_inverse_azi(lat1_deg, lon1_deg, lat2_deg, lon2_deg, DISTANCE)
	return s12
}

// DistanceArcLength pairs the geodesic distance with its arc length.
type DistanceArcLength struct {
	DistanceM    float64 // distance between point 1 and point 2 [meters]
	ArcLengthDeg float64 // arc length between point 1 and point 2 [degrees]
}

// InverseCalcDistanceArcLength returns the distance between the two points
// and the arc length between them. Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcDistanceArcLength(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) DistanceArcLength {
	a12, s12, _, _, _, _, _, _ := g._gen_inverse_azi(lat1_deg, lon1_deg, lat2_deg, lon2_deg, DISTANCE)
	return DistanceArcLength{DistanceM: s12, ArcLengthDeg: a12}
}
// DistanceAzimuths pairs the geodesic distance with the azimuths at both
// endpoints.
type DistanceAzimuths struct {
	DistanceM   float64 // distance between point 1 and point 2 [meters]
	Azimuth1Deg float64 // azimuth at point 1 [degrees]
	Azimuth2Deg float64 // (forward) azimuth at point 2 [degrees]
}

// InverseCalcDistanceAzimuths returns the distance between the two points and
// the azimuths at each of them. Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcDistanceAzimuths(lat1_deg, lon1_deg, lat2_deg, lon2_deg float64) DistanceAzimuths {
	_, s12, azi1, azi2, _, _, _, _ := g._gen_inverse_azi(lat1_deg, lon1_deg, lat2_deg, lon2_deg, DISTANCE|AZIMUTH)
	return DistanceAzimuths{DistanceM: s12, Azimuth1Deg: azi1, Azimuth2Deg: azi2}
}
// AzimuthsArcLength pairs the endpoint azimuths with the arc length.
type AzimuthsArcLength struct {
	Azimuth1Deg  float64 // azimuth at point 1 [degrees]
	Azimuth2Deg  float64 // (forward) azimuth at point 2 [degrees]
	ArcLengthDeg float64 // arc length between point 1 and point 2 [degrees]
}

// InverseCalcAzimuthsArcLength returns the azimuth at point 1, the azimuth at
// point 2, and the arc length between the points. Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcAzimuthsArcLength(
	lat1_deg, lon1_deg, lat2_deg, lon2_deg float64,
) AzimuthsArcLength {
	a12, _, azi1, azi2, _, _, _, _ := g._gen_inverse_azi(lat1_deg, lon1_deg, lat2_deg, lon2_deg, AZIMUTH)
	return AzimuthsArcLength{Azimuth1Deg: azi1, Azimuth2Deg: azi2, ArcLengthDeg: a12}
}
// DistanceAzimuthsArcLength bundles distance, endpoint azimuths, and arc
// length.
type DistanceAzimuthsArcLength struct {
	DistanceM    float64 // distance between point 1 and point 2 [meters]
	Azimuth1Deg  float64 // azimuth at point 1 [degrees]
	Azimuth2Deg  float64 // (forward) azimuth at point 2 [degrees]
	ArcLengthDeg float64 // arc length between point 1 and point 2 [degrees]
}

// InverseCalcDistanceAzimuthsArcLength returns the distance between the two
// points, the azimuth at each of them, and the arc length between them.
// Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcDistanceAzimuthsArcLength(
	lat1_deg, lon1_deg, lat2_deg, lon2_deg float64,
) DistanceAzimuthsArcLength {
	a12, s12, azi1, azi2, _, _, _, _ := g._gen_inverse_azi(lat1_deg, lon1_deg, lat2_deg, lon2_deg, DISTANCE|AZIMUTH)
	return DistanceAzimuthsArcLength{
		DistanceM: s12, Azimuth1Deg: azi1, Azimuth2Deg: azi2, ArcLengthDeg: a12,
	}
}
// DistanceAzimuthsArcLengthReducedLength bundles distance, endpoint azimuths,
// arc length, and the reduced length of the geodesic.
type DistanceAzimuthsArcLengthReducedLength struct {
	DistanceM      float64 // distance between point 1 and point 2 [meters]
	Azimuth1Deg    float64 // azimuth at point 1 [degrees]
	Azimuth2Deg    float64 // (forward) azimuth at point 2 [degrees]
	ArcLengthDeg   float64 // arc length between point 1 and point 2 [degrees]
	ReducedLengthM float64 // reduced length of geodesic [meters]
}

// InverseCalcDistanceAzimuthsArcLengthReducedLength returns the distance from one point
// to the next, the azimuth at point 1, the azimuth at point 2, the arc length
// between the points, and the reduced length of the geodesic.
// Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcDistanceAzimuthsArcLengthReducedLength(
	lat1_deg, lon1_deg, lat2_deg, lon2_deg float64,
) DistanceAzimuthsArcLengthReducedLength {
	capabilities := DISTANCE | AZIMUTH | REDUCEDLENGTH
	a12, s12, azi1, azi2, m12, _, _, _ := g._gen_inverse_azi(
		lat1_deg, lon1_deg, lat2_deg, lon2_deg, capabilities,
	)
	return DistanceAzimuthsArcLengthReducedLength{
		DistanceM:      s12,
		Azimuth1Deg:    azi1,
		Azimuth2Deg:    azi2,
		ArcLengthDeg:   a12,
		ReducedLengthM: m12,
	}
}
// DistanceAzimuthsArcLengthReducedLengthScales bundles distance, endpoint
// azimuths, arc length, reduced length, and both geodesic scales.
type DistanceAzimuthsArcLengthReducedLengthScales struct {
	DistanceM      float64 // distance between point 1 and point 2 [meters]
	Azimuth1Deg    float64 // azimuth at point 1 [degrees]
	Azimuth2Deg    float64 // (forward) azimuth at point 2 [degrees]
	ArcLengthDeg   float64 // arc length between point 1 and point 2 [degrees]
	ReducedLengthM float64 // reduced length of geodesic [meters]
	M12            float64 // geodesic scale of point 2 relative to point 1 [dimensionless]
	M21            float64 // geodesic scale of point 1 relative to point 2 [dimensionless]
}

// InverseCalcDistanceAzimuthsArcLengthReducedLengthScales returns everything
// described by the `DistanceAzimuthsArcLengthReducedLengthScales` type.
// Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcDistanceAzimuthsArcLengthReducedLengthScales(
	lat1_deg, lon1_deg, lat2_deg, lon2_deg float64,
) DistanceAzimuthsArcLengthReducedLengthScales {
	caps := DISTANCE | AZIMUTH | REDUCEDLENGTH | GEODESICSCALE
	a12, s12, azi1, azi2, m12, M12, M21, _ := g._gen_inverse_azi(
		lat1_deg, lon1_deg, lat2_deg, lon2_deg, caps,
	)
	return DistanceAzimuthsArcLengthReducedLengthScales{
		DistanceM:      s12,
		Azimuth1Deg:    azi1,
		Azimuth2Deg:    azi2,
		ArcLengthDeg:   a12,
		ReducedLengthM: m12,
		M12:            M12,
		M21:            M21,
	}
}
// AllInverseResults bundles everything the inverse method can compute.
type AllInverseResults struct {
	DistanceM      float64 // distance between point 1 and point 2 [meters]
	Azimuth1Deg    float64 // azimuth at point 1 [degrees]
	Azimuth2Deg    float64 // (forward) azimuth at point 2 [degrees]
	ArcLengthDeg   float64 // arc length between point 1 and point 2 [degrees]
	ReducedLengthM float64 // reduced length of geodesic [meters]
	M12            float64 // geodesic scale of point 2 relative to point 1 [dimensionless]
	M21            float64 // geodesic scale of point 1 relative to point 2 [dimensionless]
	S12M2          float64 // area under the geodesic [meters^2]
}

// InverseCalcAll solves the inverse geodesic problem with every capability
// enabled, returning everything described in the `AllInverseResults` type.
// Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
func (g *Geodesic) InverseCalcAll(
	lat1_deg, lon1_deg, lat2_deg, lon2_deg float64,
) AllInverseResults {
	caps := DISTANCE | AZIMUTH | REDUCEDLENGTH | GEODESICSCALE | AREA
	a12, s12, azi1, azi2, m12, M12, M21, S12 := g._gen_inverse_azi(
		lat1_deg, lon1_deg, lat2_deg, lon2_deg, caps,
	)
	return AllInverseResults{
		DistanceM:      s12,
		Azimuth1Deg:    azi1,
		Azimuth2Deg:    azi2,
		ArcLengthDeg:   a12,
		ReducedLengthM: m12,
		M12:            M12,
		M21:            M21,
		S12M2:          S12,
	}
}
// InverseCalcWithCapabilities allows the user to specify which capabilities
// they wish to use. This function is useful if you want some other subset of
// capabilities than those offered by the other InverseCalc...() methods.
// Takes inputs
//   - lat1_deg latitude of point 1 [degrees].
//   - lon1_deg longitude of point 1 [degrees].
//   - lat2_deg latitude of point 2 [degrees].
//   - lon2_deg longitude of point 2 [degrees].
//   - capabilities - One or more of the capabilities constants as defined in
//     the file geodesiccapability.go. Usually, they are OR'd together, e.g.
//     LATITUDE | LONGITUDE
func (g *Geodesic) InverseCalcWithCapabilities(
	lat1_deg, lon1_deg, lat2_deg, lon2_deg float64,
	capabilities uint64,
) AllInverseResults {
	a12, s12, azi1, azi2, m12, M12, M21, S12 := g._gen_inverse_azi(
		lat1_deg, lon1_deg, lat2_deg, lon2_deg, capabilities,
	)
	res := AllInverseResults{
		DistanceM:      s12,
		Azimuth1Deg:    azi1,
		Azimuth2Deg:    azi2,
		ArcLengthDeg:   a12,
		ReducedLengthM: m12,
		M12:            M12,
		M21:            M21,
		S12M2:          S12,
	}
	return res
}
// InverseLineWithCapabilities: define a GeodesicLine struct in terms of the inverse geodesic
// problem.
// This function sets point 3 of the GeodesicLine to correspond to point 2 of the
// inverse geodesic problem.
func (g *Geodesic) InverseLineWithCapabilities(
	lat1_deg, lon1_deg, lat2_deg, lon2_deg float64,
	capabilities uint64,
) GeodesicLine {
	// Solve the inverse problem (with an empty output mask) to obtain the
	// arc length and the azimuth direction cosines at point 1.
	a12, _, salp1, calp1, _, _, _, _, _, _ := g._gen_inverse(lat1_deg, lon1_deg, lat2_deg, lon2_deg, 0)
	azi1 := atan2_deg(salp1, calp1)
	// If the caller asked for any distance-input capability, the line also
	// needs DISTANCE so distances can be produced along it.
	if capabilities&(OUT_MASK&DISTANCE_IN) != 0 {
		capabilities |= DISTANCE
	}
	line := new_geodesic_line_all_options(*g, lat1_deg, lon1_deg, azi1, capabilities, salp1, calp1)
	// Point 3 is fixed at the arc length of point 2 of the inverse problem.
	line.set_arc(a12)
	return line
}
// _gen_direct_line builds a GeodesicLine for the direct problem and fixes its
// point 3 at either the given distance (meters) or, when arcmode is true, the
// given arc length (degrees).
func (g *Geodesic) _gen_direct_line(
	lat1_deg, lon1_deg, azi1_deg float64,
	arcmode bool,
	s12_a12_m float64,
	capabilities uint64,
) GeodesicLine {
	// A distance-parameterized line requires the DISTANCE_IN capability.
	if !arcmode {
		capabilities |= DISTANCE_IN
	}
	gl := NewGeodesicLineWithCapability(*g, lat1_deg, lon1_deg, azi1_deg, capabilities)
	if arcmode {
		gl.set_arc(s12_a12_m)
	} else {
		gl.set_distance(s12_a12_m)
	}
	return gl
}
// DirectLineWithCapabilities defines a GeodesicLine struct in terms of the
// direct geodesic problem specified in terms of distance (s12_m, in meters).
// This function sets point 3 of the GeodesicLine to correspond to point 2 of the
// direct geodesic problem
func (g *Geodesic) DirectLineWithCapabilities(
	lat1_deg, lon1_deg, azi1_deg, s12_m float64,
	capabilities uint64,
) GeodesicLine {
	return g._gen_direct_line(lat1_deg, lon1_deg, azi1_deg, false, s12_m, capabilities)
}
// LineWithCapabilities returns a GeodesicLine. This allows points along a
// geodesic starting at lat1_deg, lon1_deg with azimuth azi1_deg to be found.
func (g *Geodesic) LineWithCapabilities(
	lat1_deg, lon1_deg, azi1_deg float64,
	capabilities uint64,
) GeodesicLine {
	return NewGeodesicLineWithCapability(
		*g,
		lat1_deg,
		lon1_deg,
		azi1_deg,
		capabilities,
	)
}
package onshape
import (
"encoding/json"
)
// BTPTopLevelConstantDeclaration283 models a top-level FeatureScript constant
// declaration. It extends BTPTopLevelNode286 with an optional type tag and
// the declaration itself.
type BTPTopLevelConstantDeclaration283 struct {
	BTPTopLevelNode286
	BtType      *string                             `json:"btType,omitempty"`
	Declaration *BTPStatementConstantDeclaration273 `json:"declaration,omitempty"`
}

// NewBTPTopLevelConstantDeclaration283 instantiates a new BTPTopLevelConstantDeclaration283 object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTPTopLevelConstantDeclaration283() *BTPTopLevelConstantDeclaration283 {
	return &BTPTopLevelConstantDeclaration283{}
}

// NewBTPTopLevelConstantDeclaration283WithDefaults instantiates a new BTPTopLevelConstantDeclaration283 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTPTopLevelConstantDeclaration283WithDefaults() *BTPTopLevelConstantDeclaration283 {
	return &BTPTopLevelConstantDeclaration283{}
}
// GetBtType returns the BtType field value if set, and the empty string otherwise.
func (o *BTPTopLevelConstantDeclaration283) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	return ""
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPTopLevelConstantDeclaration283) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType reports whether the BtType field has been set.
func (o *BTPTopLevelConstantDeclaration283) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType stores a copy of v and points the BtType field at it.
func (o *BTPTopLevelConstantDeclaration283) SetBtType(v string) {
	o.BtType = &v
}

// GetDeclaration returns the Declaration field value if set, zero value otherwise.
func (o *BTPTopLevelConstantDeclaration283) GetDeclaration() BTPStatementConstantDeclaration273 {
	if o != nil && o.Declaration != nil {
		return *o.Declaration
	}
	var zero BTPStatementConstantDeclaration273
	return zero
}

// GetDeclarationOk returns a tuple with the Declaration field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPTopLevelConstantDeclaration283) GetDeclarationOk() (*BTPStatementConstantDeclaration273, bool) {
	if o == nil || o.Declaration == nil {
		return nil, false
	}
	return o.Declaration, true
}

// HasDeclaration reports whether the Declaration field has been set.
func (o *BTPTopLevelConstantDeclaration283) HasDeclaration() bool {
	return o != nil && o.Declaration != nil
}

// SetDeclaration stores a copy of v and points the Declaration field at it.
func (o *BTPTopLevelConstantDeclaration283) SetDeclaration(v BTPStatementConstantDeclaration273) {
	o.Declaration = &v
}
// MarshalJSON flattens the embedded BTPTopLevelNode286 and this type's own
// optional fields into a single JSON object.
func (o BTPTopLevelConstantDeclaration283) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	// Serialize the embedded struct first, then re-read its keys into the
	// shared map so the result is flat rather than nested.
	embedded, err := json.Marshal(o.BTPTopLevelNode286)
	if err != nil {
		return []byte{}, err
	}
	if err = json.Unmarshal(embedded, &out); err != nil {
		return []byte{}, err
	}
	if o.BtType != nil {
		out["btType"] = o.BtType
	}
	if o.Declaration != nil {
		out["declaration"] = o.Declaration
	}
	return json.Marshal(out)
}
type NullableBTPTopLevelConstantDeclaration283 struct {
value *BTPTopLevelConstantDeclaration283
isSet bool
}
func (v NullableBTPTopLevelConstantDeclaration283) Get() *BTPTopLevelConstantDeclaration283 {
return v.value
}
func (v *NullableBTPTopLevelConstantDeclaration283) Set(val *BTPTopLevelConstantDeclaration283) {
v.value = val
v.isSet = true
}
func (v NullableBTPTopLevelConstantDeclaration283) IsSet() bool {
return v.isSet
}
func (v *NullableBTPTopLevelConstantDeclaration283) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableBTPTopLevelConstantDeclaration283(val *BTPTopLevelConstantDeclaration283) *NullableBTPTopLevelConstantDeclaration283 {
return &NullableBTPTopLevelConstantDeclaration283{value: val, isSet: true}
}
func (v NullableBTPTopLevelConstantDeclaration283) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableBTPTopLevelConstantDeclaration283) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | onshape/model_btp_top_level_constant_declaration_283.go | 0.67822 | 0.52342 | model_btp_top_level_constant_declaration_283.go | starcoder |
package neural
import (
"github.com/gonum/matrix/mat64";
"math"
)
// ActivationFunction applies an element-wise (or, for softmax, row-wise)
// activation to x, writing the result into y.
type ActivationFunction func(x mat64.Matrix, y *mat64.Dense)

// NewActivationFunction returns the activation function identified by name,
// or nil if the name is unknown.
//
// Supported activations: linear (identity), logistic (sigmoid), ReLU, tanh,
// and row-wise softmax.
func NewActivationFunction(name ActivationName) ActivationFunction {
	switch name {
	case ActivationName_LINEAR:
		return func(x mat64.Matrix, y *mat64.Dense) { y.Clone(x) }
	case ActivationName_LOGISTIC:
		return func(x mat64.Matrix, y *mat64.Dense) {
			y.Apply(func(r, c int, v float64) float64 {
				return 1 / (1 + math.Exp(-v))
			}, x)
		}
	case ActivationName_RELU:
		return func(x mat64.Matrix, y *mat64.Dense) {
			y.Apply(func(r, c int, v float64) float64 { return math.Max(0, v) }, x)
		}
	case ActivationName_TANH:
		return func(x mat64.Matrix, y *mat64.Dense) {
			y.Apply(func(r, c int, v float64) float64 { return math.Tanh(v) }, x)
		}
	case ActivationName_SOFTMAX:
		return func(x mat64.Matrix, y *mat64.Dense) {
			r, c := x.Dims()
			for i := 0; i < r; i++ {
				// Subtract the row maximum before exponentiating so that
				// large inputs cannot overflow math.Exp; softmax is
				// invariant under this shift.
				max := math.Inf(-1)
				for j := 0; j < c; j++ {
					if v := x.At(i, j); v > max {
						max = v
					}
				}
				exp_sum := 0.0
				for j := 0; j < c; j++ {
					exp_sum += math.Exp(x.At(i, j) - max)
				}
				for j := 0; j < c; j++ {
					y.Set(i, j, math.Exp(x.At(i, j)-max)/exp_sum)
				}
			}
		}
	}
	// Unknown activation name.
	return nil
}
// DActivationFunction writes into x the element-wise derivative of an
// activation evaluated on the values of y.
//
// NOTE(review): despite the parameter name, the LOGISTIC and TANH branches
// below re-apply the activation to the incoming values, which suggests the
// first argument holds pre-activation inputs rather than activation
// outputs — confirm against the callers.
type DActivationFunction func(y mat64.Matrix, x *mat64.Dense)

// NewDActivationFunction returns the derivative of the activation function
// identified by name, or nil if the name is unknown.
func NewDActivationFunction(name ActivationName) DActivationFunction {
	switch name {
	case ActivationName_LINEAR:
		// d/dv v = 1 everywhere.
		return func(y mat64.Matrix, x *mat64.Dense) {
			x.Apply(func(r, c int, v float64) float64 { return 1 }, y)
		}
	case ActivationName_RELU:
		// Subgradient: 0 for v <= 0, 1 otherwise.
		return func(y mat64.Matrix, x *mat64.Dense) {
			x.Apply(func(r, c int, v float64) float64 {
				if v <= 0 {
					return 0
				}
				return 1
			}, y)
		}
	case ActivationName_LOGISTIC:
		// sigma'(v) = sigma(v) * (1 - sigma(v)).
		return func(y mat64.Matrix, x *mat64.Dense) {
			x.Apply(func(r, c int, v float64) float64 {
				logistic := 1 / (1 + math.Exp(-v))
				return logistic * (1 - logistic)
			}, y)
		}
	case ActivationName_TANH:
		// tanh'(v) = 1 - tanh(v)^2.
		return func(y mat64.Matrix, x *mat64.Dense) {
			x.Apply(func(r, c int, v float64) float64 {
				tanh := math.Tanh(v)
				return 1 - tanh * tanh
			}, y)
		}
	case ActivationName_SOFTMAX:
		return func(y mat64.Matrix, x *mat64.Dense) {
			// TODO(ariw): Finish this.
			// NOTE(review): this branch currently recomputes softmax itself,
			// not its derivative; the softmax Jacobian is not implemented.
			r, c := y.Dims()
			for i := 0; i < r; i++ {
				exp_sum := 0.0
				for j := 0; j < c; j++ {
					exp_sum = exp_sum + math.Exp(y.At(i, j))
				}
				for j := 0; j < c; j++ {
					x.Set(i, j, math.Exp(y.At(i, j)) / exp_sum)
				}
			}
		}
	}
	return nil
}
// Package georef implements the rec.georef command,
// i.e. set the georeference of an specimen record.
package georef
import (
"strings"
"github.com/js-arias/biodv/cmdapp"
"github.com/js-arias/biodv/geography"
"github.com/js-arias/biodv/records"
"github.com/pkg/errors"
)
// cmd describes the rec.georef command: its usage line, short summary, long
// help text (printed verbatim to the user), the run entry point, and the
// flag-registration hook.
var cmd = &cmdapp.Command{
	UsageLine: `rec.georef [-lat|--latitude <value>]
	[-lon|--longitude <value>] [-u|--uncertainty <value>]
	[-e|--elevation <value>] [-s|--source <value>]
	[-v|--validation <value>] [-r|--remove] <record>`,
	Short: "set the georeference of an specimen record",
	Long: `
Command rec.georef sets the georeference of the specified specimen
record. Options are used to set particular values. If they left empty,
they are ignored. -lat or --latitude option and -lon or--longitude
options should be always defined as a pair.

To eliminate a geographic georeference use the option -r or --remove
option.

To eliminate an string value (-s, or --source, and -v, or --validation,
options, use '-' as value.

Latitude and longitude should be defined using decimal points, and signs
to indicate the hemisphere (negatives for southern and western
hemispheres).

Options are:

    -lat <value>
    --latitude <value>
      Set the latitude of the record, with decimal points. If defined,
      it should be paired with -lon or --longitude option.

    -lon <value>
    --longitude <value>
      Set the longitude of the record, with decimal points. If defined,
      it should be paired with -lat or --latitude option.

    -e <value>
    --elevation <value>
      Elevation above sea level, in meters.

    -u <value>
    --uncertainty <value>
      The uncertainty of the georeference, in meters.

    -s <value>
    --source <value>
      An ID or a description of the source of the georeference, for
      example a GPS device, or a gazetteer service.

    -v <value>
    --validation <value>
      An ID or a description of a validation of the georeference, if
      any.

    -r
    --remove
      If set, the latitude and longitude pair of the record will be removed.

    <record>
      The record to be set.
	`,
	Run:           run,
	RegisterFlags: register,
}
// init registers the rec.georef command with the cmdapp dispatcher at
// program startup.
func init() {
	cmdapp.Add(cmd)
}
// Flag value holders, populated by register and read by run/setRec.
var lat float64   // -lat/--latitude; defaults to an out-of-range sentinel
var lon float64   // -lon/--longitude; defaults to an out-of-range sentinel
var elev int      // -e/--elevation in meters; -1 means unset
var uncert int    // -u/--uncertainty in meters; -1 means unset
var source string // -s/--source; "-" clears the stored value
var valid string  // -v/--validation; "-" clears the stored value
var remov bool    // -r/--remove; remove the georeference entirely
// register installs the rec.georef command-line flags on c.
//
// Each flag is registered twice — once under its long name and once under
// its short alias — sharing the same destination variable. The latitude and
// longitude defaults are out-of-range sentinels so setRec can tell whether
// the user supplied a coordinate pair.
func register(c *cmdapp.Command) {
	c.Flag.Float64Var(&lat, "latitude", geography.MaxLat*2, "")
	c.Flag.Float64Var(&lat, "lat", geography.MaxLat*2, "")
	// NOTE(review): the longitude sentinel is MaxLat*2, which (if MaxLat is
	// 90) equals 180 and is itself a valid longitude; a longitude-specific
	// sentinel would be safer — confirm against the geography package.
	c.Flag.Float64Var(&lon, "longitude", geography.MaxLat*2, "")
	c.Flag.Float64Var(&lon, "lon", geography.MaxLat*2, "")
	c.Flag.IntVar(&elev, "elevation", -1, "")
	c.Flag.IntVar(&elev, "e", -1, "")
	// Fixed flag name: this was registered as "uncertatinty", so the
	// documented --uncertainty option did not work.
	c.Flag.IntVar(&uncert, "uncertainty", -1, "")
	c.Flag.IntVar(&uncert, "u", -1, "")
	c.Flag.StringVar(&source, "source", "", "")
	c.Flag.StringVar(&source, "s", "", "")
	c.Flag.StringVar(&valid, "validation", "", "")
	c.Flag.StringVar(&valid, "v", "", "")
	c.Flag.BoolVar(&remov, "remove", false, "")
	c.Flag.BoolVar(&remov, "r", false, "")
}
// run implements the rec.georef command: it looks up the record named by the
// command-line arguments, applies the flag values to its georeference, and
// commits the records database.
func run(c *cmdapp.Command, args []string) error {
	// The record ID may contain spaces, so rejoin all arguments.
	id := strings.Join(args, " ")
	if id == "" {
		return errors.Errorf("%s: a record should be defined", c.Name())
	}
	recs, err := records.Open("")
	if err != nil {
		return errors.Wrap(err, c.Name())
	}
	rec := recs.Record(id)
	if rec == nil {
		// NOTE(review): an unknown record ID is silently ignored here —
		// confirm whether reporting an error would be more appropriate.
		return nil
	}
	setRec(rec)
	if err := recs.Commit(); err != nil {
		return errors.Wrap(err, c.Name())
	}
	return nil
}
func setRec(rec *records.Record) {
if remov {
geo := geography.NewPosition()
rec.SetGeoRef(geo)
return
}
geo := rec.GeoRef()
if geography.IsValidCoord(lat, lon) {
geo.Lat = lat
geo.Lon = lon
}
if elev >= 0 {
geo.Elevation = uint(elev)
}
if uncert >= 0 {
geo.Uncertainty = uint(uncert)
}
if source == "-" {
geo.Source = ""
} else if source != "" {
geo.Source = source
}
if valid == "-" {
geo.Validation = ""
} else if valid != "" {
geo.Validation = valid
}
rec.SetGeoRef(geo)
} | cmd/biodv/internal/records/georef/georef.go | 0.749454 | 0.527134 | georef.go | starcoder |
package biquad
// blt is a biquad (two-pole, two-zero) IIR filter in direct form I.
// It comes from the following biquad transfer function (see
// http://shepazu.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html):
//
//	H(z) = (b_0 + b_1*z^{-1} + b_2*z^{-2}) / (a_0 + a_1*z^{-1} + a_2*z^{-2})
type blt struct {
	// 5 coefficients normalized respect a0.
	b0d, b1d, b2d, a1d, a2d float64
	// Circular buffers for state storage. x is measured signal. y is filter result.
	x, y [3]float64
	// points to `n` index in ring buffer. ptr only grows and all accesses
	// are taken modulo 3.
	ptr uint
}

// newBLT builds a biquad filter from the six raw transfer-function
// coefficients of
//
//	H(z) = (b_0 + b_1*z^{-1} + b_2*z^{-2}) / (a_0 + a_1*z^{-1} + a_2*z^{-2})
//
// normalizing them by a0. It panics when a0 == 0, since normalization is
// then impossible. ptr starts at 3 so that the unsigned ptr-1 and ptr-2
// computed in advance never underflow.
func newBLT(a0, a1, a2, b0, b1, b2 float64) blt {
	if a0 == 0 {
		panic("a0 can not be 0")
	}
	return blt{
		a1d: a1 / a0,
		a2d: a2 / a0,
		b0d: b0 / a0,
		b1d: b1 / a0,
		b2d: b2 / a0,
		ptr: 3,
	}
}
// advance feeds one sample x through the biquad difference equation,
// storing the raw sample and the filtered result in the ring buffers
// and rotating the buffer pointer.
func (b *blt) advance(x float64) {
	cur := b.ptr % 3
	prev1 := (b.ptr - 1) % 3
	prev2 := (b.ptr - 2) % 3
	b.x[cur] = x // remember the raw sample
	b.y[cur] = b.b0d*x + b.b1d*b.x[prev1] + b.b2d*b.x[prev2] -
		b.a1d*b.y[prev1] - b.a2d*b.y[prev2] // remember the filtered value
	// Incrementing ptr rotates the ring buffers.
	b.ptr++
}
// ynext returns the filter output produced by the most recent call to
// advance.
//
// advance writes y at index ptr%3 and then increments ptr, so the latest
// result lives at (ptr-1)%3. The previous implementation read y[ptr%3] —
// the slot about to be overwritten, i.e. the output from two samples
// back — which made Filter emit every value with a two-sample delay.
// (ptr+2)%3 equals (ptr-1)%3 modulo 3 without risking uint underflow on
// a zero-value blt.
func (b *blt) ynext() float64 {
	return b.y[(b.ptr+2)%3]
}
// init seeds the ring buffers with the first point of xy, so the filter
// starts from a steady state instead of from zeros (avoids a startup
// transient).
func (b *blt) init(xy Signal) {
	x0, y0 := xy.XY(0)
	for i := range b.x {
		b.x[i] = x0
		b.y[i] = y0
	}
}
// Filter applies a bilinear transformation filter to a digital
// signal and returns the filtered result. The length of the data must be greater than 2.
func (b *blt) Filter(signal Signal) (Signal, error) {
	N := signal.Len()
	if N < 3 {
		return nil, ErrShortXY
	}
	b.init(signal)
	out := make([]float64, N)
	for i := 0; i < N; i++ {
		_, xi := signal.XY(i)
		b.advance(xi)
		out[i] = b.ynext()
	}
	return filtered{
		Signal: signal,
		fval:   out,
	}, nil
}
// DiscreteProcess takes in the next signal data point
// and processes it. DiscreteProcess expects data points
// to be evenly spaced out in time.
// The filtered result is retrieved with YNext.
func (b *blt) DiscreteProcess(x float64) {
	b.advance(x)
}
// YNext returns the last result of the filter given by
// DiscreteProcess.
func (b *blt) YNext() (y float64) {
	return b.ynext()
}
// The byte order fallacy. By <NAME>
// http://commandcenter.blogspot.de/2012/04/byte-order-fallacy.html
package image
import (
"fmt"
"image"
"image/draw"
"reflect"
colorExt "github.com/chai2010/image/color"
)
// Compile-time checks that every concrete pixel format in this package
// satisfies the Image interface; a missing method becomes a build error
// here rather than a runtime type-assertion failure.
var (
	_ Image = (*Gray)(nil)
	_ Image = (*Gray16)(nil)
	_ Image = (*Gray32i)(nil)
	_ Image = (*Gray32f)(nil)
	_ Image = (*Gray64i)(nil)
	_ Image = (*Gray64f)(nil)
	_ Image = (*GrayA)(nil)
	_ Image = (*GrayA32)(nil)
	_ Image = (*GrayA64i)(nil)
	_ Image = (*GrayA64f)(nil)
	_ Image = (*GrayA128i)(nil)
	_ Image = (*GrayA128f)(nil)
	_ Image = (*RGB)(nil)
	_ Image = (*RGB48)(nil)
	_ Image = (*RGB96i)(nil)
	_ Image = (*RGB96f)(nil)
	_ Image = (*RGB192i)(nil)
	_ Image = (*RGB192f)(nil)
	_ Image = (*RGBA)(nil)
	_ Image = (*RGBA64)(nil)
	_ Image = (*RGBA128i)(nil)
	_ Image = (*RGBA128f)(nil)
	_ Image = (*RGBA256i)(nil)
	_ Image = (*RGBA256f)(nil)
)
// Image extends the standard draw.Image with direct access to the raw
// pixel buffer plus the channel count and sample depth that together
// identify the pixel format.
type Image interface {
	// Get original type, such as *image.Gray, *image.RGBA, etc.
	BaseType() image.Image
	// Pix holds the image's pixels, as pixel values in big-endian order format. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*PixelSize].
	Pix() []byte
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride() int
	// Rect is the image's bounds.
	Rect() image.Rectangle
	// 1:Gray, 2:GrayA, 3:RGB, 4:RGBA
	Channels() int
	// Uint8/Uint16/Int32/Int64/Float32/Float64
	Depth() reflect.Kind
	draw.Image
}
// newRGBAFromImage converts an arbitrary image.Image into an 8-bit RGBA
// image by sampling every pixel through the color.Color interface
// (16-bit samples are truncated to their high byte).
func newRGBAFromImage(m image.Image) *RGBA {
	bounds := m.Bounds()
	dst := NewRGBA(bounds)
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			r, g, b, a := m.At(x, y).RGBA()
			dst.SetRGBA(x, y, colorExt.RGBA{
				R: uint8(r >> 8),
				G: uint8(g >> 8),
				B: uint8(b >> 8),
				A: uint8(a >> 8),
			})
		}
	}
	return dst
}
// newRGBA64FromImage converts an arbitrary image.Image into a 16-bit
// RGBA image by sampling every pixel through the color.Color interface
// (no precision is lost: RGBA() already yields 16-bit samples).
func newRGBA64FromImage(m image.Image) *RGBA64 {
	bounds := m.Bounds()
	dst := NewRGBA64(bounds)
	for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
		for x := bounds.Min.X; x < bounds.Max.X; x++ {
			r, g, b, a := m.At(x, y).RGBA()
			dst.SetRGBA64(x, y, colorExt.RGBA64{
				R: uint16(r),
				G: uint16(g),
				B: uint16(b),
				A: uint16(a),
			})
		}
	}
	return dst
}
// asBaseType converts m to the equivalent standard-library image type
// when one exists (8/16-bit Gray and RGBA share the same memory layout,
// so no pixels are copied); every other format is returned unchanged.
func asBaseType(m Image) image.Image {
	channels, depth := m.Channels(), m.Depth()
	if depth != reflect.Uint8 && depth != reflect.Uint16 {
		return m
	}
	switch channels {
	case 1:
		if depth == reflect.Uint8 {
			return &image.Gray{Pix: m.Pix(), Stride: m.Stride(), Rect: m.Rect()}
		}
		return &image.Gray16{Pix: m.Pix(), Stride: m.Stride(), Rect: m.Rect()}
	case 4:
		if depth == reflect.Uint8 {
			return &image.RGBA{Pix: m.Pix(), Stride: m.Stride(), Rect: m.Rect()}
		}
		return &image.RGBA64{Pix: m.Pix(), Stride: m.Stride(), Rect: m.Rect()}
	}
	return m
}
// AsImage wraps m in this package's extended Image interface. The four
// standard formats with a matching layout are wrapped without copying
// pixels; YCbCr is converted to 8-bit RGBA and any other format falls
// back to a 16-bit RGBA64 conversion (both copy).
func AsImage(m image.Image) Image {
	if v, ok := m.(Image); ok {
		return v
	}
	switch v := m.(type) {
	case *image.Gray:
		return new(Gray).Init(v.Pix, v.Stride, v.Rect)
	case *image.Gray16:
		return new(Gray16).Init(v.Pix, v.Stride, v.Rect)
	case *image.RGBA:
		return new(RGBA).Init(v.Pix, v.Stride, v.Rect)
	case *image.RGBA64:
		return new(RGBA64).Init(v.Pix, v.Stride, v.Rect)
	case *image.YCbCr:
		return newRGBAFromImage(v)
	}
	return newRGBA64FromImage(m)
}
// CloneImage returns a deep copy of m as an Image: the returned image
// owns a fresh pixel buffer, so mutating it never affects m.
//
// For sources already implementing Image the exact pixel format is
// preserved (it panics on an unknown channels/depth combination, which
// would indicate a corrupted Image implementation). Standard formats
// with a matching layout are copied directly; YCbCr converts to 8-bit
// RGBA and anything else falls back to a 16-bit RGBA64 conversion.
func CloneImage(m image.Image) Image {
	// clone duplicates a pixel buffer; previously this expression was
	// repeated inline at every dispatch arm.
	clone := func(pix []uint8) []uint8 {
		return append([]uint8(nil), pix...)
	}
	if m, ok := m.(Image); ok {
		pix, stride, rect := clone(m.Pix()), m.Stride(), m.Rect()
		switch channels, depth := m.Channels(), m.Depth(); {
		case channels == 1 && depth == reflect.Uint8:
			return new(Gray).Init(pix, stride, rect)
		case channels == 1 && depth == reflect.Uint16:
			return new(Gray16).Init(pix, stride, rect)
		case channels == 1 && depth == reflect.Int32:
			return new(Gray32i).Init(pix, stride, rect)
		case channels == 1 && depth == reflect.Float32:
			return new(Gray32f).Init(pix, stride, rect)
		case channels == 1 && depth == reflect.Int64:
			return new(Gray64i).Init(pix, stride, rect)
		case channels == 1 && depth == reflect.Float64:
			return new(Gray64f).Init(pix, stride, rect)
		case channels == 2 && depth == reflect.Uint8:
			return new(GrayA).Init(pix, stride, rect)
		case channels == 2 && depth == reflect.Uint16:
			return new(GrayA32).Init(pix, stride, rect)
		case channels == 2 && depth == reflect.Int32:
			return new(GrayA64i).Init(pix, stride, rect)
		case channels == 2 && depth == reflect.Float32:
			return new(GrayA64f).Init(pix, stride, rect)
		case channels == 2 && depth == reflect.Int64:
			return new(GrayA128i).Init(pix, stride, rect)
		case channels == 2 && depth == reflect.Float64:
			return new(GrayA128f).Init(pix, stride, rect)
		case channels == 3 && depth == reflect.Uint8:
			return new(RGB).Init(pix, stride, rect)
		case channels == 3 && depth == reflect.Uint16:
			return new(RGB48).Init(pix, stride, rect)
		case channels == 3 && depth == reflect.Int32:
			return new(RGB96i).Init(pix, stride, rect)
		case channels == 3 && depth == reflect.Float32:
			return new(RGB96f).Init(pix, stride, rect)
		case channels == 3 && depth == reflect.Int64:
			return new(RGB192i).Init(pix, stride, rect)
		case channels == 3 && depth == reflect.Float64:
			return new(RGB192f).Init(pix, stride, rect)
		case channels == 4 && depth == reflect.Uint8:
			return new(RGBA).Init(pix, stride, rect)
		case channels == 4 && depth == reflect.Uint16:
			return new(RGBA64).Init(pix, stride, rect)
		case channels == 4 && depth == reflect.Int32:
			return new(RGBA128i).Init(pix, stride, rect)
		case channels == 4 && depth == reflect.Float32:
			return new(RGBA128f).Init(pix, stride, rect)
		case channels == 4 && depth == reflect.Int64:
			return new(RGBA256i).Init(pix, stride, rect)
		case channels == 4 && depth == reflect.Float64:
			return new(RGBA256f).Init(pix, stride, rect)
		default:
			panic(fmt.Errorf("image: CloneImage, invalid format: channels = %v, depth = %v", channels, depth))
		}
	}
	switch m := m.(type) {
	case *image.Gray:
		return new(Gray).Init(clone(m.Pix), m.Stride, m.Rect)
	case *image.Gray16:
		return new(Gray16).Init(clone(m.Pix), m.Stride, m.Rect)
	case *image.RGBA:
		return new(RGBA).Init(clone(m.Pix), m.Stride, m.Rect)
	case *image.RGBA64:
		return new(RGBA64).Init(clone(m.Pix), m.Stride, m.Rect)
	case *image.YCbCr:
		return newRGBAFromImage(m)
	}
	return newRGBA64FromImage(m)
}
func NewImage(r image.Rectangle, channels int, depth reflect.Kind) (m Image, err error) {
switch {
case channels == 1 && depth == reflect.Uint8:
m = NewGray(r)
return
case channels == 1 && depth == reflect.Uint16:
m = NewGray16(r)
return
case channels == 1 && depth == reflect.Int32:
m = NewGray32i(r)
return
case channels == 1 && depth == reflect.Float32:
m = NewGray32f(r)
return
case channels == 1 && depth == reflect.Int64:
m = NewGray64i(r)
return
case channels == 1 && depth == reflect.Float64:
m = NewGray64f(r)
return
case channels == 2 && depth == reflect.Uint8:
m = NewGrayA(r)
return
case channels == 2 && depth == reflect.Uint16:
m = NewGrayA32(r)
return
case channels == 2 && depth == reflect.Int32:
m = NewGrayA64f(r)
return
case channels == 2 && depth == reflect.Float32:
m = NewGrayA64f(r)
return
case channels == 2 && depth == reflect.Int64:
m = NewGrayA128f(r)
return
case channels == 2 && depth == reflect.Float64:
m = NewGrayA128f(r)
return
case channels == 3 && depth == reflect.Uint8:
m = NewRGB(r)
return
case channels == 3 && depth == reflect.Uint16:
m = NewRGB48(r)
return
case channels == 3 && depth == reflect.Int32:
m = NewRGB96i(r)
return
case channels == 3 && depth == reflect.Float32:
m = NewRGB96f(r)
return
case channels == 3 && depth == reflect.Int64:
m = NewRGB192i(r)
return
case channels == 3 && depth == reflect.Float64:
m = NewRGB192f(r)
return
case channels == 4 && depth == reflect.Uint8:
m = NewRGBA(r)
return
case channels == 4 && depth == reflect.Uint16:
m = NewRGBA64(r)
return
case channels == 4 && depth == reflect.Int32:
m = NewRGBA128i(r)
return
case channels == 4 && depth == reflect.Float32:
m = NewRGBA128f(r)
return
case channels == 4 && depth == reflect.Int64:
m = NewRGBA256i(r)
return
case channels == 4 && depth == reflect.Float64:
m = NewRGBA256f(r)
return
default:
err = fmt.Errorf("image: NewImage, invalid format: channels = %v, depth = %v", channels, depth)
return
}
} | image.go | 0.646572 | 0.500488 | image.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// PlannerCategoryDescriptions holds the user-visible label for each of
// the 25 planner category slots.
// NOTE(review): this model appears to be machine-generated (kiota);
// prefer regenerating over hand-editing where possible.
type PlannerCategoryDescriptions struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // The label associated with Category 1
    category1 *string
    // The label associated with Category 10
    category10 *string
    // The label associated with Category 11
    category11 *string
    // The label associated with Category 12
    category12 *string
    // The label associated with Category 13
    category13 *string
    // The label associated with Category 14
    category14 *string
    // The label associated with Category 15
    category15 *string
    // The label associated with Category 16
    category16 *string
    // The label associated with Category 17
    category17 *string
    // The label associated with Category 18
    category18 *string
    // The label associated with Category 19
    category19 *string
    // The label associated with Category 2
    category2 *string
    // The label associated with Category 20
    category20 *string
    // The label associated with Category 21
    category21 *string
    // The label associated with Category 22
    category22 *string
    // The label associated with Category 23
    category23 *string
    // The label associated with Category 24
    category24 *string
    // The label associated with Category 25
    category25 *string
    // The label associated with Category 3
    category3 *string
    // The label associated with Category 4
    category4 *string
    // The label associated with Category 5
    category5 *string
    // The label associated with Category 6
    category6 *string
    // The label associated with Category 7
    category7 *string
    // The label associated with Category 8
    category8 *string
    // The label associated with Category 9
    category9 *string
}
// NewPlannerCategoryDescriptions instantiates a new plannerCategoryDescriptions and sets the default values.
func NewPlannerCategoryDescriptions()(*PlannerCategoryDescriptions) {
    return &PlannerCategoryDescriptions{
        additionalData: make(map[string]interface{}),
    }
}
// CreatePlannerCategoryDescriptionsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// This model has no derived types, so parseNode is never inspected and a
// plain PlannerCategoryDescriptions is always returned (with a nil error).
func CreatePlannerCategoryDescriptionsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewPlannerCategoryDescriptions(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PlannerCategoryDescriptions) GetAdditionalData()(map[string]interface{}) {
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetCategory1 gets the category1 property value. The label associated with Category 1
func (m *PlannerCategoryDescriptions) GetCategory1()(*string) {
    if m == nil {
        return nil
    }
    return m.category1
}
// GetCategory10 gets the category10 property value. The label associated with Category 10
func (m *PlannerCategoryDescriptions) GetCategory10()(*string) {
    if m == nil {
        return nil
    }
    return m.category10
}
// GetCategory11 gets the category11 property value. The label associated with Category 11
func (m *PlannerCategoryDescriptions) GetCategory11()(*string) {
    if m == nil {
        return nil
    }
    return m.category11
}
// GetCategory12 gets the category12 property value. The label associated with Category 12
func (m *PlannerCategoryDescriptions) GetCategory12()(*string) {
    if m == nil {
        return nil
    }
    return m.category12
}
// GetCategory13 gets the category13 property value. The label associated with Category 13
func (m *PlannerCategoryDescriptions) GetCategory13()(*string) {
    if m == nil {
        return nil
    }
    return m.category13
}
// GetCategory14 gets the category14 property value. The label associated with Category 14
func (m *PlannerCategoryDescriptions) GetCategory14()(*string) {
    if m == nil {
        return nil
    }
    return m.category14
}
// GetCategory15 gets the category15 property value. The label associated with Category 15
func (m *PlannerCategoryDescriptions) GetCategory15()(*string) {
    if m == nil {
        return nil
    }
    return m.category15
}
// GetCategory16 gets the category16 property value. The label associated with Category 16
func (m *PlannerCategoryDescriptions) GetCategory16()(*string) {
    if m == nil {
        return nil
    }
    return m.category16
}
// GetCategory17 gets the category17 property value. The label associated with Category 17
func (m *PlannerCategoryDescriptions) GetCategory17()(*string) {
    if m == nil {
        return nil
    }
    return m.category17
}
// GetCategory18 gets the category18 property value. The label associated with Category 18
func (m *PlannerCategoryDescriptions) GetCategory18()(*string) {
    if m == nil {
        return nil
    }
    return m.category18
}
// GetCategory19 gets the category19 property value. The label associated with Category 19
func (m *PlannerCategoryDescriptions) GetCategory19()(*string) {
    if m == nil {
        return nil
    }
    return m.category19
}
// GetCategory2 gets the category2 property value. The label associated with Category 2
func (m *PlannerCategoryDescriptions) GetCategory2()(*string) {
    if m == nil {
        return nil
    }
    return m.category2
}
// GetCategory20 gets the category20 property value. The label associated with Category 20
func (m *PlannerCategoryDescriptions) GetCategory20()(*string) {
    if m == nil {
        return nil
    }
    return m.category20
}
// GetCategory21 gets the category21 property value. The label associated with Category 21
func (m *PlannerCategoryDescriptions) GetCategory21()(*string) {
    if m == nil {
        return nil
    }
    return m.category21
}
// GetCategory22 gets the category22 property value. The label associated with Category 22
func (m *PlannerCategoryDescriptions) GetCategory22()(*string) {
    if m == nil {
        return nil
    }
    return m.category22
}
// GetCategory23 gets the category23 property value. The label associated with Category 23
func (m *PlannerCategoryDescriptions) GetCategory23()(*string) {
    if m == nil {
        return nil
    }
    return m.category23
}
// GetCategory24 gets the category24 property value. The label associated with Category 24
func (m *PlannerCategoryDescriptions) GetCategory24()(*string) {
    if m == nil {
        return nil
    }
    return m.category24
}
// GetCategory25 gets the category25 property value. The label associated with Category 25
func (m *PlannerCategoryDescriptions) GetCategory25()(*string) {
    if m == nil {
        return nil
    }
    return m.category25
}
// GetCategory3 gets the category3 property value. The label associated with Category 3
func (m *PlannerCategoryDescriptions) GetCategory3()(*string) {
    if m == nil {
        return nil
    }
    return m.category3
}
// GetCategory4 gets the category4 property value. The label associated with Category 4
func (m *PlannerCategoryDescriptions) GetCategory4()(*string) {
    if m == nil {
        return nil
    }
    return m.category4
}
// GetCategory5 gets the category5 property value. The label associated with Category 5
func (m *PlannerCategoryDescriptions) GetCategory5()(*string) {
    if m == nil {
        return nil
    }
    return m.category5
}
// GetCategory6 gets the category6 property value. The label associated with Category 6
func (m *PlannerCategoryDescriptions) GetCategory6()(*string) {
    if m == nil {
        return nil
    }
    return m.category6
}
// GetCategory7 gets the category7 property value. The label associated with Category 7
func (m *PlannerCategoryDescriptions) GetCategory7()(*string) {
    if m == nil {
        return nil
    }
    return m.category7
}
// GetCategory8 gets the category8 property value. The label associated with Category 8
func (m *PlannerCategoryDescriptions) GetCategory8()(*string) {
    if m == nil {
        return nil
    }
    return m.category8
}
// GetCategory9 gets the category9 property value. The label associated with Category 9
func (m *PlannerCategoryDescriptions) GetCategory9()(*string) {
    if m == nil {
        return nil
    }
    return m.category9
}
// GetFieldDeserializers the deserialization information for the current model.
// Every field of this model is a plain string, so a single generic
// string-field handler is built per key from a name->setter table; this
// replaces 25 byte-identical hand-written closures and keeps the map in
// lockstep with the setters.
func (m *PlannerCategoryDescriptions) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    type node = i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode
    setters := map[string]func(*string){
        "category1":  m.SetCategory1,
        "category2":  m.SetCategory2,
        "category3":  m.SetCategory3,
        "category4":  m.SetCategory4,
        "category5":  m.SetCategory5,
        "category6":  m.SetCategory6,
        "category7":  m.SetCategory7,
        "category8":  m.SetCategory8,
        "category9":  m.SetCategory9,
        "category10": m.SetCategory10,
        "category11": m.SetCategory11,
        "category12": m.SetCategory12,
        "category13": m.SetCategory13,
        "category14": m.SetCategory14,
        "category15": m.SetCategory15,
        "category16": m.SetCategory16,
        "category17": m.SetCategory17,
        "category18": m.SetCategory18,
        "category19": m.SetCategory19,
        "category20": m.SetCategory20,
        "category21": m.SetCategory21,
        "category22": m.SetCategory22,
        "category23": m.SetCategory23,
        "category24": m.SetCategory24,
        "category25": m.SetCategory25,
    }
    res := make(map[string]func(node)(error), len(setters))
    for key, set := range setters {
        set := set // capture per-iteration value (pre-Go 1.22 loop semantics)
        res[key] = func(n node) error {
            val, err := n.GetStringValue()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    return res
}
// Serialize serializes information the current object.
// The 25 identical write stanzas of the generated original are folded
// into a single ordered table loop; the slice order reproduces the
// original output order exactly (lexicographic on the key), followed by
// the additional-data bag.
func (m *PlannerCategoryDescriptions) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    fields := []struct {
        key string
        get func() *string
    }{
        {"category1", m.GetCategory1},
        {"category10", m.GetCategory10},
        {"category11", m.GetCategory11},
        {"category12", m.GetCategory12},
        {"category13", m.GetCategory13},
        {"category14", m.GetCategory14},
        {"category15", m.GetCategory15},
        {"category16", m.GetCategory16},
        {"category17", m.GetCategory17},
        {"category18", m.GetCategory18},
        {"category19", m.GetCategory19},
        {"category2", m.GetCategory2},
        {"category20", m.GetCategory20},
        {"category21", m.GetCategory21},
        {"category22", m.GetCategory22},
        {"category23", m.GetCategory23},
        {"category24", m.GetCategory24},
        {"category25", m.GetCategory25},
        {"category3", m.GetCategory3},
        {"category4", m.GetCategory4},
        {"category5", m.GetCategory5},
        {"category6", m.GetCategory6},
        {"category7", m.GetCategory7},
        {"category8", m.GetCategory8},
        {"category9", m.GetCategory9},
    }
    for _, f := range fields {
        if err := writer.WriteStringValue(f.key, f.get()); err != nil {
            return err
        }
    }
    if err := writer.WriteAdditionalData(m.GetAdditionalData()); err != nil {
        return err
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *PlannerCategoryDescriptions) SetAdditionalData(value map[string]interface{})() {
if m != nil {
m.additionalData = value
}
}
// SetCategory1 sets the category1 property value. The label associated with Category 1
func (m *PlannerCategoryDescriptions) SetCategory1(value *string)() {
if m != nil {
m.category1 = value
}
}
// SetCategory10 sets the category10 property value. The label associated with Category 10
func (m *PlannerCategoryDescriptions) SetCategory10(value *string)() {
if m != nil {
m.category10 = value
}
}
// SetCategory11 sets the category11 property value. The label associated with Category 11
func (m *PlannerCategoryDescriptions) SetCategory11(value *string)() {
if m != nil {
m.category11 = value
}
}
// SetCategory12 sets the category12 property value. The label associated with Category 12
func (m *PlannerCategoryDescriptions) SetCategory12(value *string)() {
if m != nil {
m.category12 = value
}
}
// SetCategory13 sets the category13 property value. The label associated with Category 13
func (m *PlannerCategoryDescriptions) SetCategory13(value *string)() {
if m != nil {
m.category13 = value
}
}
// SetCategory14 sets the category14 property value. The label associated with Category 14
func (m *PlannerCategoryDescriptions) SetCategory14(value *string)() {
if m != nil {
m.category14 = value
}
}
// SetCategory15 sets the category15 property value. The label associated with Category 15
func (m *PlannerCategoryDescriptions) SetCategory15(value *string)() {
if m != nil {
m.category15 = value
}
}
// SetCategory16 sets the category16 property value. The label associated with Category 16
func (m *PlannerCategoryDescriptions) SetCategory16(value *string)() {
if m != nil {
m.category16 = value
}
}
// SetCategory17 sets the category17 property value. The label associated with Category 17
func (m *PlannerCategoryDescriptions) SetCategory17(value *string)() {
if m != nil {
m.category17 = value
}
}
// SetCategory18 sets the category18 property value. The label associated with Category 18
func (m *PlannerCategoryDescriptions) SetCategory18(value *string)() {
if m != nil {
m.category18 = value
}
}
// SetCategory19 sets the category19 property value. The label associated with Category 19
func (m *PlannerCategoryDescriptions) SetCategory19(value *string)() {
if m != nil {
m.category19 = value
}
}
// SetCategory2 sets the category2 property value. The label associated with Category 2
func (m *PlannerCategoryDescriptions) SetCategory2(value *string)() {
if m != nil {
m.category2 = value
}
}
// SetCategory20 sets the category20 property value. The label associated with Category 20
func (m *PlannerCategoryDescriptions) SetCategory20(value *string)() {
if m != nil {
m.category20 = value
}
}
// SetCategory21 sets the category21 property value. The label associated with Category 21
func (m *PlannerCategoryDescriptions) SetCategory21(value *string)() {
if m != nil {
m.category21 = value
}
}
// SetCategory22 sets the category22 property value. The label associated with Category 22
func (m *PlannerCategoryDescriptions) SetCategory22(value *string)() {
if m != nil {
m.category22 = value
}
}
// SetCategory23 sets the category23 property value. The label associated with Category 23
func (m *PlannerCategoryDescriptions) SetCategory23(value *string)() {
if m != nil {
m.category23 = value
}
}
// SetCategory24 sets the category24 property value. The label associated with Category 24
func (m *PlannerCategoryDescriptions) SetCategory24(value *string)() {
if m != nil {
m.category24 = value
}
}
// SetCategory25 sets the category25 property value. The label associated with Category 25
func (m *PlannerCategoryDescriptions) SetCategory25(value *string)() {
if m != nil {
m.category25 = value
}
}
// SetCategory3 sets the category3 property value. The label associated with Category 3
func (m *PlannerCategoryDescriptions) SetCategory3(value *string)() {
if m != nil {
m.category3 = value
}
}
// SetCategory4 sets the category4 property value. The label associated with Category 4
func (m *PlannerCategoryDescriptions) SetCategory4(value *string)() {
if m != nil {
m.category4 = value
}
}
// SetCategory5 sets the category5 property value. The label associated with Category 5
func (m *PlannerCategoryDescriptions) SetCategory5(value *string)() {
if m != nil {
m.category5 = value
}
}
// SetCategory6 sets the category6 property value. The label associated with Category 6
func (m *PlannerCategoryDescriptions) SetCategory6(value *string)() {
if m != nil {
m.category6 = value
}
}
// SetCategory7 sets the category7 property value. The label associated with Category 7
func (m *PlannerCategoryDescriptions) SetCategory7(value *string)() {
if m != nil {
m.category7 = value
}
}
// SetCategory8 sets the category8 property value. The label associated with Category 8
func (m *PlannerCategoryDescriptions) SetCategory8(value *string)() {
if m != nil {
m.category8 = value
}
}
// SetCategory9 sets the category9 property value, the label associated with Category 9.
func (m *PlannerCategoryDescriptions) SetCategory9(value *string) {
	if m == nil {
		return
	}
	m.category9 = value
} | models/planner_category_descriptions.go | 0.706393 | 0.408749 | planner_category_descriptions.go | starcoder |
package packed
// Efficient sequential read/write of packed integers.
// BulkOperationPacked19 specializes the generic BulkOperationPacked
// for exactly 19 bits per value (values in [0, 524287]).
type BulkOperationPacked19 struct {
	*BulkOperationPacked
}
// newBulkOperationPacked19 builds a BulkOperation configured for
// 19 bits per value.
func newBulkOperationPacked19() BulkOperation {
	op := &BulkOperationPacked19{newBulkOperationPacked(19)}
	return op
}
// decodeLongToInt unpacks 19-bit values from 64-bit blocks into int32s.
// Values are packed big-endian inside each block; every iteration
// consumes 19 blocks (1216 bits) and produces 64 values.
func (op *BulkOperationPacked19) decodeLongToInt(blocks []int64, values []int32, iterations int) {
	const bitsPerValue = 19
	const mask = 1<<bitsPerValue - 1
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		for bitPos := 0; bitPos < 64*bitsPerValue; bitPos += bitsPerValue {
			blockIdx := blocksOffset + (bitPos >> 6)
			shift := uint(bitPos & 63)
			// Load the word holding the value's high bits, left-aligned so the
			// value occupies the top bitsPerValue bits.
			w := uint64(blocks[blockIdx]) << shift
			if shift+bitsPerValue > 64 {
				// The value straddles two blocks; merge in the low bits.
				w |= uint64(blocks[blockIdx+1]) >> (64 - shift)
			}
			values[valuesOffset] = int32((w >> (64 - bitsPerValue)) & mask)
			valuesOffset++
		}
		blocksOffset += bitsPerValue
	}
}
// DecodeByteToInt unpacks 19-bit values from a big-endian byte stream
// into int32s. Every iteration consumes 19 bytes (152 bits) and
// produces 8 values.
func (op *BulkOperationPacked19) DecodeByteToInt(blocks []byte, values []int32, iterations int) {
	const bitsPerValue = 19
	const mask = 1<<bitsPerValue - 1
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		for bitPos := 0; bitPos < 8*bitsPerValue; bitPos += bitsPerValue {
			first := bitPos >> 3
			last := (bitPos + bitsPerValue - 1) >> 3
			// Accumulate every byte the value touches, then shift off the
			// trailing bits that belong to the next value.
			var w uint64
			for b := first; b <= last; b++ {
				w = w<<8 | uint64(blocks[blocksOffset+b])
			}
			drop := uint((last+1)*8 - (bitPos + bitsPerValue))
			values[valuesOffset] = int32((w >> drop) & mask)
			valuesOffset++
		}
		blocksOffset += bitsPerValue
	}
}
// DecodeLongToLong unpacks 19-bit values from 64-bit blocks into int64s.
// Values are packed big-endian inside each block; every iteration
// consumes 19 blocks (1216 bits) and produces 64 values.
func (op *BulkOperationPacked19) DecodeLongToLong(blocks []int64, values []int64, iterations int) {
	const bitsPerValue = 19
	const mask = 1<<bitsPerValue - 1
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		for bitPos := 0; bitPos < 64*bitsPerValue; bitPos += bitsPerValue {
			blockIdx := blocksOffset + (bitPos >> 6)
			shift := uint(bitPos & 63)
			// Load the word holding the value's high bits, left-aligned so the
			// value occupies the top bitsPerValue bits.
			w := uint64(blocks[blockIdx]) << shift
			if shift+bitsPerValue > 64 {
				// The value straddles two blocks; merge in the low bits.
				w |= uint64(blocks[blockIdx+1]) >> (64 - shift)
			}
			values[valuesOffset] = int64((w >> (64 - bitsPerValue)) & mask)
			valuesOffset++
		}
		blocksOffset += bitsPerValue
	}
}
// decodeByteToLong unpacks 19-bit values from a big-endian byte stream
// into int64s. Every iteration consumes 19 bytes (152 bits) and
// produces 8 values.
func (op *BulkOperationPacked19) decodeByteToLong(blocks []byte, values []int64, iterations int) {
	const bitsPerValue = 19
	const mask = 1<<bitsPerValue - 1
	blocksOffset, valuesOffset := 0, 0
	for i := 0; i < iterations; i++ {
		for bitPos := 0; bitPos < 8*bitsPerValue; bitPos += bitsPerValue {
			first := bitPos >> 3
			last := (bitPos + bitsPerValue - 1) >> 3
			// Accumulate every byte the value touches, then shift off the
			// trailing bits that belong to the next value.
			var w uint64
			for b := first; b <= last; b++ {
				w = w<<8 | uint64(blocks[blocksOffset+b])
			}
			drop := uint((last+1)*8 - (bitPos + bitsPerValue))
			values[valuesOffset] = int64((w >> drop) & mask)
			valuesOffset++
		}
		blocksOffset += bitsPerValue
	}
} | core/util/packed/bulkOperation19.go | 0.572962 | 0.661834 | bulkOperation19.go | starcoder |
package cm
import "github.com/m3db/m3/src/x/pool"
// Sample represents a sampled value. Samples form a doubly-linked list
// via prev/next.
type Sample struct {
	value    float64 // sampled value
	numRanks int64   // number of ranks represented
	delta    int64   // delta between min rank and max rank
	prev     *Sample // previous sample in the list
	next     *Sample // next sample in the list
}
// SamplePool is a pool of samples; Get and Put recycle Sample objects.
type SamplePool interface {
	// Init initializes the pool.
	Init()

	// Get gets a sample from the pool.
	Get() *Sample

	// Put returns a sample to the pool.
	Put(sample *Sample)
}
// Stream represents a data sample stream for floating point numbers.
// Typical usage: Add values, Flush, then query Min/Max/Quantile;
// ResetSetData prepares the stream for reuse with a new set of
// target quantiles.
type Stream interface {
	// Add adds a sample value.
	Add(value float64)

	// Flush flushes the internal buffer.
	Flush()

	// Min returns the minimum value.
	Min() float64

	// Max returns the maximum value.
	Max() float64

	// Quantile returns the quantile value.
	// q is presumably in [0, 1] — confirm with the implementation.
	Quantile(q float64) float64

	// Close closes the stream.
	Close()

	// ResetSetData resets the stream and sets data.
	ResetSetData(quantiles []float64)
}
// StreamAlloc allocates a stream; StreamPool.Init uses it to create
// new streams on demand.
type StreamAlloc func() Stream
// StreamPool provides a pool for streams; Get and Put recycle Stream
// instances created via the StreamAlloc supplied to Init.
type StreamPool interface {
	// Init initializes the pool.
	Init(alloc StreamAlloc)

	// Get provides a stream from the pool.
	Get() Stream

	// Put returns a stream to the pool.
	Put(value Stream)
}
// Options represent various options for computing quantiles.
// Each setter returns Options, so configuration calls can be chained.
type Options interface {
	// SetEps sets the desired epsilon for errors.
	SetEps(value float64) Options

	// Eps returns the desired epsilon for errors.
	Eps() float64

	// SetCapacity sets the initial heap capacity.
	SetCapacity(value int) Options

	// Capacity returns the initial heap capacity.
	Capacity() int

	// SetInsertAndCompressEvery sets how frequently the timer values are
	// inserted into the stream and compressed to reduce write latency for
	// high frequency timers.
	SetInsertAndCompressEvery(value int) Options

	// InsertAndCompressEvery returns how frequently the timer values are
	// inserted into the stream and compressed to reduce write latency for
	// high frequency timers.
	InsertAndCompressEvery() int

	// SetFlushEvery sets how frequently the underlying stream is flushed
	// to reduce processing time when computing aggregated statistics from
	// the stream.
	SetFlushEvery(value int) Options

	// FlushEvery returns how frequently the underlying stream is flushed
	// to reduce processing time when computing aggregated statistics from
	// the stream.
	FlushEvery() int

	// SetStreamPool sets the stream pool.
	SetStreamPool(value StreamPool) Options

	// StreamPool returns the stream pool.
	StreamPool() StreamPool

	// SetSamplePool sets the sample pool.
	SetSamplePool(value SamplePool) Options

	// SamplePool returns the sample pool.
	SamplePool() SamplePool

	// SetFloatsPool sets the floats pool.
	SetFloatsPool(value pool.FloatsPool) Options

	// FloatsPool returns the floats pool.
	FloatsPool() pool.FloatsPool

	// Validate validates the options.
	Validate() error
} | src/aggregator/aggregation/quantile/cm/types.go | 0.783285 | 0.459682 | types.go | starcoder |
package jsonlogic
import (
"encoding/json"
"fmt"
"io"
"math"
"sort"
"strings"
"github.com/mitchellh/copystructure"
)
// ErrInvalidOperator is returned when a rule references an operator
// this implementation does not recognize.
type ErrInvalidOperator struct {
	operator string
}

// Error implements the error interface.
func (e ErrInvalidOperator) Error() string {
	return fmt.Sprintf(`The operator "%s" is not supported`, e.operator)
}
// between evaluates a three-operand comparison a OP b OP c, resolving
// each operand against data first.
func between(operator string, values []interface{}, data interface{}) interface{} {
	a := parseValues(values[0], data)
	b := parseValues(values[1], data)
	c := parseValues(values[2], data)
	switch operator {
	case "<":
		return less(a, b) && less(b, c)
	case "<=":
		return (less(a, b) || equals(a, b)) && (less(b, c) || equals(b, c))
	case ">=":
		return (less(c, b) || equals(c, b)) && (less(b, a) || equals(b, a))
	default: // ">"
		return less(c, b) && less(b, a)
	}
}
// unary evaluates a single-operand operator: numeric coercion ("+", "*",
// "/"), negation ("-"), absolute value ("abs"), and truthiness
// ("!", "!!", anything else).
func unary(operator string, value interface{}) interface{} {
	switch operator {
	case "+", "*", "/":
		return toNumber(value)
	case "-":
		return -1 * toNumber(value)
	case "!!":
		// Double negation: cast to boolean.
		return !unary("!", value).(bool)
	case "abs":
		return abs(value)
	case "!":
		return !isTrue(value)
	default:
		return isTrue(value)
	}
}
// _and implements JsonLogic "and": short-circuits on the first falsy
// operand; for all-boolean truthy operands returns true, otherwise
// returns the largest numeric operand seen.
func _and(values []interface{}) interface{} {
	var largest float64
	sawNumber := false
	for _, item := range values {
		switch {
		case isSlice(item):
			return item
		case isBool(item) && !item.(bool):
			return false
		case isString(item) && toString(item) == "":
			return item
		case !isNumber(item):
			continue
		}
		sawNumber = true
		if n := toNumber(item); n > largest {
			largest = n
		}
	}
	if !sawNumber {
		// Purely boolean (or empty) expression: everything was truthy.
		return true
	}
	return largest
}
// _or implements JsonLogic "or": returns the first truthy operand,
// or false when none is truthy.
func _or(values []interface{}) interface{} {
	for _, candidate := range values {
		if isTrue(candidate) {
			return candidate
		}
	}
	return false
}
// _inRange reports whether value lies inside the inclusive
// [low, high] pair held in values, comparing numerically when value
// is a number and lexicographically otherwise.
func _inRange(value interface{}, values interface{}) bool {
	bounds := values.([]interface{})
	low, high := bounds[0], bounds[1]
	if isNumber(value) {
		n := toNumber(value)
		return n >= toNumber(low) && toNumber(high) >= n
	}
	s := toString(value)
	return s >= toString(low) && toString(high) >= s
}
// _inSorted reports whether value is contained in values, which must be
// sorted in ascending (string) order. Elements may be scalars or
// two-element [start, end] ranges; all comparisons use toString.
// Expect values to be in alphabetical ascending order
func _inSorted(value interface{}, values interface{}) bool {
	valuesSlice := values.([]interface{})
	// Binary-search predicate: true from the first element >= value
	// (or the first range that contains or lies beyond value).
	findElement := func(i int) bool {
		element := valuesSlice[i]
		if isSlice(valuesSlice[i]) {
			sliceElement := valuesSlice[i].([]interface{})
			start := sliceElement[0]
			end := sliceElement[1]
			// NOTE(review): the trailing "end > value" keeps the predicate
			// monotonic for sort.Search when ranges end past value —
			// presumably intentional; confirm against the test suite.
			return (toString(start) <= toString(value) && toString(end) >= toString(value)) || toString(end) > toString(value)
		}
		return toString(element) >= toString(value)
	}
	i := sort.Search(len(valuesSlice), findElement)
	if i >= len(valuesSlice) {
		// Every element compares below value: not found.
		return false
	}
	if isSlice(valuesSlice[i]) {
		sliceElement := valuesSlice[i].([]interface{})
		start := sliceElement[0]
		end := sliceElement[1]
		return toString(start) <= toString(value) && toString(end) >= toString(value)
	}
	return toString(valuesSlice[i]) == toString(value)
}
// _in reports membership: substring containment when values is a
// string, otherwise element (or range) containment in the slice.
func _in(value interface{}, values interface{}) bool {
	if isString(values) {
		return strings.Contains(values.(string), value.(string))
	}
	for _, candidate := range values.([]interface{}) {
		switch {
		case isSlice(candidate):
			// A nested slice denotes an inclusive [start, end] range.
			if _inRange(value, candidate) {
				return true
			}
		case isNumber(value):
			if toNumber(candidate) == value {
				return true
			}
		case candidate == value:
			return true
		}
	}
	return false
}
// max returns the largest of the given values, coerced to numbers.
//
// Fixed: the accumulator previously started at
// math.SmallestNonzeroFloat64 — the smallest *positive* float — so
// inputs that were all zero or negative incorrectly returned
// ~4.9e-324. Starting from -Inf handles any real input; an empty
// input yields -Inf.
func max(values interface{}) interface{} {
	bigger := math.Inf(-1)
	for _, n := range values.([]interface{}) {
		if _n := toNumber(n); _n > bigger {
			bigger = _n
		}
	}
	return bigger
}
// min returns the smallest of the given values, coerced to numbers.
//
// Fixed: the accumulator previously started at math.MaxFloat64, which
// misreports inputs containing +Inf (and makes an empty input return
// MaxFloat64). Starting from +Inf handles any real input; an empty
// input yields +Inf.
func min(values interface{}) interface{} {
	smallest := math.Inf(1)
	for _, n := range values.([]interface{}) {
		if _n := toNumber(n); _n < smallest {
			smallest = _n
		}
	}
	return smallest
}
// merge flattens nested slices into a single slice, recursing at most
// one level past the top (level > 1 treats the value as a scalar).
func merge(values interface{}, level int8) interface{} {
	flattened := make([]interface{}, 0)
	if isPrimitive(values) || level > 1 {
		return append(flattened, values)
	}
	if isSlice(values) {
		for _, item := range values.([]interface{}) {
			flattened = append(flattened, merge(item, level+1).([]interface{})...)
		}
	}
	return flattened
}
// conditional implements JsonLogic "if": walks (condition, result)
// pairs, returning the result of the first truthy condition; an odd
// trailing element acts as the final "else" branch.
func conditional(values, data interface{}) interface{} {
	if isPrimitive(values) {
		return values
	}
	branches := values.([]interface{})
	n := len(branches)
	if n == 0 {
		return nil
	}
	for i := 0; i+1 < n; i += 2 {
		cond := branches[i]
		if isMap(cond) {
			// The condition is itself a rule: resolve it against data.
			cond = getVar(branches[i], data)
		}
		if isTrue(cond) {
			return branches[i+1]
		}
	}
	if n%2 == 1 {
		// Odd length: the last element is the "else" value.
		return branches[n-1]
	}
	return nil
}
// setProperty deep-copies the object in value[0] and assigns the
// evaluated value[2] to the key named by value[1]; non-map subjects
// are returned unchanged.
func setProperty(value, data interface{}) interface{} {
	args := value.([]interface{})
	object := args[0]
	if !isMap(object) {
		return object
	}
	property := args[1].(string)
	clone, err := copystructure.Copy(object)
	if err != nil {
		panic(err)
	}
	result := clone.(map[string]interface{})
	result[property] = parseValues(args[2], data)
	return interface{}(result)
}
// missing returns the subset of the requested variable names that do
// not resolve against data; a single string is treated as a
// one-element list.
func missing(values, data interface{}) interface{} {
	if isString(values) {
		values = []interface{}{values}
	}
	absent := make([]interface{}, 0)
	for _, name := range values.([]interface{}) {
		if getVar(name, data) == nil {
			absent = append(absent, name)
		}
	}
	return absent
}
// missingSome returns the missing variable names when fewer than
// values[0] of the names in values[1] resolve against data; otherwise
// it returns an empty list.
func missingSome(values, data interface{}) interface{} {
	args := values.([]interface{})
	required := int(toNumber(args[0]))
	names := args[1]
	absent := make([]interface{}, 0)
	present := 0
	for _, name := range names.([]interface{}) {
		if getVar(name, data) == nil {
			absent = append(absent, name)
		} else {
			present++
		}
	}
	if required > present {
		return absent
	}
	return make([]interface{}, 0)
}
// all reports whether every element of the subject (values[0],
// evaluated if it is a rule) satisfies the condition in values[1].
// An empty or falsy subject yields false.
func all(values, data interface{}) interface{} {
	parsed := values.([]interface{})
	var subject interface{}
	switch {
	case isMap(parsed[0]):
		subject = apply(parsed[0], data)
	case isSlice(parsed[0]):
		subject = parsed[0]
	}
	if !isTrue(subject) {
		return false
	}
	conditions := solveVars(parsed[1], data)
	for _, item := range subject.([]interface{}) {
		if !isTrue(apply(conditions, item)) {
			return false
		}
	}
	return true
}
// none reports whether no element of the subject (values[0],
// evaluated if it is a rule) satisfies the condition in values[1].
// An empty or falsy subject yields true.
func none(values, data interface{}) interface{} {
	parsed := values.([]interface{})
	var subject interface{}
	switch {
	case isMap(parsed[0]):
		subject = apply(parsed[0], data)
	case isSlice(parsed[0]):
		subject = parsed[0]
	}
	if !isTrue(subject) {
		return true
	}
	conditions := solveVars(parsed[1], data)
	for _, item := range subject.([]interface{}) {
		if isTrue(apply(conditions, item)) {
			return false
		}
	}
	return true
}
// some reports whether at least one element of the subject (values[0],
// evaluated if it is a rule) satisfies the condition in values[1].
// An empty or falsy subject yields false.
func some(values, data interface{}) interface{} {
	parsed := values.([]interface{})
	var subject interface{}
	switch {
	case isMap(parsed[0]):
		subject = apply(parsed[0], data)
	case isSlice(parsed[0]):
		subject = parsed[0]
	}
	if !isTrue(subject) {
		return false
	}
	conditions := solveVars(parsed[1], data)
	for _, item := range subject.([]interface{}) {
		if isTrue(apply(conditions, item)) {
			return true
		}
	}
	return false
}
// parseValues resolves operand values against data: primitives pass
// through, maps are evaluated as rules, and slices are resolved
// element by element.
func parseValues(values, data interface{}) interface{} {
	if values == nil || isPrimitive(values) {
		return values
	}
	if isMap(values) {
		return apply(values, data)
	}
	resolved := make([]interface{}, 0)
	for _, item := range values.([]interface{}) {
		if isMap(item) {
			resolved = append(resolved, apply(item, data))
			continue
		}
		resolved = append(resolved, item)
	}
	return resolved
}
// apply evaluates a single-operator rule map against data. Iteration
// operators receive their operands raw; everything else gets resolved
// operands and is dispatched to operation.
func apply(rules, data interface{}) interface{} {
	for operator, values := range rules.(map[string]interface{}) {
		switch operator {
		case "filter":
			return filter(values, data)
		case "map":
			return _map(values, data)
		case "reduce":
			return reduce(values, data)
		case "all":
			return all(values, data)
		case "none":
			return none(values, data)
		case "some":
			return some(values, data)
		default:
			return operation(operator, parseValues(values, data), data)
		}
	}
	// an empty-map rule should return an empty-map
	return make(map[string]interface{})
}
// Apply reads a rule and its data from io.Readers, evaluates the rule,
// and encodes the JSON result into result. A nil data reader is
// treated as an empty JSON object.
func Apply(rule, data io.Reader, result io.Writer) error {
	if data == nil {
		data = strings.NewReader("{}")
	}
	var _rule, _data interface{}
	if err := json.NewDecoder(rule).Decode(&_rule); err != nil {
		return err
	}
	if err := json.NewDecoder(data).Decode(&_data); err != nil {
		return err
	}
	output, err := ApplyInterface(_rule, _data)
	if err != nil {
		return err
	}
	return json.NewEncoder(result).Encode(output)
}
// ApplyRaw evaluates a rule against data, both given as raw JSON, and
// returns the raw JSON result. Nil data is treated as an empty object.
func ApplyRaw(rule, data json.RawMessage) (json.RawMessage, error) {
	if data == nil {
		data = json.RawMessage("{}")
	}
	var _rule, _data interface{}
	if err := json.Unmarshal(rule, &_rule); err != nil {
		return nil, err
	}
	if err := json.Unmarshal(data, &_data); err != nil {
		return nil, err
	}
	result, err := ApplyInterface(_rule, _data)
	if err != nil {
		return nil, err
	}
	return json.Marshal(&result)
}
// ApplyInterface evaluates an already-decoded rule against data,
// converting any panic raised during evaluation into a returned error.
//
// Fixed: the deferred handler asserted e.(error) unconditionally; a
// panic whose value is not an error (e.g. panic("msg")) made the
// assertion itself panic, crashing the caller instead of returning an
// error. The comma-ok form handles both cases.
func ApplyInterface(rule, data interface{}) (output interface{}, err error) {
	defer func() {
		if e := recover(); e != nil {
			if perr, ok := e.(error); ok {
				err = perr
			} else {
				err = fmt.Errorf("jsonlogic: panic: %v", e)
			}
		}
	}()
	if isMap(rule) {
		return apply(rule, data), err
	}
	// Non-map rules are literal values.
	return rule, err
} | jsonlogic.go | 0.614278 | 0.405154 | jsonlogic.go | starcoder |
package fp
// ZipAllBoolArray pairs the elements of a and a2 positionally. The
// result has max(len(a), len(a2)) pairs; the shorter side is padded
// with its default value.
func (a BoolArray) ZipAllBoolArray(a2 BoolArray, thisDefault Bool, thatDefault Bool) Tuple2Array {
	n := len(a)
	if len(a2) > n {
		n = len(a2)
	}
	zipped := make([]Tuple2, n)
	for i := range zipped {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) {
			first = a[i]
		}
		if i < len(a2) {
			second = a2[i]
		}
		zipped[i] = Tuple2{first, second}
	}
	return zipped
}
// ZipAllStringArray pairs the elements of a and a2 positionally. The
// result has max(len(a), len(a2)) pairs; the shorter side is padded
// with its default value.
func (a BoolArray) ZipAllStringArray(a2 StringArray, thisDefault Bool, thatDefault String) Tuple2Array {
	n := len(a)
	if len(a2) > n {
		n = len(a2)
	}
	zipped := make([]Tuple2, n)
	for i := range zipped {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) {
			first = a[i]
		}
		if i < len(a2) {
			second = a2[i]
		}
		zipped[i] = Tuple2{first, second}
	}
	return zipped
}
// ZipAllIntArray pairs the elements of a and a2 positionally. The
// result has max(len(a), len(a2)) pairs; the shorter side is padded
// with its default value.
func (a BoolArray) ZipAllIntArray(a2 IntArray, thisDefault Bool, thatDefault Int) Tuple2Array {
	n := len(a)
	if len(a2) > n {
		n = len(a2)
	}
	zipped := make([]Tuple2, n)
	for i := range zipped {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) {
			first = a[i]
		}
		if i < len(a2) {
			second = a2[i]
		}
		zipped[i] = Tuple2{first, second}
	}
	return zipped
}
// ZipAllInt64Array pairs the elements of a and a2 positionally. The
// result has max(len(a), len(a2)) pairs; the shorter side is padded
// with its default value.
func (a BoolArray) ZipAllInt64Array(a2 Int64Array, thisDefault Bool, thatDefault Int64) Tuple2Array {
	n := len(a)
	if len(a2) > n {
		n = len(a2)
	}
	zipped := make([]Tuple2, n)
	for i := range zipped {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) {
			first = a[i]
		}
		if i < len(a2) {
			second = a2[i]
		}
		zipped[i] = Tuple2{first, second}
	}
	return zipped
}
// ZipAllByteArray pairs the elements of a and a2 positionally. The
// result has max(len(a), len(a2)) pairs; the shorter side is padded
// with its default value.
func (a BoolArray) ZipAllByteArray(a2 ByteArray, thisDefault Bool, thatDefault Byte) Tuple2Array {
	n := len(a)
	if len(a2) > n {
		n = len(a2)
	}
	zipped := make([]Tuple2, n)
	for i := range zipped {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) {
			first = a[i]
		}
		if i < len(a2) {
			second = a2[i]
		}
		zipped[i] = Tuple2{first, second}
	}
	return zipped
}
// ZipAllRuneArray pairs the elements of a and a2 positionally. The
// result has max(len(a), len(a2)) pairs; the shorter side is padded
// with its default value.
func (a BoolArray) ZipAllRuneArray(a2 RuneArray, thisDefault Bool, thatDefault Rune) Tuple2Array {
	n := len(a)
	if len(a2) > n {
		n = len(a2)
	}
	zipped := make([]Tuple2, n)
	for i := range zipped {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) {
			first = a[i]
		}
		if i < len(a2) {
			second = a2[i]
		}
		zipped[i] = Tuple2{first, second}
	}
	return zipped
}
// ZipAllFloat32Array pairs the elements of a and a2 positionally. The
// result has max(len(a), len(a2)) pairs; the shorter side is padded
// with its default value.
func (a BoolArray) ZipAllFloat32Array(a2 Float32Array, thisDefault Bool, thatDefault Float32) Tuple2Array {
	n := len(a)
	if len(a2) > n {
		n = len(a2)
	}
	zipped := make([]Tuple2, n)
	for i := range zipped {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) {
			first = a[i]
		}
		if i < len(a2) {
			second = a2[i]
		}
		zipped[i] = Tuple2{first, second}
	}
	return zipped
}
// ZipAllFloat64Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a BoolArray) ZipAllFloat64Array(a2 Float64Array, thisDefault Bool, thatDefault Float64) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllAnyArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a BoolArray) ZipAllAnyArray(a2 AnyArray, thisDefault Bool, thatDefault Any) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllTuple2Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a BoolArray) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Bool, thatDefault Tuple2) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllBoolList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllBoolList(l2 BoolList, thisDefault Bool, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllStringList(l2 StringList, thisDefault Bool, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllIntList(l2 IntList, thisDefault Bool, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllInt64List(l2 Int64List, thisDefault Bool, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllByteList(l2 ByteList, thisDefault Bool, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllRuneList(l2 RuneList, thisDefault Bool, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllFloat32List(l2 Float32List, thisDefault Bool, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllFloat64List(l2 Float64List, thisDefault Bool, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllAnyList(l2 AnyList, thisDefault Bool, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a BoolArray) ZipAllTuple2List(l2 Tuple2List, thisDefault Bool, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllBoolArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllBoolArray(a2 BoolArray, thisDefault String, thatDefault Bool) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllStringArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllStringArray(a2 StringArray, thisDefault String, thatDefault String) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllIntArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllIntArray(a2 IntArray, thisDefault String, thatDefault Int) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllInt64Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllInt64Array(a2 Int64Array, thisDefault String, thatDefault Int64) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllByteArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllByteArray(a2 ByteArray, thisDefault String, thatDefault Byte) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllRuneArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllRuneArray(a2 RuneArray, thisDefault String, thatDefault Rune) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllFloat32Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllFloat32Array(a2 Float32Array, thisDefault String, thatDefault Float32) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllFloat64Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllFloat64Array(a2 Float64Array, thisDefault String, thatDefault Float64) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllAnyArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllAnyArray(a2 AnyArray, thisDefault String, thatDefault Any) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllTuple2Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a StringArray) ZipAllTuple2Array(a2 Tuple2Array, thisDefault String, thatDefault Tuple2) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllBoolList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllBoolList(l2 BoolList, thisDefault String, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllStringList(l2 StringList, thisDefault String, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllIntList(l2 IntList, thisDefault String, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllInt64List(l2 Int64List, thisDefault String, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllByteList(l2 ByteList, thisDefault String, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllRuneList(l2 RuneList, thisDefault String, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllFloat32List(l2 Float32List, thisDefault String, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllFloat64List(l2 Float64List, thisDefault String, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllAnyList(l2 AnyList, thisDefault String, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a StringArray) ZipAllTuple2List(l2 Tuple2List, thisDefault String, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllBoolArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllBoolArray(a2 BoolArray, thisDefault Int, thatDefault Bool) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllStringArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllStringArray(a2 StringArray, thisDefault Int, thatDefault String) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllIntArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllIntArray(a2 IntArray, thisDefault Int, thatDefault Int) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllInt64Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllInt64Array(a2 Int64Array, thisDefault Int, thatDefault Int64) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllByteArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllByteArray(a2 ByteArray, thisDefault Int, thatDefault Byte) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllRuneArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllRuneArray(a2 RuneArray, thisDefault Int, thatDefault Rune) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllFloat32Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllFloat32Array(a2 Float32Array, thisDefault Int, thatDefault Float32) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllFloat64Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllFloat64Array(a2 Float64Array, thisDefault Int, thatDefault Float64) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllAnyArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllAnyArray(a2 AnyArray, thisDefault Int, thatDefault Any) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllTuple2Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a IntArray) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Int, thatDefault Tuple2) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllBoolList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllBoolList(l2 BoolList, thisDefault Int, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllStringList(l2 StringList, thisDefault Int, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllIntList(l2 IntList, thisDefault Int, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllInt64List(l2 Int64List, thisDefault Int, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllByteList(l2 ByteList, thisDefault Int, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllRuneList(l2 RuneList, thisDefault Int, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllFloat32List(l2 Float32List, thisDefault Int, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllFloat64List(l2 Float64List, thisDefault Int, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllAnyList(l2 AnyList, thisDefault Int, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a IntArray) ZipAllTuple2List(l2 Tuple2List, thisDefault Int, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllBoolArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllBoolArray(a2 BoolArray, thisDefault Int64, thatDefault Bool) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllStringArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllStringArray(a2 StringArray, thisDefault Int64, thatDefault String) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllIntArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllIntArray(a2 IntArray, thisDefault Int64, thatDefault Int) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllInt64Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllInt64Array(a2 Int64Array, thisDefault Int64, thatDefault Int64) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllByteArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllByteArray(a2 ByteArray, thisDefault Int64, thatDefault Byte) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllRuneArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllRuneArray(a2 RuneArray, thisDefault Int64, thatDefault Rune) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllFloat32Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllFloat32Array(a2 Float32Array, thisDefault Int64, thatDefault Float32) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllFloat64Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllFloat64Array(a2 Float64Array, thisDefault Int64, thatDefault Float64) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllAnyArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllAnyArray(a2 AnyArray, thisDefault Int64, thatDefault Any) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllTuple2Array zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a Int64Array) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Int64, thatDefault Tuple2) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllBoolList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllBoolList(l2 BoolList, thisDefault Int64, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllStringList(l2 StringList, thisDefault Int64, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllIntList(l2 IntList, thisDefault Int64, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllInt64List(l2 Int64List, thisDefault Int64, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllByteList(l2 ByteList, thisDefault Int64, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllRuneList(l2 RuneList, thisDefault Int64, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllFloat32List(l2 Float32List, thisDefault Int64, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllFloat64List(l2 Float64List, thisDefault Int64, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllAnyList(l2 AnyList, thisDefault Int64, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List zips a with l2 index-wise into a Tuple2Array whose length is
// max(len(a), l2.Size()); the shorter side is padded with its default value.
func (a Int64Array) ZipAllTuple2List(l2 Tuple2List, thisDefault Int64, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	// Fix: iterate the full maxLen. The old loop condition also required
	// xs.NonEmpty(), so a list shorter than the array ended the loop early,
	// leaving zero-valued tuples instead of padding with thatDefault.
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllBoolArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a ByteArray) ZipAllBoolArray(a2 BoolArray, thisDefault Byte, thatDefault Bool) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
// ZipAllStringArray zips a with a2 index-wise into a Tuple2Array whose length is
// max(len(a), len(a2)); the shorter side is padded with its default value.
func (a ByteArray) ZipAllStringArray(a2 StringArray, thisDefault Byte, thatDefault String) Tuple2Array {
	n := int(Int(len(a)).Max(Int(len(a2))))
	out := make(Tuple2Array, n)
	for i := range out {
		var first, second Any = thisDefault, thatDefault
		if i < len(a) { first = a[i] }
		if i < len(a2) { second = a2[i] }
		out[i] = Tuple2{first, second}
	}
	return out
}
func (a ByteArray) ZipAllIntArray(a2 IntArray, thisDefault Byte, thatDefault Int) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a ByteArray) ZipAllInt64Array(a2 Int64Array, thisDefault Byte, thatDefault Int64) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a ByteArray) ZipAllByteArray(a2 ByteArray, thisDefault Byte, thatDefault Byte) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a ByteArray) ZipAllRuneArray(a2 RuneArray, thisDefault Byte, thatDefault Rune) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a ByteArray) ZipAllFloat32Array(a2 Float32Array, thisDefault Byte, thatDefault Float32) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a ByteArray) ZipAllFloat64Array(a2 Float64Array, thisDefault Byte, thatDefault Float64) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a ByteArray) ZipAllAnyArray(a2 AnyArray, thisDefault Byte, thatDefault Any) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a ByteArray) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Byte, thatDefault Tuple2) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
// ZipAllBoolList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties;
// previously trailing slots were left zero-valued and thatDefault was unused.
func (a ByteArray) ZipAllBoolList(l2 BoolList, thisDefault Byte, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllStringList(l2 StringList, thisDefault Byte, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllIntList(l2 IntList, thisDefault Byte, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllInt64List(l2 Int64List, thisDefault Byte, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllByteList(l2 ByteList, thisDefault Byte, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllRuneList(l2 RuneList, thisDefault Byte, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllFloat32List(l2 Float32List, thisDefault Byte, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllFloat64List(l2 Float64List, thisDefault Byte, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllAnyList(l2 AnyList, thisDefault Byte, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a ByteArray) ZipAllTuple2List(l2 Tuple2List, thisDefault Byte, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllBoolArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllBoolArray(a2 BoolArray, thisDefault Rune, thatDefault Bool) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllStringArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllStringArray(a2 StringArray, thisDefault Rune, thatDefault String) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllIntArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllIntArray(a2 IntArray, thisDefault Rune, thatDefault Int) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllInt64Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllInt64Array(a2 Int64Array, thisDefault Rune, thatDefault Int64) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllByteArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllByteArray(a2 ByteArray, thisDefault Rune, thatDefault Byte) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllRuneArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllRuneArray(a2 RuneArray, thisDefault Rune, thatDefault Rune) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllFloat32Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllFloat32Array(a2 Float32Array, thisDefault Rune, thatDefault Float32) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllFloat64Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllFloat64Array(a2 Float64Array, thisDefault Rune, thatDefault Float64) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllAnyArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllAnyArray(a2 AnyArray, thisDefault Rune, thatDefault Any) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllTuple2Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a RuneArray) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Rune, thatDefault Tuple2) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllBoolList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties;
// previously trailing slots were left zero-valued and thatDefault was unused.
func (a RuneArray) ZipAllBoolList(l2 BoolList, thisDefault Rune, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllStringList(l2 StringList, thisDefault Rune, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllIntList(l2 IntList, thisDefault Rune, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllInt64List(l2 Int64List, thisDefault Rune, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllByteList(l2 ByteList, thisDefault Rune, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllRuneList(l2 RuneList, thisDefault Rune, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllFloat32List(l2 Float32List, thisDefault Rune, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllFloat64List(l2 Float64List, thisDefault Rune, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllAnyList(l2 AnyList, thisDefault Rune, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a RuneArray) ZipAllTuple2List(l2 Tuple2List, thisDefault Rune, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllBoolArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllBoolArray(a2 BoolArray, thisDefault Float32, thatDefault Bool) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllStringArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllStringArray(a2 StringArray, thisDefault Float32, thatDefault String) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllIntArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllIntArray(a2 IntArray, thisDefault Float32, thatDefault Int) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllInt64Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllInt64Array(a2 Int64Array, thisDefault Float32, thatDefault Int64) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllByteArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllByteArray(a2 ByteArray, thisDefault Float32, thatDefault Byte) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllRuneArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllRuneArray(a2 RuneArray, thisDefault Float32, thatDefault Rune) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllFloat32Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllFloat32Array(a2 Float32Array, thisDefault Float32, thatDefault Float32) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllFloat64Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllFloat64Array(a2 Float64Array, thisDefault Float32, thatDefault Float64) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllAnyArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllAnyArray(a2 AnyArray, thisDefault Float32, thatDefault Any) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllTuple2Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float32Array) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Float32, thatDefault Tuple2) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllBoolList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties;
// previously trailing slots were left zero-valued and thatDefault was unused.
func (a Float32Array) ZipAllBoolList(l2 BoolList, thisDefault Float32, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllStringList(l2 StringList, thisDefault Float32, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllIntList(l2 IntList, thisDefault Float32, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllInt64List(l2 Int64List, thisDefault Float32, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllByteList(l2 ByteList, thisDefault Float32, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllRuneList(l2 RuneList, thisDefault Float32, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllFloat32List(l2 Float32List, thisDefault Float32, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllFloat64List(l2 Float64List, thisDefault Float32, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllAnyList(l2 AnyList, thisDefault Float32, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float32Array) ZipAllTuple2List(l2 Tuple2List, thisDefault Float32, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllBoolArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllBoolArray(a2 BoolArray, thisDefault Float64, thatDefault Bool) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllStringArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllStringArray(a2 StringArray, thisDefault Float64, thatDefault String) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllIntArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllIntArray(a2 IntArray, thisDefault Float64, thatDefault Int) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllInt64Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllInt64Array(a2 Int64Array, thisDefault Float64, thatDefault Int64) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllByteArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllByteArray(a2 ByteArray, thisDefault Float64, thatDefault Byte) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllRuneArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllRuneArray(a2 RuneArray, thisDefault Float64, thatDefault Rune) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllFloat32Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllFloat32Array(a2 Float32Array, thisDefault Float64, thatDefault Float32) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllFloat64Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllFloat64Array(a2 Float64Array, thisDefault Float64, thatDefault Float64) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllAnyArray pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllAnyArray(a2 AnyArray, thisDefault Float64, thatDefault Any) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllTuple2Array pairs a with a2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), len(a2)) pairs.
func (a Float64Array) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Float64, thatDefault Tuple2) Tuple2Array {
	n1, n2 := len(a), len(a2)
	out := make([]Tuple2, int(Int(n1).Max(Int(n2))))
	for i := range out {
		var l, r Any = thisDefault, thatDefault
		if i < n1 { l = a[i] }
		if i < n2 { r = a2[i] }
		out[i] = Tuple2{l, r}
	}
	return out
}
// ZipAllBoolList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties;
// previously trailing slots were left zero-valued and thatDefault was unused.
func (a Float64Array) ZipAllBoolList(l2 BoolList, thisDefault Float64, thatDefault Bool) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllStringList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllStringList(l2 StringList, thisDefault Float64, thatDefault String) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllIntList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllIntList(l2 IntList, thisDefault Float64, thatDefault Int) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllInt64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllInt64List(l2 Int64List, thisDefault Float64, thatDefault Int64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllByteList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllByteList(l2 ByteList, thisDefault Float64, thatDefault Byte) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllRuneList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllRuneList(l2 RuneList, thisDefault Float64, thatDefault Rune) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat32List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllFloat32List(l2 Float32List, thisDefault Float64, thatDefault Float32) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllFloat64List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllFloat64List(l2 Float64List, thisDefault Float64, thatDefault Float64) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllAnyList pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllAnyList(l2 AnyList, thisDefault Float64, thatDefault Any) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
// ZipAllTuple2List pairs a with l2 positionally; the shorter operand is padded
// with its default value, so the result has max(len(a), l2.Size()) pairs.
// Fix: iterate the full maxLen instead of stopping when the list empties.
func (a Float64Array) ZipAllTuple2List(l2 Tuple2List, thisDefault Float64, thatDefault Tuple2) Tuple2Array {
	len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
	zipped := make([]Tuple2, maxLen)
	var e1, e2 Any
	xs := l2
	for i := 0; i < maxLen; i++ {
		if i < len1 { e1 = a[i] } else { e1 = thisDefault }
		if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
		zipped[i] = Tuple2{e1, e2}
	}
	return zipped
}
func (a AnyArray) ZipAllBoolArray(a2 BoolArray, thisDefault Any, thatDefault Bool) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllStringArray(a2 StringArray, thisDefault Any, thatDefault String) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllIntArray(a2 IntArray, thisDefault Any, thatDefault Int) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllInt64Array(a2 Int64Array, thisDefault Any, thatDefault Int64) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllByteArray(a2 ByteArray, thisDefault Any, thatDefault Byte) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllRuneArray(a2 RuneArray, thisDefault Any, thatDefault Rune) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllFloat32Array(a2 Float32Array, thisDefault Any, thatDefault Float32) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllFloat64Array(a2 Float64Array, thisDefault Any, thatDefault Float64) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllAnyArray(a2 AnyArray, thisDefault Any, thatDefault Any) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Any, thatDefault Tuple2) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllBoolList(l2 BoolList, thisDefault Any, thatDefault Bool) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllStringList(l2 StringList, thisDefault Any, thatDefault String) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllIntList(l2 IntList, thisDefault Any, thatDefault Int) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllInt64List(l2 Int64List, thisDefault Any, thatDefault Int64) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllByteList(l2 ByteList, thisDefault Any, thatDefault Byte) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllRuneList(l2 RuneList, thisDefault Any, thatDefault Rune) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllFloat32List(l2 Float32List, thisDefault Any, thatDefault Float32) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllFloat64List(l2 Float64List, thisDefault Any, thatDefault Float64) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllAnyList(l2 AnyList, thisDefault Any, thatDefault Any) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a AnyArray) ZipAllTuple2List(l2 Tuple2List, thisDefault Any, thatDefault Tuple2) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllBoolArray(a2 BoolArray, thisDefault Tuple2, thatDefault Bool) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllStringArray(a2 StringArray, thisDefault Tuple2, thatDefault String) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllIntArray(a2 IntArray, thisDefault Tuple2, thatDefault Int) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllInt64Array(a2 Int64Array, thisDefault Tuple2, thatDefault Int64) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllByteArray(a2 ByteArray, thisDefault Tuple2, thatDefault Byte) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllRuneArray(a2 RuneArray, thisDefault Tuple2, thatDefault Rune) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllFloat32Array(a2 Float32Array, thisDefault Tuple2, thatDefault Float32) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllFloat64Array(a2 Float64Array, thisDefault Tuple2, thatDefault Float64) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllAnyArray(a2 AnyArray, thisDefault Tuple2, thatDefault Any) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllTuple2Array(a2 Tuple2Array, thisDefault Tuple2, thatDefault Tuple2) Tuple2Array {
len1 := len(a); len2 := len(a2); maxLen := int(Int(len1).Max(Int(len2)))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
for i := 0; i < maxLen; i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if i < len2 { e2 = a2[i] } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllBoolList(l2 BoolList, thisDefault Tuple2, thatDefault Bool) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllStringList(l2 StringList, thisDefault Tuple2, thatDefault String) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllIntList(l2 IntList, thisDefault Tuple2, thatDefault Int) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllInt64List(l2 Int64List, thisDefault Tuple2, thatDefault Int64) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllByteList(l2 ByteList, thisDefault Tuple2, thatDefault Byte) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllRuneList(l2 RuneList, thisDefault Tuple2, thatDefault Rune) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllFloat32List(l2 Float32List, thisDefault Tuple2, thatDefault Float32) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllFloat64List(l2 Float64List, thisDefault Tuple2, thatDefault Float64) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllAnyList(l2 AnyList, thisDefault Tuple2, thatDefault Any) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
}
func (a Tuple2Array) ZipAllTuple2List(l2 Tuple2List, thisDefault Tuple2, thatDefault Tuple2) Tuple2Array {
len1 := len(a); maxLen := int(Int(len1).Max(Int(l2.Size())))
zipped := make([]Tuple2, maxLen)
var e1, e2 Any
xs := l2
for i := 0; i < maxLen && xs.NonEmpty(); i ++ {
if i < len1 { e1 = a[i] } else { e1 = thisDefault }
if xs.NonEmpty() { e2 = *xs.head; xs = *xs.tail } else { e2 = thatDefault }
zipped[i] = Tuple2 { e1, e2 }
}
return zipped
} | fp/bootstrap_array_zipall.go | 0.524882 | 0.407333 | bootstrap_array_zipall.go | starcoder |
package actions
import (
"github.com/LindsayBradford/crem/internal/pkg/dataset/tables"
"github.com/LindsayBradford/crem/internal/pkg/model/action"
"github.com/LindsayBradford/crem/internal/pkg/model/models/catchment/parameters"
"github.com/LindsayBradford/crem/internal/pkg/model/planningunit"
)
type RiverBankRestorationGroup struct {
planningUnitTable tables.CsvTable
parameters parameters.Parameters
bankSedimentContribution BankSedimentContribution
actionMap map[planningunit.Id]*RiverBankRestoration
Container
}
func (r *RiverBankRestorationGroup) WithPlanningUnitTable(planningUnitTable tables.CsvTable) *RiverBankRestorationGroup {
r.planningUnitTable = planningUnitTable
return r
}
func (r *RiverBankRestorationGroup) WithActionsTable(parentSoilsTable tables.CsvTable) *RiverBankRestorationGroup {
r.Container.WithFilter(RiparianType).WithActionsTable(parentSoilsTable)
return r
}
func (r *RiverBankRestorationGroup) WithParameters(parameters parameters.Parameters) *RiverBankRestorationGroup {
r.parameters = parameters
return r
}
func (r *RiverBankRestorationGroup) ManagementActions() []action.ManagementAction {
r.createManagementActions()
actions := make([]action.ManagementAction, 0)
for _, value := range r.actionMap {
actions = append(actions, value)
}
return actions
}
func (r *RiverBankRestorationGroup) createManagementActions() {
r.bankSedimentContribution.Initialise(r.planningUnitTable, r.parameters)
_, rowCount := r.planningUnitTable.ColumnAndRowSize()
r.actionMap = make(map[planningunit.Id]*RiverBankRestoration, rowCount)
for row := uint(0); row < rowCount; row++ {
r.createManagementAction(row)
}
}
func (r *RiverBankRestorationGroup) createManagementAction(rowNumber uint) {
planningUnit := r.planningUnitTable.CellFloat64(planningUnitIndex, rowNumber)
planningUnitAsId := planningunit.Float64ToId(planningUnit)
originalBufferVegetation := r.originalBufferVegetation(rowNumber)
actionedBufferVegetation := r.parameters.GetFloat64(parameters.RiparianBufferVegetationProportionTarget)
if originalBufferVegetation >= actionedBufferVegetation {
return
}
opportunityCostInDollars := r.opportunityCost(planningUnitAsId)
implementationCostInDollars := r.implementationCost(planningUnitAsId)
originalSediment := r.bankSedimentContribution.PlanningUnitSedimentContribution(planningUnitAsId, originalBufferVegetation)
actionedSediment := r.bankSedimentContribution.PlanningUnitSedimentContribution(planningUnitAsId, actionedBufferVegetation)
originalParticulateNitrogen := r.originalParticulateNitrogen(planningUnitAsId)
actionedParticulateNitrogen := r.actionedParticulateNitrogen(planningUnitAsId)
originalFineSediment := r.originalFineSediment(planningUnitAsId)
actionedFineSediment := r.actionedFineSediment(planningUnitAsId)
originalDissolvedNitrogen := r.originalDissolvedNitrogen(planningUnitAsId)
actionedDissolvedNitrogen := r.actionedDissolvedNitrogen(planningUnitAsId)
dissolvedNitrogenRemovalEfficiency := r.dissolvedNitrogenRemovalEfficiency(planningUnitAsId)
r.actionMap[planningUnitAsId] =
NewRiverBankRestoration().
WithPlanningUnit(planningUnitAsId).
WithOriginalBufferVegetation(originalBufferVegetation).
WithActionedBufferVegetation(actionedBufferVegetation).
WithOriginalRiparianSedimentProduction(originalSediment).
WithActionedRiparianSedimentProduction(actionedSediment).
WithOriginalParticulateNitrogen(originalParticulateNitrogen).
WithActionedParticulateNitrogen(actionedParticulateNitrogen).
WithOriginalFineSediment(originalFineSediment).
WithActionedFineSediment(actionedFineSediment).
WithOriginalDissolvedNitrogen(originalDissolvedNitrogen).
WithActionedDissolvedNitrogen(actionedDissolvedNitrogen).
WithDissolvedNitrogenRemovalEfficiency(dissolvedNitrogenRemovalEfficiency).
WithImplementationCost(implementationCostInDollars).
WithOpportunityCost(opportunityCostInDollars)
}
func (r *RiverBankRestorationGroup) originalBufferVegetation(rowNumber uint) float64 {
proportionOfRiparianVegetation := r.planningUnitTable.CellFloat64(riparianVegetationIndex, rowNumber)
return proportionOfRiparianVegetation
}
func (r *RiverBankRestorationGroup) calculateChangeInBufferVegetation(rowNumber uint) float64 {
proportionOfRiparianVegetation := r.originalBufferVegetation(rowNumber)
vegetationTarget := r.parameters.GetFloat64(parameters.RiparianBufferVegetationProportionTarget)
changeInRiparianVegetation := vegetationTarget - proportionOfRiparianVegetation
return changeInRiparianVegetation
} | internal/pkg/model/models/catchment/actions/RiverBankRestorationGroup.go | 0.643217 | 0.423756 | RiverBankRestorationGroup.go | starcoder |
package surface
import (
"math"
"math/rand"
"github.com/hunterloftis/pbr/pkg/geom"
"github.com/hunterloftis/pbr/pkg/render"
"github.com/hunterloftis/pbr/pkg/rgb"
)
// Sphere describes a 3d sphere
type Sphere struct {
mtx *geom.Mtx
mat Material
bounds *geom.Bounds
}
// UnitSphere returns a pointer to a new 1x1x1 Sphere Surface with a given material and optional transforms.
func UnitSphere(m ...Material) *Sphere {
s := &Sphere{
mtx: geom.Identity(),
mat: &DefaultMaterial{},
}
if len(m) > 0 {
s.mat = m[0]
}
return s.transform(geom.Identity())
}
// TODO: unify with cube.transform AABB calc
func (s *Sphere) transform(t *geom.Mtx) *Sphere {
s.mtx = s.mtx.Mult(t)
min := s.mtx.MultPoint(geom.Vec{})
max := s.mtx.MultPoint(geom.Vec{})
for x := -0.5; x <= 0.5; x += 1 {
for y := -0.5; y <= 0.5; y += 1 {
for z := -0.5; z <= 0.5; z += 1 {
pt := s.mtx.MultPoint(geom.Vec{x, y, z})
min = min.Min(pt)
max = max.Max(pt)
}
}
}
s.bounds = geom.NewBounds(min, max)
return s
}
func (s *Sphere) Shift(v geom.Vec) *Sphere {
return s.transform(geom.Shift(v))
}
func (s *Sphere) Scale(v geom.Vec) *Sphere {
return s.transform(geom.Scale(v))
}
func (s *Sphere) Rotate(v geom.Vec) *Sphere {
return s.transform(geom.Rotate(v))
}
func (s *Sphere) Center() geom.Vec {
return s.mtx.MultPoint(geom.Vec{})
}
func (s *Sphere) Bounds() *geom.Bounds {
return s.bounds
}
// Intersect tests whether the sphere intersects a given ray.
// http://tfpsly.free.fr/english/index.html?url=http://tfpsly.free.fr/english/3d/Raytracing.html
// TODO: http://kylehalladay.com/blog/tutorial/math/2013/12/24/Ray-Sphere-Intersection.html
func (s *Sphere) Intersect(ray *geom.Ray, max float64) (obj render.Object, dist float64) {
if ok, near, _ := s.bounds.Check(ray); !ok || near >= max {
return nil, 0
}
i := s.mtx.Inverse()
r := i.MultRay(ray)
op := geom.Vec{}.Minus(r.Origin)
b := op.Dot(geom.Vec(r.Dir))
det := b*b - op.Dot(op) + 0.5*0.5
if det < 0 {
return nil, 0
}
root := math.Sqrt(det)
t1 := b - root
if t1 > 0 {
dist := s.mtx.MultDist(r.Dir.Scaled(t1)).Len()
if dist > bias {
return s, dist
}
}
t2 := b + root
if t2 > 0 {
dist := s.mtx.MultDist(r.Dir.Scaled(t2)).Len()
if dist > bias {
return s, dist
}
}
return nil, 0
}
// At returns the surface normal given a point on the surface.
func (s *Sphere) At(pt geom.Vec, in geom.Dir, rnd *rand.Rand) (normal geom.Dir, bsdf render.BSDF) {
i := s.mtx.Inverse()
p := i.MultPoint(pt)
pu, _ := p.Unit()
n := s.mtx.MultDir(pu)
n2, bsdf := s.mat.At(0, 0, in, n, rnd)
_ = n2
normal = n // TODO: compute normal by combining n and n2 (and a bitangent)
return normal, bsdf
}
func (s *Sphere) Light() rgb.Energy {
return s.mat.Light()
}
func (s *Sphere) Transmit() rgb.Energy {
return s.mat.Transmit()
}
func (s *Sphere) Lights() []render.Object {
if !s.mat.Light().Zero() {
return []render.Object{s}
}
return nil
} | pkg/surface/sphere.go | 0.627951 | 0.437463 | sphere.go | starcoder |
package paunch
// bounding is an object that represents a bounding box. It is meant to be
// used through the Collider interface.
type bounding struct {
start *point
end *point
}
func newBounding(start *point, end *point) *bounding {
var checkStart, checkEnd point
if start.x >= end.x {
checkEnd.x = start.x
checkStart.x = end.x
} else {
checkEnd.x = end.x
checkStart.x = start.x
}
if start.y >= end.y {
checkEnd.y = start.y
checkStart.y = end.y
} else {
checkEnd.y = end.y
checkStart.y = start.y
}
return &bounding{start: &checkStart, end: &checkEnd}
}
func (b *bounding) Move(x, y float64) {
b.start.Move(x, y)
b.end.Move(x, y)
}
func (b *bounding) SetPosition(x, y float64) {
width := b.end.x - b.start.x
height := b.end.y - b.start.y
newBounding := newBounding(newPoint(x, y), newPoint(x+width, y+height))
*b = *newBounding
}
func (b *bounding) Position() (x, y float64) {
return b.start.x, b.start.y
}
func (b *bounding) DistanceToTangentPoint(x, y float64, side Direction) (float64, float64) {
switch side {
case Up:
sideX := x
if x < b.start.x {
sideX = b.start.x
} else if x > b.end.x {
sideX = b.end.x
}
return getPointDistance(newPoint(x, y), newPoint(sideX, b.end.y))
case Down:
sideX := x
if x < b.start.x {
sideX = b.start.x
} else if x > b.end.x {
sideX = b.end.x
}
return getPointDistance(newPoint(x, y), newPoint(sideX, b.start.y))
case Left:
sideY := y
if y < b.start.y {
sideY = b.start.y
} else if y > b.end.y {
sideY = b.end.y
}
return getPointDistance(newPoint(x, y), newPoint(b.start.x, sideY))
case Right:
sideY := y
if y < b.start.y {
sideY = b.start.y
} else if y > b.end.y {
sideY = b.end.y
}
return getPointDistance(newPoint(x, y), newPoint(b.end.x, sideY))
default:
return 0, 0
}
}
func (b *bounding) onPoint(p *point) bool {
if p.x >= b.start.x && p.x <= b.end.x &&
p.y >= b.start.y && p.y <= b.end.y {
return true
}
return false
}
func (b *bounding) onBounding(b2 *bounding) bool {
if b.start.x > b2.end.x || b.end.x < b2.start.x ||
b.start.y > b2.end.y || b.end.y < b2.start.y {
return false
}
return true
}
func (p *point) onBounding(b *bounding) bool {
return b.onPoint(p)
} | bounding.go | 0.878262 | 0.603026 | bounding.go | starcoder |
package solid
import (
"math"
"github.com/cpmech/gosl/chk"
"github.com/cpmech/gosl/fun"
"github.com/cpmech/gosl/fun/dbf"
)
// RjointM1 implements a 1D plasticity model for rod-joints (links/interface)
// Note: σc has opposite sign convention: positive means compressive
type RjointM1 struct {
A_ks float64 // elasticity constant
A_τy0 float64 // initial yield stress
A_kh float64 // hardening modulus
A_μ float64 // friction coefficient
A_h float64 // perimeter of beam element
A_kl float64 // lateral stiffness
}
// add model to factory
func init() {
allocators["rjoint-m1"] = func() Model { return new(RjointM1) }
}
// Set_mu sets μ parameter
func (o *RjointM1) Set_mu(mu float64) {
o.A_μ = mu
}
// Free frees memory
func (o *RjointM1) Free() {
}
// GetRho returns density
func (o *RjointM1) GetRho() float64 {
return 0
}
// Init initialises model
func (o *RjointM1) Init(ndim int, pstress bool, prms dbf.Params) (err error) {
for _, p := range prms {
switch p.N {
case "ks":
o.A_ks = p.V
case "tauy0":
o.A_τy0 = p.V
case "kh":
o.A_kh = p.V
case "mu":
o.A_μ = p.V
case "h":
o.A_h = p.V
case "kl":
o.A_kl = p.V
}
}
ZERO := 1e-7
if o.A_ks < ZERO || o.A_τy0 < ZERO || o.A_μ < ZERO || o.A_h < ZERO || o.A_kl < ZERO {
return chk.Err("invalid parameters: {ks=%g, tauy0=%g, mu=%g, h=%g, kl=%g} must be all > 0", o.A_ks, o.A_τy0, o.A_μ, o.A_h, o.A_kl)
}
return
}
// GetPrms gets (an example) of parameters
func (o RjointM1) GetPrms() dbf.Params {
return []*dbf.P{
&dbf.P{N: "ks", V: 1e4},
&dbf.P{N: "tauy0", V: 20},
&dbf.P{N: "kh", V: 0},
&dbf.P{N: "mu", V: 0.5},
&dbf.P{N: "h", V: 0.1},
&dbf.P{N: "kl", V: 1e4},
}
}
// InitIntVars: unused
func (o *RjointM1) InitIntVars(σ []float64) (s *State, err error) {
return
}
// InitIntVars initialises internal (secondary) variables
func (o RjointM1) InitIntVars1D() (s *OnedState, err error) {
s = NewOnedState(1, 2) // 1:{ωpb} 2:{q1,q2}
return
}
// Update updates stresses for given strains
// Note: σc has opposite sign convention: positive means compressive
func (o *RjointM1) Update(s *OnedState, σcNew, Δω float64) (err error) {
// limit σcNew
if σcNew < 0 {
σcNew = 0
}
// internal values
τ := &s.Sig
ωpb := &s.Alp[0]
// trial stress
τ_tr := (*τ) + o.A_ks*Δω
f_tr := math.Abs(τ_tr) - (o.A_τy0 + o.A_kh*(*ωpb) + o.A_μ*σcNew)
// elastic update
if f_tr <= 0.0 {
*τ = τ_tr
s.Loading = false
return
}
// plastic update
Δγ := f_tr / (o.A_ks + o.A_kh)
*τ = τ_tr - o.A_ks*Δγ*fun.Sign(τ_tr)
*ωpb += Δγ
s.Loading = true
return
}
// CalcD computes D = dσ_new/dε_new consistent with StressUpdate
func (o *RjointM1) CalcD(s *OnedState, firstIt bool) (DτDω, DτDσc float64, err error) {
// elastic
if !s.Loading {
return o.A_ks, 0, nil
}
// plastic
τ := s.Sig
DτDω = o.A_ks * o.A_kh / (o.A_ks + o.A_kh)
DτDσc = o.A_ks * o.A_μ * fun.Sign(τ) / (o.A_ks + o.A_kh)
return
} | mdl/solid/rjointm1.go | 0.667473 | 0.419113 | rjointm1.go | starcoder |
package neuralNetwork
import (
"fmt"
"math"
"gonum.org/v1/gonum/mat"
)
type lossBaseStruct struct{}
// LossFunctions is the interface for matLoss (matSquareLoss,...)
type LossFunctions interface {
Loss(Ytrue, Ypred, Grad *mat.Dense) float64
}
type squareLoss struct{ lossBaseStruct }
func (squareLoss) Loss(Ytrue, Ypred, Grad *mat.Dense) float64 {
nSamples, _ := Ytrue.Dims()
// J:=(h-y)^2/2
// Ydiff := matSub{A: Ypred, B: Ytrue}
// J := metrics.MeanSquaredError(Ytrue, Ypred, nil, "").At(0, 0)
J := matx{}.SumApplied2(Ytrue, Ypred, func(y, h float64) float64 { yd := h - y; return yd * yd / 2. })
// Grad:=(h-y)
if Grad != nil {
//Grad.Scale(1./float64(nSamples), Ydiff)
matx{Dense: Grad}.CopyScaledApplied2(Ytrue, Ypred, 1./float64(nSamples), func(y, h float64) float64 { return h - y })
}
return J
}
// logLoss implements the negative log-likelihood loss J = -Σ y·log(h) / n.
type logLoss struct{ lossBaseStruct }

// Loss returns the log loss and, when Grad is non-nil, writes the mean
// gradient -y/(h·nSamples) into Grad.
// NOTE(review): unlike crossEntropyLoss below, h is not clamped away from
// zero here, so h == 0 with y != 0 produces ±Inf — confirm callers
// guarantee h > 0.
func (logLoss) Loss(Ytrue, Ypred, Grad *mat.Dense) float64 {
	nSamples, _ := Ytrue.Dims()
	// J:=-y log(h)
	//J := -mat.Sum(matMulElem{A: Ytrue, B: base.MatApply1{Matrix: Ypred, Func: math.Log}}) / float64(nSamples)
	J := matx{}.SumApplied2(Ytrue, Ypred, func(y, h float64) float64 { return -y * math.Log(h) }) / float64(nSamples)
	// Grad:=-y/h
	if Grad != nil {
		Gfun := func(y, h float64) float64 { return -y / h }
		//Grad.Scale(1./float64(nSamples), matApply2{A: Ytrue, B: Ypred, Func: Gfun})
		matx{Dense: Grad}.CopyScaledApplied2(Ytrue, Ypred, 1./float64(nSamples), Gfun)
	}
	return J
}
// crossEntropyLoss implements the binary cross-entropy loss
// J = Σ [-y·log(h) - (1-y)·log(1-h)] / n, with h clamped into (0,1) to
// avoid infinities.
type crossEntropyLoss struct{ lossBaseStruct }

// Loss returns the cross-entropy loss and, when Grad is non-nil, writes the
// mean gradient [-y/h + (1-y)/(1-h)]/nSamples into Grad.
// NOTE(review): the loss clamps h with eps = 1e-30 while the gradient uses
// eps = 1e-12 — confirm the two tolerances are meant to differ.
func (crossEntropyLoss) Loss(Ytrue, Ypred, Grad *mat.Dense) float64 {
	nSamples, _ := Ytrue.Dims()
	// J:=-y log(h)-(1-y) log(1-h)
	Jfun := func(y, h float64) float64 {
		// clamp h into (0,1) so both logs stay finite
		eps := 1e-30
		if h <= 0 {
			h = eps
		} else if h >= 1. {
			h = 1 - eps
		}
		return -y*math.Log(h) - (1.-y)*math.Log1p(-h)
	}
	//fmt.Printf("h11=%f J11=%f\n", Ypred.At(0, 0), Jfun(Ytrue.At(0, 0), Ypred.At(0, 0))/float64(nSamples))
	J := matx{}.SumApplied2(Ytrue, Ypred, Jfun) / float64(nSamples)
	if Grad != nil {
		// Grad:=-y/h+(1-y)/(1-h)
		Gfun := func(y, h float64) float64 {
			// clamp h into (0,1) so both quotients stay finite
			eps := 1e-12
			if h <= 0 {
				h = eps
			} else if h >= 1. {
				h = 1 - eps
			}
			return -y/h + (1-y)/(1-h)
		}
		//Grad.Scale(1./float64(nSamples), matApply2{A: Ytrue, B: Ypred, Func: Gfun})
		matx{Dense: Grad}.CopyScaledApplied2(Ytrue, Ypred, 1./float64(nSamples), Gfun)
	}
	return J
}
// SupportedLoss maps a loss name to its LossFunctions provider; these are
// the names accepted by NewLoss.
var SupportedLoss = map[string]LossFunctions{
	"square":        squareLoss{},
	"log":           logLoss{},
	"cross-entropy": crossEntropyLoss{},
}
// NewLoss creates a LossFunctions by its name
func NewLoss(name string) LossFunctions {
loss, ok := SupportedLoss[name]
if !ok {
panic(fmt.Errorf("loss %s is unknown", name))
}
return loss
} | neural_network/loss.go | 0.730386 | 0.457985 | loss.go | starcoder |
package main
// Owner structure contains the name of the Company and the name of the Lessor of the basic machine
//type Owner struct {
//	Company string `json:"company"`
//	Lessor  string `json:"lessor"`
//}
// BasicMachine describes a rentable machine asset: its owner (Lessor),
// lifecycle Status, pricing, accumulated usage and the current rental
// details (Lessee, RentalTime, PlaceOfDelivery, WorkHours).
type BasicMachine struct {
	ID              string `json:"id"`
	Lessor          string `json:"lessor"`
	Status          string `json:"status"`
	ReservePrice    uint64 `json:"reserveprice"`
	WorkedHours     uint64 `json:"workedhours"`
	PricePerHour    uint64 `json:"priceperhour"`
	Lessee          string `json:"lessee"`
	RentalTime      string `json:"rentaltime"`
	PlaceOfDelivery string `json:"placeofdelivery"`
	WorkHours       uint64 `json:"workhours"`
}
// IotaPayload holds the IOTA MAM (Masked Authenticated Messaging) channel
// parameters: the seed, serialized MAM state, channel root, mode and side key.
type IotaPayload struct {
	Seed     string `json:"seed"`
	MamState string `json:"mamState"`
	Root     string `json:"root"`
	Mode     string `json:"mode"`
	SideKey  string `json:"sideKey"`
}
// SetStatusAvailable sets the status of the machine to AVAILABLE.
func (ba *BasicMachine) SetStatusAvailable() {
	ba.Status = "AVAILABLE"
}

// SetStatusReserved sets the status of the machine to RESERVED.
func (ba *BasicMachine) SetStatusReserved() {
	ba.Status = "RESERVED"
}

// SetStatusSent sets the status of the machine to SENT.
func (ba *BasicMachine) SetStatusSent() {
	ba.Status = "SENT"
}

// SetStatusReceived sets the status of the machine to RECEIVED.
func (ba *BasicMachine) SetStatusReceived() {
	ba.Status = "RECEIVED"
}

// SetStatusWorking sets the status of the machine to WORKING.
func (ba *BasicMachine) SetStatusWorking() {
	ba.Status = "WORKING"
}

// SetStatusReturned sets the status of the machine to RETURNED.
func (ba *BasicMachine) SetStatusReturned() {
	ba.Status = "RETURNED"
}

// SetStatusInConpany sets the status of the machine to IN COMPANY.
// NOTE(review): the method name misspells "Company"; it is kept as-is for
// backward compatibility with existing callers.
func (ba *BasicMachine) SetStatusInConpany() {
	ba.Status = "IN COMPANY"
}

// SetStatusInMaintenance sets the status of the machine to IN MAINTENANCE.
func (ba *BasicMachine) SetStatusInMaintenance() {
	ba.Status = "IN MAINTENANCE"
}

// SetLessee resets the Lessee of the machine to NO LESSEE.
func (ba *BasicMachine) SetLessee() {
	ba.Lessee = "NO LESSEE"
}

// SetRentalTime resets the rental time of the machine to NO RESERVE TIME.
func (ba *BasicMachine) SetRentalTime() {
	ba.RentalTime = "NO RESERVE TIME"
}

// SetPlaceOfDelivery resets the delivery destination to NO DESTINATION.
func (ba *BasicMachine) SetPlaceOfDelivery() {
	ba.PlaceOfDelivery = "NO DESTINATION"
}

// SetWorkHours resets the work hours of the machine to zero.
func (ba *BasicMachine) SetWorkHours() {
	ba.WorkHours = 0
}
package zebra
// Solution holds the puzzle answer: who drinks water and who owns the zebra.
type Solution struct {
	DrinksWater, OwnsZebra string
}

// House bundles the five attributes of one house: the resident's
// nationality, the house colour, pet, drink and cigarette brand.
type House struct {
	n Nationality
	c Colour
	a Animal
	d Drink
	s Smoke
}

// HouseSet is an ordered street of all five houses (index 0 is leftmost).
type HouseSet [5]*House
// The five properties of the houses and their possible values. Each
// property is a small enum; the parallel string slices below give the
// human-readable names, indexed by the enum value.

// Nationality identifies the resident of a house.
type Nationality uint8

const (
	English Nationality = iota
	Swede
	Dane
	Norwegian
	Japanese
)

// Colour is the paint colour of a house.
type Colour uint8

const (
	Red Colour = iota
	Green
	White
	Yellow
	Blue
)

// Animal is the pet kept in a house.
type Animal uint8

const (
	Dog Animal = iota
	Birds
	Cats
	Horse
	Zebra
)

// Drink is the beverage preferred in a house.
type Drink uint8

const (
	Tea Drink = iota
	Coffee
	Milk
	Beer
	Water
)

// Smoke is the cigarette brand smoked in a house.
type Smoke uint8

const (
	PallMall Smoke = iota
	Dunhill
	Blend
	BlueMaster
	Prince
)

// String representations of the properties, indexed by the enum values above.
var (
	nationalities = [...]string{"English", "Swede", "Dane", "Norwegian", "Japanese"}
	colours       = [...]string{"red", "green", "white", "yellow", "blue"}
	animals       = [...]string{"dog", "birds", "cats", "horse", "zebra"}
	drinks        = [...]string{"tea", "coffee", "milk", "beer", "water"}
	smokes        = [...]string{"Pall Mall", "Dunhill", "Blend", "Blue Master", "Prince"}
)
// simpleBruteForce solves the puzzle in two phases: it first enumerates all
// single-house attribute combinations that pass House.Valid, then searches
// ordered 5-tuples of those candidates (left to right) with pairwise
// distinct attributes, pruning early with the positional conditions 9 and
// 10, and keeps tuples passing HouseSet.Valid. It returns the number of
// valid arrangements found and the last one.
func simpleBruteForce() (int, HouseSet) {
	// Phase 1: all individually-valid houses.
	var v []House
	for n := range nationalities {
		for c := range colours {
			for a := range animals {
				for d := range drinks {
					for s := range smokes {
						h := House{
							n: Nationality(n),
							c: Colour(c),
							a: Animal(a),
							d: Drink(d),
							s: Smoke(s),
						}
						if !h.Valid() {
							continue
						}
						v = append(v, h)
					}
				}
			}
		}
	}
	// Phase 2: pick five mutually-compatible houses in street order.
	n := len(v)
	valid := 0
	var validSet HouseSet
	for a := 0; a < n; a++ {
		if v[a].n != Norwegian { // Condition 10:
			continue
		}
		for b := 0; b < n; b++ {
			if b == a {
				continue
			}
			if v[b].hasDupeAttr(&v[a]) {
				continue
			}
			for c := 0; c < n; c++ {
				if c == b || c == a {
					continue
				}
				if v[c].d != Milk { // Condition 9:
					continue
				}
				if v[c].hasDupeAttr(&v[b], &v[a]) {
					continue
				}
				for d := 0; d < n; d++ {
					if d == c || d == b || d == a {
						continue
					}
					if v[d].hasDupeAttr(&v[c], &v[b], &v[a]) {
						continue
					}
					for e := 0; e < n; e++ {
						if e == d || e == c || e == b || e == a {
							continue
						}
						if v[e].hasDupeAttr(&v[d], &v[c], &v[b], &v[a]) {
							continue
						}
						set := HouseSet{&v[a], &v[b], &v[c], &v[d], &v[e]}
						if set.Valid() {
							valid++
							validSet = set
						}
					}
				}
			}
		}
	}
	return valid, validSet
}
// hasDupeAttr reports whether h shares any single attribute (nationality,
// colour, animal, drink or smoke) with one of the given houses.
func (h *House) hasDupeAttr(list ...*House) bool {
	for _, other := range list {
		switch {
		case h.n == other.n, h.c == other.c, h.a == other.a, h.d == other.d, h.s == other.s:
			return true
		}
	}
	return false
}
// Valid checks the single-house conditions (2, 3, 4, 6, 7, 8, 11, 12, 13,
// 14, 15, 16). Biconditional conditions ("the X is Y") are enforced in both
// directions so that the attribute pairing is exclusive; the neighbour
// conditions 11, 12, 15, 16 are only checked here in their same-house
// (forbidden) form — the distance-1 part is checked in HouseSet.Valid.
func (h *House) Valid() bool {
	// Condition 2:
	if h.n == English && h.c != Red || h.n != English && h.c == Red {
		return false
	}
	// Condition 3:
	if h.n == Swede && h.a != Dog || h.n != Swede && h.a == Dog {
		return false
	}
	// Condition 4:
	if h.n == Dane && h.d != Tea || h.n != Dane && h.d == Tea {
		return false
	}
	// Condition 6:
	if h.c == Green && h.d != Coffee || h.c != Green && h.d == Coffee {
		return false
	}
	// Condition 7:
	if h.a == Birds && h.s != PallMall || h.a != Birds && h.s == PallMall {
		return false
	}
	// Condition 8:
	if h.c == Yellow && h.s != Dunhill || h.c != Yellow && h.s == Dunhill {
		return false
	}
	// Condition 11:
	if h.a == Cats && h.s == Blend {
		return false
	}
	// Condition 12:
	if h.a == Horse && h.s == Dunhill {
		return false
	}
	// Condition 13:
	if h.d == Beer && h.s != BlueMaster || h.d != Beer && h.s == BlueMaster {
		return false
	}
	// Condition 14:
	if h.n == Japanese && h.s != Prince || h.n != Japanese && h.s == Prince {
		return false
	}
	// Condition 15:
	if h.n == Norwegian && h.c == Blue {
		return false
	}
	// Condition 16:
	if h.d == Water && h.s == Blend {
		return false
	}
	return true
}
// Valid checks the positional conditions of the puzzle on the ordered set
// hs: adjacency/neighbour conditions (5, 11, 12, 15, 16) via attribute
// index maps, plus the fixed-position conditions 9 (middle house drinks
// milk) and 10 (Norwegian in the first house).
func (hs *HouseSet) Valid() bool {
	// index of each attribute value within the street (attribute -> house index)
	ni := make(map[Nationality]int, 5)
	ci := make(map[Colour]int, 5)
	ai := make(map[Animal]int, 5)
	di := make(map[Drink]int, 5)
	si := make(map[Smoke]int, 5)
	for i, h := range hs {
		ni[h.n] = i
		ci[h.c] = i
		ai[h.a] = i
		di[h.d] = i
		si[h.s] = i
	}
	// Condition 5: the green house is immediately left of the white house
	if ci[Green]+1 != ci[White] {
		return false
	}
	// Condition 11:
	if dist(ai[Cats], si[Blend]) != 1 {
		return false
	}
	// Condition 12:
	if dist(ai[Horse], si[Dunhill]) != 1 {
		return false
	}
	// Condition 15:
	if dist(ni[Norwegian], ci[Blue]) != 1 {
		return false
	}
	// Condition 16:
	if dist(di[Water], si[Blend]) != 1 {
		return false
	}
	// Condition 9: (already tested elsewhere)
	if hs[2].d != Milk {
		return false
	}
	// Condition 10: (already tested elsewhere)
	if hs[0].n != Norwegian {
		return false
	}
	return true
}
// dist returns the absolute distance between two house indices.
func dist(a, b int) int {
	if b > a {
		a, b = b, a
	}
	return a - b
}
// SolvePuzzle runs the brute-force solver and extracts the answer: the
// nationality of the water drinker and of the zebra owner from the (last)
// valid arrangement.
func SolvePuzzle() Solution {
	_, result := simpleBruteForce()
	solution := Solution{}
	for _, house := range result {
		if house.d == Water {
			solution.DrinksWater = nationalities[house.n]
		}
		if house.a == Zebra {
			solution.OwnsZebra = nationalities[house.n]
		}
	}
	return solution
}
package expr
import (
"errors"
"fmt"
"math"
"strconv"
"unicode"
)
// Num is the numeric type used for all expression values.
type Num float64

// Errors reported by the tokenizer, parser and expression builders.
var (
	ErrParen                = errors.New("parenthesis mismatch")
	ErrUnexpectedNumber     = errors.New("unexpected number")
	ErrUnexpectedIdentifier = errors.New("unexpected identifier")
	ErrBadCall              = errors.New("function call expected")
	ErrBadVar               = errors.New("variable expected in assignment")
	ErrBadOp                = errors.New("unknown operator or function")
	ErrOperandMissing       = errors.New("missing operand")
)
// Supported arithmetic operations. The iota order doubles as operator
// precedence: smaller values bind tighter (unary ops highest, comma lowest);
// the parser compares these values directly.
type arithOp int

const (
	unaryMinus arithOp = iota + 1
	unaryLogicalNot
	unaryBitwiseNot
	power
	multiply
	divide
	remainder
	plus
	minus
	shl
	shr
	lessThan
	lessOrEquals
	greaterThan
	greaterOrEquals
	equals
	notEquals
	bitwiseAnd
	bitwiseXor
	bitwiseOr
	logicalAnd
	logicalOr
	assign
	comma
)

// ops maps operator token text to its arithOp; unary operators carry a "u"
// suffix appended by the tokenizer to distinguish them from binary ones.
var ops = map[string]arithOp{
	"-u": unaryMinus, "!u": unaryLogicalNot, "^u": unaryBitwiseNot,
	"**": power, "*": multiply, "/": divide, "%": remainder,
	"+": plus, "-": minus,
	"<<": shl, ">>": shr,
	"<": lessThan, "<=": lessOrEquals, ">": greaterThan, ">=": greaterOrEquals,
	"==": equals, "!=": notEquals,
	"&": bitwiseAnd, "^": bitwiseXor, "|": bitwiseOr,
	"&&": logicalAnd, "||": logicalOr,
	"=": assign, ",": comma,
}
// isUnary reports whether op is one of the unary operators.
func isUnary(op arithOp) bool {
	switch op {
	case unaryMinus, unaryLogicalNot, unaryBitwiseNot:
		return true
	}
	return false
}

// isLeftAssoc reports whether op is left-associative; unary operators,
// assignment, power and comma associate to the right.
func isLeftAssoc(op arithOp) bool {
	switch {
	case isUnary(op), op == assign, op == power, op == comma:
		return false
	}
	return true
}

// boolNum converts a boolean into the numeric 1 (true) or 0 (false).
func boolNum(b bool) Num {
	if !b {
		return 0
	}
	return 1
}
// Expr is an evaluable expression node.
type Expr interface {
	Eval() Num
}

// Constant expression always returns the same value when evaluated.
type constExpr struct {
	value Num
}

// Eval returns the stored constant.
func (e *constExpr) Eval() Num {
	return e.value
}

func (e *constExpr) String() string {
	return fmt.Sprintf("#%v", e.value)
}
// Var is a mutable variable expression: evaluating it returns the currently
// stored value, which can be replaced via Set.
type Var interface {
	Expr
	Set(value Num)
	Get() Num
}

// varExpr is the default Var implementation backed by a plain Num.
type varExpr struct {
	value Num
}

// NewVar returns a Var initialized to value.
func NewVar(value Num) Var {
	return &varExpr{value: value}
}

// Eval returns the current value of the variable.
func (e *varExpr) Eval() Num {
	return e.value
}

// Set replaces the variable's value.
func (e *varExpr) Set(value Num) {
	e.value = value
}

// Get returns the current value of the variable.
func (e *varExpr) Get() Num {
	return e.value
}

func (e *varExpr) String() string {
	return fmt.Sprintf("{%v}", e.value)
}
// Func is a user-supplied function callable from expressions; it receives
// its call context (arguments, variables, environment).
type Func func(f *FuncContext) Num

// FuncContext is the expression node for a function call. It carries the
// unevaluated argument expressions, the variable scope of the Parse call
// and an arbitrary user environment.
type FuncContext struct {
	f    Func
	Args []Expr
	Vars map[string]Var
	Env  interface{}
}

// Eval invokes the wrapped function with this context.
func (f *FuncContext) Eval() Num {
	return f.f(f)
}

func (f *FuncContext) String() string {
	return fmt.Sprintf("fn%v", f.Args)
}
// unaryExpr applies a unary operator to a single sub-expression.
type unaryExpr struct {
	op  arithOp
	arg Expr
}

// newUnaryExpr builds a unary operator node.
func newUnaryExpr(op arithOp, arg Expr) Expr {
	return &unaryExpr{op: op, arg: arg}
}

// Eval evaluates the argument and applies the operator. Any op outside the
// unary set yields the zero value 0.
func (e *unaryExpr) Eval() (res Num) {
	switch e.op {
	case unaryMinus:
		res = -e.arg.Eval()
	case unaryBitwiseNot:
		// Bitwise operation can only be applied to integer values
		res = Num(^int64(e.arg.Eval()))
	case unaryLogicalNot:
		res = boolNum(e.arg.Eval() == 0)
	}
	return res
}

func (e *unaryExpr) String() string {
	return fmt.Sprintf("<%v>(%v)", e.op, e.arg)
}
// binaryExpr applies a binary operator to two sub-expressions.
type binaryExpr struct {
	op arithOp
	a Expr
	b Expr
}

// newBinaryExpr builds a binary operator node. An assignment whose
// left-hand side is not a variable yields ErrBadVar.
func newBinaryExpr(op arithOp, a, b Expr) (Expr, error) {
	if op == assign {
		if _, ok := a.(*varExpr); !ok {
			return nil, ErrBadVar
		}
	}
	return &binaryExpr{op: op, a: a, b: b}, nil
}

// Eval evaluates the operands and applies the operator. Division and
// remainder by zero yield 0 rather than NaN/Inf; bitwise and shift
// operators work on the int64 truncation of the operands; logical and/or
// short-circuit (the right operand is evaluated only when needed).
func (e *binaryExpr) Eval() (res Num) {
	switch e.op {
	case power:
		res = Num(math.Pow(float64(e.a.Eval()), float64(e.b.Eval())))
	case multiply:
		res = e.a.Eval() * e.b.Eval()
	case divide:
		tmp := e.b.Eval()
		if tmp != 0 {
			res = e.a.Eval() / tmp
		}
	case remainder:
		// NOTE: math.Remainder is the IEEE 754 remainder, which may differ
		// in sign from a truncated %-style modulo.
		tmp := e.b.Eval()
		if tmp != 0 {
			res = Num(math.Remainder(float64(e.a.Eval()), float64(tmp)))
		}
	case plus:
		res = e.a.Eval() + e.b.Eval()
	case minus:
		res = e.a.Eval() - e.b.Eval()
	case shl:
		res = Num(int64(e.a.Eval()) << uint(e.b.Eval()))
	case shr:
		res = Num(int64(e.a.Eval()) >> uint(e.b.Eval()))
	case lessThan:
		res = boolNum(e.a.Eval() < e.b.Eval())
	case lessOrEquals:
		res = boolNum(e.a.Eval() <= e.b.Eval())
	case greaterThan:
		res = boolNum(e.a.Eval() > e.b.Eval())
	case greaterOrEquals:
		res = boolNum(e.a.Eval() >= e.b.Eval())
	case equals:
		res = boolNum(e.a.Eval() == e.b.Eval())
	case notEquals:
		res = boolNum(e.a.Eval() != e.b.Eval())
	case bitwiseAnd:
		return Num(int64(e.a.Eval()) & int64(e.b.Eval()))
	case bitwiseXor:
		return Num(int64(e.a.Eval()) ^ int64(e.b.Eval()))
	case bitwiseOr:
		return Num(int64(e.a.Eval()) | int64(e.b.Eval()))
	case logicalAnd:
		// result is b when both operands are non-zero, else 0
		if a := e.a.Eval(); a != 0 {
			if b := e.b.Eval(); b != 0 {
				res = b
			}
		}
	case logicalOr:
		// result is the first non-zero operand, else 0
		if a := e.a.Eval(); a != 0 {
			res = a
		} else if b := e.b.Eval(); b != 0 {
			res = b
		}
	case assign:
		// store the right-hand value into the left-hand variable
		res = e.b.Eval()
		e.a.(*varExpr).Set(res)
	case comma:
		// evaluate left for side effects, return right
		e.a.Eval()
		res = e.b.Eval()
	}
	return res
}

func (e *binaryExpr) String() string {
	return fmt.Sprintf("<%v>(%v, %v)", e.op, e.a, e.b)
}
// Token kind bit flags used by the tokenizer to track which token kinds are
// syntactically allowed at the current position.
const (
	tokNumber = 1 << iota
	tokWord
	tokOp
	tokOpen
	tokClose
)
// tokenize splits input into number, identifier, parenthesis and operator
// tokens. It tracks an `expected` bitmask of token kinds allowed next to
// detect syntax errors and to distinguish unary from binary operators:
// a -, ^ or ! appearing where an operand is expected is emitted with a "u"
// suffix ("-u", "^u", "!u") matching the unary entries in ops.
func tokenize(input []rune) (tokens []string, err error) {
	pos := 0
	expected := tokOpen | tokNumber | tokWord
	for pos < len(input) {
		tok := []rune{}
		c := input[pos]
		if unicode.IsSpace(c) {
			pos++
			continue
		}
		if unicode.IsNumber(c) {
			// number literal: digits with optional embedded dots
			if expected&tokNumber == 0 {
				return nil, ErrUnexpectedNumber
			}
			expected = tokOp | tokClose
			for (c == '.' || unicode.IsNumber(c)) && pos < len(input) {
				tok = append(tok, input[pos])
				pos++
				if pos < len(input) {
					c = input[pos]
				} else {
					c = 0
				}
			}
		} else if unicode.IsLetter(c) {
			// identifier: letters, digits and underscores
			if expected&tokWord == 0 {
				return nil, ErrUnexpectedIdentifier
			}
			expected = tokOp | tokOpen | tokClose
			for (unicode.IsLetter(c) || unicode.IsNumber(c) || c == '_') && pos < len(input) {
				tok = append(tok, input[pos])
				pos++
				if pos < len(input) {
					c = input[pos]
				} else {
					c = 0
				}
			}
		} else if c == '(' || c == ')' {
			tok = append(tok, c)
			pos++
			if c == '(' && (expected&tokOpen) != 0 {
				expected = tokNumber | tokWord | tokOpen | tokClose
			} else if c == ')' && (expected&tokClose) != 0 {
				expected = tokOp | tokClose
			} else {
				return nil, ErrParen
			}
		} else {
			if expected&tokOp == 0 {
				// operand position: only the unary prefixes are allowed here
				if c != '-' && c != '^' && c != '!' {
					return nil, ErrOperandMissing
				}
				tok = append(tok, c, 'u')
				pos++
			} else {
				// binary operator: greedily extend while the text still
				// matches a known operator (longest-match, e.g. "<<" over "<")
				var lastOp string
				for !unicode.IsLetter(c) && !unicode.IsNumber(c) && !unicode.IsSpace(c) &&
					c != '_' && c != '(' && c != ')' && pos < len(input) {
					if _, ok := ops[string(tok)+string(input[pos])]; ok {
						tok = append(tok, input[pos])
						lastOp = string(tok)
					} else if lastOp == "" {
						tok = append(tok, input[pos])
					} else {
						break
					}
					pos++
					if pos < len(input) {
						c = input[pos]
					} else {
						c = 0
					}
				}
				if lastOp == "" {
					return nil, ErrBadOp
				}
			}
			expected = tokNumber | tokWord | tokOpen
		}
		tokens = append(tokens, string(tok))
	}
	return tokens, nil
}
// stringStack is a minimal LIFO stack of strings.
type stringStack []string

// Push places s on top of the stack.
func (ss *stringStack) Push(s string) {
	*ss = append(*ss, s)
}

// Peek returns the top element without removing it, or "" when empty.
func (ss *stringStack) Peek() string {
	if len(*ss) == 0 {
		return ""
	}
	return (*ss)[len(*ss)-1]
}

// Pop removes and returns the top element, or "" when empty.
func (ss *stringStack) Pop() string {
	l := len(*ss)
	if l == 0 {
		return ""
	}
	top := (*ss)[l-1]
	*ss = (*ss)[:l-1]
	return top
}
// exprStack is a minimal LIFO stack of expressions.
type exprStack []Expr

// Push places e on top of the stack.
func (es *exprStack) Push(e Expr) {
	*es = append(*es, e)
}

// Peek returns the top expression without removing it, or nil when empty.
func (es *exprStack) Peek() Expr {
	if len(*es) == 0 {
		return nil
	}
	return (*es)[len(*es)-1]
}

// Pop removes and returns the top expression, or nil when empty.
func (es *exprStack) Pop() Expr {
	l := len(*es)
	if l == 0 {
		return nil
	}
	top := (*es)[l-1]
	*es = (*es)[:l-1]
	return top
}
// Parenthesis expectation states used by Parse: after a function name an
// opening paren is required (parenExpected); after an operand one is
// forbidden (parenForbidden); otherwise optional (parenAllowed).
const (
	parenAllowed = iota
	parenExpected
	parenForbidden
)
// Parse compiles input into an Expr using a shunting-yard algorithm with an
// operator stack (os) and an expression stack (es). Identifiers resolve to
// entries of vars (unknown names are created as zero-valued Vars and added
// to the map — vars is mutated); names present in funcs become function
// calls whose "(" is pushed as "{" to mark a call frame. An empty input
// yields a constant-zero expression.
func Parse(input string, vars map[string]Var, funcs map[string]Func) (Expr, error) {
	os := stringStack{}
	es := exprStack{}
	paren := parenAllowed
	if tokens, err := tokenize([]rune(input)); err != nil {
		return nil, err
	} else {
		for _, token := range tokens {
			parenNext := parenAllowed
			if token == "(" {
				// "{" marks a function-call frame, "(" a plain group
				if paren == parenExpected {
					os.Push("{")
				} else if paren == parenAllowed {
					os.Push("(")
				} else {
					return nil, ErrBadCall
				}
			} else if paren == parenExpected {
				return nil, ErrBadCall
			} else if token == ")" {
				// reduce until the matching opener; a "{" closes a call
				for len(os) > 0 && os.Peek() != "(" && os.Peek() != "{" {
					if expr, err := bind(os.Pop(), funcs, &es); err != nil {
						return nil, err
					} else {
						es.Push(expr)
					}
				}
				if len(os) == 0 {
					return nil, ErrParen
				}
				if open := os.Pop(); open == "{" {
					f := funcs[os.Pop()]
					args := list(es.Pop())
					es.Push(&FuncContext{f: f, Vars: vars, Args: args})
				}
				parenNext = parenForbidden
			} else if n, err := strconv.ParseFloat(token, 64); err == nil {
				// Number
				es.Push(&constExpr{value: Num(n)})
				parenNext = parenForbidden
			} else if _, ok := funcs[token]; ok {
				// Function
				os.Push(token)
				parenNext = parenExpected
			} else if op, ok := ops[token]; ok {
				// Operator: reduce higher-precedence (lower-valued) ops first,
				// honoring associativity
				o2 := os.Peek()
				for ops[o2] != 0 && ((isLeftAssoc(op) && op >= ops[o2]) || op > ops[o2]) {
					if expr, err := bind(o2, funcs, &es); err != nil {
						return nil, err
					} else {
						es.Push(expr)
					}
					os.Pop()
					o2 = os.Peek()
				}
				os.Push(token)
			} else {
				// Variable: reuse an existing Var or create a zero-valued one
				if v, ok := vars[token]; ok {
					es.Push(v)
				} else {
					v = NewVar(0)
					vars[token] = v
					es.Push(v)
				}
				parenNext = parenForbidden
			}
			paren = parenNext
		}
		if paren == parenExpected {
			return nil, ErrBadCall
		}
		// reduce any remaining operators
		for len(os) > 0 {
			op := os.Pop()
			if op == "(" || op == ")" {
				return nil, ErrParen
			}
			if expr, err := bind(op, funcs, &es); err != nil {
				return nil, err
			} else {
				es.Push(expr)
			}
		}
		if len(es) == 0 {
			return &constExpr{}, nil
		} else {
			e := es.Pop()
			return e, nil
		}
	}
}
// bind pops the operand(s) for the operator named name from the expression
// stack and returns the combined expression node. A name that is not an
// operator yields ErrBadCall; a short stack yields ErrOperandMissing.
func bind(name string, funcs map[string]Func, stack *exprStack) (Expr, error) {
	op, ok := ops[name]
	if !ok {
		return nil, ErrBadCall
	}
	if isUnary(op) {
		if stack.Peek() == nil {
			return nil, ErrOperandMissing
		}
		return newUnaryExpr(op, stack.Pop()), nil
	}
	b := stack.Pop()
	a := stack.Pop()
	if a == nil || b == nil {
		return nil, ErrOperandMissing
	}
	return newBinaryExpr(op, a, b)
}
func list(e Expr) []Expr {
if e == nil {
return []Expr{}
} else if b, ok := e.(*binaryExpr); ok && b.op == comma {
return append([]Expr{b.a}, list(b.b)...)
} else {
return []Expr{e}
}
} | expr.go | 0.605449 | 0.437523 | expr.go | starcoder |
package shingo
import (
"math"
"time"
"github.com/pkg/errors"
)
// AppendMACD appends Moving Average Convergence Divergence indicators to
// each candlestick.
//
// It generates the slow (MacdLarge) and fast (MacdSmall) EMAs on the
// series, builds the MACD line (EMA(large) - EMA(small)) on a scratch
// series, takes the signal-line EMA of that, and stores the
// MACD/signal/histogram triple on the most recent `limit` candlesticks.
func (cs *Candlesticks) AppendMACD(args IndicatorInputArg) error {
	period1 := args.MacdLarge
	period2 := args.MacdSmall
	signalLine := args.MacdSignal
	limit := args.Limit
	if period1 == 0 {
		return errors.New("MacdLarge must be greater than zero")
	}
	if period2 == 0 {
		return errors.New("MacdSmall must be greater than zero")
	}
	if signalLine == 0 {
		return errors.New("signalLine must be greater than zero")
	}
	if period1 >= period2 {
		return errors.New("Period1 must be less than Period2 in MACD")
	}
	cs.mux.Lock()
	defer cs.mux.Unlock()
	cl := cs.Total()
	if cl < 1 {
		return nil
	}
	if limit < 1 {
		limit = cs.Total()
	}
	// make sure both EMAs exist on the full series
	if err := cs.GenerateIndicator(IndicatorTypeEMA, IndicatorInputArg{
		Period: period1,
		Limit:  cl,
	}); err != nil {
		return errors.Wrap(err, "Error generating period1 indicator")
	}
	if err := cs.GenerateIndicator(IndicatorTypeEMA, IndicatorInputArg{
		Period: period2,
		Limit:  cl,
	}); err != nil {
		return errors.Wrap(err, "Error generating period2 indicator")
	}
	// build the MACD line on a scratch series (stored in the Close slot)
	cst, err := NewCandlesticks(IntervalOneDay, 100)
	if err != nil {
		return errors.Wrap(err, "Error creating candlesticks for macd signal line")
	}
	for i := 0; i < cl; i++ {
		v := cs.ItemAtIndex(i)
		ema1 := v.GetEMA(period1)
		ema2 := v.GetEMA(period2)
		if ema1 == nil || ema2 == nil {
			continue
		}
		val := ema1.Value - ema2.Value
		c, err := NewCandlestick(0, val, 0, 0, time.Time{}, 0)
		if err != nil {
			return errors.Wrap(err, "Error creating candlestick in macd signal line")
		}
		cst.AppendCandlestick(c)
	}
	cstl := cst.Total()
	// signal line = EMA of the MACD line
	err = cst.GenerateIndicator(IndicatorTypeEMA, IndicatorInputArg{
		Period: signalLine,
		Limit:  cl,
	})
	if err != nil {
		return errors.Wrap(err, "Error creating ema for macd signal line")
	}
	// walk backwards over the candles that have a signal value, newest first
	endIdx := cl - cstl
	var count int
	for i := cl - 1; i >= endIdx; i-- {
		if count == limit {
			return nil
		}
		v := cs.ItemAtIndex(i)
		ci := int(math.Abs(float64(cl - i - cstl)))
		vi := cst.ItemAtIndex(ci)
		// FIX: the original checked `vi != nil && vi.GetEMA(...) == nil`,
		// which let a nil vi fall through to a nil-pointer dereference below.
		if v == nil || vi == nil || vi.GetEMA(signalLine) == nil {
			continue
		}
		ema1 := v.GetEMA(period1)
		ema2 := v.GetEMA(period2)
		if ema1 == nil || ema2 == nil {
			continue
		}
		macdValue := ema1.Value - ema2.Value
		signalValue := vi.GetEMA(signalLine).Value
		v.setMACD(period1, period2, signalLine, macdValue, signalValue)
		count++
	}
	return nil
}
// GetMACD returns the MACD delta stored for the (period1, period2, signal)
// triple, or nil when no such indicator has been computed yet.
func (c *Candlestick) GetMACD(period1, period2, signal int) *MACDDelta {
	if c.Indicators == nil {
		return nil
	}
	byLarge := c.Indicators.MACDs
	if byLarge == nil {
		return nil
	}
	bySmall := byLarge[period1]
	if bySmall == nil {
		return nil
	}
	bySignal := bySmall[period2]
	if bySignal == nil {
		return nil
	}
	return bySignal[signal]
}
func (c *Candlestick) setMACD(period1 int, period2 int, signal int, macdValue float64, signalValue float64) {
if c.Indicators.MACDs == nil {
c.Indicators.MACDs = make(map[int]map[int]map[int]*MACDDelta)
}
if c.Indicators.MACDs[period1] == nil {
c.Indicators.MACDs[period1] = make(map[int]map[int]*MACDDelta)
}
if c.Indicators.MACDs[period1][period2] == nil {
c.Indicators.MACDs[period1][period2] = make(map[int]*MACDDelta)
}
if c.Indicators.MACDs[period1][period2][signal] == nil {
c.Indicators.MACDs[period1][period2][signal] = &MACDDelta{
MACDValue: macdValue,
SignalValue: signalValue,
MACDHistogram: macdValue - signalValue,
}
}
} | macd.go | 0.57332 | 0.420421 | macd.go | starcoder |
package semver
import (
	"fmt"
	"regexp"
	"strconv"
	"strings"

	"github.com/spf13/cast"
)
// Precompiled patterns for the version grammar: "X as Y" aliases, stability
// suffixes (dev/beta/RC/...), branch names like "1.x", full version strings
// with optional build metadata, and date/time versions.
var (
	aliasRegex     = regexp.MustCompile(`^([^,\s]+)\s+as\s+([^,\s]+)$`)
	stabilityRegex = `[._-]?(?:(dev|stable|beta|b|RC|alpha|a|patch|pl|p)((?:[.-]?\d+)*)?)`
	branchRegex    = regexp.MustCompile(`^v?(\d+)(\.(?:\d+|[xX*]))?(\.(?:\d+|[xX*]))?(\.(?:\d+|[xX*]))?$`)
	versionRegex   =
	// Match normal version string (1.2.3)
	`^v?([0-9]{1,5})(\.[0-9]+)?(\.[0-9]+)?(\.[0-9]+)?` +
		// Match pre-release info (-beta.2). This supports dot, underscore, dash or nothing as a prefix to match Composers rules
		stabilityRegex + "?([.-]?dev)?"
	// Match metadata (E.G + build.1234)
	versionRegexC   = regexp.MustCompile(`(?i)` + versionRegex + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`)
	dateTimeRegex   = regexp.MustCompile(`^v?(\d{4}(?:[.:-]?\d{2}){1,6}(?:[.:-]?\d{1,3})?)` + stabilityRegex + `?$`)
	stabilityRegexC = regexp.MustCompile(`(?i)` + stabilityRegex)
	branchMatcher   = regexp.MustCompile(`(?i)(.*?)[.-]?dev$`)
	replaceRegex    = regexp.MustCompile(`([^0-9]+)`)
)
// NewVersion parses a Composer-style version string into a Version. It
// accepts, tried in order: an "X as Y" alias (only X is parsed), the
// special branches master/trunk/default (mapped to 9999999 dev), explicit
// "dev-" branch names, normal semver-like versions with stability suffixes
// and build metadata, date/time versions, and "-dev"-suffixed branches.
func NewVersion(version string) (*Version, error) {
	originalVersion := version
	// "X as Y": parse the aliased (left-hand) version, keep original text
	alias := aliasRegex.FindStringSubmatch(version)
	if alias != nil {
		version = alias[1]
	}
	// master/trunk/default (optionally "dev-"-prefixed) map to a maximal dev version
	if match, _ := regexp.Match("(?i)^(?:dev-)?(?:master|trunk|default)$", []byte(version)); match {
		return &Version{
			Major:     9999999,
			Stability: "dev",
			State:     "",
			Original:  originalVersion,
		}, nil
	}
	// explicit "dev-" branch names are kept verbatim (case-normalized prefix)
	if len(version) > 4 && "dev-" == strings.ToLower(version[0:4]) {
		return &Version{
			Parsed:   fmt.Sprintf("dev-%s", version[4:]),
			Original: originalVersion,
			isBranch: true,
		}, nil
	}
	// normal version: up to four numeric components, stability, state, metadata
	versionMatch := versionRegexC.FindStringSubmatch(version)
	if versionMatch != nil {
		stability := expandStability(versionMatch[5])
		return &Version{
			Major:      cast.ToInt(versionMatch[1]),
			Minor:      parseVersionNumber(versionMatch[2]),
			Patch:      parseVersionNumber(versionMatch[3]),
			Extra:      parseVersionNumber(versionMatch[4]),
			PreRelease: strings.TrimLeft(versionMatch[6], ".-"),
			Stability:  stability,
			State:      strings.TrimLeft(versionMatch[7], "-"),
			Metadata:   versionMatch[9],
			Original:   originalVersion,
		}, nil
	}
	// date/time version: separators normalized to dots
	dateTimeMatch := dateTimeRegex.FindStringSubmatch(version)
	if dateTimeMatch != nil {
		versionString := replaceRegex.ReplaceAllString(dateTimeMatch[1], `.`)
		return &Version{
			Stability: expandStability(dateTimeMatch[2]),
			Patch:     cast.ToInt(dateTimeMatch[3]),
			Parsed:    versionString,
			Original:  originalVersion,
			isDate:    true,
		}, nil
	}
	// "<name>-dev" / "<name>.dev": normalize as a branch
	branchMatches := branchMatcher.FindStringSubmatch(version)
	if nil != branchMatches {
		return NormalizeBranch(branchMatches[1])
	}
	return nil, fmt.Errorf("unable to parse version %s", version)
}
// NormalizeBranch converts a branch name into a Version. master/trunk/default
// go through NewVersion directly; numeric branches like "1.x" or "2.*" have
// missing/wildcard components expanded to 9999999 and a "-dev" suffix; any
// other name becomes a "dev-" branch.
func NormalizeBranch(branch string) (*Version, error) {
	valid := map[string]bool{"master": true, "trunk": true, "default": true}
	if valid[branch] {
		return NewVersion(branch)
	}
	branchMatches := branchRegex.FindStringSubmatch(branch)
	if nil != branchMatches {
		versionString := ""
		matchesLength := len(branchMatches)
		// normalize each of the four components: wildcards (x/X/*) become "x",
		// missing components are filled in as ".x"
		for i := 1; i < 5; i++ {
			if i < matchesLength && "" != branchMatches[i] {
				versionString += strings.Replace(strings.Replace(branchMatches[i], "X", "x", -1), "*", "x", -1)
			} else {
				versionString += ".x"
			}
		}
		return NewVersion(strings.Replace(versionString, "x", "9999999", -1) + "-dev")
	}
	return NewVersion("dev-" + branch)
}
// expandStability maps shorthand stability markers (a, b, p, pl, RC in any
// case) to their canonical names; anything else is returned unchanged.
func expandStability(stability string) string {
	switch strings.ToLower(stability) {
	case "a", "alpha":
		return "alpha"
	case "b", "beta":
		return "beta"
	case "p", "pl":
		return "patch"
	case "rc":
		return "RC"
	default:
		return stability
	}
}
// ParseStability extracts the stability channel of a version string:
// "" for empty input, "dev" for dev-prefixed/suffixed strings, the
// canonical channel (alpha/beta/RC/dev) when a stability marker is found,
// and "stable" otherwise. Note that "patch" markers are reported as stable.
func ParseStability(stability string) string {
	if "" == stability {
		return stability
	}
	// "dev-foo" or "foo-dev" is always the dev channel
	if len(stability) >= 4 && ("dev-" == strings.ToLower(stability[0:4]) || "-dev" == strings.ToLower(stability[len(stability)-4:])) {
		return "dev"
	}
	stabilityMatch := stabilityRegexC.FindStringSubmatch(stability)
	if nil != stabilityMatch {
		switch strings.ToLower(stabilityMatch[1]) {
		case "alpha", "a":
			return "alpha"
		case "beta", "b":
			return "beta"
		case "rc":
			return "RC"
		case "dev":
			return "dev"
		}
	}
	return "stable"
}
// parseVersionNumber converts an optional ".N" version-component fragment
// (as captured by versionRegex) to its integer value. An empty fragment —
// the component was absent — yields 0.
//
// FIX: the previous cast.ToInt call parsed with base auto-detection, so
// leading-zero components such as ".08"/".09" failed (invalid octal) and
// silently became 0; base-10 parsing handles them correctly. Unparseable
// or out-of-range input still yields 0, matching the old behavior.
func parseVersionNumber(version string) int {
	if "" == version {
		return 0
	}
	n, err := strconv.Atoi(strings.TrimPrefix(version, "."))
	if err != nil {
		return 0
	}
	return n
}
package plaid
import (
"encoding/json"
)
// AuthGetNumbers An object containing identifying numbers used for making electronic transfers to and from the `accounts`. The identifying number type (ACH, EFT, IBAN, or BACS) used will depend on the country of the account. An account may have more than one number type. If a particular identifying number type is not used by any `accounts` for which data has been requested, the array for that type will be empty.
type AuthGetNumbers struct {
	// An array of ACH numbers identifying accounts.
	Ach []NumbersACH `json:"ach"`
	// An array of EFT numbers identifying accounts.
	Eft []NumbersEFT `json:"eft"`
	// An array of IBAN numbers identifying accounts.
	International []NumbersInternational `json:"international"`
	// An array of BACS numbers identifying accounts.
	Bacs []NumbersBACS `json:"bacs"`
	// AdditionalProperties collects any JSON keys not covered by the
	// declared fields, so round-tripping preserves unknown data.
	AdditionalProperties map[string]interface{}
}

// _AuthGetNumbers is an alias used by UnmarshalJSON to decode the declared
// fields without recursing into the custom unmarshaler.
type _AuthGetNumbers AuthGetNumbers
// NewAuthGetNumbers instantiates a new AuthGetNumbers object with all
// required fields set.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewAuthGetNumbers(ach []NumbersACH, eft []NumbersEFT, international []NumbersInternational, bacs []NumbersBACS) *AuthGetNumbers {
	this := AuthGetNumbers{}
	this.Ach = ach
	this.Eft = eft
	this.International = international
	this.Bacs = bacs
	return &this
}

// NewAuthGetNumbersWithDefaults instantiates a new AuthGetNumbers object
// with all fields left at their zero values.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewAuthGetNumbersWithDefaults() *AuthGetNumbers {
	this := AuthGetNumbers{}
	return &this
}
// Accessors below follow the OpenAPI-generator convention: GetX returns the
// zero value on a nil receiver, GetXOk additionally reports whether the
// value is usable, and SetX overwrites the field.

// GetAch returns the Ach field value
func (o *AuthGetNumbers) GetAch() []NumbersACH {
	if o == nil {
		var ret []NumbersACH
		return ret
	}

	return o.Ach
}

// GetAchOk returns a tuple with the Ach field value
// and a boolean to check if the value has been set.
func (o *AuthGetNumbers) GetAchOk() (*[]NumbersACH, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Ach, true
}

// SetAch sets field value
func (o *AuthGetNumbers) SetAch(v []NumbersACH) {
	o.Ach = v
}

// GetEft returns the Eft field value
func (o *AuthGetNumbers) GetEft() []NumbersEFT {
	if o == nil {
		var ret []NumbersEFT
		return ret
	}

	return o.Eft
}

// GetEftOk returns a tuple with the Eft field value
// and a boolean to check if the value has been set.
func (o *AuthGetNumbers) GetEftOk() (*[]NumbersEFT, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Eft, true
}

// SetEft sets field value
func (o *AuthGetNumbers) SetEft(v []NumbersEFT) {
	o.Eft = v
}

// GetInternational returns the International field value
func (o *AuthGetNumbers) GetInternational() []NumbersInternational {
	if o == nil {
		var ret []NumbersInternational
		return ret
	}

	return o.International
}

// GetInternationalOk returns a tuple with the International field value
// and a boolean to check if the value has been set.
func (o *AuthGetNumbers) GetInternationalOk() (*[]NumbersInternational, bool) {
	if o == nil {
		return nil, false
	}
	return &o.International, true
}

// SetInternational sets field value
func (o *AuthGetNumbers) SetInternational(v []NumbersInternational) {
	o.International = v
}

// GetBacs returns the Bacs field value
func (o *AuthGetNumbers) GetBacs() []NumbersBACS {
	if o == nil {
		var ret []NumbersBACS
		return ret
	}

	return o.Bacs
}

// GetBacsOk returns a tuple with the Bacs field value
// and a boolean to check if the value has been set.
func (o *AuthGetNumbers) GetBacsOk() (*[]NumbersBACS, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Bacs, true
}

// SetBacs sets field value
func (o *AuthGetNumbers) SetBacs(v []NumbersBACS) {
	o.Bacs = v
}
// MarshalJSON serializes the declared fields plus any retained
// AdditionalProperties into a single JSON object. The generated
// `if true { ... }` wrappers (emitted for required fields) have been
// collapsed into a plain map literal; the serialized output is identical.
func (o AuthGetNumbers) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"ach":           o.Ach,
		"eft":           o.Eft,
		"international": o.International,
		"bacs":          o.Bacs,
	}

	// unknown keys captured at unmarshal time are written back out
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}

	return json.Marshal(toSerialize)
}
// UnmarshalJSON decodes the declared fields via the _AuthGetNumbers alias,
// then re-decodes the payload into a map and strips the declared keys so
// any remaining keys land in AdditionalProperties.
// NOTE(review): an error from the first Unmarshal is overwritten by the
// second one's result — generated-code quirk; confirm before relying on the
// returned error.
func (o *AuthGetNumbers) UnmarshalJSON(bytes []byte) (err error) {
	varAuthGetNumbers := _AuthGetNumbers{}

	if err = json.Unmarshal(bytes, &varAuthGetNumbers); err == nil {
		*o = AuthGetNumbers(varAuthGetNumbers)
	}

	additionalProperties := make(map[string]interface{})

	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		delete(additionalProperties, "ach")
		delete(additionalProperties, "eft")
		delete(additionalProperties, "international")
		delete(additionalProperties, "bacs")
		o.AdditionalProperties = additionalProperties
	}

	return err
}
// NullableAuthGetNumbers is a tri-state wrapper distinguishing "unset",
// "set to null" and "set to a value" for JSON round-tripping.
type NullableAuthGetNumbers struct {
	value *AuthGetNumbers
	isSet bool
}

// Get returns the wrapped value (nil when unset or null).
func (v NullableAuthGetNumbers) Get() *AuthGetNumbers {
	return v.value
}

// Set stores val (which may be nil, meaning JSON null) and marks the
// wrapper as set.
func (v *NullableAuthGetNumbers) Set(val *AuthGetNumbers) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set has been called.
func (v NullableAuthGetNumbers) IsSet() bool {
	return v.isSet
}

// Unset clears the value and the set flag.
func (v *NullableAuthGetNumbers) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableAuthGetNumbers returns a wrapper already set to val.
func NewNullableAuthGetNumbers(val *AuthGetNumbers) *NullableAuthGetNumbers {
	return &NullableAuthGetNumbers{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value (null when nil).
func (v NullableAuthGetNumbers) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes into the wrapped value and marks the wrapper set.
func (v *NullableAuthGetNumbers) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package animation
import (
"image/color"
"math"
"time"
"github.com/misterikkit/tinytimer/graphics"
)
// Interface provides a frame of pixels that changes over time.
// Implementations in this package keep a fixed-size buffer (allocated with
// graphics.FrameSize) that Update rewrites in place on every tick.
type Interface interface {
	// Frame returns the animation's current pixel values. The slice is
	// reused between updates, so callers must not retain or mutate it.
	Frame() []color.RGBA
	// Update updates an animation's frame based on the current time, and returns
	// true if the animation is complete.
	Update(time.Time) bool
}
// spinner is an endless animation of evenly spaced dots rotating around
// the ring.
type spinner struct {
	frame []color.RGBA      // pixel buffer rewritten by Update
	dots  []graphics.Sprite // spinnerCount dots, one per spoke
}

// spinnerCount is the number of dots in the spinner.
const spinnerCount = 7

// size is the rendered size of each dot (1.5 pixel widths).
var size = graphics.PixelWidth * 1.5

// divide is the angular spacing between adjacent dots; divide*spinnerCount
// covers the full circle (graphics.Circ).
var divide = graphics.Circ / spinnerCount
// NewSpinner initializes a spinner animation whose dots are all drawn in
// the color c.
func NewSpinner(c color.RGBA) Interface {
	dots := make([]graphics.Sprite, spinnerCount)
	for i := range dots {
		dots[i] = graphics.Sprite{Size: size, Color: c}
	}
	return &spinner{
		frame: make([]color.RGBA, graphics.FrameSize),
		dots:  dots,
	}
}
// Frame returns the spinner's current pixel buffer.
func (s *spinner) Frame() []color.RGBA { return s.frame }

// period is the duration of one full revolution: one second per dot.
var period = time.Second * spinnerCount
// Update computes the current frame of animation.
// The spinner loops forever, so it always reports false (never complete).
func (s *spinner) Update(now time.Time) bool {
	graphics.Fill(s.frame, graphics.Black)
	// compute fraction through the period; Truncate(period) gives the start
	// of the current revolution, so elapsed is in [0, period).
	elapsed := float32(now.Sub(now.Truncate(period)).Nanoseconds())
	progress := elapsed / float32(period.Nanoseconds())
	for i := range s.dots {
		// The value of Position has an upper bound of `2*Circ`. The max progress is
		// 1.0 and divide*spinnerCount==Circ.
		s.dots[i].Position = graphics.Circ*progress + divide*float32(i)
		s.dots[i].Render(s.frame)
	}
	return false
}
// loader is a progress animation: a bar sweeps around the ring from start
// to end while the background cross-fades from bgFrom to bgTo, and a white
// dot circles the ring once per second of elapsed time.
type loader struct {
	frame        []color.RGBA
	bar, dot     graphics.Sprite
	bgFrom, bgTo color.RGBA
	// bg color.RGBA
	start, end time.Time
	done       bool // latched true once now reaches end
}

// NewLoader initializes a loader animation.
// fg is the bar color; the background fades from bgFrom to bgTo over the
// [start, end] interval.
func NewLoader(fg, bgFrom, bgTo color.RGBA, start, end time.Time) Interface {
	return &loader{
		frame:  make([]color.RGBA, graphics.FrameSize),
		bar:    graphics.Sprite{Color: fg},
		bgFrom: bgFrom,
		bgTo:   bgTo,
		// bg: bg,
		dot:   graphics.Sprite{Color: graphics.White, Size: graphics.PixelWidth},
		start: start,
		end:   end,
	}
}
// Frame returns the loader's current pixel buffer.
func (l *loader) Frame() []color.RGBA { return l.frame }

// Update redraws the loader for time now and reports whether the loader
// has reached its end time. Once done, it returns true without redrawing.
func (l *loader) Update(now time.Time) bool {
	if l.done {
		return true
	}
	progress := float32(1.0)
	if now.Before(l.end) {
		// NOTE(review): assumes end != start; equal times would divide by
		// zero here — confirm callers never construct a zero-length loader.
		progress = float32(now.Sub(l.start)) / float32(l.end.Sub(l.start))
	} else {
		l.done = true
	}
	// Background cross-fade between the two colors, weighted by progress.
	graphics.Fill(l.frame, graphics.Add(
		graphics.Scale(l.bgFrom, 1.0-progress),
		graphics.Scale(l.bgTo, progress),
	))
	// The bar grows with progress and is centered on its own span.
	l.bar.Size = graphics.Circ * progress
	l.bar.Position = l.bar.Size / 2.0
	// The dot completes one revolution per second of elapsed time.
	elapsed := float32(now.Sub(l.start).Seconds())
	l.dot.Position = elapsed * graphics.Circ
	l.bar.Render(l.frame)
	l.dot.Render(l.frame)
	return l.done
}
// flasher pulses the whole ring in a single color until its end time.
type flasher struct {
	frame []color.RGBA
	c     color.RGBA // pulse color
	end   time.Time  // when the flashing stops
}

// NewFlasher initializes a flasher animation that pulses color c until end.
func NewFlasher(c color.RGBA, end time.Time) Interface {
	return &flasher{make([]color.RGBA, graphics.FrameSize), c, end}
}
// Frame returns the flasher's current pixel buffer.
func (f *flasher) Frame() []color.RGBA { return f.frame }

// Update fills the frame with c scaled by sin² of the time remaining, so
// the brightness pulses smoothly (two sin² cycles per second).
func (f *flasher) Update(now time.Time) bool {
	progress := f.end.Sub(now).Seconds() * math.Pi * 2
	s := float32(math.Sin(progress))
	s = s * s // stay smooth. stay positive
	val := graphics.Scale(f.c, s)
	graphics.Fill(f.frame, val)
	return !now.Before(f.end) // This is double negated to return true on the exact frame.
}
// fader blends two animations, cross-fading from `from` to `to` over the
// [start, end] interval, then delegating entirely to `to`.
type fader struct {
	frame      []color.RGBA
	from, to   Interface
	start, end time.Time
}

// NewFader initializes a fader animation that blends from and to.
func NewFader(from, to Interface, start, end time.Time) Interface {
	return &fader{
		frame: make([]color.RGBA, graphics.FrameSize),
		from:  from,
		to:    to,
		start: start,
		end:   end,
	}
}
// Frame returns the fader's current pixel buffer.
func (f *fader) Frame() []color.RGBA { return f.frame }

// Update blends the two child animations pixel-by-pixel while the fade is
// in progress. After end it forwards to the target animation alone and
// reports that animation's completion status.
func (f *fader) Update(now time.Time) bool {
	if now.After(f.end) {
		done := f.to.Update(now)
		copy(f.frame, f.to.Frame()) // Unfortunate that copy is required
		// TODO: f.frame = f.to.Frame()
		return done
	}
	// Both children advance; their frames are mixed by fade progress.
	f.from.Update(now)
	f.to.Update(now)
	progress := float32(now.Sub(f.start)) / float32(f.end.Sub(f.start))
	for i := range f.frame {
		f.frame[i] = graphics.Add(
			graphics.Scale(f.from.Frame()[i], 1.0-progress),
			graphics.Scale(f.to.Frame()[i], progress),
		)
	}
	return false
}
package xcore
import (
"log"
"sync"
"time"
)
// XCacheEntry is the cache basic structure to save some data in memory.
type XCacheEntry struct {
	// The cache entry has a time to measure expiration if needed, or time of entry in cache:
	// - ctime is the creation time (used to validate the object against its source).
	ctime time.Time
	// - rtime is the last read time (used to clean the cache: the less accessed objects are removed).
	rtime time.Time
	// The data as itself is an interface to whatever the user need to cache.
	data interface{}
}
// XCache is the main cache structure, that contains a collection of XCacheEntries and some metadata.
type XCache struct {
	// "ID": XCache has a unique id (informative).
	ID string
	// "Maxitems": The user can creates a cache with a maximum number of elements into it. In this case, when the cache reaches the maximum number of elements stored, then the system makes a clean of 10% of the oldest elements. This type of use is not recommended since is it heavy in CPU use to clean the cache.
	Maxitems int
	// "Validator" is a function that can be set to check the validity of the data (for instance if the data originates from a file or a database). The validator is called for each Get (and can be heavy for CPU or can wait a long time, for instance if the check is an external database on another cluster). Beware of this.
	Validator func(string, time.Time) bool
	// "Expire": The user can also create an expiration duration, so every elements in the cache is invalidated after a certain amount of time. It is more recommended to use the cache with an expiration duration. The obsolete objects are destroyed when the user tries to use them and return a "non existence" on Get. (this does not use CPU or extra locks).
	Expire time.Duration
	// Not available from outside for security, access of data is based on a mutex
	// "mutex": The cache owns a mutex to lock access to data to read/write/delete/clean the data, to allow concurrency and multithreading of the cache.
	mutex sync.Mutex
	// "items": The items are a map to cache entries, accessed by the key of entries.
	// (The original comments on these two fields were swapped.)
	items map[string]*XCacheEntry
	// "pile": The pile keeps the "ordered by date of reading" object keys, so it's fast to clean the data.
	pile []string
}
// NewXCache function will create a new XCache structure.
// The XCache is resident in memory, supports multithreading and concurrency.
// "id" is the unique id of the XCache.
// "maxitems" is the max authorized quantity of objects into the XCache. If 0, the cache hast no limit in quantity of objects.
// "expire" is a max duration of the objects into the cache. If 0, no limit.
// Returns the *XCache created. The pile is deliberately left nil: append
// works on a nil slice, so no explicit initialization is needed.
func NewXCache(id string, maxitems int, expire time.Duration) *XCache {
	if LOG {
		log.Printf("Creating cache with data {id: %s, maxitems: %d, expire: %d}", id, maxitems, expire)
	}
	return &XCache{
		ID:        id,
		Maxitems:  maxitems,
		Validator: nil,
		Expire:    expire,
		items:     make(map[string]*XCacheEntry),
	}
}
// Set will set an entry in the cache.
// If the entry already exists, it is replaced with a new creation date and
// moved to the most-recently-used end of the pile.
// If the insertion makes the cache full (Maxitems reached), Clean is called
// to remove 10% of the oldest entries.
func (c *XCache) Set(key string, indata interface{}) {
	c.mutex.Lock()
	_, exists := c.items[key]
	c.items[key] = &XCacheEntry{ctime: time.Now(), rtime: time.Now(), data: indata}
	if exists {
		// Re-insertion: drop the old pile position so the key appears once.
		c.removeFromPile(key)
	}
	c.pile = append(c.pile, key)
	// Decide whether to clean while still holding the lock: the original
	// read len(c.items) after unlocking, which is a data race with
	// concurrent writers.
	needClean := c.Maxitems > 0 && len(c.items) >= c.Maxitems
	c.mutex.Unlock()
	if needClean {
		// Clean takes the lock itself, so it must run after the unlock.
		c.Clean(10)
	}
}
// removeFromPile removes key from the access-ordered pile, if present.
// (Callers that want to mark a key as most recently used re-append it
// themselves after calling this.) The caller must hold c.mutex.
func (c *XCache) removeFromPile(key string) {
	for i, k := range c.pile {
		if k != key {
			continue
		}
		// append copes with i being the last index too, so no special
		// casing is required.
		c.pile = append(c.pile[:i], c.pile[i+1:]...)
		break
	}
}
// Get will get the value of an entry.
// If the entry does not exist, returns (nil, false).
// If the entry exists but is invalidated by expiry or by the Validator
// function, the entry is removed and (nil, true) is returned.
// If the entry is good, it is marked as most recently used and
// (<value>, false) is returned.
func (c *XCache) Get(key string) (interface{}, bool) {
	c.mutex.Lock()
	x, ok := c.items[key]
	c.mutex.Unlock()
	if !ok {
		return nil, false
	}
	// The validator may be slow (e.g. hit a database), so it runs outside
	// the lock, exactly as before.
	if c.Validator != nil {
		if b := c.Validator(key, x.ctime); !b {
			if LOG {
				log.Println("Validator invalids entry: " + key)
			}
			c.mutex.Lock()
			delete(c.items, key)
			c.removeFromPile(key)
			c.mutex.Unlock()
			return nil, true
		}
	}
	// expired ?
	if c.Expire != 0 {
		if x.ctime.Add(c.Expire).Before(time.Now()) {
			if LOG {
				log.Println("Cache timeout Expired: " + key)
			}
			c.mutex.Lock()
			delete(c.items, key)
			c.removeFromPile(key)
			c.mutex.Unlock()
			return nil, true
		}
	}
	// Mark the entry as most recently used. The original mutated rtime and
	// the pile after unlocking, which was a data race; re-acquire the lock
	// for the bookkeeping.
	c.mutex.Lock()
	x.rtime = time.Now()
	c.removeFromPile(key)
	c.pile = append(c.pile, key)
	c.mutex.Unlock()
	return x.data, false
}
// Del will delete the entry of the cache if it exists.
// Deleting a missing key is a safe no-op: the map delete does nothing and
// removeFromPile simply finds no matching pile entry.
func (c *XCache) Del(key string) {
	c.mutex.Lock()
	delete(c.items, key)
	c.removeFromPile(key)
	c.mutex.Unlock()
}
// Count reports how many entries are currently stored in the cache.
func (c *XCache) Count() int {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	return len(c.items)
}
// Clean deletes expired entries and then evicts perc% (0..100) of the
// remaining entries, oldest (least recently read) first.
// Returns the total quantity of removed entries (expired + evicted); the
// original lost the expired count because its counter was reset by the
// eviction loop.
// It will **not** verify the cache against its source (if Validator is
// set). If you want that, use the Verify function.
func (c *XCache) Clean(perc int) int {
	if LOG {
		log.Println("Cleaning cache")
	}
	removed := 0
	c.mutex.Lock()
	defer c.mutex.Unlock()
	// 1. Drop every expired item, keeping the pile in sync with the map.
	//    (The original left expired keys in the pile, so later evictions
	//    could target already-deleted keys and the pile grew stale.)
	if c.Expire != 0 {
		now := time.Now()
		for k, x := range c.items {
			if x.ctime.Add(c.Expire).Before(now) {
				if LOG {
					log.Println("Cache timeout Expired: " + k)
				}
				delete(c.items, k)
				c.removeFromPile(k)
				removed++
			}
		}
	}
	// 2. Evict perc% of what remains, oldest first (front of the pile).
	num := len(c.items) * perc / 100
	if num > len(c.pile) {
		num = len(c.pile) // defensive clamp; pile and items should match
	}
	if LOG {
		log.Println("Quantity of elements to remove from cache:", num)
	}
	for i := 0; i < num; i++ {
		delete(c.items, c.pile[i])
	}
	c.pile = c.pile[num:]
	return removed + num
}
// Verify first runs Clean(0) to drop expired entries (keeping everything
// else), then checks every remaining entry against its source with the
// Validator function, removing rejected entries.
// Returns the total quantity of removed entries.
// Based on what the validator function does, calling Verify can be **very**
// slow and CPU dependent. Be very careful.
func (c *XCache) Verify() int {
	// 1. clean all expired items, do not touch others
	i := c.Clean(0)
	if c.Validator == nil {
		return i
	}
	// 2. Snapshot keys and creation times under the lock so the (possibly
	// slow) validator runs without racing concurrent writers. The original
	// ranged over c.items with no lock held, which is a data race.
	c.mutex.Lock()
	snapshot := make(map[string]time.Time, len(c.items))
	for k, x := range c.items {
		snapshot[k] = x.ctime
	}
	c.mutex.Unlock()
	for k, ctime := range snapshot {
		if !c.Validator(k, ctime) {
			if LOG {
				log.Println("Validator invalids entry: " + k)
			}
			c.mutex.Lock()
			delete(c.items, k)
			// Keep the pile in sync; the original left the stale key in it.
			c.removeFromPile(k)
			c.mutex.Unlock()
			i++
		}
	}
	return i
}
// Flush will empty the whole cache and free all the memory of it.
// Returns nothing.
func (c *XCache) Flush() {
c.mutex.Lock()
// how to really deletes the data ? ( to free memory)
c.items = make(map[string]*XCacheEntry)
c.mutex.Unlock()
} | xcache.go | 0.540924 | 0.513485 | xcache.go | starcoder |
package structures
import (
"errors"
"fmt"
"log"
"math/big"
"math/rand"
"github.com/offchainlabs/arbitrum/packages/arb-checkpointer/ckptcontext"
"github.com/offchainlabs/arbitrum/packages/arb-util/common"
"github.com/offchainlabs/arbitrum/packages/arb-util/hashing"
"github.com/offchainlabs/arbitrum/packages/arb-util/machine"
"github.com/offchainlabs/arbitrum/packages/arb-util/protocol"
"github.com/offchainlabs/arbitrum/packages/arb-util/value"
"github.com/offchainlabs/arbitrum/packages/arb-validator-core/valprotocol"
)
// zeroBytes32 is the all-zero hash, used to mark absent links.
var zeroBytes32 common.Hash // deliberately zeroed

// Node is one node of the rollup chain's node tree. Each node records its
// link to its parent, the disputable assertion that created it, the VM
// protocol state after it, and (optionally) the locally known machine
// state and assertion.
type Node struct {
	prevHash    common.Hash
	prev        *Node // Node with hash prevHash if non-nil
	deadline    common.TimeTicks
	disputable  *valprotocol.DisputableNode
	linkType    valprotocol.ChildType
	vmProtoData *valprotocol.VMProtoData

	machine   machine.Machine              // nil if unknown
	assertion *protocol.ExecutionAssertion // nil if not valid node or unknown

	depth        uint64
	nodeDataHash common.Hash
	innerHash    common.Hash
	hash         common.Hash

	successorHashes [valprotocol.MaxChildType + 1]common.Hash
	numStakers      uint64
}
// String returns a human-readable summary of the node for logging.
func (node *Node) String() string {
	return fmt.Sprintf("Node(type: %v, disputable: %v, deadline: %v, protodata: %v)", node.linkType, node.disputable, node.deadline.Val, node.vmProtoData)
}

// NewInitialNode builds the root node of a chain from the initial machine
// state: depth 0, no parent, zero deadline, and all protocol counters zero.
func NewInitialNode(mach machine.Machine) *Node {
	ret := &Node{
		prevHash:   common.Hash{},
		prev:       nil,
		deadline:   common.TimeTicks{Val: big.NewInt(0)},
		disputable: nil,
		linkType:   0,
		vmProtoData: valprotocol.NewVMProtoData(
			mach.Hash(),
			common.Hash{},
			big.NewInt(0),
			big.NewInt(0),
			big.NewInt(0),
		),
		machine: mach,
		depth:   0,
	}
	// The root has no node data, so its hash is seeded with the zero hash.
	ret.setHash(common.Hash{})
	return ret
}
// NewValidNodeFromPrev builds the valid child of prev for the given
// disputable assertion: the VM protocol state advances according to the
// assertion's results.
func NewValidNodeFromPrev(
	prev *Node,
	disputable *valprotocol.DisputableNode,
	params valprotocol.ChainParams,
	currentTime *common.TimeBlocks,
) *Node {
	return NewNodeFromPrev(
		prev,
		disputable,
		valprotocol.ValidChildType,
		params,
		currentTime,
		disputable.ValidAfterVMProtoData(prev.vmProtoData),
	)
}
// NewRandomNodeFromValidPrev builds a valid child of prev with a randomly
// generated assertion consuming messageCount messages. Intended for tests;
// the UpdateValidOpinion error is deliberately ignored.
func NewRandomNodeFromValidPrev(prev *Node, inboxStack *MessageStack, messageCount uint64) *Node {
	assertion := protocol.NewExecutionAssertionFromValues(
		common.RandHash(),
		common.RandHash(),
		rand.Uint64(),
		messageCount,
		[]value.Value{value.NewInt64Value(0), value.NewInt64Value(2)},
		[]value.Value{value.NewInt64Value(1), value.NewInt64Value(2)},
	)
	disputableNode := valprotocol.NewRandomDisputableNode(
		NewExecutionAssertionStubFromWholeAssertion(assertion, prev.VMProtoData().InboxTop, inboxStack),
	)
	nextNode := NewValidNodeFromPrev(
		prev,
		disputableNode,
		valprotocol.NewRandomChainParams(),
		common.NewTimeBlocks(common.RandBigInt()),
	)
	_ = nextNode.UpdateValidOpinion(nil, assertion)
	return nextNode
}

// NewRandomInvalidNodeFromValidPrev builds an invalid child of prev of the
// given kind, wrapping the supplied assertion stub in a random disputable
// node. Intended for tests; the UpdateInvalidOpinion error is ignored.
func NewRandomInvalidNodeFromValidPrev(
	prev *Node,
	stub *valprotocol.ExecutionAssertionStub,
	kind valprotocol.ChildType,
	params valprotocol.ChainParams,
) *Node {
	disputableNode := valprotocol.NewRandomDisputableNode(stub)
	nextNode := NewInvalidNodeFromPrev(
		prev,
		disputableNode,
		kind,
		params,
		common.NewTimeBlocks(common.RandBigInt()),
	)
	_ = nextNode.UpdateInvalidOpinion()
	return nextNode
}
// NewInvalidNodeFromPrev builds an invalid child of prev: the VM protocol
// state is inherited unchanged from the parent (an invalid branch does not
// advance the VM).
func NewInvalidNodeFromPrev(
	prev *Node,
	disputable *valprotocol.DisputableNode,
	kind valprotocol.ChildType,
	params valprotocol.ChainParams,
	currentTime *common.TimeBlocks,
) *Node {
	return NewNodeFromPrev(
		prev,
		disputable,
		kind,
		params,
		currentTime,
		prev.vmProtoData,
	)
}

// NewNodeFromPrev is the shared constructor for child nodes. It computes
// the confirmation deadline from the chain parameters and the parent's
// deadline, links the child to its parent, and derives the node hash.
func NewNodeFromPrev(
	prev *Node,
	disputable *valprotocol.DisputableNode,
	kind valprotocol.ChildType,
	params valprotocol.ChainParams,
	currentTime *common.TimeBlocks,
	vmProtoData *valprotocol.VMProtoData,
) *Node {
	deadlineTicks := valprotocol.CalculateNodeDeadline(
		disputable.Assertion,
		params,
		prev.deadline,
		common.TicksFromBlockNum(currentTime),
	)
	ret := &Node{
		prevHash:    prev.hash,
		prev:        prev,
		deadline:    deadlineTicks,
		disputable:  disputable,
		linkType:    kind,
		vmProtoData: vmProtoData,
		depth:       prev.depth + 1,
	}
	ret.setHash(ret.calculateNodeDataHash(params))
	return ret
}
// LinkSuccessor records successor as a child of this node, indexed by its
// link type. It errors if successor's recorded parent hash does not match
// this node's hash.
func (node *Node) LinkSuccessor(successor *Node) error {
	if successor.prevHash != node.hash {
		return errors.New("node is not successor")
	}
	node.successorHashes[successor.linkType] = successor.hash
	return nil
}
// Hash returns the node's full hash (parent hash combined with inner hash).
func (node *Node) Hash() common.Hash {
	return node.hash
}

// LinkType reports how this node relates to its parent (valid, or one of
// the invalid child kinds).
func (node *Node) LinkType() valprotocol.ChildType {
	return node.linkType
}

// PrevHash returns the hash of the parent node (zero when unlinked).
func (node *Node) PrevHash() common.Hash {
	return node.prevHash
}

// Prev returns the parent node, or nil if the link has been cleared.
func (node *Node) Prev() *Node {
	return node.prev
}

// ClearPrev drops the parent pointer and zeroes the stored parent hash.
func (node *Node) ClearPrev() {
	node.prev = nil
	node.prevHash = zeroBytes32
}

// UnlinkPrev detaches this node from its parent in both directions and
// reports whether a parent link existed.
func (node *Node) UnlinkPrev() bool {
	hasPrev := node.prev != nil
	if hasPrev {
		node.prev.successorHashes[node.LinkType()] = zeroBytes32
		node.ClearPrev()
	}
	return hasPrev
}

// Deadline returns the node's confirmation deadline.
func (node *Node) Deadline() common.TimeTicks {
	return node.deadline
}

// Disputable returns the disputable assertion data (nil for the root node).
func (node *Node) Disputable() *valprotocol.DisputableNode {
	return node.disputable
}

// VMProtoData returns the VM protocol state after this node.
func (node *Node) VMProtoData() *valprotocol.VMProtoData {
	return node.vmProtoData
}

// Machine returns the machine state at this node, or nil if unknown.
func (node *Node) Machine() machine.Machine {
	return node.machine
}

// Assertion returns the execution assertion, or nil if this is not a valid
// node or the assertion is unknown.
func (node *Node) Assertion() *protocol.ExecutionAssertion {
	return node.assertion
}

// UpdateValidOpinion records the locally computed machine state and
// assertion for a node believed valid. It errors if the node is not the
// valid child of its parent.
func (node *Node) UpdateValidOpinion(machine machine.Machine, assertion *protocol.ExecutionAssertion) error {
	if node.linkType != valprotocol.ValidChildType {
		return errors.New("node is invalid")
	}
	node.machine = machine
	node.assertion = assertion
	return nil
}

// UpdateInvalidOpinion records that this node is an invalid branch: the
// machine state is cloned from the parent since the VM does not advance.
// It errors if the node is the valid child.
func (node *Node) UpdateInvalidOpinion() error {
	if node.linkType == valprotocol.ValidChildType {
		return errors.New("node is valid")
	}
	node.machine = node.prev.machine.Clone()
	return nil
}

// Depth returns the node's distance from the root node.
func (node *Node) Depth() uint64 {
	return node.depth
}

// NodeDataHash returns the hash of the node's challenge/assertion data.
func (node *Node) NodeDataHash() common.Hash {
	return node.nodeDataHash
}

// SuccessorHashes returns the hashes of the node's children, indexed by
// child type (zero hash where no child is linked).
func (node *Node) SuccessorHashes() [valprotocol.MaxChildType + 1]common.Hash {
	return node.successorHashes
}

// NumStakers returns the number of stakers currently on this node.
func (node *Node) NumStakers() uint64 {
	return node.numStakers
}

// AddStaker increments the staker count.
func (node *Node) AddStaker() {
	node.numStakers++
}

// RemoveStaker decrements the staker count.
func (node *Node) RemoveStaker() {
	node.numStakers--
}

// HasAncestor reports whether the node records a (non-zero) parent hash.
func (node *Node) HasAncestor() bool {
	emptyHash := common.Hash{}
	return node.prevHash != emptyHash
}

// Equals reports whether two nodes have the same hash.
func (node *Node) Equals(node2 *Node) bool {
	return node.hash == node2.hash
}
// calculateNodeDataHash derives the node's data hash. For a valid child it
// commits to the parent's message count plus the assertion's message and
// log hashes; for an invalid child it commits to the challenge data and
// challenge period. The root node (no disputable data) hashes to zero.
func (node *Node) calculateNodeDataHash(params valprotocol.ChainParams) common.Hash {
	if node.disputable == nil {
		return common.Hash{}
	}
	if node.linkType == valprotocol.ValidChildType {
		return hashing.SoliditySHA3(
			hashing.Uint256(node.prev.VMProtoData().MessageCount),
			hashing.Bytes32(node.disputable.Assertion.LastMessageHash),
			hashing.Bytes32(node.disputable.Assertion.LastLogHash),
		)
	} else {
		challengeDataHash, challengePeriodTicks := node.ChallengeNodeData(params)
		return hashing.SoliditySHA3(
			hashing.Bytes32(challengeDataHash),
			hashing.TimeTicks(challengePeriodTicks),
		)
	}
}

// ChallengeNodeData returns the challenge data hash and challenge period
// for an invalid node, dispatched on its link type. The process exits
// (log.Fatal) on an unhandled challenge type.
func (node *Node) ChallengeNodeData(params valprotocol.ChainParams) (common.Hash, common.TimeTicks) {
	vmProtoData := node.prev.vmProtoData
	switch node.linkType {
	case valprotocol.InvalidInboxTopChildType:
		// inboxLeft = maxInboxCount - (parent inbox count + imported messages)
		inboxLeft := new(big.Int).Add(vmProtoData.InboxCount, node.disputable.AssertionParams.ImportedMessageCount)
		inboxLeft = inboxLeft.Sub(node.disputable.MaxInboxCount, inboxLeft)
		ret := valprotocol.InboxTopChallengeDataHash(
			node.disputable.Assertion.AfterInboxHash,
			node.disputable.MaxInboxTop,
			inboxLeft,
		)
		challengePeriod := params.GracePeriod.Add(common.TicksFromBlockNum(common.NewTimeBlocks(big.NewInt(1))))
		return ret, challengePeriod
	case valprotocol.InvalidExecutionChildType:
		ret := valprotocol.ExecutionDataHash(
			node.disputable.AssertionParams.NumSteps,
			node.disputable.Assertion,
		)
		// Execution challenges get extra time proportional to the checking cost.
		challengePeriod := params.GracePeriod.Add(node.disputable.Assertion.CheckTime(params))
		return ret, challengePeriod
	default:
		log.Fatal("Unhandled challenge type", node.linkType)
		return common.Hash{}, common.TimeTicks{}
	}
}
// setHash computes and stores the node's inner hash (protocol state,
// deadline, node data, link type) and full hash (parent hash + inner
// hash) from the given node data hash.
func (node *Node) setHash(nodeDataHash common.Hash) {
	var prevHashArr common.Hash
	if node.prev != nil {
		prevHashArr = node.prev.hash
	}
	innerHash := hashing.SoliditySHA3(
		hashing.Bytes32(node.vmProtoData.Hash()),
		hashing.TimeTicks(node.deadline),
		hashing.Bytes32(nodeDataHash),
		hashing.Uint256(new(big.Int).SetUint64(uint64(node.linkType))),
	)
	hash := hashing.SoliditySHA3(
		hashing.Bytes32(prevHashArr),
		hashing.Bytes32(innerHash),
	)
	node.nodeDataHash = nodeDataHash
	node.innerHash = innerHash
	node.hash = hash
}

// Link wires nd to prev in both directions. It errors if nd's recorded
// parent hash does not match prev's hash.
func Link(nd *Node, prev *Node) error {
	if nd.prevHash != prev.hash {
		return errors.New("node is not parent")
	}
	nd.prev = prev
	prev.successorHashes[nd.linkType] = nd.hash
	return nil
}
// MarshalForCheckpoint converts the node to its checkpoint buffer form.
// When includeMachine is set and a machine is known, the machine is
// registered with the checkpoint context and referenced by hash.
func (node *Node) MarshalForCheckpoint(ctx *ckptcontext.CheckpointContext, includeMachine bool) *NodeBuf {
	var machineHash *common.HashBuf
	if includeMachine && node.machine != nil {
		ctx.AddMachine(node.machine)
		machineHash = node.machine.Hash().MarshalToBuf()
	}
	var disputableNodeBuf *valprotocol.DisputableNodeBuf
	if node.disputable != nil {
		disputableNodeBuf = node.disputable.MarshalToBuf()
	}
	return &NodeBuf{
		PrevHash:       node.prevHash.MarshalToBuf(),
		Deadline:       node.deadline.MarshalToBuf(),
		DisputableNode: disputableNodeBuf,
		LinkType:       uint32(node.linkType),
		VmProtoData:    node.vmProtoData.MarshalToBuf(),
		MachineHash:    machineHash,
		Assertion:      node.assertion,
		Depth:          node.depth,
		NodeDataHash:   node.nodeDataHash.MarshalToBuf(),
		InnerHash:      node.innerHash.MarshalToBuf(),
		Hash:           node.hash.MarshalToBuf(),
	}
}

// UnmarshalFromCheckpoint rebuilds a Node from its checkpoint buffer form.
// The machine is restored from the context when a machine hash was stored.
// Note: prev and successorHashes are NOT restored here.
func (x *NodeBuf) UnmarshalFromCheckpoint(ctx ckptcontext.RestoreContext) (*Node, error) {
	var disputableNode *valprotocol.DisputableNode
	if x.DisputableNode != nil {
		disputableNode = x.DisputableNode.Unmarshal()
	}
	node := &Node{
		prevHash:     x.PrevHash.Unmarshal(),
		prev:         nil,
		deadline:     x.Deadline.Unmarshal(),
		disputable:   disputableNode,
		linkType:     valprotocol.ChildType(x.LinkType),
		vmProtoData:  x.VmProtoData.Unmarshal(),
		machine:      nil,
		assertion:    x.Assertion,
		depth:        x.Depth,
		nodeDataHash: x.NodeDataHash.Unmarshal(),
		innerHash:    x.InnerHash.Unmarshal(),
		hash:         x.Hash.Unmarshal(),
		numStakers:   0,
	}
	if x.MachineHash != nil {
		node.machine = ctx.GetMachine(x.MachineHash.Unmarshal())
	}
	// can't set up prev and successorHash fields yet; caller must do this later
	return node, nil
}
// GeneratePathProof returns the chain of inner hashes proving that `to` is
// a descendant of `from`, built parent-first by recursing up from `to`.
// It returns nil when no such path exists and an empty slice when
// from == to.
func GeneratePathProof(from, to *Node) []common.Hash {
	// returns nil if no proof exists
	if to == nil {
		return nil
	}
	if from == to {
		return []common.Hash{}
	}
	sub := GeneratePathProof(from, to.prev)
	if sub == nil {
		return nil
	}
	return append(sub, to.innerHash)
}
// EqualsFull compares the full mutable state of two nodes, not just their
// hashes: depth, protocol state, link type, child links, and staker count.
func (node *Node) EqualsFull(n2 *Node) bool {
	return node.Equals(n2) &&
		node.depth == n2.depth &&
		node.vmProtoData.Equals(n2.vmProtoData) &&
		node.linkType == n2.linkType &&
		node.successorHashes == n2.successorHashes &&
		node.numStakers == n2.numStakers
}

// GetConflictAncestor walks n1 and n2 up to their common parent and
// returns the two child nodes just below it. It returns an error when the
// two children have the same link type (no conflict to challenge).
// NOTE(review): assumes n1 and n2 share an ancestor; if they do not, the
// walk dereferences a nil prev — confirm callers guarantee a common root.
func GetConflictAncestor(n1, n2 *Node) (*Node, *Node, error) {
	// First equalize depths so the parallel walk stays in lockstep.
	for n1.depth > n2.depth {
		n1 = n1.prev
	}
	for n2.depth > n1.depth {
		n2 = n2.prev
	}
	// Now n1 and n2 are at the same height so we can start looking for a challenge
	for n1.prev != n2.prev {
		n1 = n1.prev
		n2 = n2.prev
	}
	if n1.linkType == n2.linkType {
		return n1, n2, errors.New("no conflict")
	}
	return n1, n2, nil
}
package main
import (
"fmt"
"io"
"text/tabwriter"
"time"
"github.com/srishanbhattarai/nepcal/dateconv"
)
// calendar struct represents the state required to render the B.S. calendar using a tabwriter
// that writes out to an io.Writer.
type calendar struct {
	val int               // next day-of-month number to print (starts at 1)
	w   *tabwriter.Writer // aligned output sink
}

// newCalendar returns a new instance of calendar with the initial value of 1 with the provided io.Writer.
func newCalendar(w io.Writer) *calendar {
	tabw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0)
	return &calendar{
		val: 1,
		w:   tabw,
	}
}
// Render prints the BS calendar for the given time.Time.
// It converts the A.D. date to B.S., then writes the header, weekday row,
// the offset first week, and the remaining weeks, flushing the tabwriter
// at the end so the aligned output is actually emitted.
// For printing formatted/aligned output, we use a tabwriter from the
// standard library. It doesn't support ANSI escapes so we cant have
// color/other enhancements to the output.(https://github.com/srishanbhattarai/nepcal/issues/4)
func (c *calendar) Render(ad time.Time) {
	bs := dateconv.ToBS(ad)
	c.renderBSDateHeader(bs)
	c.renderStaticDaysHeader()
	c.renderFirstRow(ad, bs)
	c.renderCalWithoutFirstRow(ad, bs)
	c.w.Flush()
}
// renderFirstRow renders the first row of the calendar. The reason this needs
// to be handled separately is because there is a skew in each month which
// determines which day the month starts from - we need to tab space the 'skew' number
// of days, then start printing from the day after the skew.
func (c *calendar) renderFirstRow(ad time.Time, bs dateconv.BSDate) {
	offset := c.calculateSkew(ad, bs)
	// Pad the leading weekday cells the month does not occupy.
	for i := 0; i < offset; i++ {
		fmt.Fprintf(c.w, "\t")
	}
	// Fill the rest of the week with days 1 .. (7-offset).
	for i := 0; i < (7 - offset); i++ {
		fmt.Fprintf(c.w, "\t%d", c.val)
		c.next()
	}
	fmt.Fprint(c.w, "\n")
}
// renderCalWithoutFirstRow renders the rest of the calendar after the first
// row: rows of up to seven day numbers until the month is exhausted.
//
// The loop conditions use "<=" so the final day is printed even when a row
// begins exactly on the last day of the month; the original's
// `c.val < daysInMonth` outer condition silently dropped the last day in
// that case (e.g. a 29-day month starting on Sunday).
func (c *calendar) renderCalWithoutFirstRow(ad time.Time, bs dateconv.BSDate) {
	bsyy, bsmm, _ := bs.Date()
	daysInMonth, ok := dateconv.BsDaysInMonthsByYear(bsyy, time.Month(bsmm))
	if !ok {
		return
	}
	for c.val <= daysInMonth {
		// Print up to seven days per row, stopping at the month's end.
		for i := 0; i < 7 && c.val <= daysInMonth; i++ {
			fmt.Fprintf(c.w, "\t%d", c.val)
			c.next()
		}
		fmt.Fprint(c.w, "\n")
	}
}
// renderStaticDaysHeader writes the fixed row of weekday abbreviations.
func (c *calendar) renderStaticDaysHeader() {
	days := []string{"Su", "Mo", "Tu", "We", "Th", "Fr", "Sa"}
	for _, day := range days {
		fmt.Fprintf(c.w, "%s\t", day)
	}
	fmt.Fprint(c.w, "\n")
}
// renderBSDateHeader prints the date corresponding to the time e. This will
// be the header of the calendar. Nothing is printed when the month name
// lookup fails.
func (c *calendar) renderBSDateHeader(e dateconv.BSDate) {
	yy, mm, dd := e.Date()
	if month, ok := dateconv.GetBSMonthName(time.Month(mm)); ok {
		fmt.Fprintf(c.w, "\t\t%s %d, %d\n\t", month, dd, yy)
	}
}

// calculateSkew calculates the offset at the beginning of the month. Given an AD and
// BS date, we calculate the diff in days from the BS date to the start of the month in BS.
// We subtract that from the AD date, and get the weekday.
// For example, a skew of 2 means the month starts from Tuesday.
func (c *calendar) calculateSkew(ad time.Time, bs dateconv.BSDate) int {
	_, _, bsdd := bs.Date()
	// (bsdd % 7) - 1 differs from the "true" offset bsdd-1 by a multiple of
	// seven days, so the resulting weekday is identical.
	dayDiff := (bsdd % 7) - 1
	adWithoutbsDiffDays := ad.AddDate(0, 0, -dayDiff)
	d := adWithoutbsDiffDays.Weekday()
	// Since time.Weekday is an iota and not an iota + 1 we can avoid
	// subtracting 1 from the return value.
	return int(d)
}
// next advances the day-of-month counter to the next day to be printed.
func (c *calendar) next() {
	c.val++
}
package meta
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
)
// Column indices for the installed-metsensor CSV layout. The iota order
// must match the header row written by encode and consumed by decode;
// installedMetsensorLast is the expected field count per row.
const (
	installedMetsensorMake = iota
	installedMetsensorModel
	installedMetsensorSerial
	installedMetsensorMark
	installedMetsensorIMSComment
	installedMetsensorHumidityAccuracy
	installedMetsensorPressureAccuracy
	installedMetsensorTemperatureAccuracy
	installedMetsensorLatitude
	installedMetsensorLongitude
	installedMetsensorElevation
	installedMetsensorDatum
	installedMetsensorStart
	installedMetsensorStop
	installedMetsensorLast
)

// MetSensorAccuracy records the measurement accuracy of a met sensor.
// The lowercase string fields mirror the parsed floats so the original
// textual formatting survives a decode/encode round trip.
type MetSensorAccuracy struct {
	Humidity    float64
	Pressure    float64
	Temperature float64
	humidity    string // shadow variable to maintain formatting
	pressure    string // shadow variable to maintain formatting
	temperature string // shadow variable to maintain formatting
}

// InstalledMetSensor describes a met sensor installation at a mark,
// combining the generic Install (equipment + time span) and Point
// (location) records with sensor-specific metadata.
type InstalledMetSensor struct {
	Install
	Point
	Mark       string
	IMSComment string
	Accuracy   MetSensorAccuracy
}

// InstalledMetSensorList implements sort.Interface, ordered by the
// embedded Install record.
type InstalledMetSensorList []InstalledMetSensor

func (m InstalledMetSensorList) Len() int           { return len(m) }
func (m InstalledMetSensorList) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
func (m InstalledMetSensorList) Less(i, j int) bool { return m[i].Install.Less(m[j].Install) }
// encode renders the list as rows of strings (header first), in the column
// order defined by the installedMetsensor* constants. The shadow string
// fields are written (rather than re-formatting the floats) so the
// original textual formatting is preserved.
func (m InstalledMetSensorList) encode() [][]string {
	data := [][]string{{
		"Make",
		"Model",
		"Serial",
		"Mark",
		"IMS Comment",
		"Humidity",
		"Pressure",
		"Temperature",
		"Latitude",
		"Longitude",
		"Elevation",
		"Datum",
		"Start Date",
		"End Date",
	}}
	for _, v := range m {
		data = append(data, []string{
			strings.TrimSpace(v.Make),
			strings.TrimSpace(v.Model),
			strings.TrimSpace(v.Serial),
			strings.TrimSpace(v.Mark),
			strings.TrimSpace(v.IMSComment),
			strings.TrimSpace(v.Accuracy.humidity),
			strings.TrimSpace(v.Accuracy.pressure),
			strings.TrimSpace(v.Accuracy.temperature),
			strings.TrimSpace(v.latitude),
			strings.TrimSpace(v.longitude),
			strings.TrimSpace(v.elevation),
			strings.TrimSpace(v.Datum),
			v.Start.Format(DateTimeFormat),
			v.End.Format(DateTimeFormat),
		})
	}
	return data
}
// decode parses rows of strings (header row first) into the list. Each row
// must have exactly installedMetsensorLast fields; numeric and date fields
// are parsed strictly, and the raw strings are kept in the shadow fields
// so encode can reproduce the original formatting.
func (m *InstalledMetSensorList) decode(data [][]string) error {
	var installedMetsensors []InstalledMetSensor
	if len(data) > 1 {
		// data[0] is the header row; parse everything after it.
		for _, d := range data[1:] {
			if len(d) != installedMetsensorLast {
				return fmt.Errorf("incorrect number of installed metsensor fields")
			}
			var err error
			// Accuracy values must be valid floats.
			var h, p, t float64
			if h, err = strconv.ParseFloat(d[installedMetsensorHumidityAccuracy], 64); err != nil {
				return err
			}
			if p, err = strconv.ParseFloat(d[installedMetsensorPressureAccuracy], 64); err != nil {
				return err
			}
			if t, err = strconv.ParseFloat(d[installedMetsensorTemperatureAccuracy], 64); err != nil {
				return err
			}
			// Position values must be valid floats.
			var lat, lon, elev float64
			if lat, err = strconv.ParseFloat(d[installedMetsensorLatitude], 64); err != nil {
				return err
			}
			if lon, err = strconv.ParseFloat(d[installedMetsensorLongitude], 64); err != nil {
				return err
			}
			if elev, err = strconv.ParseFloat(d[installedMetsensorElevation], 64); err != nil {
				return err
			}
			// Installation span must parse with the shared DateTimeFormat.
			var start, end time.Time
			if start, err = time.Parse(DateTimeFormat, d[installedMetsensorStart]); err != nil {
				return err
			}
			if end, err = time.Parse(DateTimeFormat, d[installedMetsensorStop]); err != nil {
				return err
			}
			installedMetsensors = append(installedMetsensors, InstalledMetSensor{
				Install: Install{
					Equipment: Equipment{
						Make:   strings.TrimSpace(d[installedMetsensorMake]),
						Model:  strings.TrimSpace(d[installedMetsensorModel]),
						Serial: strings.TrimSpace(d[installedMetsensorSerial]),
					},
					Span: Span{
						Start: start,
						End:   end,
					},
				},
				Point: Point{
					Latitude:  lat,
					Longitude: lon,
					Elevation: elev,
					Datum:     strings.TrimSpace(d[installedMetsensorDatum]),
					// Shadow fields retain the original text for re-encoding.
					latitude:  strings.TrimSpace(d[installedMetsensorLatitude]),
					longitude: strings.TrimSpace(d[installedMetsensorLongitude]),
					elevation: strings.TrimSpace(d[installedMetsensorElevation]),
				},
				Mark:       strings.TrimSpace(d[installedMetsensorMark]),
				IMSComment: strings.TrimSpace(d[installedMetsensorIMSComment]),
				Accuracy: MetSensorAccuracy{
					Humidity:    h,
					Pressure:    p,
					Temperature: t,
					humidity:    strings.TrimSpace(d[installedMetsensorHumidityAccuracy]),
					pressure:    strings.TrimSpace(d[installedMetsensorPressureAccuracy]),
					temperature: strings.TrimSpace(d[installedMetsensorTemperatureAccuracy]),
				},
			})
		}
		*m = InstalledMetSensorList(installedMetsensors)
	}
	return nil
}
// LoadInstalledMetSensors reads the installed met sensor list from the CSV
// file at path and returns it sorted by installation order.
func LoadInstalledMetSensors(path string) ([]InstalledMetSensor, error) {
	var m []InstalledMetSensor
	if err := LoadList(path, (*InstalledMetSensorList)(&m)); err != nil {
		return nil, err
	}
	sort.Sort(InstalledMetSensorList(m))
	return m, nil
}
package main
import (
"bufio"
"flag"
"fmt"
"image/color"
"io"
"os"
"strconv"
"strings"
"gioui.org/app"
"gioui.org/io/key"
"gioui.org/io/system"
"gioui.org/unit"
gc "github.com/ajstarks/giocanvas"
)
// NameValue defines data
type NameValue struct {
name string
note string
value float64
}
// ChartOptions define all the components of a chart
type ChartOptions struct {
showtitle, showscatter, showarea, showframe, showlegend, showbar bool
title, legend, color string
xlabelInterval int
}
func minmax(data []NameValue) (float64, float64) {
min := data[0].value
max := data[0].value
for _, d := range data {
if d.value > max {
max = d.value
}
if d.value < min {
min = d.value
}
}
return min, max
}
// DataRead reads tab separated values into a NameValue slice
func DataRead(r io.Reader) ([]NameValue, error) {
var d NameValue
var data []NameValue
var err error
scanner := bufio.NewScanner(r)
for scanner.Scan() {
t := scanner.Text()
if len(t) == 0 { // skip blank lines
continue
}
if t[0] == '#' && len(t) > 2 { // process titles
// title = strings.TrimSpace(t[1:])
continue
}
fields := strings.Split(t, "\t")
if len(fields) < 2 {
continue
}
if len(fields) == 3 {
d.note = fields[2]
} else {
d.note = ""
}
d.name = fields[0]
d.value, err = strconv.ParseFloat(fields[1], 64)
if err != nil {
d.value = 0
}
data = append(data, d)
}
err = scanner.Err()
return data, err
}
// xaxis draws the x axis: every interval-th data point gets a centered
// label and a faint vertical grid line, plus the bottom and right borders
// of the plot area. Coordinates are in the canvas's 0-100 percent space.
func xaxis(canvas *gc.Canvas, x, y, width, height float32, interval int, data []NameValue) {
	for i, d := range data {
		// Map index i onto [x, width] so points span the plot horizontally.
		xp := float32(gc.MapRange(float64(i), 0, float64(len(data)-1), float64(x), float64(width)))
		if interval > 0 && i%interval == 0 {
			canvas.TextMid(xp, y-3, 1.5, d.name, color.NRGBA{0, 0, 0, 255})
			canvas.Line(xp, y, xp, height, 0.1, color.NRGBA{0, 0, 0, 128})
		}
	}
	canvas.Line(x, height, width, height, 0.1, color.NRGBA{0, 0, 0, 128})
	canvas.Line(width, height, width, y, 0.1, color.NRGBA{0, 0, 0, 128})
}
// frame draws the plot-area background rectangle in the given color.
func frame(canvas *gc.Canvas, x, y, width, height float32, color color.NRGBA) {
	w := width - x
	h := height - y
	canvas.CornerRect(x, height, w, h, color)
}
// dotchart renders data as a scatter plot of small circles inside the
// plot rectangle (x, y)-(width, height), scaled to the data's min/max.
func dotchart(canvas *gc.Canvas, x, y, width, height float32, data []NameValue, datacolor color.NRGBA) {
	lo, hi := minmax(data)
	last := float64(len(data) - 1)
	for i, item := range data {
		px := float32(gc.MapRange(float64(i), 0, last, float64(x), float64(width)))
		py := float32(gc.MapRange(item.value, lo, hi, float64(y), float64(height)))
		canvas.Circle(px, py, 0.3, datacolor)
	}
}
// barchart renders data as thin vertical bars rising from the plot
// baseline y, scaled to the data's min/max.
func barchart(canvas *gc.Canvas, x, y, width, height float32, data []NameValue, datacolor color.NRGBA) {
	lo, hi := minmax(data)
	last := float64(len(data) - 1)
	for i, item := range data {
		px := float32(gc.MapRange(float64(i), 0, last, float64(x), float64(width)))
		py := float32(gc.MapRange(item.value, lo, hi, float64(y), float64(height)))
		canvas.VLine(px, y, py-y, 0.1, datacolor)
	}
}
// areachart renders data as a filled polygon. The polygon is the data
// polyline closed back down to the baseline y via two extra vertices at
// each end (indices 0 and l+1), and is drawn at half opacity.
func areachart(canvas *gc.Canvas, x, y, width, height float32, data []NameValue, datacolor color.NRGBA) {
	min, max := minmax(data)
	l := len(data)
	// l+2 vertices: baseline-left, the l data points, baseline-right.
	ax := make([]float32, l+2)
	ay := make([]float32, l+2)
	ax[0] = x
	ay[0] = y
	ax[l+1] = width
	ay[l+1] = y
	for i, d := range data {
		xp := float32(gc.MapRange(float64(i), 0, float64(len(data)-1), float64(x), float64(width)))
		yp := float32(gc.MapRange(d.value, min, max, float64(y), float64(height)))
		ax[i+1] = xp
		ay[i+1] = yp
	}
	// datacolor is a value copy, so forcing 50% alpha here does not
	// affect the caller's color.
	datacolor.A = 128
	canvas.Polygon(ax, ay, datacolor)
}
// chart opens a Gio window titled s of w x h pixels and redraws the chart
// layers selected in chartopts on every frame event until the window is
// closed or 'Q'/Escape is pressed (which exits the process).
// It blocks on the window event loop, so callers run it in a goroutine
// alongside app.Main (see main).
func chart(s string, w, h int, data []NameValue, chartopts ChartOptions) {
	width := float32(w)
	height := float32(h)
	size := app.Size(unit.Px(width), unit.Px(height))
	title := app.Title(s)
	win := app.NewWindow(title, size)
	black := color.NRGBA{0, 0, 0, 255}
	datacolor := gc.ColorLookup(chartopts.color)
	framecolor := color.NRGBA{0, 0, 0, 20}
	// One canvas is reused for every frame of this window.
	canvas := gc.NewCanvas(width, height, system.FrameEvent{})
	for e := range win.Events() {
		switch e := e.(type) {
		case system.FrameEvent:
			// Layers are drawn back-to-front in a fixed order:
			// title, legend, axis, frame, then the data layers.
			if chartopts.showtitle {
				canvas.Text(10, 90, 3, chartopts.title, black)
			}
			if chartopts.showlegend {
				canvas.Text(10, 84, 2.5, chartopts.legend, datacolor)
				canvas.HLine(20, 85, 2, 1, datacolor)
			}
			if chartopts.xlabelInterval > 0 {
				xaxis(canvas, 10, 15, 90, 70, chartopts.xlabelInterval, data)
			}
			if chartopts.showframe {
				frame(canvas, 10, 15, 90, 70, framecolor)
			}
			if chartopts.showscatter {
				dotchart(canvas, 10, 15, 90, 70, data, datacolor)
			}
			if chartopts.showarea {
				areachart(canvas, 10, 15, 90, 70, data, datacolor)
			}
			if chartopts.showbar {
				barchart(canvas, 10, 15, 90, 70, data, datacolor)
			}
			e.Frame(canvas.Context.Ops)
		case key.Event:
			switch e.Name {
			case "Q", key.NameEscape:
				os.Exit(0)
			}
		}
	}
}
func main() {
var opts ChartOptions
var w, h int
flag.IntVar(&w, "width", 1200, "canvas width")
flag.IntVar(&h, "height", 900, "canvas height")
flag.IntVar(&opts.xlabelInterval, "xlabel", 0, "show x axis")
flag.StringVar(&opts.title, "chartitle", "", "chart title")
flag.StringVar(&opts.legend, "chartlegend", "", "chart legend")
flag.StringVar(&opts.color, "color", "maroon", "chart data color")
flag.BoolVar(&opts.showtitle, "title", true, "show title")
flag.BoolVar(&opts.showlegend, "legend", false, "show legend")
flag.BoolVar(&opts.showbar, "bar", false, "show bar chart")
flag.BoolVar(&opts.showarea, "area", false, "show area chart")
flag.BoolVar(&opts.showscatter, "scatter", false, "show scatter chart")
flag.BoolVar(&opts.showframe, "frame", false, "show frame")
flag.Parse()
data, err := DataRead(os.Stdin)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
go chart("charts", w, h, data, opts)
app.Main()
} | chartest/main.go | 0.509032 | 0.465752 | main.go | starcoder |
package reports
import (
"bytes"
"encoding/binary"
"fmt"
"math"
"strconv"
)
// ZWaveSensorType describes the sensor type in a sensormultilevel report.
// It is the first byte of the report payload; known codes are enumerated
// below (Undefined through Moisture).
type ZWaveSensorType byte
// Sensor type codes for the Z-Wave multilevel sensor command class.
// Only the first constant carries an expression; the rest implicitly
// repeat "= iota", so values run from Undefined (0) to Moisture (30).
const (
	Undefined = iota
	Temperature
	General
	Luminance
	Power
	RelativeHumidity
	Velocity
	Direction
	AtmosphericPressure
	BarometricPressure
	SolarRadiation
	DewPoint
	RainRate
	TideLevel
	Weight
	Voltage
	Current
	CO2Level
	AirFlow
	TankCapacity
	Distance
	AnglePosition
	Rotation
	WaterTemperature
	SoilTemperature
	SeismicIntensity
	SeismicMagnitude
	Ultraviolet
	ElectricalResistivity
	ElectricalConductivity
	Loudness
	Moisture
)
// SensorMultiLevel is sent from a zwave multilevel sensor to advertise a
// sensor reading. It is produced from raw report bytes by
// NewSensorMultiLevel.
type SensorMultiLevel struct {
	*report
	ValueType  ZWaveSensorType `json:"value_type"`  // sensor type code (payload byte 0)
	Size       byte            `json:"size"`        // value width in bytes (1, 2 or 4)
	Scale      byte            `json:"scale"`       // unit selector, meaning depends on ValueType
	Precision  byte            `json:"precision"`   // number of implied decimal places
	Value      float64         `json:"value"`       // decoded reading, precision applied
	TypeString string          `json:"type_string"` // human-readable name of ValueType
	Unit       string          `json:"unit"`        // human-readable unit for Scale
	data       []byte          // original raw payload
}
// NewSensorMultiLevel decodes raw binary data in to a SensorMultiLevel
func NewSensorMultiLevel(data []byte) (*SensorMultiLevel, error) {
sml := &SensorMultiLevel{data: data}
if len(data) < 2 {
return nil, fmt.Errorf("To short, expected at least 2 bytes, got %d", len(data))
}
sml.ValueType = ZWaveSensorType(data[0])
sml.Size = (data[1] & 0x07) // Size (3 bits)
sml.Scale = (data[1] & 0x18) >> 0x03 // Scale (2 bits)
sml.Precision = (data[1] & 0xE0) >> 0x05 // Precision (3 bits)
if len(data) < 2+int(sml.Size) {
return nil, fmt.Errorf("To short, expected at least %d bytes, got %d", (2 + sml.Size), len(data))
}
buf := bytes.NewReader(data[2:])
var err error
switch sml.Size {
case 1:
val := int8(0)
err = binary.Read(buf, binary.BigEndian, &val)
sml.Value = float64(val)
case 2:
val := int16(0)
err = binary.Read(buf, binary.BigEndian, &val)
sml.Value = float64(val)
case 4:
val := int32(0)
err = binary.Read(buf, binary.BigEndian, &val)
sml.Value = float64(val)
}
if sml.Precision > 0 {
sml.Value /= math.Pow(10, float64(sml.Precision))
}
switch sml.ValueType {
case Temperature:
sml.TypeString = "Temperature"
switch sml.Scale {
case 0:
sml.Unit = "C"
case 1:
sml.Unit = "F"
}
case General:
sml.TypeString = "General"
switch sml.Scale {
case 0:
sml.Unit = "%"
}
case Luminance:
sml.TypeString = "Luminance"
switch sml.Scale {
case 0:
sml.Unit = "%"
case 1:
sml.Unit = "lux"
}
case Power:
sml.TypeString = "Power"
switch sml.Scale {
case 0:
sml.Unit = "W"
case 1:
sml.Unit = "BTU/h"
}
case RelativeHumidity:
sml.TypeString = "RelativeHumidity"
switch sml.Scale {
case 0:
sml.Unit = "%"
}
case Velocity:
sml.TypeString = "Velocity"
switch sml.Scale {
case 0:
sml.Unit = "m/s"
case 1:
sml.Unit = "mph"
}
case Direction:
sml.TypeString = "Direction"
case AtmosphericPressure:
sml.TypeString = "AtmosphericPressure"
switch sml.Scale {
case 0:
sml.Unit = "kPa"
case 1:
sml.Unit = "inHg"
}
case BarometricPressure:
sml.TypeString = "BarometricPressure"
switch sml.Scale {
case 0:
sml.Unit = "kPa"
case 1:
sml.Unit = "inHg"
}
case SolarRadiation:
sml.TypeString = "SolarRadiation"
sml.Unit = "W/m2"
case DewPoint:
sml.TypeString = "DewPoint"
switch sml.Scale {
case 0:
sml.Unit = "C"
case 1:
sml.Unit = "F"
}
case RainRate:
sml.TypeString = "RainRate"
switch sml.Scale {
case 0:
sml.Unit = "mm/h"
case 1:
sml.Unit = "in/h"
}
case TideLevel:
sml.TypeString = "TideLevel"
switch sml.Scale {
case 0:
sml.Unit = "m"
case 1:
sml.Unit = "ft"
}
case Weight:
sml.TypeString = "Weight"
switch sml.Scale {
case 0:
sml.Unit = "kg"
case 1:
sml.Unit = "lb"
}
case Voltage:
sml.TypeString = "Voltage"
switch sml.Scale {
case 0:
sml.Unit = "V"
case 1:
sml.Unit = "mV"
}
case Current:
sml.TypeString = "Current"
switch sml.Scale {
case 0:
sml.Unit = "A"
case 1:
sml.Unit = "mA"
}
case CO2Level:
sml.TypeString = "CO2"
sml.Unit = "ppm"
case AirFlow:
sml.TypeString = "AirFlow"
switch sml.Scale {
case 0:
sml.Unit = "m3/h"
case 1:
sml.Unit = "cfm"
}
case TankCapacity:
sml.TypeString = "TankCapacity"
switch sml.Scale {
case 0:
sml.Unit = "l"
case 1:
sml.Unit = "cbm"
case 2:
sml.Unit = "gal"
}
case Distance:
sml.TypeString = "Distance"
switch sml.Scale {
case 0:
sml.Unit = "m"
case 1:
sml.Unit = "cm"
case 2:
sml.Unit = "ft"
}
case AnglePosition:
sml.TypeString = "AnglePosition"
switch sml.Scale {
case 0:
sml.Unit = "%"
case 1:
sml.Unit = "deg N"
case 2:
sml.Unit = "deg S"
}
case Rotation:
sml.TypeString = "Rotation"
switch sml.Scale {
case 0:
sml.Unit = "rpm"
case 1:
sml.Unit = "hz"
}
case WaterTemperature:
sml.TypeString = "WaterTemperature"
switch sml.Scale {
case 0:
sml.Unit = "C"
case 1:
sml.Unit = "F"
}
case SoilTemperature:
sml.TypeString = "SoilTemperature"
switch sml.Scale {
case 0:
sml.Unit = "C"
case 1:
sml.Unit = "F"
}
case SeismicIntensity:
sml.TypeString = "SeismicIntensity"
switch sml.Scale {
case 0:
sml.Unit = "mercalli"
case 1:
sml.Unit = "EU macroseismic"
case 2:
sml.Unit = "liedu"
case 3:
sml.Unit = "shindo"
}
case SeismicMagnitude:
sml.TypeString = "SeismicMagnitude"
switch sml.Scale {
case 0:
sml.Unit = "local"
case 1:
sml.Unit = "moment"
case 2:
sml.Unit = "surface wave"
case 3:
sml.Unit = "body wave"
}
case Ultraviolet:
sml.TypeString = "Ultraviolet"
sml.Unit = ""
case ElectricalResistivity:
sml.TypeString = "ElectricalResistivity"
sml.Unit = "ohm"
case ElectricalConductivity:
sml.TypeString = "ElectricalConductivity"
sml.Unit = "siemens/m"
case Loudness:
sml.TypeString = "Loudness"
switch sml.Scale {
case 0:
sml.Unit = "db"
case 1:
sml.Unit = "dBA"
}
case Moisture:
sml.TypeString = "Moisture"
switch sml.Scale {
case 0:
sml.Unit = "%"
case 1:
sml.Unit = "content"
case 2:
sml.Unit = "k ohm"
case 3:
sml.Unit = "water activity"
}
default:
sml.TypeString = "Unknown (" + strconv.Itoa(int(sml.ValueType)) + ")"
}
return sml, err
}
func (sml SensorMultiLevel) String() string {
return fmt.Sprintf("%f %s %s", sml.Value, sml.TypeString, sml.Unit)
} | commands/reports/sensor_multi_level.go | 0.583322 | 0.470189 | sensor_multi_level.go | starcoder |
package polling
import (
"encoding/json"
"fmt"
"testing"
"github.com/ingrammicro/concerto/api/types"
"github.com/ingrammicro/concerto/utils"
"github.com/stretchr/testify/assert"
)
// PingMocked drives PollingService.Ping against a mocked transport that
// returns a marshalled copy of pingIn with HTTP 201, and returns the
// decoded ping.
// NOTE(review): the final two assertions check pingIn (the input) rather
// than pingOut, so they are tautological — confirm intent before relying
// on them.
func PingMocked(t *testing.T, pingIn *types.PollingPing) *types.PollingPing {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dOut, err := json.Marshal(pingIn)
	assert.Nil(err, "Polling test data corrupted")
	// call service
	payload := make(map[string]interface{})
	cs.On("Post", "/command_polling/pings", &payload).Return(dOut, 201, nil)
	pingOut, status, err := ds.Ping()
	assert.Nil(err, "Error getting ping")
	assert.Equal(status, 201, "Ping returned invalid response")
	assert.Equal(pingIn.PendingCommands, true, "Ping returned no pending command available")
	pingIn.PendingCommands = false
	assert.Equal(pingIn.PendingCommands, false, "Ping returned pending command available")
	return pingOut
}
// PingFailErrMocked verifies that Ping propagates a transport error from
// the mocked service (nil body, 404, "mocked error") and yields nil output.
func PingFailErrMocked(t *testing.T, pingIn *types.PollingPing) *types.PollingPing {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(pingIn)
	assert.Nil(err, "Polling test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	payload := make(map[string]interface{})
	cs.On("Post", "/command_polling/pings", &payload).Return(dIn, 404, fmt.Errorf("mocked error"))
	pingOut, _, err := ds.Ping()
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(pingOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return pingOut
}
// PingFailStatusMocked verifies that Ping surfaces a non-2xx status (499)
// as both the returned status code and an error mentioning it.
func PingFailStatusMocked(t *testing.T, pingIn *types.PollingPing) *types.PollingPing {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(pingIn)
	assert.Nil(err, "Polling test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	payload := make(map[string]interface{})
	cs.On("Post", "/command_polling/pings", &payload).Return(dIn, 499, fmt.Errorf("error 499 Mocked error"))
	pingOut, status, err := ds.Ping()
	assert.Equal(status, 499, "Ping returned an unexpected status code")
	assert.NotNil(err, "We are expecting a status code error")
	assert.Nil(pingOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return pingOut
}
// PingFailJSONMocked verifies that Ping reports an unmarshalling error
// when the mocked service returns a malformed JSON body.
func PingFailJSONMocked(t *testing.T, pingIn *types.PollingPing) *types.PollingPing {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	payload := make(map[string]interface{})
	cs.On("Post", "/command_polling/pings", &payload).Return(dIn, 201, nil)
	pingOut, _, err := ds.Ping()
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(pingOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return pingOut
}
// GetNextCommandMocked drives GetNextCommand against a mocked GET that
// returns a marshalled copy of commandIn with HTTP 200, asserting the
// round-tripped command equals the input.
func GetNextCommandMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dOut, err := json.Marshal(commandIn)
	assert.Nil(err, "GetNextCommand test data corrupted")
	// call service
	cs.On("Get", "/command_polling/command").Return(dOut, 200, nil)
	commandOut, status, err := ds.GetNextCommand()
	assert.Nil(err, "Error getting polling command")
	assert.Equal(status, 200, "GetNextCommand returned invalid response")
	assert.Equal(*commandIn, *commandOut, "GetNextCommand returned different nodes")
	return commandOut
}
// GetNextCommandFailErrMocked verifies that GetNextCommand propagates a
// transport error (nil body, 404, "mocked error") and yields nil output.
func GetNextCommandFailErrMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(commandIn)
	assert.Nil(err, "GetNextCommand test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	cs.On("Get", "/command_polling/command").Return(dIn, 404, fmt.Errorf("mocked error"))
	commandOut, _, err := ds.GetNextCommand()
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return commandOut
}
// GetNextCommandFailStatusMocked verifies that GetNextCommand surfaces a
// non-2xx status (499) as both the status code and an error mentioning it.
func GetNextCommandFailStatusMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(commandIn)
	assert.Nil(err, "GetNextCommand test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	cs.On("Get", "/command_polling/command").Return(dIn, 499, fmt.Errorf("error 499 Mocked error"))
	commandOut, status, err := ds.GetNextCommand()
	assert.Equal(status, 499, "GetNextCommand returned an unexpected status code")
	assert.NotNil(err, "We are expecting a status code error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return commandOut
}
// GetNextCommandFailJSONMocked verifies that GetNextCommand reports an
// unmarshalling error when the mocked service returns malformed JSON.
func GetNextCommandFailJSONMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	cs.On("Get", "/command_polling/command").Return(dIn, 200, nil)
	commandOut, _, err := ds.GetNextCommand()
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return commandOut
}
// UpdateCommandMocked drives UpdateCommand against a mocked PUT keyed by
// commandIn.ID that echoes the marshalled command with HTTP 200.
func UpdateCommandMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dOut, err := json.Marshal(commandIn)
	assert.Nil(err, "UpdateCommand test data corrupted")
	// call service
	payload := make(map[string]interface{})
	cs.On("Put", fmt.Sprintf("/command_polling/commands/%s", commandIn.ID), &payload).Return(dOut, 200, nil)
	commandOut, status, err := ds.UpdateCommand(&payload, commandIn.ID)
	assert.Nil(err, "Error getting polling command")
	assert.Equal(status, 200, "UpdateCommand returned invalid response")
	assert.Equal(*commandIn, *commandOut, "UpdateCommand returned different nodes")
	return commandOut
}
// UpdateCommandFailErrMocked verifies that UpdateCommand propagates a
// transport error (nil body, 400, "mocked error") and yields nil output.
func UpdateCommandFailErrMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(commandIn)
	assert.Nil(err, "UpdateCommand test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	payload := make(map[string]interface{})
	cs.On("Put", fmt.Sprintf("/command_polling/commands/%s", commandIn.ID), &payload).Return(dIn, 400, fmt.Errorf("mocked error"))
	commandOut, _, err := ds.UpdateCommand(&payload, commandIn.ID)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return commandOut
}
// UpdateCommandFailStatusMocked verifies that UpdateCommand surfaces a
// non-2xx status (499) as both the status code and an error mentioning it.
func UpdateCommandFailStatusMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(commandIn)
	assert.Nil(err, "UpdateCommand test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	payload := make(map[string]interface{})
	cs.On("Put", fmt.Sprintf("/command_polling/commands/%s", commandIn.ID), &payload).Return(dIn, 499, fmt.Errorf("error 499 Mocked error"))
	commandOut, status, err := ds.UpdateCommand(&payload, commandIn.ID)
	assert.Equal(status, 499, "UpdateCommand returned an unexpected status code")
	assert.NotNil(err, "We are expecting a status code error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return commandOut
}
// UpdateCommandFailJSONMocked verifies that UpdateCommand reports an
// unmarshalling error when the mocked service returns malformed JSON.
func UpdateCommandFailJSONMocked(t *testing.T, commandIn *types.PollingCommand) *types.PollingCommand {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// wrong json
	dIn := []byte{10, 20, 30}
	// call service
	payload := make(map[string]interface{})
	cs.On("Put", fmt.Sprintf("/command_polling/commands/%s", commandIn.ID), &payload).Return(dIn, 200, nil)
	commandOut, _, err := ds.UpdateCommand(&payload, commandIn.ID)
	assert.NotNil(err, "We are expecting a marshalling error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
	return commandOut
}
// ReportBootstrapLogMocked drives ReportBootstrapLog against a mocked POST
// that echoes the marshalled report with HTTP 201.
//
// Fix: dropped the no-op fmt.Sprintf around the constant URL path
// (staticcheck S1039: Sprintf with no format verbs).
func ReportBootstrapLogMocked(t *testing.T, commandIn *types.PollingContinuousReport) *types.PollingContinuousReport {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dOut, err := json.Marshal(commandIn)
	assert.Nil(err, "ReportBootstrapLog test data corrupted")
	// call service
	payload := make(map[string]interface{})
	cs.On("Post", "/command_polling/bootstrap_logs", &payload).Return(dOut, 201, nil)
	commandOut, status, err := ds.ReportBootstrapLog(&payload)
	assert.Nil(err, "Error posting report command")
	assert.Equal(status, 201, "ReportBootstrapLog returned invalid response")
	assert.Equal(commandOut.Stdout, "Bootstrap log created", "ReportBootstrapLog returned unexpected message")
	return commandOut
}
// ReportBootstrapLogFailErrMocked verifies that ReportBootstrapLog
// propagates a transport error (nil body, 400, "mocked error") and yields
// nil output.
//
// Fix: dropped the no-op fmt.Sprintf around the constant URL path
// (staticcheck S1039: Sprintf with no format verbs).
func ReportBootstrapLogFailErrMocked(t *testing.T, commandIn *types.PollingContinuousReport) *types.PollingContinuousReport {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(commandIn)
	assert.Nil(err, "ReportBootstrapLog test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	payload := make(map[string]interface{})
	cs.On("Post", "/command_polling/bootstrap_logs", &payload).Return(dIn, 400, fmt.Errorf("mocked error"))
	commandOut, _, err := ds.ReportBootstrapLog(&payload)
	assert.NotNil(err, "We are expecting an error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Equal(err.Error(), "mocked error", "Error should be 'mocked error'")
	return commandOut
}
// ReportBootstrapLogFailStatusMocked verifies that ReportBootstrapLog
// surfaces a non-2xx status (499) as both the status code and an error
// mentioning it.
//
// Fix: dropped the no-op fmt.Sprintf around the constant URL path
// (staticcheck S1039: Sprintf with no format verbs).
func ReportBootstrapLogFailStatusMocked(t *testing.T, commandIn *types.PollingContinuousReport) *types.PollingContinuousReport {
	assert := assert.New(t)
	// wire up
	cs := &utils.MockConcertoService{}
	ds, err := NewPollingService(cs)
	assert.Nil(err, "Couldn't load polling service")
	assert.NotNil(ds, "Polling service not instanced")
	// to json
	dIn, err := json.Marshal(commandIn)
	assert.Nil(err, "ReportBootstrapLog test data corrupted")
	// deliberately discard the body: the mock must return a nil payload
	dIn = nil
	// call service
	payload := make(map[string]interface{})
	cs.On("Post", "/command_polling/bootstrap_logs", &payload).Return(dIn, 499, fmt.Errorf("error 499 Mocked error"))
	commandOut, status, err := ds.ReportBootstrapLog(&payload)
	assert.Equal(status, 499, "ReportBootstrapLog returned an unexpected status code")
	assert.NotNil(err, "We are expecting a status code error")
	assert.Nil(commandOut, "Expecting nil output")
	assert.Contains(err.Error(), "499", "Error should contain http code 499")
	return commandOut
}
// ReportBootstrapLogFailJSONMocked test mocked function
func ReportBootstrapLogFailJSONMocked(t *testing.T, commandIn *types.PollingContinuousReport) *types.PollingContinuousReport {
assert := assert.New(t)
// wire up
cs := &utils.MockConcertoService{}
ds, err := NewPollingService(cs)
assert.Nil(err, "Couldn't load polling service")
assert.NotNil(ds, "Polling service not instanced")
// wrong json
dIn := []byte{10, 20, 30}
// call service
payload := make(map[string]interface{})
cs.On("Post", fmt.Sprintf("/command_polling/bootstrap_logs"), &payload).Return(dIn, 201, nil)
commandOut, _, err := ds.ReportBootstrapLog(&payload)
assert.NotNil(err, "We are expecting a marshalling error")
assert.Nil(commandOut, "Expecting nil output")
assert.Contains(err.Error(), "invalid character", "Error message should include the string 'invalid character'")
return commandOut
} | api/polling/polling_api_mocked.go | 0.694406 | 0.522629 | polling_api_mocked.go | starcoder |
package boolq
import (
"fmt"
"github.com/kylebrandt/boolq/parse"
)
// An Asker is something that can be queried using boolq. The string passed
// to Ask will be a leaf term of an expression. For example with the
// expression `(foo:bar AND baz:biz)` Ask will be called twice, once with
// the argument "foo:bar" and another time with the argument "baz:biz".
// Ask returns whether the term holds; a non-nil error aborts evaluation.
type Asker interface {
	Ask(string) (bool, error)
}
// AskExpr parses expr and evaluates it against asker, reporting whether
// the expression holds for that asker. Parse errors and errors returned
// by asker.Ask are propagated.
func AskExpr(expr string, asker Asker) (bool, error) {
	tree, err := parse.Parse(expr)
	if err != nil {
		return false, err
	}
	return walk(tree.Root, asker)
}
// AskParsedExpr is like AskExpr but takes an expression that has already
// been parsed by Parse. This is useful if you are evaluating the same
// expression multiple times. An empty (root-less) tree evaluates to true.
func AskParsedExpr(q *Tree, asker Asker) (bool, error) {
	if q.Tree.Root == nil {
		return true, nil
	}
	return walk(q.Root, asker)
}
// Tree wraps a parsed boolq expression so it can be evaluated repeatedly
// with AskParsedExpr. Obtain one via Parse.
type Tree struct {
	*parse.Tree
}
// Parse parses an expression and returns the parsed expression.
// It can be used with AskParsedExpr. An empty string yields an empty
// tree, which AskParsedExpr evaluates to true.
func Parse(text string) (*Tree, error) {
	if text == "" {
		return &Tree{Tree: &parse.Tree{}}, nil
	}
	parsed, err := parse.Parse(text)
	return &Tree{Tree: parsed}, err
}
// walk recursively evaluates a parse-tree node against the asker:
// leaf AskNodes are answered by asker.Ask, binary and unary nodes
// dispatch to their dedicated walkers, anything else is an error.
func walk(node parse.Node, asker Asker) (bool, error) {
	switch node := node.(type) {
	case *parse.AskNode:
		return asker.Ask(node.Text)
	case *parse.BinaryNode:
		return walkBinary(node, asker)
	case *parse.UnaryNode:
		return walkUnary(node, asker)
	default:
		return false, fmt.Errorf("can not walk type %v", node.Type())
	}
}
// walkBinary evaluates an AND/OR node. Both operands are always walked
// before the operator is applied (no short-circuiting), so every Ask
// call is made regardless of the left-hand result.
func walkBinary(node *parse.BinaryNode, asker Asker) (bool, error) {
	left, err := walk(node.Args[0], asker)
	if err != nil {
		return false, err
	}
	right, err := walk(node.Args[1], asker)
	if err != nil {
		return false, err
	}
	switch node.OpStr {
	case "AND":
		return left && right, nil
	case "OR":
		return left || right, nil
	}
	return false, fmt.Errorf("Unrecognized operator: %v", node.OpStr)
}
func walkUnary(node *parse.UnaryNode, asker Asker) (bool, error) {
r, err := walk(node.Arg, asker)
if err != nil {
return false, err
}
if node.OpStr == "!" {
return !r, nil
}
return false, fmt.Errorf("unknown unary operator: %v", node.OpStr)
} | vendor/github.com/kylebrandt/boolq/boolq.go | 0.733833 | 0.505554 | boolq.go | starcoder |
package treap
import (
"math"
"math/rand"
"time"
)
const (
	// Random priorities in the Treap will be in the range of [1, MaxInt64].
	maxPriority = math.MaxInt64
	minPriority = 1
	// The priority given to Treap nodes during deletion: strictly below
	// minPriority, so a node being deleted always rotates down to a leaf.
	deletePriority = 0
)
// Treap is a balanced binary search tree: a BST on value combined with a
// max-heap on randomly assigned priorities.
type Treap struct {
	root *node
}

// node represents a value and its priority in a Treap. Values obey BST
// ordering; priorities obey the max-heap property.
type node struct {
	value    string
	priority int64
	left     *node
	right    *node
}
// NewTreap returns a new, empty Treap.
func NewTreap() *Treap {
	t := Treap{}
	return &t
}
// Search reports whether the given value is present in the Treap.
func (t *Treap) Search(value string) bool {
	if t.root == nil {
		return false
	}
	found := binarySearch(t.root, value)
	return found != nil
}
// Insert inserts the given value into the Treap with a random priority.
func (t *Treap) Insert(value string) {
	// NOTE(review): reseeding the global PRNG on every insert is wasteful,
	// and rapid inserts within the same clock tick can produce correlated
	// priorities; seeding once at startup would be preferable. rand.Seed is
	// also deprecated as of Go 1.20 — confirm the module's Go version
	// before changing, since removing it would orphan the "time" import.
	rand.Seed(time.Now().UnixNano())
	t.root = insert(t.root, value,
		rand.Int63n(maxPriority-minPriority)+minPriority)
}
// insert inserts a node with the passed value and priority into the
// subtree rooted at n, returning the (possibly new) subtree root.
// Duplicate values are ignored. After a recursive insert, a rotation
// restores the max-heap property when the child's priority exceeds n's.
func insert(n *node, value string, priority int64) *node {
	if n == nil {
		return &node{
			value:    value,
			priority: priority,
		}
	}
	if value == n.value {
		return n // duplicate: leave the tree unchanged
	} else if value < n.value {
		n.left = insert(n.left, value, priority)
		if n.priority < n.left.priority {
			n = rotateRight(n, n.left) // pull higher-priority child up
		}
	} else {
		n.right = insert(n.right, value, priority)
		if n.priority < n.right.priority {
			n = rotateLeft(n, n.right) // pull higher-priority child up
		}
	}
	return n
}
// Delete deletes the given value from the Treap; absent values are a no-op.
func (t *Treap) Delete(value string) {
	t.root = delete(t.root, value)
}
// delete finds and deletes the node with the given value from the subtree
// rooted at n, returning the new subtree root. The target's priority is
// dropped to deletePriority and the node is rotated down (toward the
// higher-priority child) until it becomes a leaf, where it is removed.
func delete(n *node, value string) *node {
	if n == nil {
		return nil
	}
	// delete the node with value after it's been rotated down to a leaf
	if n.left == nil && n.right == nil && n.value == value {
		return nil
	}
	if n.value == value {
		n.priority = deletePriority
		// Rotate toward whichever child exists / has higher priority,
		// then keep deleting in the subtree the target moved into.
		if n.right == nil && n.left != nil {
			pivot := rotateRight(n, n.left)
			pivot.right = delete(n, value)
			return pivot
		} else if n.left == nil && n.right != nil {
			pivot := rotateLeft(n, n.right)
			pivot.left = delete(n, value)
			return pivot
		} else if n.right.priority > n.left.priority {
			pivot := rotateLeft(n, n.right)
			pivot.left = delete(n, value)
			return pivot
		} else {
			pivot := rotateRight(n, n.left)
			pivot.right = delete(n, value)
			return pivot
		}
	}
	// Not the target: recurse down the BST-ordered side.
	if value < n.value {
		n.left = delete(n.left, value)
	} else {
		n.right = delete(n.right, value)
	}
	return n
}
// binarySearch walks the BST from n looking for value, returning the
// matching node or nil when the value is absent.
func binarySearch(n *node, value string) *node {
	cur := n
	for cur != nil {
		switch {
		case value == cur.value:
			return cur
		case value < cur.value:
			cur = cur.left
		default:
			cur = cur.right
		}
	}
	return nil
}
// rotateRight rotates the subtree right: pivot (root's left child)
// becomes the new root, with the old root as its right child.
// The pivot is returned.
func rotateRight(root, pivot *node) *node {
	root.left, pivot.right = pivot.right, root
	return pivot
}
// rotateLeft does a tree rotation to the left given the passed root and pivot.
// After the rotation, the root will be the left child of the pivot.
// The pivot will be returned.
func rotateLeft(root, pivot *node) *node {
root.right = pivot.left
pivot.left = root
return pivot
} | treap.go | 0.830353 | 0.586464 | treap.go | starcoder |
// Package sgrad provides facilities for the computation of a finite-difference
// gradient image from a source SippImage and a 2x2 kernel.
// There are two versions, one using float64s and complex128s, and another using
// int32s. The latter makes it easier to guarantee bit accuracy and numerical
// stability. It is not intended as a performance optimisation.
package sgrad
import (
"image"
"math"
)
import (
. "github.com/Causticity/sipp/scomplex"
. "github.com/Causticity/sipp/simage"
)
// SippGradKernels are 2x2 arrays of complex numbers, defined in the same way as
// images are stored in memory, i.e. in row-major order from the top-left corner
// down.
type SippGradKernel [2][2]complex128
type SippGradInt32Kernel [2][2]ComplexInt32
// defaultKernel is the standard 2x2 finite-difference kernel: the real
// part is the diagonal difference (belowRight - pix) and the imaginary
// part the anti-diagonal difference (right - below).
var defaultKernel = SippGradKernel{
	{-1 + 0i, 0 + 1i},
	{0 - 1i, 1 + 0i},
}

// defaultInt32Kernel is the integer twin of defaultKernel.
var defaultInt32Kernel = SippGradInt32Kernel{
	{{-1, 0}, {0, 1}},
	{{0, -1}, {1, 0}},
}
// TODO: The non-int32 functions below could be reimplemented to use only
// floating-point arithmetic, with a conversion to complex only at the end. As
// it is now it goes back and forth unnecessarily. This is optimisation and
// should be done only with proper profiling and a specific performance target.
// byKernel applies a SippGradKernel to a pixel and its right, below, and
// below-right neighbours, producing a finite difference as a complex
// number. Terms are accumulated left-to-right, matching the original
// expression's evaluation order.
func byKernel(kern SippGradKernel, pix, right, below, belowRight float64) complex128 {
	sum := kern[0][0] * complex(pix, 0)
	sum += kern[0][1] * complex(right, 0)
	sum += kern[1][0] * complex(below, 0)
	sum += kern[1][1] * complex(belowRight, 0)
	return sum
}
// byInt32Kernel applies a SippGradInt32Kernel to a pixel and its
// neighbours to produce a finite difference as a ComplexInt32.
// The chain is right-associated: kern00*pix + (kern01*right +
// (kern10*below + kern11*belowRight)); with int32 arithmetic the
// grouping can matter at overflow boundaries, so preserve it.
func byInt32Kernel(kern SippGradInt32Kernel,
	pix, right, below, belowRight int32) ComplexInt32 {
	return kern[0][0].Mult(ComplexInt32{pix, 0}).Add(
		kern[0][1].Mult(ComplexInt32{right, 0}).Add(
			kern[1][0].Mult(ComplexInt32{below, 0}).Add(
				kern[1][1].Mult(ComplexInt32{belowRight, 0}))))
}
// FdgradKernel uses a SippGradKernel to create a finite-differences complex
// gradient image, one pixel narrower and shorter than the original. We'd
// rather reduce the size of the output image than arbitrarily wrap around or
// extend the source image, as any such procedure could introduce errors into
// the statistics. Alongside the pixels, the per-component extrema and the
// maximum modulus are recorded on the result.
func FdgradKernel(src SippImage, kern SippGradKernel) (grad *ComplexImage) {
	// The gradient is one pixel smaller than the source in each dimension.
	bounds := src.Bounds()
	width, height := bounds.Dx()-1, bounds.Dy()-1
	grad = new(ComplexImage)
	grad.Rect = image.Rect(0, 0, width, height)
	grad.Pix = make([]complex128, width*height)
	// Seed the running extrema from the far ends of the float64 range so the
	// first pixel overwrites them.
	grad.MinRe, grad.MinIm = math.MaxFloat64, math.MaxFloat64
	grad.MaxRe, grad.MaxIm = -math.MaxFloat64, -math.MaxFloat64
	grad.MaxMod = 0.0
	out := 0
	for row := 0; row < height; row++ {
		for col := 0; col < width; col++ {
			v := byKernel(kern, src.Val(col, row),
				src.Val(col+1, row), src.Val(col, row+1), src.Val(col+1, row+1))
			grad.Pix[out] = v
			out++
			re, im := real(v), imag(v)
			if re < grad.MinRe {
				grad.MinRe = re
			}
			if re > grad.MaxRe {
				grad.MaxRe = re
			}
			if im < grad.MinIm {
				grad.MinIm = im
			}
			if im > grad.MaxIm {
				grad.MaxIm = im
			}
			// Track the maximum squared modulus; the square root is taken
			// once at the end.
			if modsq := re*re + im*im; modsq > grad.MaxMod {
				grad.MaxMod = modsq
			}
		}
	}
	grad.MaxMod = math.Sqrt(grad.MaxMod)
	return grad
}
// FdgradInt32Kernel uses a SippGradInt32Kernel to create a finite-differences
// ComplexInt32 gradient image, one pixel narrower and shorter than the
// original. We'd rather reduce the size of the output image than arbitrarily
// wrap around or extend the source image, as any such procedure could
// introduce errors into the statistics.
func FdgradInt32Kernel(src SippImage,
	kern SippGradInt32Kernel) (grad *ComplexInt32Image) {
	// Create the dst image from the bounds of the src
	srect := src.Bounds()
	grad = new(ComplexInt32Image)
	grad.Rect = image.Rect(0, 0, srect.Dx()-1, srect.Dy()-1)
	grad.Pix = make([]ComplexInt32, grad.Rect.Dx()*grad.Rect.Dy())
	// Seed the running extrema from the far ends of the int32 range so the
	// first pixel overwrites them.
	grad.MinRe = math.MaxInt32
	grad.MaxRe = math.MinInt32
	grad.MinIm = math.MaxInt32
	grad.MaxIm = math.MinInt32
	grad.MaxMod = 0.0
	dsti := 0
	for y := 0; y < grad.Rect.Dy(); y++ {
		for x := 0; x < grad.Rect.Dx(); x++ {
			val := byInt32Kernel(kern, src.IntVal(x, y),
				src.IntVal(x+1, y), src.IntVal(x, y+1), src.IntVal(x+1, y+1))
			grad.Pix[dsti] = val
			dsti++
			// Convert to float64 BEFORE squaring: the previous
			// float64(val.Re*val.Re) squared in int32 arithmetic, which
			// silently overflows for |Re| > 46340 and corrupts MaxMod.
			re := float64(val.Re)
			im := float64(val.Im)
			modsq := re*re + im*im
			// store the maximum squared value, then take the root afterwards
			if modsq > grad.MaxMod {
				grad.MaxMod = modsq
			}
			if val.Re < grad.MinRe {
				grad.MinRe = val.Re
			}
			if val.Re > grad.MaxRe {
				grad.MaxRe = val.Re
			}
			if val.Im < grad.MinIm {
				grad.MinIm = val.Im
			}
			if val.Im > grad.MaxIm {
				grad.MaxIm = val.Im
			}
		}
	}
	grad.MaxMod = math.Sqrt(grad.MaxMod)
	return
}
// Fdgrad computes a finite-differences gradient of src using the default
// SippGradKernel. See FdgradKernel for details.
func Fdgrad(src SippImage) (grad *ComplexImage) {
	return FdgradKernel(src, defaultKernel)
}
// FdgradInt32 computes a finite-differences gradient of src using the default
// SippGradInt32Kernel. See FdgradInt32Kernel for details.
func FdgradInt32(src SippImage) (grad *ComplexInt32Image) {
	return FdgradInt32Kernel(src, defaultInt32Kernel)
}
package main
import (
"crypto/sha256"
"fmt"
"math/big"
"golang.org/x/crypto/ripemd160"
)
// Point is a point on the secp256k1 curve, held as arbitrary-precision
// coordinates. The (0, 0) point is used as the group identity in this
// program (see the identity variable below).
type Point struct {
	x, y *big.Int
}
// NewPoint returns a Point with freshly allocated coordinates set to zero.
func NewPoint() Point {
	var P Point
	P.x = new(big.Int)
	P.y = new(big.Int)
	return P
}
// String implements fmt.Stringer so points print readably in debug output.
func (P Point) String() string {
	return fmt.Sprintf("Point(%d, %d)", P.x, P.y)
}
// Equals reports whether P and Q have the same coordinates.
func (P Point) Equals(Q Point) bool {
	if P.x.Cmp(Q.x) != 0 {
		return false
	}
	return P.y.Cmp(Q.y) == 0
}
// Set copies Q's coordinates into P's big.Int values and returns P.
// A value receiver works here because the fields are pointers: the caller's
// underlying big.Ints are mutated through them.
func (P Point) Set(Q Point) Point {
	P.x.Set(Q.x)
	P.y.Set(Q.y)
	return P
}
// p is the secp256k1 field prime; its value is computed in main.
var p = new(big.Int)
// identity is the group identity ("point at infinity"), represented as (0, 0).
var identity = NewPoint()
/*
 * Elliptic curve point addition. Unneeded side-cases are omitted for
 * simplicity. See:
 * https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication#Point_addition
 * https://stackoverflow.com/a/31089415
 * https://en.wikipedia.org/wiki/Modular_multiplicative_inverse#Using_Euler's_theorem
 * https://crypto.stanford.edu/pbc/notes/elliptic/explicit.html
 */
func (R Point) ECPointAdd(P, Q Point) Point {
	// Adding the identity leaves the other operand unchanged.
	if P.Equals(identity) {
		return R.Set(Q)
	}
	s := new(big.Int) // The slope
	if P.Equals(Q) {
		// Doubling: s = 3Px^2 / 2Py mod p.
		// (2Py)^-1 mod p is computed first, then multiplied by 3Px^2. The
		// product is not reduced mod p here; the Mod calls on x and y below
		// absorb the excess.
		s.Mul(big.NewInt(2), P.y)
		s.ModInverse(s, p)
		s.Mul(s, big.NewInt(3))
		s.Mul(s, P.x)
		s.Mul(s, P.x)
	} else {
		// Chord: s = (Qy - Py) / (Qx - Px) mod p.
		// NOTE(review): P = -Q (same x, opposite y) is among the omitted
		// side-cases; it would take the inverse of zero here — confirm the
		// callers never produce it.
		s.Sub(Q.x, P.x)
		s.Mod(s, p)
		s.ModInverse(s, p)
		s.Mul(s, new(big.Int).Sub(Q.y, P.y))
	}
	x := new(big.Int)
	y := new(big.Int)
	// x = (s^2 - Px - Qx) mod p
	x.Mul(s, s)
	x.Sub(x, P.x)
	x.Sub(x, Q.x)
	x.Mod(x, p)
	// y = s*(Px - x) - Py mod p
	y.Sub(P.x, x)
	y.Mul(y, s)
	y.Sub(y, P.y)
	y.Mod(y, p)
	// Store the result into R's backing big.Ints and return R.
	return R.Set(Point{x, y})
}
/*
 * Elliptic curve point multiplication. This is an implementation of
 * the Double-and-add algorithm with increasing index described here:
 * https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication#Double-and-add
 */
func (Q Point) ECPointMul(d *big.Int, P Point) Point {
	// N runs through the doublings P, 2P, 4P, ...; Q accumulates the result.
	N := NewPoint().Set(P)
	Q.Set(identity)
	// Scan d's bits from least to most significant, adding the current
	// doubling whenever the bit is set.
	for i := 0; i < d.BitLen(); i++ {
		if d.Bit(i) == 1 {
			Q.ECPointAdd(Q, N)
		}
		N.ECPointAdd(N, N)
	}
	return Q
}
/*
 * The compressed serialization of the public key. See:
 * Mastering Bitcoin, pages 73-75.
 * https://www.ntirawen.com/2019/03/bitcoin-compressed-and-uncompressed.html
 * https://www.secg.org/sec2-v2.pdf - Section 2.4.1.
 * https://www.secg.org/sec1-v2.pdf - Section 2.3.3.
 */
func (R Point) Serialize() []byte {
	// b is the big-endian x coordinate, at most 32 bytes for a 256-bit curve.
	b := R.x.Bytes()
	// a holds the one-byte prefix plus enough zero bytes to left-pad x to a
	// fixed width, for a 33-byte total.
	a := make([]byte, 33-len(b))
	// Prefix 0x02 for even y, 0x03 for odd y (SEC1 compressed form).
	a[0] = byte(2 + R.y.Bit(0))
	return append(a, b...)
}
/*
 * r160 returns the RIPEMD-160 hash of data.
 */
func r160(data []byte) []byte {
	h := ripemd160.New()
	h.Write(data)
	return h.Sum(nil)
}
/*
 * s256 returns the SHA-256 hash of data.
 */
func s256(data []byte) []byte {
	digest := sha256.Sum256(data)
	return digest[:]
}
/*
 * base58 encodes data in Bitcoin's Base58 format. Each leading zero byte of
 * the input is encoded as the zero symbol '1'. See:
 * https://en.bitcoin.it/wiki/Base58Check_encoding#Base58_symbol_chart
 */
func base58(data []byte) string {
	base := big.NewInt(58)
	const alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZ" +
		"abcdefghijkmnopqrstuvwxyz"
	var s []byte
	// Repeatedly divide by 58, collecting digits least-significant first.
	x := new(big.Int).SetBytes(data)
	r := new(big.Int)
	for x.Sign() != 0 {
		x.DivMod(x, base, r)
		s = append(s, alphabet[r.Int64()])
	}
	// One '1' per leading zero byte. The loop is bounded by len(data) so
	// all-zero (or empty) input can no longer index past the end of data,
	// which previously panicked.
	for i := 0; i < len(data) && data[i] == 0; i++ {
		s = append(s, alphabet[0])
	}
	// Digits were produced in reverse order; flip the slice in place.
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
	return string(s)
}
// main reproduces the Mastering Bitcoin worked example: it derives a public
// key from a hard-coded private key and encodes the corresponding
// Base58Check address, printing each intermediate value.
func main() {
	fmt.Println("Example from Mastering Bitcoin, pages 69-70.")
	/*
	 * Secp256k1 parameters. See:
	 * https://en.bitcoin.it/wiki/Secp256k1
	 * https://www.secg.org/sec2-v2.pdf - Section 2.4.1.
	 */
	// p = 2^256 - 2^32 - 977, the secp256k1 field prime.
	p.SetBit(p, 256, 1)
	p.Sub(p, big.NewInt(1<<32+977))
	fmt.Println("\tp:")
	fmt.Println(p)
	// G is the secp256k1 base point (generator).
	G := NewPoint()
	G.x.SetString("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d9"+
		"59f2815b16f81798", 16)
	G.y.SetString("483ada7726a3c4655da4fbfc0e1108a8fd17b448a6855419"+
		"9c47d08ffb10d4b8", 16)
	fmt.Println("\tG:")
	fmt.Println(G)
	/*
	 * A private key must be a whole number from 1 to 0xffffffff...
	 * fffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140, one
	 * less than the order of the base point (or "generator point")
	 * G. See:
	 * https://en.bitcoin.it/wiki/Private_key#Range_of_valid_ECDSA_private_keys
	 */
	var privateKey = new(big.Int)
	// NOTE(review): the original comment here ("dice: write use Base 6
	// with:") was garbled. The hard-coded key below is presumably the book's
	// example key — never reuse it for real funds.
	privateKey.SetString("038109007313a5807b2eccc082c8c3fbb988a973"+
		"cacf1a7df9ce725c31b14776", 16)
	fmt.Println("\tprivateKey:")
	fmt.Println(privateKey)
	/*
	 * Generate the public key: publicKey = privateKey * G. See:
	 * Mastering Bitcoin, page 63.
	 */
	var publicKey = NewPoint()
	publicKey.ECPointMul(privateKey, G)
	fmt.Println("\tpublicKey:")
	fmt.Println(publicKey)
	serializedPublicKey := publicKey.Serialize()
	fmt.Println("\tserializedPublicKey:")
	fmt.Printf("%x\n", serializedPublicKey)
	// HASH160: RIPEMD-160 of the SHA-256 of the serialized public key.
	publicKeyHash := r160(s256(serializedPublicKey))
	fmt.Println("\tpublicKeyHash:")
	fmt.Printf("%x\n", publicKeyHash)
	/*
	 * Calculate the checksum needed for Bitcoin's Base58Check
	 * format: the first four bytes of a double SHA-256. See:
	 * Mastering Bitcoin, page 58
	 * https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses#How_to_create_Bitcoin_Address - Steps 5-7.
	 */
	// Version byte 0 marks a mainnet pay-to-pubkey-hash address.
	version := []byte{0}
	versionPlusHash := append(version, publicKeyHash...)
	checksum := s256(s256(versionPlusHash))[:4]
	fmt.Println("\tchecksum:")
	fmt.Printf("%x\n", checksum)
	/*
	 * A Bitcoin address is just the public key hash encoded in
	 * Bitcoin's Base58Check format. See:
	 * Mastering Bitcoin, page 66.
	 */
	address := base58(append(versionPlusHash, checksum...))
	fmt.Println("\taddress:")
	fmt.Println(address)
}
package continuous
import (
"github.com/jtejido/stats"
"github.com/jtejido/stats/err"
"github.com/jtejido/trig"
"math"
"math/rand"
)
// LogLogistic is the log-logistic distribution, also known as the Fisk
// distribution, with scale α, shape β and location (shift) γ.
// https://en.wikipedia.org/wiki/Log-logistic_distribution
// https://en.wikipedia.org/wiki/Shifted_log-logistic_distribution
type LogLogistic struct {
	scale, shape, location float64 // α, β, γ
	src rand.Source // optional randomness source; nil means the global source
}
// NewLogLogistic creates a LogLogistic distribution with the given scale (α),
// shape (β) and location (γ), using the default random source.
func NewLogLogistic(scale, shape, location float64) (*LogLogistic, error) {
	return NewLogLogisticWithSource(scale, shape, location, nil)
}
// NewLogLogisticWithSource creates a LogLogistic distribution that draws its
// random variates from src (nil falls back to the global source). It returns
// an error when scale or shape is not strictly positive.
func NewLogLogisticWithSource(scale, shape, location float64, src rand.Source) (*LogLogistic, error) {
	if scale <= 0 || shape <= 0 {
		return nil, err.Invalid()
	}
	// Bug fix: the src argument was previously discarded (nil was stored),
	// so Rand() could never use a caller-supplied source.
	return &LogLogistic{scale, shape, location, src}, nil
}
// Parameters returns the valid ranges of the distribution parameters:
// α ∈ (0,∞)
// β ∈ (0,∞)
// γ ∈ (-∞,∞)
func (ll *LogLogistic) Parameters() stats.Limits {
	return stats.Limits{
		"α": stats.Interval{0, math.Inf(1), true, true},
		"β": stats.Interval{0, math.Inf(1), true, true},
		"γ": stats.Interval{math.Inf(-1), math.Inf(1), true, true},
	}
}
// Support returns the support of the distribution: x ∈ [γ,∞), i.e. values
// from the location parameter upwards.
func (ll *LogLogistic) Support() stats.Interval {
	return stats.Interval{ll.location, math.Inf(1), false, true}
}
// Probability returns the PDF at x:
// (β/α)·(z)^(β-1) / (1 + z^β)² with z = (x-γ)/α, and 0 outside the support.
func (ll *LogLogistic) Probability(x float64) float64 {
	if !ll.Support().IsWithinInterval(x) {
		return 0
	}
	z := (x - ll.location) / ll.scale
	num := (ll.shape / ll.scale) * math.Pow(z, ll.shape-1)
	d := 1 + math.Pow(z, ll.shape)
	return num / (d * d)
}
// Distribution returns the CDF at x: 1 / (1 + z^-β) with z = (x-γ)/α, and 0
// for x outside (below) the support.
func (ll *LogLogistic) Distribution(x float64) float64 {
	if !ll.Support().IsWithinInterval(x) {
		return 0
	}
	z := (x - ll.location) / ll.scale
	return 1 / (1 + math.Pow(z, -ll.shape))
}
// Inverse returns the quantile function at probability p: clamped to 0 for
// p ≤ 0 and +Inf for p ≥ 1, otherwise α·(p/(1-p))^(1/β) + γ.
func (ll *LogLogistic) Inverse(p float64) float64 {
	switch {
	case p <= 0:
		return 0
	case p >= 1:
		return math.Inf(1)
	}
	return ll.scale*math.Pow(p/(1-p), 1/ll.shape) + ll.location
}
// Mean returns α·θ·csc(θ) + γ where θ = π/β; the mean is undefined (NaN)
// for β ≤ 1.
func (ll *LogLogistic) Mean() float64 {
	if ll.shape <= 1 {
		return math.NaN()
	}
	theta := math.Pi / ll.shape
	return ll.scale*theta*trig.Csc(theta) + ll.location
}
// Median returns the median of the distribution. The CDF reaches 1/2 at
// x = α + γ (equivalently Inverse(0.5)), so the location shift must be
// included; previously only the scale was returned, which was inconsistent
// with Mean, Mode and Inverse.
func (ll *LogLogistic) Median() float64 {
	return ll.scale + ll.location
}
// Mode returns γ + α·((β-1)/(β+1))^(1/β) for β > 1; for β ≤ 1 the density is
// maximal at the lower support bound, so γ is returned.
func (ll *LogLogistic) Mode() float64 {
	if ll.shape <= 1 {
		return ll.location
	}
	ratio := (ll.shape - 1) / (ll.shape + 1)
	return ll.location + ll.scale*math.Pow(ratio, 1/ll.shape)
}
// Variance returns α²·θ·(2·csc(2θ) − θ·csc²(θ)) with θ = π/β; it is
// undefined (NaN) for β ≤ 2.
func (ll *LogLogistic) Variance() float64 {
	if ll.shape <= 2 {
		return math.NaN()
	}
	theta := math.Pi / ll.shape
	return (ll.scale * ll.scale) * theta * (2*trig.Csc(2*theta) - theta*(trig.Csc(theta)*trig.Csc(theta)))
}
// Skewness returns the skewness in terms of θ = π/β; it is undefined (NaN)
// for β ≤ 3. Note the shift γ does not affect skewness.
func (ll *LogLogistic) Skewness() float64 {
	if ll.shape <= 3 {
		return math.NaN()
	}
	theta := math.Pi / ll.shape
	num := 3*trig.Csc(3*theta) - 6*theta*trig.Csc(2*theta)*trig.Csc(theta) + 2*(theta*theta)*(trig.Csc(theta)*trig.Csc(theta)*trig.Csc(theta))
	denom := math.Sqrt(theta) * math.Pow(2*trig.Csc(2*theta)-theta*(trig.Csc(theta)*trig.Csc(theta)), 3./2)
	return num / denom
}
// ExKurtosis returns the excess kurtosis (kurtosis - 3) in terms of
// θ = π/β; it is undefined (NaN) for β ≤ 4.
func (ll *LogLogistic) ExKurtosis() float64 {
	if ll.shape <= 4 {
		return math.NaN()
	}
	theta := math.Pi / ll.shape
	num := 4*trig.Csc(4*theta) - 12*theta*trig.Csc(3*theta)*trig.Csc(theta) + 12*(theta*theta)*trig.Csc(2*theta)*(trig.Csc(theta)*trig.Csc(theta)) - 3*(theta*theta*theta)*(trig.Csc(theta)*trig.Csc(theta)*trig.Csc(theta)*trig.Csc(theta))
	denom := theta * math.Pow(2*trig.Csc(2*theta)-theta*(trig.Csc(theta)*trig.Csc(theta)), 2)
	return (num / denom) - 3
}
func (ll *LogLogistic) Rand() float64 {
var rnd float64
if ll.src == nil {
rnd = rand.Float64()
} else {
rnd = rand.New(ll.src).Float64()
}
return ll.Inverse(rnd)
} | dist/continuous/log_logistic.go | 0.783823 | 0.456834 | log_logistic.go | starcoder |
package main
import (
"errors"
"fmt"
"log"
)
// emptyByte is the all-zero byte returned alongside errors from setBit.
const emptyByte byte = 0b00000000
// Masks to set a particular bit to 1.
// maskN has only bit N set, where bit 0 is the most significant bit.
const (
	mask0 byte = 0b10000000
	mask1 byte = 0b01000000
	mask2 byte = 0b00100000
	mask3 byte = 0b00010000
	mask4 byte = 0b00001000
	mask5 byte = 0b00000100
	mask6 byte = 0b00000010
	mask7 byte = 0b00000001
)
// Masks to set a particular bit to 0 (bitwise complements of the masks above).
const (
	clearMask0 byte = 0b01111111
	clearMask1 byte = 0b10111111
	clearMask2 byte = 0b11011111
	clearMask3 byte = 0b11101111
	clearMask4 byte = 0b11110111
	clearMask5 byte = 0b11111011
	clearMask6 byte = 0b11111101
	clearMask7 byte = 0b11111110
)
// Inclusive bounds on the number of bits this program can process.
const (
	minBitsToProcess uint16 = 8
	maxBitsToProcess uint16 = 1024
)
// Config describes a shuffle job: how many bits the input carries and which
// bit positions must be swapped with which.
type Config struct {
	BitCount uint16 // Total number of bits; validated to be a multiple of 8 within [minBitsToProcess, maxBitsToProcess]
	ShuffleMap map[uint16]uint16 // Pairs of bit positions to swap; no position may appear twice
}
// WorkUnit is a single work unit of shuffle
type WorkUnit struct {
	Input []byte // The input byte slice
	BitSetterMap map[uint16]bool // The map bit position with their values (boolean) that should go in final result
	Output []byte // The output byte slice will be put here
	Validated bool // Is the shuffle map and input valid according to config
	Config Config // The config, as received by the program.
}
// Shuffle is the main function which will shuffle the bits: it validates the
// config, builds the bit-setter map, applies it, and stores the result in
// w.Output.
//
// NOTE(review): w.Output shares its backing array with w.Input, so the input
// bytes are modified in place — confirm callers expect this aliasing.
func (w *WorkUnit) Shuffle() error {
	err := w.validateConfig()
	if !w.Validated {
		return fmt.Errorf("E#B0FY3 - Config not validated! Error: %v", err)
	}
	w.buildBitSetterMap()
	result := w.Input
	for i := uint16(0); i < w.Config.BitCount; i++ {
		if bitToSetBool, ok := w.BitSetterMap[i]; ok {
			// This position takes part in a swap; write its new value.
			result, err = setBitOnByteArray(bitToSetBool, result, i)
			if err != nil {
				return fmt.Errorf("E#ATZUS - Could not set bit on byte array: %v", err)
			}
		}
	}
	w.Output = result
	return nil
}
// validateConfig validates the config in the WorkUnit and records the result
// in w.Validated: the bit count must be a multiple of 8 within the supported
// range, the input must be exactly that many bits long, and no bit position
// may appear more than once across the shuffle map.
func (w *WorkUnit) validateConfig() error {
	// Ensure the number of bits is within the supported range.
	if w.Config.BitCount < minBitsToProcess {
		w.Validated = false
		return fmt.Errorf("E#9R6OP - need at least %v bits for the swapping to be done", minBitsToProcess)
	}
	if w.Config.BitCount > maxBitsToProcess {
		w.Validated = false
		return fmt.Errorf("E#9R6Q8 - cannot process more than %v bits for now", maxBitsToProcess)
	}
	// Ensure that the number of bits mentioned is exactly divisible by 8.
	if w.Config.BitCount%8 != 0 {
		w.Validated = false
		return fmt.Errorf("E#9R6YV - number of bits to process must be a multiple of 8. Got: %v", w.Config.BitCount)
	}
	// Ensure that the input is exactly the size mentioned in the Config.
	if len(w.Input)*8 != int(w.Config.BitCount) {
		w.Validated = false
		return fmt.Errorf("E#9R71S - expected %v bits in input, got %v", w.Config.BitCount, len(w.Input)*8)
	}
	// Ensure the shuffle map has no logical overlap: both sides of every
	// pair are shoved into one slice of seen positions; any position
	// encountered a second time is a repetition.
	var all []uint16
	var exists = false
	var repeatedBitPosition uint16
	for key, value := range w.Config.ShuffleMap {
		if elementExistsInSlice(key, all) {
			exists = true
			repeatedBitPosition = key
			break
		}
		all = append(all, key)
		if elementExistsInSlice(value, all) {
			exists = true
			repeatedBitPosition = value
			break
		}
		all = append(all, value)
	}
	if exists {
		w.Validated = false
		return fmt.Errorf("E#B0G5P - Cannot continue -- Repetitions in the shuffling map for bit position: %v", repeatedBitPosition)
	}
	w.Validated = true
	return nil
}
// buildBitSetterMap builds the BitSetterMap in the WorkUnit: for each swapped
// pair of bit positions it records the exchanged values, skipping pairs whose
// bits are already equal (swapping them would be a no-op).
func (w *WorkUnit) buildBitSetterMap() {
	for key, value := range w.Config.ShuffleMap {
		// The dead `err := errors.New(...)` placeholder that used to sit
		// here was removed; := below declares err together with the value.
		valAtValueIndex, err := getBit(w.Input[int(value/8)], value%8)
		if err != nil {
			fmt.Println("E#AT666 - Error from getBit:", err)
		}
		valAtKeyIndex, err := getBit(w.Input[key/8], key%8)
		if err != nil {
			fmt.Println("E#AT6C0 - Error from getBit:", err)
		}
		if valAtKeyIndex != valAtValueIndex {
			w.BitSetterMap[key] = valAtValueIndex
			w.BitSetterMap[value] = valAtKeyIndex
		}
	}
}
// buildFullBitMap populates BitSetterMap with a value for EVERY bit position:
// swapped positions receive their exchanged values, and every other position
// its original value from w.Input.
func (w *WorkUnit) buildFullBitMap() {
	for key, value := range w.Config.ShuffleMap {
		// The dead `err := errors.New(...)` placeholder that used to sit
		// here was removed; := below declares err together with val.
		val, err := getBit(w.Input[int(value/8)], value%8)
		if err != nil {
			fmt.Println("E#AT666 - Error from getBit:", err)
		} else {
			w.BitSetterMap[key] = val
		}
		val, err = getBit(w.Input[key/8], key%8)
		if err != nil {
			fmt.Println("E#AT6C0 - Error from getBit:", err)
		} else {
			w.BitSetterMap[value] = val
		}
	}
	// Fill in every position the shuffle map did not cover with its
	// original value from the input.
	for i := uint16(0); i < w.Config.BitCount; i++ {
		if _, ok := w.BitSetterMap[i]; !ok {
			bit, err := getBitFromByteArray(w.Input, i)
			if err == nil {
				w.BitSetterMap[i] = bit
			}
		}
	}
}
// elementExistsInSlice reports whether element occurs anywhere in ins.
func elementExistsInSlice(element uint16, ins []uint16) bool {
	for i := range ins {
		if ins[i] == element {
			return true
		}
	}
	return false
}
// getBitFromByteArray gets one bit from the onByteSlice at the give atPosition
func getBitFromByteArray(onByteSlice []byte, atPosition uint16) (bool, error) {
// Check that the atPosition is valid
byteSliceLength := len(onByteSlice)
if atPosition < 0 || atPosition > uint16(byteSliceLength*8)-1 {
return false, errors.New(fmt.Sprintf("E#AU1SC - At position not within acceptable range: %v", atPosition))
}
// Calculate byte's index in the slice
byteIndexInSlice := atPosition / 8
bitIndexInByte := atPosition % 8
// Extract byte
byteToGetBitFrom := onByteSlice[byteIndexInSlice]
bitToReturn, err := getBit(byteToGetBitFrom, bitIndexInByte)
if err != nil {
return false, errors.New(fmt.Sprintf("E#AU07G - Could not get bit from byte: %v", err))
}
return bitToReturn, nil
}
// setBitOnByteArray sets the bit at atPosition in onByteSlice (in place) and
// returns the resulting byte slice. Position 0 is the most significant bit of
// the first byte.
func setBitOnByteArray(bit bool, onByteSlice []byte, atPosition uint16) ([]byte, error) {
	byteSliceLength := len(onByteSlice)
	// atPosition is unsigned, so only the upper bound needs checking (the
	// old `atPosition < 0` comparison was always false for uint16). The
	// empty slice is guarded explicitly to avoid the `len*8-1` underflow.
	if byteSliceLength == 0 || int(atPosition) >= byteSliceLength*8 {
		return onByteSlice, errors.New("E#ATYR9 - At position is not within acceptable range")
	}
	byteIndexInSlice := atPosition / 8
	updated, err := setBit(bit, onByteSlice[byteIndexInSlice], atPosition%8)
	if err != nil {
		return onByteSlice, fmt.Errorf("E#ATZC9 - Can't set bit on Byte: %v", err)
	}
	// Write the modified byte back into the caller's slice.
	onByteSlice[byteIndexInSlice] = updated
	return onByteSlice, nil
}
// getBit reports whether the bit at atPosition of fromByte is set, where
// position 0 is the most significant bit. Positions above 7 yield an error.
func getBit(fromByte byte, atPosition uint16) (bool, error) {
	// atPosition is unsigned, so only the upper bound needs checking.
	if atPosition > 7 {
		return false, errors.New("E#9NHL1 - only bits 0-7 are supported by this function")
	}
	// Position 0 is the MSB, so shifting the high-bit mask right by the
	// position selects the requested bit; this replaces the old 8-way
	// switch (whose default branch was unreachable after the bounds check).
	mask := byte(0b10000000) >> atPosition
	return fromByte&mask != 0, nil
}
// setBit sets a bit to 0 or 1 for a byte, at a given position.
// The bit to be set is expressed as a boolean - true means 1, false means 0.
// Position 0 is the most significant bit; positions above 7 yield an error
// and the zero byte.
func setBit(bit bool, onByte byte, atPosition uint16) (byte, error) {
	// NOTE(review): `atPosition < 0` is always false for uint16; only the
	// upper-bound check is effective. Kept as-is here.
	if atPosition > 7 || atPosition < 0 {
		return emptyByte, errors.New("E#9NHNL - only bits 0-7 are supported by this function")
	}
	var mask byte
	byt := onByte
	var result byte
	if bit {
		// Setting to 1: OR with a mask that has only the target bit set.
		switch atPosition {
		case 0:
			mask = mask0
		case 1:
			mask = mask1
		case 2:
			mask = mask2
		case 3:
			mask = mask3
		case 4:
			mask = mask4
		case 5:
			mask = mask5
		case 6:
			mask = mask6
		case 7:
			mask = mask7
		default:
			// Unreachable after the bounds check above; kept defensively.
			errMsg := fmt.Sprintf("E#AT6RW - INVALID POSITION: %v", atPosition)
			log.Println(errMsg)
			return emptyByte, errors.New(errMsg)
		}
		result = byt | mask
	} else {
		// Setting to 0: AND with a mask that has only the target bit clear.
		switch atPosition {
		case 0:
			mask = clearMask0
		case 1:
			mask = clearMask1
		case 2:
			mask = clearMask2
		case 3:
			mask = clearMask3
		case 4:
			mask = clearMask4
		case 5:
			mask = clearMask5
		case 6:
			mask = clearMask6
		case 7:
			mask = clearMask7
		default:
			// Unreachable after the bounds check above; kept defensively.
			errMsg := fmt.Sprintf("E#ATAZN - INVALID POSITION: %v", atPosition)
			log.Println(errMsg)
			return emptyByte, errors.New(errMsg)
		}
		result = byt & mask
	}
	return result, nil
}
package bitty
/*
Copyright 2020 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Symbolic enables Unit to provide a standard, exponent, and symbol
type Symbolic interface {
// Standard returns the standard for the unit as defined by the SI brochure,
// 9th Edition, page 145:
// https://www.bipm.org/utils/common/pdf/si-brochure/SI-Brochure-9.pdf
Standard() UnitStandard
// Exponent returns the supported exponent as an int
// To calculate the value from the standard, symbol, and size the formulas
// are:
// - Given the Standard is SI, value as v is equal to (10^e)size
// - Given the Standard is IEC, value as v is equal to (2^(e*10))size
Exponent() int
// Symbol returns the supported symbol as a UnitSymbol
Symbol() UnitSymbol
}
// Sizer enables Unit to get a size measured by bit, byte, or arbitrary data
// Unit kind
type Sizer interface {
// Size returns the size of the Unit
Size() float64
// BitSize returns the size of the Unit measured in bits
BitSize() float64
// ByteSize returns the size of the Unit measured in bytes
ByteSize() float64
// SizeInUnit returns the size of the Unit measured in an arbitrary
// UnitSymbol from Bit up to YiB or YB
SizeInUnit(UnitSymbol) float64
}
// Calculator enables Units to be calculated against each other
// All returns are diminshing or increasing UnitSymbol measurements as defined
// by the SI and IEC
type Calculator interface {
// Add attempts to add one Unit to another
Add(Unit) Unit
// Subtract attempts to subtract one Unit from another
Subtract(Unit) Unit
// Multiply attempts to multiply one Unit by another
Multiply(Unit) Unit
// Divide attempts to divide one Unit from another
Divide(Unit) Unit
}
// Unit enables Unit kinds to interact with each other
type Unit interface {
Symbolic
Sizer
Calculator
}
// BaseUnitSymbolPair represents the bit and byte pairs
type BaseUnitSymbolPair struct {
standard *UnitStandard
}
// NewBaseUnitSymbolPair takes a UnitStandard and returns a new UnitSymbolPair
func NewBaseUnitSymbolPair(std UnitStandard) UnitSymbolPair {
return &BaseUnitSymbolPair{standard: &std}
}
// Standard returns the UnitStandard of a BaseUnitSymbolPair if it exists or SI
func (b *BaseUnitSymbolPair) Standard() UnitStandard {
if b.standard == nil {
return SI
}
return *b.standard
}
// Exponent returns the exponent of a BaseUnitSymbolPair: 0
func (b *BaseUnitSymbolPair) Exponent() int {
return 0
}
// Least returns the least UnitSymbol of a BaseUnitSymbolPair: a Bit
func (b *BaseUnitSymbolPair) Least() UnitSymbol {
return Bit
}
// Greatest returns the greatest UnitSymbol of a BaseUnitSymbolPair: a Byte
func (b *BaseUnitSymbolPair) Greatest() UnitSymbol {
return Byte
} | base.go | 0.805096 | 0.40754 | base.go | starcoder |
package adagrad
import (
"github.com/nlpodyssey/spago/gd"
"github.com/nlpodyssey/spago/mat"
"github.com/nlpodyssey/spago/nn"
)
// Compile-time check that *Config satisfies gd.MethodConfig.
var _ gd.MethodConfig = &Config[float32]{}

// Config provides configuration settings for an AdaGrad optimizer.
type Config[T mat.DType] struct {
	gd.MethodConfig
	LR T // learning rate
	Epsilon T // small constant added to the denominator for numerical stability
}
// NewConfig returns a new AdaGrad Config with the given learning rate and
// epsilon (the stabilizing constant added to the denominator).
func NewConfig[T mat.DType](lr, epsilon T) Config[T] {
	return Config[T]{
		LR: lr,
		Epsilon: epsilon,
	}
}
// NewDefaultConfig returns a new Config with generically reasonable default
// values: learning rate 0.01 and epsilon 1e-8.
func NewDefaultConfig[T mat.DType]() Config[T] {
	return Config[T]{
		LR: 0.01,
		Epsilon: 1.0e-8,
	}
}
// Compile-time check that *AdaGrad satisfies gd.Method.
var _ gd.Method[float32] = &AdaGrad[float32]{}

// AdaGrad assigns a different learning rate to each parameter using the sum of squares of its all historical gradients.
// References
// Adaptive Subgradient Methods for Online Learning and Stochastic Optimization
// http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
type AdaGrad[T mat.DType] struct {
	Config[T]
}
// New returns a new AdaGrad optimizer, initialized according to the given configuration.
func New[T mat.DType](c Config[T]) *AdaGrad[T] {
	return &AdaGrad[T]{Config: c}
}
// m is the index of the accumulated squared-gradient matrix within the
// support payload's Data slice.
const m = 0

// Label returns the enumeration-like value which identifies this gradient descent method.
func (o *AdaGrad[_]) Label() int {
	return gd.AdaGrad
}
// NewSupport returns a new support structure with the given dimensions.
// The payload carries a single r-by-c zero matrix: the running sum of squared
// gradients, stored at index m.
func (o *AdaGrad[T]) NewSupport(r, c int) *nn.Payload[T] {
	return &nn.Payload[T]{
		Label: o.Label(),
		Data: []mat.Matrix[T]{mat.NewEmptyDense[T](r, c)}, // m at index 0
	}
}
// Delta returns the difference between the current params and where the
// method wants it to be, computed from the parameter's gradient and its
// (lazily created) AdaGrad support payload.
func (o *AdaGrad[T]) Delta(param nn.Param[T]) mat.Matrix[T] {
	return o.calcDelta(param.Grad(), gd.GetOrSetPayload[T](param, o).Data)
}
// calcDelta performs the AdaGrad update using the accumulated squared
// gradients stored in supp[m]:
//
//	m = m + grads*grads
//	delta = (grads / (sqrt(m) + eps)) * lr
func (o *AdaGrad[T]) calcDelta(grads mat.Matrix[T], supp []mat.Matrix[T]) mat.Matrix[T] {
	supp[m].AddInPlace(grads.Prod(grads))
	buf := supp[m].Sqrt() // TODO: this was "buf := mat.SqrtMatrix(supp[m])", is it the same?
	buf.AddScalarInPlace(o.Epsilon)
	delta := grads.Div(buf)
	delta.ProdScalarInPlace(o.LR)
	return delta
}
package nlp
// Edit operation codes: insertion, deletion, substitution and match.
const (
	Ins = iota
	Del
	Sub
	Match
)
// DefaultOptions is the default options: insertion cost is 1, deletion cost is
// 1, substitution cost is 2, and two runes match iff they are the same.
var DefaultOptions Options = Options{
	InsCost: 1,
	DelCost: 1,
	SubCost: 2,
	Matches: func(sourceCharacter rune, targetCharacter rune) bool {
		return sourceCharacter == targetCharacter
	},
}
type (
	// EditOperation is one of Ins, Del, Sub or Match.
	EditOperation int
	// EditScript is a sequence of edit operations.
	EditScript []EditOperation
	// MatchFunction reports whether two runes are considered equal.
	MatchFunction func(rune, rune) bool
	// Options bundles the edit costs and the rune comparison used by the
	// Levenshtein computation.
	Options struct {
		InsCost int
		DelCost int
		SubCost int
		Matches MatchFunction
	}
)
// String returns the lower-case name of the edit operation; any value other
// than Match, Ins or Sub is reported as "del".
func (operation EditOperation) String() string {
	switch operation {
	case Match:
		return "match"
	case Ins:
		return "ins"
	case Sub:
		return "sub"
	default:
		return "del"
	}
}
// SimilarityForStrings returns a similarity ratio for the two strings based
// on their Levenshtein distance under DefaultOptions:
// (total - distance) / total, where total is the combined rune count.
//
// NOTE(review): when both strings are empty this divides 0 by 0 and yields
// NaN — confirm callers never pass two empty strings.
func SimilarityForStrings(source, target string) float32 {
	distance := DistanceForStrings([]rune(source), []rune(target), DefaultOptions)
	total := len([]rune(source)) + len([]rune(target))
	return float32(total-distance) / float32(total)
}
// DistanceForStrings returns the edit distance between source and target
// under the costs and match function supplied in op.
func DistanceForStrings(source []rune, target []rune, op Options) int {
	return DistanceForMatrix(MatrixForStrings(source, target, op))
}
// DistanceForMatrix reads the edit distance off the given Levenshtein matrix:
// it is the value in the bottom-right cell.
func DistanceForMatrix(matrix [][]int) int {
	lastRow := len(matrix) - 1
	lastCol := len(matrix[0]) - 1
	return matrix[lastRow][lastCol]
}
// MatrixForStrings generates a 2-D array representing the dynamic programming
// table used by the Levenshtein algorithm, as described e.g. here:
// http://www.let.rug.nl/kleiweg/lev/
// Row r corresponds to the length-r prefix of source, column c to the
// length-c prefix of target, and each cell holds the edit distance between
// those prefixes. Exposing the full table (rather than just the distance)
// allows e.g. backtracing an edit script that aligns the two strings.
func MatrixForStrings(source []rune, target []rune, op Options) [][]int {
	rows := len(source) + 1
	cols := len(target) + 1
	matrix := make([][]int, rows)
	// Trivial distances from/to the empty string: the left column and the
	// top row are just the prefix lengths.
	for r := 0; r < rows; r++ {
		matrix[r] = make([]int, cols)
		matrix[r][0] = r
	}
	for c := 1; c < cols; c++ {
		matrix[0][c] = c
	}
	// Each remaining cell is the cheapest of delete, insert, or
	// match/substitute applied to an already-computed neighbour.
	for r := 1; r < rows; r++ {
		for c := 1; c < cols; c++ {
			costDel := matrix[r-1][c] + op.DelCost
			costDiag := matrix[r-1][c-1]
			if !op.Matches(source[r-1], target[c-1]) {
				costDiag += op.SubCost
			}
			costIns := matrix[r][c-1] + op.InsCost
			matrix[r][c] = min(costDel, min(costDiag, costIns))
		}
	}
	return matrix
}
// min returns the smaller of a and b.
func min(a int, b int) int {
	if a <= b {
		return a
	}
	return b
}
// max returns the larger of a and b.
func max(a int, b int) int {
	if a >= b {
		return a
	}
	return b
}
package algo
// InsSort sorts list using the insertion sort algorithm. It works by extending
// a sorted subset of the list by one element at the time.
func InsSort(list []int) []int {
// u represent the start position of the unsorted portion of the list. The
// first element is always sorted, thus start checking the second element of
// the list.
for u := 1; u < len(list); u++ {
// Check the element against every previous element and insert it into the
// already sorted subset of the list.
for i := 0; i < u; i++ {
if list[u] < list[i] {
// swap
list[u], list[i] = list[i], list[u]
}
}
}
return list
}
// SelSort sorts list using the selection sort algorithm. It works by locating
// the smallest entry from an unsorted portion of the list and moving it to the
// end of the sorted portion of the list.
func SelSort(list []int) []int {
// u represent the start position of the unsorted portion of the list.
// Initially the entire list is unsorted.
for u := 0; u < len(list); u++ {
// Locate the smallest integer in the unsorted portion of the list.
// min represent the minimal value of the unsorted portion of the list.
min := list[u]
// minPos represent the position of min in list.
minPos := u
for i := u + 1; i < len(list); i++ {
v := list[i]
if v < min {
min = v
minPos = i
}
}
// Place smallest integer from the unsorted portion of the list at the end
// of the sorted portion of the list.
if u != minPos {
list[u], list[minPos] = min, list[u]
}
}
return list
}
// BubbleSort sorts list using the bubble sort algorithm. It works by comparing
// adjecent entites and interchanging them if they are not in the correct order
// relative to each other. Each pass will pull the smallest entity to the start
// of the unsorted portion of the list. Watching the algorithm at work, one sees
// the small entities bubble to the top of the list.
func BubbleSort(list []int) []int {
// u represent the position of the unsorted portion of the list. Initially
// the entire list is unsorted.
for u := 0; u < len(list); u++ {
for j := len(list) - 1; j > u; j-- {
// i represent the position directly in front of j.
i := j - 1
// Compare the adjecent entities and swap them if they are not in the
// correct order.
if list[i] > list[j] {
list[i], list[j] = list[j], list[i]
}
}
}
return list
}
// TODO(u): Write a concurrent version of QuickSort that performs each recursive
// call in a goroutine.

// TODO(u): Write a version of QuickSort that selects the pivot entry based on
// the median of a few random samples.

// QuickSort sorts list in place using the quicksort algorithm: the list is
// partitioned around a pivot entry so that everything before the pivot is
// less than or equal to it and everything after it is greater, then the
// algorithm recurses on both partitions until they are trivially sorted.
// The (sorted) slice is returned for convenience.
func QuickSort(list []int) []int {
	// Lists of zero or one element are always sorted.
	if len(list) < 2 {
		return list
	}
	// Split around the pivot, then sort each side independently.
	p := partition(list)
	QuickSort(list[:p])
	QuickSort(list[p+1:])
	return list
}

// partition rearranges list around a pivot (the last element, Lomuto
// scheme) and returns the pivot's final index: everything before it is
// less than or equal to the pivot, everything after it is greater.
func partition(list []int) int {
	last := len(list) - 1
	pivot := list[last]
	// store is the size of the "<= pivot" region built up at the front.
	store := 0
	for i := 0; i < last; i++ {
		if list[i] <= pivot {
			// Grow the small region, skipping no-op swaps.
			if store != i {
				list[store], list[i] = list[i], list[store]
			}
			store++
		}
	}
	// Place the pivot between the two regions.
	if store != last {
		list[store], list[last] = list[last], list[store]
	}
	return store
}
package stream
import "math"
// Accumulator builds running statistics (min/max/count/sum) for a keyed
// stream over a single time interval, while retaining a bounded reservoir
// of sampled values from which further statistics are derived at
// finalisation.
type Accumulator struct {
	streamKey               string         // key identifying the stream being accumulated
	intervalStart           int64          // start of the interval
	intervalEnd             int64          // end of the interval
	intervalType            IntervalType   // granularity of the interval
	minimum                 float64        // smallest value seen (meaningful once count > 0)
	maximum                 float64        // largest value seen (meaningful once count > 0)
	count                   uint64         // number of values accumulated
	sum                     float64        // running total of all values
	sampleCount             uint32         // number of values currently retained as samples
	targetSampleCount       uint32         // desired upper bound on the sample reservoir
	samplingRateDenominator uint32         // keep every n-th value as a sample (adapted upward by Include)
	sampleValues            []OrdinalValue // retained sample values
	finalised               bool           // set once Finalise has run; guards Include
}

// NewAccumulator creates an accumulator for the given stream key and
// interval. Sampling starts at a rate of 1 (every value is retained) and
// adapts upward once the reservoir outgrows targetSampleCount.
func NewAccumulator(streamKey string, intervalStart int64, intervalEnd int64, intervalType IntervalType, targetSampleCount uint32) (*Accumulator) {
	accumulator := Accumulator{
		streamKey:               streamKey,
		intervalStart:           intervalStart,
		intervalEnd:             intervalEnd,
		intervalType:            intervalType,
		samplingRateDenominator: 1,
		targetSampleCount:       targetSampleCount,
		sampleValues:            make([]OrdinalValue, 0, 100),
		finalised:               false}
	return &accumulator
}

// Accumulate consumes every value from input, folding each into the
// accumulator. When input is closed it emits the finalised interval
// statistics on output and then signals completion on done.
//
// NOTE(review): the sends on output and done block until received — the
// caller must be draining both channels, or this goroutine leaks.
func (accumulator *Accumulator) Accumulate(input chan OrdinalValue, output chan IntervalStatistics, done chan bool) {
	for v := range input {
		accumulator.Include(v)
	}
	output <- accumulator.Finalise()
	done <- true
}
// Finalise marks the accumulator as complete and derives the interval
// statistics from the accumulated totals and retained sample values.
//
// Fixes relative to the previous revision:
//   - the finalised flag is now actually set, so Include's "no values after
//     finalisation" guard can fire as documented;
//   - sample statistics are only computed when samples exist (previously the
//     standard deviation divided by a zero sample count, yielding NaN);
//   - the coefficient of variation is stddev/mean — it was inverted.
func (accumulator *Accumulator) Finalise() IntervalStatistics {
	accumulator.finalised = true

	// Mean over every accumulated value (0 when nothing was accumulated).
	var mean float64
	if accumulator.count > 0 {
		mean = accumulator.sum / float64(accumulator.count)
	}

	// Statistics over the retained sample reservoir.
	var sampleSum float64
	var sampleMean float64
	var sampleStandardDeviation float64
	var coefficientOfVariation float64
	if accumulator.sampleCount > 0 {
		for _, v := range accumulator.sampleValues {
			sampleSum += v.Value
		}
		sampleMean = sampleSum / float64(accumulator.sampleCount)

		var sumSquareError float64
		for _, v := range accumulator.sampleValues {
			sumSquareError += math.Pow(v.Value-sampleMean, 2)
		}
		// Population standard deviation of the samples.
		sampleStandardDeviation = math.Sqrt(sumSquareError / float64(accumulator.sampleCount))
		if sampleMean != 0 {
			coefficientOfVariation = sampleStandardDeviation / sampleMean
		}
	}

	return IntervalStatistics{
		StreamKey:               accumulator.streamKey,
		IntervalStart:           accumulator.intervalStart,
		IntervalEnd:             accumulator.intervalEnd,
		IntervalType:            accumulator.intervalType,
		Minimum:                 accumulator.minimum,
		Maximum:                 accumulator.maximum,
		Count:                   accumulator.count,
		Sum:                     accumulator.sum,
		Mean:                    mean,
		SampleCount:             accumulator.sampleCount,
		SampleMean:              sampleMean,
		SampleStandardDeviation: sampleStandardDeviation,
		SampleSum:               sampleSum,
		CoefficientOfVariation:  coefficientOfVariation}
}
// Include folds a single observation into the running statistics and,
// subject to the current sampling rate, records it in the sample reservoir.
// Panics if the accumulator has been finalised.
func (accumulator *Accumulator) Include(ordinalValue OrdinalValue) {
	value := ordinalValue.Value
	if accumulator.finalised {
		panic("Accumulator cannot include any more values after finalisation")
	}
	// Track extremes; the first value seeds both minimum and maximum.
	if accumulator.count == 0 || accumulator.minimum > value {
		accumulator.minimum = value
	}
	if accumulator.count == 0 || accumulator.maximum < value {
		accumulator.maximum = value
	}
	accumulator.count++
	accumulator.sum += value
	// Retain every samplingRateDenominator-th observation as a sample.
	if accumulator.count%uint64(accumulator.samplingRateDenominator) == 0 {
		accumulator.sampleValues = append(accumulator.sampleValues, ordinalValue)
		accumulator.sampleCount++
		// If the reservoir has grown 50% past the target, halve the
		// sampling frequency and discard every other retained sample so the
		// remaining set approximates the new, coarser rate.
		if float64(accumulator.sampleCount) > float64(accumulator.targetSampleCount)*1.5 {
			accumulator.samplingRateDenominator *= 2
			var sampleSubset = make([]OrdinalValue, 0)
			// Keep every second sample (the odd 1-based positions are dropped).
			for i, v := range accumulator.sampleValues {
				if (i+1)%2 == 0 {
					sampleSubset = append(sampleSubset, v)
				}
			}
			accumulator.sampleValues = sampleSubset
			accumulator.sampleCount = uint32(len(accumulator.sampleValues))
		}
	}
}
package rango
import "math"
// Matrix is a 4x4 transformation matrix stored row-major: m<row><column>.
type Matrix struct {
	m00, m01, m02, m03 float64
	m10, m11, m12, m13 float64
	m20, m21, m22, m23 float64
	m30, m31, m32, m33 float64
}
// deg2rad converts an angle from degrees to radians.
func deg2rad(deg float64) float64 {
	return deg * math.Pi / 180.0
}
// Identity returns the 4x4 identity matrix.
func Identity() Matrix {
	return Matrix{
		1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1,
	}
}

// MatrixMultiply returns the matrix product a×b. Each result entry
// m<r><c> is the dot product of row r of a with column c of b.
func MatrixMultiply(a Matrix, b Matrix) Matrix {
	m := Matrix{}
	// Column 0.
	m.m00 = a.m00*b.m00 + a.m01*b.m10 + a.m02*b.m20 + a.m03*b.m30
	m.m10 = a.m10*b.m00 + a.m11*b.m10 + a.m12*b.m20 + a.m13*b.m30
	m.m20 = a.m20*b.m00 + a.m21*b.m10 + a.m22*b.m20 + a.m23*b.m30
	m.m30 = a.m30*b.m00 + a.m31*b.m10 + a.m32*b.m20 + a.m33*b.m30
	// Column 1.
	m.m01 = a.m00*b.m01 + a.m01*b.m11 + a.m02*b.m21 + a.m03*b.m31
	m.m11 = a.m10*b.m01 + a.m11*b.m11 + a.m12*b.m21 + a.m13*b.m31
	m.m21 = a.m20*b.m01 + a.m21*b.m11 + a.m22*b.m21 + a.m23*b.m31
	m.m31 = a.m30*b.m01 + a.m31*b.m11 + a.m32*b.m21 + a.m33*b.m31
	// Column 2.
	m.m02 = a.m00*b.m02 + a.m01*b.m12 + a.m02*b.m22 + a.m03*b.m32
	m.m12 = a.m10*b.m02 + a.m11*b.m12 + a.m12*b.m22 + a.m13*b.m32
	m.m22 = a.m20*b.m02 + a.m21*b.m12 + a.m22*b.m22 + a.m23*b.m32
	m.m32 = a.m30*b.m02 + a.m31*b.m12 + a.m32*b.m22 + a.m33*b.m32
	// Column 3.
	m.m03 = a.m00*b.m03 + a.m01*b.m13 + a.m02*b.m23 + a.m03*b.m33
	m.m13 = a.m10*b.m03 + a.m11*b.m13 + a.m12*b.m23 + a.m13*b.m33
	m.m23 = a.m20*b.m03 + a.m21*b.m13 + a.m22*b.m23 + a.m23*b.m33
	m.m33 = a.m30*b.m03 + a.m31*b.m13 + a.m32*b.m23 + a.m33*b.m33
	return m
}
// RotateX returns a rotation matrix about the X axis by rx degrees.
func RotateX(rx float64) Matrix {
	rxRad := deg2rad(rx)
	cosrx := math.Cos(rxRad)
	sinrx := math.Sin(rxRad)
	return Matrix{
		1, 0, 0, 0,
		0, cosrx, -sinrx, 0,
		0, sinrx, cosrx, 0,
		0, 0, 0, 1,
	}
}

// RotateY returns a rotation matrix about the Y axis by ry degrees.
func RotateY(ry float64) Matrix {
	ryRad := deg2rad(ry)
	cosry := math.Cos(ryRad)
	sinry := math.Sin(ryRad)
	return Matrix{
		cosry, 0, sinry, 0,
		0, 1, 0, 0,
		-sinry, 0, cosry, 0,
		0, 0, 0, 1,
	}
}

// RotateZ returns a rotation matrix about the Z axis by rz degrees.
func RotateZ(rz float64) Matrix {
	rzRad := deg2rad(rz)
	cosrz := math.Cos(rzRad)
	sinrz := math.Sin(rzRad)
	return Matrix{
		cosrz, -sinrz, 0, 0,
		sinrz, cosrz, 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1,
	}
}

// Translate returns a translation matrix by (tx, ty, tz).
func Translate(tx, ty, tz float64) Matrix {
	return Matrix{
		1, 0, 0, tx,
		0, 1, 0, ty,
		0, 0, 1, tz,
		0, 0, 0, 1,
	}
}

// Scale returns a scaling matrix by (sx, sy, sz).
func Scale(sx, sy, sz float64) Matrix {
	return Matrix{
		sx, 0, 0, 0,
		0, sy, 0, 0,
		0, 0, sz, 0,
		0, 0, 0, 1,
	}
}

// Rotate combines the three axis rotations as RotateX * RotateY * RotateZ,
// i.e. for column vectors the Z rotation is applied first and X last.
func Rotate(rx, ry, rz float64) Matrix {
	return MatrixMultiply(MatrixMultiply(RotateX(rx), RotateY(ry)), RotateZ(rz))
}

// MatrixVecMultiply applies the upper 3x4 (affine) part of m to vector as a
// point: the translation column is added, and the bottom row of m is
// ignored, so the result is only correct for affine matrices (no
// perspective divide is performed).
func MatrixVecMultiply(m Matrix, vector Vector) Vector {
	vec := Vector{}
	vec.X = m.m00*vector.X + m.m01*vector.Y + m.m02*vector.Z + m.m03
	vec.Y = m.m10*vector.X + m.m11*vector.Y + m.m12*vector.Z + m.m13
	vec.Z = m.m20*vector.X + m.m21*vector.Y + m.m22*vector.Z + m.m23
	return vec
}
package main
import (
"bufio"
"bytes"
"io"
)
// eof represents a marker rune for the end of the reader; defining it here
// gives us a possibility to recognize it in the Scanner.
var eof = rune(0)

// Scanner tokenizes input into FIELD, CARD, WS, EOF and ILLEGAL tokens.
type Scanner struct {
	r *reader // buffered, position-tracking rune reader
}

// NewScanner creates a Scanner struct, which contains a buffered rune reader.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: &reader{r: bufio.NewReader(r)}}
}

// Scan reads the next token from the input and returns it together with its
// starting position and literal text.
func (s *Scanner) Scan() (token Token, pos Pos, lit string) {
	// Read the next rune and dispatch on its class.
	ch, pos := s.r.read()
	switch {
	case isWhitespace(ch):
		return s.scanWhitespace()
	case isPercent(ch):
		// The first '%' of a potential "%%" field marker is consumed here;
		// scanField verifies the second one.
		return s.scanField()
	case isHyphen(ch):
		// Rewind so scanCard sees all three hyphens of a "---" card.
		s.r.unread()
		return s.scanCard()
	default:
		// Otherwise classify the individual character.
		switch ch {
		case eof:
			return EOF, pos, ""
		}
	}
	return ILLEGAL, pos, string(ch)
}
// scanField scans a FIELD token and returns the literal text following the
// opening "%%" marker, up to the next terminator: a closing "%%", a "---"
// card, or end of input. The opening percent has already been consumed by
// Scan; leading whitespace after the marker is skipped.
func (s *Scanner) scanField() (token Token, pos Pos, lit string) {
	// Save the position of the field.
	_, pos = s.r.curr()
	// Create buffer, here we'll write the runes into.
	var buf bytes.Buffer
	// The next character should be another percent (completing "%%").
	ch, _ := s.r.read()
	if !isPercent(ch) {
		s.r.unread()
		return ILLEGAL, pos, ""
	}
	// We read until we see the first non-whitespace character.
	for {
		if ch, _ = s.r.read(); !isWhitespace(ch) {
			s.r.unread()
			break
		}
	}
	// Read until:
	// * double percent
	// * eof
	// * card
	for {
		ch, _ = s.r.read()
		if ch == eof {
			break
		} else if isPercent(ch) {
			// Peek ahead: a second percent means "%%" terminates the field.
			chNext, _ := s.r.read()
			s.r.unread()
			if isPercent(chNext) {
				s.r.unread()
				break
			}
		} else if isHyphen(ch) {
			// We start at depth 1, because
			// we already read the first hyphen.
			if s.isCard(1) {
				break
			}
			// isCard will step back one too much, we
			// want to write the initial hyphen.
			s.r.read()
		}
		// Write runes into buffer.
		_, _ = buf.WriteRune(ch)
	}
	return FIELD, pos, buf.String()
}

// scanCard scans a CARD token: exactly three consecutive hyphens ("---").
func (s *Scanner) scanCard() (token Token, pos Pos, lit string) {
	// Save the position of the field.
	_, pos = s.r.curr()
	// Create buffer, here we'll write the runes into.
	var buf bytes.Buffer
	// When a hyphen is found it should be three consecutive hyphens.
	if !s.isCard(0) {
		return ILLEGAL, pos, ""
	}
	// Read until:
	// * eof
	// * not a hyphen
	// * more than 3 consecutive hyphens
	i := 0
	for {
		ch, _ := s.r.read()
		if ch == eof {
			break
		} else if !isHyphen(ch) {
			s.r.unread()
			break
		} else if i > 2 {
			s.r.unread()
			break
		} else {
			_, _ = buf.WriteRune(ch)
			i++
		}
	}
	return CARD, pos, buf.String()
}
// scanWhitespace scans a run of whitespace into a WS token.
// NOTE(review): the first whitespace rune was already consumed by Scan and
// is not part of the returned literal — confirm that is intended.
func (s *Scanner) scanWhitespace() (token Token, pos Pos, lit string) {
	// Save the position of the field.
	_, pos = s.r.curr()
	// Create buffer, here we'll write the runes into.
	var buf bytes.Buffer
	// Read until:
	// * eof
	// * discontinuation of whitespaces
	for {
		ch, _ := s.r.read()
		if ch == eof {
			break
		} else if !isWhitespace(ch) {
			s.r.unread()
			break
		}
		_, _ = buf.WriteRune(ch)
	}
	return WS, pos, buf.String()
}

// isCard reads ahead to check whether the next runes complete a "---" card
// marker; depth counts the hyphens seen so far. Before returning it unreads
// depth+1 runes, restoring the reader to the position before the first
// hyphen of the candidate card — including a hyphen the caller may already
// have consumed (hence scanField's compensating read on the failure path).
func (s *Scanner) isCard(depth int) bool {
	ch, _ := s.r.read()
	if !isHyphen(ch) || depth > 2 {
		s.r.unreadRepeat(depth + 1)
		return false
	}
	if depth < 2 {
		return s.isCard(depth + 1)
	}
	s.r.unreadRepeat(depth + 1)
	return true
}
// isWhitespace reports whether ch is a space, tab or newline.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n':
		return true
	default:
		return false
	}
}

// isLetter reports whether ch is an ASCII letter.
func isLetter(ch rune) bool {
	return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z')
}

// isPercent reports whether ch is a percent sign.
func isPercent(ch rune) bool {
	return ch == '%'
}

// isHyphen reports whether ch is a hyphen.
func isHyphen(ch rune) bool {
	return ch == '-'
}
package combination
import (
"github.com/tidepool-org/platform/data"
"github.com/tidepool-org/platform/data/types/bolus"
"github.com/tidepool-org/platform/structure"
)
const (
SubType = "dual/square" // TODO: Rename Type to "bolus/combination"; remove SubType
DurationMaximum = 86400000
DurationMinimum = 0
ExtendedMaximum = 100.0
ExtendedMinimum = 0.0
NormalMaximum = 100.0
NormalMinimum = 0.0
)
// Combination represents a combination ("dual/square") bolus: a normal
// portion plus an extended portion delivered over a duration. The
// expected* fields, when present, change which validation rules apply
// (see Validate).
type Combination struct {
	bolus.Bolus `bson:",inline"`

	Duration         *int     `json:"duration,omitempty" bson:"duration,omitempty"`
	DurationExpected *int     `json:"expectedDuration,omitempty" bson:"expectedDuration,omitempty"`
	Extended         *float64 `json:"extended,omitempty" bson:"extended,omitempty"`
	ExtendedExpected *float64 `json:"expectedExtended,omitempty" bson:"expectedExtended,omitempty"`
	Normal           *float64 `json:"normal,omitempty" bson:"normal,omitempty"`
	NormalExpected   *float64 `json:"expectedNormal,omitempty" bson:"expectedNormal,omitempty"`
}

// New creates a Combination with the embedded bolus initialized to this
// type's subtype.
func New() *Combination {
	return &Combination{
		Bolus: bolus.New(SubType),
	}
}

// Parse populates the combination's fields from the parser, attaching this
// type's meta so parser errors identify the datum.
func (c *Combination) Parse(parser structure.ObjectParser) {
	if !parser.HasMeta() {
		parser = parser.WithMeta(c.Meta())
	}

	c.Bolus.Parse(parser)

	c.Duration = parser.Int("duration")
	c.DurationExpected = parser.Int("expectedDuration")
	c.Extended = parser.Float64("extended")
	c.ExtendedExpected = parser.Float64("expectedExtended")
	c.Normal = parser.Float64("normal")
	c.NormalExpected = parser.Float64("expectedNormal")
}
// Validate checks the combination bolus against its structural rules.
// The presence of expectedNormal switches which rules apply — the
// expected* fields appear to carry the originally-programmed values for an
// interrupted bolus (TODO confirm against the data model documentation):
//   - with expectedNormal set, the delivered extended portion must be zero
//     (duration == 0, extended == 0) and the programmed expectations must
//     be present and in range;
//   - otherwise duration/extended are validated directly, and each
//     expected* value, when present, must be at least the delivered value.
func (c *Combination) Validate(validator structure.Validator) {
	if !validator.HasMeta() {
		validator = validator.WithMeta(c.Meta())
	}

	c.Bolus.Validate(validator)

	// SubType, when present, must match this type exactly.
	if c.SubType != "" {
		validator.String("subType", &c.SubType).EqualTo(SubType)
	}

	if c.NormalExpected != nil {
		// Nothing extended was delivered; the programmed expectations stand in.
		validator.Int("duration", c.Duration).Exists().EqualTo(DurationMinimum)
		validator.Int("expectedDuration", c.DurationExpected).Exists().InRange(DurationMinimum, DurationMaximum)
		validator.Float64("extended", c.Extended).Exists().EqualTo(ExtendedMinimum)
		validator.Float64("expectedExtended", c.ExtendedExpected).Exists().InRange(ExtendedMinimum, ExtendedMaximum)
	} else {
		validator.Int("duration", c.Duration).Exists().InRange(DurationMinimum, DurationMaximum)
		// expectedDuration, when present, must be no less than the delivered duration.
		expectedDurationValidator := validator.Int("expectedDuration", c.DurationExpected)
		if c.Duration != nil && *c.Duration >= DurationMinimum && *c.Duration <= DurationMaximum {
			expectedDurationValidator.InRange(*c.Duration, DurationMaximum)
		} else {
			expectedDurationValidator.InRange(DurationMinimum, DurationMaximum)
		}
		// expectedDuration must accompany expectedExtended — and only then.
		if c.ExtendedExpected != nil {
			expectedDurationValidator.Exists()
		} else {
			expectedDurationValidator.NotExists()
		}
		validator.Float64("extended", c.Extended).Exists().InRange(ExtendedMinimum, ExtendedMaximum)
		// expectedExtended, when present, must be no less than the delivered extended.
		expectedExtendedValidator := validator.Float64("expectedExtended", c.ExtendedExpected)
		if c.Extended != nil && *c.Extended >= ExtendedMinimum && *c.Extended <= ExtendedMaximum {
			if *c.Extended == ExtendedMinimum {
				expectedExtendedValidator.Exists()
			}
			expectedExtendedValidator.InRange(*c.Extended, ExtendedMaximum)
		} else {
			expectedExtendedValidator.InRange(ExtendedMinimum, ExtendedMaximum)
		}
	}

	validator.Float64("normal", c.Normal).Exists().InRange(NormalMinimum, NormalMaximum)
	expectedNormalValidator := validator.Float64("expectedNormal", c.NormalExpected)
	if c.Normal != nil && *c.Normal >= NormalMinimum && *c.Normal <= NormalMaximum {
		if *c.Normal == NormalMinimum {
			// If Normal is zero, then _either_ a (positive, unless expected)
			// extended portion exists, or expectedNormal must be positive.
			if c.Extended != nil {
				if c.NormalExpected == nil {
					validator.Float64("extended", c.Extended).GreaterThan(ExtendedMinimum)
				} else {
					validator.Float64("extended", c.Extended).Exists()
				}
			} else {
				expectedNormalValidator.GreaterThan(NormalMinimum)
			}
		}
		expectedNormalValidator.InRange(*c.Normal, NormalMaximum)
	} else {
		expectedNormalValidator.InRange(NormalMinimum, NormalMaximum)
	}
}

// IsValid returns true if there is no error in the validator.
// NOTE(review): this reflects the validator's accumulated state, not only
// checks performed on this Combination — any prior error makes it false.
func (c *Combination) IsValid(validator structure.Validator) bool {
	return !(validator.HasError())
}

// Normalize normalizes the embedded bolus; the combination adds no
// normalization of its own.
func (c *Combination) Normalize(normalizer data.Normalizer) {
	if !normalizer.HasMeta() {
		normalizer = normalizer.WithMeta(c.Meta())
	}

	c.Bolus.Normalize(normalizer)
}
package measurements
import "fmt"
type MassUnit int32
const (
Kilogram MassUnit = iota
Gram
Pound
Ounce
)
var MassUnitName = map[MassUnit]string{
Kilogram: "kg",
Gram: "g",
Pound: "lb",
Ounce: "oz",
}
var MassUnitValue = map[string]MassUnit{
"kg": Kilogram,
"g": Gram,
"lb": Pound,
"oz": Ounce,
}
func (s MassUnit) String() string {
return MassUnitName[s]
}
// Mass is a mass measurement carrying a unit, convertible between the
// supported units.
type Mass interface {
	Unit() MassUnit
	Value() float64
	String() string
	To(unit MassUnit) Mass
	ToKilogram() Mass
	ToGram() Mass
	ToPound() Mass
	ToOunce() Mass
}

// mass is the default Mass implementation: a value paired with its unit.
type mass struct {
	unit  MassUnit
	value float64
}

// NewMass creates a Mass with the given unit and value.
func NewMass(unit MassUnit, value float64) Mass {
	return &mass{
		unit:  unit,
		value: value,
	}
}

// Unit returns the unit of the measurement.
func (s *mass) Unit() MassUnit {
	return s.unit
}

// Value returns the numeric value of the measurement.
func (s *mass) Value() float64 {
	return s.value
}

// String formats the measurement with two decimals and its unit symbol.
// NOTE(review): value receiver here while every other method uses a pointer
// receiver — works, but inconsistent; confirm intended.
func (s mass) String() string {
	return fmt.Sprintf("%.2f %s", s.value, s.unit)
}
func FromOunce(value float64) Mass {
return &mass{Ounce, value}
}
func FromPound(value float64) Mass {
return &mass{Pound, value}
}
func FromGram(value float64) Mass {
return &mass{Gram, value}
}
func FromKilogram(value float64) Mass {
return &mass{Kilogram, value}
}
func (s *mass) To(unit MassUnit) Mass {
switch unit {
default: // Kilogram
return s.ToKilogram()
case Gram:
return s.ToGram()
case Pound:
return s.ToPound()
case Ounce:
return s.ToOunce()
}
}
// ToKilogram converts the measurement to kilograms.
//
// NOTE(review): the conversion factors in this file are low-precision
// approximations (2.205 lb/kg, 454 g/lb, 28.35 g/oz, 35.274 oz/kg) and are
// not mutually consistent, so chained conversions (e.g. kg->g->lb versus
// kg->lb) can disagree slightly — confirm whether exact factors are wanted.
func (s *mass) ToKilogram() Mass {
	switch s.Unit() {
	default: // Kilogram
		return FromKilogram(s.Value())
	case Gram:
		return FromKilogram(s.Value() / 1000)
	case Pound:
		return FromKilogram(s.Value() / 2.205)
	case Ounce:
		return FromKilogram(s.Value() / 35.274)
	}
}

// ToGram converts the measurement to grams.
func (s *mass) ToGram() Mass {
	switch s.Unit() {
	default: // Kilogram
		return FromGram(s.Value() * 1000)
	case Gram:
		return FromGram(s.Value())
	case Pound:
		return FromGram(s.Value() * 454)
	case Ounce:
		return FromGram(s.Value() * 28.35)
	}
}

// ToPound converts the measurement to pounds.
func (s *mass) ToPound() Mass {
	switch s.Unit() {
	default: // Kilogram
		return FromPound(s.Value() * 2.205)
	case Gram:
		return FromPound(s.Value() / 454)
	case Pound:
		return FromPound(s.Value())
	case Ounce:
		return FromPound(s.Value() / 16)
	}
}

// ToOunce converts the measurement to ounces.
func (s *mass) ToOunce() Mass {
	switch s.Unit() {
	default: // Kilogram
		return FromOunce(s.Value() * 35.274)
	case Gram:
		return FromOunce(s.Value() / 28.35)
	case Pound:
		return FromOunce(s.Value() * 16)
	case Ounce:
		return FromOunce(s.Value())
	}
}
package iso20022
// Set of characteristics that apply to the credit side of the payment transactions included in the direct debit initiation.
type PaymentInstructionInformation4 struct {
// Unique identification, as assigned by a sending party, to unambiguously identify the payment information group within the message.
PaymentInformationIdentification *Max35Text `xml:"PmtInfId"`
// Specifies the means of payment that will be used to move the amount of money.
PaymentMethod *PaymentMethod2Code `xml:"PmtMtd"`
// Identifies whether a single entry per individual transaction or a batch entry for the sum of the amounts of all transactions within the group of a message is requested.
// Usage: Batch booking is used to request and not order a possible batch booking.
BatchBooking *BatchBookingIndicator `xml:"BtchBookg,omitempty"`
// Number of individual transactions contained in the payment information group.
NumberOfTransactions *Max15NumericText `xml:"NbOfTxs,omitempty"`
// Total of all individual amounts included in the group, irrespective of currencies.
ControlSum *DecimalNumber `xml:"CtrlSum,omitempty"`
// Set of elements used to further specify the type of transaction.
PaymentTypeInformation *PaymentTypeInformation20 `xml:"PmtTpInf,omitempty"`
// Date and time at which the creditor requests that the amount of money is to be collected from the debtor.
RequestedCollectionDate *ISODate `xml:"ReqdColltnDt"`
// Party to which an amount of money is due.
Creditor *PartyIdentification32 `xml:"Cdtr"`
// Unambiguous identification of the account of the creditor to which a credit entry will be posted as a result of the payment transaction.
CreditorAccount *CashAccount16 `xml:"CdtrAcct"`
// Financial institution servicing an account for the creditor.
CreditorAgent *BranchAndFinancialInstitutionIdentification4 `xml:"CdtrAgt"`
// Unambiguous identification of the account of the creditor agent at its servicing agent in the payment chain.
CreditorAgentAccount *CashAccount16 `xml:"CdtrAgtAcct,omitempty"`
// Ultimate party to which an amount of money is due.
UltimateCreditor *PartyIdentification32 `xml:"UltmtCdtr,omitempty"`
// Specifies which party/parties will bear the charges associated with the processing of the payment transaction.
ChargeBearer *ChargeBearerType1Code `xml:"ChrgBr,omitempty"`
// Account used to process charges associated with a transaction.
//
// Usage: Charges account should be used when charges have to be booked to an account different from the account identified in debtor's account.
ChargesAccount *CashAccount16 `xml:"ChrgsAcct,omitempty"`
// Agent that services a charges account.
//
// Usage: Charges account agent should only be used when the charges account agent is different from the creditor agent.
ChargesAccountAgent *BranchAndFinancialInstitutionIdentification4 `xml:"ChrgsAcctAgt,omitempty"`
// Credit party that signs the mandate.
CreditorSchemeIdentification *PartyIdentification32 `xml:"CdtrSchmeId,omitempty"`
// Set of elements used to provide information on the individual transaction(s) included in the message.
DirectDebitTransactionInformation []*DirectDebitTransactionInformation9 `xml:"DrctDbtTxInf"`
}
func (p *PaymentInstructionInformation4) SetPaymentInformationIdentification(value string) {
p.PaymentInformationIdentification = (*Max35Text)(&value)
}
func (p *PaymentInstructionInformation4) SetPaymentMethod(value string) {
p.PaymentMethod = (*PaymentMethod2Code)(&value)
}
func (p *PaymentInstructionInformation4) SetBatchBooking(value string) {
p.BatchBooking = (*BatchBookingIndicator)(&value)
}
func (p *PaymentInstructionInformation4) SetNumberOfTransactions(value string) {
p.NumberOfTransactions = (*Max15NumericText)(&value)
}
func (p *PaymentInstructionInformation4) SetControlSum(value string) {
p.ControlSum = (*DecimalNumber)(&value)
}
func (p *PaymentInstructionInformation4) AddPaymentTypeInformation() *PaymentTypeInformation20 {
p.PaymentTypeInformation = new(PaymentTypeInformation20)
return p.PaymentTypeInformation
}
func (p *PaymentInstructionInformation4) SetRequestedCollectionDate(value string) {
p.RequestedCollectionDate = (*ISODate)(&value)
}
func (p *PaymentInstructionInformation4) AddCreditor() *PartyIdentification32 {
p.Creditor = new(PartyIdentification32)
return p.Creditor
}
func (p *PaymentInstructionInformation4) AddCreditorAccount() *CashAccount16 {
p.CreditorAccount = new(CashAccount16)
return p.CreditorAccount
}
func (p *PaymentInstructionInformation4) AddCreditorAgent() *BranchAndFinancialInstitutionIdentification4 {
p.CreditorAgent = new(BranchAndFinancialInstitutionIdentification4)
return p.CreditorAgent
}
func (p *PaymentInstructionInformation4) AddCreditorAgentAccount() *CashAccount16 {
p.CreditorAgentAccount = new(CashAccount16)
return p.CreditorAgentAccount
}
func (p *PaymentInstructionInformation4) AddUltimateCreditor() *PartyIdentification32 {
p.UltimateCreditor = new(PartyIdentification32)
return p.UltimateCreditor
}
func (p *PaymentInstructionInformation4) SetChargeBearer(value string) {
p.ChargeBearer = (*ChargeBearerType1Code)(&value)
}
func (p *PaymentInstructionInformation4) AddChargesAccount() *CashAccount16 {
p.ChargesAccount = new(CashAccount16)
return p.ChargesAccount
}
func (p *PaymentInstructionInformation4) AddChargesAccountAgent() *BranchAndFinancialInstitutionIdentification4 {
p.ChargesAccountAgent = new(BranchAndFinancialInstitutionIdentification4)
return p.ChargesAccountAgent
}
func (p *PaymentInstructionInformation4) AddCreditorSchemeIdentification() *PartyIdentification32 {
p.CreditorSchemeIdentification = new(PartyIdentification32)
return p.CreditorSchemeIdentification
}
func (p *PaymentInstructionInformation4) AddDirectDebitTransactionInformation() *DirectDebitTransactionInformation9 {
newValue := new (DirectDebitTransactionInformation9)
p.DirectDebitTransactionInformation = append(p.DirectDebitTransactionInformation, newValue)
return newValue
} | PaymentInstructionInformation4.go | 0.807992 | 0.470311 | PaymentInstructionInformation4.go | starcoder |
package genetic
import (
"fmt"
"math"
)
/**
The likelihood of a given gene mutating. (e.g. 0.06 is a 6% chance)
*/
var mutationRate float64
/**
Gene is a structure intended to represent a single gene.
A, B, and C are constant coefficients used by F.
F is a function that evaluates the three constant coefficients together
with an (x, y) pixel position and returns an integer value representing a
color level (RGB).
Format is a format-string representation of the function body; String
substitutes the coefficients into it.
*/
type Gene struct {
	A, B, C float64
	F       func(float64, float64, float64, int, int) int
	Format  string
}

/**
NewGene creates a new gene with a random set of coefficients and a
random function.
*/
func NewGene() *Gene {
	g := Gene{
		randCoefficient(),
		randCoefficient(),
		randCoefficient(),
		nil,
		"",
	}
	(&g).generateNewGeneFunction()
	return &g
}
/**
randIntWithNeg returns a random integer in the range (-numRange, numRange]
by subtracting a draw from [0, 2*numRange) from numRange.
*/
func randIntWithNeg(numRange int) int {
	return numRange - murphy.Intn(numRange*2)
}
/**
randFloatWithNeg returns a random floating point number in the range
(-numRange, numRange].

BUG FIX: the previous implementation returned murphy.Float64()*numRange,
which only produced values in [0, numRange) and never the negative half of
the documented range (compare randIntWithNeg above).
*/
func randFloatWithNeg(numRange int) float64 {
	r := float64(numRange)
	return r - murphy.Float64()*2*r
}
/**
randCoefficient generates a random coefficient for use in a Gene, scaled by
the larger of the target image dimensions.
NOTE(review): an earlier comment claimed a fixed -255..255 range; the code
actually delegates to randFloatWithNeg(max(TargetWidth, TargetHeight)).
*/
func randCoefficient() float64 {
	return randFloatWithNeg(max(TargetWidth, TargetHeight))
}
/**
generateNewGeneFunction randomly assigns one of the live candidate
functions (cases 0, 1 and the default) and its matching format string to
the gene. Earlier integer-coefficient candidates are kept below, commented
out.
*/
func (gene *Gene) generateNewGeneFunction() {
	switch murphy.Intn(3) {
	/*
		case 0:
			gene.F = func(a, b, c, x, y int) int {
				return (a * x / (b*y + 1)) + c
			}
			gene.Format = "(%d * x / (%d*y + 1)) + %d"
		case 1:
			gene.F = func(a, b, c, x, y int) int {
				return a + b + c + x + y
			}
			gene.Format = "%d + %d + %d + x + y"
		case 2:
			gene.F = func(a, b, c, x, y int) int {
				return a * x * b * y * c
			}
			gene.Format = "%d * x * %d * y * %d"
		case 3:
			gene.F = func(a, b, c, x, y int) int {
				return a*x + b*y + c
			}
			gene.Format = "%d * x + %d * y + %d"
	*/
	case 0:
		// Negated Euclidean distance from (A, B), offset by C squared.
		gene.F = func(a, b, c float64, x, y int) int {
			xf, yf := float64(x), float64(y)
			return -1 * int(math.Sqrt(((a-xf)*(a-xf)+(b-yf)*(b-yf)))+c*c)
		}
		gene.Format = "-1*(((%f - x)^2 + (%f - y)^2)^.5 + %f^2)"
	case 1:
		// Peak of C at (A, B) with linear falloff along each axis.
		gene.F = func(a, b, c float64, x, y int) int {
			xf, yf := float64(x), float64(y)
			return int(c - c*math.Abs(a-xf)/a - c*math.Abs(b-yf)/b)
		}
		gene.Format = "c-c*|a-x|/a - c*|b-yf|/b a=%f b=%f c=%f"
	/*
		case 2:
			gene.F = func(a, b, c float64, x, y int) int {
				return int(c + b + a)
			}
			gene.Format = "%f + %f + %f"
	*/
	default:
		// Euclidean distance from (A, B), offset by C squared.
		gene.F = func(a, b, c float64, x, y int) int {
			xf, yf := float64(x), float64(y)
			return int(math.Sqrt(((a-xf)*(a-xf) + (b-yf)*(b-yf))) + c*c)
		}
		gene.Format = "((%f - x)^2 + (%f - y)^2)^.5 + %f^2"
	}
}
/**
Copy returns a new Gene with the same coefficients, function and format
string as this gene (a shallow copy of all fields).
*/
func (gene *Gene) Copy() *Gene {
	dup := *gene
	return &dup
}
/**
Mutate alters the gene by replacing one of its three constant
coefficients, chosen at random, with a fresh random coefficient. The
gene's function is left unchanged.
*/
func (gene *Gene) Mutate() {
	victim := murphy.Intn(3)
	switch victim {
	case 0:
		gene.A = randCoefficient()
	case 1:
		gene.B = randCoefficient()
	case 2:
		gene.C = randCoefficient()
	}
}

/**
EvalWith evaluates the gene's function for the given x and y.
*/
func (gene *Gene) EvalWith(x, y int) int {
	return gene.F(gene.A, gene.B, gene.C, x, y)
}

/**
String returns a string representation of the Gene, substituting the
coefficients into the gene's format string.
*/
func (gene *Gene) String() string {
	functionString := fmt.Sprintf(gene.Format, gene.A, gene.B, gene.C)
	return fmt.Sprintf("f(x,y) = %s", functionString)
}
package p126
/**
Given two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:
Only one letter can be changed at a time
Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
For example,
Given:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log","cog"]
Return
[
["hit","hot","dot","dog","cog"],
["hit","hot","lot","log","cog"]
]
Note:
Return an empty list if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
You may assume no duplicates in the word list.
You may assume beginWord and endWord are non-empty and are not the same.
*/
// ladderNode is a BFS node for one dictionary word: used marks it as
// reached by an earlier (shorter) level, and prev links every predecessor
// on some shortest path, from which all ladders are reconstructed.
type ladderNode struct {
	word string
	used bool
	prev []*ladderNode
}

// addPrev records p as a shortest-path predecessor of this node.
func (l *ladderNode) addPrev(p *ladderNode) {
	l.prev = append(l.prev, p)
}
// wordsDiff1 reports whether equal-length words a and b differ in exactly
// one byte position.
func wordsDiff1(a, b string) bool {
	diff := 0
	for i := 0; i < len(a); i++ {
		if a[i] != b[i] {
			diff++
			// More than one mismatch: no need to look further.
			if diff > 1 {
				return false
			}
		}
	}
	return diff == 1
}
// findLadders returns every shortest transformation sequence from
// beginWord to endWord, where each step changes exactly one letter and all
// intermediate words come from wordList (LeetCode 126).
//
// Strategy: BFS over the word graph level by level, recording for each
// reached word all of its shortest-path predecessors; once endWord is
// reached the search stops expanding, and a DFS back through the prev
// links reconstructs every shortest ladder.
func findLadders(beginWord string, endWord string, wordList []string) [][]string {
	// BFS frontier, seeded with the begin word (which has no prev links).
	bfsQueue := []*ladderNode{{word: beginWord}}
	ladderList := make([]*ladderNode, len(wordList))
	for i, w := range wordList {
		ladderList[i] = &ladderNode{word: w, prev: make([]*ladderNode, 0)}
	}
	find := false
	for len(bfsQueue) > 0 {
		// The level that discovered endWord is the final one.
		if find {
			break
		}
		preBfsQueue := bfsQueue
		bfsQueue = make([]*ladderNode, 0)
		for _, pnode := range preBfsQueue {
			for _, word := range ladderList {
				// Words consumed by an earlier (shorter) level are skipped.
				if word.used {
					continue
				}
				if wordsDiff1(pnode.word, word.word) {
					if !find && word.word == endWord {
						// endWord is reachable at this level: discard the
						// other candidates, only endWord matters now.
						bfsQueue = bfsQueue[:0]
						find = true
					}
					if !find || (find && word.word == endWord) {
						word.addPrev(pnode)
						// Enqueue each node only once per level.
						if len(word.prev) == 1 {
							bfsQueue = append(bfsQueue, word)
						}
					}
				}
			}
		}
		// Mark this level's nodes so later levels cannot revisit them.
		for _, pnode := range bfsQueue {
			pnode.used = true
		}
	}
	// DFS back through prev links, building every beginWord..l path.
	var dfs func(l *ladderNode) [][]string
	dfs = func(l *ladderNode) [][]string {
		if l.prev == nil || len(l.prev) == 0 {
			return [][]string{{l.word}}
		}
		result := make([][]string, 0)
		for _, p := range l.prev {
			pres := dfs(p)
			for _, one := range pres {
				result = append(result, append(one, l.word))
			}
		}
		return result
	}
	res := make([][]string, 0)
	for _, pnode := range bfsQueue {
		res = append(res, dfs(pnode)...)
	}
	return res
}
package main
import (
"flag"
"fmt"
"log"
"os"
"runtime"
"strconv"
"github.com/crhym3/imgdiff"
)
const usageText = `Compare two images and optionally output resulting diff image.
Supported image formats: png, jpeg, gif, tiff, bmp and webp.
Exit code will be non-zero if the difference is above specified threshold.
Threshold value can also be a percentage, e.g. 0.5%.
Currently supported comparison algorithms are 'binary' and 'perceptual'.
Binary algorithm simply compares the two images' pixels as is.
Default is perceptual. Change using -a option.
Images can either be local file paths or URLs.
Output is usually a file path. Specify '-' to write to stdout instead.
Resulting image format is inferred from the output file extension
or -of argument otherwise. It defaults to png.
Examples:
# compare two local PNG images using perceptual algorithm
# and store the result in pdiff.png
imgdiff -o pdiff.png image1.png image2.png
# compare remote images w/o storing the result
imgdiff http://example.org/image1.jpg http://example.org/image2.jpg
# use binary comparison algorithm
imgdiff -a binary -o bdiff.png image1.gif image2.gif
# use threshold of 0.1%
imgdiff -t 0.1% image1.tiff image2.tiff
`
var (
version string // set by linker -X
// cmd line arguments
threshold = thresholdVar{value: 100}
algorithm = flag.String("a", "perceptual", "diff algorithm")
output = flag.String("o", "", "diff output")
outputFmt = flag.String("of", "", "output image format when -o -")
// perceptual args
gamma = flag.Float64("g", 2.2, "gamma adjustment; perceptual only")
lum = flag.Float64("lum", 100.0, "luminance factor; perceptual only")
fov = flag.Float64("fov", 45.0, "field of view; perceptual only")
cf = flag.Float64("cf", 1.0, "color factor; perceptual only")
nocolor = flag.Bool("nocolor", false, "don't use color during comparison; perceptual only")
)
// init registers the custom -t flag; flag.Var is required because
// thresholdVar implements flag.Value (it parses an optional trailing '%').
func init() {
	flag.Var(&threshold, "t", "threshold value")
}
// main configures the runtime and logger, installs the custom usage text,
// and delegates all work to run.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU()) // no-op since Go 1.5; kept for old toolchains
	log.SetFlags(0)                      // plain messages, no timestamp prefix
	flag.Usage = usage
	run()
}
// run parses the command line, compares the two images, and — when the
// difference exceeds the configured threshold — reports it, optionally
// writes the diff image, and exits non-zero.
//
// The threshold is either an absolute number of differing pixels or, when
// given with a trailing '%', a percentage of the total pixel count.
func run() {
	flag.Parse()
	if flag.NArg() == 1 && flag.Arg(0) == "version" {
		fmt.Println(version)
		return
	}
	if flag.NArg() != 2 {
		log.Fatal("invalid number of positional arguments")
	}
	img1 := readImage(flag.Arg(0))
	img2 := readImage(flag.Arg(1))
	res, n, err := newDiffer().Compare(img1, img2)
	if err != nil {
		log.Fatal(err)
	}
	// Percentage of differing pixels on a 0..100 scale, matching both the
	// "%f%%" output below and threshold values parsed from e.g. "0.5%".
	// (Previously this was a 0..1 fraction compared against percent-scale
	// thresholds.)
	np := float64(n) * 100 / float64(res.Bounds().Dx()*res.Bounds().Dy())
	// Stay quiet while the difference is within the threshold. Previously,
	// operator precedence caused the absolute-pixel comparison to apply even
	// in percent mode.
	exceeded := float64(n) > threshold.value
	if threshold.percent {
		exceeded = np > threshold.value
	}
	if !exceeded {
		return
	}
	fmt.Printf("difference: %d pixel(s), %f%%\n", n, np)
	defer os.Exit(1) // exit non-zero, but write the diff image first
	if *output == "" {
		return
	}
	writeImage(*output, *outputFmt, res)
}
// usage prints the long help text followed by the flag defaults to stderr.
func usage() {
	fmt.Fprintln(os.Stderr, usageText)
	fmt.Fprintln(os.Stderr, "Usage: imgdiff [options] image1 image2")
	flag.PrintDefaults()
}
// newDiffer builds the comparison backend selected by the -a flag,
// aborting the process on an unknown algorithm name.
func newDiffer() imgdiff.Differ {
	if *algorithm == "binary" {
		return imgdiff.NewBinary()
	}
	if *algorithm == "perceptual" {
		return imgdiff.NewPerceptual(*gamma, *lum, *fov, *cf, *nocolor)
	}
	log.Fatalf("unsupported diff algorithm: %s", *algorithm)
	return nil
}
// thresholdVar is a flag.Value holding the diff threshold: either an
// absolute number of pixels or, when percent is set, a percentage.
type thresholdVar struct {
	value   float64
	percent bool
}

// String renders the threshold, with a trailing '%' for percentage values.
func (v *thresholdVar) String() string {
	if v.percent {
		return fmt.Sprintf("%g%%", v.value)
	}
	return fmt.Sprintf("%g", v.value)
}

// Set parses t as a floating-point number with an optional trailing '%'.
// An empty string resets the value to zero without touching the percent
// flag; a parse failure leaves the receiver unchanged.
func (v *thresholdVar) Set(t string) error {
	if t == "" {
		v.value = 0
		return nil
	}
	isPercent := t[len(t)-1] == '%'
	if isPercent {
		t = t[:len(t)-1]
	}
	val, err := strconv.ParseFloat(t, 64)
	if err != nil {
		return err
	}
	v.percent = isPercent
	v.value = val
	return nil
}
package asm
import (
"encoding/binary"
. "github.com/mmcloughlin/avo/build"
"github.com/mmcloughlin/avo/operand"
)
// ConstBytes declares a read-only (RODATA, pointer-free) global data section
// named name containing data, and returns a memory reference to it.
//
// The bytes are emitted with the widest DATA directives that evenly divide
// the input: 8-byte words when the length is a multiple of 8, 4-byte words
// when it is a multiple of 4, and otherwise as many 8-byte words as fit
// followed by single bytes for the remainder.
func ConstBytes(name string, data []byte) operand.Mem {
	m := GLOBL(name, RODATA|NOPTR)
	switch {
	case len(data)%8 == 0:
		constBytes8(0, data)
	case len(data)%4 == 0:
		constBytes4(0, data)
	default:
		// Largest 8-byte-aligned prefix, then the tail byte by byte.
		i := (len(data) / 8) * 8
		constBytes8(0, data[:i])
		constBytes1(i, data[i:])
	}
	return m
}
// ConstArray16 declares a read-only global array of little-endian 16-bit
// values named name and returns a reference to it.
func ConstArray16(name string, elems ...uint16) operand.Mem {
	buf := make([]byte, 2*len(elems))
	for i := range elems {
		binary.LittleEndian.PutUint16(buf[2*i:], elems[i])
	}
	return ConstBytes(name, buf)
}

// ConstArray32 declares a read-only global array of little-endian 32-bit
// values named name and returns a reference to it.
func ConstArray32(name string, elems ...uint32) operand.Mem {
	buf := make([]byte, 4*len(elems))
	for i := range elems {
		binary.LittleEndian.PutUint32(buf[4*i:], elems[i])
	}
	return ConstBytes(name, buf)
}

// ConstArray64 declares a read-only global array of little-endian 64-bit
// values named name and returns a reference to it.
func ConstArray64(name string, elems ...uint64) operand.Mem {
	buf := make([]byte, 8*len(elems))
	for i := range elems {
		binary.LittleEndian.PutUint64(buf[8*i:], elems[i])
	}
	return ConstBytes(name, buf)
}
// ConstShuffleMask32 declares a byte-shuffle mask built from 32-bit lane
// indices: output lane i selects the four bytes of source lane indices[i].
func ConstShuffleMask32(name string, indices ...uint32) operand.Mem {
	mask := make([]byte, 0, 4*len(indices))
	for _, idx := range indices {
		base := idx * 4
		mask = append(mask, byte(base), byte(base+1), byte(base+2), byte(base+3))
	}
	return ConstBytes(name, mask)
}

// ConstShuffleMask64 declares a byte-shuffle mask built from 64-bit lane
// indices: output lane i selects the eight bytes of source lane indices[i].
func ConstShuffleMask64(name string, indices ...uint64) operand.Mem {
	mask := make([]byte, 0, 8*len(indices))
	for _, idx := range indices {
		for b := uint64(0); b < 8; b++ {
			mask = append(mask, byte(idx*8+b))
		}
	}
	return ConstBytes(name, mask)
}
// ConstLoadMask32 declares a global mask of 32-bit lanes for masked
// loads/stores: each index is shifted into its lane's top (sign) bit.
func ConstLoadMask32(name string, indices ...uint32) operand.Mem {
	lanes := make([]uint32, len(indices))
	for i := range indices {
		lanes[i] = indices[i] << 31
	}
	return ConstArray32(name, lanes...)
}

// ConstLoadMask64 declares a global mask of 64-bit lanes for masked
// loads/stores: each index is shifted into its lane's top (sign) bit.
func ConstLoadMask64(name string, indices ...uint64) operand.Mem {
	lanes := make([]uint64, len(indices))
	for i := range indices {
		lanes[i] = indices[i] << 63
	}
	return ConstArray64(name, lanes...)
}
// constBytes8 emits data as 8-byte little-endian DATA directives starting at
// offset; len(data) must be a multiple of 8.
func constBytes8(offset int, data []byte) {
	for i := 0; i < len(data); i += 8 {
		DATA(offset+i, operand.U64(binary.LittleEndian.Uint64(data[i:i+8])))
	}
}

// constBytes4 emits data as 4-byte little-endian DATA directives starting at
// offset; len(data) must be a multiple of 4.
func constBytes4(offset int, data []byte) {
	for i := 0; i < len(data); i += 4 {
		DATA(offset+i, operand.U32(binary.LittleEndian.Uint32(data[i:i+4])))
	}
}

// constBytes1 emits data one byte per DATA directive starting at offset.
func constBytes1(offset int, data []byte) {
	for i, b := range data {
		DATA(offset+i, operand.U8(b))
	}
}
package query
import (
"fmt"
"math"
"strings"
"github.com/JODA-Explore/BETZE/dataset"
)
//TODO Unique and count
// A Predicate represents operations performed during the filter phase.
// Implementations both estimate their own selectivity against a dataset and
// render themselves in a human-readable query-like form.
type Predicate interface {
	// Selectivity returns the estimated fraction (0..1) of documents of the
	// given dataset that the predicate selects.
	Selectivity(d dataset.DataSet) float64
	// String translates the predicate to a human readable format.
	String() string
}
// AndPredicate is the conjunction of two predicates.
type AndPredicate struct {
	Lhs Predicate
	Rhs Predicate
}

func (q AndPredicate) String() string {
	return fmt.Sprintf("(%s AND %s)", q.Lhs.String(), q.Rhs.String())
}

// Selectivity estimates the conjunction's selectivity as the product of the
// operands' selectivities, i.e. it assumes statistical independence.
func (p AndPredicate) Selectivity(d dataset.DataSet) float64 {
	return p.Lhs.Selectivity(d) * p.Rhs.Selectivity(d)
}
// OrPredicate is the disjunction of two predicates.
type OrPredicate struct {
	Lhs Predicate
	Rhs Predicate
}

func (q OrPredicate) String() string {
	return fmt.Sprintf("(%s OR %s)", q.Lhs.String(), q.Rhs.String())
}

// Selectivity estimates the disjunction's selectivity as the sum of the
// operands' selectivities, capped at 1.
func (p OrPredicate) Selectivity(d dataset.DataSet) float64 {
	sum := p.Lhs.Selectivity(d) + p.Rhs.Selectivity(d)
	return math.Min(sum, 1.0)
}
// ExistsPredicate evaluates the existence of the given path.
type ExistsPredicate struct {
	Path string
}

func (q ExistsPredicate) String() string {
	return fmt.Sprintf("EXISTS('%s')", q.Path)
}

// Selectivity estimates the fraction of documents containing the path:
// 0 if the path is unknown to the dataset, the exact count/size ratio when
// a count is available, and 0.5 when the path exists but has no count.
func (p ExistsPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil {
		return 0.0
	}
	if dataPath.Count == nil {
		// Path exists but its frequency is unknown; assume half the documents.
		return 0.5
	}
	return float64(*dataPath.Count) / float64(d.GetSize())
}
// IsStringPredicate matches documents whose value at Path is a string.
type IsStringPredicate struct {
	Path string
}

func (q IsStringPredicate) String() string {
	return fmt.Sprintf("ISSTRING('%s')", q.Path)
}

// Selectivity estimates the fraction of documents whose value at the path is
// a string. If string-type statistics carry a count, the exact type
// selectivity is returned. Otherwise the overall path count is used as a
// fallback; when that is also unknown, the generic 0.33 estimate (matching
// getTypeSelectivity) is returned instead of dereferencing a nil count,
// which previously caused a panic.
func (p IsStringPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Stringtype == nil {
		return 0.0
	}
	strType := dataPath.Stringtype
	if strType.Count != nil {
		return getTypeSelectivity(d, strType.Count)
	}
	if dataPath.Count == nil {
		// Neither the type count nor the path count is known; fall back to
		// the generic "unknown selectivity" estimate.
		return 0.33
	}
	return float64(*dataPath.Count) / float64(d.GetSize())
}
// IntEqualityPredicate evaluates the Number equality operation between a path and a given number
type IntEqualityPredicate struct {
	Path string
	Number int64
}

func (q IntEqualityPredicate) String() string {
	return fmt.Sprintf("'%s' == %d", q.Path, q.Number)
}

// Selectivity implements Predicate.Selectivity by estimating the selectivity given the data set.
// If no DataPath with matching type and path exists, 0 is returned
// If a data path exists, but has no count, 0.01 is assumed and returned (predicate selects 1% of all documents)
// If a count exists, equality assumes that exactly one element is chosen, hence a selectivity of 1/count is returned.
// If min and max exists, a uniform distribution is assumed if the value is within the bounds and a selectivity of (1/(max-min)) is returned.
func (p IntEqualityPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Inttype == nil {
		return 0.0
	}
	intType := dataPath.Inttype
	typeSelectivity := getTypeSelectivity(d, intType.Count)
	// A value outside the observed [min, max] range can never match.
	if intType.Min != nil && p.Number < *intType.Min {
		return 0.0
	}
	if intType.Max != nil && p.Number > *intType.Max {
		return 0.0
	}
	if intType.Count == nil {
		// No count statistics: assume equality selects 1% of the documents.
		return 0.01 * typeSelectivity
	}
	if intType.Min != nil && intType.Max != nil {
		// Uniform distribution over the (max-min+1) possible integer values.
		return (1.0 / float64((*intType.Max-*intType.Min)+1)) * typeSelectivity
	}
	// Uniform distribution over the counted values: exactly one matches.
	return (1.0 / float64(*intType.Count)) * typeSelectivity
}
// FloatComparisonPredicate evaluates the Number comparison (<,>,<=,>=) operation between a path and a given number
type FloatComparisonPredicate struct {
	Path    string
	Number  float64
	Smaller bool
	Equal   bool
}

func (q FloatComparisonPredicate) String() string {
	var cmpstr = ">"
	if q.Smaller {
		cmpstr = "<"
	}
	if q.Equal {
		cmpstr += "="
	}
	return fmt.Sprintf("'%s' %s %f", q.Path, cmpstr, q.Number)
}

// Selectivity implements Predicate.Selectivity by estimating the selectivity given the data set.
// If no DataPath with matching type and path exists, 0 is returned.
// If a data path exists, but has no bounds, 0.33333 is assumed and returned (predicate selects 1/3 of all documents).
// If min and max exist, a uniform distribution is assumed if the value is within the bounds.
//
// Two defects are fixed relative to the previous version: the integer-type
// statistics were dereferenced without a nil check (panicking for paths that
// hold float values but no integers), and the `Smaller` branch of the
// uniform-distribution estimate omitted the type-selectivity factor that
// every other branch applies.
func (p FloatComparisonPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Floattype == nil {
		return 0.0
	}
	floatType := dataPath.Floattype
	// Numeric comparisons match both float- and int-typed values, so combine
	// both type shares; the int type may be absent entirely.
	typeSelectivity := getTypeSelectivity(d, floatType.Count)
	if intType := dataPath.Inttype; intType != nil {
		typeSelectivity += getTypeSelectivity(d, intType.Count)
	}
	if floatType.Min != nil && p.Number < *floatType.Min {
		// Value lies below all observed values: "<" matches nothing, ">" everything.
		if p.Smaller {
			return 0.0
		}
		return 1.0 * typeSelectivity
	}
	if floatType.Max != nil && p.Number > *floatType.Max {
		// Value lies above all observed values: ">" matches nothing, "<" everything.
		if !p.Smaller {
			return 0.0
		}
		return 1.0 * typeSelectivity
	}
	if floatType.Min != nil && floatType.Max != nil {
		// TODO Equal?
		// Fraction of the [min, max] range at or below the value, assuming a
		// uniform distribution.
		abs := ((p.Number - *floatType.Min) + 1.0) / ((*floatType.Max - *floatType.Min) + 1.0)
		if p.Smaller {
			return abs * typeSelectivity
		}
		return (1.0 - abs) * typeSelectivity
	}
	return (1.0 / 3.0) * typeSelectivity
}
// StrEqualityPredicate evaluates the String equality operation between a path and a given string
type StrEqualityPredicate struct {
	Path string
	Str  string
}

func (q StrEqualityPredicate) String() string {
	return fmt.Sprintf("'%s' == \"%s\"", q.Path, q.Str)
}

// Selectivity implements Predicate.Selectivity by estimating the selectivity given the data set.
// If no DataPath with matching type and path exists, 0 is returned
// If a data path exists, but has no count, 0.01 is assumed and returned (predicate selects 1% of all documents)
// If a count exists, equality assumes that exactly one element is chosen, hence a selectivity of 1/count is returned.
// If min and max exists, a uniform distribution is assumed if the value is within the bounds and a selectivity of (1/(max-min)) is returned.
func (p StrEqualityPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Stringtype == nil {
		return 0.0
	}
	strType := dataPath.Stringtype
	typeSelectivity := getTypeSelectivity(d, strType.Count)
	// Strings outside the lexicographic [min, max] range can never match.
	if strType.Min != nil && p.Str < *strType.Min {
		return 0.0
	}
	if strType.Max != nil && p.Str > *strType.Max {
		return 0.0
	}
	if strType.Count == nil {
		// No count statistics: assume equality selects 1% of the documents.
		return 0.01 * typeSelectivity
	}
	// Uniform distribution over the counted values: exactly one matches.
	return (1.0 / float64(*strType.Count)) * typeSelectivity
}
// StrPrefixPredicate checks if a given path contains a string with the given
// prefix.
type StrPrefixPredicate struct {
	Path   string
	Prefix string
}

func (q StrPrefixPredicate) String() string {
	return fmt.Sprintf("HAS_PREFIX('%s',\"%s\")", q.Path, q.Prefix)
}

// Selectivity implements Predicate.Selectivity by estimating the selectivity
// given the data set.
// If no DataPath with matching type and path exists, 0 is returned.
// If a data path exists, but has no count, 0.01 is assumed (1% of documents).
// If a count and a prefix list exist and one of the known prefixes matches,
// a uniform distribution over the known prefixes is assumed and 1/#prefixes
// is returned.
func (p StrPrefixPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Stringtype == nil {
		return 0.0
	}
	strType := dataPath.Stringtype
	typeSelectivity := getTypeSelectivity(d, strType.Count)
	if strType.Count == nil {
		return 0.01 * typeSelectivity
	}
	if len(strType.Prefixes) > 0 {
		for _, known := range strType.Prefixes {
			if strings.HasPrefix(known, p.Prefix) {
				// Uniform distribution over the known prefixes.
				return (1.0 / float64(len(strType.Prefixes))) * typeSelectivity
			}
		}
		// None of the known prefixes matches the requested one.
		return 0.0
	}
	return (1.0 / float64(*strType.Count)) * typeSelectivity
}
// BoolEqualityPredicate evaluates the boolean equality operation between a
// path and a boolean constant.
type BoolEqualityPredicate struct {
	Path  string
	Value bool
}

func (q BoolEqualityPredicate) String() string {
	return fmt.Sprintf("'%s' == %t", q.Path, q.Value)
}

// Selectivity implements Predicate.Selectivity by estimating the selectivity
// given the data set.
// If no DataPath with matching type and path exists, 0 is returned.
// If true/false counts exist, an exact selectivity is returned; otherwise
// half the boolean-typed documents are assumed to match.
func (p BoolEqualityPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Booltype == nil {
		return 0.0
	}
	boolType := dataPath.Booltype
	typeSelectivity := getTypeSelectivity(d, boolType.Count)
	if typeSelectivity == 0 {
		return 0.0
	}
	if boolType.Count != nil {
		switch {
		case p.Value && boolType.TrueCount != nil:
			return (float64(*boolType.TrueCount) / float64(*boolType.Count)) * typeSelectivity
		case !p.Value && boolType.FalseCount != nil:
			return (float64(*boolType.FalseCount) / float64(*boolType.Count)) * typeSelectivity
		}
	}
	// No per-value counts: assume an even true/false split.
	return 0.5 * typeSelectivity
}
// getTypeSelectivity estimates the fraction of documents in the dataset
// whose value at the inspected path has the type described by typeCount:
// exactly 0 for a known-zero count, the exact ratio when both counts are
// known, and a flat 0.33 estimate otherwise.
func getTypeSelectivity(dataset dataset.DataSet, typeCount *uint64) float64 {
	if typeCount != nil && *typeCount == 0 { // Type does not exist
		return 0.0
	}
	if typeCount != nil && dataset.Count != nil { //We know both counts, calculate type selectivity
		return float64(*typeCount) / float64(*dataset.Count)
	}
	return 0.33 //We do not know how selective the type is, estimate 0.33
}
// ObjectSizeComparisonPredicate evaluates the Number comparison (<,>,<=,>=) operation between the number of members in a path and a given number
type ObjectSizeComparisonPredicate struct {
	Path    string
	Number  uint64
	Smaller bool
	Equal   bool
}

func (q ObjectSizeComparisonPredicate) String() string {
	var cmpstr = ">"
	if q.Smaller {
		cmpstr = "<"
	}
	if q.Equal {
		cmpstr += "="
	}
	return fmt.Sprintf("MEMBERCOUNT('%s') %s %d", q.Path, cmpstr, q.Number)
}

// Selectivity implements Predicate.Selectivity by estimating the selectivity given the data set.
// If no DataPath with matching type and path exists, 0 is returned
// If a data path exists, but has no count, 0.33333 is assumed and returned (predicate selects 1/3 of all documents)
// If min and max exists, a uniform distribution is assumed if the value is within the bounds and a selectivity of (1/(max-min)) is returned.
func (p ObjectSizeComparisonPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Objecttype == nil {
		return 0.0
	}
	objectType := dataPath.Objecttype
	typeSelectivity := getTypeSelectivity(d, objectType.Count)
	if objectType.MinMembers != nil && p.Number < *objectType.MinMembers {
		// Below the observed minimum: "<" matches nothing, ">" everything.
		if p.Smaller {
			return 0.0
		}
		return 1.0 * typeSelectivity
	}
	if objectType.MaxMembers != nil && p.Number > *objectType.MaxMembers {
		// Above the observed maximum: ">" matches nothing, "<" everything.
		if !p.Smaller {
			return 0.0
		}
		return 1.0 * typeSelectivity
	}
	if objectType.MinMembers != nil && objectType.MaxMembers != nil {
		// TODO Equal?
		// Uniform-distribution estimate. The unsigned subtractions cannot
		// underflow here: the earlier bound checks guarantee
		// Min <= Number <= Max whenever both bounds are present.
		abs := (float64(p.Number-*objectType.MinMembers) + 1.0) / (float64(*objectType.MaxMembers-*objectType.MinMembers) + 1.0)
		if p.Smaller {
			return abs * typeSelectivity
		}
		return (1.0 - abs) * typeSelectivity
	}
	return (1.0 / 3.0) * typeSelectivity
}
// ArraySizeComparisonPredicate evaluates the Number comparison (<,>,<=,>=) operation between the number of entries in an array path and a given number
type ArraySizeComparisonPredicate struct {
	Path    string
	Number  uint64
	Smaller bool
	Equal   bool
}

func (q ArraySizeComparisonPredicate) String() string {
	var cmpstr = ">"
	if q.Smaller {
		cmpstr = "<"
	}
	if q.Equal {
		cmpstr += "="
	}
	return fmt.Sprintf("SIZE('%s') %s %d", q.Path, cmpstr, q.Number)
}

// Selectivity implements Predicate.Selectivity by estimating the selectivity given the data set.
// If no DataPath with matching type and path exists, 0 is returned
// If a data path exists, but has no count, 0.33333 is assumed and returned (predicate selects 1/3 of all documents)
// If min and max exists, a uniform distribution is assumed if the value is within the bounds and a selectivity of (1/(max-min)) is returned.
func (p ArraySizeComparisonPredicate) Selectivity(d dataset.DataSet) float64 {
	dataPath := d.Paths[p.Path]
	if dataPath == nil || dataPath.Arraytype == nil {
		return 0.0
	}
	arrayType := dataPath.Arraytype
	typeSelectivity := getTypeSelectivity(d, arrayType.Count)
	if arrayType.MinSize != nil && p.Number < *arrayType.MinSize {
		// Below the observed minimum: "<" matches nothing, ">" everything.
		if p.Smaller {
			return 0.0
		}
		return 1.0 * typeSelectivity
	}
	if arrayType.MaxSize != nil && p.Number > *arrayType.MaxSize {
		// Above the observed maximum: ">" matches nothing, "<" everything.
		if !p.Smaller {
			return 0.0
		}
		return 1.0 * typeSelectivity
	}
	if arrayType.MinSize != nil && arrayType.MaxSize != nil {
		// TODO Equal?
		// Uniform-distribution estimate. The unsigned subtractions cannot
		// underflow here: the earlier bound checks guarantee
		// Min <= Number <= Max whenever both bounds are present.
		abs := (float64(p.Number-*arrayType.MinSize) + 1.0) / (float64(*arrayType.MaxSize-*arrayType.MinSize) + 1.0)
		if p.Smaller {
			return abs * typeSelectivity
		}
		return (1.0 - abs) * typeSelectivity
	}
	return (1.0 / 3.0) * typeSelectivity
}
package lowess
import (
"errors"
"math"
"sort"
)
// Coord is a single (X, Y) sample point.
type Coord struct {
	X float64
	Y float64
}

// CoordSlice implements sort.Interface, ordering coordinates by ascending X.
type CoordSlice []Coord

func (s CoordSlice) Len() int {
	return len(s)
}

func (s CoordSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s CoordSlice) Less(i, j int) bool {
	return s[i].X < s[j].X
}

// coordDist pairs a coordinate with its distance from a target X position.
type coordDist struct {
	coord Coord
	dist  float64
}

// coordDistSlice implements sort.Interface, ordering by ascending distance.
type coordDistSlice []coordDist

func (s coordDistSlice) Len() int {
	return len(s)
}

func (s coordDistSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s coordDistSlice) Less(i, j int) bool {
	return s[i].dist < s[j].dist
}
// CoordsToArrays splits coords into parallel slices of X and Y values,
// preserving order. Both results are nil for empty input (matching the
// previous append-based behaviour); otherwise the slices are allocated once
// at the final length instead of growing via repeated appends.
func CoordsToArrays(coords []Coord) ([]float64, []float64) {
	if len(coords) == 0 {
		return nil, nil
	}
	xCoords := make([]float64, len(coords))
	yCoords := make([]float64, len(coords))
	for i, c := range coords {
		xCoords[i] = c.X
		yCoords[i] = c.Y
	}
	return xCoords, yCoords
}
// findDist returns the absolute distance between a and b.
func findDist(a float64, b float64) float64 {
	return math.Abs(b - a)
}
// findMax returns the largest value in values, or 0 for an empty slice.
// The previous version initialised the running maximum to 0, which returned
// the wrong answer for slices containing only negative values.
func findMax(values []float64) float64 {
	if len(values) == 0 {
		return 0.0
	}
	max := values[0]
	for _, value := range values[1:] {
		if value > max {
			max = value
		}
	}
	return max
}
// findNearest returns the coordinates whose X value lies inside a window
// centred on targetX, sorted by ascending distance from targetX. The window
// width is bandwidth (a fraction in (0,1]) times the total X span of the
// data.
//
// NOTE: sortedCoords is sorted in place by X as a side effect, so the order
// of the caller's slice is modified.
func findNearest(sortedCoords CoordSlice, targetX float64, bandwidth float64) (coordDistSlice, error) {
	if bandwidth <= 0 || bandwidth > 1 {
		return nil, errors.New("findnearest: the bandwidth must be >0 and <=1")
	}
	sort.Sort(sortedCoords)
	// Window half-width derived from the overall X extent of the data.
	totalWidth := sortedCoords[len(sortedCoords)-1].X - sortedCoords[0].X
	windowWidth := bandwidth * totalWidth
	minX := targetX - windowWidth/2
	maxX := targetX + windowWidth/2
	var distances coordDistSlice
	for i := 0; i < len(sortedCoords); i++ {
		if sortedCoords[i].X >= minX && sortedCoords[i].X <= maxX {
			distances = append(distances, coordDist{
				coord: sortedCoords[i],
				dist: findDist(targetX, sortedCoords[i].X),
			},
			)
		}
	}
	sort.Sort(distances)
	return distances, nil
}
// tricubeWeightFunction computes tricube kernel weights for the given
// distance-sorted coordinates: w_i = (1 - |d_i/d_max|^3)^3, where d_max is
// the distance of the last (farthest) entry, so the input must be sorted
// ascending by distance and non-empty.
// https://uk.mathworks.com/help/curvefit/smoothing-data.html
//
// When every point coincides with the target (d_max == 0) all points are
// given equal weight; the previous version divided by zero and produced
// NaN weights in that case.
func tricubeWeightFunction(sortedCoordDists coordDistSlice) []float64 {
	weights := make([]float64, len(sortedCoordDists))
	maxDist := sortedCoordDists[len(sortedCoordDists)-1].dist
	if maxDist == 0 {
		for i := range weights {
			weights[i] = 1.0
		}
		return weights
	}
	for i := 0; i < len(sortedCoordDists); i++ {
		weights[i] = math.Pow(1-math.Pow(math.Abs(sortedCoordDists[i].dist/maxDist), 3), 3)
	}
	return weights
}
// weightedMean returns sum(w_i*v_i) / sum(w_i) for the given values and
// weights; the two slices must have equal length.
func weightedMean(values []float64, weights []float64) (float64, error) {
	if len(weights) != len(values) {
		return 0, errors.New("regression: weighted mean requires equal length weight and value slices")
	}
	var sumWeights float64
	for _, w := range weights {
		sumWeights += w
	}
	var sumWeightedValues float64
	for i, v := range values {
		sumWeightedValues += v * weights[i]
	}
	return sumWeightedValues / sumWeights, nil
}
// wLSRegression fits a weighted least-squares line through the given
// coordinates and returns its (slope, intercept). Each coordinate i is
// weighted by weights[i]; the two slices must have equal length.
//
// slope = sum(w*(x-mx)*(y-my)) / sum(w*(x-mx)^2) where mx, my are the
// weighted means of x and y, and intercept = my - slope*mx.
func wLSRegression(coordinates coordDistSlice, weights []float64) (float64, float64, error) {
	if len(weights) != len(coordinates) {
		return 0, 0, errors.New("regression: wls regressions requires coordinate and weight slices of equal length")
	}
	var xCoords []float64
	var yCoords []float64
	for i := 0; i < len(coordinates); i++ {
		xCoords = append(xCoords, coordinates[i].coord.X)
		yCoords = append(yCoords, coordinates[i].coord.Y)
	}
	weightedMeanX, err := weightedMean(xCoords, weights)
	if err != nil {
		return 0, 0, err
	}
	weightedMeanY, err := weightedMean(yCoords, weights)
	if err != nil {
		return 0, 0, err
	}
	var sumNumerator float64
	var sumDenominator float64
	for i := 0; i < len(xCoords); i++ {
		sumNumerator = sumNumerator + weights[i]*(xCoords[i]-weightedMeanX)*(yCoords[i]-weightedMeanY)
		sumDenominator = sumDenominator + weights[i]*math.Pow(xCoords[i]-weightedMeanX, 2)
	}
	// This deals with flat lines caused by identical Y values or insufficiently wide bandwidth.
	var slope float64
	if sumDenominator == 0{
		slope = 0.0
	}else{
		slope = sumNumerator / sumDenominator
	}
	var intercept = weightedMeanY - slope*weightedMeanX
	return slope, intercept, nil
}
func CalcLOESS(estimationPoints []float64, coordinates []Coord, bandwidth float64) ([]Coord, error) {
var loessPoints []Coord
if bandwidth <= 0 || bandwidth > 1 {
return nil, errors.New("CalcLOESS: the bandwidth must be >0 and <=1")
}
// For each estimation point, calculate WLS regression line from nearest coordinates, then evaluate.
for i := 0; i < len(estimationPoints); i++ {
var widthCoords coordDistSlice
// Capture coordinates within the width
widthCoords, err := findNearest(coordinates, estimationPoints[i], bandwidth)
if err != nil {
return []Coord{}, err
}
weights := tricubeWeightFunction(widthCoords)
slope, intercept, err := wLSRegression(widthCoords, weights)
if err != nil {
return []Coord{}, err
}
//fmt.Println("Weights:", weights)
//fmt.Println("Slope: ", slope)
//fmt.Println("Intercept: ", intercept)
estimatedValue := slope*estimationPoints[i] + intercept
//fmt.Println("\033[0;92m", estimatedValue, "\033[0m")
loessPoints = append(loessPoints, Coord{
X: estimationPoints[i],
Y: estimatedValue,
})
}
return loessPoints, nil
} | lowess.go | 0.803868 | 0.453141 | lowess.go | starcoder |
package parser
import (
"bufio"
"bytes"
"io"
"strconv"
)
// Scanner is a lexical scanner. pos tracks the current location: pos.Char is
// the rune offset within the current line, and pos.Lines records the length
// of each completed line (which lets unread step back across a newline).
type Scanner struct {
	r   *bufio.Reader
	pos TokenPos
}

// NewScanner returns a new instance of Scanner reading from r, positioned at
// the start of the input.
func NewScanner(r io.Reader) *Scanner {
	return &Scanner{r: bufio.NewReader(r), pos: TokenPos{Char: 0, Lines: []int{}}}
}
// read reads the next rune from the buffered reader.
// Returns the rune(0) if reached the end or error occurs.
//
// Position bookkeeping: a newline appends the finished line's length to
// pos.Lines and resets pos.Char to 0; any other rune advances pos.Char.
func (s *Scanner) read() rune {
	ch, _, err := s.r.ReadRune()
	if err != nil {
		return eof
	}
	if ch == '\n' {
		s.pos.Lines = append(s.pos.Lines, s.pos.Char)
		s.pos.Char = 0
	} else {
		s.pos.Char++
	}
	return ch
}
// unread places the previously read rune back on the reader.
//
// If the last rune read was a newline (pos.Char == 0), the previous line's
// recorded length is popped from pos.Lines to restore the column; otherwise
// the column is simply decremented.
//
// NOTE(review): bufio.Reader supports only a single pending unread, and
// calling unread at the very start of the input would index an empty
// pos.Lines. Assumes callers only unread immediately after a successful
// read — confirm.
func (s *Scanner) unread() {
	_ = s.r.UnreadRune()
	if s.pos.Char == 0 {
		s.pos.Char = s.pos.Lines[len(s.pos.Lines)-1]
		s.pos.Lines = s.pos.Lines[:len(s.pos.Lines)-1]
	} else {
		s.pos.Char--
	}
}
// Scan returns the next token, skipping leading whitespace and "--" line
// comments.
//
// The previous implementation assigned endPos in a deferred function, which
// only runs after the returned token — already built from endPos's zero
// value — has been evaluated, so every token's end position was the zero
// TokenPos. End positions are now taken from s.pos when each token is built.
func (s *Scanner) Scan() Token {
	ch := s.read()
	if isWhitespace(ch) {
		s.skipWhitespace()
		ch = s.read()
	}
	if isIdent(ch) {
		s.unread()
		return s.scanIdent()
	}
	// Position at which the token started.
	startPos := s.pos
	switch ch {
	case eof:
		return &ConstToken{t: 0, start: startPos, end: s.pos}
	case ':':
		return &ConstToken{t: tCOLON, start: startPos, end: s.pos}
	case ';':
		return &ConstToken{t: tSEMICOLON, start: startPos, end: s.pos}
	case ',':
		return &ConstToken{t: tCOMMA, start: startPos, end: s.pos}
	case '(':
		return &ConstToken{t: tLPAREN, start: startPos, end: s.pos}
	case ')':
		return &ConstToken{t: tRPAREN, start: startPos, end: s.pos}
	case '=':
		return &ConstToken{t: tEQ, start: startPos, end: s.pos}
	case '-':
		if ch2 := s.read(); ch2 == '-' {
			// "--" starts a comment running to end of line. There is no
			// need to push the dashes back (bufio can only back up one
			// rune anyway); skipComment discards the rest of the line.
			s.skipComment()
			return s.Scan()
		} else if ch2 != eof {
			// A lone '-' is illegal; push the following rune back so it
			// is not silently swallowed (it was previously lost).
			s.unread()
		}
	}
	return &ConstToken{t: tILLEGAL, start: startPos, end: s.pos}
}
// scanIdent consumes a run of identifier characters and classifies the
// resulting word as a keyword token, an integer literal, or a plain
// identifier.
//
// The previous implementation assigned endPos in a deferred function, which
// only runs after the returned token — already built from endPos's zero
// value — has been evaluated, so every token's end position was the zero
// TokenPos. The end position is now read from s.pos once the full word has
// been consumed.
func (s *Scanner) scanIdent() Token {
	startPos := s.pos
	var buf bytes.Buffer
	buf.WriteRune(s.read())
	for {
		if ch := s.read(); ch == eof {
			break
		} else if !isIdent(ch) {
			s.unread()
			break
		} else {
			_, _ = buf.WriteRune(ch)
		}
	}
	// The whole word has been consumed; s.pos is the token's end position.
	endPos := s.pos
	switch buf.String() {
	case "def":
		return &ConstToken{t: tDEF, start: startPos, end: endPos}
	case "call":
		return &ConstToken{t: tCALL, start: startPos, end: endPos}
	case "spawn":
		return &ConstToken{t: tSPAWN, start: startPos, end: endPos}
	case "case":
		return &ConstToken{t: tCASE, start: startPos, end: endPos}
	case "close":
		return &ConstToken{t: tCLOSE, start: startPos, end: endPos}
	case "else":
		return &ConstToken{t: tELSE, start: startPos, end: endPos}
	case "endif":
		return &ConstToken{t: tENDIF, start: startPos, end: endPos}
	case "endselect":
		return &ConstToken{t: tENDSELECT, start: startPos, end: endPos}
	case "if":
		return &ConstToken{t: tIF, start: startPos, end: endPos}
	case "let":
		return &ConstToken{t: tLET, start: startPos, end: endPos}
	case "newchan":
		return &ConstToken{t: tNEWCHAN, start: startPos, end: endPos}
	case "select":
		return &ConstToken{t: tSELECT, start: startPos, end: endPos}
	case "send":
		return &ConstToken{t: tSEND, start: startPos, end: endPos}
	case "recv":
		return &ConstToken{t: tRECV, start: startPos, end: endPos}
	case "tau":
		return &ConstToken{t: tTAU, start: startPos, end: endPos}
	case "letmem":
		return &ConstToken{t: tLETMEM, start: startPos, end: endPos}
	case "read":
		return &ConstToken{t: tREAD, start: startPos, end: endPos}
	case "write":
		return &ConstToken{t: tWRITE, start: startPos, end: endPos}
	case "letsync":
		return &ConstToken{t: tLETSYNC, start: startPos, end: endPos}
	case "mutex":
		return &ConstToken{t: tMUTEX, start: startPos, end: endPos}
	case "rwmutex":
		return &ConstToken{t: tRWMUTEX, start: startPos, end: endPos}
	case "lock":
		return &ConstToken{t: tLOCK, start: startPos, end: endPos}
	case "unlock":
		return &ConstToken{t: tUNLOCK, start: startPos, end: endPos}
	case "rlock":
		return &ConstToken{t: tRLOCK, start: startPos, end: endPos}
	case "runlock":
		return &ConstToken{t: tRUNLOCK, start: startPos, end: endPos}
	}
	// Not a keyword: an all-digit word is an integer literal, anything else
	// is a plain identifier.
	if i, err := strconv.Atoi(buf.String()); err == nil {
		return &DigitsToken{num: i, start: startPos, end: endPos}
	}
	return &IdentToken{str: buf.String(), start: startPos, end: endPos}
}
// skipComment consumes input up to and including the next newline (or EOF).
func (s *Scanner) skipComment() {
	for ch := s.read(); ch != eof && ch != '\n'; ch = s.read() {
	}
}
func (s *Scanner) skipWhitespace() {
for {
if ch := s.read(); ch == eof {
break
} else if !isWhitespace(ch) {
s.unread()
break
}
}
} | parser/scanner.go | 0.580947 | 0.496765 | scanner.go | starcoder |
package unit
import (
"path"
"strings"
)
// UnitType provides an enumeration over the different types of systemd unit
// file types
type UnitType uint
const (
// UnitTypeUnknown represents a generic or unknown unit type
UnitTypeUnknown UnitType = iota
// UnitTypeService represents a systemd.service(5)
UnitTypeService UnitType = iota
// UnitTypeSocket represents a systemd.socket(5)
UnitTypeSocket UnitType = iota
// UnitTypeDevice represents a systemd.device(5)
UnitTypeDevice UnitType = iota
// UnitTypeMount represents a systemd.mount(5)
UnitTypeMount UnitType = iota
// UnitTypeAutoMount represents a systemd.automount(5)
UnitTypeAutoMount UnitType = iota
// UnitTypeSwap represents a systemd.swap(5)
UnitTypeSwap UnitType = iota
// UnitTypeTarget represents a systemd.target(5)
UnitTypeTarget UnitType = iota
// UnitTypePath represents a systemd.path(5)
UnitTypePath UnitType = iota
// UnitTypeTimer represents a systemd.timer(5)
UnitTypeTimer UnitType = iota
// UnitTypeSnapshot represents a systemd.snapshot(5)
UnitTypeSnapshot UnitType = iota
// UnitTypeSlice represents a systemd.slice(5)
UnitTypeSlice UnitType = iota
// UnitTypeScope represents a systemd.scope(5)
UnitTypeScope UnitType = iota
)
// UnitTypeFromName takes a service name in the form of "foo.service" and
// returns an appropriate UnitType representing that service type. If the
// service type isn't recognized from the suffix then UnitTypeUnknown is
// returned.
func UnitTypeFromName(s string) UnitType {
basename := path.Base(s)
components := strings.Split(basename, ".")
if len(components) == 0 {
return UnitTypeUnknown
}
switch strings.ToLower(components[len(components)-1]) {
case "service":
return UnitTypeService
case "socket":
return UnitTypeSocket
case "device":
return UnitTypeDevice
case "mount":
return UnitTypeMount
case "automount":
return UnitTypeAutoMount
case "swap":
return UnitTypeSwap
case "target":
return UnitTypeTarget
case "path":
return UnitTypePath
case "timer":
return UnitTypeTimer
case "snapshot":
return UnitTypeSnapshot
case "slice":
return UnitTypeSlice
case "scope":
return UnitTypeScope
}
return UnitTypeUnknown
}
// Suffix is the dual of UnitTypeFromName and generates the correct unit file
// suffix based on the type
func (u UnitType) Suffix() string {
switch u {
case UnitTypeService:
return "service"
case UnitTypeSocket:
return "socket"
case UnitTypeDevice:
return "device"
case UnitTypeMount:
return "mount"
case UnitTypeAutoMount:
return "automount"
case UnitTypeSwap:
return "swap"
case UnitTypeTarget:
return "target"
case UnitTypePath:
return "path"
case UnitTypeTimer:
return "timer"
case UnitTypeSnapshot:
return "snapshot"
case UnitTypeSlice:
return "slice"
case UnitTypeScope:
return "scope"
}
return ""
}
// String provides a stringified-version of the type
func (u UnitType) String() string {
return u.Suffix()
}
// UnitTypeString returns a stringified version of the unit type, in title case
func (u UnitType) UnitTypeString() string {
return strings.Title(u.Suffix())
}
// HasProperties returns true if the unit type has type-specific properties
func (u UnitType) HasProperties() bool {
switch u {
case UnitTypeTarget, UnitTypeSnapshot, UnitTypeUnknown:
return false
}
return true
} | resource/systemd/unit/types.go | 0.738952 | 0.60577 | types.go | starcoder |
package topdown
import (
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/topdown/builtins"
)
// Helper: sets of vertices can be represented as Arrays or Sets.
// Helper: sets of vertices can be represented as Arrays or Sets.
// foreachVertex invokes f once per element of collection; values of any
// other type are silently ignored.
func foreachVertex(collection *ast.Term, f func(*ast.Term)) {
	switch v := collection.Value.(type) {
	case ast.Set:
		v.Foreach(f)
	case *ast.Array:
		v.Foreach(f)
	}
}
// numberOfEdges returns the number of elements of an array or a set (of
// edges); collections of any other type count as zero.
func numberOfEdges(collection *ast.Term) int {
	if s, ok := collection.Value.(ast.Set); ok {
		return s.Len()
	}
	if a, ok := collection.Value.(*ast.Array); ok {
		return a.Len()
	}
	return 0
}
// builtinReachable is the implementation registered for ast.ReachableBuiltin:
// given a graph (object mapping each vertex to a collection of neighbours)
// and an initial collection of vertices, it yields the set of vertices
// reachable from the initial set via breadth-first traversal.
func builtinReachable(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
	// Return the empty set if the first argument is not an object.
	graph, ok := args[0].Value.(ast.Object)
	if !ok {
		return iter(ast.NewTerm(ast.NewSet()))
	}
	// This is a queue that holds all nodes we still need to visit. It is
	// initialised to the initial set of nodes we start out with.
	queue := []*ast.Term{}
	foreachVertex(args[1], func(t *ast.Term) {
		queue = append(queue, t)
	})
	// This is the set of nodes we have reached.
	reached := ast.NewSet()
	// Keep going as long as we have nodes in the queue.
	for len(queue) > 0 {
		// Get the edges for this node. If the node was not in the graph,
		// `edges` will be `nil` and we can ignore it.
		node := queue[0]
		if edges := graph.Get(node); edges != nil {
			// Add all the newly discovered neighbors.
			// NOTE(review): membership is only checked against `reached`, so
			// a node may be enqueued more than once before its first visit;
			// this costs extra work but does not affect the result.
			foreachVertex(edges, func(neighbor *ast.Term) {
				if !reached.Contains(neighbor) {
					queue = append(queue, neighbor)
				}
			})
			// Mark the node as reached.
			reached.Add(node)
		}
		queue = queue[1:]
	}
	return iter(ast.NewTerm(reached))
}
// pathBuilder is called recursively to build an array of paths that are reachable from the root.
// path is the chain of vertices walked so far; reached guards against cycles.
// NOTE(review): completed paths are committed element-wise via
// append(paths, path...), so `paths` accumulates a flattened sequence of
// vertices rather than one entry per path; builtinReachablePaths then wraps
// the whole sequence in a single array — confirm this flattening is intended.
func pathBuilder(graph ast.Object, root *ast.Term, path []*ast.Term, paths []*ast.Term, reached ast.Set) []*ast.Term {
	if edges := graph.Get(root); edges != nil {
		path = append(path, root)
		if numberOfEdges(edges) >= 1 {
			foreachVertex(edges, func(neighbor *ast.Term) {
				if reached.Contains(neighbor) {
					// If we've already reached this node, return current path (avoid infinite recursion)
					paths = append(paths, path...)
				} else {
					reached.Add(root)
					paths = pathBuilder(graph, neighbor, path, paths, reached)
				}
			})
		} else {
			// Vertex with no edges: the walk ends here, commit the current path.
			paths = append(paths, path...)
		}
	} else {
		// Node is nonexistent (not in graph). Commit the current path (without adding this root)
		paths = append(paths, path...)
	}
	return paths
}
// builtinReachablePaths implements the built-in registered under
// ast.ReachablePathsBuiltin.Name: for each initial vertex it collects the
// vertices reachable through that vertex's edges (via pathBuilder) into an
// array, and calls iter once with the set of those arrays.
func builtinReachablePaths(bctx BuiltinContext, args []*ast.Term, iter func(*ast.Term) error) error {
	// Return an error if the first argument is not an object.
	graph, err := builtins.ObjectOperand(args[0].Value, 1)
	if err != nil {
		return err
	}
	// This is a queue that holds all nodes we still need to visit. It is
	// initialised to the initial set of nodes we start out with.
	var queue []*ast.Term
	foreachVertex(args[1], func(t *ast.Term) {
		queue = append(queue, t)
	})
	results := ast.NewSet()
	for _, node := range queue {
		// Find reachable paths from edges in root node in queue and append arrays to the results set
		if edges := graph.Get(node); edges != nil {
			if numberOfEdges(edges) >= 1 {
				foreachVertex(edges, func(neighbor *ast.Term) {
					// Seed the walk with this root already on the path and in
					// the cycle-guard set.
					paths := pathBuilder(graph, neighbor, []*ast.Term{node}, []*ast.Term{}, ast.NewSet(node))
					results.Add(ast.ArrayTerm(paths...))
				})
			} else {
				// Vertex with an empty edge collection: its only path is itself.
				results.Add(ast.ArrayTerm(node))
			}
		}
	}
	return iter(ast.NewTerm(results))
}
// init registers the graph reachability built-in functions with the topdown
// evaluation engine.
func init() {
	RegisterBuiltinFunc(ast.ReachableBuiltin.Name, builtinReachable)
	RegisterBuiltinFunc(ast.ReachablePathsBuiltin.Name, builtinReachablePaths)
} | topdown/reachable.go | 0.751648 | 0.404125 | reachable.go | starcoder |
package draw
import "fmt"
// Rect is an axis-aligned rectangle described by its top-left corner (X, Y)
// and its Width and Height, all in integer units.
type Rect struct {
	X int
	Y int
	Width int
	Height int
}
// NewRect returns a rectangle of size s anchored at the origin.
func NewRect(s *Size) *Rect {
	return &Rect{Width: s.Width, Height: s.Height}
}

// NewRectFromSize returns a rectangle of size s whose top-left corner is lt.
func NewRectFromSize(lt *Point, s *Size) *Rect {
	return &Rect{X: lt.X, Y: lt.Y, Width: s.Width, Height: s.Height}
}

// NewRectFromPoint returns the rectangle spanning from top-left lt to
// bottom-right rb.
func NewRectFromPoint(lt *Point, rb *Point) *Rect {
	return &Rect{X: lt.X, Y: lt.Y, Width: rb.X - lt.X, Height: rb.Y - lt.Y}
}

// Clone returns an independent copy of r.
func (r *Rect) Clone() *Rect {
	copied := *r
	return &copied
}
// Left returns the x coordinate of the left edge.
func (r *Rect) Left() int {
	return r.X
}
// Right returns the x coordinate of the right edge (exclusive: X + Width).
func (r *Rect) Right() int {
	return r.X + r.Width
}
// Top returns the y coordinate of the top edge.
func (r *Rect) Top() int {
	return r.Y
}
// Bottom returns the y coordinate of the bottom edge (exclusive: Y + Height).
func (r *Rect) Bottom() int {
	return r.Y + r.Height
}
// TopLeft returns the top-left corner of r.
func (r *Rect) TopLeft() *Point {
	return NewPoint(r.Left(), r.Top())
}

// TopRight returns the top-right corner of r.
func (r *Rect) TopRight() *Point {
	return NewPoint(r.Right(), r.Top())
}

// BottomLeft returns the bottom-left corner of r.
func (r *Rect) BottomLeft() *Point {
	return NewPoint(r.Left(), r.Bottom())
}

// BottomRight returns the bottom-right corner of r.
func (r *Rect) BottomRight() *Point {
	return NewPoint(r.Right(), r.Bottom())
}

// Center returns the midpoint of r (integer division truncates for odd sizes).
func (r *Rect) Center() *Point {
	return NewPoint(r.X+r.Width/2, r.Y+r.Height/2)
}
// Empty reports whether r has no area (non-positive width or height).
func (r *Rect) Empty() bool {
	return !(r.Width > 0 && r.Height > 0)
}
// Location returns the top-left corner of r.
func (r *Rect) Location() *Point {
	return NewPoint(r.X, r.Y)
}

// SetLocation moves the top-left corner of r to pt, keeping its size.
func (r *Rect) SetLocation(pt *Point) {
	r.X, r.Y = pt.X, pt.Y
}
// Offset translates r by (ox, oy) in place and returns r for chaining.
func (r *Rect) Offset(ox, oy int) *Rect {
	r.X += ox
	r.Y += oy
	return r
}
// Contains reports whether pt lies strictly inside r.
// NOTE(review): all four edges are exclusive (strict comparisons), so points
// on the left/top border are NOT contained — confirm callers expect this.
func (r *Rect) Contains(pt *Point) bool {
	return pt.X > r.X && pt.X < r.Right() && pt.Y > r.Y && pt.Y < r.Bottom()
}
// RegionLeft returns the left half of r.
func (r *Rect) RegionLeft() *Rect {
	return NewRectFromSize(NewPoint(r.X, r.Y), NewSize(r.Width/2, r.Height))
}

// RegionRight returns the right half of r.
func (r *Rect) RegionRight() *Rect {
	return NewRectFromSize(NewPoint(r.X+r.Width/2, r.Y), NewSize(r.Width/2, r.Height))
}
// RegionTop returns the top half of r.
func (r *Rect) RegionTop() *Rect {
	return NewRectFromSize(NewPoint(r.X, r.Y), NewSize(r.Width, r.Height/2))
}
// RegionBottom returns the bottom half of r.
// Bug fix: the vertical offset was previously r.Top()+r.Width/2, which places
// the region at the wrong y for any non-square rectangle; the bottom half
// starts Height/2 below the top edge (mirroring RegionRight's use of Width/2
// for its horizontal offset).
func (r *Rect) RegionBottom() *Rect {
	rect := r.RegionTop()
	rect.SetLocation(NewPoint(r.Left(), r.Top()+r.Height/2))
	return rect
}
// RegionTopLeft returns the top-left quadrant of r.
func (r *Rect) RegionTopLeft() *Rect {
	return NewRectFromSize(NewPoint(r.X, r.Y), NewSize(r.Width/2, r.Height/2))
}

// RegionTopRight returns the top-right quadrant of r.
func (r *Rect) RegionTopRight() *Rect {
	quadrant := r.RegionTopLeft()
	quadrant.SetLocation(NewPoint(r.X+r.Width/2, r.Y))
	return quadrant
}
// RegionBottomLeft returns the bottom-left quadrant of r.
func (r *Rect) RegionBottomLeft() *Rect {
	quadrant := r.RegionTopLeft()
	quadrant.SetLocation(NewPoint(r.X, r.Y+r.Height/2))
	return quadrant
}

// RegionMiddleCenter returns a quadrant-sized (Width/2 x Height/2) region
// whose top-left corner sits one third of the way into r on both axes.
func (r *Rect) RegionMiddleCenter() *Rect {
	region := r.RegionTopLeft()
	region.SetLocation(NewPoint(r.X+r.Width/3, r.Y+r.Height/3))
	return region
}
// RegionBottomRight returns the bottom-right quadrant of r.
func (r *Rect) RegionBottomRight() *Rect {
	quadrant := r.RegionTopLeft()
	quadrant.SetLocation(NewPoint(r.X+r.Width/2, r.Y+r.Height/2))
	return quadrant
}

// RegionCenter returns a full-height, half-width band whose left edge is one
// third of the way across r.
func (r *Rect) RegionCenter() *Rect {
	return NewRectFromSize(NewPoint(r.X+r.Width/3, r.Y), NewSize(r.Width/2, r.Height))
}

// RegionMiddle returns a full-width, half-height band whose top edge is one
// third of the way down r.
func (r *Rect) RegionMiddle() *Rect {
	return NewRectFromSize(NewPoint(r.X, r.Y+r.Height/3), NewSize(r.Width, r.Height/2))
}
// String implements fmt.Stringer, describing r's position and size.
func (r *Rect) String() string {
	return fmt.Sprintf("Rect(x:%d, y:%d, width:%d, height:%d)", r.X, r.Y, r.Width, r.Height)
} | rect.go | 0.78789 | 0.477006 | rect.go | starcoder |
package runningvariance
import (
"fmt"
"math"
)
// Stat accumulates the data required for computing the statistics.
type Stat struct {
	// N is the number of samples pushed so far.
	N int64
	// M1..M4 are running terms maintained by Push/Combined, from which the
	// mean and the higher-order statistics are derived.
	M1, M2, M3, M4 float64
}
// String implements Stringer, reporting the sample count, mean, standard
// deviation, skewness and excess kurtosis.
func (s *Stat) String() string {
	return fmt.Sprintf("N=%d μ=%f σ=%f skew=%f ek=%f", s.N, s.Mean(), s.StdDev(), s.Skewness(), s.ExcessKurtosis())
}
// Push updates the statistics after adding a new value to the series, in a
// single pass with no per-sample storage.
// The update order (M4, then M3, then M2) is deliberate: each line consumes
// the pre-update values of the lower-order terms, so it must not be reordered.
// (Locals renamed from delta_n/delta_n2 to MixedCaps per Go naming convention.)
func (s *Stat) Push(x float64) {
	n1 := float64(s.N)
	s.N++
	n := float64(s.N)
	delta := x - s.M1
	deltaN := delta / n
	deltaN2 := deltaN * deltaN
	term1 := delta * deltaN * n1
	s.M1 += deltaN
	s.M4 += term1*deltaN2*(n*n-3*n+3) + 6*deltaN2*s.M2 - 4*deltaN*s.M3
	s.M3 += term1*deltaN*(n-2) - 3*deltaN*s.M2
	s.M2 += term1
}
// Mean returns the arithmetic mean of the samples pushed so far
// (0 before any sample has been pushed).
func (s *Stat) Mean() float64 {
	return s.M1
}
// Variance returns the unbiased sample variance (dividing by N-1), or 0 when
// fewer than two samples have been pushed.
// Idiom fix: replaced the if/else with an early return (no else after return).
func (s *Stat) Variance() float64 {
	if s.N < 2 {
		return 0.0
	}
	return s.M2 / (float64(s.N) - 1.0)
}
// StdDev returns the sample standard deviation (square root of Variance).
func (s *Stat) StdDev() float64 {
	return math.Sqrt(s.Variance())
}
/*
Skewness returns the skewness, a measure of the asymmetry of the probability
distribution.
For a simple distribution with a single peak, positive skewness means the peak is
closer to the left side, and negative skewness means the peak is closer to the
right side. A zero value means that the tails on both sides of the mean
balance out overall.
WARNING: currently seems to be returning incorrect results, more work needed.
*/
func (s *Stat) Skewness() float64 {
	return math.Sqrt(float64(s.N)) * s.M3 / math.Pow(s.M2, 1.5)
}
/*
ExcessKurtosis returns the kurtosis of the data minus 3 (the “excess kurtosis”),
which gives an idea about how tail-heavy the distribution is.
Positive excess kurtosis means the distribution has a fatter tail than
the normal distribution. Similarly, negative excess kurtosis means a thinner
tail.
*/
func (s *Stat) ExcessKurtosis() float64 {
	return float64(s.N)*s.M4/(s.M2*s.M2) - 3.0
}
// Combined returns the statistics describing the union of the two samples
// summarized by a and b, merging the running moment terms pairwise.
// Neither input is modified.
func Combined(a, b *Stat) Stat {
	var c Stat
	c.N = a.N + b.N
	// Robustness fix: merging two empty accumulators previously divided zero
	// by zero and produced NaN moments; the combination of nothing is simply
	// the zero Stat.
	if c.N == 0 {
		return c
	}
	an := float64(a.N)
	bn := float64(b.N)
	cn := float64(c.N)
	delta := b.M1 - a.M1
	delta2 := delta * delta
	delta3 := delta * delta2
	delta4 := delta2 * delta2
	c.M1 = (an*a.M1 + bn*b.M1) / cn
	c.M2 = a.M2 + b.M2 + delta2*an*bn/cn
	c.M3 = a.M3 + b.M3 + delta3*an*bn*(an-bn)/(cn*cn)
	c.M3 += 3.0 * delta * (an*b.M2 - bn*a.M2) / cn
	c.M4 = a.M4 + b.M4 + delta4*an*bn*(an*an-an*bn+bn*bn)/
		(cn*cn*cn)
	c.M4 += 6.0*delta2*(an*an*b.M2+bn*bn*a.M2)/(cn*cn) +
		4.0*delta*(an*b.M3-bn*a.M3)/cn
	return c
}
// Combine merges the statistics summarized by b into s in place.
func (s *Stat) Combine(b *Stat) {
	*s = Combined(s, b)
} | runningvariance.go | 0.807271 | 0.561275 | runningvariance.go | starcoder |
package scenegraph
import (
"github.com/mattkimber/gandalf/geometry"
"github.com/mattkimber/gandalf/magica/types"
)
// Decompose flattens the node tree into a scene graph: a map of node IDs to
// scene-graph items plus parallel slices of per-model point and size data,
// with node and shape IDs assigned sequentially from zero.
func (n *Node) Decompose() (graph Map, pointData []types.PointData, sizeData []types.Size) {
	graph = make(Map)
	pointData = make([]types.PointData, 0)
	sizeData = make([]types.Size, 0)
	nextNodeID, nextShapeID := 0, 0
	_ = n.decomposeWithIDs(&nextNodeID, &nextShapeID, graph, &pointData, &sizeData)
	return graph, pointData, sizeData
}
// decomposeWithIDs recursively emits this node into graph, consuming node IDs
// from *id and model IDs from *shapeID, and returns the ID of the root
// translation created for this node. Every node becomes a Translation whose
// child is a Group; a node with models gets one Shape+Translation pair per
// model under that group, otherwise the group's children are the roots of the
// recursively decomposed child nodes.
// Idiom fix: `var childID int; childID = ...` collapsed to `childID := ...`.
func (n *Node) decomposeWithIDs(id, shapeID *int, graph Map, pointData *[]types.PointData, sizeData *[]types.Size) (rootID int) {
	rootTranslation := types.Translation{
		NodeID: *id,
		Attributes: types.Dictionary{},
		ReservedID: -1,
		LayerID: 0,
		Frames: []types.Frame{{X: 0, Y: 0, Z: 0}},
	}
	graph[*id] = &rootTranslation
	*id++
	rootGroup := types.Group{
		NodeID: *id,
		Attributes: types.Dictionary{},
	}
	rootTranslation.ChildNodeID = rootGroup.NodeID
	graph[*id] = &rootGroup
	*id++
	// If this node has models, add them to the point and size data
	if len(n.Models) > 0 {
		translationNodeIDs := make([]int, 0)
		for _, model := range n.Models {
			*pointData = append(*pointData, model.Points)
			*sizeData = append(*sizeData, model.Size)
			// One shape per model, referencing the model's index in the
			// parallel data slices.
			shp := types.Shape{
				NodeID: *id,
				Attributes: types.Dictionary{},
				Models: []int{*shapeID},
			}
			graph[*id] = &shp
			*id++
			*shapeID++
			// Each shape hangs off its own translation carrying the node's
			// location.
			trn := types.Translation{
				NodeID: *id,
				Attributes: types.Dictionary{},
				ChildNodeID: shp.NodeID,
				ReservedID: -1,
				LayerID: 0,
				Frames: []types.Frame{{
					X: n.Location.X,
					Y: n.Location.Y,
					Z: n.Location.Z,
				},
				},
			}
			graph[*id] = &trn
			translationNodeIDs = append(translationNodeIDs, *id)
			*id++
		}
		rootGroup.ChildNodes = translationNodeIDs
	} else {
		childNodeIDs := make([]int, 0)
		for _, node := range n.Children {
			childID := node.decomposeWithIDs(id, shapeID, graph, pointData, sizeData)
			childNodeIDs = append(childNodeIDs, childID)
		}
		rootGroup.ChildNodes = childNodeIDs
	}
	return rootTranslation.NodeID
}
// Compose walks the scene graph depth-first starting at current, accumulating
// translation offsets (x, y, z) and the active layer, and rebuilds the
// corresponding Node tree. Shape nodes pull their voxel data from pointData /
// sizeData by model index; a shape's models are only materialised when its
// layer is listed in allowedLayers (an empty allowedLayers permits every
// layer). Child IDs missing from graph are skipped.
// Idiom fix: the two-statement `next, ok := graph[child]; if ok {` line is
// now an if-with-initializer.
func Compose(graph Map, current types.SceneGraphItem, x, y, z, layer int, allowedLayers []int, pointData []types.PointData, sizeData []types.Size) (result Node) {
	if current.GetType() == types.SGTranslation {
		tn := current.(*types.Translation)
		for _, frame := range tn.Frames {
			x += frame.X
			y += frame.Y
			z += frame.Z
		}
		layer = tn.LayerID
	}
	if current.GetType() == types.SGShape {
		// A shape is rendered only when its layer passes the filter (or no
		// filter was given).
		isAllowed := false
		if len(allowedLayers) == 0 {
			isAllowed = true
		} else {
			for _, allowedLayer := range allowedLayers {
				if allowedLayer == layer {
					isAllowed = true
					break
				}
			}
		}
		shp := current.(*types.Shape)
		size := types.Size{}
		models := make([]Model, len(shp.Models))
		if isAllowed {
			for idx, child := range shp.Models {
				models[idx] = Model{Points: pointData[child], Size: sizeData[child]}
				// NOTE(review): size ends up as the size of the LAST model;
				// confirm multi-model shapes are centred as intended.
				size = sizeData[child]
			}
		}
		result.Models = models
		// Centre the node on the accumulated translation.
		result.Location = geometry.Point{X: x - (size.X / 2), Y: y - (size.Y / 2), Z: z - (size.Z / 2)}
	}
	children := make([]Node, 0)
	for _, child := range current.GetChildren() {
		if next, ok := graph[child]; ok {
			children = append(children, Compose(graph, next, x, y, z, layer, allowedLayers, pointData, sizeData))
		}
	}
	result.Children = children
	return result
} | magica/scenegraph/compose.go | 0.569853 | 0.415136 | compose.go | starcoder |
package unit
const (
	// DATA represents the unit used for data volume (e.g., bytes)
	DATA = iota
	// TIME represents the unit used for time measurements (e.g., seconds)
	TIME
	// BW represents the unit used for bandwidth measurements (e.g., B/s)
	BW
)
// getDataUnits maps scale level (0..4) to the data-volume unit label.
func getDataUnits() map[int]string {
	units := make(map[int]string, 5)
	for level, label := range []string{"B", "KB", "MB", "GB", "TB"} {
		units[level] = label
	}
	return units
}

// getBWUnits maps scale level (0..4) to the bandwidth unit label.
func getBWUnits() map[int]string {
	units := make(map[int]string, 5)
	for level, label := range []string{"B/s", "KB/s", "MB/s", "GB/s", "TB/s"} {
		units[level] = label
	}
	return units
}

// getTimeUnits maps scale level (0..3) to the time unit label; a larger level
// means a larger unit (0 = nanoseconds, 3 = seconds).
func getTimeUnits() map[int]string {
	units := make(map[int]string, 4)
	for level, label := range []string{"nanoseconds", "microseconds", "milliseconds", "seconds"} {
		units[level] = label
	}
	return units
}
// FromString translates a type identifier that is easy to manipate to a unit dataset
func FromString(unitID string) (int, int) {
dataTypeData := getDataUnits()
for lvl, val := range dataTypeData {
if val == unitID {
return DATA, lvl
}
}
timeTypeData := getTimeUnits()
for lvl, val := range timeTypeData {
if val == unitID {
return TIME, lvl
}
}
bwTypeData := getBWUnits()
for lvl, val := range bwTypeData {
if val == unitID {
return BW, lvl
}
}
return -1, -1
}
// ToString converts a (unit type, scale level) pair into its readable label,
// or "" when the type is unknown or the level is not defined for it.
func ToString(unitType int, unitScale int) string {
	var units map[int]string
	switch unitType {
	case DATA:
		units = getDataUnits()
	case TIME:
		units = getTimeUnits()
	case BW:
		units = getBWUnits()
	default:
		return ""
	}
	return units[unitScale]
}

// IsValidScale reports whether newUnitScale is a defined level for unitType.
func IsValidScale(unitType int, newUnitScale int) bool {
	var units map[int]string
	switch unitType {
	case DATA:
		units = getDataUnits()
	case TIME:
		units = getTimeUnits()
	case BW:
		units = getBWUnits()
	default:
		return false
	}
	_, ok := units[newUnitScale]
	return ok
}
// IsMax reports whether a level one step larger than unitScale exists for
// unitType — i.e. whether the value can still be scaled up.
// NOTE(review): despite the name, this returns false when unitScale already
// is the maximum level; confirm callers expect "can scale up" semantics.
func IsMax(unitType int, unitScale int) bool {
	var units map[int]string
	switch unitType {
	case DATA:
		units = getDataUnits()
	case TIME:
		units = getTimeUnits()
	case BW:
		units = getBWUnits()
	default:
		return false
	}
	_, ok := units[unitScale+1]
	return ok
}

// IsMin reports whether a level one step smaller than unitScale exists for
// unitType — i.e. whether the value can still be scaled down.
// NOTE(review): despite the name, this returns false when unitScale already
// is the minimum level; confirm callers expect "can scale down" semantics.
func IsMin(unitType int, unitScale int) bool {
	var units map[int]string
	switch unitType {
	case DATA:
		units = getDataUnits()
	case TIME:
		units = getTimeUnits()
	case BW:
		units = getBWUnits()
	default:
		return false
	}
	_, ok := units[unitScale-1]
	return ok
} | tools/internal/pkg/unit/unit.go | 0.743075 | 0.643343 | unit.go | starcoder |
package client
import (
"encoding/json"
)
// RuleMatch describes which incoming requests a rule applies to, by HTTP
// method and by URL pattern.
type RuleMatch struct {
	// Methods lists the HTTP methods (GET, POST, PUT, DELETE, ...) this rule
	// matches. A request whose method is in this list is a partial match; the
	// rule is a full match only when the URL pattern matches as well.
	Methods []string `json:"methods,omitempty"`
	// Url is the URL pattern this rule matches, compared against the full
	// request URL without query parameters. Regular expressions may be
	// embedded between < and >, e.g. `https://mydomain.com/<.*>` matches all
	// paths of that domain.
	Url *string `json:"url,omitempty"`
}

// NewRuleMatch instantiates a new RuleMatch object, assigning defaults to any
// properties that define them; required properties must still be set by the
// caller.
func NewRuleMatch() *RuleMatch {
	return &RuleMatch{}
}

// NewRuleMatchWithDefaults instantiates a new RuleMatch object with only
// defaulted properties assigned; required properties are left unset.
func NewRuleMatchWithDefaults() *RuleMatch {
	return &RuleMatch{}
}
// GetMethods returns the Methods field value if set, zero value otherwise.
func (o *RuleMatch) GetMethods() []string {
	if o == nil || o.Methods == nil {
		return nil
	}
	return o.Methods
}

// GetMethodsOk returns the Methods field and true when it is set, or nil and
// false otherwise.
func (o *RuleMatch) GetMethodsOk() ([]string, bool) {
	if o == nil || o.Methods == nil {
		return nil, false
	}
	return o.Methods, true
}

// HasMethods reports whether the Methods field has been set.
func (o *RuleMatch) HasMethods() bool {
	return o != nil && o.Methods != nil
}

// SetMethods stores v in the Methods field.
func (o *RuleMatch) SetMethods(v []string) {
	o.Methods = v
}
// GetUrl returns the Url field value if set, zero value otherwise.
func (o *RuleMatch) GetUrl() string {
	if o == nil || o.Url == nil {
		return ""
	}
	return *o.Url
}

// GetUrlOk returns the Url field and true when it is set, or nil and false
// otherwise.
func (o *RuleMatch) GetUrlOk() (*string, bool) {
	if o == nil || o.Url == nil {
		return nil, false
	}
	return o.Url, true
}

// HasUrl reports whether the Url field has been set.
func (o *RuleMatch) HasUrl() bool {
	return o != nil && o.Url != nil
}

// SetUrl stores a reference to v in the Url field.
func (o *RuleMatch) SetUrl(v string) {
	o.Url = &v
}
// MarshalJSON serialises only the fields that have been set.
func (o RuleMatch) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	if o.Methods != nil {
		out["methods"] = o.Methods
	}
	if o.Url != nil {
		out["url"] = o.Url
	}
	return json.Marshal(out)
}
// NullableRuleMatch wraps a RuleMatch pointer together with an explicit
// "has been set" flag, so that an unset value, an explicit null and a set
// value can be distinguished when round-tripping JSON.
type NullableRuleMatch struct {
	value *RuleMatch
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableRuleMatch) Get() *RuleMatch {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableRuleMatch) Set(val *RuleMatch) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value has been assigned (via Set or unmarshalling).
func (v NullableRuleMatch) IsSet() bool {
	return v.isSet
}
// Unset clears both the value and the set flag.
func (v *NullableRuleMatch) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableRuleMatch returns a NullableRuleMatch holding val, marked as set.
func NewNullableRuleMatch(val *RuleMatch) *NullableRuleMatch {
	return &NullableRuleMatch{value: val, isSet: true}
}
// MarshalJSON serialises the wrapped value (null when nil).
func (v NullableRuleMatch) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks the wrapper as set.
func (v *NullableRuleMatch) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | clients/oathkeeper/go/model_rule_match.go | 0.821403 | 0.409693 | model_rule_match.go | starcoder |
package gokalman
import "github.com/gonum/matrix/mat64"
// FilterType allows for quick comparison of filters; it identifies which
// filter variant a value represents (see the CKFType..SRIFType constants).
type FilterType uint8
// String implements fmt.Stringer for FilterType.
// NOTE(review): panics on an unrecognised value, matching the original
// behavior — confirm callers never format an unset (zero) FilterType.
func (f FilterType) String() string {
	names := map[FilterType]string{
		CKFType:  "CKF",
		EKFType:  "EKF",
		UKFType:  "UKF",
		SRIFType: "SRIF",
	}
	name, ok := names[f]
	if !ok {
		panic("unknown filter")
	}
	return name
}
// Filter type identifiers; values start at 1 so the zero FilterType remains
// distinguishable as "unset".
const (
	// CKFType identifies the filter variant formatted as "CKF" by String.
	CKFType FilterType = iota + 1
	// EKFType identifies the filter variant formatted as "EKF" by String.
	EKFType
	// UKFType identifies the filter variant formatted as "UKF" by String.
	UKFType
	// SRIFType identifies the filter variant formatted as "SRIF" by String.
	SRIFType
)
// LDKF defines a linear dynamics Kalman Filter.
type LDKF interface {
	// Update processes one measurement/control pair and returns the resulting
	// estimate.
	Update(measurement, control *mat64.Vector) (Estimate, error)
	GetNoise() Noise
	GetStateTransition() mat64.Matrix
	GetInputControl() mat64.Matrix
	GetMeasurementMatrix() mat64.Matrix
	SetStateTransition(mat64.Matrix)
	SetInputControl(mat64.Matrix)
	SetMeasurementMatrix(mat64.Matrix)
	SetNoise(Noise)
	Reset()
	String() string
}
// NLDKF defines a non-linear dynamics Kalman Filter.
// Operates and is architectured slightly differently than LDKF: prediction
// and measurement update are separate calls, and EKF behavior can be toggled.
type NLDKF interface {
	Prepare(Φ, Htilde *mat64.Dense)
	Predict() (est Estimate, err error)
	Update(realObservation, computedObservation *mat64.Vector) (est Estimate, err error)
	EKFEnabled() bool
	EnableEKF()
	DisableEKF()
	PreparePNT(Γ *mat64.Dense)
	SetNoise(n Noise)
}
// Estimate is returned from Update() in any KF.
// This allows to avoid some computations in other filters, e.g. in the Information filter.
type Estimate interface {
	IsWithinNσ(N float64) bool // IsWithinNσ returns whether the estimation is within the N*σ bounds.
	State() *mat64.Vector // Returns \hat{x}_{k+1}^{+}
	Measurement() *mat64.Vector // Returns \hat{y}_{k}^{+}
	Innovation() *mat64.Vector // Returns y_{k} - H*\hat{x}_{k+1}^{-}
	Covariance() mat64.Symmetric // Return P_{k+1}^{+}
	PredCovariance() mat64.Symmetric // Return P_{k+1}^{-}
	String() string // Must implement the stringer interface.
} | kalman.go | 0.729809 | 0.440951 | kalman.go | starcoder |
Package app contains OpenEBS Dynamic Local PV provisioner
Provisioner is created using the external storage provisioner library:
https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner
Local PVs are an extension to hostpath volumes, but are more secure.
https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems
Local PVs are great in cases like:
- The Stateful Workload can take care of replicating the data across
nodes to handle cases like a complete node (and/or its storage) failure.
- For long running Stateful Workloads, the Backup/Recovery is provided
by Operators/tools that can make use the Workload mounts and do not
require the capabilities to be available in the underlying storage. Or
if the hostpaths are created on external storage like EBS/GPD, administrator
have tools that can periodically take snapshots/backups.
While the Kubernetes Local PVs are mainly recommended for cases where a complete
storage device should be assigned to a Pod. OpenEBS Dynamic Local PV provisioner
will help provisioning the Local PVs dynamically by integrating into the features
offered by OpenEBS Node Storage Device Manager, and also offers the
flexibility to either select a complete storage device or
a hostpath (or subpath) directory.
In fact, in some cases, the Kubernetes nodes may have a limited number of storage
devices attached to the node and hostpath based Local PVs offer efficient management
of the storage available on the node.
Inspiration:
------------
The implementation has been influenced by the prior work done by the Kubernetes community,
specifically the following:
- https://github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/tree/master/examples/hostpath-provisioner
- https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner
- https://github.com/rancher/local-path-provisioner
How it works:
-------------
Step 1: Multiple Storage Classes can be created by the Kubernetes Administrator,
to specify the required type of OpenEBS Local PV to be used by an application.
A simple StorageClass looks like:
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: hostpath
annotations:
#Define a new OpenEBS CAS Type called `local`
#which indicates that Data is stored
#directly onto hostpath. The hostpath can be:
#- device (as block or mounted path)
#- hostpath (sub directory on OS or mounted path)
openebs.io/cas-type: local
cas.openebs.io/config: |
#- name: StorageType
# value: "storage-device"
# (Default)
- name: StorageType
value: "hostpath"
# If the StorageType is hostpath, then BasePath
# specifies the location where the volume subdirectory
# should be created.
# (Default)
- name: BasePath
value: "/var/openebs/local"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
Step 2: The application developers will request for storage via PVC as follows:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-hp
spec:
accessModes:
- ReadWriteOnce
storageClassName: hostpath
resources:
requests:
storage: 2Gi
---
Step 3: A Local PV (type=hostpath) provisioned via the OpenEBS Dynamic
Local PV Provisioner looks like this:
---
apiVersion: v1
kind: PersistentVolume
metadata:
annotations:
pv.kubernetes.io/provisioned-by: openebs.io/local
creationTimestamp: 2019-05-02T15:44:35Z
finalizers:
- kubernetes.io/pv-protection
name: pvc-2fe08284-6cf1-11e9-be8b-42010a800155
resourceVersion: "2062"
selfLink: /api/v1/persistentvolumes/pvc-2fe08284-6cf1-11e9-be8b-42010a800155
uid: 2fedaff8-6cf1-11e9-be8b-42010a800155
spec:
accessModes:
- ReadWriteOnce
capacity:
storage: 2Gi
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
name: pvc-hp
namespace: default
resourceVersion: "2060"
uid: 2fe08284-6cf1-11e9-be8b-42010a800155
hostPath:
path: /var/openebs/local/pvc-2fe08284-6cf1-11e9-be8b-42010a800155
type: DirectoryOrCreate
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- gke-kmova-helm-default-pool-6c1271a5-n8b0
persistentVolumeReclaimPolicy: Delete
storageClassName: hostpath
status:
phase: Bound
---
Note that the location of the hostpaths on the node are abstracted from the
application developers and are under the Administrators control.
Implementation Details:
-----------------------
(a) The configuration of whether to select a complete storage device
of a hostpath is determined by the StorageClass annotations, inline
with other configuration options provided by OpenEBS.
(b) When using StorageType as device, the Local Provisioner will
interact with the OpenEBS Node Storage Device Manager (NDM) to
identify the device to be used.
(c) The StorageClass can work either with `waitForConsumer`, in which
case, the PV is created on the node where the Pod is scheduled or
vice versa. (Note: In the initial version, only `waitForConsumer` is
supported.)
(d) When using the hostpath, the administrator can select the location using:
- BasePath: By default, the hostpath volumes will be created under `/var/openebs/local`.
This default path can be changed by passing the "OPENEBS_IO_BASE_PATH" ENV
variable to the Hostpath Provisioner Pod. It is also possible to specify
a different location using the CAS Policy `BasePath` in the StorageClass.
The location of the hostpaths used in the above configuration options can be:
- OS Disk - possibly a folder dedicated to saving data on each node.
- Additional Disks - mounted as ext4 or any other filesystem
- External Storage - mounted as ext4 or any other filesystem
Future Work:
------------
The current implementation provides basic support for using Local PVs. This will
be enhanced in the upcoming releases with the following features:
- Ability to use a git backed hostpath, so data can be backed up to a github/gitlab
- Ability to use hostpaths that are managed by NDM - that monitors for usage, helps
with expanding the storage of a given hostpath. For example - use LVM or ZFS to
create a host mount with attached disks. Where additional disks can be added or failed
disks replaced without impacting the workloads running on their hostpaths.
- Integrate with projects like Velero or Kasten that can handle backup and restore of data
stored on the Hostpath PVs attached to a workload.
- Provide tools that can help with recovering from situations where PVs are tied to nodes
that can never recover. For example, a Stateful Workload can be associated with a
Hostpath PV on Node-z. Say Node-z becomes inaccessible for reasons beyond its control
like - a site/zone/rack disaster or the disks went up in flames. The PV will still be
having the Node Affinity to Node-z, which will make the Workload get stuck in the pending
state.
- Move towards using a CSI based Hostpath provisioner. Some of the features required to
use the Hostpath PVs like the Volume Topology are not yet available in the CSI. As the
CSI driver stabilizes, this can be moved into CSI.
- Provide an option to specify a white list of paths which can be used by the application
developers. The white list can be tied into application developers namespace. For example:
all /var/developer1/* for PVCs in namespace developer1, etc.
- Ability to enforce runtime capacity limits
- Ability to enforce provisioning limits based on the node capacity or the number of PVs
already provisioned.
*/
package app | cmd/provisioner-localpv/app/doc.go | 0.90188 | 0.542379 | doc.go | starcoder |
package dependencygraph2
import (
"github.com/google/gapid/core/math/interval"
)
// memoryWrite stores a WriteMemEffect, together with a memory span affected by that write.
// The span may be smaller than the whole write.
type memoryWrite struct {
	// effect WriteMemEffect
	// node identifies the dependency-graph node that performed the write.
	node NodeID
	// span is the region of memory this entry covers.
	span interval.U64Span
}
// memoryWriteList represents a collection of memory writes, together with the regions of memory affected by each write.
// memoryWriteList implements the `interval.MutableList` interface, enabling the algorithms in `interval` for efficient queries and updates.
type memoryWriteList []memoryWrite
// Length returns the number of elements in the list
// Implements `interval.List.Length`
func (l *memoryWriteList) Length() int {
	return len(*l)
}
// GetSpan returns the span for the element at index in the list
// Implements `interval.List.GetSpan`
func (l *memoryWriteList) GetSpan(index int) interval.U64Span {
	return (*l)[index].span
}
// SetSpan sets the span for the element at index in the list
// Implements `interval.MutableList.SetSpan`
func (l *memoryWriteList) SetSpan(index int, span interval.U64Span) {
	(*l)[index].span = span
}
// New creates a new element at the specified index with the specified span
// (only the span is initialised; the node field keeps its zero value).
// Implements `interval.MutableList.New`
func (l *memoryWriteList) New(index int, span interval.U64Span) {
	(*l)[index].span = span
}
// Copy copies count list entries from `from` to `to` (ranges may overlap).
// Implements `interval.MutableList.Copy`
func (l *memoryWriteList) Copy(to, from, count int) {
	copy((*l)[to:to+count], (*l)[from:from+count])
}
// Resize adjusts the length of the array.
// Implements `interval.MutableList.Resize`.
// Fix: the capacity test is now >= — previously cap(*l) == length fell into
// the allocate-and-copy branch even though the existing backing array was
// already exactly large enough.
func (l *memoryWriteList) Resize(length int) {
	if cap(*l) >= length {
		// The backing array is large enough: reslice in place.
		*l = (*l)[:length]
	} else {
		// Grow geometrically (at least doubling) to amortise repeated
		// resizes, then copy the old contents across.
		old := *l
		capacity := cap(*l) * 2
		if capacity < length {
			capacity = length
		}
		*l = make(memoryWriteList, length, capacity)
		copy(*l, old)
	}
}
// memoryAccess stores a memory span together with the AccessMode describing
// how that span was accessed. (The previous comment described the field as a
// bool; it is an AccessMode bit set — see AddRead/AddWrite.)
type memoryAccess struct {
	mode AccessMode
	span interval.U64Span
}
// memoryAccessList represents a collection of memory accesses.
// memoryAccessList implements the `interval.MutableList` interface, enabling the algorithms in `interval` for efficient queries and updates.
type memoryAccessList []memoryAccess
// Length returns the number of elements in the list
// Implements `interval.List.Length`
func (l *memoryAccessList) Length() int {
	return len(*l)
}
// GetSpan returns the span for the element at index in the list
// Implements `interval.List.GetSpan`
func (l *memoryAccessList) GetSpan(index int) interval.U64Span {
	return (*l)[index].span
}
// SetSpan sets the span for the element at index in the list
// Implements `interval.MutableList.SetSpan`
func (l *memoryAccessList) SetSpan(index int, span interval.U64Span) {
	(*l)[index].span = span
}
// New creates a new element at the specified index with the specified span
// (only the span is initialised; the mode field keeps its zero value).
// Implements `interval.MutableList.New`
func (l *memoryAccessList) New(index int, span interval.U64Span) {
	(*l)[index].span = span
}
// Copy copies count list entries from `from` to `to` (ranges may overlap).
// Implements `interval.MutableList.Copy`
func (l *memoryAccessList) Copy(to, from, count int) {
	copy((*l)[to:to+count], (*l)[from:from+count])
}
// Resize adjusts the length of the array.
// Implements `interval.MutableList.Resize`.
// Fix: the capacity test is now >= — previously cap(*l) == length fell into
// the allocate-and-copy branch even though the existing backing array was
// already exactly large enough.
func (l *memoryAccessList) Resize(length int) {
	if cap(*l) >= length {
		// The backing array is large enough: reslice in place.
		*l = (*l)[:length]
	} else {
		// Grow geometrically (at least doubling) to amortise repeated
		// resizes, then copy the old contents across.
		old := *l
		capacity := cap(*l) * 2
		if capacity < length {
			capacity = length
		}
		*l = make(memoryAccessList, length, capacity)
		copy(*l, old)
	}
}
// GetValue returns the access mode stored at index.
// NOTE(review): value receiver here vs pointer receivers on the other
// methods — left as-is, since changing it would alter the type's method set.
func (l memoryAccessList) GetValue(index int) interface{} {
	return l[index].mode
}
// SetValue stores value (which must be an AccessMode) at index.
func (l *memoryAccessList) SetValue(index int, value interface{}) {
	(*l)[index].mode = value.(AccessMode)
}
// Insert opens a gap of count zero-valued elements starting at index by
// growing the slice and shifting the tail right.
func (l *memoryAccessList) Insert(index int, count int) {
	*l = append(*l, make(memoryAccessList, count)...)
	if index != len(*l) && count > 0 {
		copy((*l)[index+count:], (*l)[index:])
	}
}
// Delete removes count elements starting at index by shifting the tail left
// and truncating.
func (l *memoryAccessList) Delete(index int, count int) {
	if index+count != len(*l) && count > 0 {
		copy((*l)[index:], (*l)[index+count:])
	}
	*l = (*l)[:len(*l)-count]
}
// AddRead records a read over span s, merging it into any overlapping
// intervals via interval.Update.
func (l *memoryAccessList) AddRead(s interval.U64Span) {
	f := func(x interface{}) interface{} {
		if x == nil {
			return ACCESS_READ
		}
		// There is already an access. Always mark the plain read, but be
		// careful about the dependency read: if the same node already accessed
		// before with a write, then the read is not relevant for dependency
		// since it will be reading what the very same node just wrote.
		m := x.(AccessMode)
		m |= ACCESS_PLAIN_READ
		if m&ACCESS_DEP_WRITE == 0 {
			m |= ACCESS_DEP_READ
		}
		return m
	}
	interval.Update(l, s, f)
}
// AddWrite records a write over span s, OR-ing ACCESS_WRITE into any existing
// access mode for the overlapping intervals (new intervals start as
// ACCESS_WRITE).
func (l *memoryAccessList) AddWrite(s interval.U64Span) {
	f := func(x interface{}) interface{} {
		if x == nil {
			return ACCESS_WRITE
		}
		m := x.(AccessMode)
		return m | ACCESS_WRITE
	}
	interval.Update(l, s, f)
} | gapis/resolve/dependencygraph2/memory_intervals.go | 0.835785 | 0.426979 | memory_intervals.go | starcoder |
package reflecthelper
import "reflect"
// GetKind gets the kind of the val of reflect.Value.
func GetKind(val reflect.Value) (res reflect.Kind) {
if !val.IsValid() {
return
}
res = val.Type().Kind()
return
}
// GetElemKind gets the elem kind from the val of reflect.Value.
// For container/pointer kinds it reports the element type's kind; for an
// interface value it reports the kind of the dynamic value stored inside.
func GetElemKind(val reflect.Value) (res reflect.Kind) {
	if !val.IsValid() {
		return
	}
	res = GetKind(val)
	if IsKindTypeElemable(res) {
		res = val.Type().Elem().Kind()
	} else if res == reflect.Interface {
		res = val.Elem().Kind()
	}
	return
}
// GetChildElemTypeKind returns the child elems' (root child) kind of the type of val reflect.Value.
// Interfaces are unwrapped first (UnwrapInterfaceValue is defined elsewhere
// in this package).
func GetChildElemTypeKind(val reflect.Value) (res reflect.Kind) {
	if !val.IsValid() {
		return
	}
	val = UnwrapInterfaceValue(val)
	res = GetKind(val)
	if !IsKindTypeElemable(res) {
		return
	}
	res = getChildElemTypeKind(val)
	return
}
// getChildElemTypeKind walks Elem() on the type until it reaches a kind
// that cannot be Elem'd further, and returns that kind.
func getChildElemTypeKind(val reflect.Value) (res reflect.Kind) {
	elemType := val.Type().Elem()
	res = elemType.Kind()
	for IsKindTypeElemable(res) {
		elemType = elemType.Elem()
		res = elemType.Kind()
	}
	return
}
// GetChildElemPtrKind gets the child elements' (root child) ptr kind of the val of reflect.Value.
// Only pointer indirections are followed; other elemable kinds stop the walk.
func GetChildElemPtrKind(val reflect.Value) (res reflect.Kind) {
	if !val.IsValid() {
		return
	}
	res = GetKind(val)
	valType := val.Type()
	for res == reflect.Ptr {
		valType = valType.Elem()
		res = valType.Kind()
	}
	return
}
// GetChildElemValueKind gets the child elements' (root child) kind of the val reflect.Value and it only works on ptr kind.
func GetChildElemValueKind(val reflect.Value) (res reflect.Kind) {
	res = GetChildElemPtrKind(UnwrapInterfaceValue(val))
	return
}
// IsKindValueElemable checks the kind of reflect.Value that can call Elem method.
func IsKindValueElemable(kind reflect.Kind) bool {
	return kind == reflect.Ptr || kind == reflect.Interface
}
// IsValueElemable checks whether the val of reflect.Value could call Elem method.
func IsValueElemable(val reflect.Value) bool {
	return IsKindValueElemable(GetKind(val))
}
// IsValueElemableParentElem checks whether the res have elemable kind for parent and elem.
func IsValueElemableParentElem(res reflect.Value) bool {
	return IsKindValueElemable(GetKind(res)) && IsKindValueElemable(GetElemKind(res))
}
// IsKindTypeElemable checks the kind of reflect.Type that can call Elem method.
func IsKindTypeElemable(kind reflect.Kind) bool {
return kind == reflect.Array ||
kind == reflect.Chan ||
kind == reflect.Map ||
kind == reflect.Ptr ||
kind == reflect.Slice
}
// IsKindBool checks whether the kind is bool or not.
func IsKindBool(kind reflect.Kind) bool {
return kind == reflect.Bool
}
// IsKindValueBytesSlice checks whether the val of reflect.Value is byte slice.
// It returns false for the zero reflect.Value and for non-slice values; a
// []uint8 (= []byte) satisfies it.
func IsKindValueBytesSlice(val reflect.Value) bool {
	if !val.IsValid() {
		return false
	}
	if !IsKindSlice(GetKind(val)) {
		return false
	}
	return GetElemKind(val) == reflect.Uint8
}
// IsKindSlice checks whether the kind is slice or not.
func IsKindSlice(kind reflect.Kind) bool {
return kind == reflect.Slice
}
// IsKindArray checks whether the kind is array or not.
func IsKindArray(kind reflect.Kind) bool {
return kind == reflect.Array
}
// IsKindList checks whether the kind is array or slice.
func IsKindList(kind reflect.Kind) bool {
return IsKindSlice(kind) || IsKindArray(kind)
}
// IsKindComplex checks whether the kind is complex or not.
func IsKindComplex(kind reflect.Kind) bool {
return kind >= reflect.Complex64 && kind <= reflect.Complex128
}
// IsKindFloat checks whether the kind is float or not.
func IsKindFloat(kind reflect.Kind) bool {
return kind >= reflect.Float32 && kind <= reflect.Float64
}
// IsKindInt checks whether the kind is int or not.
func IsKindInt(kind reflect.Kind) bool {
return kind >= reflect.Int && kind <= reflect.Int64
}
// IsKindUint checks whether the kind is uint or not.
func IsKindUint(kind reflect.Kind) bool {
return kind >= reflect.Uint && kind <= reflect.Uintptr
}
// IsKindUnsafePointer checks whether the kind is unsafe ptr or not.
func IsKindUnsafePointer(kind reflect.Kind) bool {
return kind == reflect.UnsafePointer
}
// IsKindString checks whether the kind is string or not.
func IsKindString(kind reflect.Kind) bool {
return kind == reflect.String
}
// IsKindPtr checks whether the input kind is reflect.Ptr.
func IsKindPtr(kind reflect.Kind) bool {
return kind == reflect.Ptr
}
// IsKindInterface checks whether the input kind is reflect.Interface.
func IsKindInterface(kind reflect.Kind) bool {
return kind == reflect.Interface
}
// IsKindStruct checks whether the input kind is reflect.Struct.
func IsKindStruct(kind reflect.Kind) bool {
return kind == reflect.Struct
}
// IsKindMap checks whether the input kind is reflect.Map.
func IsKindMap(kind reflect.Kind) bool {
return kind == reflect.Map
}
// IsKindChan checks whether the input kind is reflect.Chan.
func IsKindChan(kind reflect.Kind) bool {
return kind == reflect.Chan
}
// IsKindValueNil checks whether the input val of reflect.Value can call IsNil method.
func IsKindValueNil(val reflect.Value) bool {
	return IsKindNil(GetKind(val))
}
// IsKindNil checks whether the input kind can call IsNil method.
// These are exactly the kinds whose values have a nil representation.
func IsKindNil(kind reflect.Kind) bool {
	switch kind {
	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
		return true
	}
	return false
} | vendor/github.com/fairyhunter13/reflecthelper/v4/kind.go | 0.722429 | 0.66386 | kind.go | starcoder |
package base
import (
"bytes"
sdkTypes "github.com/cosmos/cosmos-sdk/types"
"github.com/persistenceOne/persistenceSDK/constants/errors"
"github.com/persistenceOne/persistenceSDK/schema/types"
"github.com/persistenceOne/persistenceSDK/utilities/meta"
)
var _, _ types.Data = (*Data_AccAddressData)(nil), (*AccAddressData)(nil)
// Compare orders two account-address data values by the raw bytes of their
// addresses. It panics if sortable is not a *Data_AccAddressData.
func (accAddressData Data_AccAddressData) Compare(sortable types.Data) int {
	compareAccAddressData, Error := accAddressDataFromInterface(sortable)
	if Error != nil {
		panic(Error)
	}
	return bytes.Compare(accAddressData.AccAddressData.Value.GetBytes(), compareAccAddressData.AccAddressData.Value.GetBytes())
}
// String returns the bech32 string form of the wrapped address.
func (accAddressData Data_AccAddressData) String() string {
	return accAddressData.AccAddressData.Value.String()
}
// GetTypeID returns the type tag ("A") for account-address data.
func (accAddressData Data_AccAddressData) GetTypeID() types.ID {
	return NewID("A")
}
// ZeroValue returns an account-address datum wrapping the empty address.
func (accAddressData Data_AccAddressData) ZeroValue() types.Data {
	return NewAccAddressData(sdkTypes.AccAddress{})
}
// GenerateHashID hashes the address string; the zero value maps to the
// empty ID.
func (accAddressData Data_AccAddressData) GenerateHashID() types.ID {
	if accAddressData.Compare(accAddressData.ZeroValue()) == 0 {
		return NewID("")
	}
	return NewID(meta.Hash(accAddressData.AccAddressData.Value.String()))
}
// AsAccAddress returns the wrapped address; never errors for this type.
func (accAddressData Data_AccAddressData) AsAccAddress() (sdkTypes.AccAddress, error) {
	return accAddressData.AccAddressData.Value.AsSDKTypesAccAddress(), nil
}
// AsListData always fails: an address is not list data.
func (accAddressData Data_AccAddressData) AsListData() (types.ListData, error) {
	zeroValue, _ := Data_ListData{}.ZeroValue().AsListData()
	return zeroValue, errors.IncorrectFormat
}
// AsString always fails: an address is not string data.
func (accAddressData Data_AccAddressData) AsString() (string, error) {
	zeroValue, _ := Data_StringData{}.ZeroValue().AsString()
	return zeroValue, errors.IncorrectFormat
}
// AsDec always fails: an address is not decimal data.
func (accAddressData Data_AccAddressData) AsDec() (sdkTypes.Dec, error) {
	zeroValue, _ := Data_DecData{}.ZeroValue().AsDec()
	return zeroValue, errors.IncorrectFormat
}
// AsHeight always fails: an address is not height data.
func (accAddressData Data_AccAddressData) AsHeight() (types.Height, error) {
	zeroValue, _ := Data_HeightData{}.ZeroValue().AsHeight()
	return zeroValue, errors.IncorrectFormat
}
// AsID always fails: an address is not ID data.
func (accAddressData Data_AccAddressData) AsID() (types.ID, error) {
	zeroValue, _ := Data_IdData{}.ZeroValue().AsID()
	return zeroValue, errors.IncorrectFormat
}
// Get returns the wrapped address value.
func (accAddressData Data_AccAddressData) Get() interface{} {
	return accAddressData.AccAddressData.Value
}
// Unmarshal decodes protobuf bytes into the inner AccAddressData.
func (accAddressData Data_AccAddressData) Unmarshal(dAtA []byte) error {
	return accAddressData.AccAddressData.Unmarshal(dAtA)
}
// Reset clears the value (protobuf message interface).
func (accAddressData *Data_AccAddressData) Reset() { *accAddressData = Data_AccAddressData{} }
// ProtoMessage marks the type as a protobuf message.
func (*Data_AccAddressData) ProtoMessage() {}
// accAddressDataFromInterface downcasts a types.Data to Data_AccAddressData,
// returning MetaDataError for any other concrete type.
func accAddressDataFromInterface(data types.Data) (Data_AccAddressData, error) {
	switch value := data.(type) {
	case *Data_AccAddressData:
		return *value, nil
	default:
		return Data_AccAddressData{}, errors.MetaDataError
	}
}
// NewAccAddressData wraps an sdk account address as a Data value.
func NewAccAddressData(value sdkTypes.AccAddress) *Data_AccAddressData {
	return &Data_AccAddressData{
		AccAddressData: &AccAddressData{
			Value: NewAccAddressFromSDKTypesAccAddress(value),
		},
	}
}
// ReadAccAddressData parses a bech32 address string; the empty string maps
// to the zero value.
func ReadAccAddressData(dataString string) (types.Data, error) {
	if dataString == "" {
		return Data_AccAddressData{}.ZeroValue(), nil
	}
	accAddress, Error := sdkTypes.AccAddressFromBech32(dataString)
	if Error != nil {
		return Data_AccAddressData{}.ZeroValue(), Error
	}
	return NewAccAddressData(accAddress), nil
}
// Compare orders two AccAddressData values by the raw bytes of their
// addresses; it panics if sortable is not an *AccAddressData.
func (accAddressData AccAddressData) Compare(sortable types.Data) int {
	compareAccAddressData, Error := dummyAccAddressDataFromInterface(sortable)
	if Error != nil {
		panic(Error)
	}
	return bytes.Compare(accAddressData.Value.AsSDKTypesAccAddress().Bytes(), compareAccAddressData.Value.AsSDKTypesAccAddress().Bytes())
}
// String returns the bech32 string form of the address.
func (accAddressData AccAddressData) String() string {
	return accAddressData.Value.String()
}
// GetTypeID returns the type tag ("A") for account-address data.
func (accAddressData AccAddressData) GetTypeID() types.ID {
	return NewID("A")
}
// ZeroValue returns an account-address datum wrapping the empty address.
func (accAddressData AccAddressData) ZeroValue() types.Data {
	return NewAccAddressData(sdkTypes.AccAddress{})
}
// GenerateHashID hashes the address string; the zero value maps to the
// empty ID.
func (accAddressData AccAddressData) GenerateHashID() types.ID {
	if accAddressData.Compare(accAddressData.ZeroValue()) == 0 {
		return NewID("")
	}
	return NewID(meta.Hash(accAddressData.Value.String()))
}
// AsAccAddress returns the wrapped address; never errors for this type.
func (accAddressData AccAddressData) AsAccAddress() (sdkTypes.AccAddress, error) {
	return accAddressData.Value.AsSDKTypesAccAddress(), nil
}
// AsListData always fails: an address is not list data.
func (accAddressData AccAddressData) AsListData() (types.ListData, error) {
	zeroValue, _ := ListData{}.ZeroValue().AsListData()
	return zeroValue, errors.IncorrectFormat
}
// AsString always fails: an address is not string data.
func (accAddressData AccAddressData) AsString() (string, error) {
	zeroValue, _ := StringData{}.ZeroValue().AsString()
	return zeroValue, errors.IncorrectFormat
}
// AsDec always fails: an address is not decimal data.
func (accAddressData AccAddressData) AsDec() (sdkTypes.Dec, error) {
	zeroValue, _ := DecData{}.ZeroValue().AsDec()
	return zeroValue, errors.IncorrectFormat
}
// AsHeight always fails: an address is not height data.
func (accAddressData AccAddressData) AsHeight() (types.Height, error) {
	zeroValue, _ := HeightData{}.ZeroValue().AsHeight()
	return zeroValue, errors.IncorrectFormat
}
// AsID always fails: an address is not ID data.
func (accAddressData AccAddressData) AsID() (types.ID, error) {
	zeroValue, _ := IDData{}.ZeroValue().AsID()
	return zeroValue, errors.IncorrectFormat
}
// Get returns the wrapped address value.
func (accAddressData AccAddressData) Get() interface{} {
	return accAddressData.Value
}
// dummyAccAddressDataFromInterface downcasts a types.Data to AccAddressData,
// returning MetaDataError for any other concrete type.
func dummyAccAddressDataFromInterface(data types.Data) (AccAddressData, error) {
	switch value := data.(type) {
	case *AccAddressData:
		return *value, nil
	default:
		return AccAddressData{}, errors.MetaDataError
	}
} | schema/types/base/accAddressData.go | 0.641759 | 0.406862 | accAddressData.go | starcoder |
package binomial
import . "github.com/howz97/algorithm/util"
// Sentinel values returned by the isNil helpers; encoded as bits so three
// nil-ness flags can be packed into a small switch key in Merge.
const (
	isNil = 0
	notNil = 1
)
// Binomial is a binomial queue
type Binomial struct {
	size int
	trees []*node // trees[i] is the rank-i binomial tree, or nil
}
// New return a binomial queue with default capacity
func New() *Binomial {
	return &Binomial{
		trees: make([]*node, 8),
	}
}
// Merge bq1 to bq. ErrExceedCap returned when merge would exceed capacity
// The merge is destructive for both queues: b absorbs b2 (swapping the two
// first if b2 has more ranks), walking the ranks like binary addition with
// a carry tree.
func (b *Binomial) Merge(b2 *Binomial) {
	if len(b2.trees) > len(b.trees) {
		*b, *b2 = *b2, *b
	}
	b.size += b2.size
	n := len(b.trees)
	var carry *node
	for i := 0; i < n; i++ {
		// Switch key is a 3-bit number: carry | b2 | b nil-ness flags.
		switch carry.isNil()<<2 + b2.isNil(i)<<1 + b.isNil(i) {
		case 2: // 010
			b.trees[i] = b2.trees[i]
		case 3: // 011
			carry = merge(b.trees[i], b2.trees[i])
			b.trees[i] = nil
		case 4: // 100
			b.trees[i] = carry
			carry = nil
		case 5: // 101
			carry = merge(carry, b.trees[i])
			b.trees[i] = nil
		case 6: // 110
			fallthrough
		case 7: // 111
			carry = merge(carry, b2.trees[i])
		default: // 000, 001
		}
	}
	if carry != nil {
		b.trees = append(b.trees, carry)
	}
}
// isNil reports (as a bit) whether rank i of this queue holds a tree;
// indexes past the slice count as nil.
func (b *Binomial) isNil(i int) int {
	if i >= len(b.trees) {
		return isNil
	}
	return b.trees[i].isNil()
}
// Push inserts a single element by merging a one-element queue into b.
func (b *Binomial) Push(p Comparable) {
	b.Merge(&Binomial{
		size: 1,
		trees: []*node{{p: p}},
	})
}
// Pop removes and returns the minimum element. Popping an empty queue
// panics with an index-out-of-range error.
func (b *Binomial) Pop() Comparable {
	index := 0 // index of node to pop
	for ; index < len(b.trees); index++ {
		if b.trees[index] != nil {
			break
		}
	}
	// Scan the remaining roots for the overall minimum.
	for i := index + 1; i < len(b.trees); i++ {
		if b.trees[i] != nil && b.trees[i].Cmp(b.trees[index]) == Less {
			index = i
		}
	}
	// remove tree at index
	popNode := b.trees[index]
	b.trees[index] = nil
	b.size -= 1 << uint(index)
	// trees left by popNode become a new binomial
	// The children are linked highest-rank-first, so walk the sibling list
	// assigning ranks index-1 down to 0.
	trees := popNode.son
	b2 := &Binomial{
		trees: make([]*node, index),
	}
	for i := index - 1; i >= 0; i-- {
		b2.trees[i] = trees
		sibling := trees.sibling
		trees.sibling = nil
		trees = sibling
	}
	b2.size = 1<<uint(index) - 1
	// merge b2 back
	b.Merge(b2)
	return popNode.p
}
// Size get the current size of this binomial queue
func (b *Binomial) Size() int {
	return b.size
}
// node is one node of a binomial tree: payload, next sibling, first child.
type node struct {
	p Comparable
	sibling *node
	son *node
}
// isNil reports (as a bit) whether n is a real node.
func (n *node) isNil() int {
	if n == nil {
		return isNil
	}
	return notNil
}
// Cmp compares the payloads of two nodes.
func (n *node) Cmp(other *node) Result {
	return n.p.Cmp(other.p)
}
// merge links two trees of equal rank, making the larger root the first
// child of the smaller; the smaller root is returned.
// both a and b MUST not be nil
func merge(a, b *node) *node {
	if a.Cmp(b) == More {
		*a, *b = *b, *a
	}
	b.sibling = a.son
	a.son = b
	return a
} | pq/binomial/binomial.go | 0.601242 | 0.404802 | binomial.go | starcoder |
package texture
import (
g2d "github.com/jphsd/graphics2d"
"math"
)
// Fractal holds the pieces necessary for fractal generation. Xfm defines the affine transformation
// applied successively to the coordinate space and CFunc, how the multiple resultant values should be combined.
// An optional filter can be specified which will be applied to the final result.
type Fractal struct {
	Src Field // field sampled once per octave
	Xfm *g2d.Aff3 // coordinate transform applied between octaves
	CFunc func(...float64) float64 // combines the per-octave samples
	FFunc func(float64) float64 // optional post-filter; may be nil
	Octaves int // number of whole octaves beyond the first sample
	Rem float64 // fractional octave weight in [0, 1)
	N int // number of sample slots (Octaves+1, +1 more if Rem > 0)
}
// NewFractal returns a new Fractal instance. The fractional part of
// octaves becomes Rem, a linear weight for one extra partial octave.
func NewFractal(src Field, xfm *g2d.Aff3, comb func(...float64) float64, octaves float64) *Fractal {
	n := int(math.Floor(octaves))
	r := octaves - float64(n)
	vn := n + 1
	if r > 0 {
		vn++
	}
	return &Fractal{src, xfm, comb, nil, n, r, vn}
}
// Eval2 implements the Field interface. It samples Src once per octave at
// successively transformed coordinates, optionally adds a partial octave
// weighted linearly by Rem, combines the samples with CFunc, clamps the
// result and finally applies FFunc if one is set.
func (f *Fractal) Eval2(x, y float64) float64 {
	nv := make([]float64, f.N)
	for i := 0; i <= f.Octaves; i++ {
		nv[i] = f.Src.Eval2(x, y)
		pt := f.Xfm.Apply([]float64{x, y})
		x, y = pt[0][0], pt[0][1]
	}
	if f.Rem > 0 {
		// Note linear and not geometric...
		// Bug fix: the partial octave gets its own slot (f.Octaves+1), the
		// one NewFractal sized N for. Previously this wrote nv[f.Octaves],
		// clobbering the last full octave and leaving the extra slot zero.
		nv[f.Octaves+1] = f.Rem * f.Src.Eval2(x, y)
	}
	v := clamp(f.CFunc(nv...))
	if f.FFunc == nil {
		return v
	}
	return f.FFunc(v)
}
const (
// MaxOctaves is the maximum number of iterations a Fractal can perform
MaxOctaves = 10
)
// FBM holds the precomputed weights for an fBM.
type FBM struct {
Weights []float64
}
// NewFBM returns a new FBM instance based on the Hurst and Lacunarity parameters.
func NewFBM(hurst, lacunarity float64) *FBM {
w := make([]float64, MaxOctaves)
for i := 0; i < MaxOctaves; i++ {
w[i] = math.Pow(lacunarity, -hurst*float64(i+1))
}
return &FBM{w}
}
// Combine takes the values from the successive applications of the affine transform and
// combines them using the precomputed weights.
func (f *FBM) Combine(values ...float64) float64 {
res := 0.0
for i := 0; i < len(values); i++ {
res += values[i] * f.Weights[i]
}
return res
}
// MF holds the precomputed weights and offset for an multifractal.
type MF struct {
Weights []float64
Offset float64
}
// NewMF returns a new MF instance based on the Hurst and Lacunarity parameters.
func NewMF(hurst, lacunarity, offset float64) *MF {
w := make([]float64, MaxOctaves)
for i := 0; i < MaxOctaves; i++ {
w[i] = math.Pow(lacunarity, -hurst*float64(i+1))
}
return &MF{w, offset}
}
// Combine takes the values from the successive applications of the affine transform and
// combines them using the precomputed weights and offset.
func (f *MF) Combine(values ...float64) float64 {
res := 0.0
for i := 0; i < len(values); i++ {
res += (values[i] + f.Offset) * f.Weights[i]
}
return res
} | fractal.go | 0.882276 | 0.587085 | fractal.go | starcoder |
package pix
// Implementation of functions in this file based on
// https://fgiesen.wordpress.com/2009/12/13/decoding-morton-codes/
// MortonCode packs three 8-to-10-bit coordinates into one uint32 by bit
// interleaving (x in bits 0,3,6,..., y in 1,4,7,..., z in 2,5,8,...).
type MortonCode uint32

// smoosh2 is the inverse of spread2: it compacts every third bit of code
// (bits 0, 3, 6, ...) into the low 10 bits of the result.
func smoosh2(code MortonCode) uint32 {
	v := code & 0x09249249                // v = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0
	v = (v ^ (v >> 2)) & 0x030c30c3       // v = ---- --98 ---- 76-- --54 ---- 32-- --10
	v = (v ^ (v >> 4)) & 0x0300f00f       // v = ---- --98 ---- ---- 7654 ---- ---- 3210
	v = (v ^ (v >> 8)) & 0xff0000ff       // v = ---- --98 ---- ---- ---- ---- 7654 3210
	v = (v ^ (v >> 16)) & 0x000003ff      // v = ---- ---- ---- ---- ---- --98 7654 3210
	return uint32(v)
}

// spread2 "inserts" two 0 bits after each of the 10 low bits of v.
func spread2(v uint32) MortonCode {
	v &= 0x000003ff                       // v = ---- ---- ---- ---- ---- --98 7654 3210
	v = (v ^ (v << 16)) & 0xff0000ff      // v = ---- --98 ---- ---- ---- ---- 7654 3210
	v = (v ^ (v << 8)) & 0x0300f00f       // v = ---- --98 ---- ---- 7654 ---- ---- 3210
	v = (v ^ (v << 4)) & 0x030c30c3       // v = ---- --98 ---- 76-- --54 ---- 32-- --10
	v = (v ^ (v << 2)) & 0x09249249       // v = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0
	return MortonCode(v)
}

// mortonCode interleaves x, y and z into a single code. The three spread
// results occupy disjoint bit slots, so OR combines them losslessly.
func mortonCode(x, y, z uint8) MortonCode {
	return spread2(uint32(x)) | spread2(uint32(y))<<1 | spread2(uint32(z))<<2
}

// mortonX/Y/Z extract the individual coordinates back out of a code.
func mortonX(code MortonCode) uint8 { return uint8(smoosh2(code)) }
func mortonY(code MortonCode) uint8 { return uint8(smoosh2(code >> 1)) }
func mortonZ(code MortonCode) uint8 { return uint8(smoosh2(code >> 2)) }

// Each mask has 0s in the bit slots of its own component and 1s everywhere
// else (10 bits per coordinate, with the two unused high bits also set), so
// OR-ing a code with a mask saturates the other two coordinates and a plain
// integer compare then acts on the remaining coordinate alone.
const xMask = MortonCode(0b11110110110110110110110110110110) // mortonCode(0, 1023, 1023)
const yMask = MortonCode(0b11101101101101101101101101101101) // mortonCode(1023, 0, 1023)
const zMask = MortonCode(0b11011011011011011011011011011011) // mortonCode(1023, 1023, 0)

// Per-coordinate less-than / greater-than comparisons performed directly
// in Morton space.
func ltMortonX(p, q MortonCode) bool { return p|xMask < q|xMask } // p.x < q.x
func ltMortonY(p, q MortonCode) bool { return p|yMask < q|yMask } // p.y < q.y
func ltMortonZ(p, q MortonCode) bool { return p|zMask < q|zMask } // p.z < q.z
func gtMortonX(p, q MortonCode) bool { return p|xMask > q|xMask } // p.x > q.x
func gtMortonY(p, q MortonCode) bool { return p|yMask > q|yMask } // p.y > q.y
func gtMortonZ(p, q MortonCode) bool { return p|zMask > q|zMask } // p.z > q.z
// ---
// 2d

// spread1 "inserts" a 0 bit after each of the 16 low bits of v.
func spread1(v uint32) uint32 {
	v &= 0x0000ffff                      // v = ---- ---- ---- ---- fedc ba98 7654 3210
	v = (v ^ (v << 8)) & 0x00ff00ff      // v = ---- ---- fedc ba98 ---- ---- 7654 3210
	v = (v ^ (v << 4)) & 0x0f0f0f0f      // v = ---- fedc ---- ba98 ---- 7654 ---- 3210
	v = (v ^ (v << 2)) & 0x33333333      // v = --fe --dc --ba --98 --76 --54 --32 --10
	v = (v ^ (v << 1)) & 0x55555555      // v = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
	return v
}

// smoosh1 is the inverse of spread1: it compacts the even bits of v into
// the low 16 bits of the result.
func smoosh1(v uint32) uint32 {
	v &= 0x55555555                      // v = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
	v = (v ^ (v >> 1)) & 0x33333333      // v = --fe --dc --ba --98 --76 --54 --32 --10
	v = (v ^ (v >> 2)) & 0x0f0f0f0f      // v = ---- fedc ---- ba98 ---- 7654 ---- 3210
	v = (v ^ (v >> 4)) & 0x00ff00ff      // v = ---- ---- fedc ba98 ---- ---- 7654 3210
	v = (v ^ (v >> 8)) & 0x0000ffff      // v = ---- ---- ---- ---- fedc ba98 7654 3210
	return v
}

// interleave2 packs two 16-bit coordinates into one 32-bit code; the two
// spread results occupy disjoint bit slots, so OR combines them losslessly.
func interleave2(x, y uint32) uint32 { return spread1(x) | spread1(y)<<1 }

// deinterleave2x/y recover the original coordinates from a code.
func deinterleave2x(code uint32) uint16 { return uint16(smoosh1(code)) }
func deinterleave2y(code uint32) uint16 { return uint16(smoosh1(code >> 1)) }
package mathg
import "math"
// Quaternion represents a rotation as a Vec4; X, Y, Z are the vector part
// and W the scalar part (Hamilton convention, per the Multiply formula).
type Quaternion Vec4
// IsZero reports whether all four components are within epsilon of zero.
func (q *Quaternion) IsZero() bool {
	return math.Abs(q.X) < epsilon && math.Abs(q.Y) < epsilon && math.Abs(q.Z) < epsilon && math.Abs(q.W) < epsilon
}
// IsEqual reports whether q and q1 agree component-wise within epsilon.
func (q *Quaternion) IsEqual(q1 *Quaternion) bool {
	return math.Abs(q.X-q1.X) < epsilon && math.Abs(q.Y-q1.Y) < epsilon && math.Abs(q.Z-q1.Z) < epsilon && math.Abs(q.W-q1.W) < epsilon
}
// Zero sets all components to 0 in place.
func (q *Quaternion) Zero() {
	q.X = 0.
	q.Y = 0.
	q.Z = 0.
	q.W = 0.
}
// Null sets q to the identity rotation (0, 0, 0, 1) in place.
func (q *Quaternion) Null() {
	q.X = 0.
	q.Y = 0.
	q.Z = 0.
	q.W = 1.
}
// Multiply returns the Hamilton product q * q1 (composition of rotations;
// not commutative).
func (q *Quaternion) Multiply(q1 *Quaternion) *Quaternion {
	return &Quaternion{
		q.W*q1.X + q.X*q1.W + q.Y*q1.Z - q.Z*q1.Y,
		q.W*q1.Y + q.Y*q1.W + q.Z*q1.X - q.X*q1.Z,
		q.W*q1.Z + q.Z*q1.W + q.X*q1.Y - q.Y*q1.X,
		q.W*q1.W - q.X*q1.X - q.Y*q1.Y - q.Z*q1.Z,
	}
}
// MultiplyScalar returns q scaled component-wise by scalar.
func (q *Quaternion) MultiplyScalar(scalar float64) *Quaternion {
	return &Quaternion{q.X * scalar, q.Y * scalar, q.Z * scalar, q.W * scalar}
}
// Divide returns q divided by q1, normalized by |q1|^2.
// NOTE(review): every numerator term is added with + (no sign alternation),
// which does not match the usual Hamilton q * q1^-1 expansion — confirm the
// intended convention against the reference implementation this file
// follows before relying on it.
func (q *Quaternion) Divide(q1 *Quaternion) *Quaternion {
	d := math.Pow(q1.X, 2) + math.Pow(q1.Y, 2) + math.Pow(q1.Z, 2) + math.Pow(q1.W, 2)
	return &Quaternion{
		(q1.X*q.X + q1.Y*q.Y + q1.Z*q.Z + q1.W*q.W) / d,
		(q1.X*q.Y + q1.Y*q.X + q1.Z*q.W + q1.W*q.Z) / d,
		(q1.X*q.Z + q1.Y*q.W + q1.Z*q.X + q1.W*q.Y) / d,
		(q1.X*q.W + q1.Y*q.Z + q1.Z*q.Y + q1.W*q.X) / d,
	}
}
// DivideScalar returns q divided component-wise by scalar.
func (q *Quaternion) DivideScalar(scalar float64) *Quaternion {
	return &Quaternion{q.X / scalar, q.Y / scalar, q.Z / scalar, q.W / scalar}
}
// Magnitude returns the Euclidean length of q.
func (q *Quaternion) Magnitude() float64 {
	return math.Sqrt(math.Pow(q.X, 2) + math.Pow(q.Y, 2) + math.Pow(q.Z, 2) + math.Pow(q.W, 2))
}
// LengthSquared returns the squared length of q (no sqrt).
func (q *Quaternion) LengthSquared() float64 {
	return math.Pow(q.X, 2) + math.Pow(q.Y, 2) + math.Pow(q.Z, 2) + math.Pow(q.W, 2)
}
// Negative returns -q (which represents the same rotation).
func (q *Quaternion) Negative() *Quaternion {
	return &Quaternion{-1 * q.X, -1 * q.Y, -1 * q.Z, -1 * q.W}
}
// Conjugate returns q with the vector part negated.
func (q *Quaternion) Conjugate() *Quaternion {
	return &Quaternion{-1 * q.X, -1 * q.Y, -1 * q.Z, q.W}
}
// Inverse returns the multiplicative inverse, conjugate(q) / |q|^2.
func (q *Quaternion) Inverse() *Quaternion {
	l := 1. / (math.Pow(q.X, 2) + math.Pow(q.Y, 2) + math.Pow(q.Z, 2) + math.Pow(q.W, 2))
	c := q.Conjugate()
	return &Quaternion{c.X * l, c.Y * l, c.Z * l, c.W * l}
}
// Normalize returns q scaled to unit length. A zero quaternion yields
// non-finite components (division by zero magnitude) — callers must guard.
func (q *Quaternion) Normalize() *Quaternion {
	l := 1. / q.Magnitude()
	return &Quaternion{q.X * l, q.Y * l, q.Z * l, q.W * l}
}
// Dot returns the 4-component dot product of q and q1.
func (q *Quaternion) Dot(q1 *Quaternion) float64 {
	// Bug fix: the W components must be multiplied, not added
	// (was `q.W + q1.W`, which corrupted Slerp and Angle as well).
	return q.X*q1.X + q.Y*q1.Y + q.Z*q1.Z + q.W*q1.W
}
// Power raises a unit quaternion to the given exponent by scaling its
// rotation angle: alpha -> alpha*exponent. When |W| is within epsilon of 1
// the rotation is (near) identity and q is returned unchanged to avoid the
// sin(alpha) ~ 0 division.
func (q *Quaternion) Power(exponent float64) *Quaternion {
	if math.Abs(q.W) < 1.-epsilon {
		alpha := math.Acos(q.W)
		newAlpha := alpha * exponent
		s := math.Sin(newAlpha) / math.Sin(alpha)
		// Bug fix: the scalar part is cos(newAlpha). The original returned
		// math.Cos(q.W) — i.e. cos(cos(alpha)) — which is not a valid
		// rotation quaternion.
		return &Quaternion{q.X * s, q.Y * s, q.Z * s, math.Cos(newAlpha)}
	}
	return q
}
// ToQuaternionFromAxisAngle builds the rotation of `angle` radians about
// axis v. Assumes v is a unit axis — TODO confirm; a non-unit axis yields
// a non-unit quaternion.
func (v *Vec4) ToQuaternionFromAxisAngle(angle float64) *Quaternion {
	half := angle * 0.5
	s := math.Sin(half)
	return &Quaternion{v.X * s, v.Y * s, v.Z * s, math.Cos(half)}
}
// ToQuaternion returns the rotation taking direction v to direction v1,
// built from their cross product (axis) and dot product (angle), then
// normalized.
func (v *Vec3) ToQuaternion(v1 *Vec3) *Quaternion {
	d := v.Dot(v1)
	als := v.LengthSquared()
	bls := v1.LengthSquared()
	c := v.Cross(v1)
	q := &Quaternion{c.X, c.Y, c.Z, d + math.Sqrt(als*bls)}
	return q.Normalize()
}
// ToQuaternion converts a rotation matrix to a quaternion, branching on the
// trace / largest diagonal element so the square root argument stays
// positive and well-conditioned.
func (m *Mat4) ToQuaternion() *Quaternion {
	scale := m.M11 + m.M22 + m.M33
	if scale > 0. {
		sqrt := math.Sqrt(scale + 1.)
		half := 0.5 / sqrt
		return &Quaternion{
			(m.M23 - m.M32) * half,
			(m.M31 - m.M13) * half,
			(m.M12 - m.M21) * half,
			sqrt * 0.5,
		}
	} else if (m.M11 >= m.M22) && (m.M11 >= m.M33) {
		sqrt := math.Sqrt(1. + m.M11 - m.M22 - m.M33)
		half := 0.5 / sqrt
		return &Quaternion{
			0.5 * sqrt,
			(m.M12 + m.M21) * half,
			(m.M13 + m.M31) * half,
			(m.M23 - m.M32) * half,
		}
	} else if m.M22 > m.M33 {
		sqrt := math.Sqrt(1. + m.M22 - m.M11 - m.M33)
		half := 0.5 / sqrt
		return &Quaternion{
			(m.M21 + m.M12) * half,
			0.5 * sqrt,
			(m.M32 + m.M23) * half,
			(m.M31 - m.M13) * half,
		}
	} else {
		sqrt := math.Sqrt(1. + m.M33 - m.M11 - m.M22)
		half := 0.5 / sqrt
		return &Quaternion{
			(m.M31 + m.M13) * half,
			(m.M32 + m.M23) * half,
			(0.5 * sqrt),
			(m.M12 - m.M21) * half,
		}
	}
}
// Lerp returns the straight component-wise linear interpolation between q
// and q1 at percent (the result is not renormalized).
func (q *Quaternion) Lerp(q1 *Quaternion, percent float64) *Quaternion {
	return &Quaternion{
		q.X + (q1.X-q.X)*percent,
		q.Y + (q1.Y-q.Y)*percent,
		q.Z + (q1.Z-q.Z)*percent,
		q.W + (q1.W-q.W)*percent,
	}
}
// Slerp spherically interpolates from q to q1. A negative dot product flips
// q1 so the shorter arc is taken; when the inputs are nearly parallel
// (dot > 0.9995) it falls back to linear weights to avoid dividing by
// sin(theta) ~ 0.
func (q *Quaternion) Slerp(q1 *Quaternion, percent float64) *Quaternion {
	var tmp *Quaternion = q1
	var f0, f1 float64
	d := q.Dot(q1)
	if d < 0. {
		tmp = q1.Negative()
		d = -d
	}
	if d > 0.9995 {
		f0 = 1.0 - percent
		f1 = percent
	} else {
		theta := math.Acos(d)
		sin_theta := math.Sin(theta)
		f0 = math.Sin((1.0-percent)*theta) / sin_theta
		f1 = math.Sin(percent*theta) / sin_theta
	}
	return &Quaternion{
		q.X*f0 + tmp.X*f1,
		q.Y*f0 + tmp.Y*f1,
		q.Z*f0 + tmp.Z*f1,
		q.W*f0 + tmp.W*f1,
	}
}
// Angle returns the angle (radians) between q and q1 from their normalized
// dot product.
func (q *Quaternion) Angle(q1 *Quaternion) float64 {
	s := 1. / math.Sqrt(q.LengthSquared()*q1.LengthSquared())
	return math.Acos(q.Dot(q1) * s)
} | quaternion.go | 0.862352 | 0.627038 | quaternion.go | starcoder |
package loganal
import (
"regexp"
"strings"
)
// A matcher implements incrementally consuming a string using
// regexps.
type matcher struct {
	str string // string being matched
	pos int // current byte offset into str; everything before it is consumed
	groups []string // match groups
	// matchPos is the byte position of the beginning of the
	// match in str.
	matchPos int
	// literals maps from literal strings to the index of the
	// next occurrence of that string.
	literals map[string]int
}
// newMatcher returns a matcher positioned at the start of str.
func newMatcher(str string) *matcher {
	return &matcher{str: str, literals: map[string]int{}}
}
// done reports whether the whole string has been consumed.
func (m *matcher) done() bool {
	return m.pos >= len(m.str)
}
// consume searches for r in the remaining text. If found, it consumes
// up to the end of the match, fills m.groups with the matched groups,
// and returns true.
func (m *matcher) consume(r *regexp.Regexp) bool {
	// idx holds start/end byte-offset pairs for the whole match and each
	// group, relative to m.pos.
	idx := r.FindStringSubmatchIndex(m.str[m.pos:])
	if idx == nil {
		m.groups = m.groups[:0]
		return false
	}
	// Reuse the groups slice across calls when it has enough capacity.
	if len(idx)/2 <= cap(m.groups) {
		m.groups = m.groups[:len(idx)/2]
	} else {
		m.groups = make([]string, len(idx)/2, len(idx))
	}
	for i := range m.groups {
		if idx[i*2] >= 0 {
			m.groups[i] = m.str[m.pos+idx[i*2] : m.pos+idx[i*2+1]]
		} else {
			// Group did not participate in the match.
			m.groups[i] = ""
		}
	}
	m.matchPos = m.pos + idx[0]
	m.pos += idx[1]
	return true
}
// peek returns whether r matches the remaining text.
// The matcher is not advanced.
func (m *matcher) peek(r *regexp.Regexp) bool {
	return r.MatchString(m.str[m.pos:])
}
// lineHasLiteral returns whether any of literals is found before the
// end of the current line. Next-occurrence positions are cached in
// m.literals and refreshed lazily once m.pos moves past them.
func (m *matcher) lineHasLiteral(literals ...string) bool {
	// Find the position of the next literal.
	nextLiteral := len(m.str)
	for _, literal := range literals {
		next, ok := m.literals[literal]
		if !ok || next < m.pos {
			// Update the literal position.
			i := strings.Index(m.str[m.pos:], literal)
			if i < 0 {
				// Sentinel: no further occurrence anywhere in str.
				next = len(m.str)
			} else {
				next = m.pos + i
			}
			m.literals[literal] = next
		}
		if next < nextLiteral {
			nextLiteral = next
		}
	}
	// If the next literal comes after this line, this line
	// doesn't have any of literals.
	if nextLiteral != len(m.str) {
		eol := strings.Index(m.str[m.pos:], "\n")
		if eol >= 0 && eol+m.pos < nextLiteral {
			return false
		}
	}
	return true
}
// hasPrefix returns whether the remaining text begins with s.
func (m *matcher) hasPrefix(s string) bool {
	return strings.HasPrefix(m.str[m.pos:], s)
}
// line consumes and returns the remainder of the current line, not
// including the line terminator.
func (m *matcher) line() string {
if i := strings.Index(m.str[m.pos:], "\n"); i >= 0 {
line := m.str[m.pos : m.pos+i]
m.pos += i + 1
return line
} else {
line := m.str[m.pos:]
m.pos = len(m.str)
return line
}
}
// peekLine returns the remainder of the current line, not including
// the line terminator, and the position of the beginning of the next
// line.
func (m *matcher) peekLine() (string, int) {
if i := strings.Index(m.str[m.pos:], "\n"); i >= 0 {
return m.str[m.pos : m.pos+i], m.pos + i + 1
} else {
return m.str[m.pos:], len(m.str)
}
} | internal/loganal/matcher.go | 0.603114 | 0.503235 | matcher.go | starcoder |
package SetSimilaritySearch
import (
"errors"
"sort"
)
// Pair is a pair of slice indexes to the sets in the input to all-pairs
// algorithms.
type Pair struct {
X int
Y int
Similarity float64
}
type postingListEntry struct {
setIndex int
tokenPosition int
setSize int
}
// AllPairs finds all pairs of transformed sets with similarity greater than a
// threshold. This is an implementation of the All-Pair-Binary algorithm in the
// paper "Scaling Up All Pairs Similarity Search" by Bayardo et al., with
// position and length filter enhancement.
// Currently supported similarity functions are "jaccard" and "cosine".
// This function returns a channel of Pairs which contains the indexes to
// the input set slice.
func AllPairs(sets [][]int, similarityFunctionName string,
	similarityThreshold float64) (<-chan Pair, error) {
	if len(sets) == 0 {
		return nil, errors.New("input sets mut be a non-empty slice")
	}
	if similarityThreshold < 0 || similarityThreshold > 1.0 {
		return nil, errors.New("input similarityThreshold must be in the range [0, 1]")
	}
	// Look up the similarity function and its companion filter functions
	// from the package-level tables (defined elsewhere in this package).
	var simFunc function
	if f, exists := similarityFuncs[similarityFunctionName]; exists {
		simFunc = f
	} else {
		return nil, errors.New("input similarityFunctionName does not exist")
	}
	if !symmetricSimilarityFuncs[similarityFunctionName] {
		return nil, errors.New("input similarityFunctionName is not symmetric")
	}
	overlapThresholdFunc := overlapThresholdFuncs[similarityFunctionName]
	overlapIndexThresholdFunc := overlapIndexThresholdFuncs[similarityFunctionName]
	positionFilterFunc := positionFilterFuncs[similarityFunctionName]
	pairs := make(chan Pair)
	go func() {
		// Create a slice of set indexes.
		indexes := make([]int, len(sets))
		for i := range indexes {
			indexes[i] = i
		}
		// Sort set indexes by set length.
		// Processing shortest-first tightens the length filter.
		sort.Slice(indexes, func(i, j int) bool {
			return len(sets[i]) < len(sets[j])
		})
		defer close(pairs)
		// Inverted index: token -> entries of already-processed sets.
		postingLists := make(map[int][]postingListEntry)
		// Main loop of the All-Pairs algorithm.
		for _, x1 := range indexes {
			s1 := sets[x1]
			// Only the prefix of s1 needs probing: any qualifying pair must
			// share at least one token within it.
			t := overlapThresholdFunc(len(s1), similarityThreshold)
			prefixSize := len(s1) - t + 1
			prefix := s1[:prefixSize]
			// Find candidates using tokens in the prefix.
			candidates := make([]int, 0)
			for p1, token := range prefix {
				for _, entry := range postingLists[token] {
					if positionFilterFunc(s1, sets[entry.setIndex], p1,
						entry.tokenPosition, similarityThreshold) {
						candidates = append(candidates, entry.setIndex)
					}
				}
			}
			// Sort and iterate through candidate indexes to verify
			// pairs. Sorting makes duplicate candidates adjacent so they
			// can be skipped in one pass.
			// TODO: optimize using partial overlaps.
			sort.Ints(candidates)
			prevCandidate := -1
			for _, x2 := range candidates {
				// Skip seen candidate.
				if x2 == prevCandidate {
					continue
				}
				prevCandidate = x2
				// Compute the exact similarity of this candidate
				sim := simFunc(s1, sets[x2])
				if sim < similarityThreshold {
					continue
				}
				// Emit with the larger index first.
				if x1 > x2 {
					pairs <- Pair{x1, x2, sim}
				} else {
					pairs <- Pair{x2, x1, sim}
				}
			}
			// Insert the tokens in the prefix into index.
			// The indexing prefix may be shorter than the probing prefix.
			t = overlapIndexThresholdFunc(len(s1), similarityThreshold)
			prefixSize = len(s1) - t + 1
			prefix = s1[:prefixSize]
			for j, token := range prefix {
				if _, exists := postingLists[token]; !exists {
					postingLists[token] = make([]postingListEntry, 0)
				}
				postingLists[token] = append(postingLists[token],
					postingListEntry{x1, j, len(sets[x1])})
			}
		}
	}()
	return pairs, nil
} | allpairs.go | 0.600423 | 0.488405 | allpairs.go | starcoder |
package main
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/konsti/aoc2021/utils/color"
"github.com/konsti/aoc2021/utils/input"
"github.com/konsti/aoc2021/utils/logging"
)
type Point struct {
number int
neighbours []*Point
}
func setNeighbours(input [][]Point) [][]Point {
maxX := len(input[0])
maxY := len(input)
for y := 0; y < len(input); y++ {
for x := 0; x < len(input[y]); x++ {
point := &input[y][x]
if x == 0 {
if y == 0 {
point.neighbours = append(point.neighbours, &input[y+1][x], &input[y][x+1])
} else if y == maxY-1 {
point.neighbours = append(point.neighbours, &input[y-1][x], &input[y][x+1])
} else {
point.neighbours = append(point.neighbours, &input[y-1][x], &input[y][x+1], &input[y+1][x])
}
} else if x == maxX-1 {
if y == 0 {
point.neighbours = append(point.neighbours, &input[y][x-1], &input[y+1][x])
} else if y == maxY-1 {
point.neighbours = append(point.neighbours, &input[y-1][x], &input[y][x-1])
} else {
point.neighbours = append(point.neighbours, &input[y-1][x], &input[y][x-1], &input[y+1][x])
}
} else {
if y == 0 {
point.neighbours = append(point.neighbours, &input[y][x-1], &input[y][x+1], &input[y+1][x])
} else if y == maxY-1 {
point.neighbours = append(point.neighbours, &input[y][x-1], &input[y][x+1], &input[y-1][x])
} else {
point.neighbours = append(point.neighbours, &input[y][x-1], &input[y][x+1], &input[y-1][x], &input[y+1][x])
}
}
}
}
return input
}
// allNeighboursBigger reports whether every adjacent cell is strictly
// higher than this one, i.e. whether p is a low point of the heightmap.
func (point *Point) allNeighboursBigger() bool {
	for i := range point.neighbours {
		if point.neighbours[i].number <= point.number {
			return false
		}
	}
	return true
}
// contains reports whether item occurs in slice, comparing by pointer
// identity (a linear scan).
func contains(slice []*Point, item *Point) bool {
	for i := range slice {
		if slice[i] == item {
			return true
		}
	}
	return false
}
// readInput parses the puzzle file into a grid of Points.
//
// Each line of the file is a row of single-digit heights; every digit
// becomes one Point. Neighbour pointers are wired up via setNeighbours
// before the grid is returned. Any read or parse error aborts the
// program through logging.FailOnError.
func readInput(filename string) [][]Point {
	lines, err := input.ReadLines(filename)
	logging.FailOnError(err, "Error reading input file")
	// NOTE(review): this local shadows the imported "input" package; it
	// works because the package is only referenced above, but a rename
	// would be clearer.
	var input [][]Point
	for _, line := range lines {
		var row []Point
		numbers := strings.Split(line, "")
		for _, number := range numbers {
			num, err := strconv.Atoi(number)
			logging.FailOnError(err, "Error converting string to int")
			row = append(row, Point{number: num})
		}
		input = append(input, row)
	}
	// setNeighbours mutates the grid in place; its return value is ignored.
	setNeighbours(input)
	return input
}
// Part1 sums the risk levels (height + 1) of every low point in the grid.
func Part1(input [][]Point) int {
	sumRiskLevel := 0
	for _, row := range input {
		for _, point := range row {
			// A low point contributes its height plus one.
			if point.allNeighboursBigger() {
				sumRiskLevel += point.number + 1
			}
		}
	}
	return sumRiskLevel
}
// fillBasin performs a depth-first flood fill starting at point, adding
// every reachable Point whose height is not 9 to basin, and returns the
// grown basin slice.
//
// basin doubles as the visited set: a neighbour is only recursed into if
// it is not already present (pointer identity via contains). Because that
// membership test is a linear scan, the fill is quadratic in the basin
// size — fine for puzzle-sized grids.
func fillBasin(basin []*Point, point *Point) []*Point {
	basin = append(basin, point)
	for index, neighbour := range point.neighbours {
		if !contains(basin, point.neighbours[index]) && neighbour.number != 9 {
			basin = fillBasin(basin, point.neighbours[index])
		}
	}
	return basin
}
// Part2 multiplies the sizes of the three largest basins.
//
// A basin is the set of points reachable from a low point by flood fill,
// stopping at height 9. Low points are collected as pointers into the
// grid so that the flood fill's visited set (pointer identity) operates
// directly on grid cells. The original seeded the fill with the address
// of a range-loop *copy*, which made the real low point get added a
// second time and required a len(basin)-1 compensation; using grid
// pointers removes that hack while producing identical basin sizes.
func Part2(input [][]Point) int {
	var lowPoints []*Point
	for y := range input {
		for x := range input[y] {
			p := &input[y][x]
			if p.allNeighboursBigger() {
				lowPoints = append(lowPoints, p)
			}
		}
	}
	basinSizes := make([]int, 0, len(lowPoints))
	for _, p := range lowPoints {
		basinSizes = append(basinSizes, len(fillBasin(nil, p)))
	}
	sort.Ints(basinSizes)
	// Guard against inputs with fewer than three basins instead of
	// panicking on a negative slice index.
	start := len(basinSizes) - 3
	if start < 0 {
		start = 0
	}
	result := 1
	for _, size := range basinSizes[start:] {
		result *= size
	}
	return result
}
// main solves both parts of Advent of Code 2021 day 9 for the example
// and the real input, printing colourised results to stdout.
func main() {
	fmt.Println(color.Purple("Advent of Code - Day9"))
	fmt.Print("======================\n\n")
	exampleInput := readInput("example.txt")
	input := readInput("input.txt")
	// Part 1
	fmt.Println("* Part 1 | What is the sum of the risk levels of all low points on your heightmap?")
	exampleResultPart1 := strconv.Itoa(Part1(exampleInput))
	fmt.Printf(color.Yellow("[Example Input]: %s \n"), color.Teal(exampleResultPart1))
	resultPart1 := strconv.Itoa(Part1(input))
	fmt.Printf(color.Green("[Real Input]: %s \n\n"), color.Teal(resultPart1))
	// Part 2
	fmt.Println("* Part 2 | What do you get if you multiply together the sizes of the three largest basins?")
	exampleResultPart2 := strconv.Itoa(Part2(exampleInput))
	fmt.Printf(color.Yellow("[Example Input]: %s \n"), color.Teal(exampleResultPart2))
	resultPart2 := strconv.Itoa(Part2(input))
	fmt.Printf(color.Green("[Real Input]: %s \n\n"), color.Teal(resultPart2))
}
package validate_binary_search_tree
/*
98. 验证二叉搜索树 https://leetcode-cn.com/problems/validate-binary-search-tree
给定一个二叉树,判断其是否是一个有效的二叉搜索树。
假设一个二叉搜索树具有如下特征:
节点的左子树只包含小于当前节点的数。
节点的右子树只包含大于当前节点的数。
所有左子树和右子树自身必须也是二叉搜索树。
示例 1:
输入:
2
/ \
1 3
输出: true
示例 2:
输入:
5
/ \
1 4
/ \
3 6
输出: false
解释: 输入为: [5,1,4,null,null,3,6]。
根节点的值为 5 ,但是其右子节点值为 4 。
*/
// TreeNode is a binary tree node as defined by the LeetCode problem.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
/*
Naive recursion (WRONG):

	func isValidBST(root *TreeNode) bool {
		if root == nil {
			return true
		}
		if root.Left == nil && root.Right == nil {
			return true
		}
		if root.Left == nil {
			return root.Val < root.Right.Val && isValidBST(root.Right)
		}
		if root.Right == nil {
			return root.Val > root.Left.Val && isValidBST(root.Left)
		}
		return root.Left.Val < root.Val &&
			root.Val < root.Right.Val &&
			isValidBST(root.Left) &&
			isValidBST(root.Right)
	}

Comparing a node only with its direct children is incorrect: a BST
requires EVERY value in the left subtree to be smaller than the root and
EVERY value in the right subtree to be larger. Each node must instead be
checked against upper/lower bounds inherited from its ancestors.
Time and space complexity are both O(n), n = total number of nodes.
*/
func isValidBST(root *TreeNode) bool {
	return help(root, nil, nil)
}
// help reports whether every value in subtree t lies strictly inside the
// open interval (lo.Val, hi.Val); a nil bound means unbounded on that
// side. Time and space complexity are both O(n), n = number of nodes.
func help(t, lo, hi *TreeNode) bool {
	if t == nil {
		return true
	}
	if lo != nil && t.Val <= lo.Val {
		return false
	}
	if hi != nil && t.Val >= hi.Val {
		return false
	}
	// Descend with tightened bounds: t becomes the upper bound on the
	// left and the lower bound on the right.
	return help(t.Left, lo, t) && help(t.Right, t, hi)
}
/* Recursive in-order traversal.

An in-order walk of a valid BST visits values in strictly increasing
order, so it suffices to remember the previously visited node and verify
each value exceeds the last — no auxiliary array is required.
Time and space complexity are both O(n), n = number of nodes.
*/
func isValidBST0(root *TreeNode) bool {
	var prev *TreeNode
	var inorder func(t *TreeNode) bool
	inorder = func(t *TreeNode) bool {
		if t == nil {
			return true
		}
		// Left subtree first.
		if !inorder(t.Left) {
			return false
		}
		// The current value must strictly exceed the previous one.
		if prev != nil && prev.Val >= t.Val {
			return false
		}
		prev = t
		return inorder(t.Right)
	}
	return inorder(root)
}
/* Iterative in-order traversal using an explicit stack.
Time and space complexity are both O(n), n = number of nodes.
*/
func isValidBST1(root *TreeNode) bool {
	var prev *TreeNode
	var stack []*TreeNode
	for root != nil || len(stack) > 0 {
		// Push the entire left spine.
		for root != nil {
			stack = append(stack, root)
			root = root.Left
		}
		// Pop the next in-order node and compare with its predecessor.
		root = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if prev != nil && root.Val <= prev.Val {
			return false
		}
		prev = root
		root = root.Right
	}
	return true
}
/*
节点标记迭代
时空复杂度均为O(n), n为节点总数
*/
func isValidBST11(root *TreeNode) bool {
var prev *TreeNode
stack := []*TreeNode{root}
marked := make(map[*TreeNode]bool, 0)
for len(stack) > 0 {
node := stack[len(stack)-1]
stack = stack[:len(stack)-1]
if node == nil {
continue
}
if marked[node] {
if prev != nil && prev.Val >= node.Val {
return false
}
prev = node
continue
}
marked[node] = true
stack = append(stack, node.Right)
stack = append(stack, node)
stack = append(stack, node.Left)
}
return true
} | solutions/validate-binary-search-tree/d.go | 0.550607 | 0.425963 | d.go | starcoder |
package main
/*****************************************************************************************************
*
* Design a HashMap without using any built-in hash table libraries.
*
* To be specific, your design should include these functions:
*
* put(key, value) : Insert a (key, value) pair into the HashMap. If the value already exists
* in the HashMap, update the value.
* get(key): Returns the value to which the specified key is mapped, or -1 if this map
* contains no mapping for the key.
* remove(key) : Remove the mapping for the value key if this map contains the mapping for the
* key.
*
* Example:
*
* MyHashMap hashMap = new MyHashMap();
* hashMap.put(1, 1);
* hashMap.put(2, 2);
* hashMap.get(1); // returns 1
* hashMap.get(3); // returns -1 (not found)
* hashMap.put(2, 1); // update the existing value
* hashMap.get(2); // returns 1
* hashMap.remove(2); // remove the mapping for 2
* hashMap.get(2); // returns -1 (not found)
*
* Note:
*
* All keys and values will be in the range of [0, 1000000].
* The number of operations will be in the range of [1, 10000].
* Please do not use the built-in HashMap library.
*
******************************************************************************************************/
// Most brute-force approach short of using the built-in map: the problem
// guarantees keys in [0, 1000000], so a plain array indexed by key works
// as a direct-address table. A stored value of -1 marks an absent key
// (values are non-negative per the problem statement).
// TODO @zhangshilin write with red-black tree
type MyHashMap struct {
	data []int
}

// keySpace is the number of distinct keys: [0, 1000000] inclusive.
// The original allocated 10,000,000 slots (~80 MB) — ten times more than
// the key range requires.
const keySpace = 1000001

/** Initialize your data structure here. */
func Constructor() MyHashMap {
	data := make([]int, keySpace)
	for i := range data {
		data[i] = -1
	}
	return MyHashMap{
		data: data,
	}
}

/** value will always be non-negative. */
func (m *MyHashMap) Put(key int, value int) {
	m.data[key] = value
}

/** Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key */
func (m *MyHashMap) Get(key int) int {
	return m.data[key]
}

/** Removes the mapping of the specified value key if this map contains a mapping for the key */
func (m *MyHashMap) Remove(key int) {
	m.data[key] = -1
}
/**
* Your MyHashMap object will be instantiated and called as such:
* obj := Constructor();
* obj.Put(key,value);
* param_2 := obj.Get(key);
* obj.Remove(key);
 */
package main
import (
"errors"
"glycodist/pdb"
"glycodist/uniprot"
"math"
"sort"
)
// residueDistance pairs a residue (and its UniProt sequence position)
// with a Euclidean distance, in the structure's coordinate units, to some
// reference residue.
type residueDistance struct {
	UnpPos   int64
	Residue  *pdb.Residue
	Distance float64
}
// distance returns the Euclidean distance between the coordinates of two
// atoms. Plain multiplications replace math.Pow(x, 2), which is clearer
// and considerably cheaper on this hot path.
func distance(atom1 *pdb.Atom, atom2 *pdb.Atom) float64 {
	dx := atom1.X - atom2.X
	dy := atom1.Y - atom2.Y
	dz := atom1.Z - atom2.Z
	return math.Sqrt(dx*dx + dy*dy + dz*dz)
}
// residuesDistance returns the minimum pairwise atom-atom distance
// between two residues.
//
// The running minimum starts at +Inf so residues with an empty Atoms
// slice no longer cause an index-out-of-range panic (the original seeded
// it from Atoms[0]); if either residue has no atoms, +Inf is returned.
// Results are unchanged for non-empty inputs.
func residuesDistance(res1 *pdb.Residue, res2 *pdb.Residue) float64 {
	minDist := math.Inf(1)
	for _, a1 := range res1.Atoms {
		for _, a2 := range res2.Atoms {
			if d := distance(a1, a2); d < minDist {
				minDist = d
			}
		}
	}
	return minDist
}
// maxResDist finds the residue covered by structure p that is furthest
// in 3D space from the residue mapped at UniProt position pos, returning
// its position, residue and distance.
//
// Only the first residue at each UniProt position is considered
// (residues[0]) — presumably one per chain; TODO confirm for
// multi-chain structures. err is currently always nil and exists for
// signature symmetry with the other distance helpers.
//
// NOTE(review): p.UniProtPositions[unp.ID][pos][0] is indexed without a
// presence check; a pos not covered by the crystal would panic here.
func maxResDist(pos int64, unp *uniprot.UniProt, p *pdb.PDB) (furthest residueDistance, err error) {
	fromRes := p.UniProtPositions[unp.ID][pos][0]
	// The loop variable pos below shadows the parameter; inside the loop
	// it is the candidate residue's UniProt position.
	for pos, residues := range p.UniProtPositions[unp.ID] {
		res := residues[0]
		dist := residuesDistance(fromRes, res)
		if dist > furthest.Distance {
			furthest = residueDistance{
				UnpPos:   pos,
				Residue:  res,
				Distance: dist,
			}
		}
	}
	return furthest, nil
}
// minGlycoDist returns the closest and second-closest glycosylation
// sites — ranked by minimum atom-atom distance — to the residue mapped
// at UniProt position pos in structure p.
//
// An error is returned when the sequence has no glycosylation sites at
// all, or when none of them is resolved in the crystal. When exactly one
// site is resolved, closest2nd is a placeholder with a nil Residue and
// Distance NaN.
func minGlycoDist(pos int64, unp *uniprot.UniProt, p *pdb.PDB) (closest residueDistance, closest2nd residueDistance, err error) {
	if len(unp.PTMs.Glycosilations) < 1 {
		return closest, closest2nd, errors.New("sequence does not have at least one glyco site")
	}
	// Glycosilation site residues (asparagines) in structure
	var glycoSites []residueDistance
	for _, site := range unp.PTMs.Glycosilations {
		if glycoSiteResidues, ok := p.UniProtPositions[unp.ID][site.Position]; ok {
			for _, res := range glycoSiteResidues {
				gs := residueDistance{
					UnpPos:  site.Position,
					Residue: res,
					// Distance from this site residue to the query
					// residue; only the first residue at pos is used.
					Distance: residuesDistance(res, p.UniProtPositions[unp.ID][pos][0]),
				}
				glycoSites = append(glycoSites, gs)
			}
		}
	}
	if len(glycoSites) < 1 {
		return closest, closest2nd, errors.New("crystal does not cover at least one glyco site")
	}
	// Sort sites by 3D distance
	sort.Slice(glycoSites, func(i, j int) bool {
		return glycoSites[i].Distance < glycoSites[j].Distance
	})
	closest = glycoSites[0]
	if len(glycoSites) > 1 {
		closest2nd = glycoSites[1]
	} else {
		closest2nd = residueDistance{
			UnpPos: 0, Residue: nil, Distance: math.NaN(),
		}
	}
	return closest, closest2nd, nil
}
func coveredGlycoSites(unp *uniprot.UniProt, p *pdb.PDB) (quantity int64) {
for _, site := range unp.PTMs.Glycosilations {
if _, ok := p.UniProtPositions[unp.ID][site.Position]; ok {
quantity++
}
}
return quantity
} | distance.go | 0.6705 | 0.407098 | distance.go | starcoder |
package lib
import (
"fmt"
"reflect"
)
// A Decomp (short for Decomposition) consists of a labelled tree which
// subdivides a graph in a certain way.
type Decomp struct {
	Graph Graph
	Root  Node
	// SkipRerooting is needed for BalDetK.
	SkipRerooting bool
}
// String implements fmt.Stringer by delegating to the root node.
func (d Decomp) String() string {
	return d.Root.String()
}
// RestoreSubedges replaces any ad-hoc subedge with actual edges occurring
// in the graph. The empty Decomp is left untouched.
func (d *Decomp) RestoreSubedges() {
	if reflect.DeepEqual(*d, Decomp{}) { // don't change the empty decomp
		return
	}
	newRoot := d.Root.restoreEdges(d.Graph.Edges)
	d.Root = newRoot
}
// Correct checks if a decomp fulfils the properties of a GHD when given a
// hypergraph g as input:
//
//  1. it must decompose exactly g,
//  2. every bag must be a subset of its node's lambda label,
//  3. every edge of g must be covered by some node, and
//  4. every vertex must span a connected subtree.
//
// It also checks the special condition of HDs, though it merely prints a
// warning if it is not satisfied; the return value is not affected by
// this additional check.
func (d Decomp) Correct(g Graph) bool {
	if reflect.DeepEqual(d, Decomp{}) { // empty Decomp is always false
		return false
	}
	// must be a decomp of same graph
	if !d.Graph.equal(g) {
		if d.Graph.Edges.Len() > 0 {
			fmt.Println("Decomp of different graph")
		} else {
			fmt.Println("Empty Decomp")
		}
		return false
	}
	// Every bag must be subset of the lambda label.
	// Println (not the original newline-less Printf) for consistency
	// with the other diagnostics in this method.
	if !d.Root.bagSubsets() {
		fmt.Println("Bags not subsets of edge labels")
		return false
	}
	// Every edge has to be covered.
	for _, e := range d.Graph.Edges.Slice() {
		if !d.Root.coversEdge(e) {
			fmt.Println("Edge ", e, " isn't covered")
			return false
		}
	}
	// Connectedness: each vertex must induce a connected subtree.
	for _, i := range d.Graph.Edges.Vertices() {
		nodeCheck, _ := d.Root.connected(i, false)
		if !nodeCheck {
			mutex.RLock()
			fmt.Printf("Vertex %v doesn't span connected subtree\n", m[i])
			mutex.RUnlock()
			return false
		}
	}
	// special condition (optionally; warning only, never fails the check)
	if !d.Root.noSCViolation() {
		fmt.Println("SCV found!. Not a valid hypertree decomposition!")
	}
	return true
}
// CheckWidth returns the size of the largest bag of any node in a decomp
func (d Decomp) CheckWidth() int {
var output = 0
current := []Node{d.Root}
// iterate over decomp in BFS
for len(current) > 0 {
children := []Node{}
for _, n := range current {
if n.Cover.Len() > output {
output = n.Cover.Len()
}
for _, c := range n.Children {
children = append(children, c) // build up the next level of the tree
}
}
current = children
}
return output
} | lib/decomp.go | 0.65379 | 0.448789 | decomp.go | starcoder |
package slippy
import (
"math"
"errors"
"github.com/go-spatial/geom"
"fmt"
)
// MaxZoom is the deepest zoom level supported (0..22).
const MaxZoom = 22

// NewTile returns a tile with the given zoom, column and row.
func NewTile(z, x, y uint) *Tile {
	return &Tile{
		Z: z,
		X: x,
		Y: y,
	}
}

// Tile describes a slippy tile.
type Tile struct {
	// zoom
	Z uint
	// column
	X uint
	// row
	Y uint
}
// NewTileMinMaxer returns the smallest tile which fits the
// geom.MinMaxer. Note: it assumes the values of ext are EPSG:4326
// (lat/lng).
//
// It starts from the tile containing the extent's upper-left corner at
// MaxZoom and walks up through the ancestors until one also contains the
// lower-right corner. The error returned from the range callback is used
// purely as a "stop iterating" signal.
//
// NOTE(review): the fmt.Println below looks like leftover debug output;
// removing it would also orphan the fmt import if nothing else uses it.
// NOTE(review): the `int(z) >= 0` guard relies on uint wraparound at
// z-- past zero converting to a negative int — fragile but functional.
func NewTileMinMaxer(ext geom.MinMaxer) *Tile {
	upperLeft := NewTileLatLon(MaxZoom, ext.MaxY(), ext.MinX())
	point := &geom.Point{ext.MaxX(), ext.MinY()}
	var ret *Tile
	for z := uint(MaxZoom); int(z) >= 0 && ret == nil; z-- {
		upperLeft.RangeFamilyAt(z, func(tile *Tile) error {
			if tile.Extent4326().Contains(point) {
				ret = tile
				return errors.New("stop iter")
			}
			return nil
		})
	}
	fmt.Println("returning", ret)
	return ret
}
// NewTileLatLon returns the tile at zoom z containing the given
// latitude/longitude coordinate.
func NewTileLatLon(z uint, lat, lon float64) *Tile {
	return &Tile{
		Z: z,
		X: Lon2Tile(z, lon),
		Y: Lat2Tile(z, lat),
	}
}
// ZXY returns the tile's zoom, column and row.
func (t *Tile) ZXY() (uint, uint, uint) { return t.Z, t.X, t.Y }
// Extent3857 returns the tile's extent in EPSG:3857 (aka Web Mercator) projection.
func (t *Tile) Extent3857() *geom.Extent {
	// The opposite corner is taken from the next tile over (x+1, y+1).
	return geom.NewExtent(
		[2]float64{Tile2WebX(t.Z, t.X), Tile2WebY(t.Z, t.Y+1)},
		[2]float64{Tile2WebX(t.Z, t.X+1), Tile2WebY(t.Z, t.Y)},
	)
}
// Extent4326 returns the tile's extent in EPSG:4326 (aka lat/long).
func (t *Tile) Extent4326() *geom.Extent {
	// The opposite corner is taken from the next tile over (x+1, y+1).
	return geom.NewExtent(
		[2]float64{Tile2Lon(t.Z, t.X), Tile2Lat(t.Z, t.Y+1)},
		[2]float64{Tile2Lon(t.Z, t.X+1), Tile2Lat(t.Z, t.Y)},
	)
}
// TODO (ear7h): sibling support
// RangeFamilyAt calls f on every tile vertically related to t at the
// specified zoom: the single ancestor (or t itself) when zoom <= t.Z,
// otherwise all 4^(zoom-t.Z) descendants. Iteration stops at the first
// non-nil error from f, which is returned to the caller.
func (t *Tile) RangeFamilyAt(zoom uint, f func(*Tile) error) error {
	// handle ancestors and self
	if zoom <= t.Z {
		mag := t.Z - zoom
		arg := NewTile(zoom, t.X>>mag, t.Y>>mag)
		return f(arg)
	}
	// handle descendants
	mag := zoom - t.Z
	// NOTE(review): uint(1) << mag would avoid this float round trip.
	delta := uint(math.Exp2(float64(mag)))
	leastX := t.X << mag
	leastY := t.Y << mag
	for x := leastX; x < leastX+delta; x++ {
		for y := leastY; y < leastY+delta; y++ {
			err := f(NewTile(zoom, x, y))
			if err != nil {
				return err
			}
		}
	}
	return nil
}
package pointu
import (
"math"
)
// A rectangle has an upper left point (min) and a lower right point (max);
// it serves as the axis-aligned bounding box of a kd-tree (sub)region.
type rectangle struct {
	min, max Point
}

// kdNode is one node of a 2-d tree: the stored point, which axis this
// level splits on (alternates per level), and the two half-space subtrees.
type kdNode struct {
	p        Point
	splitByX bool
	left, right *kdNode
}

// kdTree is a 2-d tree over Points plus the bounding box of the whole set.
type kdTree struct {
	root   *kdNode
	bounds rectangle
}
// makeKdTree builds a balanced 2-d tree from pts, alternating the split
// axis per level and using the median element as each node's point.
//
// NOTE: pts is reordered in place by the sort performed at every
// recursion step. bounds should be the bounding box of all the points;
// it is stored for later nearest-neighbour searches.
func makeKdTree(pts Points, bounds rectangle) kdTree {
	var split func(pts Points, splitByX bool) *kdNode
	split = func(pts Points, splitByX bool) *kdNode {
		if len(pts) == 0 {
			return nil
		}
		// Sort along the current axis so the median element splits the set.
		if splitByX {
			pts.sortByX()
		} else {
			pts.sortByY()
		}
		med := len(pts) / 2
		return &kdNode{
			p:        pts[med],
			splitByX: splitByX,
			// Children take the halves on either side of the median,
			// splitting on the other axis.
			left:  split(pts[:med], !splitByX),
			right: split(pts[med+1:], !splitByX),
		}
	}
	return kdTree{
		root:   split(pts, true),
		bounds: bounds,
	}
}
// findNearestNeighbour returns the point in the tree closest to p along
// with the distance reported by getDist (the result names suggest a
// squared distance — the values are only compared, never square-rooted;
// confirm against Point.getDist).
//
// Classic kd-tree nearest-neighbour search: descend first into the half
// containing the target, then visit the far half only if the splitting
// plane is closer than the best distance found so far.
func (t kdTree) findNearestNeighbour(p Point) (best Point, bestSqd float64) {
	var search func(node *kdNode, target Point, r rectangle, maxDistSqd float64) (nearest Point, distSqd float64)
	search = func(node *kdNode, target Point, r rectangle, maxDistSqd float64) (nearest Point, distSqd float64) {
		if node == nil {
			return Point{}, math.Inf(1)
		}
		var targetInLeft bool
		var leftBox, rightBox rectangle
		var nearestNode, furthestNode *kdNode
		var nearestBox, furthestBox rectangle
		// Split the current box along this node's axis.
		if node.splitByX {
			leftBox = rectangle{r.min, Point{node.p.X, r.max.Y}}
			rightBox = rectangle{Point{node.p.X, r.min.Y}, r.max}
			targetInLeft = target.X <= node.p.X
		} else {
			leftBox = rectangle{r.min, Point{r.max.X, node.p.Y}}
			rightBox = rectangle{Point{r.min.X, node.p.Y}, r.max}
			targetInLeft = target.Y <= node.p.Y
		}
		// Search the half containing the target first.
		if targetInLeft {
			nearestNode, nearestBox = node.left, leftBox
			furthestNode, furthestBox = node.right, rightBox
		} else {
			nearestNode, nearestBox = node.right, rightBox
			furthestNode, furthestBox = node.left, leftBox
		}
		nearest, distSqd = search(nearestNode, target, nearestBox, maxDistSqd)
		if distSqd < maxDistSqd {
			maxDistSqd = distSqd
		}
		// Squared distance from target to the splitting plane: if it
		// exceeds the best found so far, the far half cannot contain a
		// closer point and is pruned.
		var d float64
		if node.splitByX {
			d = node.p.X - target.X
		} else {
			d = node.p.Y - target.Y
		}
		d *= d
		if d > maxDistSqd {
			return
		}
		// The node's own point may beat the subtree result.
		if d = node.p.getDist(target); d < distSqd {
			nearest = node.p
			distSqd = d
			maxDistSqd = distSqd
		}
		// Finally examine the far half with the tightened bound.
		tmpNearest, tmpSqd := search(furthestNode, target, furthestBox, maxDistSqd)
		if tmpSqd < distSqd {
			nearest = tmpNearest
			distSqd = tmpSqd
		}
		return
	}
	return search(t.root, p, t.bounds, math.Inf(1))
}
package spec
const data_identifier_map_json = `
{
"01": {
"id": "01",
"max": "Aucune",
"min": "0",
"shortDesc": "Identifiant unique du document.",
"type": "Alphanumérique"
},
"02": {
"id": "02",
"max": "Aucune",
"min": "0",
"shortDesc": "Catégorie de document",
"type": "Alphanumérique"
},
"03": {
"id": "03",
"max": "Aucune",
"min": "0",
"shortDesc": "Sous-catégorie de document",
"type": "Alphanumérique"
},
"04": {
"id": "04",
"max": "Aucune",
"min": "0",
"shortDesc": "Application de composition",
"type": "Alphanumérique"
},
"05": {
"id": "05",
"max": "Aucune",
"min": "0",
"shortDesc": "Version de l’application de composition",
"type": "Alphanumérique"
},
"06": {
"id": "06",
"max": "4",
"min": "4",
"shortDesc": "Date de l’association entre le document et le code 2D-Doc.",
"type": "Alphanumérique"
},
"07": {
"id": "07",
"max": "6",
"min": "6",
"shortDesc": "Heure de l’association entre le document et le code 2D-Doc.",
"type": "Numérique"
},
"08": {
"id": "08",
"max": "4",
"min": "4",
"shortDesc": "Date d’expiration du document",
"type": "Alphanumérique"
},
"09": {
"id": "09",
"max": "4",
"min": "4",
"shortDesc": "Nombre de pages du document",
"type": "Numérique"
},
"0A": {
"id": "0A",
"max": "9",
"min": "9",
"shortDesc": "Editeur du 2D-Doc",
"type": "Numérique"
},
"0B": {
"id": "0B",
"max": "9",
"min": "9",
"shortDesc": "Intégrateur du 2D-Doc",
"type": "Numérique"
},
"0C": {
"id": "0C",
"max": "Aucune",
"min": "0",
"shortDesc": "URL du document",
"type": "Alphanumérique et symboles"
},
"10": {
"id": "10",
"max": "38",
"min": "0",
"shortDesc": "Ligne 1 de la norme adresse postale du bénéficiaire de la prestation",
"type": "Alphanumérique"
},
"11": {
"id": "11",
"max": "38",
"min": "0",
"shortDesc": "Qualité et/ou titre de la personne bénéficiaire de la prestation",
"type": "Alphanumérique"
},
"12": {
"id": "12",
"max": "38",
"min": "0",
"shortDesc": "Prénom de la personne bénéficiaire de la prestation",
"type": "Alphanumérique"
},
"13": {
"id": "13",
"max": "38",
"min": "0",
"shortDesc": "Nom de la personne bénéficiaire de la prestation",
"type": "Alphanumérique"
},
"14": {
"id": "14",
"max": "38",
"min": "0",
"shortDesc": "Ligne 1 de la norme adresse postale du destinataire de la facture",
"type": "Alphanumérique"
},
"15": {
"id": "15",
"max": "38",
"min": "0",
"shortDesc": "Qualité et/ou titre de la personne destinataire de la facture",
"type": "Alphanumérique"
},
"16": {
"id": "16",
"max": "38",
"min": "0",
"shortDesc": "Prénom de la personne destinataire de la facture",
"type": "Alphanumérique"
},
"17": {
"id": "17",
"max": "38",
"min": "0",
"shortDesc": "Nom de la personne destinataire de la facture",
"type": "Alphanumérique"
},
"18": {
"id": "18",
"max": "Aucune",
"min": "0",
"shortDesc": "Numéro de la facture",
"type": "Alphanumérique"
},
"19": {
"id": "19",
"max": "Aucune",
"min": "0",
"shortDesc": "Numéro de client",
"type": "Alphanumérique"
},
"1A": {
"id": "1A",
"max": "Aucune",
"min": "0",
"shortDesc": "Numéro du contrat",
"type": "Alphanumérique"
},
"1B": {
"id": "1B",
"max": "Aucune",
"min": "0",
"shortDesc": "Identifiant du souscripteur du contrat",
"type": "Alphanumérique"
},
"1C": {
"id": "1C",
"max": "8",
"min": "8",
"shortDesc": "Date d’effet du contrat",
"type": "Numérique"
},
"1D": {
"id": "1D",
"max": "16",
"min": "0",
"shortDesc": "Montant TTC de la facture",
"type": "Numérique"
},
"1E": {
"id": "1E",
"max": "30",
"min": "0",
"shortDesc": "Numéro de téléphone du bénéficiaire de la prestation",
"type": "Numérique"
},
"1F": {
"id": "1F",
"max": "30",
"min": "0",
"shortDesc": "Numéro de téléphone du destinataire de la facture",
"type": "Numérique"
},
"1G": {
"id": "1G",
"max": "1",
"min": "1",
"shortDesc": "Présence d’un co-bénéficiaire de la prestation non mentionné dans le code",
"type": "Numérique"
},
"1H": {
"id": "1H",
"max": "1",
"min": "1",
"shortDesc": "Présence d’un co-destinataire de la facture non mentionné dans le code",
"type": "Numérique"
},
"1I": {
"id": "1I",
"max": "38",
"min": "0",
"shortDesc": "Ligne 1 de la norme adresse postale du co-bénéficiaire de la prestation.",
"type": "Alphanumérique"
},
"1J": {
"id": "1J",
"max": "38",
"min": "0",
"shortDesc": "Qualité et/ou titre du co-bénéficiaire de la prestation.",
"type": "Alphanumérique"
},
"1K": {
"id": "1K",
"max": "38",
"min": "0",
"shortDesc": "Prénom du co-bénéficiaire de la prestation.",
"type": "Alphanumérique"
},
"1L": {
"id": "1L",
"max": "38",
"min": "0",
"shortDesc": "Nom du co-bénéficiaire de la prestation.",
"type": "Alphanumérique"
},
"1M": {
"id": "1M",
"max": "38",
"min": "0",
"shortDesc": "Ligne 1 de la norme adresse postale du co-destinataire de la facture.",
"type": "Alphanumérique"
},
"1N": {
"id": "1N",
"max": "38",
"min": "0",
"shortDesc": "Qualité et/ou titre du co-destinataire de la facture.",
"type": "Alphanumérique"
},
"1O": {
"id": "1O",
"max": "38",
"min": "0",
"shortDesc": "Prénom du co-destinataire de la facture.",
"type": "Alphanumérique"
},
"1P": {
"id": "1P",
"max": "38",
"min": "0",
"shortDesc": "Nom du co-destinataire de la facture.",
"type": "Alphanumérique"
},
"20": {
"id": "20",
"max": "38",
"min": "0",
"shortDesc": "Ligne 2 de la norme adresse postale du point de service des prestations",
"type": "Alphanumérique"
},
"21": {
"id": "21",
"max": "38",
"min": "0",
"shortDesc": "Ligne 3 de la norme adresse postale du point de service des prestations",
"type": "Alphanumérique"
},
"22": {
"id": "22",
"max": "38",
"min": "0",
"shortDesc": "Ligne 4 de la norme adresse postale du point de service des prestations",
"type": "Alphanumérique"
},
"23": {
"id": "23",
"max": "38",
"min": "0",
"shortDesc": "Ligne 5 de la norme adresse postale du point de service des prestations",
"type": "Alphanumérique"
},
"24": {
"id": "24",
"max": "5",
"min": "5",
"shortDesc": "Code postal ou code cedex du point de service des prestations",
"type": "Numérique"
},
"25": {
"id": "25",
"max": "32",
"min": "0",
"shortDesc": "Localité de destination ou libellé cedex du point de service des prestations",
"type": "Numérique"
},
"26": {
"id": "26",
"max": "2",
"min": "2",
"shortDesc": "Pays de service des prestations",
"type": "Alphanumérique"
},
"27": {
"id": "27",
"max": "38",
"min": "0",
"shortDesc": "Ligne 2 de la norme adresse postale du destinataire de la facture",
"type": "Alphanumérique"
},
"28": {
"id": "28",
"max": "38",
"min": "0",
"shortDesc": "Ligne 3 de la norme adresse postale du destinataire de la facture",
"type": "Alphanumérique"
},
"29": {
"id": "29",
"max": "38",
"min": "0",
"shortDesc": "Ligne 4 de la norme adresse postale du destinataire de la facture",
"type": "Alphanumérique"
},
"2A": {
"id": "2A",
"max": "38",
"min": "0",
"shortDesc": "Ligne 5 de la norme adresse postale du destinataire de la facture",
"type": "Alphanumérique"
},
"2B": {
"id": "2B",
"max": "5",
"min": "5",
"shortDesc": "Code postal ou code cedex du destinataire de la facture",
"type": "Numérique"
},
"2C": {
"id": "2C",
"max": "32",
"min": "0",
"shortDesc": "Localité de destination ou libellé cedex du destinataire de la facture",
"type": "Numérique"
},
"2D": {
"id": "2D",
"max": "2",
"min": "2",
"shortDesc": "Pays du destinataire de la facture",
"type": "Alphanumérique"
},
"30": {
"id": "30",
"max": "140",
"min": "0",
"shortDesc": "Qualité Nom et Prénom.",
"type": "Alphanumérique"
},
"31": {
"id": "31",
"max": "38",
"min": "14",
"shortDesc": "Code IBAN",
"type": "Alphanumérique"
},
"32": {
"id": "32",
"max": "11",
"min": "8",
"shortDesc": "Code BIC/SWIFT",
"type": "Alphanumérique"
},
"33": {
"id": "33",
"max": "30",
"min": "0",
"shortDesc": "Code BBAN",
"type": "Alphanumérique"
},
"34": {
"id": "34",
"max": "2",
"min": "2",
"shortDesc": "Pays de localisation du compte",
"type": "Alphanumérique"
},
"35": {
"id": "35",
"max": "34",
"min": "14",
"shortDesc": "Identifiant SEPAmail (QXBAN)",
"type": "Alphanumérique"
},
"36": {
"id": "36",
"max": "4",
"min": "4",
"shortDesc": "Date de début de période",
"type": "Alphanumérique"
},
"37": {
"id": "37",
"max": "4",
"min": "4",
"shortDesc": "Date de fin de période",
"type": "Alphanumérique"
},
"38": {
"id": "38",
"max": "11",
"min": "0",
"shortDesc": "Solde compte début de période",
"type": "Numérique"
},
"39": {
"id": "39",
"max": "11",
"min": "0",
"shortDesc": "Solde compte fin de période",
"type": "Numérique"
},
"40": {
"id": "40",
"max": "13",
"min": "13",
"shortDesc": "Numéro fiscal",
"type": "Numérique"
},
"41": {
"id": "41",
"max": "Aucune",
"min": "0",
"shortDesc": "Revenu fiscal de référence",
"type": "Numérique"
},
"42": {
"id": "42",
"max": "Aucune",
"min": "0",
"shortDesc": "Situation du foyer",
"type": "Alphanumérique"
},
"43": {
"id": "43",
"max": "Aucune",
"min": "0",
"shortDesc": "Nombre de parts",
"type": "Numérique"
},
"44": {
"id": "44",
"max": "13",
"min": "13",
"shortDesc": "Référence d’avis d’impôt",
"type": "Alphanumérique"
},
"50": {
"id": "50",
"max": "14",
"min": "14",
"shortDesc": "SIRET de l’employeur",
"type": "Numérique"
},
"51": {
"id": "51",
"max": "6",
"min": "6",
"shortDesc": "Nombre d’heures travaillées",
"type": "Numérique"
},
"52": {
"id": "52",
"max": "7",
"min": "7",
"shortDesc": "Cumul du nombre d’heures travaillées",
"type": "Numérique"
},
"53": {
"id": "53",
"max": "4",
"min": "4",
"shortDesc": "Début de période",
"type": "Alphanumérique"
},
"54": {
"id": "54",
"max": "4",
"min": "4",
"shortDesc": "Fin de période",
"type": "Alphanumérique"
},
"55": {
"id": "55",
"max": "8",
"min": "8",
"shortDesc": "Date de début de contrat",
"type": "Numérique"
},
"56": {
"id": "56",
"max": "4",
"min": "4",
"shortDesc": "Date de fin de contrat",
"type": "Alphanumérique"
},
"57": {
"id": "57",
"max": "8",
"min": "8",
"shortDesc": "Date de signature du contrat",
"type": "Numérique"
},
"58": {
"id": "58",
"max": "11",
"min": "0",
"shortDesc": "Salaire net imposable",
"type": "Numérique"
},
"59": {
"id": "59",
"max": "12",
"min": "0",
"shortDesc": "Cumul du salaire net imposable",
"type": "Numérique"
},
"5A": {
"id": "5A",
"max": "11",
"min": "0",
"shortDesc": "Salaire brut du mois",
"type": "Numérique"
},
"5B": {
"id": "5B",
"max": "12",
"min": "0",
"shortDesc": "Cumul du salaire brut",
"type": "Numérique"
},
"5C": {
"id": "5C",
"max": "11",
"min": "0",
"shortDesc": "Salaire net",
"type": "Numérique"
},
"5D": {
"id": "5D",
"max": "38",
"min": "0",
"shortDesc": "Ligne 2 de la norme adresse postale de l’employeur",
"type": "Alphanumérique"
},
"5E": {
"id": "5E",
"max": "38",
"min": "0",
"shortDesc": "Ligne 3 de la norme adresse postale de l’employeur",
"type": "Alphanumérique"
},
"5F": {
"id": "5F",
"max": "38",
"min": "0",
"shortDesc": "Ligne 4 de la norme adresse postale de l’employeur",
"type": "Alphanumérique"
},
"5G": {
"id": "5G",
"max": "38",
"min": "0",
"shortDesc": "Ligne 5 de la norme adresse postale de l’employeur",
"type": "Alphanumérique"
},
"5H": {
"id": "5H",
"max": "5",
"min": "5",
"shortDesc": "Code postal ou code cedex de l’employeur",
"type": "Numérique"
},
"5I": {
"id": "5I",
"max": "32",
"min": "0",
"shortDesc": "Localité de destination ou libellé cedex de l’employeur",
"type": "Alphanumérique"
},
"5J": {
"id": "5J",
"max": "2",
"min": "2",
"shortDesc": "Pays de l’employeur",
"type": "Alphanumérique"
},
"5K": {
"id": "5K",
"max": "50",
"min": "0",
"shortDesc": "Identifiant Cotisant Prestations Sociales",
"type": "Alphanumérique"
},
"60": {
"id": "60",
"max": "60",
"min": "0",
"shortDesc": "Liste des prénoms",
"type": "Alphanumérique"
},
"61": {
"id": "61",
"max": "20",
"min": "0",
"shortDesc": "Prénom",
"type": "Alphanumérique"
},
"62": {
"id": "62",
"max": "38",
"min": "0",
"shortDesc": "Nom patronymique",
"type": "Alphanumérique"
},
"63": {
"id": "63",
"max": "38",
"min": "0",
"shortDesc": "Nom d’usage",
"type": "Alphanumérique"
},
"64": {
"id": "64",
"max": "38",
"min": "0",
"shortDesc": "Nom d’épouse/époux",
"type": "Alphanumérique"
},
"65": {
"id": "65",
"max": "2",
"min": "2",
"shortDesc": "Type de pièce d’identité",
"type": "de pièce d’identitéTaille Min. 2 Taille Max. 2 Type Alphanumérique"
},
"66": {
"id": "66",
"max": "20",
"min": "0",
"shortDesc": "Numéro de la pièce d’identité",
"type": "Alphanumérique"
},
"67": {
"id": "67",
"max": "2",
"min": "2",
"shortDesc": "Nationalité",
"type": "Alphanumérique"
},
"68": {
"id": "68",
"max": "1",
"min": "1",
"shortDesc": "Genre",
"type": "Alphanumérique"
},
"69": {
"id": "69",
"max": "8",
"min": "8",
"shortDesc": "Date de naissance",
"type": "Numérique"
},
"6A": {
"id": "6A",
"max": "32",
"min": "0",
"shortDesc": "Lieu de naissance",
"type": "Alphanumérique"
},
"6B": {
"id": "6B",
"max": "3",
"min": "3",
"shortDesc": "Département du bureau émetteur",
"type": "Alphanumérique"
},
"6C": {
"id": "6C",
"max": "2",
"min": "2",
"shortDesc": "Pays de naissance",
"type": "Alphanumérique"
},
"6D": {
"id": "6D",
"max": "60",
"min": "0",
"shortDesc": "Nom et prénom du père",
"type": "Alphanumérique"
},
"6E": {
"id": "6E",
"max": "60",
"min": "0",
"shortDesc": "Nom et prénom de la mère",
"type": "Alphanumérique"
},
"6F": {
"id": "6F",
"max": "90",
"min": "0",
"shortDesc": "Machine Readable Zone (Zone de Lecture Automatique, ZLA)",
"type": "Alphanumérique"
},
"6G": {
"id": "6G",
"max": "38",
"min": "1",
"shortDesc": "Nom",
"type": "Alphanumérique"
},
"6H": {
"id": "6H",
"max": "10",
"min": "1",
"shortDesc": "Civilité",
"type": "Alphanumérique"
},
"6I": {
"id": "6I",
"max": "2",
"min": "2",
"shortDesc": "Pays émetteur",
"type": "Alphanumérique"
},
"70": {
"id": "70",
"max": "12",
"min": "12",
"shortDesc": "Date et heure du décès",
"type": "Numérique"
},
"71": {
"id": "71",
"max": "12",
"min": "12",
"shortDesc": "Date et heure du constat de décès",
"type": "Numérique"
},
"72": {
"id": "72",
"max": "38",
"min": "0",
"shortDesc": "Nom du défunt",
"type": "Alphanumérique"
},
"73": {
"id": "73",
"max": "60",
"min": "0",
"shortDesc": "Prénoms du défunt",
"type": "Alphanumérique"
},
"74": {
"id": "74",
"max": "38",
"min": "0",
"shortDesc": "Nom de jeune fille du défunt",
"type": "Alphanumérique"
},
"75": {
"id": "75",
"max": "8",
"min": "8",
"shortDesc": "Date de naissance du défunt",
"type": "Numérique"
},
"76": {
"id": "76",
"max": "1",
"min": "1",
"shortDesc": "Genre du défunt",
"type": "Alphanumérique"
},
"77": {
"id": "77",
"max": "45",
"min": "0",
"shortDesc": "Commune de décès",
"type": "Alphanumérique"
},
"78": {
"id": "78",
"max": "5",
"min": "5",
"shortDesc": "Code postal de la commune de décès",
"type": "Numérique"
},
"79": {
"id": "79",
"max": "114",
"min": "0",
"shortDesc": "Adresse du domicile du défunt",
"type": "Alphanumérique"
},
"7A": {
"id": "7A",
"max": "5",
"min": "5",
"shortDesc": "Code postal du domicile du défunt",
"type": "Numérique"
},
"7B": {
"id": "7B",
"max": "45",
"min": "0",
"shortDesc": "Commune du domicile du défunt",
"type": "Alphanumérique"
},
"7C": {
"id": "7C",
"max": "1",
"min": "1",
"shortDesc": "Obstacle médico-légal",
"type": "Numérique"
},
"7D": {
"id": "7D",
"max": "1",
"min": "1",
"shortDesc": "Mise en bière",
"type": "Alphanumérique"
},
"7E": {
"id": "7E",
"max": "1",
"min": "1",
"shortDesc": "Obstacle aux soins de conservation",
"type": "Numérique"
},
"7F": {
"id": "7F",
"max": "1",
"min": "1",
"shortDesc": "Obstacle aux dons du corps",
"type": "Numérique"
},
"7G": {
"id": "7G",
"max": "1",
"min": "1",
"shortDesc": "Recherche de la cause du décès",
"type": "Numérique"
},
"7H": {
"id": "7H",
"max": "2",
"min": "2",
"shortDesc": "Délai de transport du corps",
"type": "Alphanumérique"
},
"7I": {
"id": "7I",
"max": "1",
"min": "1",
"shortDesc": "Prothèse avec pile",
"type": "Numérique"
},
"7J": {
"id": "7J",
"max": "1",
"min": "1",
"shortDesc": "Retrait de la pile de prothèse",
"type": "Numérique"
},
"7K": {
"id": "7K",
"max": "13",
"min": "13",
"shortDesc": "Code NNC",
"type": "Alphanumérique"
},
"7L": {
"id": "7L",
"max": "9",
"min": "9",
"shortDesc": "Code Finess de l'organisme agréé",
"type": "Alphanumérique"
},
"7M": {
"id": "7M",
"max": "64",
"min": "0",
"shortDesc": "Identification du médecin",
"type": "Alphanumérique"
},
"7N": {
"id": "7N",
"max": "128",
"min": "0",
"shortDesc": "Lieu de validation du certificat de décès",
"type": "Alphanumérique"
},
"7O": {
"id": "7O",
"max": "1",
"min": "1",
"shortDesc": "Certificat de décès supplémentaire",
"type": "Numérique"
},
"80": {
"id": "80",
"max": "38",
"min": "0",
"shortDesc": "Nom",
"type": "Alphanumérique"
},
"81": {
"id": "81",
"max": "60",
"min": "0",
"shortDesc": "Prénoms",
"type": "Alphanumérique"
},
"82": {
"id": "82",
"max": "20",
"min": "0",
"shortDesc": "Numéro de carte",
"type": "Alphanumérique"
},
"83": {
"id": "83",
"max": "40",
"min": "0",
"shortDesc": "Organisme de tutelle",
"type": "Alphanumérique"
},
"84": {
"id": "84",
"max": "40",
"min": "0",
"shortDesc": "Profession",
"type": "Alphanumérique"
},
"90": {
"id": "90",
"max": "38",
"min": "0",
"shortDesc": "Identité de l'huissier de justice",
"type": "Alphanumérique"
},
"91": {
"id": "91",
"max": "38",
"min": "0",
"shortDesc": "Identité ou raison sociale du demandeur",
"type": "Alphanumérique"
},
"92": {
"id": "92",
"max": "38",
"min": "0",
"shortDesc": "Identité ou raison sociale du destinataire",
"type": "Alphanumérique"
},
"93": {
"id": "93",
"max": "38",
"min": "0",
"shortDesc": "Identité ou raison sociale de tiers concerné",
"type": "Alphanumérique"
},
"94": {
"id": "94",
"max": "38",
"min": "0",
"shortDesc": "Intitulé de l'acte",
"type": "Alphanumérique"
},
"95": {
"id": "95",
"max": "18",
"min": "0",
"shortDesc": "Numéro de l'acte",
"type": "Alphanumérique"
},
"96": {
"id": "96",
"max": "8",
"min": "8",
"shortDesc": "Date de signature de l'acte",
"type": "Numérique"
},
"A0": {
"id": "A0",
"max": "2",
"min": "2",
"shortDesc": "Pays ayant émis l’immatriculation du véhicule.",
"type": "Alphanumérique"
},
"A1": {
"id": "A1",
"max": "17",
"min": "0",
"shortDesc": "Immatriculation du véhicule",
"type": "Alphanumérique"
},
"A2": {
"id": "A2",
"max": "17",
"min": "0",
"shortDesc": "Marque du véhicule.",
"type": "Alphanumérique"
},
"A3": {
"id": "A3",
"max": "17",
"min": "0",
"shortDesc": "Nom commercial du véhicule.",
"type": "Alphanumérique"
},
"A4": {
"id": "A4",
"max": "17",
"min": "17",
"shortDesc": "Numéro de série du véhicule (VIN).",
"type": "Alphanumérique"
},
"A5": {
"id": "A5",
"max": "3",
"min": "3",
"shortDesc": "Catégorie du véhicule.",
"type": "Alphanumérique"
},
"A6": {
"id": "A6",
"max": "2",
"min": "2",
"shortDesc": "Carburant.",
"type": "Alphanumérique"
},
"A7": {
"id": "A7",
"max": "3",
"min": "3",
"shortDesc": "Taux d’émission de CO2 du véhicule (en g/km).",
"type": "Alphanumérique"
},
"A8": {
"id": "A8",
"max": "12",
"min": "0",
"shortDesc": "Indication de la classe environnementale de réception CE.",
"type": "Alphanumérique"
},
"A9": {
"id": "A9",
"max": "3",
"min": "3",
"shortDesc": "Classe d’émission polluante.",
"type": "Alphanumérique"
},
"AA": {
"id": "AA",
"max": "8",
"min": "8",
"shortDesc": "Date de première immatriculation du véhicule.",
"type": "Numérique"
},
"AB": {
"id": "AB",
"max": "8",
"min": "0",
"shortDesc": "Type de lettre",
"type": "de lettreTaille Min. 0 Taille Max. 8 Type Alphanumérique"
},
"AC": {
"id": "AC",
"max": "19",
"min": "0",
"shortDesc": "N° Dossier",
"type": "Alphanumérique"
},
"AD": {
"id": "AD",
"max": "4",
"min": "4",
"shortDesc": "Date Infraction",
"type": "Alphanumérique"
},
"AE": {
"id": "AE",
"max": "4",
"min": "4",
"shortDesc": "Heure de l’infraction",
"type": "Numérique"
},
"AF": {
"id": "AF",
"max": "1",
"min": "1",
"shortDesc": "Nombre de points retirés lors de l’infraction",
"type": "Alphanumérique"
},
"AG": {
"id": "AG",
"max": "1",
"min": "1",
"shortDesc": "Solde de points",
"type": "Alphanumérique"
},
"AH": {
"id": "AH",
"max": "30",
"min": "0",
"shortDesc": "Numéro de la carte",
"type": "Alphanumérique"
},
"AI": {
"id": "AI",
"max": "8",
"min": "8",
"shortDesc": "Date d’expiration initiale",
"type": "Numérique"
},
"AJ": {
"id": "AJ",
"max": "13",
"min": "13",
"shortDesc": "Numéro EVTC",
"type": "Alphanumérique"
},
"AK": {
"id": "AK",
"max": "7",
"min": "7",
"shortDesc": "Numéro de macaron",
"type": "Numérique"
},
"AL": {
"id": "AL",
"max": "11",
"min": "11",
"shortDesc": "Numéro de la carte",
"type": "Alphanumérique"
},
"AM": {
"id": "AM",
"max": "5",
"min": "0",
"shortDesc": "Motif de sur-classement",
"type": "Alphanumérique"
},
"B0": {
"id": "B0",
"max": "60",
"min": "0",
"shortDesc": "Liste des prénoms",
"type": "Alphanumérique"
},
"B1": {
"id": "B1",
"max": "20",
"min": "0",
"shortDesc": "Prénom",
"type": "Alphanumérique"
},
"B2": {
"id": "B2",
"max": "38",
"min": "0",
"shortDesc": "Nom patronymique",
"type": "Alphanumérique"
},
"B3": {
"id": "B3",
"max": "38",
"min": "0",
"shortDesc": "Nom d'usage",
"type": "Alphanumérique"
},
"B4": {
"id": "B4",
"max": "38",
"min": "0",
"shortDesc": "Nom d'épouse/époux",
"type": "Alphanumérique"
},
"B5": {
"id": "B5",
"max": "2",
"min": "2",
"shortDesc": "Nationalité",
"type": "Alphanumérique"
},
"B6": {
"id": "B6",
"max": "1",
"min": "1",
"shortDesc": "Genre",
"type": "Alphanumérique"
},
"B7": {
"id": "B7",
"max": "8",
"min": "8",
"shortDesc": "Date de naissance",
"type": "Numérique"
},
"B8": {
"id": "B8",
"max": "32",
"min": "0",
"shortDesc": "Lieu de naissance",
"type": "Alphanumérique"
},
"B9": {
"id": "B9",
"max": "2",
"min": "2",
"shortDesc": "Pays de naissance",
"type": "Alphanumérique"
},
"BA": {
"id": "BA",
"max": "1",
"min": "1",
"shortDesc": "Mention obtenue",
"type": "Numérique"
},
"BB": {
"id": "BB",
"max": "50",
"min": "0",
"shortDesc": "Numéro ou code d'identification de l'étudiant",
"type": "Alphanumérique"
},
"BC": {
"id": "BC",
"max": "20",
"min": "0",
"shortDesc": "Numéro du diplôme",
"type": "Alphanumérique"
},
"BD": {
"id": "BD",
"max": "1",
"min": "1",
"shortDesc": "Niveau du diplôme selon la classification CEC",
"type": "Numérique"
},
"BE": {
"id": "BE",
"max": "3",
"min": "3",
"shortDesc": "Crédits ECTS obtenus",
"type": "Numérique"
},
"BF": {
"id": "BF",
"max": "3",
"min": "3",
"shortDesc": "Année universitaire",
"type": "Numérique"
},
"BG": {
"id": "BG",
"max": "2",
"min": "2",
"shortDesc": "Type de diplôme",
"type": "Alphanumérique"
},
"BH": {
"id": "BH",
"max": "30",
"min": "0",
"shortDesc": "Domaine",
"type": "Alphanumérique"
},
"BI": {
"id": "BI",
"max": "30",
"min": "0",
"shortDesc": "Mention",
"type": "Alphanumérique"
},
"BJ": {
"id": "BJ",
"max": "30",
"min": "0",
"shortDesc": "Spécialité",
"type": "Alphanumérique"
},
"BK": {
"id": "BK",
"max": "14",
"min": "14",
"shortDesc": "Numéro de l'Attestation de versement de la CVE",
"type": "Alphanumérique"
}
}
` | pkg/spec/spec_data_identifier.go | 0.501953 | 0.558086 | spec_data_identifier.go | starcoder |
package day17
import (
	"fmt"
	"math"
	"strings"
)
// PixelType identifies what kind of content occupies a pixel; it is
// used as an index into PixelChars when rendering.
type PixelType int

// Pixel is an integer 2D coordinate on the screen.
type Pixel struct {
	X int
	Y int
}

// NewPixel constructs a Pixel at the given coordinates.
func NewPixel(x, y int) Pixel {
	var p Pixel
	p.X = x
	p.Y = y
	return p
}

// Pixels maps screen coordinates to the type drawn at each position.
type Pixels map[Pixel]PixelType

// CountByType tallies how many pixels of each PixelType are present.
func (pixels Pixels) CountByType() map[PixelType]int {
	tally := make(map[PixelType]int)
	for _, t := range pixels {
		tally[t]++
	}
	return tally
}

// Screen holds the pixels drawn so far.
type Screen struct {
	Pixels Pixels
}

// NewScreen creates a Screen with an empty pixel map.
func NewScreen() *Screen {
	return &Screen{Pixels: make(Pixels)}
}
// Draw renders the screen as a multi-line string, one character per
// pixel, using PixelChars to translate pixel types. Entries in
// overrides take precedence over the screen's own pixels. The rendered
// bounds always include the origin because min/max start from zero,
// matching the original behavior.
func (screen *Screen) Draw(overrides map[Pixel]PixelType) string {
	var minX, minY, maxX, maxY int
	for pixel := range screen.Pixels {
		if pixel.X > maxX {
			maxX = pixel.X
		}
		if pixel.X < minX {
			minX = pixel.X
		}
		if pixel.Y > maxY {
			maxY = pixel.Y
		}
		if pixel.Y < minY {
			minY = pixel.Y
		}
	}
	// Accumulate into a strings.Builder: repeated string += in the
	// original was quadratic in the screen area.
	var sb strings.Builder
	for y := minY; y <= maxY; y++ {
		for x := minX; x <= maxX; x++ {
			curPixel := NewPixel(x, y)
			pixelType, exists := overrides[curPixel]
			if !exists {
				pixelType = screen.Pixels[curPixel]
			}
			sb.WriteString(PixelChars[pixelType])
		}
		sb.WriteByte('\n')
	}
	return sb.String()
}
// Computer is an Intcode virtual machine: a program image in memory,
// an instruction pointer, a relative base, and channels for I/O and
// completion signalling.
type Computer struct {
	memory         []int     // program image plus scratch space
	Input          chan int  // values consumed by opcode 3
	PromptForInput chan bool // signalled just before reading Input
	Output         chan int  // values produced by opcode 4
	Done           chan bool // signalled once on halt (opcode 99)
	relativeBase   int       // base address for mode-2 operands
	pos            int       // instruction pointer
	Iterations     int       // count of instructions executed
}

// NewComputer builds a Computer whose memory is a 100000-cell image
// initialized from initMemory, starting at initPos with the given
// relative base and I/O channels.
func NewComputer(initMemory []int, initPos int, initRelative int, input chan int, promptForInput chan bool, output chan int, done chan bool) *Computer {
	mem := make([]int, 100000)
	copy(mem, initMemory)
	c := &Computer{
		memory:         mem,
		Input:          input,
		PromptForInput: promptForInput,
		Output:         output,
		Done:           done,
		relativeBase:   initRelative,
		pos:            initPos,
	}
	return c
}
// Run executes the Intcode program until it halts (opcode 99) or hits
// an unknown opcode. On halt it closes Output, signals Done, and
// returns nil; an unknown opcode returns an error.
//
// The original implementation ended each instruction with
// `return c.Run()`; Go performs no tail-call elimination, so the stack
// grew with every instruction executed. A plain loop is equivalent and
// bounded.
func (c *Computer) Run() error {
	for {
		c.Iterations++
		op := c.memory[c.pos] % 100
		switch op {
		case 1: // add
			c.memory[c.ArgPos(3)] = c.Arg(1) + c.Arg(2)
			c.pos += 4
		case 2: // multiply
			c.memory[c.ArgPos(3)] = c.Arg(1) * c.Arg(2)
			c.pos += 4
		case 3: // read input (prompt first so the driver knows to send)
			c.PromptForInput <- true
			c.memory[c.ArgPos(1)] = <-c.Input
			c.pos += 2
		case 4: // write output
			c.Output <- c.Arg(1)
			c.pos += 2
		case 5: // jump-if-true
			if c.Arg(1) != 0 {
				c.pos = c.Arg(2)
			} else {
				c.pos += 3
			}
		case 6: // jump-if-false
			if c.Arg(1) == 0 {
				c.pos = c.Arg(2)
			} else {
				c.pos += 3
			}
		case 7: // less-than
			if c.Arg(1) < c.Arg(2) {
				c.memory[c.ArgPos(3)] = 1
			} else {
				c.memory[c.ArgPos(3)] = 0
			}
			c.pos += 4
		case 8: // equals
			if c.Arg(1) == c.Arg(2) {
				c.memory[c.ArgPos(3)] = 1
			} else {
				c.memory[c.ArgPos(3)] = 0
			}
			c.pos += 4
		case 9: // adjust relative base
			c.relativeBase += c.Arg(1)
			c.pos += 2
		case 99: // halt
			fmt.Println("halting!")
			close(c.Output)
			c.Done <- true
			return nil
		default:
			return fmt.Errorf("bad operation")
		}
	}
}
// ArgPos resolves the memory address of the arg-th operand of the
// current instruction according to its parameter mode digit:
// 1 = immediate (the operand cell itself), 2 = relative (offset from
// the relative base), anything else = position (indirect).
func (c *Computer) ArgPos(arg int) int {
	flags := c.memory[c.pos] / 100
	// Extract the single decimal digit governing this operand.
	mode := (flags / int(math.Pow10(arg-1))) % 10
	switch mode {
	case 1:
		return c.pos + arg
	case 2:
		return c.relativeBase + c.memory[c.pos+arg]
	default:
		return c.memory[c.pos+arg]
	}
}
// Arg returns the value of the arg-th operand of the current
// instruction, after mode resolution by ArgPos.
func (c *Computer) Arg(arg int) int {
	return c.memory[c.ArgPos(arg)]
}
package catalog
// GetDatasetQueryParams represents valid query parameters for the GetDataset operation
// For convenience GetDatasetQueryParams can be formed in a single statement, for example:
// `v := GetDatasetQueryParams{}.SetMaxstale(...)`
type GetDatasetQueryParams struct {
	// Maxstale : The number of seconds beyond which we will refresh index metadata.
	Maxstale *int32 `key:"maxstale"`
}

// SetMaxstale sets the Maxstale field and returns the modified query params for chaining.
func (q GetDatasetQueryParams) SetMaxstale(v int32) GetDatasetQueryParams {
	q.Maxstale = &v
	return q
}
// GetDatasetByIdQueryParams represents valid query parameters for the GetDatasetById operation
// For convenience GetDatasetByIdQueryParams can be formed in a single statement, for example:
// `v := GetDatasetByIdQueryParams{}.SetMaxstale(...)`
type GetDatasetByIdQueryParams struct {
	// Maxstale : The number of seconds beyond which we will refresh index metadata.
	Maxstale *int32 `key:"maxstale"`
}

// SetMaxstale sets the Maxstale field and returns the modified query params for chaining.
func (q GetDatasetByIdQueryParams) SetMaxstale(v int32) GetDatasetByIdQueryParams {
	q.Maxstale = &v
	return q
}
// ListActionsForRuleQueryParams represents valid query parameters for the ListActionsForRule operation
// For convenience ListActionsForRuleQueryParams can be formed in a single statement, for example:
// `v := ListActionsForRuleQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListActionsForRuleQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListActionsForRuleQueryParams) SetCount(v int32) ListActionsForRuleQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListActionsForRuleQueryParams) SetFilter(v string) ListActionsForRuleQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListActionsForRuleQueryParams) SetOffset(v int32) ListActionsForRuleQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListActionsForRuleQueryParams) SetOrderby(v []string) ListActionsForRuleQueryParams {
	q.Orderby = v
	return q
}
// ListActionsForRuleByIdQueryParams represents valid query parameters for the ListActionsForRuleById operation
// For convenience ListActionsForRuleByIdQueryParams can be formed in a single statement, for example:
// `v := ListActionsForRuleByIdQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListActionsForRuleByIdQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListActionsForRuleByIdQueryParams) SetCount(v int32) ListActionsForRuleByIdQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListActionsForRuleByIdQueryParams) SetFilter(v string) ListActionsForRuleByIdQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListActionsForRuleByIdQueryParams) SetOffset(v int32) ListActionsForRuleByIdQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListActionsForRuleByIdQueryParams) SetOrderby(v []string) ListActionsForRuleByIdQueryParams {
	q.Orderby = v
	return q
}
// ListAnnotationsQueryParams represents valid query parameters for the ListAnnotations operation
// For convenience ListAnnotationsQueryParams can be formed in a single statement, for example:
// `v := ListAnnotationsQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListAnnotationsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListAnnotationsQueryParams) SetCount(v int32) ListAnnotationsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListAnnotationsQueryParams) SetFilter(v string) ListAnnotationsQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListAnnotationsQueryParams) SetOffset(v int32) ListAnnotationsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListAnnotationsQueryParams) SetOrderby(v []string) ListAnnotationsQueryParams {
	q.Orderby = v
	return q
}
// ListAnnotationsForDashboardByIdQueryParams represents valid query parameters for the ListAnnotationsForDashboardById operation
// For convenience ListAnnotationsForDashboardByIdQueryParams can be formed in a single statement, for example:
// `v := ListAnnotationsForDashboardByIdQueryParams{}.SetFilter(...)`
type ListAnnotationsForDashboardByIdQueryParams struct {
	// Filter : A filter query to apply to the annotations.
	Filter string `key:"filter"`
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListAnnotationsForDashboardByIdQueryParams) SetFilter(v string) ListAnnotationsForDashboardByIdQueryParams {
	q.Filter = v
	return q
}
// ListAnnotationsForDashboardByResourceNameQueryParams represents valid query parameters for the ListAnnotationsForDashboardByResourceName operation
// For convenience ListAnnotationsForDashboardByResourceNameQueryParams can be formed in a single statement, for example:
// `v := ListAnnotationsForDashboardByResourceNameQueryParams{}.SetFilter(...)`
type ListAnnotationsForDashboardByResourceNameQueryParams struct {
	// Filter : A filter query to apply to the annotations.
	Filter string `key:"filter"`
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListAnnotationsForDashboardByResourceNameQueryParams) SetFilter(v string) ListAnnotationsForDashboardByResourceNameQueryParams {
	q.Filter = v
	return q
}
// ListAnnotationsForDatasetByIdQueryParams represents valid query parameters for the ListAnnotationsForDatasetById operation
// For convenience ListAnnotationsForDatasetByIdQueryParams can be formed in a single statement, for example:
// `v := ListAnnotationsForDatasetByIdQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListAnnotationsForDatasetByIdQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByIdQueryParams) SetCount(v int32) ListAnnotationsForDatasetByIdQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByIdQueryParams) SetFilter(v string) ListAnnotationsForDatasetByIdQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByIdQueryParams) SetOffset(v int32) ListAnnotationsForDatasetByIdQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByIdQueryParams) SetOrderby(v []string) ListAnnotationsForDatasetByIdQueryParams {
	q.Orderby = v
	return q
}
// ListAnnotationsForDatasetByResourceNameQueryParams represents valid query parameters for the ListAnnotationsForDatasetByResourceName operation
// For convenience ListAnnotationsForDatasetByResourceNameQueryParams can be formed in a single statement, for example:
// `v := ListAnnotationsForDatasetByResourceNameQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListAnnotationsForDatasetByResourceNameQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByResourceNameQueryParams) SetCount(v int32) ListAnnotationsForDatasetByResourceNameQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByResourceNameQueryParams) SetFilter(v string) ListAnnotationsForDatasetByResourceNameQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByResourceNameQueryParams) SetOffset(v int32) ListAnnotationsForDatasetByResourceNameQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListAnnotationsForDatasetByResourceNameQueryParams) SetOrderby(v []string) ListAnnotationsForDatasetByResourceNameQueryParams {
	q.Orderby = v
	return q
}
// ListDashboardsQueryParams represents valid query parameters for the ListDashboards operation
// For convenience ListDashboardsQueryParams can be formed in a single statement, for example:
// `v := ListDashboardsQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListDashboardsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListDashboardsQueryParams) SetCount(v int32) ListDashboardsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListDashboardsQueryParams) SetFilter(v string) ListDashboardsQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListDashboardsQueryParams) SetOffset(v int32) ListDashboardsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListDashboardsQueryParams) SetOrderby(v []string) ListDashboardsQueryParams {
	q.Orderby = v
	return q
}
// ListDatasetsQueryParams represents valid query parameters for the ListDatasets operation
// For convenience ListDatasetsQueryParams can be formed in a single statement, for example:
// `v := ListDatasetsQueryParams{}.SetCount(...).SetFilter(...).SetMaxstale(...).SetOffset(...).SetOrderby(...)`
type ListDatasetsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the dataset list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Maxstale : The number of seconds beyond which we will refresh index metadata.
	Maxstale *int32 `key:"maxstale"`
	// Offset : The number of results to skip before the first result is returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc\". Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListDatasetsQueryParams) SetCount(v int32) ListDatasetsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListDatasetsQueryParams) SetFilter(v string) ListDatasetsQueryParams {
	q.Filter = v
	return q
}

// SetMaxstale sets the Maxstale field and returns the modified query params for chaining.
func (q ListDatasetsQueryParams) SetMaxstale(v int32) ListDatasetsQueryParams {
	q.Maxstale = &v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListDatasetsQueryParams) SetOffset(v int32) ListDatasetsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListDatasetsQueryParams) SetOrderby(v []string) ListDatasetsQueryParams {
	q.Orderby = v
	return q
}
// ListFieldsQueryParams represents valid query parameters for the ListFields operation
// For convenience ListFieldsQueryParams can be formed in a single statement, for example:
// `v := ListFieldsQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListFieldsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the dataset list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListFieldsQueryParams) SetCount(v int32) ListFieldsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListFieldsQueryParams) SetFilter(v string) ListFieldsQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListFieldsQueryParams) SetOffset(v int32) ListFieldsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListFieldsQueryParams) SetOrderby(v []string) ListFieldsQueryParams {
	q.Orderby = v
	return q
}
// ListFieldsForDatasetQueryParams represents valid query parameters for the ListFieldsForDataset operation
// For convenience ListFieldsForDatasetQueryParams can be formed in a single statement, for example:
// `v := ListFieldsForDatasetQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListFieldsForDatasetQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the dataset list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListFieldsForDatasetQueryParams) SetCount(v int32) ListFieldsForDatasetQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListFieldsForDatasetQueryParams) SetFilter(v string) ListFieldsForDatasetQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListFieldsForDatasetQueryParams) SetOffset(v int32) ListFieldsForDatasetQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListFieldsForDatasetQueryParams) SetOrderby(v []string) ListFieldsForDatasetQueryParams {
	q.Orderby = v
	return q
}
// ListFieldsForDatasetByIdQueryParams represents valid query parameters for the ListFieldsForDatasetById operation
// For convenience ListFieldsForDatasetByIdQueryParams can be formed in a single statement, for example:
// `v := ListFieldsForDatasetByIdQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListFieldsForDatasetByIdQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the dataset list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListFieldsForDatasetByIdQueryParams) SetCount(v int32) ListFieldsForDatasetByIdQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListFieldsForDatasetByIdQueryParams) SetFilter(v string) ListFieldsForDatasetByIdQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListFieldsForDatasetByIdQueryParams) SetOffset(v int32) ListFieldsForDatasetByIdQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListFieldsForDatasetByIdQueryParams) SetOrderby(v []string) ListFieldsForDatasetByIdQueryParams {
	q.Orderby = v
	return q
}
// ListModulesQueryParams represents valid query parameters for the ListModules operation
// For convenience ListModulesQueryParams can be formed in a single statement, for example:
// `v := ListModulesQueryParams{}.SetFilter(...)`
type ListModulesQueryParams struct {
	// Filter : A filter to apply to the modules.
	Filter string `key:"filter"`
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListModulesQueryParams) SetFilter(v string) ListModulesQueryParams {
	q.Filter = v
	return q
}
// ListRelationshipsQueryParams represents valid query parameters for the ListRelationships operation
// For convenience ListRelationshipsQueryParams can be formed in a single statement, for example:
// `v := ListRelationshipsQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListRelationshipsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListRelationshipsQueryParams) SetCount(v int32) ListRelationshipsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListRelationshipsQueryParams) SetFilter(v string) ListRelationshipsQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListRelationshipsQueryParams) SetOffset(v int32) ListRelationshipsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListRelationshipsQueryParams) SetOrderby(v []string) ListRelationshipsQueryParams {
	q.Orderby = v
	return q
}
// ListRulesQueryParams represents valid query parameters for the ListRules operation
// For convenience ListRulesQueryParams can be formed in a single statement, for example:
// `v := ListRulesQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListRulesQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListRulesQueryParams) SetCount(v int32) ListRulesQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListRulesQueryParams) SetFilter(v string) ListRulesQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListRulesQueryParams) SetOffset(v int32) ListRulesQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListRulesQueryParams) SetOrderby(v []string) ListRulesQueryParams {
	q.Orderby = v
	return q
}
// ListWorkflowBuildsQueryParams represents valid query parameters for the ListWorkflowBuilds operation
// For convenience ListWorkflowBuildsQueryParams can be formed in a single statement, for example:
// `v := ListWorkflowBuildsQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListWorkflowBuildsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListWorkflowBuildsQueryParams) SetCount(v int32) ListWorkflowBuildsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListWorkflowBuildsQueryParams) SetFilter(v string) ListWorkflowBuildsQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListWorkflowBuildsQueryParams) SetOffset(v int32) ListWorkflowBuildsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListWorkflowBuildsQueryParams) SetOrderby(v []string) ListWorkflowBuildsQueryParams {
	q.Orderby = v
	return q
}
// ListWorkflowRunsQueryParams represents valid query parameters for the ListWorkflowRuns operation
// For convenience ListWorkflowRunsQueryParams can be formed in a single statement, for example:
// `v := ListWorkflowRunsQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListWorkflowRunsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc. Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the Count field and returns the modified query params for chaining.
func (q ListWorkflowRunsQueryParams) SetCount(v int32) ListWorkflowRunsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the Filter field and returns the modified query params for chaining.
func (q ListWorkflowRunsQueryParams) SetFilter(v string) ListWorkflowRunsQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the Offset field and returns the modified query params for chaining.
func (q ListWorkflowRunsQueryParams) SetOffset(v int32) ListWorkflowRunsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the Orderby field and returns the modified query params for chaining.
func (q ListWorkflowRunsQueryParams) SetOrderby(v []string) ListWorkflowRunsQueryParams {
	q.Orderby = v
	return q
}
// ListWorkflowsQueryParams represents valid query parameters for the ListWorkflows operation
// For convenience ListWorkflowsQueryParams can be formed in a single statement, for example:
// `v := ListWorkflowsQueryParams{}.SetCount(...).SetFilter(...).SetOffset(...).SetOrderby(...)`
type ListWorkflowsQueryParams struct {
	// Count : The maximum number of results to return.
	Count *int32 `key:"count"`
	// Filter : A filter to apply to the results list. The filter must be a SPL predicate expression.
	Filter string `key:"filter"`
	// Offset : The number of results to skip before the first one returned.
	Offset *int32 `key:"offset"`
	// Orderby : A list of fields to order the results by. You can specify either ascending or descending order using \"<field> asc\" or \"<field> desc\". Ascending order is the default.
	Orderby []string `key:"orderby"`
}

// SetCount sets the maximum number of results to return (fluent, non-mutating value receiver).
func (q ListWorkflowsQueryParams) SetCount(v int32) ListWorkflowsQueryParams {
	q.Count = &v
	return q
}

// SetFilter sets the SPL predicate filter expression.
func (q ListWorkflowsQueryParams) SetFilter(v string) ListWorkflowsQueryParams {
	q.Filter = v
	return q
}

// SetOffset sets the number of results to skip before the first one returned.
func (q ListWorkflowsQueryParams) SetOffset(v int32) ListWorkflowsQueryParams {
	q.Offset = &v
	return q
}

// SetOrderby sets the list of fields to order the results by.
func (q ListWorkflowsQueryParams) SetOrderby(v []string) ListWorkflowsQueryParams {
	q.Orderby = v
	return q
}
package common
import (
"errors"
"log"
"github.com/EngoEngine/engo"
"github.com/EngoEngine/engo/math"
"github.com/EngoEngine/gl"
)
// spritesheetCache maps a resource URL to the Spritesheet built from it, so
// repeated constructions of the same sheet can be looked up again via
// LoadedSpritesheet.
var spritesheetCache = make(map[string]*Spritesheet)

// Spritesheet is a class that stores a set of tiles from a file, used by tilemaps and animations
type Spritesheet struct {
	texture       *gl.Texture    // The original texture
	width, height float32        // The dimensions of the total texture
	cells         []SpriteRegion // The dimensions of each sprite
	cache         map[int]Texture // The cell cache cells (lazily filled by Cell)
}

// SpriteRegion holds the position data for each sprite on the sheet
type SpriteRegion struct {
	Position      engo.Point // top-left corner of the sprite, in pixels
	Width, Height int        // sprite size, in pixels
}
// LoadedSpritesheet returns a Spritesheet that has already been created by New*
// for the given URL, or an error when no sheet was registered under it.
func LoadedSpritesheet(url string) (*Spritesheet, error) {
	if sheet, ok := spritesheetCache[url]; ok {
		return sheet, nil
	}
	return nil, errors.New("Unable to find Spritesheet at URL " + url)
}
// NewAsymmetricSpritesheetFromTexture creates a new AsymmetricSpriteSheet from a
// TextureResource. The data provided is the location and size of the sprites
func NewAsymmetricSpritesheetFromTexture(tr *TextureResource, spriteRegions []SpriteRegion) *Spritesheet {
	sheet := &Spritesheet{
		texture: tr.Texture,
		width:   tr.Width,
		height:  tr.Height,
		cells:   spriteRegions,
		cache:   make(map[int]Texture),
	}
	// register the sheet so LoadedSpritesheet can find it again by URL
	spritesheetCache[tr.URL()] = sheet
	return sheet
}
// NewAsymmetricSpritesheetFromFile creates a new AsymmetricSpriteSheet from a
// file name. The data provided is the location and size of the sprites.
// The texture must already be loadable via engo.Files; on failure a warning
// is logged and nil is returned.
func NewAsymmetricSpritesheetFromFile(textureName string, spriteRegions []SpriteRegion) *Spritesheet {
	res, err := engo.Files.Resource(textureName)
	if err != nil {
		log.Println("[WARNING] [NewAsymmetricSpritesheetFromFile]: Received error:", err)
		return nil
	}
	img, ok := res.(TextureResource)
	if !ok {
		log.Println("[WARNING] [NewAsymmetricSpritesheetFromFile]: Resource not of type `TextureResource`:", textureName)
		return nil
	}
	return NewAsymmetricSpritesheetFromTexture(&img, spriteRegions)
}
// NewSpritesheetFromTexture creates a new spritesheet from a texture resource.
// All cells are uniform cellWidth x cellHeight with no border between them.
func NewSpritesheetFromTexture(tr *TextureResource, cellWidth, cellHeight int) *Spritesheet {
	spriteRegions := generateSymmetricSpriteRegions(tr.Width, tr.Height, cellWidth, cellHeight, 0, 0)
	return NewAsymmetricSpritesheetFromTexture(tr, spriteRegions)
}

// NewSpritesheetFromFile is a simple handler for creating a new spritesheet from a file
// textureName is the name of a texture already preloaded with engo.Files.Add.
// On failure a warning is logged and nil is returned.
func NewSpritesheetFromFile(textureName string, cellWidth, cellHeight int) *Spritesheet {
	res, err := engo.Files.Resource(textureName)
	if err != nil {
		log.Println("[WARNING] [NewSpritesheetFromFile]: Received error:", err)
		return nil
	}
	img, ok := res.(TextureResource)
	if !ok {
		log.Println("[WARNING] [NewSpritesheetFromFile]: Resource not of type `TextureResource`:", textureName)
		return nil
	}
	return NewSpritesheetFromTexture(&img, cellWidth, cellHeight)
}
// NewSpritesheetWithBorderFromTexture creates a new spritesheet from a texture resource.
// This sheet has sprites of a uniform width and height, but also have borders around
// each sprite to prevent bleeding over
func NewSpritesheetWithBorderFromTexture(tr *TextureResource, cellWidth, cellHeight, borderWidth, borderHeight int) *Spritesheet {
	spriteRegions := generateSymmetricSpriteRegions(tr.Width, tr.Height, cellWidth, cellHeight, borderWidth, borderHeight)
	return NewAsymmetricSpritesheetFromTexture(tr, spriteRegions)
}

// NewSpritesheetWithBorderFromFile creates a new spritesheet from a file
// This sheet has sprites of a uniform width and height, but also have borders around
// each sprite to prevent bleeding over.
// On failure a warning is logged and nil is returned.
func NewSpritesheetWithBorderFromFile(textureName string, cellWidth, cellHeight, borderWidth, borderHeight int) *Spritesheet {
	res, err := engo.Files.Resource(textureName)
	if err != nil {
		log.Println("[WARNING] [NewSpritesheetWithBorderFromFile]: Received error:", err)
		return nil
	}
	img, ok := res.(TextureResource)
	if !ok {
		log.Println("[WARNING] [NewSpritesheetWithBorderFromFile]: Resource not of type `TextureResource`:", textureName)
		return nil
	}
	return NewSpritesheetWithBorderFromTexture(&img, cellWidth, cellHeight, borderWidth, borderHeight)
}
// Cell gets the region at the index i, updates and pulls from cache if need be.
// Panics if index is outside the range of the sheet's cells.
func (s *Spritesheet) Cell(index int) Texture {
	// Fast path: return the previously built texture view.
	if r, ok := s.cache[index]; ok {
		return r
	}
	// Slow path: build a Texture that shares the sheet's GL texture but
	// restricts the viewport to this cell, expressed in normalized [0,1]
	// texture coordinates relative to the full sheet.
	cell := s.cells[index]
	s.cache[index] = Texture{
		id:     s.texture,
		width:  float32(cell.Width),
		height: float32(cell.Height),
		viewport: engo.AABB{
			Min: engo.Point{
				X: cell.Position.X / s.width,
				Y: cell.Position.Y / s.height,
			},
			Max: engo.Point{
				X: (cell.Position.X + float32(cell.Width)) / s.width,
				Y: (cell.Position.Y + float32(cell.Height)) / s.height,
			},
		},
	}
	return s.cache[index]
}
// Drawable returns the drawable for a given index.
func (s *Spritesheet) Drawable(index int) Drawable {
	return s.Cell(index)
}

// Drawables returns all the drawables on the sheet, in cell order.
func (s *Spritesheet) Drawables() []Drawable {
	n := s.CellCount()
	drawables := make([]Drawable, n)
	for i := range drawables {
		drawables[i] = s.Drawable(i)
	}
	return drawables
}

// CellCount returns the number of cells on the sheet.
func (s *Spritesheet) CellCount() int {
	return len(s.cells)
}

// Cells returns all the cells on the sheet, in cell order.
func (s *Spritesheet) Cells() []Texture {
	textures := make([]Texture, s.CellCount())
	for i := range textures {
		textures[i] = s.Cell(i)
	}
	return textures
}
// Width is the amount of tiles on the x-axis of the spritesheet
// only if the sprite sheet is symmetric with no border.
// Note: Cell(0) may lazily populate the shared cell cache (the map is
// shared even though the receiver is a value).
func (s Spritesheet) Width() float32 {
	return s.width / s.Cell(0).Width()
}

// Height is the amount of tiles on the y-axis of the spritesheet
// only if the sprite sheet is symmetric with no border.
func (s Spritesheet) Height() float32 {
	return s.height / s.Cell(0).Height()
}
// generateSymmetricSpriteRegions slices a totalWidth x totalHeight texture
// into a row-major grid of uniformly sized cells, stepping by cell size plus
// border so the border pixels between cells are skipped.
// NOTE(review): if the texture size is not an exact multiple of the step,
// the right/bottom edge cells still claim the full cell size — confirm
// input sheets are exact multiples.
func generateSymmetricSpriteRegions(totalWidth, totalHeight float32, cellWidth, cellHeight, borderWidth, borderHeight int) []SpriteRegion {
	var spriteRegions []SpriteRegion
	for y := 0; y <= int(math.Floor(totalHeight-1)); y += cellHeight + borderHeight {
		for x := 0; x <= int(math.Floor(totalWidth-1)); x += cellWidth + borderWidth {
			spriteRegion := SpriteRegion{
				Position: engo.Point{X: float32(x), Y: float32(y)},
				Width:    cellWidth,
				Height:   cellHeight,
			}
			spriteRegions = append(spriteRegions, spriteRegion)
		}
	}
	return spriteRegions
}
/*
type Sprite struct {
Position *Point
Scale *Point
Anchor *Point
Rotation float32
Color color.Color
Alpha float32
Region *Region
}
func NewSprite(region *Region, x, y float32) *Sprite {
return &Sprite{
Position: &Point{x, y},
Scale: &Point{1, 1},
Anchor: &Point{0, 0},
Rotation: 0,
Color: color.White,
Alpha: 1,
Region: region,
}
}
*/ | common/spritesheet.go | 0.736874 | 0.444565 | spritesheet.go | starcoder |
package merkletree
import (
"errors"
"hash"
)
// A CachedTree can be used to build Merkle roots and proofs from the cached
// Merkle roots of smaller blocks of data. Each CachedTree has a height,
// meaning every element added to the CachedTree is the root of a full Merkle
// tree containing 2^height leaves.
type CachedTree struct {
	cachedNodeHeight uint64 // height of each cached subtree; a cached node covers 2^cachedNodeHeight leaves
	trueProofIndex   uint64 // index of the proven leaf within the full (uncached) tree
	Tree                    // embedded tree whose leaves are the cached subtree roots
}

// NewCachedTree initializes a CachedTree with a hash object, which will be
// used when hashing the input.
func NewCachedTree(h hash.Hash, cachedNodeHeight uint64) *CachedTree {
	return &CachedTree{
		cachedNodeHeight: cachedNodeHeight,
		Tree: Tree{
			treeHasher: NewDefaultHasher(h),
			cachedTree: true, // marks the embedded tree as operating on cached roots
		},
	}
}
// Prove will create a proof that the leaf at the indicated index is a part of
// the data represented by the Merkle root of the Cached Tree. The CachedTree
// needs the proof set proving that the index is an element of the cached
// element in order to create a correct proof. After proof is called, the
// CachedTree is unchanged, and can receive more elements.
func (ct *CachedTree) Prove(cachedProofSet [][]byte) (merkleRoot []byte, proofSet [][]byte, proofIndex uint64, numLeaves uint64) {
	// Determine the proof index within the full tree, and the number of leaves
	// within the full tree.
	leavesPerCachedNode := uint64(1) << ct.cachedNodeHeight
	numLeaves = leavesPerCachedNode * ct.currentIndex
	// Get the proof set tail, which is generated based entirely on cached
	// nodes.
	merkleRoot, proofSetTail, _, _ := ct.Tree.Prove()
	if len(proofSetTail) < 1 {
		// The proof was invalid, return 'nil' for the proof set but accurate
		// values for everything else.
		return merkleRoot, nil, ct.trueProofIndex, numLeaves
	}
	// The full proof set is going to be the input cachedProofSet combined with
	// the tail proof set. The one caveat is that the tail proof set has an
	// extra piece of data at the first element - the verifier will assume that
	// this data exists and therefore it needs to be omitted from the proof
	// set.
	proofSet = append(cachedProofSet, proofSetTail[1:]...)
	return merkleRoot, proofSet, ct.trueProofIndex, numLeaves
}
// SetIndex will inform the CachedTree of the index of the leaf for which a
// storage proof is being created. The index should be the index of the actual
// leaf, and not the index of the cached element containing the leaf. SetIndex
// must be called on empty CachedTree.
func (ct *CachedTree) SetIndex(i uint64) error {
	if ct.head != nil {
		return errors.New("cannot call SetIndex on Tree if Tree has not been reset")
	}
	ct.trueProofIndex = i
	// The embedded tree tracks the cached node that contains leaf i, hence
	// the division by the subtree leaf count (2^cachedNodeHeight).
	return ct.Tree.SetIndex(i / (1 << ct.cachedNodeHeight))
}
package simulations
import (
"context"
"time"
"github.com/DxChainNetwork/godx/p2p/enode"
)
// Simulation provides a framework for running actions in a simulated network
// and then waiting for expectations to be met
type Simulation struct {
	network *Network // the network all steps run against
}

// NewSimulation returns a new simulation which runs in the given network
func NewSimulation(network *Network) *Simulation {
	return &Simulation{
		network: network,
	}
}
// Run performs a step of the simulation by performing the step's action and
// then waiting for the step's expectation to be met
func (s *Simulation) Run(ctx context.Context, step *Step) (result *StepResult) {
	result = newStepResult()
	result.StartedAt = time.Now()
	// record the finish time no matter which return path is taken
	defer func() { result.FinishedAt = time.Now() }()
	// watch network events for the duration of the step
	stop := s.watchNetwork(result)
	defer stop()
	// perform the action
	if err := step.Action(ctx); err != nil {
		result.Error = err
		return
	}
	// wait for all node expectations to either pass, error or timeout
	nodes := make(map[enode.ID]struct{}, len(step.Expect.Nodes))
	for _, id := range step.Expect.Nodes {
		nodes[id] = struct{}{}
	}
	for len(result.Passes) < len(nodes) {
		select {
		case id := <-step.Trigger:
			// skip if we aren't checking the node
			if _, ok := nodes[id]; !ok {
				continue
			}
			// skip if the node has already passed
			if _, ok := result.Passes[id]; ok {
				continue
			}
			// run the node expectation check
			pass, err := step.Expect.Check(ctx, id)
			if err != nil {
				result.Error = err
				return
			}
			if pass {
				// record when this node's expectation was met
				result.Passes[id] = time.Now()
			}
		case <-ctx.Done():
			// caller cancellation/deadline ends the step with ctx's error
			result.Error = ctx.Err()
			return
		}
	}
	return
}
// watchNetwork subscribes to the network's event feed and appends every
// event to result.NetworkEvents until the returned stop function is called.
// The stop function blocks until the collector goroutine has exited, so no
// events are appended after it returns.
func (s *Simulation) watchNetwork(result *StepResult) func() {
	stop := make(chan struct{})
	done := make(chan struct{})
	events := make(chan *Event)
	sub := s.network.Events().Subscribe(events)
	go func() {
		defer close(done)
		defer sub.Unsubscribe()
		for {
			select {
			case event := <-events:
				result.NetworkEvents = append(result.NetworkEvents, event)
			case <-stop:
				return
			}
		}
	}()
	return func() {
		close(stop)
		<-done
	}
}
// Step describes a single simulation step: an action to run and the
// expectation that must subsequently be met.
type Step struct {
	// Action is the action to perform for this step
	Action func(context.Context) error
	// Trigger is a channel which receives node ids and triggers an
	// expectation check for that node
	Trigger chan enode.ID
	// Expect is the expectation to wait for when performing this step
	Expect *Expectation
}

// Expectation pairs the set of nodes to check with the predicate each node
// must satisfy.
type Expectation struct {
	// Nodes is a list of nodes to check
	Nodes []enode.ID
	// Check checks whether a given node meets the expectation
	Check func(context.Context, enode.ID) (bool, error)
}
// newStepResult returns a StepResult with an initialized Passes map, ready
// to record per-node pass timestamps.
func newStepResult() *StepResult {
	return &StepResult{
		Passes: make(map[enode.ID]time.Time),
	}
}

// StepResult captures everything that happened while running a Step.
type StepResult struct {
	// Error is the error encountered whilst running the step
	Error error
	// StartedAt is the time the step started
	StartedAt time.Time
	// FinishedAt is the time the step finished
	FinishedAt time.Time
	// Passes are the timestamps of the successful node expectations
	Passes map[enode.ID]time.Time
	// NetworkEvents are the network events which occurred during the step
	NetworkEvents []*Event
}
package texture
import (
"unsafe"
gl "github.com/adrianderstroff/pbr/pkg/core/gl"
"github.com/adrianderstroff/pbr/pkg/view/image/image2d"
)
// MakeEmpty creates a Texture with no image data.
// The zero handle means no GL texture object has been generated yet.
func MakeEmpty() Texture {
	return Texture{0, gl.TEXTURE_2D, 0}
}
// Make creates a texture of the given width and height.
// Internalformat, format and pixelType specify the layout of the data.
// Internalformat is the format of the texture on the GPU.
// Format is the format of the pixeldata that is provided to this function.
// Pixeltype specifies the data type of a single component of the pixeldata.
// Data is pointing to the data that is going to be uploaded (may be nil to
// allocate without uploading).
// Min and mag specify the behaviour when down and upscaling the texture.
// S and t specify the behaviour at the borders of the image.
func Make(width, height int, internalformat int32, format, pixelType uint32,
	data unsafe.Pointer, min, mag, s, t int32) Texture {
	texture := Texture{0, gl.TEXTURE_2D, 0}
	// generate and bind texture
	gl.GenTextures(1, &texture.handle)
	texture.Bind(0)
	// set texture properties
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, min)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, mag)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, s)
	gl.TexParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, t)
	// specify a texture image
	gl.TexImage2D(gl.TEXTURE_2D, 0, internalformat, int32(width), int32(height),
		0, format, pixelType, data)
	// unbind texture
	texture.Unbind()
	return texture
}
// MakeColor creates an RGBA color texture of the specified size.
func MakeColor(width, height int) Texture {
	return Make(width, height, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, nil,
		gl.LINEAR, gl.LINEAR, gl.CLAMP_TO_BORDER, gl.CLAMP_TO_BORDER)
}

// MakeDepth creates a depth texture of the specified size.
func MakeDepth(width, height int) Texture {
	return Make(width, height, gl.DEPTH_COMPONENT, gl.DEPTH_COMPONENT,
		gl.UNSIGNED_BYTE, nil, gl.LINEAR, gl.LINEAR, gl.CLAMP_TO_BORDER,
		gl.CLAMP_TO_BORDER)
}
// MakeFromPathFixedChannels creates a texture with the image data specified in path.
// The number of channels is enforced no matter how many channels the image in
// the specified file actually has.
func MakeFromPathFixedChannels(path string, channels int, internalformat int32,
	format uint32) (Texture, error) {
	image, err := image2d.MakeFromPathFixedChannels(path, channels)
	if err != nil {
		return Texture{}, err
	}
	// flip vertically before upload (presumably to match GL's bottom-left
	// texture-coordinate origin — TODO confirm)
	image.FlipY()
	return Make(image.GetWidth(), image.GetHeight(), internalformat, format,
		image.GetPixelType(), image.GetDataPointer(), gl.NEAREST, gl.NEAREST,
		gl.CLAMP_TO_EDGE, gl.CLAMP_TO_EDGE), nil
}

// MakeFromPath creates a texture with the image data specified in path.
func MakeFromPath(path string, internalformat int32, format uint32) (Texture, error) {
	image, err := image2d.MakeFromPath(path)
	if err != nil {
		return Texture{}, err
	}
	// flipped vertically before upload, same as MakeFromPathFixedChannels
	image.FlipY()
	return Make(image.GetWidth(), image.GetHeight(), internalformat, format,
		image.GetPixelType(), image.GetDataPointer(), gl.NEAREST, gl.NEAREST,
		gl.CLAMP_TO_EDGE, gl.CLAMP_TO_EDGE), nil
}
// MakeFromImage grabs the dimensions and information from the image and
// builds a texture from it (no vertical flip is applied here).
func MakeFromImage(image *image2d.Image2D, internalformat int32, format uint32) Texture {
	return Make(image.GetWidth(), image.GetHeight(), internalformat, format,
		image.GetPixelType(), image.GetDataPointer(), gl.NEAREST, gl.NEAREST,
		gl.CLAMP_TO_EDGE, gl.CLAMP_TO_EDGE)
}

// MakeFromData creates a texture from raw pixel data; the channel count is
// derived from the provided pixel format.
func MakeFromData(data []uint8, width, height int, internalformat int32,
	format uint32) (Texture, error) {
	// determine channels
	channels := channelsFromFormat(format)
	// create an image2d from the data and format
	image, err := image2d.MakeFromData(width, height, channels, data)
	if err != nil {
		return Texture{}, err
	}
	return Make(image.GetWidth(), image.GetHeight(), internalformat, format,
		image.GetPixelType(), image.GetDataPointer(), gl.NEAREST, gl.NEAREST,
		gl.CLAMP_TO_EDGE, gl.CLAMP_TO_EDGE), nil
}
// determineFormat returns the appropriate texture format for the given number
// of channels
func determineFormat(channels int) int {
switch channels {
case 1:
return gl.RED
case 2:
return gl.RG
case 3:
return gl.RGB
case 4:
return gl.RGBA
}
return gl.RGBA
} | pkg/view/texture/simple.go | 0.825062 | 0.593315 | simple.go | starcoder |
package test
import (
"fmt"
"strconv"
"testing"
"github.com/codeready-toolchain/api/pkg/apis/toolchain/v1alpha1"
"github.com/codeready-toolchain/host-operator/pkg/counter"
"github.com/codeready-toolchain/host-operator/pkg/metrics"
commontest "github.com/codeready-toolchain/toolchain-common/pkg/test"
"github.com/codeready-toolchain/toolchain-common/pkg/test/masteruserrecord"
promtestutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/runtime"
)
// CounterAssertion wraps a snapshot of the toolchain counter counts together
// with the test context, so checks can be chained fluently.
type CounterAssertion struct {
	t      *testing.T
	counts counter.Counts
}

// AssertThatCounters snapshots the current (initialized) counters and
// returns an assertion helper over them; fails the test if they cannot be read.
func AssertThatCounters(t *testing.T) *CounterAssertion {
	counts, err := counter.GetCounts()
	require.NoError(t, err)
	return &CounterAssertion{
		t:      t,
		counts: counts,
	}
}

// AssertThatUninitializedCounters verifies that the counter has NOT been
// initialized yet (GetCounts must return the "not initialized" error) and
// returns an assertion helper over the zero-valued snapshot.
func AssertThatUninitializedCounters(t *testing.T) *CounterAssertion {
	counts, err := counter.GetCounts()
	require.EqualErrorf(t, err, "counter is not initialized", "should be error because counter hasn't been initialized yet")
	return &CounterAssertion{
		t:      t,
		counts: counts,
	}
}
// HaveMasterUserRecords asserts both the in-memory count and the Prometheus
// gauge report the expected number of MasterUserRecords.
func (a *CounterAssertion) HaveMasterUserRecords(number int) *CounterAssertion {
	assert.Equal(a.t, number, a.counts.MasterUserRecordCount)
	AssertMetricsGaugeEquals(a.t, number, metrics.MasterUserRecordGauge)
	return a
}

// HaveUserAccountsForCluster asserts the per-cluster user-account count and
// the matching labeled gauge value.
func (a *CounterAssertion) HaveUserAccountsForCluster(clusterName string, number int) *CounterAssertion {
	assert.Equal(a.t, number, a.counts.UserAccountsPerClusterCounts[clusterName])
	AssertMetricsGaugeEquals(a.t, number, metrics.UserAccountGaugeVec.WithLabelValues(clusterName))
	return a
}

// HaveUsersPerActivations asserts the users-per-activations counter map and
// every corresponding labeled gauge value.
func (a *CounterAssertion) HaveUsersPerActivations(expected v1alpha1.Metric) *CounterAssertion {
	actual := a.counts.UsersPerActivationCounts
	assert.Equal(a.t, map[string]int(expected), actual)
	for activations, count := range expected {
		AssertMetricsGaugeEquals(a.t, count, metrics.UsersPerActivationGaugeVec.WithLabelValues(activations))
	}
	return a
}

// HaveMasterUserRecordsPerDomain asserts the per-domain MUR counter map and
// every corresponding labeled gauge value.
func (a *CounterAssertion) HaveMasterUserRecordsPerDomain(expected v1alpha1.Metric) *CounterAssertion {
	actual := a.counts.MasterUserRecordPerDomainCounts
	assert.Equal(a.t, map[string]int(expected), actual, "invalid counter values")
	for domain, count := range expected {
		AssertMetricsGaugeEquals(a.t, count, metrics.MasterUserRecordGaugeVec.WithLabelValues(domain), "invalid gauge value for domain '%v'", domain)
	}
	return a
}
// CreateMultipleMurs builds `number` MasterUserRecords named
// <prefix>0 .. <prefix>(number-1), all targeting the same cluster.
func CreateMultipleMurs(t *testing.T, prefix string, number int, targetCluster string) []runtime.Object {
	murs := make([]runtime.Object, number)
	for index := range murs {
		murs[index] = masteruserrecord.NewMasterUserRecord(t, fmt.Sprintf("%s%d", prefix, index), masteruserrecord.TargetCluster(targetCluster))
	}
	return murs
}

// CreateMultipleUserSignups builds `number` UserSignups named
// <prefix>0 .. <prefix>(number-1); the i-th signup carries an
// activation-counter annotation with value i+1.
func CreateMultipleUserSignups(prefix string, number int) []runtime.Object {
	usersignups := make([]runtime.Object, number)
	for index := range usersignups {
		usersignups[index] = NewUserSignup(
			WithName(fmt.Sprintf("%s%d", prefix, index)),
			WithAnnotation(v1alpha1.UserSignupActivationCounterAnnotationKey, strconv.Itoa(index+1)),
		)
	}
	return usersignups
}
// InitializeCounters resets the global counter, registers a cleanup that
// resets it again after the test, and synchronizes it from the given
// ToolchainStatus plus any extra initial objects.
func InitializeCounters(t *testing.T, toolchainStatus *v1alpha1.ToolchainStatus, initObjs ...runtime.Object) {
	counter.Reset()
	t.Cleanup(counter.Reset)
	initializeCounters(t, commontest.NewFakeClient(t, initObjs...), toolchainStatus)
}

// InitializeCountersWithoutReset is like InitializeCounters but does not
// reset the counter up front (only on cleanup), keeping pre-existing state.
func InitializeCountersWithoutReset(t *testing.T, toolchainStatus *v1alpha1.ToolchainStatus) {
	t.Cleanup(counter.Reset)
	initializeCounters(t, commontest.NewFakeClient(t), toolchainStatus)
}

// initializeCounters seeds the MasterUserRecord gauge from the status (when
// present) and synchronizes the counter package from the fake client.
func initializeCounters(t *testing.T, cl *commontest.FakeClient, toolchainStatus *v1alpha1.ToolchainStatus) {
	if toolchainStatus.Status.HostOperator != nil {
		metrics.MasterUserRecordGauge.Set(float64(toolchainStatus.Status.HostOperator.MasterUserRecordCount))
	}
	t.Logf("toolchainStatus members: %v", toolchainStatus.Status.Members)
	err := counter.Synchronize(cl, toolchainStatus)
	require.NoError(t, err)
	t.Logf("MasterUserRecordGauge=%.0f", promtestutil.ToFloat64(metrics.MasterUserRecordGauge))
}
package vg
import (
"math"
)
// DegToRad converts an angle in degrees to radians.
func DegToRad(deg float32) float32 {
	return deg / 180.0 * PI
}

// RadToDeg converts an angle in radians to degrees.
func RadToDeg(rad float32) float32 {
	return rad / PI * 180.0
}
// signF reports the sign of v as +1 or -1. Zero (and NaN) map to -1,
// preserving the strict v > 0 test of the original.
func signF(v float32) float32 {
	if v > 0.0 {
		return 1.0
	}
	return -1.0
}

// clampF limits v to the closed interval [lo, hi].
func clampF(v, lo, hi float32) float32 {
	switch {
	case v < lo:
		return lo
	case v > hi:
		return hi
	}
	return v
}

// clampI limits v to the closed interval [lo, hi].
func clampI(v, lo, hi int) int {
	switch {
	case v < lo:
		return lo
	case v > hi:
		return hi
	}
	return v
}
// hue converts a (wrapped) hue fraction h into a channel value by linearly
// interpolating between m1 and m2 over the six HSL sub-ranges.
func hue(h, m1, m2 float32) float32 {
	// wrap h back into roughly [0, 1]
	if h < 0.0 {
		h++
	} else if h > 1 {
		h--
	}
	switch {
	case h < 1.0/6.0:
		return m1 + (m2-m1)*h*6.0
	case h < 3.0/6.0:
		return m2
	case h < 4.0/6.0:
		return m1 + (m2-m1)*(2.0/3.0-h)*6.0
	}
	return m1
}
// minF returns the smaller of two float32 values.
func minF(x, y float32) float32 {
	if x < y {
		return x
	}
	return y
}

// maxF returns the larger of two float32 values.
func maxF(x, y float32) float32 {
	if x > y {
		return x
	}
	return y
}

// maxI returns the larger of two ints.
func maxI(x, y int) int {
	if x > y {
		return x
	}
	return y
}

// maxFs returns the maximum of v and all additional values.
func maxFs(v float32, values ...float32) float32 {
	best := v
	for _, candidate := range values {
		if best < candidate {
			best = candidate
		}
	}
	return best
}

// minFs returns the minimum of v and all additional values.
func minFs(v float32, values ...float32) float32 {
	best := v
	for _, candidate := range values {
		if best > candidate {
			best = candidate
		}
	}
	return best
}
// cross returns the perp-dot product dx1*dy0 - dx0*dy1 of the two
// direction vectors.
func cross(dx0, dy0, dx1, dy1 float32) float32 {
	return dx1*dy0 - dx0*dy1
}

// absF returns the absolute value of v (v == 0 takes the negation branch,
// which still yields 0).
func absF(v float32) float32 {
	if v > 0.0 {
		return v
	}
	return -v
}

// sqrtF is a float32 wrapper around math.Sqrt.
func sqrtF(v float32) float32 {
	return float32(math.Sqrt(float64(v)))
}

// atan2F is a float32 wrapper around math.Atan2 (first argument is the
// y-component).
func atan2F(y, x float32) float32 {
	return float32(math.Atan2(float64(y), float64(x)))
}

// acosF is a float32 wrapper around math.Acos.
func acosF(v float32) float32 {
	return float32(math.Acos(float64(v)))
}

// tanF is a float32 wrapper around math.Tan.
func tanF(v float32) float32 {
	return float32(math.Tan(float64(v)))
}

// sinCosF returns sin(v) and cos(v) in a single call via math.Sincos.
func sinCosF(v float32) (float32, float32) {
	sin, cos := math.Sincos(float64(v))
	return float32(sin), float32(cos)
}

// ceilF rounds v up to the next integer.
func ceilF(v float32) int {
	return int(math.Ceil(float64(v)))
}
// normalize returns the length of (x, y) together with the unit vector in
// the same direction. Vectors shorter than 1e-6 are returned unscaled to
// avoid dividing by (near) zero.
func normalize(x, y float32) (float32, float32, float32) {
	length := float32(math.Sqrt(float64(x*x + y*y)))
	if length > 1e-6 {
		inv := 1.0 / length
		x *= inv
		y *= inv
	}
	return length, x, y
}
// intersectRects computes the intersection of rectangle A (ax,ay,aw,ah)
// with rectangle B (bx,by,bw,bh), returned as {x, y, width, height}.
// Disjoint rectangles yield zero width and/or height.
func intersectRects(ax, ay, aw, ah, bx, by, bw, bh float32) [4]float32 {
	// The overlap's top-left corner is the larger of the two minima.
	minX := bx
	if ax > bx {
		minX = ax
	}
	minY := by
	if ay > by {
		minY = ay
	}
	// The overlap's bottom-right corner is the smaller of the two maxima.
	maxX := bx + bw
	if ax+aw < maxX {
		maxX = ax + aw
	}
	maxY := by + bh
	if ay+ah < maxY {
		maxY = ay + ah
	}
	// Negative extents mean no overlap; clamp to zero.
	w := maxX - minX
	if w < 0 {
		w = 0
	}
	h := maxY - minY
	if h < 0 {
		h = 0
	}
	return [4]float32{minX, minY, w, h}
}
// ptEquals reports whether the two points lie strictly closer than tol to
// each other (squared-distance comparison, no sqrt needed).
func ptEquals(x1, y1, x2, y2, tol float32) bool {
	dx, dy := x2-x1, y2-y1
	return dx*dx+dy*dy < tol*tol
}
// distPtSeg returns the SQUARED distance from point (x, y) to the line
// segment p-q. The closest-point parameter t is the projection of the point
// onto the segment, clamped to [0, 1] so the result never lies beyond the
// endpoints.
//
// Fixed: the original clamped the raw dot product to [0, 1.1] BEFORE
// dividing by the squared segment length — the clamp must follow the
// division, and the upper bound is 1.0 (the 1.1 was a typo). The previous
// code returned wrong distances for any point projecting past either end
// of a segment whose squared length differs from 1.
func distPtSeg(x, y, px, py, qx, qy float32) float32 {
	pqx := qx - px
	pqy := qy - py
	dx := x - px
	dy := y - py
	d := pqx*pqx + pqy*pqy
	t := pqx*dx + pqy*dy
	if d > 0 {
		t /= d
	}
	// clamp the projection parameter onto the segment
	if t < 0 {
		t = 0
	} else if t > 1 {
		t = 1
	}
	dx = px + t*pqx - x
	dy = py + t*pqy - y
	return dx*dx + dy*dy
}
// triArea2 returns twice the signed area of triangle ABC, computed as the
// cross product of edges AB and AC (acX*abY - abX*acY).
func triArea2(ax, ay, bx, by, cx, cy float32) float32 {
	ux, uy := bx-ax, by-ay // edge AB
	vx, vy := cx-ax, cy-ay // edge AC
	return vx*uy - ux*vy
}
// polyArea returns the signed area of the polygon formed by the first npts
// entries of points, summed as a triangle fan anchored at the first vertex.
func polyArea(points []vgPoint, npts int) float32 {
	anchor := &points[0]
	var doubled float32
	for i := 2; i < npts; i++ {
		prev := &points[i-1]
		cur := &points[i]
		doubled += triArea2(anchor.x, anchor.y, prev.x, prev.y, cur.x, cur.y)
	}
	return doubled * 0.5
}

// polyReverse reverses the first npts entries of points in place.
func polyReverse(points []vgPoint, npts int) {
	for i, j := 0, npts-1; i < j; i, j = i+1, j-1 {
		points[i], points[j] = points[j], points[i]
	}
}
// curveDivs returns the number of subdivisions needed to approximate an arc
// of the given radius and angle to within tol, never fewer than 2.
func curveDivs(r, arc, tol float32) int {
	da := math.Acos(float64(r/(r+tol))) * 2.0
	n := int(math.Ceil(float64(arc) / da))
	if n < 2 {
		n = 2
	}
	return n
}
// chooseBevel returns two join anchor points on one side of the joint at p1.
// When bevel is set, the anchors are offset along the two segment normals
// (p0's then p1's); otherwise both anchors collapse onto the single miter
// point along the (dmx, dmy) extrusion direction. w is the signed offset
// width.
func chooseBevel(bevel bool, p0, p1 *vgPoint, w float32) (x0, y0, x1, y1 float32) {
	if bevel {
		x0 = p1.x + p0.dy*w
		y0 = p1.y - p0.dx*w
		x1 = p1.x + p1.dy*w
		y1 = p1.y - p1.dx*w
	} else {
		// Both points are intentionally identical here (the shared miter
		// point); this mirrors the NanoVG reference implementation.
		x0 = p1.x + p1.dmx*w
		y0 = p1.y + p1.dmy*w
		x1 = p1.x + p1.dmx*w
		y1 = p1.y + p1.dmy*w
	}
	return
}
// roundJoin emits the triangle-strip vertices for a rounded join at p1,
// writing into dst starting at index and returning the next free index.
// lw/rw are the left/right stroke half-widths, lu/ru the matching u texture
// coordinates, and nCap bounds the number of arc subdivisions.
func roundJoin(dst []vgVertex, index int, p0, p1 *vgPoint, lw, rw, lu, ru float32, nCap int, fringe float32) int {
	// perpendiculars of the incoming (0) and outgoing (1) segment directions
	dlx0 := p0.dy
	dly0 := -p0.dx
	dlx1 := p1.dy
	dly1 := -p1.dx
	isInnerBevel := p1.flags&vgPrINNERBEVEL != 0
	if p1.flags&vgPtLEFT != 0 {
		// joint turns left: the arc sweeps the right side of the stroke
		lx0, ly0, lx1, ly1 := chooseBevel(isInnerBevel, p0, p1, lw)
		a0 := atan2F(-dly0, -dlx0)
		a1 := atan2F(-dly1, -dlx1)
		if a1 > a0 {
			a1 -= PI * 2
		}
		(&dst[index]).set(lx0, ly0, lu, 1)
		(&dst[index+1]).set(p1.x-dlx0*rw, p1.y-dly0*rw, ru, 1)
		index += 2
		// number of arc segments, proportional to the swept angle
		n := clampI(ceilF(((a0-a1)/PI)*float32(nCap)), 2, nCap)
		for i := 0; i < n; i++ {
			u := float32(i) / float32(n-1)
			a := a0 + u*(a1-a0)
			s, c := sinCosF(a)
			rx := p1.x + c*rw
			ry := p1.y + s*rw
			(&dst[index]).set(p1.x, p1.y, 0.5, 1)
			(&dst[index+1]).set(rx, ry, ru, 1)
			index += 2
		}
		(&dst[index]).set(lx1, ly1, lu, 1)
		(&dst[index+1]).set(p1.x-dlx1*rw, p1.y-dly1*rw, ru, 1)
		index += 2
	} else {
		// joint turns right: the arc sweeps the left side of the stroke
		rx0, ry0, rx1, ry1 := chooseBevel(isInnerBevel, p0, p1, -rw)
		a0 := atan2F(dly0, dlx0)
		a1 := atan2F(dly1, dlx1)
		if a1 < a0 {
			a1 += PI * 2
		}
		(&dst[index]).set(p1.x+dlx0*rw, p1.y+dly0*rw, lu, 1)
		(&dst[index+1]).set(rx0, ry0, ru, 1)
		index += 2
		n := clampI(ceilF(((a1-a0)/PI)*float32(nCap)), 2, nCap)
		for i := 0; i < n; i++ {
			u := float32(i) / float32(n-1)
			a := a0 + u*(a1-a0)
			s, c := sinCosF(a)
			lx := p1.x + c*lw
			ly := p1.y + s*lw
			(&dst[index]).set(lx, ly, lu, 1)
			(&dst[index+1]).set(p1.x, p1.y, 0.5, 1)
			index += 2
		}
		(&dst[index]).set(p1.x+dlx1*rw, p1.y+dly1*rw, lu, 1)
		(&dst[index+1]).set(rx1, ry1, ru, 1)
		index += 2
	}
	return index
}
// bevelJoin emits the triangle-strip vertices for a bevel (or inner-bevel /
// miter fallback) join at p1, writing into dst starting at index and
// returning the next free index. lw/rw are the left/right stroke
// half-widths and lu/ru the matching u texture coordinates.
func bevelJoin(dst []vgVertex, index int, p0, p1 *vgPoint, lw, rw, lu, ru, fringe float32) int {
	// perpendiculars of the incoming (0) and outgoing (1) segment directions
	dlx0 := p0.dy
	dly0 := -p0.dx
	dlx1 := p1.dy
	dly1 := -p1.dx
	isInnerBevel := p1.flags&vgPrINNERBEVEL != 0
	isBevel := p1.flags&vgPtBEVEL != 0
	if p1.flags&vgPtLEFT != 0 {
		// joint turns left: bevel geometry on the right side
		lx0, ly0, lx1, ly1 := chooseBevel(isInnerBevel, p0, p1, lw)
		(&dst[index]).set(lx0, ly0, lu, 1)
		(&dst[index+1]).set(p1.x-dlx0*rw, p1.y-dly0*rw, ru, 1)
		index += 2
		if isBevel {
			(&dst[index]).set(lx0, ly0, lu, 1)
			(&dst[index+1]).set(p1.x-dlx0*rw, p1.y-dly0*rw, ru, 1)
			(&dst[index+2]).set(lx1, ly1, lu, 1)
			(&dst[index+3]).set(p1.x-dlx1*rw, p1.y-dly1*rw, ru, 1)
			index += 4
		} else {
			// miter point on the outer (right) side
			rx0 := p1.x - p1.dmx*rw
			ry0 := p1.y - p1.dmy*rw
			(&dst[index]).set(p1.x, p1.y, 0.5, 1)
			(&dst[index+1]).set(p1.x-dlx0*rw, p1.y-dly0*rw, ru, 1)
			(&dst[index+2]).set(rx0, ry0, ru, 1)
			(&dst[index+3]).set(rx0, ry0, ru, 1)
			(&dst[index+4]).set(p1.x, p1.y, 0.5, 1)
			(&dst[index+5]).set(p1.x-dlx1*rw, p1.y-dly1*rw, ru, 1)
			index += 6
		}
		(&dst[index]).set(lx1, ly1, lu, 1)
		(&dst[index+1]).set(p1.x-dlx1*rw, p1.y-dly1*rw, ru, 1)
		index += 2
	} else {
		// joint turns right: bevel geometry on the left side
		rx0, ry0, rx1, ry1 := chooseBevel(isInnerBevel, p0, p1, -rw)
		(&dst[index]).set(p1.x+dlx0*lw, p1.y+dly0*lw, lu, 1)
		(&dst[index+1]).set(rx0, ry0, ru, 1)
		index += 2
		if isBevel {
			(&dst[index]).set(p1.x+dlx0*lw, p1.y+dly0*lw, lu, 1)
			(&dst[index+1]).set(rx0, ry0, ru, 1)
			(&dst[index+2]).set(p1.x+dlx1*rw, p1.y+dly1*rw, lu, 1)
			(&dst[index+3]).set(rx1, ry1, ru, 1)
			index += 4
		} else {
			// NOTE(review): the upstream NanoVG reference computes this
			// miter point with lw (p1.x + p1.dmx*lw); this port uses rw.
			// The two half-widths are typically equal, but confirm the
			// divergence is intentional.
			lx0 := p1.x + p1.dmx*rw
			ly0 := p1.y + p1.dmy*rw
			(&dst[index]).set(p1.x+dlx0*lw, p1.y+dly0*lw, lu, 1)
			(&dst[index+1]).set(p1.x, p1.y, 0.5, 1)
			(&dst[index+2]).set(lx0, ly0, lu, 1)
			(&dst[index+3]).set(lx0, ly0, lu, 1)
			(&dst[index+4]).set(p1.x+dlx1*lw, p1.y+dly1*lw, lu, 1)
			(&dst[index+5]).set(p1.x, p1.y, 0.5, 1)
			index += 6
		}
		(&dst[index]).set(p1.x+dlx1*lw, p1.y+dly1*lw, lu, 1)
		(&dst[index+1]).set(rx1, ry1, ru, 1)
		index += 2
	}
	return index
}
// buttCapStart emits the four vertices of a butt/square start cap for a
// stroke beginning at p, heading along (dx, dy). w is the half-width, d the
// backwards extension of the cap and aa the anti-alias fringe width.
// Returns the next free vertex index.
func buttCapStart(dst []vgVertex, index int, p *vgPoint, dx, dy, w, d, aa float32) int {
	// cap center pulled back along the segment direction by d
	px := p.x - dx*d
	py := p.y - dy*d
	// perpendicular of the direction vector
	dlx := dy
	dly := -dx
	(&dst[index]).set(px+dlx*w-dx*aa, py+dly*w-dy*aa, 0, 0)
	(&dst[index+1]).set(px-dlx*w-dx*aa, py-dly*w-dy*aa, 1, 0)
	(&dst[index+2]).set(px+dlx*w, py+dly*w, 0, 1)
	(&dst[index+3]).set(px-dlx*w, py-dly*w, 1, 1)
	return index + 4
}
// buttCapEnd emits the four vertices of a butt/square end cap for a stroke
// ending at p, heading along (dx, dy). w is the half-width, d the forwards
// extension of the cap and aa the anti-alias fringe width. Returns the next
// free vertex index.
func buttCapEnd(dst []vgVertex, index int, p *vgPoint, dx, dy, w, d, aa float32) int {
	// cap center pushed forward along the segment direction by d
	px := p.x + dx*d
	py := p.y + dy*d
	// perpendicular of the direction vector
	dlx := dy
	dly := -dx
	(&dst[index]).set(px+dlx*w, py+dly*w, 0, 1)
	(&dst[index+1]).set(px-dlx*w, py-dly*w, 1, 1)
	(&dst[index+2]).set(px+dlx*w+dx*aa, py+dly*w+dy*aa, 0, 0)
	// Fixed: the y term of the last fringe vertex must extend by +dy*aa to
	// mirror the +dx*aa on x (it was -dy*aa, skewing the anti-aliased edge;
	// compare buttCapStart and the NanoVG reference).
	(&dst[index+3]).set(px-dlx*w+dx*aa, py-dly*w+dy*aa, 1, 0)
	return index + 4
}
// roundCapStart emits a semicircular start cap for a stroke beginning at p,
// heading along (dx, dy), as a fan of nCap vertex pairs followed by the two
// edge vertices. w is the half-width. Returns the next free vertex index.
func roundCapStart(dst []vgVertex, index int, p *vgPoint, dx, dy, w float32, nCap int, aa float32) int {
	px := p.x
	py := p.y
	// perpendicular of the direction vector
	dlx := dy
	dly := -dx
	for i := 0; i < nCap; i++ {
		// sweep half a circle across the cap
		a := float32(i) / float32(nCap-1) * PI
		s, c := sinCosF(a)
		ax := c * w
		ay := s * w
		(&dst[index]).set(px-dlx*ax-dx*ay, py-dly*ax-dy*ay, 0, 1)
		(&dst[index+1]).set(px, py, 0.5, 1)
		index += 2
	}
	(&dst[index]).set(px+dlx*w, py+dly*w, 0, 1)
	(&dst[index+1]).set(px-dlx*w, py-dly*w, 1, 1)
	return index + 2
}

// roundCapEnd emits a semicircular end cap for a stroke ending at p, heading
// along (dx, dy): the two edge vertices first, then the arc fan. Returns the
// next free vertex index.
func roundCapEnd(dst []vgVertex, index int, p *vgPoint, dx, dy, w float32, nCap int, aa float32) int {
	px := p.x
	py := p.y
	// perpendicular of the direction vector
	dlx := dy
	dly := -dx
	(&dst[index]).set(px+dlx*w, py+dly*w, 0, 1)
	(&dst[index+1]).set(px-dlx*w, py-dly*w, 1, 1)
	index += 2
	for i := 0; i < nCap; i++ {
		// sweep half a circle across the cap
		a := float32(i) / float32(nCap-1) * PI
		s, c := sinCosF(a)
		ax := c * w
		ay := s * w
		(&dst[index]).set(px, py, 0.5, 1)
		(&dst[index+1]).set(px-dlx*ax+dx*ay, py-dly*ax+dy*ay, 0, 1)
		index += 2
	}
	return index
}
// nearestPow2 returns the smallest power of two greater than or equal
// to num (powers of two map to themselves). Inputs <= 0 yield 1.
//
// Bugs fixed: the original computed the answer in n but then returned
// int(num), discarding the entire computation; negative inputs passed
// through uint(num) and wrapped; and the bit-smear stopped at >>16,
// which is insufficient for 64-bit ints above 2^32.
func nearestPow2(num int) int {
	if num <= 0 {
		return 1
	}
	// Classic bit-smearing trick: propagate the top set bit of num-1
	// into every lower bit, then add one to land on the next power of
	// two.
	n := uint(num) - 1
	n |= n >> 1
	n |= n >> 2
	n |= n >> 4
	n |= n >> 8
	n |= n >> 16
	n |= n >> 32
	n++
	return int(n)
}
// quantize snaps a to a multiple of the step d, rounding half up for
// positive quotients (the int conversion truncates toward zero, so
// negative quotients round toward zero instead — preserved behavior).
func quantize(a, d float32) float32 {
	steps := int(a/d + 0.5)
	return d * float32(steps)
}
package health
// Status represents the overall health status of a component.
type Status string

const (
	// StatusUp indicates that the checked component is up and running.
	StatusUp Status = "UP"
	// StatusDown indicates that the checked component has an issue and is unavailable.
	StatusDown Status = "DOWN"
	// StatusUnknown indicates that the health of the checked component cannot be determined.
	StatusUnknown Status = "UNKNOWN"
)

// Health contains information about the health of a component.
type Health struct {
	Status  Status                 `json:"status"`
	Details map[string]interface{} `json:"details,omitempty"`
}
// New returns a Health in the UNKNOWN state with an empty,
// ready-to-use details map.
func New() *Health {
	h := &Health{
		Status:  StatusUnknown,
		Details: map[string]interface{}{},
	}
	return h
}
// WithStatus sets the status of the health and returns the receiver
// for chaining.
func (h *Health) WithStatus(status Status) *Health {
	h.Status = status
	return h
}

// WithError sets the status of the health to DOWN and adds an "error" detail.
// NOTE(review): the error value is stored as-is; most error types marshal
// to "{}" under encoding/json — confirm whether err.Error() was intended.
func (h *Health) WithError(err error) *Health {
	h.Status = StatusDown
	return h.WithDetail("error", err)
}

// WithDetail adds a detail to the health. Details must be non-nil
// (as produced by New); writing to a nil map would panic.
func (h *Health) WithDetail(key string, val interface{}) *Health {
	h.Details[key] = val
	return h
}

// Up sets the health status to UP and returns the receiver.
func (h *Health) Up() *Health {
	h.Status = StatusUp
	return h
}

// Down sets the health status to DOWN and returns the receiver.
func (h *Health) Down() *Health {
	h.Status = StatusDown
	return h
}

// Unknown sets the health status to UNKNOWN and returns the receiver.
func (h *Health) Unknown() *Health {
	h.Status = StatusUnknown
	return h
}

// WithDetails copies the given details into the health, overwriting
// existing keys, and returns the receiver.
func (h *Health) WithDetails(details map[string]interface{}) *Health {
	for k, v := range details {
		h.Details[k] = v
	}
	return h
}
// Indicator is an interface to provide the health of a component.
//go:generate counterfeiter . Indicator
type Indicator interface {
	// Name returns the name of the component.
	Name() string
	// Health returns the health of the component.
	Health() *Health
}

// AggregationPolicy is an interface to provide aggregated health information.
//go:generate counterfeiter . AggregationPolicy
type AggregationPolicy interface {
	// Apply processes the given healths (keyed by indicator name) to
	// build a single aggregated health.
	Apply(healths map[string]*Health) *Health
}

// Registry is an interface to store and fetch health indicators.
type Registry interface {
	// AddHealthIndicator registers a new health indicator.
	AddHealthIndicator(indicator Indicator)
	// HealthIndicators returns the currently registered health indicators.
	HealthIndicators() []Indicator
	// RegisterHealthAggregationPolicy sets the health aggregation policy.
	RegisterHealthAggregationPolicy(aggregator AggregationPolicy)
	// HealthAggregationPolicy returns the registered health aggregation policy.
	HealthAggregationPolicy() AggregationPolicy
}
package cl
import "sync"
// StringClosure is a function that returns a string, used to defer
// execution of expensive logging operations until the entry is
// actually emitted.
type StringClosure func() string

// Value is the generic list of things processed by the log chan.
type Value []interface{}

// Each log level below is declared in the same four shapes: a Value
// alias for Println-style argument lists, a Value alias for
// Printf-style format+args, a bare string type for single-message
// entries, and a StringClosure type for lazily computed entries.

type (
	// Fatal is a log value that indicates level and how to interpret the interface slice
	Fatal Value
	// Fatalf is a log value that indicates level and how to interpret the interface slice
	Fatalf Value
	// Ftl is a log type that is just one string
	Ftl string
	// Fatalc is for passing a closure when the log entry is expensive to compute
	Fatalc StringClosure
)

type (
	// Error is a log value that indicates level and how to interpret the interface slice
	Error Value
	// Errorf is a log value that indicates level and how to interpret the interface slice
	Errorf Value
	// Err is a log type that is just one string
	Err string
	// Errorc is for passing a closure when the log entry is expensive to compute
	Errorc StringClosure
)

type (
	// Warn is a log value that indicates level and how to interpret the interface slice
	Warn Value
	// Warnf is a log value that indicates level and how to interpret the interface slice
	Warnf Value
	// Wrn is a log type that is just one string
	Wrn string
	// Warnc is for passing a closure when the log entry is expensive to compute
	Warnc StringClosure
)

type (
	// Info is a log value that indicates level and how to interpret the interface slice
	Info Value
	// Infof is a log value that indicates level and how to interpret the interface slice
	Infof Value
	// Inf is a log type that is just one string
	Inf string
	// Infoc is for passing a closure when the log entry is expensive to compute
	Infoc StringClosure
)

type (
	// Debug is a log value that indicates level and how to interpret the interface slice
	Debug Value
	// Debugf is a log value that indicates level and how to interpret the interface slice
	Debugf Value
	// Dbg is a log type that is just one string
	Dbg string
	// Debugc is for passing a closure when the log entry is expensive to compute
	Debugc StringClosure
)

type (
	// Trace is a log value that indicates level and how to interpret the interface slice
	Trace Value
	// Tracef is a log value that indicates level and how to interpret the interface slice
	Tracef Value
	// Trc is a log type that is just one string
	Trc string
	// Tracec is for passing a closure when the log entry is expensive to compute
	Tracec StringClosure
)
// A SubSystem is a logger with a specific prefix name prepended to the entry.
type SubSystem struct {
	Name        string           // prefix prepended to each log entry
	Ch          chan interface{} // channel log entries are dispatched on
	Level       int              // numeric log level filter
	LevelString string           // textual form of Level
	MaxLen      int              // NOTE(review): presumably a padding/truncation width — confirm at call sites
	mutex       sync.Mutex
}

// Registry maps subsystem names to their SubSystem loggers.
type Registry map[string]*SubSystem
package field
import "gorm.io/gorm/clause"
// Float64 is a float64-typed query field helper built on gorm's clause
// package; each method produces a SQL expression against this column.
type Float64 Field

// Eq builds "column = value".
func (field Float64) Eq(value float64) Expr {
	return expr{e: clause.Eq{Column: field.RawExpr(), Value: value}}
}

// Neq builds "column <> value".
func (field Float64) Neq(value float64) Expr {
	return expr{e: clause.Neq{Column: field.RawExpr(), Value: value}}
}

// Gt builds "column > value".
func (field Float64) Gt(value float64) Expr {
	return expr{e: clause.Gt{Column: field.RawExpr(), Value: value}}
}

// Gte builds "column >= value".
func (field Float64) Gte(value float64) Expr {
	return expr{e: clause.Gte{Column: field.RawExpr(), Value: value}}
}

// Lt builds "column < value".
func (field Float64) Lt(value float64) Expr {
	return expr{e: clause.Lt{Column: field.RawExpr(), Value: value}}
}

// Lte builds "column <= value".
func (field Float64) Lte(value float64) Expr {
	return expr{e: clause.Lte{Column: field.RawExpr(), Value: value}}
}

// In builds "column IN (values...)".
func (field Float64) In(values ...float64) Expr {
	return expr{e: clause.IN{Column: field.RawExpr(), Values: field.toSlice(values...)}}
}

// NotIn builds "column NOT IN (values...)".
func (field Float64) NotIn(values ...float64) Expr {
	return expr{e: clause.Not(field.In(values...).expression())}
}

// Between builds "column BETWEEN left AND right".
func (field Float64) Between(left float64, right float64) Expr {
	return field.between([]interface{}{left, right})
}

// NotBetween builds "NOT (column BETWEEN left AND right)".
func (field Float64) NotBetween(left float64, right float64) Expr {
	return Not(field.Between(left, right))
}

// Like builds "column LIKE value".
// NOTE(review): LIKE on a numeric column is unusual; kept for API
// parity with the string field helpers.
func (field Float64) Like(value float64) Expr {
	return expr{e: clause.Like{Column: field.RawExpr(), Value: value}}
}

// NotLike builds "column NOT LIKE value".
func (field Float64) NotLike(value float64) Expr {
	return expr{e: clause.Not(field.Like(value).expression())}
}

// Add builds the arithmetic expression "column + value".
func (field Float64) Add(value float64) Float64 {
	return Float64{field.add(value)}
}

// Sub builds the arithmetic expression "column - value".
func (field Float64) Sub(value float64) Float64 {
	return Float64{field.sub(value)}
}

// Mul builds the arithmetic expression "column * value".
func (field Float64) Mul(value float64) Float64 {
	return Float64{field.mul(value)}
}

// Div builds the arithmetic expression "column / value".
func (field Float64) Div(value float64) Float64 {
	return Float64{field.div(value)}
}

// FloorDiv builds an integer (floor) division expression, yielding an
// Int field.
func (field Float64) FloorDiv(value float64) Int {
	return Int{field.floorDiv(value)}
}

// Floor builds a FLOOR(column) expression, yielding an Int field.
func (field Float64) Floor() Int {
	return Int{field.floor()}
}

// toSlice boxes the float64 values into []interface{} for clause.IN.
func (field Float64) toSlice(values ...float64) []interface{} {
	slice := make([]interface{}, len(values))
	for i, v := range values {
		slice[i] = v
	}
	return slice
}
// Float32 is the float32-typed counterpart of Float64; each method
// produces a SQL expression against this column.
type Float32 Float64

// Eq builds "column = value".
func (field Float32) Eq(value float32) Expr {
	return expr{e: clause.Eq{Column: field.RawExpr(), Value: value}}
}

// Neq builds "column <> value".
func (field Float32) Neq(value float32) Expr {
	return expr{e: clause.Neq{Column: field.RawExpr(), Value: value}}
}

// Gt builds "column > value".
func (field Float32) Gt(value float32) Expr {
	return expr{e: clause.Gt{Column: field.RawExpr(), Value: value}}
}

// Gte builds "column >= value".
func (field Float32) Gte(value float32) Expr {
	return expr{e: clause.Gte{Column: field.RawExpr(), Value: value}}
}

// Lt builds "column < value".
func (field Float32) Lt(value float32) Expr {
	return expr{e: clause.Lt{Column: field.RawExpr(), Value: value}}
}

// Lte builds "column <= value".
func (field Float32) Lte(value float32) Expr {
	return expr{e: clause.Lte{Column: field.RawExpr(), Value: value}}
}

// In builds "column IN (values...)".
func (field Float32) In(values ...float32) Expr {
	return expr{e: clause.IN{Column: field.RawExpr(), Values: field.toSlice(values...)}}
}

// NotIn builds "column NOT IN (values...)".
func (field Float32) NotIn(values ...float32) Expr {
	return expr{e: clause.Not(field.In(values...).expression())}
}

// Between builds "column BETWEEN left AND right".
func (field Float32) Between(left float32, right float32) Expr {
	return field.between([]interface{}{left, right})
}

// NotBetween builds "NOT (column BETWEEN left AND right)".
func (field Float32) NotBetween(left float32, right float32) Expr {
	return Not(field.Between(left, right))
}

// Like builds "column LIKE value".
// NOTE(review): LIKE on a numeric column is unusual; kept for API
// parity with the string field helpers.
func (field Float32) Like(value float32) Expr {
	return expr{e: clause.Like{Column: field.RawExpr(), Value: value}}
}

// NotLike builds "column NOT LIKE value".
func (field Float32) NotLike(value float32) Expr {
	return expr{e: clause.Not(field.Like(value).expression())}
}

// Add builds the arithmetic expression "column + value".
func (field Float32) Add(value float32) Float32 {
	return Float32{field.add(value)}
}

// Sub builds the arithmetic expression "column - value".
func (field Float32) Sub(value float32) Float32 {
	return Float32{field.sub(value)}
}

// Mul builds the arithmetic expression "column * value".
func (field Float32) Mul(value float32) Float32 {
	return Float32{field.mul(value)}
}

// Div builds the arithmetic expression "column / value".
func (field Float32) Div(value float32) Float32 {
	return Float32{field.div(value)}
}

// FloorDiv builds an integer (floor) division expression, yielding an
// Int field.
func (field Float32) FloorDiv(value float32) Int {
	return Int{field.floorDiv(value)}
}

// Floor builds a FLOOR(column) expression, yielding an Int field.
func (field Float32) Floor() Int {
	return Int{field.floor()}
}

// toSlice boxes the float32 values into []interface{} for clause.IN.
func (field Float32) toSlice(values ...float32) []interface{} {
	slice := make([]interface{}, len(values))
	for i, v := range values {
		slice[i] = v
	}
	return slice
}
package mat
// Add returns the element-wise sum of a and b. It fails with
// ErrDimMisMatch when the operands' dimensions differ.
func Add(a, b Matrix) (Matrix, error) {
	rows, cols := a.Size()[0], a.Size()[1]
	if bs := b.Size(); rows != bs[0] || cols != bs[1] {
		return Matrix{}, ErrDimMisMatch
	}
	sum := Zeros(rows, cols)
	for i := range sum {
		for j := range sum[i] {
			sum[i][j] = a[i][j] + b[i][j]
		}
	}
	return sum, nil
}
// Sub returns the element-wise difference a - b. It fails with
// ErrDimMisMatch when the operands' dimensions differ.
func Sub(a, b Matrix) (Matrix, error) {
	rows, cols := a.Size()[0], a.Size()[1]
	if bs := b.Size(); rows != bs[0] || cols != bs[1] {
		return Matrix{}, ErrDimMisMatch
	}
	diff := Zeros(rows, cols)
	for i := range diff {
		for j := range diff[i] {
			diff[i][j] = a[i][j] - b[i][j]
		}
	}
	return diff, nil
}
// Mldm stands for MATLAB Dot Multiply 'c = a .* b;': the element-wise
// (Hadamard) product. It fails with ErrDimMisMatch when the operands'
// dimensions differ.
func Mldm(a, b Matrix) (Matrix, error) {
	rows, cols := a.Size()[0], a.Size()[1]
	if bs := b.Size(); rows != bs[0] || cols != bs[1] {
		return Matrix{}, ErrDimMisMatch
	}
	prod := Zeros(rows, cols)
	for i := range prod {
		for j := range prod[i] {
			prod[i][j] = a[i][j] * b[i][j]
		}
	}
	return prod, nil
}
// Det returns the determinant of the matrix. Non-square input fails
// with ErrMatNotSq. 1x1 and 2x2 matrices are computed directly; larger
// ones go through LU decomposition via nxnDet.
func (m Matrix) Det() (float64, error) {
	var det float64
	var err error
	// Check if matrix is square
	if !m.IsSquare() {
		return det, ErrMatNotSq
	}
	// Get matrix dimensions
	size := m.Size()
	nRows := size[0]
	// calc determinant
	switch nRows {
	case 1:
		det = m[0][0]
	case 2:
		det = m[0][0]*m[1][1] - m[0][1]*m[1][0]
	default:
		det, err = nxnDet(m)
	}
	return det, err
}
// nxnDet computes the determinant of an n x n (n > 2) matrix as
// det(L)*det(U), where L and U come from the LU decomposition and each
// factor's determinant is the product of its diagonal.
func nxnDet(m Matrix) (float64, error) {
	// init determinants
	var det, lDet, uDet float64
	// calc LU
	l, u := m.LU()
	// det(L) = L.Diag().Prod()
	lDiag, err := l.Diag()
	if err != nil {
		return det, err
	}
	lDet = lDiag.Prod()
	// det(U) = U.Diag().Prod()
	uDiag, err := u.Diag()
	if err != nil {
		return det, err
	}
	uDet = uDiag.Prod()
	// det(a) = det(LU) = det(L)*det(U)
	det = lDet * uDet
	return det, nil
}
// LU Decomposition
func (m Matrix) LU() (Matrix, Matrix) {
// Initialize L and U
n := m.Size()[0]
l := Zeros(n, n)
u := Zeros(n, n)
val := 0.0
// Calculate LU
for i := 0; i < n; i++ {
for j := 0; j < n; j++ {
if j < i {
l[j][i] = 0.0
continue
}
l[j][i] = m[j][i]
for k := 0; k < i; k++ {
val = l[j][i] - l[j][k]*u[k][i]
l[j][i] = val
}
for j := 0; j < n; j++ {
if j < i {
u[i][j] = 0.0
} else if j == i {
u[i][j] = 1.0
} else {
val = m[i][j] / l[i][i]
u[i][j] = val
for k := 0; k < i; k++ {
val = u[i][j] - (l[i][j]*u[k][j])/l[i][i]
u[i][j] = val
}
}
}
}
}
return l, u
} | mat/math.go | 0.83868 | 0.528594 | math.go | starcoder |
package dyngeo
import (
"math"
"strconv"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/golang/geo/s2"
)
// GeoPoint is a latitude/longitude coordinate pair in degrees.
type GeoPoint struct {
	Latitude  float64
	Longitude float64
}

// GeoJSONAttribute is the GeoJSON-shaped value stored alongside a
// point item.
type GeoJSONAttribute struct {
	Type        string    `json:"type"`
	Coordinates []float64 `json:"coordinates"`
}

// newGeoJSONAttribute builds a "POINT" attribute for p. When lonFirst
// is true the coordinates are ordered [longitude, latitude] (GeoJSON
// order); otherwise [latitude, longitude].
func newGeoJSONAttribute(p GeoPoint, lonFirst bool) GeoJSONAttribute {
	first, second := p.Latitude, p.Longitude
	if lonFirst {
		first, second = p.Longitude, p.Latitude
	}
	return GeoJSONAttribute{
		Type:        "POINT",
		Coordinates: []float64{first, second},
	}
}
// PointInput identifies a single geo point item by its range key and
// coordinates.
type PointInput struct {
	RangeKeyValue string
	GeoPoint      GeoPoint
}

// GeoQueryInput wraps a DynamoDB QueryInput for geo queries.
type GeoQueryInput struct {
	QueryInput dynamodb.QueryInput
}

// GeoQueryOutput wraps a DynamoDB QueryOutput for geo queries.
type GeoQueryOutput struct {
	*dynamodb.QueryOutput
}

// BatchWritePointOutput wraps the DynamoDB batch-write result.
type BatchWritePointOutput struct {
	*dynamodb.BatchWriteItemOutput
}

// DeletePointInput describes a point deletion.
type DeletePointInput struct {
	PointInput
	DeleteItemInput dynamodb.DeleteItemInput
}

// DeletePointOutput wraps the DynamoDB delete result.
type DeletePointOutput struct {
	*dynamodb.DeleteItemOutput
}

// GetPointInput describes a point lookup.
type GetPointInput struct {
	PointInput
	GetItemInput dynamodb.GetItemInput
}

// GetPointOutput wraps the DynamoDB get result.
type GetPointOutput struct {
	*dynamodb.GetItemOutput
}

// PutPointInput describes a point insertion.
type PutPointInput struct {
	PointInput
	PutItemInput dynamodb.PutItemInput
}

// PutPointOutput wraps the DynamoDB put result.
type PutPointOutput struct {
	*dynamodb.PutItemOutput
}

// UpdatePointInput describes a point update.
type UpdatePointInput struct {
	PointInput
	UpdateItemInput dynamodb.UpdateItemInput
}

// UpdatePointOutput wraps the DynamoDB update result.
type UpdatePointOutput struct {
	*dynamodb.UpdateItemOutput
}

// QueryRadiusInput describes a radius query around CenterPoint.
type QueryRadiusInput struct {
	GeoQueryInput
	CenterPoint   GeoPoint
	RadiusInMeter int
}

// QueryRadiusOutput is the result of a radius query.
type QueryRadiusOutput struct {
	*GeoQueryOutput
}

// QueryRectangleInput describes a bounding-box query between MinPoint
// and MaxPoint (both must be non-nil for the rectangle to be built).
type QueryRectangleInput struct {
	GeoQueryInput
	MinPoint *GeoPoint
	MaxPoint *GeoPoint
}

// QueryRectangleOutput is the result of a rectangle query.
type QueryRectangleOutput struct {
	*GeoQueryOutput
}
// geoHashRange is an inclusive [rangeMin, rangeMax] interval of geo
// hashes (decimal representations of S2 cell ID bounds).
type geoHashRange struct {
	rangeMin uint64
	rangeMax uint64
}

// newGeoHashRange builds a range spanning [min, max].
func newGeoHashRange(min uint64, max uint64) geoHashRange {
	return geoHashRange{
		rangeMin: min,
		rangeMax: max,
	}
}
// tryMerge extends g in place to absorb r when the two ranges lie
// within MERGE_THRESHOLD of each other, and reports whether the merge
// happened.
//
// Bug fixed: the receiver was a value, so the widened bounds were
// written to a copy and silently discarded even though the method
// returned true. The ordering check now also runs before the uint64
// subtraction, which would otherwise wrap for overlapping or
// out-of-order ranges.
func (g *geoHashRange) tryMerge(r geoHashRange) bool {
	// r sits above g, close enough to bridge the gap.
	if r.rangeMin > g.rangeMax && r.rangeMin-g.rangeMax <= MERGE_THRESHOLD {
		g.rangeMax = r.rangeMax
		return true
	}
	// r sits below g, close enough to bridge the gap.
	if g.rangeMin > r.rangeMax && g.rangeMin-r.rangeMax <= MERGE_THRESHOLD {
		g.rangeMin = r.rangeMin
		return true
	}
	return false
}
// trySplit partitions the range into sub-ranges that each share a
// single hash key of hashKeyLength decimal digits (so every sub-range
// can be queried under one partition key). If the whole range already
// falls under one hash key it is returned unchanged.
func (g geoHashRange) trySplit(hashKeyLength int8) []geoHashRange {
	result := []geoHashRange{}
	minHashKey := generateHashKey(g.rangeMin, hashKeyLength)
	maxHashKey := generateHashKey(g.rangeMax, hashKeyLength)
	// denominator is 10^(digits dropped by the hash-key truncation);
	// multiplying a hash key by it recovers the low bound of the block
	// of geo hashes it covers.
	rangeMinHashString := strconv.FormatUint(g.rangeMin, 10)
	minHashKeyString := strconv.FormatUint(minHashKey, 10)
	denominator := uint64(math.Pow10(len(rangeMinHashString) - len(minHashKeyString)))
	if minHashKey == maxHashKey {
		result = append(result, g)
	} else {
		// Walk each hash-key block between the two endpoints, clamping
		// the first and last sub-ranges to the original bounds.
		for m := minHashKey; m <= maxHashKey; m++ {
			var min uint64
			var max uint64
			if m > 0 {
				if m == minHashKey {
					min = g.rangeMin
				} else {
					min = m * denominator
				}
				if m == maxHashKey {
					max = g.rangeMax
				} else {
					max = (m+1)*denominator - 1
				}
			} else {
				// NOTE(review): this branch mirrors signed-hash handling
				// from the source library; with uint64 keys, m == 0 can
				// only occur as the first block.
				if m == minHashKey {
					min = g.rangeMin
				} else {
					min = (m-1)*denominator + 1
				}
				if m == maxHashKey {
					max = g.rangeMax
				} else {
					max = m * denominator
				}
			}
			result = append(result, newGeoHashRange(min, max))
		}
	}
	return result
}
// S2

// covering is a set of S2 cells approximating a query region.
type covering struct {
	cellIDs []s2.CellID
}

// newCovering wraps the given cell IDs.
func newCovering(cellIDs []s2.CellID) covering {
	return covering{
		cellIDs: cellIDs,
	}
}

// getGeoHashRanges converts each cell's [RangeMin, RangeMax] ID span
// into geoHashRanges, split so that each piece falls under a single
// hash key of hashKeyLength digits.
func (c covering) getGeoHashRanges(hashKeyLength int8) []geoHashRange {
	ranges := []geoHashRange{}
	for _, cellID := range c.cellIDs {
		minRange := s2.CellID.RangeMin(cellID)
		maxRange := s2.CellID.RangeMax(cellID)
		gh := newGeoHashRange(uint64(minRange), uint64(maxRange))
		ranges = append(ranges, gh.trySplit(hashKeyLength)...)
	}
	return ranges
}

// generateGeoHash returns the leaf S2 cell ID containing geoPoint.
func generateGeoHash(geoPoint GeoPoint) s2.CellID {
	latLng := s2.LatLngFromDegrees(geoPoint.Latitude, geoPoint.Longitude)
	cell := s2.CellFromLatLng(latLng)
	return cell.ID()
}
// generateHashKey truncates geoHash to its hashKeyLength most
// significant decimal digits, e.g. generateHashKey(123456789, 3) == 123.
// If the hash has no more digits than requested, it is returned as-is.
//
// Changes: the original `geoHash < 0` branch could never fire (geoHash
// is uint64) and has been removed; a guard now prevents the
// divide-by-zero panic that occurred when hashKeyLength exceeded the
// digit count (math.Pow10 of a negative exponent truncates to 0).
func generateHashKey(geoHash uint64, hashKeyLength int8) uint64 {
	geoHashString := strconv.FormatUint(geoHash, 10)
	if int(hashKeyLength) >= len(geoHashString) {
		return geoHash
	}
	denominator := math.Pow10(len(geoHashString) - int(hashKeyLength))
	return geoHash / uint64(denominator)
}
// generateHashes returns both the full geo hash (the S2 leaf cell ID
// of p) and its truncated hash key of hashKeyLength digits.
func generateHashes(p GeoPoint, hashKeyLength int8) (uint64, uint64) {
	geoHash := uint64(generateGeoHash(p))
	hashKey := generateHashKey(geoHash, hashKeyLength)
	return geoHash, hashKey
}
// S2 Util

// EARTH_RADIUS_METERS is the mean earth radius used to convert angular
// distances to meters.
const EARTH_RADIUS_METERS = 6367000.0

// rectFromQueryRectangleInput builds the S2 lat/lng rectangle spanning
// the input's min and max corners, or nil when either corner is unset.
func rectFromQueryRectangleInput(input QueryRectangleInput) *s2.Rect {
	if input.MinPoint != nil && input.MaxPoint != nil {
		minLatLng := s2.LatLngFromDegrees(input.MinPoint.Latitude, input.MinPoint.Longitude)
		maxLatLng := s2.LatLngFromDegrees(input.MaxPoint.Latitude, input.MaxPoint.Longitude)
		rect := rectFromTwoLatLng(minLatLng, maxLatLng)
		return &rect
	}
	return nil
}
// boundingLatLngFromQueryRadiusInput builds an S2 rectangle bounding
// the radius query. It estimates the local meters-per-degree scale by
// measuring the distance to reference points one degree away in each
// axis (stepping toward the equator/prime meridian so the reference
// stays in range), then sizes the rectangle in degrees accordingly.
func boundingLatLngFromQueryRadiusInput(input QueryRadiusInput) *s2.Rect {
	centerLatLng := s2.LatLngFromDegrees(input.CenterPoint.Latitude, input.CenterPoint.Longitude)
	// One degree of latitude away from the center, stepping toward 0.
	latRefUnit := 1.0
	if input.CenterPoint.Latitude > 0 {
		latRefUnit = -1.0
	}
	latRef := s2.LatLngFromDegrees(input.CenterPoint.Latitude+latRefUnit, input.CenterPoint.Longitude)
	// One degree of longitude away from the center, stepping toward 0.
	lngRefUnit := 1.0
	if input.CenterPoint.Longitude > 0 {
		lngRefUnit = -1.0
	}
	lngRef := s2.LatLngFromDegrees(input.CenterPoint.Latitude, input.CenterPoint.Longitude+lngRefUnit)
	// Meters per degree along each axis at this location.
	latDistance := getEarthDistance(centerLatLng, latRef)
	lngDistance := getEarthDistance(centerLatLng, lngRef)
	radiusInMeter := float64(input.RadiusInMeter)
	// Radius converted to degrees along each axis.
	latForRadius := radiusInMeter / latDistance
	lngForRadius := radiusInMeter / lngDistance
	center := s2.LatLngFromDegrees(input.CenterPoint.Latitude, input.CenterPoint.Longitude)
	size := s2.LatLngFromDegrees(latForRadius, lngForRadius)
	rect := s2.RectFromCenterSize(center, size)
	return &rect
}
// getEarthDistance returns the great-circle distance between p1 and p2
// in meters, using the mean earth radius.
func getEarthDistance(p1 s2.LatLng, p2 s2.LatLng) float64 {
	return p1.Distance(p2).Radians() * EARTH_RADIUS_METERS
}

// rectFromTwoLatLng returns the minimal S2 rectangle containing both
// points.
func rectFromTwoLatLng(min s2.LatLng, max s2.LatLng) s2.Rect {
	bounder := s2.NewRectBounder()
	bounder.AddPoint(s2.PointFromLatLng(min))
	bounder.AddPoint(s2.PointFromLatLng(max))
	return bounder.RectBound()
}
package audio
import (
"fmt"
"io"
)
// InfiniteLoop represents a looped stream which never ends.
type InfiniteLoop struct {
	src     io.ReadSeeker
	lstart  int64 // loop start offset in bytes (= intro length)
	llength int64 // loop length in bytes
	pos     int64 // current position; -1 until lazily initialized by ensurePos
}
// NewInfiniteLoop creates a new infinite loop stream with a source stream and length in bytes.
func NewInfiniteLoop(src io.ReadSeeker, length int64) *InfiniteLoop {
	return NewInfiniteLoopWithIntro(src, 0, length)
}

// NewInfiniteLoopWithIntro creates a new infinite loop stream with an intro part.
// NewInfiniteLoopWithIntro accepts a source stream src, introLength in bytes and loopLength in bytes.
// Both lengths are truncated down to whole sample frames (multiples of
// bytesPerSample) so seeks always land on frame boundaries.
func NewInfiniteLoopWithIntro(src io.ReadSeeker, introLength int64, loopLength int64) *InfiniteLoop {
	return &InfiniteLoop{
		src:    src,
		lstart: introLength / bytesPerSample * bytesPerSample,
		llength: loopLength / bytesPerSample * bytesPerSample,
		pos:    -1, // resolved from the source on first use (see ensurePos)
	}
}
// length returns the total stream length (intro plus one loop pass) in
// bytes.
func (i *InfiniteLoop) length() int64 {
	return i.lstart + i.llength
}

// ensurePos lazily captures the source's current position on first use.
// The position must lie within [0, length()).
func (i *InfiniteLoop) ensurePos() error {
	if i.pos >= 0 {
		return nil
	}
	pos, err := i.src.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if pos >= i.length() {
		return fmt.Errorf("audio: stream position must be less than the specified length")
	}
	i.pos = pos
	return nil
}
// Read is implementation of ReadSeekCloser's Read. Each call reads at
// most up to the loop end; when the end (or a source EOF) is reached it
// seeks back to the loop start, so the stream itself never reports EOF.
func (i *InfiniteLoop) Read(b []byte) (int, error) {
	if err := i.ensurePos(); err != nil {
		return 0, err
	}
	// Clamp the read so it never crosses the loop end in one call.
	if i.pos+int64(len(b)) > i.length() {
		b = b[:i.length()-i.pos]
	}
	n, err := i.src.Read(b)
	i.pos += int64(n)
	if i.pos > i.length() {
		panic(fmt.Sprintf("audio: position must be <= length but not at (*InfiniteLoop).Read: pos: %d, length: %d", i.pos, i.length()))
	}
	if err != nil && err != io.EOF {
		return 0, err
	}
	// Wrap back to the loop start. A source EOF is treated the same as
	// hitting the configured loop end.
	if err == io.EOF || i.pos == i.length() {
		pos, err := i.Seek(i.lstart, io.SeekStart)
		if err != nil {
			return 0, err
		}
		i.pos = pos
	}
	return n, nil
}
// Seek is implementation of ReadSeekCloser's Seek. Offsets past the
// intro are wrapped modulo the loop length, so any non-negative target
// maps into [0, length()). io.SeekEnd is rejected because an infinite
// stream has no end.
func (i *InfiniteLoop) Seek(offset int64, whence int) (int64, error) {
	if err := i.ensurePos(); err != nil {
		return 0, err
	}
	next := int64(0)
	switch whence {
	case io.SeekStart:
		next = offset
	case io.SeekCurrent:
		next = i.pos + offset
	case io.SeekEnd:
		return 0, fmt.Errorf("audio: whence must be io.SeekStart or io.SeekCurrent for InfiniteLoop")
	}
	if next < 0 {
		return 0, fmt.Errorf("audio: position must >= 0")
	}
	// Fold positions beyond the intro back into the loop region.
	if next >= i.lstart {
		next = ((next - i.lstart) % i.llength) + i.lstart
	}
	// Ignore the new position returned by Seek since the source position might not be match with the position
	// managed by this.
	if _, err := i.src.Seek(next, io.SeekStart); err != nil {
		return 0, err
	}
	i.pos = next
	return i.pos, nil
}
package kvt
import (
"fmt"
"github.com/arr-ai/frozen/errors"
"github.com/arr-ai/frozen/internal/fu"
)
// leaf is a tree node holding one or two elements inline. data[1] ==
// zero marks a single-element leaf; data[0] is never zero in a valid
// leaf.
type leaf struct {
	data [2]elementT
}

// newLeaf builds a leaf from one or two elements; any other arity
// panics.
func newLeaf(data ...elementT) *leaf {
	switch len(data) {
	case 1:
		return newLeaf1(data[0])
	case 2:
		return newLeaf2(data[0], data[1])
	default:
		panic(errors.Errorf("data wrong size (%d) for leaf", len(data)))
	}
}

// newLeaf1 builds a single-element leaf.
func newLeaf1(a elementT) *leaf {
	return newLeaf2(a, zero)
}

// newLeaf2 builds a leaf holding a and b; b may be zero to denote a
// single element, but a must not be.
func newLeaf2(a, b elementT) *leaf {
	if a == zero {
		panic(errors.WTF)
	}
	return &leaf{data: [2]elementT{a, b}}
}
// Format implements fmt.Formatter, rendering the leaf as "(a)" or
// "(a,b)".
func (l *leaf) Format(f fmt.State, verb rune) {
	fu.WriteString(f, "(")
	if l.data[0] != zero {
		fu.Format(l.data[0], f, verb)
		if l.data[1] != zero {
			fu.WriteString(f, ",")
			fu.Format(l.data[1], f, verb)
		}
	}
	fu.WriteString(f, ")")
}

// String implements fmt.Stringer via the Format method above.
func (l *leaf) String() string {
	return fmt.Sprintf("%s", l)
}
// node

// Add merges v into the leaf in place: an element equal to v (per
// args.eq) is combined with args.f, an empty second slot absorbs v, and
// a third distinct element promotes the leaf to a twig (at maximum
// depth) or a branch. matches counts elements that compared equal.
func (l *leaf) Add(args *CombineArgs, v elementT, depth int, h hasher) (_ node, matches int) {
	switch {
	case args.eq(l.data[0], v):
		l.data[0] = args.f(l.data[0], v)
		matches++
	case l.data[1] == zero:
		l.data[1] = v
	case args.eq(l.data[1], v):
		l.data[1] = args.f(l.data[1], v)
		matches++
	case depth >= maxTreeDepth:
		return newTwig(l.data[0], l.data[1], v), 0
	default:
		return newBranchFrom(depth, l.data[0], l.data[1], v), 0
	}
	return l, matches
}

// Canonical returns the leaf itself: one- and two-element leaves are
// already in canonical form.
func (l *leaf) Canonical(depth int) node {
	return l
}
// Combine merges the elements of n into this leaf, combining equal
// pairs with args.f. Branches are handled by delegating to the branch
// with flipped args. For two leaves, lr packs both sizes (mask 1 = one
// element, 3 = two) into a single value so every size pairing is one
// switch case; the lr(3,1) case is normalised to lr(1,3) by swapping
// operands and flipping args so args.f keeps its argument order.
func (l *leaf) Combine(args *CombineArgs, n node, depth int) (_ node, matches int) { //nolint:cyclop
	switch n := n.(type) {
	case *branch:
		return n.Combine(args.Flip(), l, depth)
	case *leaf:
		lr := func(a, b int) int { return a<<2 | b }
		masks := lr(l.mask(), n.mask())
		if masks == lr(3, 1) {
			masks, l, n, args = lr(1, 3), n, l, args.Flip()
		}
		l0, l1 := l.data[0], l.data[1]
		n0, n1 := n.data[0], n.data[1]
		if args.eq(l0, n0) { //nolint:nestif
			// First elements match; resolve the remaining slots.
			r0 := args.f(l0, n0)
			matches++
			switch masks {
			case lr(1, 1):
				return newLeaf1(r0), matches
			case lr(1, 3):
				return newLeaf2(r0, n1), matches
			default:
				if args.eq(l1, n1) {
					matches++
					return newLeaf2(r0, args.f(l1, n1)), matches
				}
				return newBranchFrom(depth, r0, l1, n1), matches
			}
		} else {
			// First elements differ; try every remaining pairing
			// before falling back to a branch of all elements.
			switch masks {
			case lr(1, 1):
				return newLeaf2(l0, n0), matches
			case lr(1, 3):
				if args.eq(l0, n1) {
					matches++
					return newLeaf2(n0, args.f(l0, n1)), matches
				}
				return newBranchFrom(depth, l0, n0, n1), matches
			default:
				if args.eq(l1, n1) {
					matches++
					return newBranchFrom(depth, l0, n0, args.f(l1, n1)), matches
				}
				if args.eq(l0, n1) {
					r0 := args.f(l0, n1)
					matches++
					if args.eq(l1, n0) {
						matches++
						return newLeaf2(r0, args.f(l1, n0)), matches
					}
					return newBranchFrom(depth, r0, l1, n0), matches
				}
				if args.eq(l1, n0) {
					matches++
					return newBranchFrom(depth, l0, n1, args.f(l1, n0)), matches
				}
				return newBranchFrom(depth, l0, l1, n0, n1), matches
			}
		}
	default:
		panic(errors.WTF)
	}
}

// AppendTo appends the leaf's elements to dest, or returns nil when
// dest lacks the capacity (callers treat nil as "didn't fit" and fall
// back to another path).
func (l *leaf) AppendTo(dest []elementT) []elementT {
	data := l.slice()
	if len(dest)+len(data) > cap(dest) {
		return nil
	}
	return append(dest, data...)
}
// Difference removes from the leaf every element also present in n.
// Bits 0b01/0b10 of mask track which of the two slots survive.
func (l *leaf) Difference(args *EqArgs, n node, depth int) (_ node, matches int) {
	mask := l.mask()
	if n.Get(args, l.data[0], newHasher(l.data[0], depth)) != nil {
		matches++
		mask &^= 0b01
	}
	if l.data[1] != zero {
		if n.Get(args, l.data[1], newHasher(l.data[1], depth)) != nil {
			matches++
			mask &^= 0b10
		}
	}
	return l.where(mask), matches
}

// Empty reports whether the node is empty; a leaf always holds at
// least one element.
func (l *leaf) Empty() bool {
	return false
}

// Equal reports whether n is a leaf holding the same elements,
// regardless of slot order.
func (l *leaf) Equal(args *EqArgs, n node, depth int) bool {
	if n, is := n.(*leaf); is {
		lm, nm := l.mask(), n.mask()
		if lm != nm {
			return false
		}
		l0, l1 := l.data[0], l.data[1]
		n0, n1 := n.data[0], n.data[1]
		if lm == 1 && nm == 1 {
			return args.eq(l0, n0)
		}
		// Two elements each: compare in either pairing.
		return args.eq(l0, n0) && args.eq(l1, n1) ||
			args.eq(l0, n1) && args.eq(l1, n0)
	}
	return false
}

// Get returns a pointer into the leaf's storage for the element equal
// to v, or nil when absent. The hasher is unused at leaf level.
func (l *leaf) Get(args *EqArgs, v elementT, _ hasher) *elementT {
	for i, e := range l.slice() {
		if args.eq(e, v) {
			return &l.data[i]
		}
	}
	return nil
}
// Intersection keeps only the leaf elements that are also present in
// n. Bits 0b01/0b10 of mask mark the slots to keep.
func (l *leaf) Intersection(args *EqArgs, n node, depth int) (_ node, matches int) {
	mask := 0
	if n.Get(args, l.data[0], newHasher(l.data[0], depth)) != nil {
		matches++
		mask |= 0b01
	}
	if l.data[1] != zero {
		if n.Get(args, l.data[1], newHasher(l.data[1], depth)) != nil {
			matches++
			mask |= 0b10
		}
	}
	return l.where(mask), matches
}

// Iterator iterates over the leaf's one or two elements; the buffer
// argument is unused at leaf level.
func (l *leaf) Iterator([][]node) Iterator {
	return newSliceIterator(l.slice())
}

// Reduce applies r to the leaf's elements in a single step.
func (l *leaf) Reduce(_ NodeArgs, _ int, r func(values ...elementT) elementT) elementT {
	return r(l.slice()...)
}

// Remove deletes the element equal to v, mutating the leaf in place
// (unlike Without, which copies). Removing the only element yields a
// nil node.
func (l *leaf) Remove(args *EqArgs, v elementT, depth int, h hasher) (_ node, matches int) {
	if args.eq(l.data[0], v) {
		matches++
		if l.data[1] == zero {
			return nil, matches
		}
		// Shift the survivor into slot 0 to keep the invariant that
		// data[0] is non-zero.
		l.data = [2]elementT{l.data[1], zero}
	} else if l.data[1] != zero {
		if args.eq(l.data[1], v) {
			matches++
			l.data[1] = zero
		}
	}
	return l, matches
}
// SubsetOf reports whether every element of the leaf is present in n.
func (l *leaf) SubsetOf(args *EqArgs, n node, depth int) bool {
	a := l.data[0]
	h := newHasher(a, depth)
	if n.Get(args, a, h) == nil {
		return false
	}
	if b := l.data[1]; b != zero {
		h := newHasher(b, depth)
		if n.Get(args, b, h) == nil {
			return false
		}
	}
	return true
}

// Map applies f to each element and rebuilds via a Builder, since f
// may map distinct elements to equal ones (collapsing them). matches
// accumulates the size of the rebuilt tree.
func (l *leaf) Map(args *CombineArgs, _ int, f func(e elementT) elementT) (_ node, matches int) {
	var nb Builder
	for _, e := range l.slice() {
		nb.Add(args, f(e))
	}
	t := nb.Finish()
	matches += t.count
	return t.root, matches
}

// Vet checks leaf invariants, panicking on violation, and returns the
// element count.
func (l *leaf) Vet() int {
	if l.data[0] == zero {
		if l.data[1] != zero {
			panic(errors.Errorf("data only in leaf slot 1"))
		}
		panic(errors.Errorf("empty leaf"))
	}
	return l.count()
}
// Where keeps only the elements satisfying args.Pred. Bits 0b01/0b10
// of mask mark the slots to keep (mask starts at 0, so ^= acts as |=).
func (l *leaf) Where(args *WhereArgs, depth int) (_ node, matches int) {
	var mask int
	if args.Pred(l.data[0]) {
		matches++
		mask ^= 0b01
	}
	if l.data[1] != zero {
		if args.Pred(l.data[1]) {
			matches++
			mask ^= 0b10
		}
	}
	return l.where(mask), matches
}

// where materialises the subset of slots selected by mask: nil for
// none, a fresh single-element leaf where needed, or the receiver when
// it already matches the selection.
func (l *leaf) where(mask int) node {
	switch mask {
	case 0b00:
		return nil
	case 0b01:
		if l.data[1] == zero {
			return l
		}
		return newLeaf1(l.data[0])
	case 0b10:
		return newLeaf1(l.data[1])
	default:
		return l
	}
}

// With is the non-mutating counterpart of Add: it returns a new node
// containing v, combining with an equal element via args.f, or
// promoting to a twig/branch when a third distinct element arrives.
func (l *leaf) With(args *CombineArgs, v elementT, depth int, h hasher) (_ node, matches int) {
	switch {
	case args.eq(l.data[0], v):
		matches++
		return newLeaf2(args.f(l.data[0], v), l.data[1]), matches
	case l.data[1] == zero:
		return newLeaf2(l.data[0], v), 0
	case args.eq(l.data[1], v):
		matches++
		return newLeaf2(l.data[0], args.f(l.data[1], v)), matches
	case depth >= maxTreeDepth:
		return newTwig(append(l.data[:], v)...), matches
	default:
		return newBranchFrom(depth, l.data[0], l.data[1], v), matches
	}
}

// Without is the non-mutating counterpart of Remove: it returns a node
// with the element equal to v dropped (nil when the leaf empties).
func (l *leaf) Without(args *EqArgs, v elementT, depth int, h hasher) (_ node, matches int) {
	mask := l.mask()
	if args.eq(l.data[0], v) {
		matches++
		mask ^= 0b01
	} else if l.data[1] != zero {
		if args.eq(l.data[1], v) {
			matches++
			mask ^= 0b10
		}
	}
	return l.where(mask), matches
}
// count returns the number of elements held (1 or 2).
func (l *leaf) count() int {
	if l.data[1] == zero {
		return 1
	}
	return 2
}

// clone returns a shallow copy of the leaf.
func (l *leaf) clone() node {
	ret := *l
	return &ret
}

// mask returns a bitmask of occupied slots: 1 for one element, 3 for
// two.
func (l *leaf) mask() int {
	if l.data[1] == zero {
		return 1
	}
	return 3
}

// slice returns the occupied portion of the backing array.
func (l *leaf) slice() []elementT {
	return l.data[:l.count()]
}
package schemax
import "sync"
/*
DITContentRuleCollection describes all DITContentRules-based types.
*/
type DITContentRuleCollection interface {
	// Get returns the *DITContentRule instance retrieved as a result
	// of a term search, based on Name or OID. If no match is found,
	// nil is returned.
	Get(interface{}) *DITContentRule

	// Index returns the *DITContentRule instance stored at the nth
	// index within the receiver, or nil.
	Index(int) *DITContentRule

	// Equal performs a deep-equal between the receiver and the
	// interface DITContentRuleCollection provided.
	Equal(DITContentRuleCollection) bool

	// Set returns an error instance based on an attempt to add
	// the provided *DITContentRule instance to the receiver.
	Set(*DITContentRule) error

	// Contains returns the index number and presence boolean that
	// reflects the result of a term search within the receiver.
	Contains(interface{}) (int, bool)

	// String returns a properly-delimited sequence of string
	// values, either as a Name or OID, for the receiver type.
	String() string

	// Label returns the field name associated with the interface
	// types, or a zero string if no label is appropriate.
	Label() string

	// IsZero returns a boolean value indicative of whether the
	// receiver is considered zero, or undefined.
	IsZero() bool

	// Len returns an integer value indicative of the current
	// number of elements stored within the receiver.
	Len() int

	// SetSpecifier assigns a string value to all definitions within
	// the receiver. This value is used in cases where a definition
	// type name (e.g.: attributetype, objectclass, etc.) is required.
	// This value will be displayed at the beginning of the definition
	// value during the unmarshal or unsafe stringification process.
	SetSpecifier(string)

	// SetUnmarshaler assigns the provided DefinitionUnmarshaler
	// signature to all definitions within the receiver. The provided
	// function shall be executed during the unmarshal or unsafe
	// stringification process.
	SetUnmarshaler(DefinitionUnmarshaler)
}
/*
DITContentRule conforms to the specifications of RFC4512 Section 4.1.6. Boolean values, e.g: 'OBSOLETE', are supported internally and are not explicit fields.

The OID value of this type MUST match the OID of a known (and catalogued) STRUCTURAL *ObjectClass instance.
*/
type DITContentRule struct {
	OID         OID
	Name        Name
	Description Description
	Aux         ObjectClassCollection   // auxiliary classes permitted by the rule
	Must        AttributeTypeCollection // attributes required by the rule
	May         AttributeTypeCollection // attributes permitted by the rule
	Not         AttributeTypeCollection // attributes precluded by the rule
	Extensions  Extensions
	flags       definitionFlags       // internal booleans such as OBSOLETE
	ufn         DefinitionUnmarshaler // user-assigned unmarshal function
	spec        string                // definition type name prefix (see SetSpecifier)
	info        []byte                // user-assigned informational payload
}

/*
DITContentRules is a thread-safe collection of *DITContentRule slice instances.
*/
type DITContentRules struct {
	mutex  *sync.Mutex
	slice  collection
	macros *Macros // optional macro table for OID resolution
}
/*
Type returns the formal name of the receiver in order to satisfy signature requirements of the Definition interface type.
*/
func (r *DITContentRule) Type() string {
	return `DITContentRule`
}

/*
Equal performs a deep-equal between the receiver and the provided collection type.
*/
func (r DITContentRules) Equal(x DITContentRuleCollection) bool {
	return r.slice.equal(x.(*DITContentRules).slice)
}

/*
SetMacros assigns the *Macros instance to the receiver, allowing subsequent OID resolution capabilities during the addition of new slice elements.
*/
func (r *DITContentRules) SetMacros(macros *Macros) {
	r.macros = macros
}
/*
SetSpecifier is a convenience method that executes the SetSpecifier method in iterative fashion for all definitions within the receiver.
*/
func (r *DITContentRules) SetSpecifier(spec string) {
	for i := 0; i < r.Len(); i++ {
		r.Index(i).SetSpecifier(spec)
	}
}

/*
SetUnmarshaler is a convenience method that executes the SetUnmarshaler method in iterative fashion for all definitions within the receiver.
*/
func (r *DITContentRules) SetUnmarshaler(fn DefinitionUnmarshaler) {
	for i := 0; i < r.Len(); i++ {
		r.Index(i).SetUnmarshaler(fn)
	}
}
/*
SetInfo assigns the byte slice to the receiver. This is a user-leveraged field intended to allow arbitrary information (documentation?) to be assigned to the definition.
*/
func (r *DITContentRule) SetInfo(info []byte) {
	// NOTE(review): the slice is stored without copying, so the caller
	// retains write access to the backing array.
	r.info = info
}
/*
Info returns the assigned informational byte slice instance stored within the receiver.
*/
func (r *DITContentRule) Info() []byte {
	// Returned without copying; see the note on SetInfo.
	return r.info
}
/*
SetUnmarshaler assigns the provided DefinitionUnmarshaler signature value to the receiver. The provided function shall be executed during the unmarshal or unsafe stringification process.
*/
func (r *DITContentRule) SetUnmarshaler(fn DefinitionUnmarshaler) {
	r.ufn = fn
}
/*
Contains is a thread-safe method that returns a collection slice element index integer and a presence-indicative boolean value based on a term search conducted within the receiver.
*/
func (r DITContentRules) Contains(x interface{}) (int, bool) {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	// When a macro resolver is configured and resolves the input, search
	// with the resolved OID rather than the raw term.
	if !r.macros.IsZero() {
		if oid, resolved := r.macros.Resolve(x); resolved {
			return r.slice.contains(oid)
		}
	}
	return r.slice.contains(x)
}
/*
Index is a thread-safe method that returns the nth collection slice element if defined, else nil. This method supports use of negative indices which should be used with special care.
*/
func (r DITContentRules) Index(idx int) *DITContentRule {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	// A failed assertion (out-of-range index or foreign element type)
	// yields the nil zero value.
	assert, _ := r.slice.index(idx).(*DITContentRule)
	return assert
}
/*
Get combines Contains and Index method executions to return an entry based on a term search conducted within the receiver.
*/
func (r DITContentRules) Get(x interface{}) *DITContentRule {
	if idx, found := r.Contains(x); found {
		return r.Index(idx)
	}
	return nil
}
/*
Len is a thread-safe method that returns the effective length of the receiver slice collection.
*/
func (r DITContentRules) Len() int {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	return r.slice.len()
}
/*
String is a non-functional stringer method needed to satisfy interface type requirements and should not be used. There is no practical application for a list of dITContentRule names or object identifiers in this package.
*/
func (r DITContentRules) String() string { return `` }
/*
String is an unsafe convenience wrapper for Unmarshal(r). If an error is encountered, an empty string definition is returned. If reliability and error handling are important, use Unmarshal.
*/
func (r DITContentRule) String() (def string) {
	// Any error from unmarshal is discarded by design ("unsafe").
	def, _ = r.unmarshal()
	return
}
/*
SetSpecifier assigns a string value to the receiver, useful for placement into configurations that require a type name (e.g.: ditcontentrule). This will be displayed at the beginning of the definition value during the unmarshal or unsafe stringification process.
*/
func (r *DITContentRule) SetSpecifier(spec string) {
	r.spec = spec
}
/*
IsZero returns a boolean value indicative of whether the receiver is considered empty or uninitialized.
*/
func (r DITContentRules) IsZero() bool {
	// NOTE(review): unlike Len, this reads the slice without taking the
	// mutex — confirm callers tolerate the unsynchronized read.
	return r.slice.len() == 0
}
/*
IsZero returns a boolean value indicative of whether the receiver is considered empty or uninitialized.
*/
func (r *DITContentRule) IsZero() bool {
	// Only nil-ness is tested; a non-nil but empty definition is not "zero".
	return r == nil
}
/*
Set is a thread-safe append method that returns an error instance indicative of whether the append operation failed in some manner. Uniqueness is enforced for new elements based on Object Identifier and not the effective Name of the definition, if defined.

The existence check and the append now happen under a single lock
acquisition: the previous Contains-then-append sequence released the
mutex between the two steps, allowing two concurrent Set calls with the
same OID to both pass the check and insert duplicates.
*/
func (r *DITContentRules) Set(x *DITContentRule) error {
	// NOTE(review): a nil x panics on x.OID, exactly as before — confirm
	// callers never pass nil.
	r.mutex.Lock()
	defer r.mutex.Unlock()
	// Resolve macros the same way Contains would, but while already
	// holding the lock.
	var term interface{} = x.OID
	if !r.macros.IsZero() {
		if oid, resolved := r.macros.Resolve(term); resolved {
			term = oid
		}
	}
	if _, exists := r.slice.contains(term); exists {
		return nil //silent
	}
	return r.slice.append(x)
}
/*
Belongs returns a boolean value indicative of whether the provided AUXILIARY *ObjectClass belongs to the receiver instance of *DITContentRule.
*/
func (r *DITContentRule) Belongs(aux *ObjectClass) (belongs bool) {
	// Only a non-zero class of AUXILIARY kind can belong to the rule.
	if !aux.IsZero() && aux.Kind.is(Auxiliary) {
		_, belongs = r.Aux.Contains(aux)
	}
	return
}
/*
Requires returns a boolean value indicative of whether the provided value is required per the receiver.
*/
func (r *DITContentRule) Requires(x interface{}) (required bool) {
	// Only *AttributeType values can appear in the MUST clause.
	if at, ok := x.(*AttributeType); ok {
		_, required = r.Must.Contains(at)
	}
	return
}
/*
Permits returns a boolean value indicative of whether the provided value is allowed from use per the receiver.
*/
func (r *DITContentRule) Permits(x interface{}) (permitted bool) {
	// Only *AttributeType values can appear in the MAY clause.
	if at, ok := x.(*AttributeType); ok {
		_, permitted = r.May.Contains(at)
	}
	return
}
/*
Prohibits returns a boolean value indicative of whether the provided value is prohibited from use per the receiver.
*/
func (r *DITContentRule) Prohibits(x interface{}) (prohibited bool) {
	// Only *AttributeType values can appear in the NOT clause.
	if at, ok := x.(*AttributeType); ok {
		_, prohibited = r.Not.Contains(at)
	}
	return
}
/*
Equal performs a deep-equal between the receiver and the provided definition type.
Description text is ignored.
*/
func (r *DITContentRule) Equal(x interface{}) (equals bool) {
	z, ok := x.(*DITContentRule)
	if !ok {
		return
	}
	// Two nil definitions are equal; a nil and a non-nil are not.
	if z.IsZero() || r.IsZero() {
		equals = z.IsZero() && r.IsZero()
		return
	}
	// Compare every clause except DESC; && short-circuits in the same
	// order as the original cascaded checks.
	equals = r.OID.Equal(z.OID) &&
		r.Name.Equal(z.Name) &&
		r.Aux.Equal(z.Aux) &&
		r.Must.Equal(z.Must) &&
		r.May.Equal(z.May) &&
		r.Not.Equal(z.Not) &&
		r.Extensions.Equal(z.Extensions)
	return
}
/*
NewDITContentRules initializes and returns a new DITContentRuleCollection interface object.
*/
func NewDITContentRules() DITContentRuleCollection {
	// Returning the concrete type directly lets the compiler verify that
	// *DITContentRules satisfies DITContentRuleCollection, replacing the
	// runtime interface{} assertion the generated code performed.
	return &DITContentRules{
		mutex: &sync.Mutex{},
		slice: make(collection, 0),
	}
}
/*
Validate returns an error that reflects any fatal condition observed regarding the receiver configuration.
*/
func (r *DITContentRule) Validate() (err error) {
	return r.validate()
}

// validate is the unexported worker behind Validate. It currently only
// rejects a nil receiver and an invalid Description value.
func (r *DITContentRule) validate() (err error) {
	if r.IsZero() {
		return raise(isZero, "%T.validate", r)
	}
	if err = validateDesc(r.Description); err != nil {
		return
	}
	return
}
// unmarshal renders the receiver as a definition string, delegating to
// the user-assigned unmarshal function when one is set and to
// unmarshalBasic otherwise. Validation failures abort the render.
func (r *DITContentRule) unmarshal() (string, error) {
	if err := r.validate(); err != nil {
		return ``, raise(invalidUnmarshal, err.Error())
	}
	if fn := r.ufn; fn != nil {
		return fn(r)
	}
	return r.unmarshalBasic()
}
/*
Map is a convenience method that returns a map[string][]string instance containing the effective contents of the receiver.

Keys produced (when applicable): RAW, OID, TYPE, INFO, NAME, DESC, AUX,
MUST, MAY, NOT, OBSOLETE, plus any X- extension keys. For AUX/MUST/MAY/NOT
members, the first Name is preferred and the numeric OID is used when no
name is set. A nil map is returned if validation fails.
*/
func (r *DITContentRule) Map() (def map[string][]string) {
	if err := r.Validate(); err != nil {
		return
	}
	def = make(map[string][]string, 14)
	def[`RAW`] = []string{r.String()}
	def[`OID`] = []string{r.OID.String()}
	def[`TYPE`] = []string{r.Type()}
	if len(r.info) > 0 {
		def[`INFO`] = []string{string(r.info)}
	}
	if !r.Name.IsZero() {
		def[`NAME`] = make([]string, 0)
		for i := 0; i < r.Name.Len(); i++ {
			def[`NAME`] = append(def[`NAME`], r.Name.Index(i))
		}
	}
	if len(r.Description) > 0 {
		def[`DESC`] = []string{r.Description.String()}
	}
	if !r.Aux.IsZero() {
		def[`AUX`] = make([]string, 0)
		for i := 0; i < r.Aux.Len(); i++ {
			aux := r.Aux.Index(i)
			term := aux.Name.Index(0)
			if len(term) == 0 {
				term = aux.OID.String()
			}
			def[`AUX`] = append(def[`AUX`], term)
		}
	}
	if !r.Must.IsZero() {
		def[`MUST`] = make([]string, 0)
		for i := 0; i < r.Must.Len(); i++ {
			must := r.Must.Index(i)
			term := must.Name.Index(0)
			if len(term) == 0 {
				term = must.OID.String()
			}
			def[`MUST`] = append(def[`MUST`], term)
		}
	}
	if !r.May.IsZero() {
		def[`MAY`] = make([]string, 0)
		for i := 0; i < r.May.Len(); i++ {
			// Previously named `must` — a copy/paste artifact from the
			// MUST loop above; renamed for clarity (no behavior change).
			may := r.May.Index(i)
			term := may.Name.Index(0)
			if len(term) == 0 {
				term = may.OID.String()
			}
			def[`MAY`] = append(def[`MAY`], term)
		}
	}
	if !r.Not.IsZero() {
		def[`NOT`] = make([]string, 0)
		for i := 0; i < r.Not.Len(); i++ {
			not := r.Not.Index(i)
			term := not.Name.Index(0)
			if len(term) == 0 {
				term = not.OID.String()
			}
			def[`NOT`] = append(def[`NOT`], term)
		}
	}
	if !r.Extensions.IsZero() {
		for k, v := range r.Extensions {
			def[k] = v
		}
	}
	if r.Obsolete() {
		def[`OBSOLETE`] = []string{`TRUE`}
	}
	return def
}
/*
DITContentRuleUnmarshaler is a package-included function that honors the signature of the first class (closure) DefinitionUnmarshaler type.
The purpose of this function, and similar user-devised ones, is to unmarshal a definition with specific formatting included, such as linebreaks, leading specifier declarations and indenting.
*/
func DITContentRuleUnmarshaler(x interface{}) (def string, err error) {
	// Accept only a non-nil *DITContentRule.
	var r *DITContentRule
	switch tv := x.(type) {
	case *DITContentRule:
		if tv.IsZero() {
			err = raise(isZero, "%T is nil", tv)
			return
		}
		r = tv
	default:
		err = raise(unexpectedType,
			"Bad type for unmarshal (%T)", tv)
		return
	}
	// Multi-line layout: each clause begins on a new, tab-indented line.
	var (
		WHSP string = ` `
		idnt string = "\n\t"
		head string = `(`
		tail string = `)`
	)
	// An assigned specifier (e.g. `ditcontentrule`) precedes the opening paren.
	if len(r.spec) > 0 {
		head = r.spec + WHSP + head
	}
	def += head + WHSP + r.OID.String()
	// Emit each optional clause in definition order, skipping unset ones.
	if !r.Name.IsZero() {
		def += idnt + r.Name.Label()
		def += WHSP + r.Name.String()
	}
	if !r.Description.IsZero() {
		def += idnt + r.Description.Label()
		def += WHSP + r.Description.String()
	}
	if r.Obsolete() {
		def += idnt + Obsolete.String()
	}
	if !r.Aux.IsZero() {
		def += idnt + r.Aux.Label()
		def += WHSP + r.Aux.String()
	}
	if !r.Must.IsZero() {
		def += idnt + r.Must.Label()
		def += WHSP + r.Must.String()
	}
	if !r.May.IsZero() {
		def += idnt + r.May.Label()
		def += WHSP + r.May.String()
	}
	if !r.Not.IsZero() {
		def += idnt + r.Not.Label()
		def += WHSP + r.Not.String()
	}
	if !r.Extensions.IsZero() {
		def += idnt + r.Extensions.String()
	}
	def += WHSP + tail
	return
}
func (r *DITContentRule) unmarshalBasic() (def string, err error) {
var (
WHSP string = ` `
head string = `(`
tail string = `)`
)
if len(r.spec) > 0 {
head = r.spec + WHSP + head
}
def += head + WHSP + r.OID.String()
if !r.Name.IsZero() {
def += WHSP + r.Name.Label()
def += WHSP + r.Name.String()
}
if !r.Description.IsZero() {
def += WHSP + r.Description.Label()
def += WHSP + r.Description.String()
}
if r.Obsolete() {
def += WHSP + Obsolete.String()
}
if !r.Aux.IsZero() {
def += WHSP + r.Aux.Label()
def += WHSP + r.Aux.String()
}
if !r.Must.IsZero() {
def += WHSP + r.Must.Label()
def += WHSP + r.Must.String()
}
if !r.May.IsZero() {
def += WHSP + r.May.Label()
def += WHSP + r.May.String()
}
if !r.Not.IsZero() {
def += WHSP + r.Not.Label()
def += WHSP + r.Not.String()
}
if !r.Extensions.IsZero() {
def += WHSP + r.Extensions.String()
}
def += WHSP + tail
return
} | dcr.go | 0.788013 | 0.460592 | dcr.go | starcoder |
package doublearray
import (
	"fmt"
	"unsafe"
)
// DoubleArray implements an associative array whose key is a string and value is int.
// The data structure is based on a double-array minimal-prefix trie.
type DoubleArray struct {
	array    []node // BASE/CHECK slots forming the trie transitions
	tail     []byte // key suffixes, each followed by a terminator and a 4-byte value
	numKeys  int    // number of keys supplied at Build time
	numNodes int    // number of occupied BASE/CHECK slots, including the root
}
// Build returns a DoubleArray object built from sorted key strings and associated values.
// Key duplication and empty key are not allowed.
// NULL character byte(0) must not be included since it is used for the terminator.
//
// NOTE(review): each value is serialized into four TAIL bytes (see
// builder.arrange and DoubleArray.getValue), so only the low 32 bits of a
// value round-trip and negative values are not recovered on 64-bit
// platforms — confirm callers restrict values to [0, 1<<32).
func Build(keys []string, values []int) (*DoubleArray, error) {
	if len(keys) == 0 {
		return nil, fmt.Errorf("keys must not be empty")
	}
	if len(keys) != len(values) {
		// Error strings are lowercase per Go convention.
		return nil, fmt.Errorf("the size of keys must be equal to that of values")
	}
	b := builder{keys: keys, values: values}
	b.init()
	if err := b.arrange(0, len(keys), 0, 0); err != nil {
		return nil, err
	}
	b.finish()
	// Count occupied slots: an in-use node has check >= 0. Index 0 is the
	// root, whose check is forced negative by finish, so count it manually.
	numNodes := 1 // 1 is for the root
	for i := 1; i < len(b.array); i++ {
		if b.array[i].check >= 0 {
			numNodes++
		}
	}
	return &DoubleArray{array: b.array, tail: b.tail, numKeys: len(keys), numNodes: numNodes}, nil
}
// NumKeys returns the number of keys stored.
func (da *DoubleArray) NumKeys() int {
	return da.numKeys
}

// NumNodes returns the number of nodes.
func (da *DoubleArray) NumNodes() int {
	return da.numNodes
}

// ArrayLen returns the length of the BASE/CHECK array.
func (da *DoubleArray) ArrayLen() int {
	return len(da.array)
}

// TailLen returns the length of the TAIL array.
func (da *DoubleArray) TailLen() int {
	return len(da.tail)
}

// AllocBytes returns the allocated size in bytes.
func (da *DoubleArray) AllocBytes() int {
	// Each node holds two platform-sized ints (16 bytes on 64-bit
	// targets), so the previous fixed ArrayLen()*8 undercounted by half
	// there; measure the element size instead.
	return da.ArrayLen()*int(unsafe.Sizeof(node{})) + da.TailLen()
}
// Lookup returns the associated value with the given key if found.
// If NULL character is included in the given key, this behavior is invalid.
func (da *DoubleArray) Lookup(key string) (int, bool) {
	npos := 0 // current node position; 0 is the root
	depth := 0
	// Walk the BASE/CHECK array one key byte at a time. A transition from
	// node npos via byte c lands at base(npos)^c and is valid only when
	// that slot's CHECK points back to npos.
	for ; depth < len(key); depth++ {
		if da.array[npos].base < 0 {
			// Negative BASE: the remaining key bytes live in TAIL.
			break
		}
		cpos := da.array[npos].base ^ int(key[depth])
		if da.array[cpos].check != npos {
			return 0, false
		}
		npos = cpos
	}
	if da.array[npos].base >= 0 {
		// Key exhausted inside the trie: accept only if a terminator edge
		// exists here (XOR with terminator==0 is the identity).
		cpos := da.array[npos].base // ^ int(terminator)
		if da.array[cpos].check != npos {
			return 0, false
		}
		// A terminator child stores the value directly in its BASE field.
		return da.array[cpos].base, true
	}
	// Compare the key remainder against the TAIL suffix at -BASE.
	tpos := -da.array[npos].base
	for ; depth < len(key); depth++ {
		if da.tail[tpos] != key[depth] {
			return 0, false
		}
		tpos++
	}
	if da.tail[tpos] != terminator {
		return 0, false
	}
	// The 4-byte value follows the suffix terminator.
	return da.getValue(tpos + 1), true
}
// PrefixLookup returns the keys and associated values included as prefixes of the given key.
// If NULL character is included in the given key, this behavior is invalid.
func (da *DoubleArray) PrefixLookup(key string) ([]string, []int) {
	keys := make([]string, 0)
	values := make([]int, 0)
	npos := 0 // current node position; 0 is the root
	depth := 0
	for ; depth < len(key); depth++ {
		if da.array[npos].base < 0 {
			// Remaining bytes live in TAIL; handled after the loop.
			break
		}
		base := da.array[npos].base
		// A terminator child here means key[:depth] is itself a stored
		// key; record it before descending further.
		if da.array[base].check == npos {
			keys = append(keys, key[:depth])
			values = append(values, da.array[base].base)
		}
		cpos := base ^ int(key[depth])
		if da.array[cpos].check != npos {
			return keys, values
		}
		npos = cpos
	}
	base := da.array[npos].base
	if base >= 0 {
		// Key fully consumed inside the trie; record it if a terminator
		// edge ends a key exactly here.
		if da.array[base].check == npos {
			keys = append(keys, key[:depth])
			values = append(values, da.array[base].base)
		}
		return keys, values
	}
	// Match the key remainder against the TAIL suffix; only an exact,
	// terminator-ended match contributes one more result.
	tpos := -base
	for ; depth < len(key); depth++ {
		if da.tail[tpos] != key[depth] {
			return keys, values
		}
		tpos++
	}
	if da.tail[tpos] == terminator {
		keys = append(keys, key[:depth])
		values = append(values, da.getValue(tpos+1))
	}
	return keys, values
}
// PredictiveLookup returns the keys and associated values starting with prefixes of the given key.
// If NULL character is included in the given key, this behavior is invalid.
func (da *DoubleArray) PredictiveLookup(key string) ([]string, []int) {
	matchedKeys := make([]string, 0, da.numKeys)
	matchedVals := make([]int, 0, da.numKeys)
	// Descend to the node reached by consuming the whole key; any failed
	// transition means no stored key starts with the given prefix.
	pos := 0
	for depth := 0; depth < len(key); depth++ {
		base := da.array[pos].base
		if base < 0 {
			return matchedKeys, matchedVals
		}
		child := base ^ int(key[depth])
		if da.array[child].check != pos {
			return matchedKeys, matchedVals
		}
		pos = child
	}
	// Enumerate the entire subtrie rooted at the prefix node.
	return da.enumerate(pos, len(key), []byte(key), matchedKeys, matchedVals)
}
// getValue decodes the 4-byte little-endian value stored at TAIL offset tpos.
func (da *DoubleArray) getValue(tpos int) int {
	v := 0
	for i := 3; i >= 0; i-- {
		v = v<<8 | int(da.tail[tpos+i])
	}
	return v
}
// enumerate performs a depth-first traversal from node npos, appending
// every key (and its value) in that subtrie to keys/values. decoded holds
// the key bytes accumulated so far; depth is its logical length at npos.
func (da *DoubleArray) enumerate(npos int, depth int, decoded []byte, keys []string, values []int) ([]string, []int) {
	if da.array[npos].base < 0 {
		// Leaf: the remainder of the key is stored in TAIL, followed by
		// the terminator and the 4-byte value.
		tpos := -da.array[npos].base
		for da.tail[tpos] != byte(0) {
			decoded = append(decoded, da.tail[tpos])
			tpos++
		}
		keys = append(keys, string(decoded))
		values = append(values, da.getValue(tpos+1))
		return keys, values
	}
	base := da.array[npos].base
	// Terminator edge (XOR with 0 is the identity): a key ends at npos
	// itself, with its value stored in the child's BASE field.
	cpos := base // ^ int(terminator)
	if da.array[cpos].check == npos {
		keys = append(keys, string(decoded))
		values = append(values, da.array[cpos].base)
	}
	// Probe every possible byte label; CHECK identifies real children.
	for c := 1; c < 256; c++ {
		decoded = decoded[:depth] // rewind to this node's prefix
		cpos = da.array[npos].base ^ c
		if da.array[cpos].check == npos {
			decoded = append(decoded, byte(c))
			keys, values = da.enumerate(cpos, depth+1, decoded, keys, values)
		}
	}
	return keys, values
}
const (
	// terminator marks end-of-key, both as a trie edge label and inside TAIL.
	terminator = byte(0)
)

// node is one BASE/CHECK slot. For an occupied trie node, base is either
// a non-negative offset XORed with edge labels to reach children, or a
// negative value whose negation indexes a key suffix in TAIL; check holds
// the parent's position. During construction, empty slots reuse both
// fields as negated free-list links (see builder.init).
type node struct {
	base, check int
}

// builder holds the mutable state used while constructing a DoubleArray
// from sorted keys and their associated values.
type builder struct {
	array  []node   // BASE/CHECK array under construction
	tail   []byte   // TAIL array under construction
	keys   []string // sorted input keys
	values []int    // values parallel to keys
}
// init seeds the BASE/CHECK array with 256 slots and links slots 1..255
// into a circular doubly-linked free list of empty slots: for an empty
// slot i, -base(i) is the next empty slot and -check(i) is the previous
// one. array[0] is the root; its check field is borrowed to hold the head
// of the free list (0 when the list is empty).
func (b *builder) init() {
	// Round the initial capacity up to a power of two >= len(keys),
	// never below 256.
	capa := 256
	for capa < len(b.keys) {
		capa <<= 1
	}
	array := make([]node, 256, capa)
	// TAIL index 0 is left unused so that a stored -base is never zero.
	tail := make([]byte, 1, capa)
	for i := 1; i < 256; i++ {
		array[i].base = -(i + 1)
		array[i].check = -(i - 1)
	}
	array[255].base = -1  // wrap: next of the last empty slot is 1
	array[1].check = -255 // wrap: prev of the first empty slot is 255
	array[0].check = 1    // head empty
	b.array = array
	b.tail = tail
}

// finish invalidates the free-list head stored in array[0].check so that
// lookups can never mistake the root for a child of another node.
func (b *builder) finish() {
	b.array[0].check = -1 // To avoid traversal to the root
}
// enlarge grows the BASE/CHECK array by 256 slots and splices the new
// slots into the circular empty-slot free list (or starts a new ring if
// the list was empty). See builder.init for the free-list encoding.
func (b *builder) enlarge() {
	oldLen := len(b.array)
	newLen := oldLen + 256
	for i := oldLen; i < newLen; i++ {
		b.array = append(b.array, node{base: -(i + 1), check: -(i - 1)})
	}
	if b.array[0].check == 0 {
		// Free list was empty: the new block becomes the whole ring.
		b.array[oldLen].check = -(newLen - 1) // prev
		b.array[newLen-1].base = -oldLen      // next
		b.array[0].check = oldLen
	} else {
		// Splice the new block between the current ring tail and head.
		empHead := b.array[0].check
		empTail := -b.array[empHead].check
		b.array[oldLen].check = -empTail
		b.array[empTail].base = -oldLen
		b.array[empHead].check = -(newLen - 1)
		b.array[newLen-1].base = -empHead
	}
}

// fix unlinks slot npos from the empty-slot free list (it is about to be
// occupied) and advances the list head if npos was the head.
func (b *builder) fix(npos int) {
	next := -b.array[npos].base
	prev := -b.array[npos].check
	b.array[next].check = -prev
	b.array[prev].base = -next
	if npos == b.array[0].check {
		if next == npos {
			b.array[0].check = 0 // the ring held a single slot; list is now empty
		} else {
			b.array[0].check = next
		}
	}
}
// arrange recursively builds the subtrie covering the sorted key range
// keys[bpos:epos], all of which share a common prefix of length depth,
// rooted at node npos. It returns an error on unsorted input, duplicate
// keys, or embedded NULL bytes.
func (b *builder) arrange(bpos, epos, depth, npos int) error {
	if bpos+1 == epos {
		// Single remaining key: store its suffix, the terminator and the
		// 4-byte little-endian value in TAIL; a negative BASE points there.
		b.array[npos].base = -len(b.tail)
		for ; depth < len(b.keys[bpos]); depth++ {
			if b.keys[bpos][depth] == terminator {
				return fmt.Errorf("keys must not include NULL terminator byte(0)")
			}
			b.tail = append(b.tail, b.keys[bpos][depth])
		}
		b.tail = append(b.tail, terminator)
		val := b.values[bpos]
		for i := 0; i < 4; i++ {
			b.tail = append(b.tail, byte(val%256))
			val >>= 8
		}
		return nil
	}
	// Collect the distinct byte labels present at this depth, plus a
	// terminator edge when one key ends exactly here (it must be the
	// first key of the range, since the input is sorted).
	edges := make([]byte, 0)
	isPrefix := len(b.keys[bpos]) == depth
	if isPrefix {
		bpos++
		if len(b.keys[bpos]) == depth {
			return fmt.Errorf("Key duplication is not allowed")
		}
		edges = append(edges, terminator)
	}
	c := b.keys[bpos][depth]
	for i := bpos + 1; i < epos; i++ {
		c2 := b.keys[i][depth]
		if c != c2 {
			if c2 < c {
				return fmt.Errorf("keys must be sorted in lex order")
			}
			if c == terminator {
				return fmt.Errorf("keys must not include NULL terminator byte(0)")
			}
			edges = append(edges, c)
			c = c2
		}
	}
	if c == terminator {
		return fmt.Errorf("keys must not include NULL terminator byte(0)")
	}
	edges = append(edges, c)
	// Find a BASE such that base^c is free for every edge label, claim
	// those slots, and point their CHECK fields back at npos.
	base := b.xcheck(edges)
	if len(b.array) <= base {
		b.enlarge()
	}
	b.array[npos].base = base
	for _, c := range edges {
		cpos := base ^ int(c)
		b.fix(cpos)
		b.array[cpos].check = npos
	}
	if isPrefix {
		// The terminator child stores the value of the key ending here
		// directly in its BASE field (XOR with terminator==0 is identity).
		cpos := base // ^ int(terminator)
		b.array[cpos].base = b.values[bpos-1]
	}
	// Recurse into each child's key sub-range.
	i := bpos
	c = b.keys[bpos][depth]
	for j := bpos + 1; j < epos; j++ {
		c2 := b.keys[j][depth]
		if c != c2 {
			err := b.arrange(i, j, depth+1, base^int(c))
			if err != nil {
				return err
			}
			i = j
			c = c2
		}
	}
	return b.arrange(i, epos, depth+1, base^int(c))
}
// xcheck searches the empty-slot free list for a BASE value such that
// base^c lands on a free slot for every edge label. If the list is empty
// or no candidate fits, it falls back to a BASE just past the current
// array end (forcing a subsequent enlarge).
func (b *builder) xcheck(edges []byte) int {
	empHead := b.array[0].check
	if empHead == 0 {
		return len(b.array) ^ int(edges[0])
	}
	i := empHead
	for {
		// Candidate BASE that makes the first edge land exactly on free slot i.
		base := i ^ int(edges[0])
		if b.isTarget(base, edges) {
			return base
		}
		i = -b.array[i].base // advance along the free list
		if i == empHead {
			break // wrapped around: no fit among the free slots
		}
	}
	return len(b.array) ^ int(edges[0])
}
func (b *builder) isTarget(base int, edges []byte) bool {
for _, c := range edges {
i := base ^ int(c)
if b.array[i].check >= 0 {
return false
}
}
return true
} | doublearray.go | 0.762954 | 0.656452 | doublearray.go | starcoder |
// NOTE: stray dataset-viewer text ("Subsets and Splits", "No community
// queries yet", ...) removed — extraction residue, not part of this source.