code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package myers
// Myer's diff algorithm in golang
// Ported from https://blog.robertelder.org/diff-algorithm/
// OpType identifies the kind of edit operation produced by a diff.
type OpType int

const (
    OpDelete OpType = iota // element removed from the old list
    OpInsert               // element inserted from the new list
)

// Op describes a single edit operation in the script returned by Diff.
type Op struct {
    OpType OpType      // Insert or delete, as above
    OldPos int         // Position in the old list of item to be inserted or deleted
    NewPos int         // Position in the _new_ list of item to be inserted
    Elem   interface{} // Actual value to be inserted or deleted
}
// Diff returns a minimal list of differences between 2 lists e and f,
// requiring O(min(len(e),len(f))) space and O(min(len(e),len(f)) * D)
// worst-case execution time where D is the number of differences.
// equals reports whether two elements are considered identical.
func Diff(e, f []interface{}, equals func(interface{}, interface{}) bool) []Op {
    return diffInternal(e, f, equals, 0, 0)
}
// diffInternal implements the linear-space "middle snake" variant of
// Myers' diff algorithm. i and j are the offsets of e and f within the
// original input lists, so that reported Op positions are absolute.
func diffInternal(e, f []interface{}, equals func(interface{}, interface{}) bool, i, j int) []Op {
    N := len(e)
    M := len(f)
    L := N + M
    // Z is the ring-buffer size for the two search frontiers.
    Z := 2*min(N, M) + 2
    if N > 0 && M > 0 {
        w := N - M
        g := make([]int, Z) // forward search frontier
        p := make([]int, Z) // backward search frontier
        hMax := ((L/2 + toInt(L%2 != 0)) + 1)
        for h := 0; h < hMax; h++ {
            // r == 0 extends the forward search, r == 1 the backward one.
            for r := 0; r < 2; r++ {
                var c, d []int
                var o, m int
                if r == 0 {
                    c = g
                    d = p
                    o = 1
                    m = 1
                } else {
                    c = p
                    d = g
                    o = 0
                    m = -1
                }
                kMin := -(h - 2*max(0, h-M))
                kMax := h - 2*max(0, h-N) + 1
                for k := kMin; k < kMax; k += 2 {
                    // Choose whether to extend from the k-1 or k+1 diagonal.
                    var a int
                    if k == -h || k != h && c[absmod((k-1), Z)] < c[absmod((k+1), Z)] {
                        a = c[absmod((k+1), Z)]
                    } else {
                        a = c[absmod((k-1), Z)] + 1
                    }
                    b := a - k
                    s, t := a, b
                    // Follow the "snake": skip over elements that match.
                    for a < N && b < M && equals(e[(1-o)*N+m*a+(o-1)], f[(1-o)*M+m*b+(o-1)]) {
                        a, b = a+1, b+1
                    }
                    c[absmod(k, Z)] = a
                    z := -(k - w)
                    // Overlap test: the opposing frontiers have met, so the
                    // middle snake has been found.
                    if absmod(L, 2) == o && z >= -(h-o) && z <= h-o && c[absmod(k, Z)]+d[absmod(z, Z)] >= N {
                        var D, x, y, u, v int
                        if o == 1 {
                            D = 2*h - 1
                            x = s
                            y = t
                            u = a
                            v = b
                        } else {
                            D = 2 * h
                            x = N - a
                            y = M - b
                            u = N - s
                            v = M - t
                        }
                        if D > 1 || (x != u && y != v) {
                            // Recurse on the two halves around the middle snake.
                            return append(diffInternal(e[0:x], f[0:y], equals, i, j), diffInternal(e[u:N], f[v:M], equals, i+u, j+v)...)
                        } else if M > N {
                            // Only insertions remain.
                            return diffInternal(make([]interface{}, 0), f[N:M], equals, i+N, j+N)
                        } else if M < N {
                            // Only deletions remain.
                            return diffInternal(e[M:N], make([]interface{}, 0), equals, i+M, j+M)
                        } else {
                            return make([]Op, 0)
                        }
                    }
                }
            }
        }
    } else if N > 0 {
        // f is empty: everything in e is a deletion.
        res := make([]Op, N)
        for n := 0; n < N; n++ {
            res[n] = Op{OpDelete, i + n, -1, e[n]}
        }
        return res
    } else {
        // e is empty: everything in f is an insertion.
        res := make([]Op, M)
        for n := 0; n < M; n++ {
            res[n] = Op{OpInsert, i, j + n, f[n]}
        }
        return res
    }
    // Unreachable for non-empty inputs: the search above always terminates
    // by returning from the overlap branch.
    panic("Should never hit this!")
}
// max returns the larger of x and y.
func max(x, y int) int {
    if x < y {
        return y
    }
    return x
}

// min returns the smaller of x and y.
func min(x, y int) int {
    if y < x {
        return y
    }
    return x
}

// toInt converts a boolean to 1 (true) or 0 (false).
func toInt(b bool) int {
    if !b {
        return 0
    }
    return 1
}
/**
 * The remainder op in python always matches the sign of the _denominator_
 * In golang it matches the sign of the numerator.
 * See https://en.wikipedia.org/wiki/Modulo_operation#Variants_of_the_definition
 */

// abs returns the absolute value of x.
func abs(x int) int {
    if x < 0 {
        return -x
    } else {
        return x
    }
}

// absmod returns |x % y|, which for positive y lies in [0, y).
// NOTE(review): for negative x this is NOT the Python-style modulo that the
// comment above describes (e.g. absmod(-1, 5) == 1 while Python's -1 % 5
// == 4). Confirm that the ring-buffer indexing in diffInternal is
// insensitive to this difference in mapping.
func absmod(x, y int) int {
    return abs(x % y)
}
// Convenient wrapper for string lists
func DiffStr(e, f []string) []Op {
e1, f1 := make([]interface{}, len(e)), make([]interface{}, len(f))
for i, ee := range e {
e1[i] = ee
}
for i, fe := range f {
f1[i] = fe
}
return Diff(e1, f1, func(s1, s2 interface{}) bool {
return s1 == s2
})
} | myers.go | 0.517083 | 0.409575 | myers.go | starcoder |
package transform
import (
"encoding/base64"
"fmt"
"github.com/francescomari/nu/parser"
)
const (
    // StatsNodeDepthBucketSize is the size of a bucket for the NodesPerDepth
    // field in Stats.
    StatsNodeDepthBucketSize = 10
    // StatsPropertyDepthBucketSize is the size of a bucket for the
    // PropertiesPerDepth field in Stats.
    StatsPropertyDepthBucketSize = 10
    // StatsValueSizeBucketScale is the scale of a bucket for the ValuesPerSize
    // field in Stats.
    StatsValueSizeBucketScale = 1024
)

// Stats contains statistics about the data in an export.
type Stats struct {
    // Nodes is the total amount of nodes in the export.
    Nodes int
    // Properties is the total amount of properties in the export.
    Properties int
    // Data is the total amount of data from every property in the export. For
    // values expressed by a V command, the size is calculated as the length of
    // the value in bytes. For values expressed as an X command, the size is
    // expressed as the length of the Base64-decoded payload.
    Data int64
    // PropertiesPerType is the number of properties grouped by their type.
    PropertiesPerType map[string]int
    // PropertiesPerDepth is the number of properties grouped by the depth of
    // the node they are attached to. PropertiesPerDepth groups the number of
    // properties in buckets of fixed size, where the size of each bucket is
    // StatsPropertyDepthBucketSize.
    PropertiesPerDepth map[int]int
    // NodesPerDepth is the number of nodes grouped by their depth in the
    // content tree. NodesPerDepth groups the number of nodes in buckets of
    // fixed size, where the size of each bucket is StatsNodeDepthBucketSize.
    NodesPerDepth map[int]int
    // ValuesPerSize is the number of property values grouped by their size.
    // ValuesPerSize groups the property values in buckets whose size increases
    // logarithmically. The base of the logarithmic increase is
    // StatsValueSizeBucketScale.
    ValuesPerSize map[int]int
}
// Statistics parses a stream of commands and extracts statistics about the
// export. Statistics either returns a non-nil Stats or an error.
func Statistics(commands <-chan parser.Cmd) (*Stats, error) {
    stats := Stats{
        PropertiesPerType:  make(map[string]int),
        PropertiesPerDepth: make(map[int]int),
        NodesPerDepth:      make(map[int]int),
        ValuesPerSize:      make(map[int]int),
    }
    if err := stats.parse(commands); err != nil {
        return nil, err
    }
    return &stats, nil
}
// parse consumes the top-level command stream. Only an R (root) command or
// an error command is valid at this level.
func (s *Stats) parse(commands <-chan parser.Cmd) error {
    for command := range commands {
        switch cmd := command.(type) {
        case parser.R:
            if err := s.parseRoot(commands); err != nil {
                return err
            }
        case parser.Err:
            return s.onError(cmd)
        default:
            return s.onUnexpected(cmd)
        }
    }
    return nil
}

// parseRoot counts the root node (depth 0) and consumes the commands for
// its subtree until the matching Up. It mirrors parseNode except that the
// root is not introduced by a C command.
func (s *Stats) parseRoot(commands <-chan parser.Cmd) error {
    s.Nodes++
    s.NodesPerDepth[s.nodeDepthToBucket(0)]++
    for command := range commands {
        switch cmd := command.(type) {
        case parser.C:
            if err := s.parseNode(cmd, 1, commands); err != nil {
                return err
            }
        case parser.P:
            if err := s.parseProperty(cmd, 0, commands); err != nil {
                return err
            }
        case parser.Up:
            return nil
        case parser.Err:
            return s.onError(cmd)
        default:
            return s.onUnexpected(cmd)
        }
    }
    return nil
}

// parseNode counts a node at the given depth and consumes the commands of
// its subtree (child nodes and properties) until the matching Up.
// NOTE(review): the C command c itself is unused; only its occurrence in
// the stream matters.
func (s *Stats) parseNode(c parser.C, depth int, commands <-chan parser.Cmd) error {
    s.Nodes++
    s.NodesPerDepth[s.nodeDepthToBucket(depth)]++
    for command := range commands {
        switch cmd := command.(type) {
        case parser.C:
            if err := s.parseNode(cmd, depth+1, commands); err != nil {
                return err
            }
        case parser.P:
            if err := s.parseProperty(cmd, depth, commands); err != nil {
                return err
            }
        case parser.Up:
            return nil
        case parser.Err:
            return s.onError(cmd)
        default:
            return s.onUnexpected(cmd)
        }
    }
    return nil
}

// parseProperty counts a property attached to a node at the given depth and
// consumes its value commands until the matching Up.
func (s *Stats) parseProperty(p parser.P, depth int, commands <-chan parser.Cmd) error {
    s.Properties++
    s.PropertiesPerType[p.Type]++
    s.PropertiesPerDepth[s.propertyDepthToBucket(depth)]++
    for command := range commands {
        switch cmd := command.(type) {
        case parser.V:
            // Inline value: its size is the byte length of the data.
            size := len([]byte(cmd.Data))
            s.Data += int64(size)
            s.ValuesPerSize[s.valueSizeToBucket(size)]++
        case parser.X:
            // Base64 value. NOTE(review): DecodedLen returns the maximum
            // decoded size; when the payload is padded the real size can be
            // one or two bytes smaller, so Data slightly overcounts.
            size := base64.StdEncoding.DecodedLen(len(cmd.Data))
            s.Data += int64(size)
            s.ValuesPerSize[s.valueSizeToBucket(size)]++
        case parser.Up:
            return nil
        case parser.Err:
            return s.onError(cmd)
        default:
            return s.onUnexpected(cmd)
        }
    }
    return nil
}
func (s *Stats) onError(err parser.Err) error {
return fmt.Errorf("error at line %v: %v", err.Line, err.Err)
}
func (s *Stats) onUnexpected(cmd parser.Cmd) error {
return fmt.Errorf("unexpected command %T", cmd)
}
func (*Stats) nodeDepthToBucket(depth int) int {
return (depth / StatsNodeDepthBucketSize) * StatsNodeDepthBucketSize
}
func (*Stats) propertyDepthToBucket(depth int) int {
return (depth / StatsPropertyDepthBucketSize) * StatsPropertyDepthBucketSize
}
func (*Stats) valueSizeToBucket(size int) int {
bucket := 0
for size >= StatsValueSizeBucketScale {
size = size / StatsValueSizeBucketScale
bucket++
}
return bucket
} | transform/stats.go | 0.656108 | 0.452113 | stats.go | starcoder |
package txsort
import (
"bytes"
"github.com/parallelcointeam/pod/chaincfg/chainhash"
"github.com/parallelcointeam/pod/wire"
"sort"
)
// InPlaceSort modifies the passed transaction inputs and outputs to be
// sorted based on BIP 69.
//
// WARNING: This function must NOT be called with published transactions
// since it will mutate the transaction if it's not already sorted. This can
// cause issues if you mutate a tx in a block, for example, which would
// invalidate the block. It could also cause cached hashes, such as in a
// btcutil.Tx to become invalidated.
//
// The function should only be used if the caller is creating the
// transaction or is otherwise 100% positive mutating will not cause adverse
// affects due to other dependencies.
func InPlaceSort(tx *wire.MsgTx) {
    sort.Sort(sortableInputSlice(tx.TxIn))
    sort.Sort(sortableOutputSlice(tx.TxOut))
}
// Sort returns a new transaction with the inputs and outputs sorted based
// on BIP 69. The passed transaction is not modified and the new transaction
// might have a different hash if any sorting was done.
func Sort(tx *wire.MsgTx) *wire.MsgTx {
    txCopy := tx.Copy()
    sort.Sort(sortableInputSlice(txCopy.TxIn))
    sort.Sort(sortableOutputSlice(txCopy.TxOut))
    return txCopy
}
// IsSorted checks whether tx has inputs and outputs sorted according to
// BIP 69.
func IsSorted(tx *wire.MsgTx) bool {
    return sort.IsSorted(sortableInputSlice(tx.TxIn)) &&
        sort.IsSorted(sortableOutputSlice(tx.TxOut))
}
// sortableInputSlice implements sort.Interface over transaction inputs
// using the BIP 69 input ordering.
type sortableInputSlice []*wire.TxIn

// sortableOutputSlice implements sort.Interface over transaction outputs
// using the BIP 69 output ordering.
type sortableOutputSlice []*wire.TxOut

// For SortableInputSlice and SortableOutputSlice, three functions are needed
// to make it sortable with sort.Sort() -- Len, Less, and Swap.
// Len and Swap are trivial. Less is BIP 69 specific.
func (s sortableInputSlice) Len() int  { return len(s) }
func (s sortableOutputSlice) Len() int { return len(s) }
func (s sortableOutputSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sortableInputSlice) Swap(i, j int)  { s[i], s[j] = s[j], s[i] }
// Less is the BIP 69 input comparison function.
// First sort based on input hash (reversed / rpc-style), then index.
func (s sortableInputSlice) Less(i, j int) bool {
    // chainhash.Hash is an array type, so these assignments take local
    // copies; the in-place reversal below does not mutate the transaction.
    ihash := s[i].PreviousOutPoint.Hash
    jhash := s[j].PreviousOutPoint.Hash
    // Input hashes are the same, so compare the index.
    if ihash == jhash {
        return s[i].PreviousOutPoint.Index < s[j].PreviousOutPoint.Index
    }
    // At this point, the hashes are not equal, so reverse them to
    // big-endian and return the result of the comparison.
    const hashSize = chainhash.HashSize
    for b := 0; b < hashSize/2; b++ {
        ihash[b], ihash[hashSize-1-b] = ihash[hashSize-1-b], ihash[b]
        jhash[b], jhash[hashSize-1-b] = jhash[hashSize-1-b], jhash[b]
    }
    return bytes.Compare(ihash[:], jhash[:]) == -1
}
// Output comparison function.
// First sort based on amount (smallest first), then PkScript.
func (s sortableOutputSlice) Less(i, j int) bool {
if s[i].Value == s[j].Value {
return bytes.Compare(s[i].PkScript, s[j].PkScript) < 0
}
return s[i].Value < s[j].Value
} | btcutil/txsort/txsort.go | 0.722918 | 0.420421 | txsort.go | starcoder |
package accounting
import (
"encoding/json"
)
// UnitPrice Represents a unit price.
type UnitPrice struct {
    // The actual unit price amount.
    Amount *float32 `json:"amount,omitempty"`
    // Indicates if the unit price amount already includes taxes.
    TaxIncluded bool `json:"taxIncluded"`
}

// NewUnitPrice instantiates a new UnitPrice object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewUnitPrice(taxIncluded bool) *UnitPrice {
    this := UnitPrice{}
    this.TaxIncluded = taxIncluded
    return &this
}

// NewUnitPriceWithDefaults instantiates a new UnitPrice object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewUnitPriceWithDefaults() *UnitPrice {
    this := UnitPrice{}
    return &this
}

// GetAmount returns the Amount field value if set, zero value otherwise.
func (o *UnitPrice) GetAmount() float32 {
    if o == nil || o.Amount == nil {
        var ret float32
        return ret
    }
    return *o.Amount
}

// GetAmountOk returns a tuple with the Amount field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *UnitPrice) GetAmountOk() (*float32, bool) {
    if o == nil || o.Amount == nil {
        return nil, false
    }
    return o.Amount, true
}

// HasAmount returns a boolean if a field has been set.
func (o *UnitPrice) HasAmount() bool {
    if o != nil && o.Amount != nil {
        return true
    }
    return false
}

// SetAmount gets a reference to the given float32 and assigns it to the Amount field.
func (o *UnitPrice) SetAmount(v float32) {
    o.Amount = &v
}

// GetTaxIncluded returns the TaxIncluded field value
func (o *UnitPrice) GetTaxIncluded() bool {
    if o == nil {
        var ret bool
        return ret
    }
    return o.TaxIncluded
}

// GetTaxIncludedOk returns a tuple with the TaxIncluded field value
// and a boolean to check if the value has been set.
func (o *UnitPrice) GetTaxIncludedOk() (*bool, bool) {
    if o == nil {
        return nil, false
    }
    return &o.TaxIncluded, true
}

// SetTaxIncluded sets field value
func (o *UnitPrice) SetTaxIncluded(v bool) {
    o.TaxIncluded = v
}

// MarshalJSON serializes UnitPrice, omitting Amount when unset.
func (o UnitPrice) MarshalJSON() ([]byte, error) {
    toSerialize := map[string]interface{}{}
    if o.Amount != nil {
        toSerialize["amount"] = o.Amount
    }
    // taxIncluded is a required field and is therefore always serialized.
    // (The code-generator's `if true { ... }` wrapper has been removed.)
    toSerialize["taxIncluded"] = o.TaxIncluded
    return json.Marshal(toSerialize)
}
// NullableUnitPrice wraps a UnitPrice together with an explicit "is set"
// flag so the JSON null state can be distinguished from the zero value.
type NullableUnitPrice struct {
    value *UnitPrice
    isSet bool
}

// Get returns the wrapped value, which may be nil.
func (v NullableUnitPrice) Get() *UnitPrice {
    return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableUnitPrice) Set(val *UnitPrice) {
    v.value = val
    v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableUnitPrice) IsSet() bool {
    return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableUnitPrice) Unset() {
    v.value = nil
    v.isSet = false
}

// NewNullableUnitPrice returns a wrapper around val that is marked as set.
func NewNullableUnitPrice(val *UnitPrice) *NullableUnitPrice {
    return &NullableUnitPrice{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; an unset or nil value encodes as null.
func (v NullableUnitPrice) MarshalJSON() ([]byte, error) {
    return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks it as set.
func (v *NullableUnitPrice) UnmarshalJSON(src []byte) error {
    v.isSet = true
    return json.Unmarshal(src, &v.value)
}
package eaopt
import (
"math"
)
// copyFloat64s returns a fresh slice containing the same values as fs.
func copyFloat64s(fs []float64) []float64 {
    dup := make([]float64, len(fs))
    copy(dup, fs)
    return dup
}

// newInts returns the slice [0, 1, ..., n-1].
func newInts(n uint) []int {
    seq := make([]int, n)
    for i := 0; i < len(seq); i++ {
        seq[i] = i
    }
    return seq
}
// divide returns a new slice with every element of floats divided by value.
func divide(floats []float64, value float64) []float64 {
    quot := make([]float64, len(floats))
    for i := range floats {
        quot[i] = floats[i] / value
    }
    return quot
}

// cumsum returns the cumulative (running) sum of floats as a new slice.
func cumsum(floats []float64) []float64 {
    sums := make([]float64, len(floats))
    var running float64
    for i, v := range floats {
        running += v
        sums[i] = running
    }
    return sums
}
// minUint returns the smaller of two uints.
func minUint(a, b uint) uint {
    if b < a {
        return b
    }
    return a
}

// minInt returns the smaller of two ints.
func minInt(a, b int) int {
    if b < a {
        return b
    }
    return a
}
// sumInts returns the sum of all elements of ints.
func sumInts(ints []int) int {
    total := 0
    for _, n := range ints {
        total += n
    }
    return total
}

// sumFloat64s returns the sum of all elements of floats.
func sumFloat64s(floats []float64) float64 {
    total := 0.0
    for _, f := range floats {
        total += f
    }
    return total
}

// minFloat64s returns the smallest element of floats (+Inf for an empty slice).
func minFloat64s(floats []float64) float64 {
    low := math.Inf(1)
    for _, f := range floats {
        if f < low {
            low = f
        }
    }
    return low
}

// maxFloat64s returns the largest element of floats (-Inf for an empty slice).
func maxFloat64s(floats []float64) float64 {
    high := math.Inf(-1)
    for _, f := range floats {
        if high < f {
            high = f
        }
    }
    return high
}

// meanFloat64s returns the arithmetic mean of floats.
func meanFloat64s(floats []float64) float64 {
    return sumFloat64s(floats) / float64(len(floats))
}

// varianceFloat64s returns the population variance of floats.
func varianceFloat64s(floats []float64) float64 {
    mean := meanFloat64s(floats)
    var ss float64
    for _, x := range floats {
        d := x - mean
        ss += d * d
    }
    return ss / float64(len(floats))
}
// set is an unordered collection of unique elements.
type set map[interface{}]bool

// setInt maps elements to integer values.
type setInt map[interface{}]int

// union returns a new set containing every element of x and y.
// Map insertion is idempotent, so duplicates are collapsed automatically
// and no separate blacklist bookkeeping is needed (the previous
// implementation tracked seen elements in a redundant second map).
func union(x, y set) set {
    u := make(set, len(x)+len(y))
    for e := range x {
        u[e] = true
    }
    for e := range y {
        u[e] = true
    }
    return u
}
package toms
import (
"github.com/dreading/gospecfunc/machine"
"github.com/dreading/gospecfunc/utils"
"math"
)
// STROM calculates Stromgren's integral
//
//	∫ 0 to x { t^7 exp(2t)/[exp(t)-1]^3 } dt
//
// The code uses Chebyshev expansions with the coefficients
// given to 20 decimal places.
//
// XVALUE is the upper limit of integration. Negative XVALUE is treated as
// an error and returns 0. For large XVALUE the result approaches the
// constant VALINF (the value of the complete integral).
func STROM(XVALUE float64) float64 {
    const (
        ZERO   = 0.0e0
        HALF   = 0.5e0
        ONE    = 1.0e0
        TWO    = 2.0e0
        FOUR   = 4.0e0
        SEVEN  = 7.0e0
        ONEHUN = 100.0e0
        ONE30  = 130.0e0
        ONE5LN = 0.4055e0
        F15BP4 = 0.38497433455066256959e-1
        PI4B3  = 1.29878788045336582982e2
        VALINF = 196.51956920868988261257e0
    )
    var K1, K2, NTERMS, NUMEXP int
    var EPNGLN, EPSLN, RET, RK, SUMEXP, SUM2, T, X, XHIGH, XK, XK1, XLOW0, XLOW1 float64
    // Chebyshev expansion coefficients for the small-argument regime (x <= 4).
    var ASTROM = []float64{
        0.56556120872539155290e0,
        0.4555731969101785525e-1,
        -0.4039535875936869170e-1,
        -0.133390572021486815e-2,
        0.185862506250538030e-2,
        -0.4685555868053659e-4,
        -0.6343475643422949e-4,
        0.572548708143200e-5,
        0.159352812216822e-5,
        -0.28884328431036e-6,
        -0.2446633604801e-7,
        0.1007250382374e-7,
        -0.12482986104e-9,
        -0.26300625283e-9,
        0.2490407578e-10,
        0.485454902e-11,
        -0.105378913e-11,
        -0.3604417e-13,
        0.2992078e-13,
        -0.163971e-14,
        -0.61061e-15,
        0.9335e-16,
        0.709e-17,
        -0.291e-17,
        0.8e-19,
        0.6e-19,
        -0.1e-19}
    X = XVALUE
    // Error test
    if X < ZERO {
        return ZERO
    }
    // Compute the machine-dependent constants.
    XK = machine.D1MACH[3]
    // NOTE(review): T is overwritten below before it is used; this
    // assignment mirrors the Fortran original (where T selected NTERMS).
    T = XK / ONEHUN
    if X <= FOUR {
        NTERMS = 26
        XLOW0 = math.Pow(ONE30*machine.D1MACH[1], ONE/(SEVEN-TWO))
        XLOW1 = TWO * XK
    } else {
        EPSLN = math.Log(machine.D1MACH[4])
        EPNGLN = math.Log(XK)
        XHIGH = SEVEN / XK
    }
    // Code for x < = 4.0
    if X <= FOUR {
        if X < XLOW0 {
            // Below underflow threshold: the integral is numerically zero.
            RET = ZERO
        } else {
            if X < XLOW1 {
                // Leading-order series term, x^5 / PI4B3.
                RET = math.Pow(X, 5) / PI4B3
            } else {
                // Chebyshev expansion on t in [-1, 1] mapped from x in [0, 4].
                T = ((X / TWO) - HALF) - HALF
                RET = math.Pow(X, 5) * utils.Cheval(NTERMS, ASTROM, T) * F15BP4
            }
        }
    } else {
        // Code for x > 4.0: asymptotic expansion as a sum of decaying
        // exponential terms subtracted from the complete-integral value.
        if X > XHIGH {
            SUMEXP = ONE
        } else {
            NUMEXP = int(EPSLN/(ONE5LN-X)) + 1
            if NUMEXP > 1 {
                T = math.Exp(-X)
            } else {
                T = ONE
            }
            // This loop sets RK = float64(NUMEXP), one increment at a time
            // (kept as in the Fortran original).
            RK = ZERO
            for K1 = 1; K1 <= NUMEXP; K1++ {
                RK = RK + ONE
            }
            // Accumulate the exponential sum by Horner's scheme over the
            // NUMEXP terms, with RK counting down from NUMEXP.
            SUMEXP = ZERO
            for K1 = 1; K1 <= NUMEXP; K1++ {
                SUM2 = ONE
                XK = ONE / (RK * X)
                XK1 = ONE
                for K2 = 1; K2 <= 7; K2++ {
                    SUM2 = SUM2*XK1*XK + ONE
                    XK1 = XK1 + ONE
                }
                SUM2 = SUM2 * (RK + ONE) / TWO
                SUMEXP = SUMEXP*T + SUM2
                RK = RK - ONE
            }
        }
        T = SEVEN*math.Log(X) - X + math.Log(SUMEXP)
        if T < EPNGLN {
            // Correction underflows: return the complete-integral value.
            RET = VALINF
        } else {
            RET = VALINF - math.Exp(T)*F15BP4
        }
    }
    return RET
}
package rings
import (
"image/color"
"gonum.org/v1/plot"
"gonum.org/v1/plot/vg"
"gonum.org/v1/plot/vg/draw"
)
// Highlight implements rendering a colored arc.
type Highlight struct {
    // Base describes the arc through which the highlight should be drawn.
    Base Arc

    // Color determines the fill color of the highlight. A nil Color means
    // no fill is drawn.
    Color color.Color

    // LineStyle determines the line style of the highlight. The outline is
    // only stroked when the style has a non-nil color and non-zero width.
    LineStyle draw.LineStyle

    // Inner and Outer define the inner and outer radii of the blocks.
    Inner, Outer vg.Length

    // X and Y specify rendering location when Plot is called.
    X, Y float64
}
// NewHighlight returns a Highlight that fills the region between the inner
// and outer radii of the given base arc with the color col.
//
// NOTE(review): an earlier comment claimed the features are checked for
// renderability and that an error is returned; no validation is performed
// and the function cannot fail.
func NewHighlight(col color.Color, base Arc, inner, outer vg.Length) *Highlight {
    return &Highlight{
        Color: col,
        Base:  base,
        Inner: inner,
        Outer: outer,
    }
}
// DrawAt renders the feature of a Highlight at cen in the specified drawing
// area, according to the Highlight configuration.
func (r *Highlight) DrawAt(ca draw.Canvas, cen vg.Point) {
    // Nothing to draw when there is neither a fill color nor a visible stroke.
    if r.Color == nil && (r.LineStyle.Color == nil || r.LineStyle.Width == 0) {
        return
    }
    // Build the ring-segment outline: the inner arc swept from Theta over
    // Phi, then the outer arc traced back in the opposite direction.
    var pa vg.Path
    pa.Move(cen.Add(Rectangular(r.Base.Theta, r.Inner)))
    pa.Arc(cen, r.Inner, float64(r.Base.Theta), float64(r.Base.Phi))
    if r.Base.Phi == Clockwise*Complete || r.Base.Phi == CounterClockwise*Complete {
        // Full circle: jump to the outer radius rather than drawing a
        // connecting segment (presumably to avoid a radial seam in the
        // closed outline — confirm against rendering output).
        pa.Move(cen.Add(Rectangular(r.Base.Theta+r.Base.Phi, r.Outer)))
    }
    pa.Arc(cen, r.Outer, float64(r.Base.Theta+r.Base.Phi), float64(-r.Base.Phi))
    pa.Close()
    if r.Color != nil {
        ca.SetColor(r.Color)
        ca.Fill(pa)
    }
    if r.LineStyle.Color != nil && r.LineStyle.Width != 0 {
        ca.SetLineStyle(r.LineStyle)
        ca.Stroke(pa)
    }
}
// XY returns the x and y coordinates of the Highlight.
func (r *Highlight) XY() (x, y float64) { return r.X, r.Y }

// Arc returns the arc of the Highlight.
func (r *Highlight) Arc() Arc { return r.Base }

// Plot calls DrawAt using the Highlight's X and Y values as the drawing
// coordinates.
func (r *Highlight) Plot(ca draw.Canvas, plt *plot.Plot) {
    trX, trY := plt.Transforms(&ca)
    // Keyed fields keep `go vet` happy with the composite literal.
    r.DrawAt(ca, vg.Point{X: trX(r.X), Y: trY(r.Y)})
}
// GlyphBoxes returns a liberal glyphbox for the highlight rendering.
func (r *Highlight) GlyphBoxes(plt *plot.Plot) []plot.GlyphBox {
return []plot.GlyphBox{{
X: plt.X.Norm(r.X),
Y: plt.Y.Norm(r.Y),
Rectangle: vg.Rectangle{
Min: vg.Point{-r.Outer, -r.Outer},
Max: vg.Point{r.Outer, r.Outer},
},
}}
} | plotter/rings/highlight.go | 0.854415 | 0.496399 | highlight.go | starcoder |
package layout
import (
"github.com/negrel/paon/geometry"
"github.com/negrel/paon/styles"
)
// BoxedObject define an object with a BoxModel.
type BoxedObject interface {
    BoxModel() BoxModel
}

// BoxModel define a box with margin, border and padding in a 2D geometric
// plane. The boxes are nested, CSS-style: MarginBox contains BorderBox,
// which contains PaddingBox, which contains ContentBox.
type BoxModel interface {
    MarginBox() geometry.Rectangle
    BorderBox() geometry.Rectangle
    PaddingBox() geometry.Rectangle
    ContentBox() geometry.Rectangle
}
var _ BoxModel = PositionedBoxModel{}
var _ geometry.Positioned = PositionedBoxModel{}

// PositionedBoxModel is a BoxModel wrapper that implement the
// geometry.Positioned interface. Every box of the wrapped model is
// translated by Origin.
type PositionedBoxModel struct {
    Origin geometry.Vec2D
    BoxModel
}

// Position implements the geometry.Positioned interface.
func (pbm PositionedBoxModel) Position() geometry.Vec2D {
    return pbm.Origin
}

// MarginBox implements the BoxModel interface.
func (pbm PositionedBoxModel) MarginBox() geometry.Rectangle {
    return pbm.BoxModel.MarginBox().MoveBy(pbm.Origin)
}

// BorderBox implements the BoxModel interface.
func (pbm PositionedBoxModel) BorderBox() geometry.Rectangle {
    return pbm.BoxModel.BorderBox().MoveBy(pbm.Origin)
}

// PaddingBox implements the BoxModel interface.
func (pbm PositionedBoxModel) PaddingBox() geometry.Rectangle {
    return pbm.BoxModel.PaddingBox().MoveBy(pbm.Origin)
}

// ContentBox implements the BoxModel interface.
func (pbm PositionedBoxModel) ContentBox() geometry.Rectangle {
    return pbm.BoxModel.ContentBox().MoveBy(pbm.Origin)
}
var _ BoxModel = &Box{}

// Box define a basic BoxModel implementation. boxSize is the outermost
// (margin) box size; each boxOffset shrinks the previous box inwards:
// borderBoxOffset holds the margin widths, paddingBoxOffset the border
// widths and contentBoxOffset the padding widths.
type Box struct {
    boxSize geometry.Size

    borderBoxOffset,
    paddingBoxOffset,
    contentBoxOffset boxOffset
}

// NewBox return a new Box with the given content box.
func NewBox(size geometry.Size) Box {
    return Box{
        boxSize: size,
    }
}

// MarginBox implements the BoxModel interface. The margin box is anchored
// at the origin and spans the full box size.
func (b Box) MarginBox() geometry.Rectangle {
    return geometry.Rectangle{
        Min: geometry.Vec2D{},
        Max: geometry.NewVec2D(b.boxSize.Width(), b.boxSize.Height()),
    }
}

// BorderBox implements the BoxModel interface.
func (b Box) BorderBox() geometry.Rectangle {
    return b.borderBoxOffset.applyOn(b.MarginBox())
}

// PaddingBox implements the BoxModel interface.
func (b Box) PaddingBox() geometry.Rectangle {
    return b.paddingBoxOffset.applyOn(b.BorderBox())
}

// ContentBox implements the BoxModel interface.
func (b Box) ContentBox() geometry.Rectangle {
    return b.contentBoxOffset.applyOn(b.PaddingBox())
}
// Resize change the size of the margin box.
func (b Box) Resize(size geometry.Size) {
b.boxSize = size
}
// ApplyMargin applies the margin of the given style to the box.
func (b Box) ApplyMargin(style styles.Style) Box {
b.applyMargin(marginOf(style))
return b
}
func (b Box) applyMargin(margin boxOffset) {
b.borderBoxOffset = margin
}
// ApplyBorder applies the border of the given style to the box.
func (b Box) ApplyBorder(style styles.Style) Box {
b.applyBorder(borderOf(style))
return b
}
func (b Box) applyBorder(border boxOffset) {
b.paddingBoxOffset = border
}
// ApplyPadding applies the padding of the given style to the box.
func (b Box) ApplyPadding(style styles.Style) Box {
b.applyPadding(paddingOf(style))
return b
}
func (b Box) applyPadding(padding boxOffset) {
b.contentBoxOffset = padding
} | pdk/layout/box.go | 0.904481 | 0.449151 | box.go | starcoder |
package gozxing
const (
    // LUMINANCE_BITS is the number of bits used to index histogram buckets.
    LUMINANCE_BITS = 5
    // LUMINANCE_SHIFT converts an 8-bit luminance into a bucket index.
    LUMINANCE_SHIFT = 8 - LUMINANCE_BITS
    // LUMINANCE_BUCKETS is the number of buckets in the luminance histogram.
    LUMINANCE_BUCKETS = 1 << LUMINANCE_BITS
)
// GlobalHistogramBinarizer converts a luminance image into a black/white
// bitmap by thresholding against a single black point estimated from a
// global histogram of luminance values.
type GlobalHistogramBinarizer struct {
    source     LuminanceSource // grayscale image being binarized
    luminances []byte          // scratch buffer reused across row reads
    buckets    []int           // histogram with LUMINANCE_BUCKETS entries
}

// NewGlobalHistgramBinarizer returns a Binarizer over source.
// (The "Histgram" misspelling is part of the public API.)
func NewGlobalHistgramBinarizer(source LuminanceSource) Binarizer {
    return &GlobalHistogramBinarizer{
        source:     source,
        luminances: []byte{},
        buckets:    make([]int, LUMINANCE_BUCKETS),
    }
}
// GetLuminanceSource returns the LuminanceSource this binarizer reads from.
func (this *GlobalHistogramBinarizer) GetLuminanceSource() LuminanceSource {
    return this.source
}

// GetWidth returns the width of the underlying luminance source in pixels.
func (this *GlobalHistogramBinarizer) GetWidth() int {
    return this.source.GetWidth()
}

// GetHeight returns the height of the underlying luminance source in pixels.
func (this *GlobalHistogramBinarizer) GetHeight() int {
    return this.source.GetHeight()
}
// GetBlackRow binarizes row y of the source image into row, estimating the
// black point from that row's own luminance histogram. The passed row is
// reused when it is large enough, otherwise a new BitArray is allocated.
func (this *GlobalHistogramBinarizer) GetBlackRow(y int, row *BitArray) (*BitArray, error) {
    source := this.GetLuminanceSource()
    width := source.GetWidth()
    if row == nil || row.GetSize() < width {
        row = NewBitArray(width)
    } else {
        row.Clear()
    }
    this.initArrays(width)
    localLuminances, e := source.GetRow(y, this.luminances)
    if e != nil {
        return nil, e
    }
    // Build the luminance histogram for this row.
    localBuckets := this.buckets
    for x := 0; x < width; x++ {
        localBuckets[(localLuminances[x]&0xff)>>LUMINANCE_SHIFT]++
    }
    blackPoint, e := this.estimateBlackPoint(localBuckets)
    if e != nil {
        return nil, e
    }
    if width < 3 {
        // Special case for very small images
        for x := 0; x < width; x++ {
            if int(localLuminances[x]&0xff) < blackPoint {
                row.Set(x)
            }
        }
    } else {
        // Threshold a lightly sharpened signal rather than the raw pixels;
        // the first and last columns are never set.
        left := int(localLuminances[0] & 0xff)
        center := int(localLuminances[1] & 0xff)
        for x := 1; x < width-1; x++ {
            right := int(localLuminances[x+1] & 0xff)
            // A simple -1 4 -1 box filter with a weight of 2.
            if ((center*4)-left-right)/2 < blackPoint {
                row.Set(x)
            }
            left = center
            center = right
        }
    }
    return row, nil
}
// GetBlackMatrix binarizes the whole source image against a single black
// point estimated from a sampled global histogram.
func (this *GlobalHistogramBinarizer) GetBlackMatrix() (*BitMatrix, error) {
    source := this.GetLuminanceSource()
    width := source.GetWidth()
    height := source.GetHeight()
    matrix, e := NewBitMatrix(width, height)
    if e != nil {
        return nil, e
    }
    // Quickly calculates the histogram by sampling four rows from the image. This proved to be
    // more robust on the blackbox tests than sampling a diagonal as we used to do.
    this.initArrays(width)
    localBuckets := this.buckets
    for y := 1; y < 5; y++ {
        row := height * y / 5
        // NOTE(review): the error from GetRow is discarded; a failing row
        // simply contributes nothing to the histogram — confirm intended.
        localLuminances, _ := source.GetRow(row, this.luminances)
        // Only sample the middle three fifths of each row.
        right := (width * 4) / 5
        for x := width / 5; x < right; x++ {
            pixel := localLuminances[x] & 0xff
            localBuckets[pixel>>LUMINANCE_SHIFT]++
        }
    }
    blackPoint, e := this.estimateBlackPoint(localBuckets)
    if e != nil {
        return nil, e
    }
    // We delay reading the entire image luminance until the black point estimation succeeds.
    // Although we end up reading four rows twice, it is consistent with our motto of
    // "fail quickly" which is necessary for continuous scanning.
    localLuminances := source.GetMatrix()
    for y := 0; y < height; y++ {
        offset := y * width
        for x := 0; x < width; x++ {
            pixel := int(localLuminances[offset+x] & 0xff)
            if pixel < blackPoint {
                matrix.Set(x, y)
            }
        }
    }
    return matrix, nil
}
// CreateBinarizer returns a fresh GlobalHistogramBinarizer over source.
func (this *GlobalHistogramBinarizer) CreateBinarizer(source LuminanceSource) Binarizer {
    return NewGlobalHistgramBinarizer(source)
}

// initArrays ensures the luminance scratch buffer can hold luminanceSize
// bytes and zeroes all histogram buckets.
func (this *GlobalHistogramBinarizer) initArrays(luminanceSize int) {
    if len(this.luminances) < luminanceSize {
        this.luminances = make([]byte, luminanceSize)
    }
    for i := range this.buckets {
        this.buckets[i] = 0
    }
}
// estimateBlackPoint picks a luminance threshold from the histogram: it
// finds the two dominant peaks (black content and white background), then
// chooses a low valley between them, biased towards the white peak. The
// returned value is scaled back to the 0-255 luminance range.
func (this *GlobalHistogramBinarizer) estimateBlackPoint(buckets []int) (int, error) {
    // Find the tallest peak in the histogram.
    numBuckets := len(buckets)
    maxBucketCount := 0
    firstPeak := 0
    firstPeakSize := 0
    for x := 0; x < numBuckets; x++ {
        if buckets[x] > firstPeakSize {
            firstPeak = x
            firstPeakSize = buckets[x]
        }
        if buckets[x] > maxBucketCount {
            maxBucketCount = buckets[x]
        }
    }
    // Find the second-tallest peak which is somewhat far from the tallest peak.
    secondPeak := 0
    secondPeakScore := 0
    for x := 0; x < numBuckets; x++ {
        distanceToBiggest := x - firstPeak
        // Encourage more distant second peaks by multiplying by square of distance.
        score := buckets[x] * distanceToBiggest * distanceToBiggest
        if score > secondPeakScore {
            secondPeak = x
            secondPeakScore = score
        }
    }
    // Make sure firstPeak corresponds to the black peak.
    if firstPeak > secondPeak {
        firstPeak, secondPeak = secondPeak, firstPeak
    }
    // If there is too little contrast in the image to pick a meaningful black point, throw rather
    // than waste time trying to decode the image, and risk false positives.
    if secondPeak-firstPeak <= numBuckets/16 {
        return 0, GetNotFoundExceptionInstance()
    }
    // Find a valley between them that is low and closer to the white peak.
    bestValley := secondPeak - 1
    bestValleyScore := -1
    for x := secondPeak - 1; x > firstPeak; x-- {
        fromFirst := x - firstPeak
        score := fromFirst * fromFirst * (secondPeak - x) * (maxBucketCount - buckets[x])
        if score > bestValleyScore {
            bestValley = x
            bestValleyScore = score
        }
    }
    // Scale the bucket index back up to a 0-255 luminance threshold.
    return bestValley << LUMINANCE_SHIFT, nil
}
package options
// InsertOneOptions represents all possible options for an insertOne
// operation.
type InsertOneOptions struct {
    // If true, allows the write to opt-out of document level validation.
    BypassDocumentValidation *bool
}

// InsertOne returns a pointer to a new, empty InsertOneOptions.
func InsertOne() *InsertOneOptions {
    return new(InsertOneOptions)
}

// SetBypassDocumentValidation allows the write to opt-out of document level
// validation. Valid for server versions >= 3.2; older servers ignore it.
func (ioo *InsertOneOptions) SetBypassDocumentValidation(b bool) *InsertOneOptions {
    ioo.BypassDocumentValidation = &b
    return ioo
}

// MergeInsertOneOptions combines the argued InsertOneOptions into a single
// InsertOneOptions in a last-one-wins fashion. Nil entries are skipped.
func MergeInsertOneOptions(opts ...*InsertOneOptions) *InsertOneOptions {
    merged := InsertOne()
    for _, opt := range opts {
        if opt == nil {
            continue
        }
        if opt.BypassDocumentValidation != nil {
            merged.BypassDocumentValidation = opt.BypassDocumentValidation
        }
    }
    return merged
}
// InsertManyOptions represents all possible options to the insertMany()
type InsertManyOptions struct {
BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
Ordered *bool // If true, when an insert fails, return without performing the remaining inserts. Defaults to true.
}
// InsertMany returns a pointer to a new InsertManyOptions
func InsertMany() *InsertManyOptions {
return &InsertManyOptions{
Ordered: &DefaultOrdered,
}
}
// SetBypassDocumentValidation allows the write to opt-out of document level validation.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (imo *InsertManyOptions) SetBypassDocumentValidation(b bool) *InsertManyOptions {
imo.BypassDocumentValidation = &b
return imo
}
// SetOrdered configures the ordered option. If true, when a write fails, the function will return without attempting
// remaining writes. Defaults to true.
func (imo *InsertManyOptions) SetOrdered(b bool) *InsertManyOptions {
	value := b
	imo.Ordered = &value
	return imo
}
// MergeInsertManyOptions combines the argued InsertManyOptions into a single InsertManyOptions in a last-one-wins fashion
func MergeInsertManyOptions(opts ...*InsertManyOptions) *InsertManyOptions {
imOpts := InsertMany()
for _, imo := range opts {
if imo == nil {
continue
}
if imo.BypassDocumentValidation != nil {
imOpts.BypassDocumentValidation = imo.BypassDocumentValidation
}
if imo.Ordered != nil {
imOpts.Ordered = imo.Ordered
}
}
return imOpts
} | vendor/github.com/mongodb/mongo-go-driver/mongo/options/insertoptions.go | 0.830663 | 0.44348 | insertoptions.go | starcoder |
/*
Package dsp has a set of digital signal processing functions that are primarily
designed to support the discrete wavelet transform
(https://github.com/goccmack/dsp/dwt).
*/
package godsp
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"math"
"strconv"
"strings"
myioutil "github.com/goccmack/goutil/ioutil"
)
// Abs returns a new slice whose elements are the absolute values of x.
func Abs(x []float64) []float64 {
	out := make([]float64, len(x))
	for i := range x {
		out[i] = math.Abs(x[i])
	}
	return out
}
// AbsInt returns a new slice whose elements are the absolute values of x.
func AbsInt(x []int) []int {
	out := make([]int, len(x))
	for i, v := range x {
		if v >= 0 {
			out[i] = v
		} else {
			out[i] = -v
		}
	}
	return out
}
// AbsAll applies Abs to every vector in X and returns the results.
func AbsAll(X [][]float64) [][]float64 {
	out := make([][]float64, len(X))
	for i := range X {
		out[i] = Abs(X[i])
	}
	return out
}
/*
Average returns the arithmetic mean of x, i.e. Sum(x)/len(x).
Returns NaN for an empty slice (0/0).
*/
func Average(x []float64) float64 {
	return Sum(x) / float64(len(x))
}
// DivS returns the vector x scaled by 1/s.
func DivS(x []float64, s float64) []float64 {
	out := make([]float64, len(x))
	for i, v := range x {
		out[i] = v / s
	}
	return out
}
// DownSampleAll downsamples every vector in xs toward the length of the
// shortest one: DownSample(x, len(x)/minLen) for each x in xs.
func DownSampleAll(xs [][]float64) [][]float64 {
	minLen := len(xs[0])
	for _, x := range xs {
		if len(x) < minLen {
			minLen = len(x)
		}
	}
	out := make([][]float64, len(xs))
	for i, x := range xs {
		out[i] = DownSample(x, len(x)/minLen)
	}
	return out
}
// DownSample returns every n-th element of x, starting at index 0.
// It panics if len(x) is not an integer multiple of n.
func DownSample(x []float64, n int) []float64 {
	if len(x)%n != 0 {
		panic(fmt.Sprintf("len(x) (%d) is not an integer multiple of n (%d)", len(x), n))
	}
	out := make([]float64, len(x)/n)
	for i := range out {
		out[i] = x[i*n]
	}
	return out
}
// FindMax returns the value and index of the first element of x equal to the
// maximum value in x. It panics if x is empty.
func FindMax(x []float64) (value float64, index int) {
	value, index = x[0], 0
	// Scan the whole slice; the previous bound (i < len(x)-1) skipped the
	// last element, so a maximum in the final position was never found.
	for i := 1; i < len(x); i++ {
		if x[i] > value {
			value, index = x[i], i
		}
	}
	return
}
// FindMaxI returns the value and index of the first element of x equal to the
// maximum value in x. It panics if x is empty.
func FindMaxI(x []int) (value int, index int) {
	value, index = x[0], 0
	// Scan the whole slice; the previous bound (i < len(x)-1) skipped the
	// last element.
	for i := 1; i < len(x); i++ {
		if x[i] > value {
			value, index = x[i], i
		}
	}
	return
}
// FindMin returns the value and index of the first element of x equal to the
// minimum value in x. It panics if x is empty.
func FindMin(x []float64) (value float64, index int) {
	value, index = x[0], 0
	// Scan the whole slice; the previous bound (i < len(x)-1) skipped the
	// last element, so a minimum in the final position was never found.
	for i := 1; i < len(x); i++ {
		if x[i] < value {
			value, index = x[i], i
		}
	}
	return
}
// Float32ToFloat64 returns a copy of x widened to []float64.
func Float32ToFloat64(x []float32) []float64 {
	out := make([]float64, len(x))
	for i := range x {
		out[i] = float64(x[i])
	}
	return out
}
// IsPowerOf2 reports whether x is nonzero with exactly one bit set
// (i.e. a positive power of two for non-negative inputs).
func IsPowerOf2(x int) bool {
	if x == 0 {
		return false
	}
	return x&(x-1) == 0
}
/*
LoadFloats reads a text file containing one float per line.
*/
func LoadFloats(fname string) []float64 {
data, err := ioutil.ReadFile(fname)
if err != nil {
panic(err)
}
rdr := bufio.NewReader(bytes.NewBuffer(data))
x := make([]float64, 0, 1024)
for s, err := rdr.ReadString('\n'); err == nil; s, err = rdr.ReadString('\n') {
f, err := strconv.ParseFloat(strings.TrimSuffix(s, "\n"), 64)
if err != nil {
panic(err)
}
x = append(x, f)
}
return x
}
// Log2 returns the integer (floor) log base 2 of n.
// E.g.: log2(12) ~ 3.6, so Log2(12) returns 3.
// n <= 0 yields the int conversion of -Inf/NaN (implementation-defined);
// callers are expected to pass positive n.
func Log2(n int) int {
	return int(math.Log2(float64(n)))
}
// LowpassFilterAll applies LowpassFilter with the given alpha to every
// vector in xs.
func LowpassFilterAll(xs [][]float64, alpha float64) [][]float64 {
	out := make([][]float64, len(xs))
	for i, x := range xs {
		out[i] = LowpassFilter(x, alpha)
	}
	return out
}
// LowpassFilter applies a first-order IIR (exponential smoothing) lowpass
// filter with factor alpha to x and returns the filtered signal:
// y[0] = alpha*x[0]; y[i] = y[i-1] + alpha*(x[i]-y[i-1]).
func LowpassFilter(x []float64, alpha float64) []float64 {
	out := make([]float64, len(x))
	prev := alpha * x[0]
	out[0] = prev
	for i := 1; i < len(x); i++ {
		prev += alpha * (x[i] - prev)
		out[i] = prev
	}
	return out
}
// Max returns the largest element of x. It panics if x is empty.
func Max(x []float64) float64 {
	best := x[0]
	for _, v := range x[1:] {
		if v > best {
			best = v
		}
	}
	return best
}
// MaxInt returns the largest element of x. It panics if x is empty.
func MaxInt(x []int) int {
	best := x[0]
	for _, v := range x[1:] {
		if v > best {
			best = v
		}
	}
	return best
}
// MovAvg returns the centered moving average of x with half-window w:
// y[i] = sum(x[i-w:i+w]) / (2w). The first and last w entries stay zero.
func MovAvg(x []float64, w int) []float64 {
	out := make([]float64, len(x))
	denom := float64(2 * w)
	for i := w; i+w < len(x); i++ {
		out[i] = Sum(x[i-w:i+w]) / denom
	}
	return out
}
// Multiplex interleaves the given equal-length channels into one vector:
// out[i*numChans+j] = channels[j][i].
func Multiplex(channels [][]float64) []float64 {
	numChans := len(channels)
	chanLen := len(channels[0])
	out := make([]float64, numChans*chanLen)
	pos := 0
	for i := 0; i < chanLen; i++ {
		for _, ch := range channels {
			out[pos] = ch[i]
			pos++
		}
	}
	return out
}
// Normalise returns x scaled by 1/Max(x), so the largest element becomes 1.
func Normalise(x []float64) []float64 {
	scale := Max(x) // previously misleadingly named "sum"
	out := make([]float64, len(x))
	for i, v := range x {
		out[i] = v / scale
	}
	return out
}
// NormaliseAll applies Normalise to every vector in xs.
func NormaliseAll(xs [][]float64) [][]float64 {
	out := make([][]float64, len(xs))
	for i, x := range xs {
		out[i] = Normalise(x)
	}
	return out
}
// Pow2 returns 2^x.
// The shift panics at run time if x < 0 (negative shift count).
func Pow2(x int) int {
	return 1 << x
}
// Range returns the integer sequence 0, 1, ..., n-1.
func Range(n int) []int {
	out := make([]int, n)
	for i := 0; i < n; i++ {
		out[i] = i
	}
	return out
}
// RemoveAvgAllZ applies RemoveAvg to every vector in xs; each result is
// clamped so no element is below 0.
func RemoveAvgAllZ(xs [][]float64) [][]float64 {
	out := make([][]float64, len(xs))
	for i, x := range xs {
		out[i] = RemoveAvg(x)
	}
	return out
}
// RemoveAvg subtracts the mean of x from every element, clamping negative
// results to 0: out[i] = max(0, x[i] - mean(x)).
func RemoveAvg(x []float64) []float64 {
	avg := Sum(x) / float64(len(x))
	out := make([]float64, len(x))
	for i, v := range x {
		if d := v - avg; d > 0 {
			out[i] = d
		}
	}
	return out
}
// Smooth smooths x in place: x[i] = sum(x[i-wdw:i+wdw])/(2*wdw).
// The leading wdw entries are zeroed; the trailing wdw entries are left
// untouched. Because the update is in place, smoothed values feed into
// later window sums.
func Smooth(x []float64, wdw int) {
	for i := 0; i < wdw; i++ {
		x[i] = 0
	}
	denom := float64(2 * wdw)
	for i := wdw; i+wdw < len(x); i++ {
		x[i] = Sum(x[i-wdw:i+wdw]) / denom
	}
}
// Sub returns the element-wise difference x - y.
// It panics if len(x) != len(y).
func Sub(x, y []float64) []float64 {
	if len(x) != len(y) {
		panic("len(x) != len(y)")
	}
	out := make([]float64, len(x))
	for i, v := range x {
		out[i] = v - y[i]
	}
	return out
}
// Sum returns the sum of the elements of x (0 for an empty slice).
func Sum(x []float64) (total float64) {
	for _, v := range x {
		total += v
	}
	return total
}
// SumVectors returns the element-wise sum of the vectors in X.
// It panics if the vectors do not all have the same length.
func SumVectors(X [][]float64) []float64 {
	N := len(X[0])
	for i, x := range X {
		if len(x) != N {
			panic(fmt.Sprintf("N=%d but len(X[%d]=%d", N, i, len(x)))
		}
	}
	sum := make([]float64, N)
	for _, x := range X {
		for i, v := range x {
			sum[i] += v
		}
	}
	return sum
}
func ToFloat(x []int) []float64 {
y := make([]float64, len(x))
for i, e := range x {
y[i] = float64(e) / float64(math.MaxInt64)
}
return y
}
// ToInt scales each sample in x (expected range [-1, 1]) by 2^bitsPerSample-1
// and truncates toward zero.
// It panics unless bitsPerSample is one of 8, 16, 32.
func ToInt(x []float64, bitsPerSample int) []int {
	switch bitsPerSample {
	case 8, 16, 32:
	default:
		panic(fmt.Sprintf("Invalid bitsPerSample %d", bitsPerSample))
	}
	scale := float64(int(1)<<bitsPerSample - 1)
	out := make([]int, len(x))
	for i, f := range x {
		out[i] = int(f * scale)
	}
	return out
}
// ToIntS scales a single sample x (expected range [-1, 1]) by
// 2^bitsPerSample-1 and truncates toward zero. Unlike ToInt, the
// bitsPerSample value is not validated.
func ToIntS(x float64, bitsPerSample int) int {
	scale := float64(int(1)<<bitsPerSample - 1)
	return int(x * scale)
}
// findLocalMax advances from `from` in steps of `step`, evaluating the sign
// of the slope over a wdw-sample window, until the window slope becomes
// negative or the window would run past the end of x. It then returns the
// index of the maximum value seen in x[from:stop], plus the stopping index.
// NOTE(review): slope() in this package returns -1 for a *rising* window, so
// "slp >= 0" keeps scanning while the signal is flat or falling — confirm
// this matches the intended local-maximum semantics.
func findLocalMax(x []float64, from, wdw, step int) (maxI, slopeEnd int) {
	i, slp := from+wdw, 0
	for slp >= 0 && i < len(x)-wdw {
		slp = slope(x[i : i+wdw])
		i += step
	}
	// Locate the maximum over the scanned region and rebase the index.
	_, maxI = FindMax(x[from:i])
	maxI += from
	slopeEnd = i
	return
}
// findLocalMin mirrors findLocalMax: it advances from `from` in steps of
// `step` while the wdw-sample window slope stays <= 0, then returns the
// index of the minimum value seen in x[from:stop], plus the stopping index.
// NOTE(review): see the slope() sign convention caveat on findLocalMax.
func findLocalMin(x []float64, from, wdw, step int) (minI, slopeEnd int) {
	i, slp := from+wdw, 0
	for slp <= 0 && i < len(x)-wdw {
		slp = slope(x[i : i+wdw])
		i += step
	}
	// Locate the minimum over the scanned region and rebase the index.
	_, minI = FindMin(x[from:i])
	minI += from
	slopeEnd = i
	return
}
// findNon0Slope scans forward from `from` and returns the first nonzero
// wdw-sample window slope together with the index where it occurred.
// If every window has zero slope it returns (0, len(x)).
func findNon0Slope(x []float64, from, wdw int) (slp, end int) {
	for i := from; i+wdw < len(x); i++ {
		if s := slope(x[i : i+wdw]); s != 0 {
			return s, i
		}
	}
	return 0, len(x)
}
// slope classifies a window by comparing its first and last samples.
// Note the (unusual) sign convention used throughout this package:
// a rising window (x[0] < x[end]) yields -1, a flat window yields 0,
// and a falling window yields +1.
func slope(x []float64) int {
	first, last := x[0], x[len(x)-1]
	switch {
	case first < last:
		return -1
	case first == last:
		return 0
	default:
		return 1
	}
}
// ivecContain reports whether v occurs in x.
func ivecContain(x []int, v int) bool {
	for i := range x {
		if x[i] == v {
			return true
		}
	}
	return false
}
// WriteAllDataFile writes each vector in xs to a text file named
// `fname_<i>.txt`, where <i> is the vector's index in xs.
func WriteAllDataFile(xs [][]float64, fname string) {
	// The loop variable was previously also named xs, shadowing the outer
	// slice; renamed for clarity (behavior unchanged).
	for i, x := range xs {
		WriteDataFile(x, fmt.Sprintf("%s_%d", fname, i))
	}
}
// WriteDataFile writes x, one "%f"-formatted value per line, to the text
// file `fname.txt`. It panics if the file cannot be written.
func WriteDataFile(x []float64, fname string) {
	var buf bytes.Buffer
	for _, f := range x {
		fmt.Fprintf(&buf, "%f\n", f)
	}
	if err := myioutil.WriteFile(fname+".txt", buf.Bytes()); err != nil {
		panic(err)
	}
}
// WriteIntDataFile writes x, one integer per line, to the text file
// `fname.txt`. It panics if the file cannot be written.
func WriteIntDataFile(x []int, fname string) {
	var buf bytes.Buffer
	for _, v := range x {
		fmt.Fprintf(&buf, "%d\n", v)
	}
	if err := myioutil.WriteFile(fname+".txt", buf.Bytes()); err != nil {
		panic(err)
	}
}
// WriteIntMatrixDataFile writes an integer matrix as comma-separated rows
// to the file `fname.csv`. It panics if the file cannot be written.
func WriteIntMatrixDataFile(x [][]int, fname string) {
	var buf bytes.Buffer
	for _, row := range x {
		for i, col := range row {
			if i > 0 {
				buf.WriteByte(',')
			}
			fmt.Fprintf(&buf, "%d", col)
		}
		buf.WriteByte('\n')
	}
	if err := myioutil.WriteFile(fname+".csv", buf.Bytes()); err != nil {
		panic(err)
	}
}
// Xcorr returns the cross correlation of x with y for lags 0..maxDelay-1,
// normalised by len(x). It panics if y is shorter than x.
func Xcorr(x, y []float64, maxDelay int) []float64 {
	N := len(x)
	corr := make([]float64, maxDelay)
	for k := range corr {
		var acc float64
		for n := 0; n+k < N; n++ {
			acc += x[n] * y[n+k]
		}
		corr[k] = acc / float64(N)
	}
	return corr
}
package leetcode
import "math"
/*
* @lc app=leetcode id=8 lang=golang
*
* [8] String to Integer (atoi)
*
* https://leetcode.com/problems/string-to-integer-atoi/description/
*
* algorithms
* Medium (15.59%)
* Likes: 2085
* Dislikes: 11270
* Total Accepted: 679.4K
* Total Submissions: 4.4M
* Testcase Example: '"42"'
*
* Implement the myAtoi(string s) function, which converts a string to a 32-bit
* signed integer (similar to C/C++'s atoi function).
*
* The algorithm for myAtoi(string s) is as follows:
*
*
* Read in and ignore any leading whitespace.
* Check if the next character (if not already at the end of the string) is '-'
* or '+'. Read this character in if it is either. This determines if the final
* result is negative or positive respectively. Assume the result is positive
* if neither is present.
 * Read in the next characters until the next non-digit character or the end of
* the input is reached. The rest of the string is ignored.
* Convert these digits into an integer (i.e. "123" -> 123, "0032" -> 32). If
* no digits were read, then the integer is 0. Change the sign as necessary
* (from step 2).
* If the integer is out of the 32-bit signed integer range [-2^31, 2^31 - 1],
* then clamp the integer so that it remains in the range. Specifically,
* integers less than -2^31 should be clamped to -2^31, and integers greater
* than 2^31 - 1 should be clamped to 2^31 - 1.
* Return the integer as the final result.
*
*
* Note:
*
*
* Only the space character ' ' is considered a whitespace character.
* Do not ignore any characters other than the leading whitespace or the rest
* of the string after the digits.
*
*
*
* Example 1:
*
*
* Input: str = "42"
* Output: 42
* Explanation: The underlined characters are what is read in, the caret is the
* current reader position.
* Step 1: "42" (no characters read because there is no leading whitespace)
* ^
* Step 2: "42" (no characters read because there is neither a '-' nor '+')
* ^
* Step 3: "42" ("42" is read in)
* ^
* The parsed integer is 42.
* Since 42 is in the range [-2^31, 2^31 - 1], the final result is 42.
*
*
* Example 2:
*
*
* Input: str = " -42"
* Output: -42
* Explanation:
* Step 1: " -42" (leading whitespace is read and ignored)
* ^
* Step 2: " -42" ('-' is read, so the result should be negative)
* ^
* Step 3: " -42" ("42" is read in)
* ^
* The parsed integer is -42.
* Since -42 is in the range [-2^31, 2^31 - 1], the final result is -42.
*
*
* Example 3:
*
*
* Input: str = "4193 with words"
* Output: 4193
* Explanation:
* Step 1: "4193 with words" (no characters read because there is no leading
* whitespace)
* ^
* Step 2: "4193 with words" (no characters read because there is neither a '-'
* nor '+')
* ^
* Step 3: "4193 with words" ("4193" is read in; reading stops because the next
* character is a non-digit)
* ^
* The parsed integer is 4193.
* Since 4193 is in the range [-2^31, 2^31 - 1], the final result is 4193.
*
*
* Example 4:
*
*
* Input: str = "words and 987"
* Output: 0
* Explanation:
* Step 1: "words and 987" (no characters read because there is no leading
* whitespace)
* ^
* Step 2: "words and 987" (no characters read because there is neither a '-'
* nor '+')
* ^
* Step 3: "words and 987" (reading stops immediately because there is a
* non-digit 'w')
* ^
* The parsed integer is 0 because no digits were read.
 * Since 0 is in the range [-2^31, 2^31 - 1], the final result is 0.
*
*
* Example 5:
*
*
* Input: str = "-91283472332"
* Output: -2147483648
* Explanation:
* Step 1: "-91283472332" (no characters read because there is no leading
* whitespace)
* ^
* Step 2: "-91283472332" ('-' is read, so the result should be negative)
* ^
* Step 3: "-91283472332" ("91283472332" is read in)
* ^
* The parsed integer is -91283472332.
* Since -91283472332 is less than the lower bound of the range [-2^31, 2^31 -
* 1], the final result is clamped to -2^31 = -2147483648.
*
*
*
* Constraints:
*
*
* 0 <= s.length <= 200
* s consists of English letters (lower-case and upper-case), digits (0-9), '
* ', '+', '-', and '.'.
*
*
*/
// @lc code=start
func myAtoi(s string) int {
i := 0
sign := 1
ans := 0
len := len(s)
for i < len && s[i] == ' ' {
i++
}
if i < len && (s[i] == '+' || s[i] == '-') {
if s[i] == '-' {
sign = -1
}
i++
}
for i < len && (int(s[i])-'0' >= 0 && int(s[i])-'0' <= 9) {
pop := int(s[i] - '0')
if ans > math.MaxInt32/10 {
if sign == -1 {
return math.MinInt32
}
return math.MaxInt32
}
if ans == math.MaxInt32/10 {
if sign == -1 && pop > math.MaxInt32%10+1 {
return math.MinInt32
} else if sign == 1 && pop > math.MaxInt32%10 {
return math.MaxInt32
}
}
ans = ans*10 + pop
i++
}
return sign * ans
}
// @lc code=end
package agent
import (
"fmt"
"strconv"
"github.com/pkg/errors"
"github.com/determined-ai/determined/master/internal/sproto"
"github.com/determined-ai/determined/master/internal/task"
"github.com/determined-ai/determined/master/pkg/actor"
"github.com/determined-ai/determined/master/pkg/aproto"
"github.com/determined-ai/determined/master/pkg/cproto"
"github.com/determined-ai/determined/master/pkg/device"
"github.com/determined-ai/determined/master/pkg/model"
)
// slotEnabled tracks the enablement state of a single slot from both the
// agent's and the user's perspective, plus drain bookkeeping.
type slotEnabled struct {
	deviceAdded  bool // whether the device has been materialized into AgentState.Devices
	agentEnabled bool // whether the whole agent is enabled
	userEnabled  bool // whether the user has enabled this particular slot
	draining     bool // whether the slot is draining (finish current work, accept none)
}
// enabled reports whether both the agent and the user have enabled the slot.
func (s slotEnabled) enabled() bool {
	if !s.agentEnabled {
		return false
	}
	return s.userEnabled
}
// slot is the scheduler-side state of one device slot on an agent.
type slot struct {
	device    device.Device     // the physical device backing this slot
	enabled   slotEnabled       // enablement/drain state
	container *cproto.Container // the container occupying the slot, or nil
}
// summarize converts the slot into its model representation for reporting.
func (s *slot) summarize() model.SlotSummary {
	return model.SlotSummary{
		ID:        strconv.Itoa(int(s.device.ID)),
		Device:    s.device,
		Enabled:   s.enabled.enabled(),
		Container: s.container,
		Draining:  s.enabled.draining,
	}
}
// AgentState holds the scheduler state for an agent. The implementation of agent-related operations
// (e.g., socket I/O) is deferred to the actor.
type AgentState struct {
	// Handler is agent actor reference.
	Handler  *actor.Ref
	Devices  map[device.Device]*cproto.ID // device -> owning container ID (nil when free)
	Label    string                       // scheduling label for this agent
	enabled  bool                         // whether the agent accepts new workloads
	draining bool                         // whether the agent is draining (no new work, finish current)
	// Since we only model GPUs as devices/slots and assume each slot can be allocated with
	// one container, we add one additional field to keep track of zero-slot containers.
	// We need this field to know if the agent is idle.
	ZeroSlotContainers    map[cproto.ID]bool
	maxZeroSlotContainers int // capacity for zero-slot containers
	slotStates map[device.ID]*slot      // per-device slot state
	containers map[cproto.ID]*actor.Ref // container ID -> task actor
}
// NewAgentState returns an empty, enabled agent state backed by the handler
// in msg, with capacity for maxZeroSlotContainers zero-slot containers.
func NewAgentState(msg sproto.AddAgent, maxZeroSlotContainers int) *AgentState {
	state := &AgentState{
		Handler:               msg.Agent,
		Label:                 msg.Label,
		maxZeroSlotContainers: maxZeroSlotContainers,
		enabled:               true,
	}
	state.Devices = make(map[device.Device]*cproto.ID)
	state.ZeroSlotContainers = make(map[cproto.ID]bool)
	state.slotStates = make(map[device.ID]*slot)
	state.containers = make(map[cproto.ID]*actor.Ref)
	return state
}
// string returns a short human-readable identifier for the agent: the local
// part of its actor address. Used in log messages.
func (a *AgentState) string() string {
	return a.Handler.Address().Local()
}
// NumSlots returns the total number of slots available. A draining agent
// only counts slots already in use; a disabled agent reports zero.
func (a *AgentState) NumSlots() int {
	if a.draining {
		return a.NumUsedSlots()
	}
	if !a.enabled {
		return 0
	}
	return len(a.Devices)
}
// NumEmptySlots returns the number of slots that have not been allocated to
// containers. Draining or disabled agents accept no new work, so they
// report zero.
func (a *AgentState) NumEmptySlots() int {
	if a.draining || !a.enabled {
		return 0
	}
	return a.NumSlots() - a.NumUsedSlots()
}
// NumUsedSlots returns the number of slots currently allocated to containers.
func (a *AgentState) NumUsedSlots() int {
	used := 0
	for _, owner := range a.Devices {
		if owner != nil {
			used++
		}
	}
	return used
}
// NumUsedZeroSlots returns the number of zero-slot containers currently
// allocated on this agent.
func (a *AgentState) NumUsedZeroSlots() int {
	return len(a.ZeroSlotContainers)
}
// NumZeroSlots returns the total number of zero-slot units. A draining
// agent only counts containers already running; a disabled agent reports
// zero.
func (a *AgentState) NumZeroSlots() int {
	if a.draining {
		return a.NumUsedZeroSlots()
	}
	if !a.enabled {
		return 0
	}
	return a.maxZeroSlotContainers
}
// NumEmptyZeroSlots returns the number of unallocated zero-slot units.
// Draining or disabled agents accept no new work, so they report zero.
func (a *AgentState) NumEmptyZeroSlots() int {
	if a.draining || !a.enabled {
		return 0
	}
	return a.NumZeroSlots() - a.NumUsedZeroSlots()
}
// Idle signals if the agent is idle: it hosts no zero-slot containers and
// no device slot is allocated.
func (a *AgentState) Idle() bool {
	return a.NumUsedZeroSlots() == 0 && a.NumUsedSlots() == 0
}
// AllocateFreeDevices allocates `slots` free devices to container id. A
// zero-slot request is recorded in ZeroSlotContainers and returns no
// devices. If fewer than `slots` devices are free, an error is returned
// and nothing is allocated.
func (a *AgentState) AllocateFreeDevices(slots int, id cproto.ID) ([]device.Device, error) {
	if slots == 0 {
		a.ZeroSlotContainers[id] = true
		return nil, nil
	}
	free := make([]device.Device, 0, slots)
	for d, owner := range a.Devices {
		if owner != nil {
			continue
		}
		free = append(free, d)
		if len(free) == slots {
			break
		}
	}
	if len(free) < slots {
		return nil, errors.New("not enough devices")
	}
	cid := id
	for _, d := range free {
		a.Devices[d] = &cid
	}
	return free, nil
}
// DeallocateContainer releases everything the container holds on this
// agent: its zero-slot entry (if any) and any devices assigned to it.
func (a *AgentState) DeallocateContainer(id cproto.ID) {
	delete(a.ZeroSlotContainers, id)
	for d, owner := range a.Devices {
		if owner == nil || *owner != id {
			continue
		}
		a.Devices[d] = nil
	}
}
// DeepCopy returns a copy of agentState for scheduler internals. The device
// and zero-slot maps are copied; slotStates is intentionally shared.
func (a *AgentState) DeepCopy() *AgentState {
	clone := &AgentState{
		Handler:               a.Handler,
		Label:                 a.Label,
		Devices:               make(map[device.Device]*cproto.ID, len(a.Devices)),
		ZeroSlotContainers:    make(map[cproto.ID]bool, len(a.ZeroSlotContainers)),
		maxZeroSlotContainers: a.maxZeroSlotContainers,
		enabled:               a.enabled,
		draining:              a.draining,
		// TODO(ilia): Deepcopy of `slotStates` may be necessary one day.
		slotStates: a.slotStates,
	}
	// device.Device is a value type, so plain map copies duplicate the keys.
	for d, id := range a.Devices {
		clone.Devices[d] = id
	}
	for id, present := range a.ZeroSlotContainers {
		clone.ZeroSlotContainers[id] = present
	}
	return clone
}
// Enable marks the agent as accepting new workloads and clears any
// in-progress drain.
func (a *AgentState) Enable(ctx *actor.Context) {
	ctx.Log().Infof("enabling agent: %s", a.string())
	a.enabled = true
	a.draining = false
}
// Disable stops the agent from accepting new workloads. With drain set, the
// agent keeps running its current workloads until they finish; otherwise it
// is simply disabled.
func (a *AgentState) Disable(ctx *actor.Context, drain bool) {
	action := "disabling"
	if drain {
		action = "draining"
	}
	ctx.Log().Infof("%s agent: %s", action, a.string())
	a.draining = drain
	a.enabled = false
}
// addDevice materializes a device into the Devices view, optionally already
// owned by containerID (nil means free).
func (a *AgentState) addDevice(ctx *actor.Context, device device.Device, containerID *cproto.ID) {
	ctx.Log().Infof("adding device: %s on %s", device.String(), a.string())
	a.Devices[device] = containerID
}
// removeDevice removes a device from the Devices view (e.g. when its slot
// is disabled).
func (a *AgentState) removeDevice(ctx *actor.Context, device device.Device) {
	ctx.Log().Infof("removing device: %s (%s)", device.String(), a.string())
	delete(a.Devices, device)
}
// agentStarted registers every device reported in AgentStarted, creating a
// fully-enabled slot for each and materializing the device view.
func (a *AgentState) agentStarted(ctx *actor.Context, agentStarted *aproto.AgentStarted) {
	for _, d := range agentStarted.Devices {
		a.slotStates[d.ID] = &slot{
			device:  d,
			enabled: slotEnabled{agentEnabled: true, userEnabled: true},
		}
		a.updateSlotDeviceView(ctx, d.ID)
	}
}
// containerStateChanged records the container's new state on every slot its
// devices occupy; a terminated container frees its slots.
func (a *AgentState) containerStateChanged(ctx *actor.Context, msg aproto.ContainerStateChanged) {
	for _, d := range msg.Container.Devices {
		st, ok := a.slotStates[d.ID]
		if !ok {
			ctx.Log().Warnf("bad containerStateChanged on device: %d (%s)", d.ID, a.string())
			continue
		}
		if msg.Container.State == cproto.Terminated {
			st.container = nil
		} else {
			st.container = &msg.Container
		}
	}
}
// startContainer records a starting container on each of the slots its
// devices occupy, then remembers the task actor for the container. An error
// is returned if any referenced slot is missing, disabled, or already
// occupied; note that slots visited before the failing one keep their
// container assignment, and the containers map entry is only made on full
// success.
func (a *AgentState) startContainer(ctx *actor.Context, msg sproto.StartTaskContainer) error {
	// inner assigns the container to a single slot, validating the slot first.
	inner := func(deviceId device.ID) error {
		s, ok := a.slotStates[deviceId]
		if !ok {
			return errors.New("can't find slot")
		}
		// TODO(ilia): Potential race condition if slot is disabled in-between scheduling?
		if !s.enabled.enabled() {
			return errors.New("container allocated but slot is not enabled")
		}
		if s.container != nil {
			return errors.New("container already allocated to slot")
		}
		s.container = &msg.StartContainer.Container
		return nil
	}
	for _, d := range msg.StartContainer.Container.Devices {
		if err := inner(d.ID); err != nil {
			return errors.Wrapf(err, "bad startedContainer on device: %d (%s)", d.ID, a.string())
		}
	}
	a.containers[msg.Container.ID] = msg.TaskActor
	return nil
}
// getSlotsSummary summarizes every slot on the agent, keyed by
// "<agent actor address>/slots/<device id>".
func (a *AgentState) getSlotsSummary(ctx *actor.Context) model.SlotsSummary {
	out := make(model.SlotsSummary, len(a.slotStates))
	for id, st := range a.slotStates {
		key := fmt.Sprintf("%s/slots/%d", ctx.Self().Address(), id)
		out[key] = st.summarize()
	}
	return out
}
// updateSlotDeviceView reconciles the Devices map (and running containers)
// with the slot's enablement state: an enabled slot gets its device
// materialized into Devices; a disabled, non-draining slot has its device
// removed and any occupying container killed. Draining slots keep their
// device and container until the work finishes.
func (a *AgentState) updateSlotDeviceView(ctx *actor.Context, deviceID device.ID) {
	s, ok := a.slotStates[deviceID]
	if !ok {
		ctx.Log().Warnf("bad updateSlotDeviceView on device: %d (%s): not found", deviceID, a.string())
		return
	}
	// TODO(ilia): Don't materialize `Devices` view on slots.
	if s.enabled.enabled() && !s.enabled.deviceAdded {
		// Slot just became enabled: add the device, preserving ownership if a
		// container already occupies the slot.
		s.enabled.deviceAdded = true
		var containerID *cproto.ID
		if s.container != nil {
			containerID = &s.container.ID
		}
		a.addDevice(ctx, s.device, containerID)
	} else if !s.enabled.enabled() {
		if !s.enabled.draining && s.enabled.deviceAdded {
			// Hard-disable: withdraw the device from the view.
			s.enabled.deviceAdded = false
			a.removeDevice(ctx, s.device)
		}
		// On `PostStop`, draining will be already set to false, and we'll kill the container
		// whether we have the device or not.
		if !s.enabled.draining && s.container != nil {
			ctx.Self().System().TellAt(s.container.Parent, task.Kill)
		}
	}
}
// patchSlotStateInner applies the non-nil Enabled/Drain fields of msg to
// slotState, refreshes the device view, and returns the updated summary.
func (a *AgentState) patchSlotStateInner(
	ctx *actor.Context, msg PatchSlotState, slotState *slot) model.SlotSummary {
	if e := msg.Enabled; e != nil {
		slotState.enabled.userEnabled = *e
	}
	if d := msg.Drain; d != nil {
		slotState.enabled.draining = *d
	}
	a.updateSlotDeviceView(ctx, slotState.device.ID)
	return slotState.summarize()
}
// patchAllSlotsState applies the same Enabled/Drain patch to every slot and
// returns the resulting summaries keyed by slot ID.
func (a *AgentState) patchAllSlotsState(
	ctx *actor.Context, msg PatchAllSlotsState) model.SlotsSummary {
	out := model.SlotsSummary{}
	for _, st := range a.slotStates {
		patch := PatchSlotState{
			ID:      st.device.ID, // Note: this is effectively unused.
			Enabled: msg.Enabled,
			Drain:   msg.Drain,
		}
		summary := a.patchSlotStateInner(ctx, patch, st)
		out[summary.ID] = summary
	}
	return out
}
func (a *AgentState) patchSlotState(
ctx *actor.Context, msg PatchSlotState) (model.SlotSummary, error) {
s, ok := a.slotStates[msg.ID]
if !ok {
return model.SlotSummary{}, errors.New(
fmt.Sprintf("bad updateSlotDeviceView on device: %d (%s): not found", msg.ID, a.string()))
}
return a.patchSlotStateInner(ctx, msg, s), nil
} | master/internal/resourcemanagers/agent/agent_state.go | 0.709925 | 0.420243 | agent_state.go | starcoder |
package daemon
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
type Gauge interface {
	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
	// values. (Note: this minimal interface only exposes Inc.)
	Inc()
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
// Desc, but have different values for their variable labels.
type GaugeVec interface {
	// WithLabelValues returns the Gauge for the given label values, allowing
	// shortcuts like
	//   myVec.WithLabelValues("404", "GET").Inc()
	// (Mirrors the prometheus GaugeVec API, which panics where the
	// GetMetricWithLabelValues form would return an error.)
	WithLabelValues(lvs ...string) Gauge
}
// A Histogram counts individual observations from an event or sample stream in
// configurable buckets. Similar to a summary, it also provides a sum of
// observations and an observation count.
type Histogram interface {
	// Observe adds a single observation to the histogram.
	Observe(float64)
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels.
type HistogramVec interface {
	// WithLabelValues returns the Histogram for the given label values,
	// allowing shortcuts like
	//   myVec.WithLabelValues("404", "GET").Observe(42.21)
	// (Mirrors the prometheus HistogramVec API, which panics where the
	// GetMetricWithLabelValues form would return an error.)
	WithLabelValues(lvs ...string) Histogram
}
// APIMetrics bundles the collectors used to instrument the API: request
// duration histograms and a connected-clients gauge.
type APIMetrics struct {
	apiDuration      HistogramVec // per-call API duration observations
	connectedClients GaugeVec     // number of currently connected clients
}
// NewAPIMetrics bundles the given collectors into an APIMetrics value.
func NewAPIMetrics(apiDuration HistogramVec, connectedClients GaugeVec) APIMetrics {
	var m APIMetrics
	m.apiDuration = apiDuration
	m.connectedClients = connectedClients
	return m
}
// APIDuration returns the HistogramVec used to collect API call duration
// metrics.
func (a APIMetrics) APIDuration() HistogramVec {
	return a.apiDuration
}
// ConnectedClients returns the GaugeVec used to collect the number of
// connected clients.
func (a APIMetrics) ConnectedClients() GaugeVec {
	return a.connectedClients
}
package rgass
import (
"errors"
)
// Node represents a node of text inside an RGASS (Replicated Growable Array
// with Split Support). A node may be logically split into child nodes (List)
// while remaining in the linked structure as a hidden parent.
type Node struct {
	ID             ID          // The identifier of this node
	List           []*Node     // A list of nodes this node has been split into
	Str            string      // The string contents of this node
	Split          bool        // Whether the node has been split
	Sentinel       bool        // Whether the node is a sentinel (just a marker for the head)
	Hidden         bool        // Whether the node is hidden (deleted or replaced by its split parts)
	Next           *Node       // A pointer to the next node in the model
	Prev           *Node       // A pointer to the previous node in the model
	Ancestor       *Node       // A pointer to a child node's most distant ancestor
	AncestorOffset int         // The offset of this node from its most distant ancestor
}
// GetAncestor returns the node's most distant ancestor, or the node itself
// when it has no ancestor (i.e. it is itself an ancestor).
func (n *Node) GetAncestor() *Node {
	if n.Ancestor == nil {
		return n
	}
	return n.Ancestor
}
// DeleteLast deletes the last part of the node: splits at pos and hides the
// trailing half, returning (kept-first, hidden-last).
// NOTE(review): the error from SplitTwo is discarded; an out-of-range pos
// silently yields empty split nodes — confirm callers always pass a valid pos.
func (n *Node) DeleteLast(pos int) (*Node, *Node) {
	fNode, lNode, _ := n.SplitTwo(pos)
	lNode.Hidden = true
	return fNode, lNode
}
// DeleteMiddle deletes the middle part of the node: splits into three at
// [pos, pos+len) and hides the middle, returning (first, hidden-middle, last).
// NOTE(review): the error from SplitThree is discarded — confirm callers
// always pass an in-range pos/len.
func (n *Node) DeleteMiddle(pos int, len int) (*Node, *Node, *Node) {
	fNode, mNode, lNode, _ := n.SplitThree(pos, len)
	mNode.Hidden = true
	return fNode, mNode, lNode
}
// DeletePrior deletes the prior part of the node: splits at pos and hides
// the leading half, returning (hidden-first, kept-last).
// NOTE(review): the error from SplitTwo is discarded — confirm callers
// always pass a valid pos.
func (n *Node) DeletePrior(pos int) (*Node, *Node) {
	fNode, lNode, _ := n.SplitTwo(pos)
	fNode.Hidden = true
	return fNode, lNode
}
// DeleteWhole marks the entire node as deleted (hidden) and returns it.
func (n *Node) DeleteWhole() *Node {
	n.Hidden = true
	return n
}
// Length returns the length of the node's content, as recorded in its ID.
func (n *Node) Length() int {
	return n.ID.Length
}
// SplitThree splits a node into three parts (Algorithm 2, pp3):
// [0,pos), [pos,pos+delLen), [pos+delLen,end). The original node is hidden,
// marked Split, and records the three children in List; each child keeps a
// pointer to its most distant ancestor and its offset from it.
// Returns an error (with three empty nodes) when pos is out of range; note
// that delLen is not itself validated against the node length.
func (n *Node) SplitThree(pos int, delLen int) (*Node, *Node, *Node, error) {
	var fNode Node
	var mNode Node
	var lNode Node
	if err := n.checkPos(pos); err != nil {
		return &fNode, &mNode, &lNode, err
	}
	// First part: [0, pos). Copies the parent then narrows it.
	fNode = *n
	fNode.ID.Length = pos
	fNode.Str = n.Str[0:pos]
	fNode.Ancestor = n
	fNode.AncestorOffset = n.AncestorOffset
	// Middle part: [pos, pos+delLen), offset continues from fNode.
	mNode = *n
	mNode.ID.Length = delLen
	mNode.ID.Offset = fNode.ID.Offset + pos
	mNode.Str = n.Str[pos : pos+delLen]
	mNode.Ancestor = n
	mNode.AncestorOffset = n.AncestorOffset + mNode.ID.Offset
	// Last part: the remainder after the middle.
	lNode = *n
	lNode.ID.Offset = mNode.ID.Offset + delLen
	lNode.ID.Length = n.Length() - fNode.Length() - mNode.Length()
	lNode.Str = n.Str[pos+delLen:]
	lNode.Ancestor = n
	lNode.AncestorOffset = n.AncestorOffset + lNode.ID.Offset
	// The parent is replaced by its children but kept as a hidden marker.
	n.Hidden = true
	n.Split = true
	n.List = []*Node{&fNode, &mNode, &lNode}
	return &fNode, &mNode, &lNode, nil
}
// SplitTwo splits a node into two parts at pos (Algorithm 1, pp3):
// [0,pos) and [pos,end). The original node is hidden, marked Split, and
// records the two children in List; each child keeps a pointer to its most
// distant ancestor and its offset from it.
// Returns an error (with two empty nodes) when pos is out of range.
func (n *Node) SplitTwo(pos int) (*Node, *Node, error) {
	var fNode Node
	var lNode Node
	if err := n.checkPos(pos); err != nil {
		return &fNode, &lNode, err
	}
	// First part: [0, pos). Copies the parent then narrows it.
	fNode = *n
	fNode.ID.Length = pos
	fNode.Str = n.Str[0:pos]
	fNode.Ancestor = n
	fNode.AncestorOffset = n.AncestorOffset
	// Last part: [pos, end), offset continues from fNode.
	lNode = *n
	lNode.ID.Offset = n.ID.Offset + pos
	lNode.ID.Length = n.ID.Length - pos
	lNode.Str = n.Str[pos:]
	lNode.Ancestor = n
	lNode.AncestorOffset = n.AncestorOffset + lNode.ID.Offset
	// The parent is replaced by its children but kept as a hidden marker.
	n.Hidden = true
	n.Split = true
	n.List = []*Node{&fNode, &lNode}
	return &fNode, &lNode, nil
}
func (n Node) checkPos(pos int) error {
var err error
if pos < 0 {
err = errors.New("Position in node can not be less than 0")
}
if pos > n.ID.Length {
err = errors.New("Position in node can not be greater than node length")
}
return err
} | rgass/node.go | 0.667364 | 0.499817 | node.go | starcoder |
/*
<tutorial>
Getting started example of using 51Degrees device detection. The example
shows how to:
<ol>
<li>Instantiate the 51Degrees device detection provider.
<p><pre class="prettyprint lang-go">
provider := FiftyOneDegreesPatternV3.NewProvider(dataFile)
</pre></p>
<li>Produce a match for a single HTTP User-Agent header
<p><pre class="prettyprint lang-go">
match := provider.GetMatch(userAgent)
</pre></p>
<li>Extract the value of the IsMobile property
<p><pre class="prettyprint lang-go">
match.GetValue("IsMobile")
</pre></p>
</ol>
This example assumes you have the 51Degrees Go API installed correctly.
</tutorial>
*/
// Snippet Start
package main
import (
"fmt"
"./src/pattern"
)
// dataFile is the location of the 51Degrees Lite device-detection data file,
// relative to the example's working directory.
var dataFile = "../data/51Degrees-LiteV3.2.dat"
// main matches three sample User-Agent strings against the 51Degrees data
// file and prints whether each device is considered mobile. The three
// near-identical match blocks are folded into a table-driven loop, and the
// stray trailing semicolons are removed (gofmt).
func main() {
	cases := []struct {
		label     string
		userAgent string
	}{
		// User-Agent string of an iPhone mobile device.
		{"Mobile", "Mozilla/5.0 (iPhone; CPU iPhone OS 7_1 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) 'Version/7.0 Mobile/11D167 Safari/9537.53"},
		// User-Agent string of Firefox Web browser version 41 on desktop.
		{"Desktop", "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"},
		// User-Agent string of a MediaHub device.
		{"MediaHub", "Mozilla/5.0 (Linux; Android 4.4.2; X7 Quad Core Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Safari/537.36"},
	}
	// Provides access to device detection functions.
	provider := FiftyOneDegreesPatternV3.NewProvider(dataFile)
	fmt.Println("Starting Getting Started Example.")
	for _, c := range cases {
		fmt.Println("\n"+c.label+" User-Agent: ", c.userAgent)
		match := provider.GetMatch(c.userAgent)
		fmt.Println("   IsMobile: ", match.GetValue("IsMobile"))
		// Matches must be released back to the native library.
		FiftyOneDegreesPatternV3.DeleteMatch(match)
	}
}
// Snippet End | GettingStarted.go | 0.667581 | 0.593668 | GettingStarted.go | starcoder |
package axis
import (
"context"
"image"
"github.com/zeebo/rothko/draw"
"golang.org/x/image/font"
"golang.org/x/image/math/fixed"
)
const (
tickSize = 10 // px
axisWidth = 1 // px
tickPadding = 2 // px
horizLabelSpacing = 10 // px
vertLabelSpacing = 2 // px
textOffset = axisWidth + tickSize + tickPadding // px
)
type Measured struct {
// Width is the width in pixels of the drawn axis.
Width int
// Height is the height in pixels of the drawn axis
Height int
// internal fields
opts Options
bounds []fixed.Rectangle26_6 // same index the labels
maxHeight int // maximum hight of a label
}
// copyLabels returns an independent copy of labels so later mutation of the
// copy cannot affect the caller's slice. An empty input yields a nil slice.
func copyLabels(labels []Label) []Label {
	if len(labels) == 0 {
		return nil
	}
	out := make([]Label, len(labels))
	copy(out, labels)
	return out
}
// Label represents a tick mark on the axis.
type Label struct {
// Position is the position of the tick mark as a float in [0, 1].
Position float64
// Text is the text of the tick mark.
Text string
}
// Options describe the axis rendering options.
type Options struct {
// Face is the font face to use for rendering the label text.
Face font.Face
// Labels is the set of labels to draw.
Labels []Label
// Vertical is if the axis is vertical.
Vertical bool
// Length is how long the axis is.
Length int
// If true, vertical axes will be drawn for the left size. Horizontal axes
// ignore this field.
Flip bool
// If true, the label text will not go past the boundaries of Length.
DontBleed bool
}
// copy returns a deep copy of the Options.
func (o Options) copy() Options {
// TODO(jeff): font.Face could technically be mutated, but don't worry
// about it.
o.Labels = copyLabels(o.Labels)
return o
}
// Draw renders the axis and returns a canvas allocated for the appopriate
// size. See Measure if you want to control where and how it is drawn.
func Draw(ctx context.Context, opts Options) *draw.RGB {
return Measure(ctx, opts).Draw(ctx, nil)
}
// Measure measures the axis sizes, and returns some state that can be used
// to draw on to some canvas.
func Measure(ctx context.Context, opts Options) Measured {
	if !opts.Vertical {
		return measureHorizontal(ctx, opts)
	}
	return measureVertical(ctx, opts)
}
// Draw performs the drawing of the data on to the canvas. The canvas is
// expected to be large enough to handle the drawing. If the canvas is nil,
// one is allocated. In either case, the canvas is returned.
func (m Measured) Draw(ctx context.Context, canvas *draw.RGB) *draw.RGB {
if m.opts.Vertical {
return m.drawVertical(ctx, canvas)
}
return m.drawHorizontal(ctx, canvas)
}
// measureVertical computes the pixel dimensions needed to render a vertical
// axis with the given options, recording each label's text bounds for the
// later draw pass.
func measureVertical(ctx context.Context, opts Options) Measured {
	// TODO(jeff): i know the vertical checking here is off by a pixel or two,
	// but it produces results that are good enough for now.
	// determine the extra space we need to draw the labels
	max_width := 0
	max_height := opts.Length
	occupied := 0
	bounds := make([]fixed.Rectangle26_6, 0, len(opts.Labels))
	for _, label := range opts.Labels {
		b, _ := font.BoundString(opts.Face, label.Text)
		bounds = append(bounds, b)
		// y pixel of this label's tick along the axis.
		y := int(float64(opts.Length-1) * label.Position)
		// How far down the label text extends (Min.Y is negative above the
		// baseline in 26.6 fixed-point font metrics).
		height := y - b.Min.Y.Ceil()
		if opts.DontBleed && height > opts.Length {
			continue
		}
		if height > max_height {
			max_height = height
		}
		label_width := b.Max.X.Ceil()
		if label_width > max_width {
			max_width = label_width
		}
		// Skip labels that would crowd the previously accepted one.
		if occupied > 0 && y < occupied+vertLabelSpacing {
			continue
		}
		occupied = height
	}
	return Measured{
		Width:  textOffset + max_width,
		Height: max_height,
		opts:   opts.copy(),
		bounds: bounds,
	}
}
// drawVertical renders the vertical axis line, tick marks, and labels on to
// canvas, allocating a new canvas if the provided one is nil or too small.
func (m Measured) drawVertical(ctx context.Context, canvas *draw.RGB) (
	out *draw.RGB) {
	w, h := 0, 0
	if canvas != nil {
		w, h = canvas.Size()
	}
	if w < m.Width || h < m.Height {
		canvas = draw.NewRGB(m.Width, m.Height)
	}
	// set up the drawer
	d := font.Drawer{
		Dst:  canvas.AsImage(),
		Src:  image.Black,
		Face: m.opts.Face,
	}
	// maybeFlip mirrors an x coordinate when the axis is drawn for the
	// left side (Flip set).
	maybeFlip := func(x int) int {
		if m.opts.Flip {
			return m.Width - 1 - x
		}
		return x
	}
	// first draw the axis
	for y := 0; y < m.opts.Length; y++ {
		for x := 0; x < axisWidth; x++ {
			canvas.Set(maybeFlip(x), y, draw.Color{})
		}
	}
	// render the ticks
	occupied := 0
	for i, label := range m.opts.Labels {
		b := m.bounds[i]
		y := int(float64(m.opts.Length-1) * label.Position)
		height := y - b.Min.Y.Ceil()
		for x := 0; x < tickSize; x++ {
			canvas.Set(maybeFlip(axisWidth+x), y, draw.Color{})
		}
		// Skip label text (tick was still drawn) if it would bleed past the
		// axis or crowd the previously drawn label; mirrors measureVertical.
		if m.opts.DontBleed && height > m.opts.Length {
			continue
		}
		if occupied > 0 && y < occupied+vertLabelSpacing {
			continue
		}
		text_size := b.Max.X - b.Min.X
		d.Dot = fixed.Point26_6{
			Y: fixed.I(y - b.Min.Y.Ceil()),
		}
		if m.opts.Flip {
			d.Dot.X = fixed.I(m.Width - textOffset - text_size.Ceil())
		} else {
			d.Dot.X = fixed.I(textOffset)
		}
		occupied = d.Dot.Y.Ceil()
		d.DrawString(label.Text)
	}
	return canvas
}
// measureHorizontal computes the pixel dimensions needed to render a
// horizontal axis with the given options, recording each label's text
// bounds for the later draw pass.
func measureHorizontal(ctx context.Context, opts Options) Measured {
	max_height := 0
	max_width := opts.Length
	bounds := make([]fixed.Rectangle26_6, 0, len(opts.Labels))
	occupied := 0
	// fits reports whether a label starting at x would not crowd the
	// previously accepted label.
	fits := func(x int) bool {
		return occupied == 0 ||
			x < horizLabelSpacing ||
			x > occupied+horizLabelSpacing
	}
	for _, label := range opts.Labels {
		b, _ := font.BoundString(opts.Face, label.Text)
		bounds = append(bounds, b)
		// x pixel of this label's tick along the axis.
		x := int(float64(opts.Length-1) * label.Position)
		label_end := x + (b.Max.X - b.Min.X).Ceil()
		if opts.DontBleed && label_end > opts.Length {
			// as a special case, if we can nudge the x back so that it's just
			// on opts.Length and fits, draw it
			label_end = opts.Length
			x = label_end - (b.Max.X - b.Min.X).Ceil()
		}
		if !fits(x) {
			continue
		}
		if label_end > max_width {
			max_width = label_end
		}
		label_height := (b.Max.Y - b.Min.Y).Ceil() + vertLabelSpacing
		if label_height > max_height {
			max_height = label_height
		}
		occupied = label_end
	}
	return Measured{
		Width:     max_width,
		Height:    textOffset + max_height,
		opts:      opts.copy(),
		bounds:    bounds,
		maxHeight: max_height,
	}
}
// drawHorizontal renders the horizontal axis line, tick marks, and labels on
// to canvas, allocating a new canvas if the provided one is nil or too small.
func (m Measured) drawHorizontal(ctx context.Context, canvas *draw.RGB) (
	out *draw.RGB) {
	w, h := 0, 0
	if canvas != nil {
		w, h = canvas.Size()
	}
	if w < m.Width || h < m.Height {
		canvas = draw.NewRGB(m.Width, m.Height)
	}
	// set up the drawer
	d := font.Drawer{
		Dst:  canvas.AsImage(),
		Src:  image.Black,
		Face: m.opts.Face,
	}
	// draw the axis
	for x := 0; x < m.opts.Length; x++ {
		for y := 0; y < axisWidth; y++ {
			canvas.Set(x, y, draw.Color{})
		}
	}
	// render the ticks and labels
	occupied := 0
	// fits mirrors the spacing check in measureHorizontal.
	fits := func(x int) bool {
		return occupied == 0 ||
			x < horizLabelSpacing ||
			x > occupied+horizLabelSpacing
	}
	for i, label := range m.opts.Labels {
		b := m.bounds[i]
		x := int(float64(m.opts.Length-1) * label.Position)
		label_end := x + (b.Max.X - b.Min.X).Ceil()
		for y := 0; y < tickSize; y++ {
			canvas.Set(x, axisWidth+y, draw.Color{})
		}
		// Remember the tick position before any nudge so the small marker
		// below stays aligned with the true tick.
		tick_x := x
		if m.opts.DontBleed && label_end > m.opts.Length {
			// as a special case, if we can nudge the x back so that it's just
			// on m.opts.Length and still have the same slot, draw it.
			label_end = m.opts.Length
			x = label_end - (b.Max.X - b.Min.X).Ceil()
		}
		if !fits(x) {
			continue
		}
		for y := 0; y < 3; y++ {
			canvas.Set(tick_x, axisWidth+tickSize+y, draw.Color{})
		}
		d.Dot = fixed.Point26_6{
			X: fixed.I(x),
			Y: fixed.I(textOffset - b.Min.Y.Ceil()),
		}
		d.DrawString(label.Text)
		occupied = d.Dot.X.Ceil()
	}
	return canvas
} | draw/axis/axis.go | 0.658966 | 0.438725 | axis.go | starcoder |
package main
import (
"fmt"
"container/list"
)
// Matrix represents the matrix graph
type Matrix [][]int
// Position defines the current position in the graph (matrix)
type Position struct {
row, col int
}
// Direction indicates the movement (delta between positions)
type Direction struct {
// We can move +/- 1 in each direction (up/down via rows, left/right via columns)
row, col int
}
// MatrixGraph is a matrix-based graph, where one can travel up, down, right, left
// (considering the boundaries)
type MatrixGraph struct {
matrix Matrix
rows, cols int
p Position
}
var directions = [...]Direction{{0, 1}, {0, -1}, {1, 0}, {-1, 0}}
// Create builds a MatrixGraph with dimension [rows, cols], filling the
// cells with the sequential values 1..rows*cols in row-major order.
func Create(rows, cols int) *MatrixGraph {
	mg := &MatrixGraph{rows: rows, cols: cols}
	mg.matrix = make(Matrix, rows)
	counter := 0
	for r := 0; r < rows; r++ {
		mg.matrix[r] = make([]int, cols)
		for c := 0; c < cols; c++ {
			counter++
			mg.matrix[r][c] = counter
		}
	}
	return mg
}
// traverseDfs performs a recursive depth-first traversal starting at
// position to, calling f on each newly reached position. The visit is
// post-order: f runs after all reachable neighbours have been explored.
// Positions already present in the shared visited set are skipped.
func (mg *MatrixGraph) traverseDfs(to Position, visited map[Position]bool, f func(pos Position, mg *MatrixGraph)) {
	if visited[to] {
		return
	}
	visited[to] = true
	// For all neighbours (up/down/left/right, bounds-checked).
	for _, d := range directions {
		nextRow, nextCol := to.row+d.row, to.col+d.col
		if 0 <= nextRow && nextRow < mg.rows && 0 <= nextCol && nextCol < mg.cols {
			mg.traverseDfs(Position{nextRow, nextCol}, visited, f)
		}
	}
	f(to, mg)
}
// dfs runs a depth-first traversal over the whole graph, calling f for each
// position exactly once. A no-op on a nil receiver.
func (mg *MatrixGraph) dfs(f func(pos Position, mg *MatrixGraph)) {
	if mg == nil {
		return
	}
	visited := make(map[Position]bool)
	// Seed a traversal from every cell; already-visited positions are
	// skipped inside traverseDfs via the shared visited set.
	for row := range mg.matrix {
		for col := range mg.matrix[row] {
			mg.traverseDfs(Position{row, col}, visited, f)
		}
	}
}
// traverseBfs performs an iterative breadth-first traversal starting at
// position to, calling f on each position the first time it is dequeued.
// Positions already present in the shared visited set are skipped.
func (mg *MatrixGraph) traverseBfs(to Position, visited map[Position]bool, f func(pos Position, mg *MatrixGraph)) {
	queue := list.New()
	queue.PushBack(to)
	for queue.Len() > 0 {
		e := queue.Front()
		pos := e.Value.(Position)
		queue.Remove(e)
		if visited[pos] {
			continue
		}
		visited[pos] = true
		// Enqueue the in-bounds neighbours of the *current* position.
		// The original computed neighbours from the start position "to",
		// so the traversal never advanced beyond to's neighbours.
		for _, d := range directions {
			nextRow, nextCol := pos.row+d.row, pos.col+d.col
			if 0 <= nextRow && nextRow < mg.rows && 0 <= nextCol && nextCol < mg.cols {
				queue.PushBack(Position{nextRow, nextCol})
			}
		}
		f(pos, mg)
	}
}
// bfs runs a breadth-first traversal over the whole graph, calling f for
// each position exactly once. A no-op on a nil receiver.
func (mg *MatrixGraph) bfs(f func(pos Position, mg *MatrixGraph)) {
	if mg == nil {
		return
	}
	visited := make(map[Position]bool)
	// Seed a traversal from every cell; already-visited positions are
	// skipped inside traverseBfs via the shared visited set.
	for row := range mg.matrix {
		for col := range mg.matrix[row] {
			mg.traverseBfs(Position{row, col}, visited, f)
		}
	}
}
func visit(p Position, mg *MatrixGraph) {
fmt.Printf("%d, ", mg.matrix[p.row][p.col])
}
func main() {
mg := Create(4, 8)
fmt.Println("DFS")
mg.dfs(visit)
fmt.Println()
fmt.Println("BFS")
mg.bfs(visit)
} | struct/graph/matrix/traversal/traversal.go | 0.672332 | 0.522568 | traversal.go | starcoder |
package operator
import (
"errors"
"fmt"
"github.com/weworksandbox/lingo"
"github.com/weworksandbox/lingo/check"
"github.com/weworksandbox/lingo/sql"
)
func NewBinary(left lingo.Expression, op Operator, right lingo.Expression) Binary {
return Binary{
left: left,
op: op,
right: right,
}
}
type Binary struct {
left lingo.Expression
op Operator
right lingo.Expression
}
func (b Binary) And(exp lingo.Expression) lingo.ComboExpression {
return And(b, exp)
}
// Or combines this expression with exp using the OR operator.
func (b Binary) Or(exp lingo.Expression) lingo.ComboExpression {
	// The original mistakenly delegated to And here, so b.Or(x) built an
	// AND expression.
	return Or(b, exp)
}
// ToSQL renders the binary expression as SQL for the given dialect. The
// dialect must implement operator.Dialect, and both operands must be
// non-empty; otherwise an error is returned.
func (b Binary) ToSQL(d lingo.Dialect) (sql.Data, error) {
	operand, ok := d.(Dialect)
	if !ok {
		return nil, fmt.Errorf("dialect '%s' does not support 'operator.Dialect'", d.GetName())
	}
	if check.IsValueNilOrEmpty(b.left) {
		return nil, errors.New("left of operator.Binary cannot be empty")
	}
	leftSQL, err := b.left.ToSQL(d)
	if err != nil {
		return nil, err
	}
	if check.IsValueNilOrEmpty(b.right) {
		return nil, errors.New("right of operator.Binary cannot be empty")
	}
	rightSQL, err := b.right.ToSQL(d)
	if err != nil {
		return nil, err
	}
	return operand.BinaryOperator(leftSQL, b.op, rightSQL)
}
// And creates an AND operator.Binary expression
func And(left, right lingo.Expression) Binary {
return NewBinary(left, OpAnd, right)
}
// Or creates an OR operator.Binary expression
func Or(left, right lingo.Expression) Binary {
return NewBinary(left, OpOr, right)
}
// Eq creates an equals operator.Binary expression
func Eq(left, right lingo.Expression) Binary {
return NewBinary(left, OpEq, right)
}
// NotEq creates an not equal operator.Binary expression
func NotEq(left, right lingo.Expression) Binary {
return NewBinary(left, OpNotEq, right)
}
// Like creates a like operator.Binary expression
func Like(left, right lingo.Expression) Binary {
return NewBinary(left, OpLike, right)
}
// NotLike creates a not like operator.Binary expression
func NotLike(left, right lingo.Expression) Binary {
return NewBinary(left, OpNotLike, right)
}
// LessThan creates a less than operator.Binary expression
func LessThan(left, right lingo.Expression) Binary {
return NewBinary(left, OpLessThan, right)
}
// LessThanOrEqual creates a less than or equal to operator.Binary expression
func LessThanOrEqual(left, right lingo.Expression) Binary {
return NewBinary(left, OpLessThanOrEqual, right)
}
// GreaterThan creates a greater than operator.Binary expression
func GreaterThan(left, right lingo.Expression) Binary {
return NewBinary(left, OpGreaterThan, right)
}
// GreaterThanOrEqual creates a greater than or equal to operator.Binary expression
func GreaterThanOrEqual(left, right lingo.Expression) Binary {
return NewBinary(left, OpGreaterThanOrEqual, right)
}
// Between creates a between operator.Binary expression, adding the And expression for the first and second values
func Between(left, first, second lingo.Expression) Binary {
return NewBinary(left, OpBetween, And(first, second))
}
// NotBetween creates a not between operator.Binary expression, adding the And expression for the
// first and second values
func NotBetween(left, first, second lingo.Expression) Binary {
return NewBinary(left, OpNotBetween, And(first, second))
}
// In creates an in operator.Binary expression
func In(left lingo.Expression, values lingo.Expression) Binary {
return NewBinary(left, OpIn, values)
}
// NotIn creates a not in operator.Binary expression
func NotIn(left lingo.Expression, values lingo.Expression) Binary {
return NewBinary(left, OpNotIn, values)
} | expr/operator/binary.go | 0.840979 | 0.547585 | binary.go | starcoder |
// Package exe defines QoL functions to simplify and unify creating executables
package exe
import (
"fmt"
"strings"
"gopkg.in/alecthomas/kingpin.v2"
"microsoft.com/pkggen/internal/logger"
)
// ToolkitVersion specifies the version of the toolkit and the reported version of all tools in it.
const ToolkitVersion = "1.0"
// InputFlag registers an input flag for k with documentation doc and returns the passed value
func InputFlag(k *kingpin.Application, doc string) *string {
return k.Flag("input", doc).Required().ExistingFile()
}
// InputStringFlag registers an input flag for k with documentation doc and returns the passed value
func InputStringFlag(k *kingpin.Application, doc string) *string {
return k.Flag("input", doc).Required().String()
}
// InputDirFlag registers an input flag for k with documentation doc and returns the passed value
func InputDirFlag(k *kingpin.Application, doc string) *string {
return k.Flag("dir", doc).Required().ExistingDir()
}
// OutputFlag registers an output flag for k with documentation doc and returns the passed value
func OutputFlag(k *kingpin.Application, doc string) *string {
return k.Flag("output", doc).Required().String()
}
// OutputDirFlag registers an output flag for k with documentation doc and returns the passed value
func OutputDirFlag(k *kingpin.Application, doc string) *string {
return k.Flag("output-dir", doc).Required().String()
}
// LogFileFlag registers a log file flag for k and returns the passed value
func LogFileFlag(k *kingpin.Application) *string {
return k.Flag(logger.FileFlag, logger.FileFlagHelp).String()
}
// LogLevelFlag registers a log level flag for k and returns the passed value
func LogLevelFlag(k *kingpin.Application) *string {
return k.Flag(logger.LevelsFlag, logger.LevelsHelp).PlaceHolder(logger.LevelsPlaceholder).Enum(logger.Levels()...)
}
// PlaceHolderize takes a list of available inputs and returns a
// corresponding placeholder of the form "(a|b|c)".
func PlaceHolderize(thing []string) string {
	joined := strings.Join(thing, "|")
	return fmt.Sprintf("(%s)", joined)
}
// ParseListArgument takes a user provided, whitespace separated string list
// and returns a slice of its trimmed elements. Blank or empty input yields
// a nil slice.
func ParseListArgument(input string) (results []string) {
	// strings.Fields collapses runs of whitespace, so inputs such as
	// "a  b" parse as two elements; the original strings.Split on a single
	// space produced empty-string entries for consecutive spaces.
	if fields := strings.Fields(input); len(fields) > 0 {
		results = fields
	}
	return
}
package ann
import (
"errors"
"math/rand"
"github.com/azuwey/gonetwork/activationfn"
"github.com/azuwey/gonetwork/matrix"
)
// LayerDescriptor used to generate the layers in the artificial neural network.
type LayerDescriptor struct {
Nodes int `json:"nodes"`
ActivationFunction string `json:"activationFunction"`
Weights []float64 `json:"weights"`
Biases []float64 `json:"biases"`
}
// Model used to generate a artificial neural network.
type Model struct {
LearningRate float64 `json:"learningRate"`
Layers []LayerDescriptor `json:"layers"`
}
// Layer represents a layer in the artificial neural network.
type Layer struct {
weights *matrix.Matrix
biases *matrix.Matrix
activationFunction *activationfn.ActivationFunction
}
// ANN represents the structure of a artificial neural network.
type ANN struct {
learningRate float64
layers []*Layer
rand *rand.Rand
}
// layerValues is used by calculateLayerValues to return both activated and unactivated values.
type layerValues struct {
activated *matrix.Matrix
unactivated *matrix.Matrix
}
// New creates a new artificial neural network from the given model. The
// first entry of model.Layers describes the input layer and the last entry
// the output layer.
//
// It returns an error when model.Layers is nil or has fewer than three
// entries, when model.LearningRate is outside (0, 1], when r is nil, when a
// weight or bias matrix cannot be constructed, or when a layer names an
// unknown activation function.
func New(model *Model, r *rand.Rand) (*ANN, error) {
	if model.Layers == nil || len(model.Layers) < 3 {
		return nil, ErrLayerStructureLength
	}
	if model.LearningRate <= 0 || model.LearningRate > 1 {
		return nil, ErrLearningRateRange
	}
	if r == nil {
		return nil, ErrNilRand
	}
	lyrs := make([]*Layer, len(model.Layers)-1)
	// rnd yields uniform values in [-1, 1) for weight initialization.
	rnd := func(v float64, _ int, _ []float64) float64 {
		return r.Float64()*2 - 1
	}
	for idx, lyr := range model.Layers[1:] {
		// Weight matrix maps the previous layer's outputs to this layer's
		// nodes: (this layer's nodes) x (previous layer's nodes).
		w, err := matrix.New(lyr.Nodes, model.Layers[idx].Nodes, lyr.Weights)
		if !errors.Is(err, nil) {
			return nil, err
		}
		// Bias column vector: one bias per node. The original passed
		// lyr.Weights here (wrong data) and discarded the error.
		b, err := matrix.New(lyr.Nodes, 1, lyr.Biases)
		if !errors.Is(err, nil) {
			return nil, err
		}
		// NOTE(review): weights are randomized unconditionally, which
		// overwrites any weights supplied by the model — confirm whether
		// loading a previously trained model is expected to work here.
		w.Apply(rnd, w)
		aFn, ok := activationfn.ActivationFunctions[lyr.ActivationFunction]
		if !ok {
			return nil, ErrActivationFnNotExist
		}
		lyrs[idx] = &Layer{w, b, aFn}
	}
	n := &ANN{model.LearningRate, lyrs, r}
	return n, nil
}
// calculateLayerValues runs a forward pass for input i and returns, per
// layer, both the activated and unactivated (pre-activation) values.
// Index 0 holds the input itself (with a nil unactivated matrix); the last
// index holds the output layer. Returns an error if i cannot be shaped into
// a column vector.
func (n *ANN) calculateLayerValues(i []float64) ([]*layerValues, error) {
	iMat, err := matrix.New(len(i), 1, i)
	if !errors.Is(err, nil) {
		return nil, err
	}
	vals := make([]*layerValues, len(n.layers)+1)
	vals[0] = &layerValues{iMat, nil}
	for idx := range vals[1:] {
		// unactivated = weights * previousActivations + biases
		uV := &matrix.Matrix{}
		uV.Product(n.layers[idx].weights, vals[idx].activated)
		uV.Add(n.layers[idx].biases, uV)
		// activated = activationFn(unactivated), applied to a copy so the
		// raw pre-activation values survive for backpropagation.
		aV, _ := matrix.Copy(uV)
		aV.Apply(n.layers[idx].activationFunction.ActivationFn(aV), aV)
		vals[idx+1] = &layerValues{aV, uV}
	}
	return vals, nil
}
// Predict runs a forward pass over the network for input vector i and
// returns the activated values of the output layer. Returns an error when
// i is nil or when the forward pass fails.
func (n *ANN) Predict(i []float64) ([]float64, error) {
	if i == nil {
		return nil, ErrNilInputSlice
	}
	lVals, err := n.calculateLayerValues(i)
	if !errors.Is(err, nil) {
		return nil, err
	}
	// The last entry holds the output layer's activations.
	return lVals[len(lVals)-1].activated.Values, nil
}
// Train performs one step of backpropagation for input i and target t,
// updating every layer's weights and biases in place. Returns an error when
// i or t is nil, when the forward pass fails, or when t's length does not
// match the output layer's size.
func (n *ANN) Train(i, t []float64) error {
	if i == nil {
		return ErrNilInputSlice
	}
	if t == nil {
		return ErrNilTargetSlice
	}
	// Forward pass: keeps both activated and unactivated values per layer.
	lVals, err := n.calculateLayerValues(i)
	if !errors.Is(err, nil) {
		return err
	}
	tMat, err := matrix.New(len(t), 1, t)
	if !errors.Is(err, nil) {
		return err
	}
	if lVals[len(lVals)-1].activated.Rows != tMat.Rows {
		return ErrBadTargetSlice
	}
	// Backward pass, from the output layer towards the input.
	lastErrVal := &matrix.Matrix{}
	for idx := len(n.layers) - 1; idx >= 0; idx-- {
		// Error term: target - output for the last layer; otherwise the
		// next layer's error propagated back through its weights.
		e := &matrix.Matrix{}
		if idx == len(n.layers)-1 {
			e.Subtract(tMat, lVals[idx+1].activated)
		} else {
			e.Transpose(n.layers[idx+1].weights)
			e.Product(e, lastErrVal)
		}
		lastErrVal = e
		// Gradient: error multiplied element-wise by the activation
		// derivative at this layer's pre-activation values.
		g := &matrix.Matrix{}
		g.Apply(n.layers[idx].activationFunction.DeactivationFn(lVals[idx+1].unactivated), lVals[idx+1].unactivated)
		g.Multiply(e, g)
		// Weight delta: gradient * previousActivations^T, scaled by the
		// learning rate.
		d := &matrix.Matrix{}
		d.Transpose(lVals[idx].activated)
		d.Product(g, d)
		d.Scale(n.learningRate, d)
		n.layers[idx].weights.Add(n.layers[idx].weights, d)
		n.layers[idx].biases.Add(n.layers[idx].biases, g)
	}
	return nil
} | ann/ann.go | 0.737158 | 0.565719 | ann.go | starcoder |
package linear
import (
"math"
)
/**
* Class defining a real-valued vector with basic algebraic operations.
*
* vector element indexing is 0-based -- e.g., At(0) returns the first element of the vector.
*
* The map method operate on vectors element-wise, i.e. they perform the same operation (adding a scalar,
* applying a function ...) on each element in turn. It uses the instance itself to store the
* results, so the instance is changed by this method. In all cases, the result
* vector is returned by the methods.
*
*/
type RealVector interface {
/**
* Returns the size of the vector.
*
* @return the size of this vector.
*/
Dimension() int
/**
* Return the entry at the specified index.
*/
At(index int) float64
/**
* Set a single element.
*/
SetEntry(index int, value float64)
/**
* Change an entry at the specified index.
*/
AddToEntry(index int, increment float64)
/**
* Construct a new vector by appending a vector to this vector.
*/
AppendVector(v RealVector) RealVector
/**
* Construct a new vector by appending a double to this vector.
*/
Append(d float64) RealVector
/**
* Get a subvector from consecutive elements.
*/
SubVector(index, n int) RealVector
/**
* Set a sequence of consecutive elements.
*/
SetSubVector(index int, v RealVector)
/**
* Check whether any coordinate of this vector is NaN.
*/
IsNaN() bool
/**
* Check whether any coordinate of this vector is infinite and none are NaN.
*/
IsInf() bool
/**
* Compute the sum of this vector and v.
* Returns a new vector. Does not change instance data.
*/
Add(v RealVector) RealVector
/**
* Subtract v from this vector.
* Returns a new vector. Does not change instance data.
*/
Subtract(v RealVector) RealVector
/**
* Add a value to each entry.
* Returns a new vector. Does not change instance data.
*/
MapAdd(d float64)
/**
* Element-by-element division.
*/
EBEDivide(v RealVector) RealVector
/**
* Element-by-element multiplication.
*/
EBEMultiply(v RealVector) RealVector
/**
* Subtract a value from each entry. Returns a new vector.
* Does not change instance data.
*/
MapSubtract(d float64)
/**
* Multiply each entry by the argument. Returns a new vector.
* Does not change instance data.
*/
MapMultiply(d float64)
/**
* Divide each entry by the argument. Returns a new vector.
* Does not change instance data.
*/
MapDivide(d float64)
/**
* Copies entries from a vector with same size as this instance.
*/
CopyFrom(vec RealVector)
/**
* Returns a (deep) copy of this vector.
*/
Copy() RealVector
/**
* Set all elements to a single value.
*/
Set(value float64)
/**
* Convert the vector to an array of {@code double}s.
* The array is independent from this vector data: the elements
* are copied.
*/
ToArray() []float64
/**
* Converts this vector into a unit vector.
* The instance itself is changed by this method.
*/
Unitize()
/**
* Generic dense iterator. Iteration is in increasing order
* of the vector index.
*
* Note: derived classes are required to return an Iterator that
* returns non-nil Entry objects as long as hasNext() returns true.
*/
Iterator() EntryIterator
/**
* Entries of this vector are modified in-place.
*/
Map(f func(float64) float64)
/**
* Updates this vector with the linear combination of this and y.
*/
Combine(a, b float64, y RealVector)
/**
* Visits (but does not alter) all entries of this vector in default order
* (increasing index).
*/
WalkInDefaultOrder(visitor RealVectorPreservingVisitor) float64
/**
* Visits (but does not alter) some entries of this vector in default order
* (increasing index).
*/
WalkInDefaultOrderBounded(visitor RealVectorPreservingVisitor, start, end int) float64
/**
* Visits (and possibly alters) all entries of this vector in default order
* (increasing index).
*/
WalkInUpdateDefaultOrder(visitor RealVectorChangingVisitor) float64
/**
* Visits (and possibly alters) some entries of this vector in default order
* (increasing index).
*/
WalkInUpdateDefaultOrderBounded(visitor RealVectorChangingVisitor, start, end int) float64
/**
* Visits (but does not alter) all entries of this vector in optimized
* order. The order in which the entries are visited is selected so as to
* lead to the most efficient implementation; it might depend on the
* concrete implementation of this abstract class.
*/
WalkInOptimizedOrder(visitor RealVectorPreservingVisitor) float64
/**
* Visits (but does not alter) some entries of this vector in optimized
* order. The order in which the entries are visited is selected so as to
* lead to the most efficient implementation; it might depend on the
* concrete implementation of this abstract class.
*/
WalkInOptimizedOrderBounded(visitor RealVectorPreservingVisitor, start, end int) float64
/**
* Visits (and possibly alters) all entries of this vector in optimized
* order. The order in which the entries are visited is selected so as to
* lead to the most efficient implementation; it might depend on the
* concrete implementation of this abstract class.
*/
WalkInUpdateOptimizedOrder(visitor RealVectorChangingVisitor) float64
/**
* Visits (and possibly change) some entries of this vector in optimized
* order. The order in which the entries are visited is selected so as to
* lead to the most efficient implementation; it might depend on the
* concrete implementation of this abstract class.
*/
WalkInUpdateOptimizedOrderBounded(visitor RealVectorChangingVisitor, start, end int) float64
/**
* Test for the equality of two real vectors. If all coordinates of two real
* vectors are exactly the same, and none are NaN, the two real
* vectors are considered to be equal. NaN coordinates are
* considered to affect globally the vector and be equals to each other -
* i.e, if either (or all) coordinates of the real vector are equal to
* NaN, the real vector is equal to a vector with all NaN
* coordinates.
*/
Equals(other interface{}) bool
}
type EntryIterator interface {
HasNext() bool
Next() Entry
}
type Entry interface {
Index() int
Value() float64
}
type entryImpl struct {
idx int
value float64
}
func (e *entryImpl) Index() int {
return e.idx
}
func (e *entryImpl) Value() float64 {
return e.value
}
type entryIteratorImpl struct {
vec RealVector
idx int
}
func newEntryIterator(vec RealVector) *entryIteratorImpl {
return &entryIteratorImpl{vec: vec}
}
func (ei *entryIteratorImpl) HasNext() bool {
return ei.idx < ei.vec.Dimension()
}
func (ei *entryIteratorImpl) Next() Entry {
if ei.idx >= ei.vec.Dimension() {
panic("no entry left")
}
e := &entryImpl{idx: ei.idx, value: ei.vec.At(ei.idx)}
ei.idx++
return e
}
func NewRealVector(data []float64) (RealVector, error) {
if data == nil {
return nil, invalidArgumentSimpleErrorf()
}
return NewArrayRealVector(data, true)
}
/**
 * VecNorm returns the L2 (Euclidean) norm of the vector: the square root
 * of the sum of its squared elements.
 */
func VecNorm(v RealVector) float64 {
	var sumOfSquares float64
	for it := v.Iterator(); it.HasNext(); {
		value := it.Next().Value()
		sumOfSquares += value * value
	}
	return math.Sqrt(sumOfSquares)
}
/**
* Computes the cosine of the angle between this vector and the
* argument.
*/
func VecCosine(v1, v2 RealVector) float64 {
norm := VecNorm(v1)
vNorm := VecNorm(v2)
if norm == 0 || vNorm == 0 {
panic(mathArithmeticErrorf(zero_norm))
}
return VecDotProduct(v1, v2) / (norm * vNorm)
}
/**
 * VecDotProduct computes the dot product of vec1 with vec2.
 * Panics when the two vectors differ in dimension.
 */
func VecDotProduct(vec1, vec2 RealVector) float64 {
	err := checkVectorDimensions(vec1, vec2)
	if err != nil {
		panic(err)
	}
	var d float64
	n := vec1.Dimension()
	for i := 0; i < n; i++ {
		d += vec1.At(i) * vec2.At(i)
	}
	return d
}
/**
* Returns the L1 norm of the vector. The sum of the absolute
* values of the elements.
*/
func VecL1Norm(v RealVector) float64 {
var norm float64
it := v.Iterator()
for it.HasNext() {
e := it.Next()
norm += math.Abs(e.Value())
}
return norm
}
/**
* Returns the L-inf norm of the vector.The max of the absolute
* values of the elements.
*/
func VecLInfNorm(v RealVector) float64 {
var norm float64
it := v.Iterator()
for it.HasNext() {
e := it.Next()
norm = math.Max(norm, math.Abs(e.Value()))
}
return norm
}
/**
* Distance between two vectors.
* This method computes the distance consistent with the
* L2 norm, i.e. the square root of the sum of
* element differences, or Euclidean distance.
*/
func VecDistance(v1, v2 RealVector) float64 {
err := checkVectorDimensions(v1, v2)
if err != nil {
panic(err)
}
var d float64
it := v1.Iterator()
for it.HasNext() {
e := it.Next()
diff := e.Value() - v2.At(e.Index())
d += diff * diff
}
return math.Sqrt(d)
}
/**
* Distance between two vectors.
* This method computes the distance consistent with
* L1 norm, i.e. the sum of the absolute values of
* the elements differences.
*/
func VecL1Distance(v1, v2 RealVector) float64 {
err := checkVectorDimensions(v1, v2)
if err != nil {
panic(err)
}
var d float64
it := v1.Iterator()
for it.HasNext() {
e := it.Next()
d += math.Abs(e.Value() - v2.At(e.Index()))
}
return d
}
/**
* Distance between two vectors.
* This method computes the distance consistent with
* L-inf norm, i.e. the max of the absolute values of
* element differences.
*/
func VecLInfDistance(v1, v2 RealVector) float64 {
err := checkVectorDimensions(v1, v2)
if err != nil {
panic(err)
}
var d float64
it := v1.Iterator()
for it.HasNext() {
e := it.Next()
d = math.Max(math.Abs(e.Value()-v2.At(e.Index())), d)
}
return d
}
/**
* Get the index of the minimum entry.
*/
func MinIndex(v RealVector) int {
minIndex := -1
minValue := math.Inf(1)
iterator := v.Iterator()
for iterator.HasNext() {
entry := iterator.Next()
if entry.Value() <= minValue {
minIndex = entry.Index()
minValue = entry.Value()
}
}
return minIndex
}
/**
* Get the value of the minimum entry.
*/
func MinValue(v RealVector) float64 {
minIndex := MinIndex(v)
if minIndex < 0 {
return math.NaN()
}
return v.At(minIndex)
}
/**
* Get the index of the maximum entry.
*/
func MaxIndex(v RealVector) int {
maxIndex := -1
maxValue := math.Inf(-1)
iterator := v.Iterator()
for iterator.HasNext() {
entry := iterator.Next()
if entry.Value() >= maxValue {
maxIndex = entry.Index()
maxValue = entry.Value()
}
}
return maxIndex
}
/**
* Get the value of the maximum entry.
*/
func MaxValue(v RealVector) float64 {
maxIndex := MaxIndex(v)
if maxIndex < 0 {
return math.NaN()
}
return v.At(maxIndex)
}
/**
 * Projection finds the orthogonal projection of src onto dest. dest is
 * scaled in place to hold the projection and is also returned.
 * Panics with an arithmetic error when dest has zero norm.
 */
func Projection(src, dest RealVector) RealVector {
	// Compute the squared norm once and reuse it below; the original
	// recomputed VecDotProduct(dest, dest) on every use.
	norm2 := VecDotProduct(dest, dest)
	if norm2 == 0.0 {
		panic(mathArithmeticErrorf(zero_norm))
	}
	dest.MapMultiply(VecDotProduct(src, dest) / norm2)
	return dest
}
/**
* Creates a unit vector pointing in the direction of this vector.
* The instance is not changed by this method.
*/
func UnitVector(v RealVector) RealVector {
a, err := NewArrayRealVectorCopy(v)
if err != nil {
panic(err)
}
a.Unitize()
return a
} | real_vector.go | 0.942115 | 0.715325 | real_vector.go | starcoder |
package stdlib
import "github.com/asukakenji/go-benchmarks"
// --- LeadingZeros ---

// LeadingZeros returns the number of leading zero bits in x; the result is the size of uint in bits for x == 0.
func LeadingZeros(x uint) int { return int(benchmarks.SizeOfUintInBits) - Len(x) }

// LeadingZeros8 returns the number of leading zero bits in x; the result is 8 for x == 0.
func LeadingZeros8(x uint8) int { return 8 - Len8(x) }

// LeadingZeros16 returns the number of leading zero bits in x; the result is 16 for x == 0.
func LeadingZeros16(x uint16) int { return 16 - Len16(x) }

// LeadingZeros32 returns the number of leading zero bits in x; the result is 32 for x == 0.
func LeadingZeros32(x uint32) int { return 32 - Len32(x) }

// LeadingZeros64 returns the number of leading zero bits in x; the result is 64 for x == 0.
func LeadingZeros64(x uint64) int { return 64 - Len64(x) }

// LeadingZerosPtr returns the number of leading zero bits in x; the result is the size of uintptr in bits for x == 0.
// NOTE(review): this uses benchmarks.SizeOf[uintptr]() while LeadingZeros uses
// benchmarks.SizeOfUintInBits — both are assumed to be bit counts; confirm they
// are consistent.
func LeadingZerosPtr(x uintptr) int { return benchmarks.SizeOf[uintptr]() - LenPtr(x) }
// --- Len ---

// Len returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len(x uint) int {
	// Dispatch on the platform word size so the narrower 32-bit path is used
	// where it suffices.
	if benchmarks.SizeOfUintInBits == 32 {
		return Len32(uint32(x))
	}
	return Len64(uint64(x))
}
// Len8 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
// Single lookup in the 256-entry len8tab table (defined elsewhere in this package).
func Len8(x uint8) int {
	return int(len8tab[x])
}

// Len16 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
// Binary search by halves: shift the high byte down if present, then finish
// with the 8-bit table.
func Len16(x uint16) (n int) {
	if x >= 1<<8 {
		x >>= 8
		n = 8
	}
	return n + int(len8tab[x])
}

// Len32 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len32(x uint32) (n int) {
	if x >= 1<<16 {
		x >>= 16
		n = 16
	}
	if x >= 1<<8 {
		x >>= 8
		n += 8
	}
	return n + int(len8tab[x])
}

// Len64 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
func Len64(x uint64) (n int) {
	if x >= 1<<32 {
		x >>= 32
		n = 32
	}
	if x >= 1<<16 {
		x >>= 16
		n += 16
	}
	if x >= 1<<8 {
		x >>= 8
		n += 8
	}
	return n + int(len8tab[x])
}
// LenPtr returns the minimum number of bits required to represent x; the result is 0 for x == 0.
// NOTE(review): the comparison benchmarks.SizeOf[uintptr]() == 32 assumes SizeOf
// reports a size in BITS (consistent with its use in LeadingZerosPtr); if it
// returned bytes this branch could never be taken — confirm.
func LenPtr(x uintptr) int {
	if benchmarks.SizeOf[uintptr]() == 32 {
		return Len32(uint32(x))
	}
	return Len64(uint64(x))
}
package vector2
import (
"fmt"
"math"
)
// Epsilon is the tolerance below which a vector's magnitude is treated as
// zero (see Normalize).
const Epsilon = 0.00001

// Vector2 is a 2-D vector with float64 components. Except for Set, methods
// return a new vector and leave the receiver unchanged.
type Vector2 struct {
	X, Y float64
}
// Dot returns the dot product of ihs and rhs.
// ("ihs" looks like a typo for "lhs"; kept as-is since it is cosmetic.)
func Dot(ihs *Vector2, rhs *Vector2) float64 {
	return ihs.X*rhs.X + ihs.Y*rhs.Y
}

// Lerp linearly interpolates between a and b by factor t
// (t=0 yields a's components, t=1 yields b's; t is NOT clamped).
func Lerp(a *Vector2, b *Vector2, t float64) *Vector2 {
	return New(
		a.X+(b.X-a.X)*t,
		a.Y+(b.Y-a.Y)*t,
	)
}

// Distance returns the Euclidean distance between points a and b.
func Distance(a *Vector2, b *Vector2) float64 {
	dx := a.X - b.X
	dy := a.Y - b.Y
	return math.Sqrt(dx*dx + dy*dy)
}

// Reflect computes rhs - 2*Dot(ihs, rhs)*ihs, i.e. the reflection of rhs
// about ihs treated as a (presumably unit-length) normal.
// NOTE(review): confirm the intended argument order — normal first here.
func Reflect(ihs *Vector2, rhs *Vector2) *Vector2 {
	factor := -2.0 * Dot(ihs, rhs)
	return New(
		factor*ihs.X+rhs.X,
		factor*ihs.Y+rhs.Y,
	)
}
// New returns a new Vector2 with the given components.
func New(x float64, y float64) *Vector2 {
	return &Vector2{X: x, Y: y}
}

// Copy returns an independent copy of v.
func (v *Vector2) Copy() *Vector2 {
	return New(v.X, v.Y)
}

// Set overwrites both components in place and returns v for chaining.
func (v *Vector2) Set(x float64, y float64) *Vector2 {
	v.X = x
	v.Y = y
	return v
}
// Componentwise arithmetic. Every method below returns a NEW vector; the
// receiver and arguments are never modified. The Div variants perform no
// zero check, so zero divisors yield ±Inf/NaN components per IEEE-754.

// Add returns v + other, componentwise.
func (v *Vector2) Add(other *Vector2) *Vector2 {
	return New(v.X+other.X, v.Y+other.Y)
}

// AddScalar adds scalar to both components.
func (v *Vector2) AddScalar(scalar float64) *Vector2 {
	return New(v.X+scalar, v.Y+scalar)
}

// AddScalars adds x to X and y to Y.
func (v *Vector2) AddScalars(x float64, y float64) *Vector2 {
	return New(v.X+x, v.Y+y)
}

// Sub returns v - other, componentwise.
func (v *Vector2) Sub(other *Vector2) *Vector2 {
	return New(v.X-other.X, v.Y-other.Y)
}

// SubScalar subtracts scalar from both components.
func (v *Vector2) SubScalar(scalar float64) *Vector2 {
	return New(v.X-scalar, v.Y-scalar)
}

// SubScalars subtracts x from X and y from Y.
func (v *Vector2) SubScalars(x float64, y float64) *Vector2 {
	return New(v.X-x, v.Y-y)
}

// Mul returns the componentwise (Hadamard) product of v and other.
func (v *Vector2) Mul(other *Vector2) *Vector2 {
	return New(v.X*other.X, v.Y*other.Y)
}

// MulScalar scales both components by scalar.
func (v *Vector2) MulScalar(scalar float64) *Vector2 {
	return New(v.X*scalar, v.Y*scalar)
}

// MulScalars multiplies X by x and Y by y.
func (v *Vector2) MulScalars(x float64, y float64) *Vector2 {
	return New(v.X*x, v.Y*y)
}

// Div returns the componentwise quotient of v and other.
func (v *Vector2) Div(other *Vector2) *Vector2 {
	return New(v.X/other.X, v.Y/other.Y)
}

// DivScalar divides both components by scalar.
func (v *Vector2) DivScalar(scalar float64) *Vector2 {
	return New(v.X/scalar, v.Y/scalar)
}

// DivScalars divides X by x and Y by y.
func (v *Vector2) DivScalars(x float64, y float64) *Vector2 {
	return New(v.X/x, v.Y/y)
}
// Distance returns the Euclidean distance from v to other.
func (v *Vector2) Distance(other *Vector2) float64 {
	dx := v.X - other.X
	dy := v.Y - other.Y
	return math.Sqrt(dx*dx + dy*dy)
}

// Dot returns the dot product of v and other.
func (v *Vector2) Dot(other *Vector2) float64 {
	return v.X*other.X + v.Y*other.Y
}

// Lerp linearly interpolates from v toward other by t (t is NOT clamped).
func (v *Vector2) Lerp(other *Vector2, t float64) *Vector2 {
	return New(
		v.X+(other.X-v.X)*t,
		v.Y+(other.Y-v.Y)*t,
	)
}

// Magnitude returns the Euclidean length of v.
func (v *Vector2) Magnitude() float64 {
	return math.Sqrt(v.X*v.X + v.Y*v.Y)
}
// Normalize returns a unit-length copy of v, or a plain copy when the
// magnitude is at or below Epsilon (avoids dividing by ~zero).
func (v *Vector2) Normalize() *Vector2 {
	if m := v.Magnitude(); m > Epsilon {
		return v.DivScalar(m)
	}
	return v.Copy()
}
// Reflect computes other - 2*v.Dot(other)*v: the reflection of other about
// v treated as a (presumably unit-length) normal.
// NOTE(review): confirm the intended roles — the receiver is the normal here.
func (v *Vector2) Reflect(other *Vector2) *Vector2 {
	factor := -2.0 * v.Dot(other)
	return New(
		factor*v.X+other.X,
		factor*v.Y+other.Y,
	)
}

// Equals reports exact componentwise equality. This is a strict float64
// comparison — Epsilon is NOT applied here.
func (v *Vector2) Equals(other *Vector2) bool {
	return v.X == other.X && v.Y == other.Y
}

// ToString returns a human-readable representation such as "Vector2(1.000000, 2.000000)".
// NOTE(review): Go convention would be String() string (fmt.Stringer); renaming
// would break existing callers.
func (v *Vector2) ToString() string {
	return fmt.Sprintf("Vector2(%f, %f)", v.X, v.Y)
}
package bitarray
// Join concatenates the elements of its first parameter to create a single
// bit array. The separator sep is placed between elements in the result.
func Join(elems []*BitArray, sep BitArrayer) *BitArray {
	// Materialize the separator up front (preserves the original call order
	// even for the early-return cases below).
	var sepBA *BitArray
	if sep != nil {
		sepBA = sep.BitArray()
	}
	// Fast paths: nothing to join, or a single element needs no separator.
	if len(elems) == 0 {
		return zeroBitArray
	}
	if len(elems) == 1 {
		return elems[0]
	}
	builder := NewBuilder()
	for i, elem := range elems {
		if i > 0 && sepBA != nil {
			_, _ = builder.WriteBitArray(sepBA)
		}
		_, _ = builder.WriteBitArray(elem)
	}
	return builder.BitArray()
}
// JoinBitArrayer is identical to Join except that it accepts elems in
// []BitArrayer type instead of []*BitArray type.
func JoinBitArrayer(elems []BitArrayer, sep BitArrayer) *BitArray {
	// Materialize the separator up front (preserves the original call order
	// even for the early-return cases below).
	var sepBA *BitArray
	if sep != nil {
		sepBA = sep.BitArray()
	}
	if len(elems) == 0 {
		return zeroBitArray
	}
	if len(elems) == 1 {
		// A nil element contributes nothing.
		if elems[0] == nil {
			return zeroBitArray
		}
		return elems[0].BitArray()
	}
	builder := NewBuilder()
	for i, elem := range elems {
		if i > 0 && sepBA != nil {
			_, _ = builder.WriteBitArray(sepBA)
		}
		if elem != nil {
			_, _ = builder.WriteBitArray(elem)
		}
	}
	return builder.BitArray()
}
// Append returns the new BitArray resulting from appending the passed elements
// to the current bit array.
func (ba *BitArray) Append(bas ...BitArrayer) *BitArray {
	if len(bas) == 0 {
		// Nothing to append: normalize a zero array to the shared sentinel.
		if ba.IsZero() {
			return zeroBitArray
		}
		return ba
	}
	if len(bas) == 1 {
		// Single element: a nil argument is a no-op, otherwise take the
		// optimized single-append path.
		if bas[0] == nil {
			return ba
		}
		return ba.append1(bas[0])
	}
	builder := NewBuilder(ba)
	for _, item := range bas {
		_, _ = builder.WriteBitArray(item)
	}
	return builder.BitArray()
}
// append1 concatenates a single BitArrayer onto ba, with fast paths for
// zero-length operands and for appending an all-zero bit string
// (a nil .b buffer appears to denote "nBits zero bits" — consistent with
// Repeat and the constructors below; confirm against the BitArray type).
func (ba *BitArray) append1(x BitArrayer) *BitArray {
	var bax *BitArray
	if x != nil {
		bax = x.BitArray()
	}
	switch {
	case ba.IsZero():
		if bax.IsZero() {
			return zeroBitArray
		}
		return bax
	case bax.IsZero():
		return ba
	}
	if bax.b == nil {
		// Appending all-zero bits: only the length grows. Reuse ba's buffer
		// when its capacity already covers the new byte length.
		nBits := ba.nBits + bax.nBits
		if ba.b == nil {
			return &BitArray{nBits: nBits}
		}
		nBytes := (nBits + 7) >> 3
		if nBytes <= cap(ba.b) {
			return &BitArray{b: ba.b[:nBytes], nBits: nBits}
		}
		buf := allocByteSlice(nBytes)
		copy(buf, ba.b)
		return &BitArray{b: buf, nBits: nBits}
	}
	// General case: copy both bit strings into a fresh buffer.
	nBits := ba.nBits + bax.nBits
	buf := allocByteSlice((nBits + 7) >> 3)
	copy(buf, ba.b)
	// copyBits' boolean result combined with ba.b == nil selects the
	// nil-buffer ("all zero") representation — presumably it reports that
	// the copied bits were all zero; TODO confirm copyBits' contract.
	if copyBits(buf, bax.b, ba.nBits, 0, bax.nBits) && ba.b == nil {
		return &BitArray{nBits: nBits}
	}
	return &BitArray{b: buf, nBits: nBits}
}
// Repeat returns a bit array consisting of count copies of the bit array ba.
func (ba *BitArray) Repeat(count int) *BitArray {
switch {
case count < 0:
panicf("invalid count: %d < 0", count)
case ba.IsZero(), count == 0:
return zeroBitArray
case count == 1:
return ba
case ba.b == nil:
return &BitArray{nBits: ba.nBits * count}
}
bb := NewBuilder()
for i := 0; i < count; i++ {
_, _ = bb.WriteBitArray(ba)
}
return bb.BitArray()
} | bitarray_concat.go | 0.737442 | 0.54958 | bitarray_concat.go | starcoder |
package common
// Size is the size of a SHABAL256 checksum in bytes.
const Size = 32

// BlockSize is the block size of SHABAL256 in bytes.
const BlockSize = 64

// ivSize is the number of uint32 words in the hash state (12 A + 16 B + 16 C).
// It is not referenced in the code visible here; Digest uses the literal 44.
const ivSize = 44

// iv lazily caches the derived initial state (see getIV).
// NOTE(review): initialization is not synchronized; confirm getIV is never
// called from multiple goroutines concurrently.
var iv []uint32
// Digest represents the partial evaluation of a SHABAL256 checksum.
// The zero value is NOT ready for use: call Reset (or use NewDegist) first.
type Digest struct {
	buf   [64]byte   // pending input; compressed once 64 bytes accumulate
	state [44]uint32 // A (12 words), B (16 words), C (16 words)
	ptr   uint32     // number of valid bytes currently in buf
	w     int64      // 64-bit block counter mixed into each compression
}
// Reset reinitializes the digest: the state words come from the derived IV,
// the block counter starts at 1, and the input buffer is emptied.
func (d *Digest) Reset() {
	copy(d.state[:], getIV())
	d.w = 1
	d.ptr = 0
}
// NewDegist returns a new *Digest computing the SHABAL256 checksum.
// NOTE(review): the name looks like a typo for "NewDigest"; it is kept
// because renaming would break existing callers.
func NewDegist() *Digest {
	d := new(Digest)
	d.Reset()
	return d
}
// Size returns the checksum length in bytes (32).
func (d *Digest) Size() int { return Size }

// BlockSize returns the hash block size in bytes (64).
func (d *Digest) BlockSize() int { return BlockSize }
// Write absorbs p into the hash state; it forwards to Write1 over the whole
// slice and never returns a non-nil error.
func (d *Digest) Write(p []byte) (int, error) {
	return d.Write1(p, 0, len(p))
}
// Write1 absorbs nn bytes of p starting at byte offset off. Any partial
// block is buffered in d.buf; full 64-byte blocks are compressed directly
// from p. It always returns (nn, nil).
//
// Fixes vs. the previous version: the buffering branch copied from p[0:]
// instead of p[off:], corrupting input for direct calls with off > 0, and
// the function returned int(d.ptr) instead of the number of bytes consumed,
// violating the io.Writer contract honored by Write.
func (d *Digest) Write1(p []byte, off uint32, nn int) (int, error) {
	n := uint32(nn)
	if d.ptr != 0 {
		// Try to complete the partially filled block first.
		rlen := 64 - d.ptr
		if n < rlen {
			copy(d.buf[d.ptr:], p[off:off+n])
			d.ptr += n
			return nn, nil
		}
		copy(d.buf[d.ptr:], p[off:off+rlen])
		off += rlen
		n -= rlen
		d.core1()
	}
	// Compress as many whole 64-byte blocks as remain, straight from p.
	num := n >> 6
	if num > 0 {
		d.core(p, off, num)
		off += num << 6
		n &= 63
	}
	// Buffer the trailing partial block (possibly zero bytes).
	copy(d.buf[:n], p[off:])
	d.ptr = n
	return nn, nil
}
// Sum appends the current hash to in and returns the resulting slice.
// The receiver's state is not modified.
func (d0 *Digest) Sum(in []byte) []byte {
	// Make a copy of d0 so that caller can keep writing and summing.
	d := *d0
	hash := d.checkSum()
	return append(in, hash[:]...)
}
// checkSum finalizes the hash and returns the 32-byte digest. The buffered
// input is padded with a single 0x80 byte followed by zeros, then the final
// block is compressed four times with the SAME block counter (each w--
// undoes the increment performed inside core1) — the Shabal finalization
// rounds. The digest is the little-endian serialization of state words
// 36..43 (the last 8 C words).
func (d *Digest) checkSum() [Size]byte {
	var digest [Size]byte
	// Pad: 0x80 marker, then zeros to the end of the 64-byte block.
	d.buf[d.ptr] = 0x80
	d.ptr++
	for i := d.ptr; i < BlockSize; i++ {
		d.buf[i] = 0
	}
	d.core1()
	d.w--
	d.core1()
	d.w--
	d.core1()
	d.w--
	d.core1()
	d.w--
	// Emit state[36..43] as little-endian bytes.
	var j int = 36
	var w uint32 = 0
	for i := 0; i < 32; i++ {
		if i&3 == 0 {
			w = d.state[j]
			j++
		}
		digest[i] = byte(w)
		w >>= 8
	}
	return digest
}
// decodeLEInt reads a little-endian uint32 from data at byte offset off.
// (The previous &0xFF masks were no-ops on byte values and are dropped.)
func decodeLEInt(data []byte, off uint32) uint32 {
	b0 := uint32(data[off])
	b1 := uint32(data[off+1])
	b2 := uint32(data[off+2])
	b3 := uint32(data[off+3])
	return b0 | b1<<8 | b2<<16 | b3<<24
}
// core absorbs num consecutive 64-byte blocks of data, starting at byte
// offset off, into the Shabal state. The 44-word state is split into
// A (12 words), B (16 words) and C (16 words), held in locals for speed.
// Per block: add the message words into B, XOR the 64-bit block counter w
// into A0/A1, run 48 unrolled permutation steps (three passes over the 16
// B words), fold C into A, then swap B and C with the message subtracted.
func (d *Digest) core(data []byte, off, num uint32) {
	A0 := d.state[0]
	A1 := d.state[1]
	A2 := d.state[2]
	A3 := d.state[3]
	A4 := d.state[4]
	A5 := d.state[5]
	A6 := d.state[6]
	A7 := d.state[7]
	A8 := d.state[8]
	A9 := d.state[9]
	AA := d.state[10]
	AB := d.state[11]
	B0 := d.state[12]
	B1 := d.state[13]
	B2 := d.state[14]
	B3 := d.state[15]
	B4 := d.state[16]
	B5 := d.state[17]
	B6 := d.state[18]
	B7 := d.state[19]
	B8 := d.state[20]
	B9 := d.state[21]
	BA := d.state[22]
	BB := d.state[23]
	BC := d.state[24]
	BD := d.state[25]
	BE := d.state[26]
	BF := d.state[27]
	C0 := d.state[28]
	C1 := d.state[29]
	C2 := d.state[30]
	C3 := d.state[31]
	C4 := d.state[32]
	C5 := d.state[33]
	C6 := d.state[34]
	C7 := d.state[35]
	C8 := d.state[36]
	C9 := d.state[37]
	CA := d.state[38]
	CB := d.state[39]
	CC := d.state[40]
	CD := d.state[41]
	CE := d.state[42]
	CF := d.state[43]
	for ; num > 0; num-- {
		// Load the 16 message words and add them into B (rotated left 17).
		M0 := decodeLEInt(data, off)
		B0 += M0
		B0 = (B0 << 17) | (B0 >> 15)
		M1 := decodeLEInt(data, off+4)
		B1 += M1
		B1 = (B1 << 17) | (B1 >> 15)
		M2 := decodeLEInt(data, off+8)
		B2 += M2
		B2 = (B2 << 17) | (B2 >> 15)
		M3 := decodeLEInt(data, off+12)
		B3 += M3
		B3 = (B3 << 17) | (B3 >> 15)
		M4 := decodeLEInt(data, off+16)
		B4 += M4
		B4 = (B4 << 17) | (B4 >> 15)
		M5 := decodeLEInt(data, off+20)
		B5 += M5
		B5 = (B5 << 17) | (B5 >> 15)
		M6 := decodeLEInt(data, off+24)
		B6 += M6
		B6 = (B6 << 17) | (B6 >> 15)
		M7 := decodeLEInt(data, off+28)
		B7 += M7
		B7 = (B7 << 17) | (B7 >> 15)
		M8 := decodeLEInt(data, off+32)
		B8 += M8
		B8 = (B8 << 17) | (B8 >> 15)
		M9 := decodeLEInt(data, off+36)
		B9 += M9
		B9 = (B9 << 17) | (B9 >> 15)
		MA := decodeLEInt(data, off+40)
		BA += MA
		BA = (BA << 17) | (BA >> 15)
		MB := decodeLEInt(data, off+44)
		BB += MB
		BB = (BB << 17) | (BB >> 15)
		MC := decodeLEInt(data, off+48)
		BC += MC
		BC = (BC << 17) | (BC >> 15)
		MD := decodeLEInt(data, off+52)
		BD += MD
		BD = (BD << 17) | (BD >> 15)
		ME := decodeLEInt(data, off+56)
		BE += ME
		BE = (BE << 17) | (BE >> 15)
		MF := decodeLEInt(data, off+60)
		BF += MF
		BF = (BF << 17) | (BF >> 15)
		off += 64
		// Mix the 64-bit block counter into A0/A1.
		A0 ^= uint32(d.w)
		A1 ^= uint32(d.w >> 32)
		d.w++
		// 48 unrolled permutation steps (three passes over B0..BF).
		A0 = ((A0 ^ (((AB << 15) | (AB >> 17)) * 5) ^ C8) * 3) ^ BD ^ (B9 & ^B6) ^ M0
		B0 = ^((B0 << 1) | (B0 >> 31)) ^ A0
		A1 = ((A1 ^ (((A0 << 15) | (A0 >> 17)) * 5) ^ C7) * 3) ^ BE ^ (BA & ^B7) ^ M1
		B1 = ^((B1 << 1) | (B1 >> 31)) ^ A1
		A2 = ((A2 ^ (((A1 << 15) | (A1 >> 17)) * 5) ^ C6) * 3) ^ BF ^ (BB & ^B8) ^ M2
		B2 = ^((B2 << 1) | (B2 >> 31)) ^ A2
		A3 = ((A3 ^ (((A2 << 15) | (A2 >> 17)) * 5) ^ C5) * 3) ^ B0 ^ (BC & ^B9) ^ M3
		B3 = ^((B3 << 1) | (B3 >> 31)) ^ A3
		A4 = ((A4 ^ (((A3 << 15) | (A3 >> 17)) * 5) ^ C4) * 3) ^ B1 ^ (BD & ^BA) ^ M4
		B4 = ^((B4 << 1) | (B4 >> 31)) ^ A4
		A5 = ((A5 ^ (((A4 << 15) | (A4 >> 17)) * 5) ^ C3) * 3) ^ B2 ^ (BE & ^BB) ^ M5
		B5 = ^((B5 << 1) | (B5 >> 31)) ^ A5
		A6 = ((A6 ^ (((A5 << 15) | (A5 >> 17)) * 5) ^ C2) * 3) ^ B3 ^ (BF & ^BC) ^ M6
		B6 = ^((B6 << 1) | (B6 >> 31)) ^ A6
		A7 = ((A7 ^ (((A6 << 15) | (A6 >> 17)) * 5) ^ C1) * 3) ^ B4 ^ (B0 & ^BD) ^ M7
		B7 = ^((B7 << 1) | (B7 >> 31)) ^ A7
		A8 = ((A8 ^ (((A7 << 15) | (A7 >> 17)) * 5) ^ C0) * 3) ^ B5 ^ (B1 & ^BE) ^ M8
		B8 = ^((B8 << 1) | (B8 >> 31)) ^ A8
		A9 = ((A9 ^ (((A8 << 15) | (A8 >> 17)) * 5) ^ CF) * 3) ^ B6 ^ (B2 & ^BF) ^ M9
		B9 = ^((B9 << 1) | (B9 >> 31)) ^ A9
		AA = ((AA ^ (((A9 << 15) | (A9 >> 17)) * 5) ^ CE) * 3) ^ B7 ^ (B3 & ^B0) ^ MA
		BA = ^((BA << 1) | (BA >> 31)) ^ AA
		AB = ((AB ^ (((AA << 15) | (AA >> 17)) * 5) ^ CD) * 3) ^ B8 ^ (B4 & ^B1) ^ MB
		BB = ^((BB << 1) | (BB >> 31)) ^ AB
		A0 = ((A0 ^ (((AB << 15) | (AB >> 17)) * 5) ^ CC) * 3) ^ B9 ^ (B5 & ^B2) ^ MC
		BC = ^((BC << 1) | (BC >> 31)) ^ A0
		A1 = ((A1 ^ (((A0 << 15) | (A0 >> 17)) * 5) ^ CB) * 3) ^ BA ^ (B6 & ^B3) ^ MD
		BD = ^((BD << 1) | (BD >> 31)) ^ A1
		A2 = ((A2 ^ (((A1 << 15) | (A1 >> 17)) * 5) ^ CA) * 3) ^ BB ^ (B7 & ^B4) ^ ME
		BE = ^((BE << 1) | (BE >> 31)) ^ A2
		A3 = ((A3 ^ (((A2 << 15) | (A2 >> 17)) * 5) ^ C9) * 3) ^ BC ^ (B8 & ^B5) ^ MF
		BF = ^((BF << 1) | (BF >> 31)) ^ A3
		A4 = ((A4 ^ (((A3 << 15) | (A3 >> 17)) * 5) ^ C8) * 3) ^ BD ^ (B9 & ^B6) ^ M0
		B0 = ^((B0 << 1) | (B0 >> 31)) ^ A4
		A5 = ((A5 ^ (((A4 << 15) | (A4 >> 17)) * 5) ^ C7) * 3) ^ BE ^ (BA & ^B7) ^ M1
		B1 = ^((B1 << 1) | (B1 >> 31)) ^ A5
		A6 = ((A6 ^ (((A5 << 15) | (A5 >> 17)) * 5) ^ C6) * 3) ^ BF ^ (BB & ^B8) ^ M2
		B2 = ^((B2 << 1) | (B2 >> 31)) ^ A6
		A7 = ((A7 ^ (((A6 << 15) | (A6 >> 17)) * 5) ^ C5) * 3) ^ B0 ^ (BC & ^B9) ^ M3
		B3 = ^((B3 << 1) | (B3 >> 31)) ^ A7
		A8 = ((A8 ^ (((A7 << 15) | (A7 >> 17)) * 5) ^ C4) * 3) ^ B1 ^ (BD & ^BA) ^ M4
		B4 = ^((B4 << 1) | (B4 >> 31)) ^ A8
		A9 = ((A9 ^ (((A8 << 15) | (A8 >> 17)) * 5) ^ C3) * 3) ^ B2 ^ (BE & ^BB) ^ M5
		B5 = ^((B5 << 1) | (B5 >> 31)) ^ A9
		AA = ((AA ^ (((A9 << 15) | (A9 >> 17)) * 5) ^ C2) * 3) ^ B3 ^ (BF & ^BC) ^ M6
		B6 = ^((B6 << 1) | (B6 >> 31)) ^ AA
		AB = ((AB ^ (((AA << 15) | (AA >> 17)) * 5) ^ C1) * 3) ^ B4 ^ (B0 & ^BD) ^ M7
		B7 = ^((B7 << 1) | (B7 >> 31)) ^ AB
		A0 = ((A0 ^ (((AB << 15) | (AB >> 17)) * 5) ^ C0) * 3) ^ B5 ^ (B1 & ^BE) ^ M8
		B8 = ^((B8 << 1) | (B8 >> 31)) ^ A0
		A1 = ((A1 ^ (((A0 << 15) | (A0 >> 17)) * 5) ^ CF) * 3) ^ B6 ^ (B2 & ^BF) ^ M9
		B9 = ^((B9 << 1) | (B9 >> 31)) ^ A1
		A2 = ((A2 ^ (((A1 << 15) | (A1 >> 17)) * 5) ^ CE) * 3) ^ B7 ^ (B3 & ^B0) ^ MA
		BA = ^((BA << 1) | (BA >> 31)) ^ A2
		A3 = ((A3 ^ (((A2 << 15) | (A2 >> 17)) * 5) ^ CD) * 3) ^ B8 ^ (B4 & ^B1) ^ MB
		BB = ^((BB << 1) | (BB >> 31)) ^ A3
		A4 = ((A4 ^ (((A3 << 15) | (A3 >> 17)) * 5) ^ CC) * 3) ^ B9 ^ (B5 & ^B2) ^ MC
		BC = ^((BC << 1) | (BC >> 31)) ^ A4
		A5 = ((A5 ^ (((A4 << 15) | (A4 >> 17)) * 5) ^ CB) * 3) ^ BA ^ (B6 & ^B3) ^ MD
		BD = ^((BD << 1) | (BD >> 31)) ^ A5
		A6 = ((A6 ^ (((A5 << 15) | (A5 >> 17)) * 5) ^ CA) * 3) ^ BB ^ (B7 & ^B4) ^ ME
		BE = ^((BE << 1) | (BE >> 31)) ^ A6
		A7 = ((A7 ^ (((A6 << 15) | (A6 >> 17)) * 5) ^ C9) * 3) ^ BC ^ (B8 & ^B5) ^ MF
		BF = ^((BF << 1) | (BF >> 31)) ^ A7
		A8 = ((A8 ^ (((A7 << 15) | (A7 >> 17)) * 5) ^ C8) * 3) ^ BD ^ (B9 & ^B6) ^ M0
		B0 = ^((B0 << 1) | (B0 >> 31)) ^ A8
		A9 = ((A9 ^ (((A8 << 15) | (A8 >> 17)) * 5) ^ C7) * 3) ^ BE ^ (BA & ^B7) ^ M1
		B1 = ^((B1 << 1) | (B1 >> 31)) ^ A9
		AA = ((AA ^ (((A9 << 15) | (A9 >> 17)) * 5) ^ C6) * 3) ^ BF ^ (BB & ^B8) ^ M2
		B2 = ^((B2 << 1) | (B2 >> 31)) ^ AA
		AB = ((AB ^ (((AA << 15) | (AA >> 17)) * 5) ^ C5) * 3) ^ B0 ^ (BC & ^B9) ^ M3
		B3 = ^((B3 << 1) | (B3 >> 31)) ^ AB
		A0 = ((A0 ^ (((AB << 15) | (AB >> 17)) * 5) ^ C4) * 3) ^ B1 ^ (BD & ^BA) ^ M4
		B4 = ^((B4 << 1) | (B4 >> 31)) ^ A0
		A1 = ((A1 ^ (((A0 << 15) | (A0 >> 17)) * 5) ^ C3) * 3) ^ B2 ^ (BE & ^BB) ^ M5
		B5 = ^((B5 << 1) | (B5 >> 31)) ^ A1
		A2 = ((A2 ^ (((A1 << 15) | (A1 >> 17)) * 5) ^ C2) * 3) ^ B3 ^ (BF & ^BC) ^ M6
		B6 = ^((B6 << 1) | (B6 >> 31)) ^ A2
		A3 = ((A3 ^ (((A2 << 15) | (A2 >> 17)) * 5) ^ C1) * 3) ^ B4 ^ (B0 & ^BD) ^ M7
		B7 = ^((B7 << 1) | (B7 >> 31)) ^ A3
		A4 = ((A4 ^ (((A3 << 15) | (A3 >> 17)) * 5) ^ C0) * 3) ^ B5 ^ (B1 & ^BE) ^ M8
		B8 = ^((B8 << 1) | (B8 >> 31)) ^ A4
		A5 = ((A5 ^ (((A4 << 15) | (A4 >> 17)) * 5) ^ CF) * 3) ^ B6 ^ (B2 & ^BF) ^ M9
		B9 = ^((B9 << 1) | (B9 >> 31)) ^ A5
		A6 = ((A6 ^ (((A5 << 15) | (A5 >> 17)) * 5) ^ CE) * 3) ^ B7 ^ (B3 & ^B0) ^ MA
		BA = ^((BA << 1) | (BA >> 31)) ^ A6
		A7 = ((A7 ^ (((A6 << 15) | (A6 >> 17)) * 5) ^ CD) * 3) ^ B8 ^ (B4 & ^B1) ^ MB
		BB = ^((BB << 1) | (BB >> 31)) ^ A7
		A8 = ((A8 ^ (((A7 << 15) | (A7 >> 17)) * 5) ^ CC) * 3) ^ B9 ^ (B5 & ^B2) ^ MC
		BC = ^((BC << 1) | (BC >> 31)) ^ A8
		A9 = ((A9 ^ (((A8 << 15) | (A8 >> 17)) * 5) ^ CB) * 3) ^ BA ^ (B6 & ^B3) ^ MD
		BD = ^((BD << 1) | (BD >> 31)) ^ A9
		AA = ((AA ^ (((A9 << 15) | (A9 >> 17)) * 5) ^ CA) * 3) ^ BB ^ (B7 & ^B4) ^ ME
		BE = ^((BE << 1) | (BE >> 31)) ^ AA
		AB = ((AB ^ (((AA << 15) | (AA >> 17)) * 5) ^ C9) * 3) ^ BC ^ (B8 & ^B5) ^ MF
		BF = ^((BF << 1) | (BF >> 31)) ^ AB
		// Fold C into A.
		AB += C6 + CA + CE
		AA += C5 + C9 + CD
		A9 += C4 + C8 + CC
		A8 += C3 + C7 + CB
		A7 += C2 + C6 + CA
		A6 += C1 + C5 + C9
		A5 += C0 + C4 + C8
		A4 += CF + C3 + C7
		A3 += CE + C2 + C6
		A2 += CD + C1 + C5
		A1 += CC + C0 + C4
		A0 += CB + CF + C3
		// Swap B and C, subtracting the message words from the new B.
		var tmp uint32
		tmp = B0
		B0 = C0 - M0
		C0 = tmp
		tmp = B1
		B1 = C1 - M1
		C1 = tmp
		tmp = B2
		B2 = C2 - M2
		C2 = tmp
		tmp = B3
		B3 = C3 - M3
		C3 = tmp
		tmp = B4
		B4 = C4 - M4
		C4 = tmp
		tmp = B5
		B5 = C5 - M5
		C5 = tmp
		tmp = B6
		B6 = C6 - M6
		C6 = tmp
		tmp = B7
		B7 = C7 - M7
		C7 = tmp
		tmp = B8
		B8 = C8 - M8
		C8 = tmp
		tmp = B9
		B9 = C9 - M9
		C9 = tmp
		tmp = BA
		BA = CA - MA
		CA = tmp
		tmp = BB
		BB = CB - MB
		CB = tmp
		tmp = BC
		BC = CC - MC
		CC = tmp
		tmp = BD
		BD = CD - MD
		CD = tmp
		tmp = BE
		BE = CE - ME
		CE = tmp
		tmp = BF
		BF = CF - MF
		CF = tmp
	}
	// Write the locals back into the state.
	d.state[0] = A0
	d.state[1] = A1
	d.state[2] = A2
	d.state[3] = A3
	d.state[4] = A4
	d.state[5] = A5
	d.state[6] = A6
	d.state[7] = A7
	d.state[8] = A8
	d.state[9] = A9
	d.state[10] = AA
	d.state[11] = AB
	d.state[12] = B0
	d.state[13] = B1
	d.state[14] = B2
	d.state[15] = B3
	d.state[16] = B4
	d.state[17] = B5
	d.state[18] = B6
	d.state[19] = B7
	d.state[20] = B8
	d.state[21] = B9
	d.state[22] = BA
	d.state[23] = BB
	d.state[24] = BC
	d.state[25] = BD
	d.state[26] = BE
	d.state[27] = BF
	d.state[28] = C0
	d.state[29] = C1
	d.state[30] = C2
	d.state[31] = C3
	d.state[32] = C4
	d.state[33] = C5
	d.state[34] = C6
	d.state[35] = C7
	d.state[36] = C8
	d.state[37] = C9
	d.state[38] = CA
	d.state[39] = CB
	d.state[40] = CC
	d.state[41] = CD
	d.state[42] = CE
	d.state[43] = CF
}
// core1 compresses the single 64-byte block currently held in d.buf.
// Its previous ~300-line body was an exact copy of one iteration of the
// loop in core (same message loads from d.buf at offsets 0..60, same
// counter handling, same 48 permutation steps, same B/C swap and state
// write-back), so it now simply delegates to core with num = 1. This
// removes the duplicated round function without changing behavior.
func (d *Digest) core1() {
	d.core(d.buf[:], 0, 1)
}
// Sum256 returns the SHABAL256 checksum of the data.
func Sum256(data []byte) [Size]byte {
	var d Digest
	d.Reset()
	d.Write(data) // Write always returns a nil error.
	return d.checkSum()
}
func getIV() []uint32 {
if iv == nil {
sg := new(Digest)
sg.buf[0] = 0
sg.buf[1] = 1
sg.buf[4] = 1
sg.buf[5] = 1
sg.buf[8] = 2
sg.buf[9] = 1
sg.buf[12] = 3
sg.buf[13] = 1
sg.buf[16] = 4
sg.buf[17] = 1
sg.buf[20] = 5
sg.buf[21] = 1
sg.buf[24] = 6
sg.buf[25] = 1
sg.buf[28] = 7
sg.buf[29] = 1
sg.buf[32] = 8
sg.buf[33] = 1
sg.buf[36] = 9
sg.buf[37] = 1
sg.buf[40] = 10
sg.buf[41] = 1
sg.buf[44] = 11
sg.buf[45] = 1
sg.buf[48] = 12
sg.buf[49] = 1
sg.buf[52] = 13
sg.buf[53] = 1
sg.buf[56] = 14
sg.buf[57] = 1
sg.buf[60] = 15
sg.buf[61] = 1
sg.w = -1
sg.core1()
sg.buf[0] = 16
sg.buf[4] = 17
sg.buf[8] = 18
sg.buf[12] = 19
sg.buf[16] = 20
sg.buf[20] = 21
sg.buf[24] = 22
sg.buf[28] = 23
sg.buf[32] = 24
sg.buf[36] = 25
sg.buf[40] = 26
sg.buf[44] = 27
sg.buf[48] = 28
sg.buf[52] = 29
sg.buf[56] = 30
sg.buf[60] = 31
sg.core1()
iv = make([]uint32, len(sg.state))
copy(iv[0:], sg.state[0:len(sg.state)])
}
return iv
} | shabal256.go | 0.66356 | 0.470676 | shabal256.go | starcoder |
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_random_forest
#include <capi/random_forest.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// RandomForestOptionalParam holds the optional parameters accepted by
// RandomForest. Obtain an instance pre-filled with the documented defaults
// via RandomForestOptions. (Field meanings are taken from the binding
// documentation on RandomForest below.)
type RandomForestOptionalParam struct {
	InputModel            *randomForestModel // pre-trained forest to use for classification
	Labels                *mat.Dense         // labels for the training dataset
	MaximumDepth          int                // maximum tree depth (0 = no limit)
	MinimumGainSplit      float64            // minimum gain required to split a node
	MinimumLeafSize       int                // minimum number of points per leaf
	NumTrees              int                // number of trees in the forest
	PrintTrainingAccuracy bool               // report accuracy on the training set
	Seed                  int                // random seed (0 = time-based)
	SubspaceDim           int                // random subspace dimensionality (0 = auto, sqrt(d))
	Test                  *mat.Dense         // test dataset to produce predictions for
	TestLabels            *mat.Dense         // labels for the test dataset, for accuracy
	Training              *mat.Dense         // training dataset
	Verbose               bool               // verbose logging
}
// RandomForestOptions returns a RandomForestOptionalParam populated with the
// default value of every optional parameter of RandomForest.
func RandomForestOptions() *RandomForestOptionalParam {
	return &RandomForestOptionalParam{
		InputModel:            nil,
		Labels:                nil,
		MaximumDepth:          0,
		MinimumGainSplit:      0,
		MinimumLeafSize:       1,
		NumTrees:              10,
		PrintTrainingAccuracy: false,
		Seed:                  0,
		SubspaceDim:           0,
		Test:                  nil,
		TestLabels:            nil,
		Training:              nil,
		Verbose:               false,
	}
}
/*
This program is an implementation of the standard random forest classification
algorithm by Leo Breiman. A random forest can be trained and saved for later
use, or a random forest may be loaded and predictions or class probabilities
for points may be generated.
The training set and associated labels are specified with the "Training" and
"Labels" parameters, respectively. The labels should be in the range [0,
num_classes - 1]. Optionally, if "Labels" is not specified, the labels are
assumed to be the last dimension of the training dataset.
When a model is trained, the "OutputModel" output parameter may be used to
save the trained model. A model may be loaded for predictions with the
"InputModel"parameter. The "InputModel" parameter may not be specified when
the "Training" parameter is specified. The "MinimumLeafSize" parameter
specifies the minimum number of training points that must fall into each leaf
for it to be split. The "NumTrees" controls the number of trees in the random
forest. The "MinimumGainSplit" parameter controls the minimum required gain
for a decision tree node to split. Larger values will force higher-confidence
splits. The "MaximumDepth" parameter specifies the maximum depth of the tree.
The "SubspaceDim" parameter is used to control the number of random
dimensions chosen for an individual node's split. If "PrintTrainingAccuracy"
is specified, the calculated accuracy on the training set will be printed.
Test data may be specified with the "Test" parameter, and if performance
measures are desired for that test set, labels for the test points may be
specified with the "TestLabels" parameter. Predictions for each test point
may be saved via the "Predictions"output parameter. Class probabilities for
each prediction may be saved with the "Probabilities" output parameter.
For example, to train a random forest with a minimum leaf size of 20 using 10
trees on the dataset contained in data with labels labels, saving the output
random forest to rf_model and printing the training error, one could call
// Initialize optional parameters for RandomForest().
param := mlpack.RandomForestOptions()
param.Training = data
param.Labels = labels
param.MinimumLeafSize = 20
param.NumTrees = 10
param.PrintTrainingAccuracy = true
rf_model, _, _ := mlpack.RandomForest(param)
Then, to use that model to classify points in test_set and print the test
error given the labels test_labels using that model, while saving the
predictions for each point to predictions, one could call
// Initialize optional parameters for RandomForest().
param := mlpack.RandomForestOptions()
param.InputModel = &rf_model
param.Test = test_set
param.TestLabels = test_labels
_, predictions, _ := mlpack.RandomForest(param)
Input parameters:
- InputModel (randomForestModel): Pre-trained random forest to use for
classification.
- Labels (mat.Dense): Labels for training dataset.
- MaximumDepth (int): Maximum depth of the tree (0 means no limit).
Default value 0.
- MinimumGainSplit (float64): Minimum gain needed to make a split when
building a tree. Default value 0.
- MinimumLeafSize (int): Minimum number of points in each leaf node.
Default value 1.
- NumTrees (int): Number of trees in the random forest. Default value
10.
- PrintTrainingAccuracy (bool): If set, then the accuracy of the model
on the training set will be predicted (verbose must also be specified).
- Seed (int): Random seed. If 0, 'std::time(NULL)' is used. Default
value 0.
- SubspaceDim (int): Dimensionality of random subspace to use for each
split. '0' will autoselect the square root of data dimensionality.
Default value 0.
- Test (mat.Dense): Test dataset to produce predictions for.
- TestLabels (mat.Dense): Test dataset labels, if accuracy calculation
is desired.
- Training (mat.Dense): Training dataset.
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
Output parameters:
- outputModel (randomForestModel): Model to save trained random forest
to.
- predictions (mat.Dense): Predicted classes for each point in the test
set.
- probabilities (mat.Dense): Predicted class probabilities for each
point in the test set.
*/
func RandomForest(param *RandomForestOptionalParam) (randomForestModel, *mat.Dense, *mat.Dense) {
resetTimers()
enableTimers()
disableBacktrace()
disableVerbose()
restoreSettings("Random forests")
// Detect if the parameter was passed; set if so.
if param.InputModel != nil {
setRandomForestModel("input_model", param.InputModel)
setPassed("input_model")
}
// Detect if the parameter was passed; set if so.
if param.Labels != nil {
gonumToArmaUrow("labels", param.Labels)
setPassed("labels")
}
// Detect if the parameter was passed; set if so.
if param.MaximumDepth != 0 {
setParamInt("maximum_depth", param.MaximumDepth)
setPassed("maximum_depth")
}
// Detect if the parameter was passed; set if so.
if param.MinimumGainSplit != 0 {
setParamDouble("minimum_gain_split", param.MinimumGainSplit)
setPassed("minimum_gain_split")
}
// Detect if the parameter was passed; set if so.
if param.MinimumLeafSize != 1 {
setParamInt("minimum_leaf_size", param.MinimumLeafSize)
setPassed("minimum_leaf_size")
}
// Detect if the parameter was passed; set if so.
if param.NumTrees != 10 {
setParamInt("num_trees", param.NumTrees)
setPassed("num_trees")
}
// Detect if the parameter was passed; set if so.
if param.PrintTrainingAccuracy != false {
setParamBool("print_training_accuracy", param.PrintTrainingAccuracy)
setPassed("print_training_accuracy")
}
// Detect if the parameter was passed; set if so.
if param.Seed != 0 {
setParamInt("seed", param.Seed)
setPassed("seed")
}
// Detect if the parameter was passed; set if so.
if param.SubspaceDim != 0 {
setParamInt("subspace_dim", param.SubspaceDim)
setPassed("subspace_dim")
}
// Detect if the parameter was passed; set if so.
if param.Test != nil {
gonumToArmaMat("test", param.Test)
setPassed("test")
}
// Detect if the parameter was passed; set if so.
if param.TestLabels != nil {
gonumToArmaUrow("test_labels", param.TestLabels)
setPassed("test_labels")
}
// Detect if the parameter was passed; set if so.
if param.Training != nil {
gonumToArmaMat("training", param.Training)
setPassed("training")
}
// Detect if the parameter was passed; set if so.
if param.Verbose != false {
setParamBool("verbose", param.Verbose)
setPassed("verbose")
enableVerbose()
}
// Mark all output options as passed.
setPassed("output_model")
setPassed("predictions")
setPassed("probabilities")
// Call the mlpack program.
C.mlpackRandomForest()
// Initialize result variable and get output.
var outputModel randomForestModel
outputModel.getRandomForestModel("output_model")
var predictionsPtr mlpackArma
predictions := predictionsPtr.armaToGonumUrow("predictions")
var probabilitiesPtr mlpackArma
probabilities := probabilitiesPtr.armaToGonumMat("probabilities")
// Clear settings.
clearSettings()
// Return output(s).
return outputModel, predictions, probabilities
} | random_forest.go | 0.733452 | 0.474814 | random_forest.go | starcoder |
package expr
import (
"bytes"
"fmt"
"io"
)
// Program represents a parsed expression.
type Program struct {
root node
}
// Parse parses an expression into a program.
func Parse(r io.RuneScanner) *Program {
return &Program{newparser(newscanner(r)).parse()}
}
// ParseString parses an expression from a string.
func ParseString(s string) *Program {
return Parse(bytes.NewBufferString(s))
}
type parser struct {
s *scanner
a bool
t token
}
func newparser(s *scanner) *parser {
return &parser{s: s}
}
func (p *parser) readtoken() *token {
if !p.a {
p.a = true
p.t = p.s.scan()
}
return &p.t
}
func (p *parser) consume() {
p.a = false
}
func (p *parser) accept(k tokenkind) bool {
if p.readtoken().kind == k {
p.consume()
return true
}
return false
}
func (p *parser) expect(k tokenkind) {
if p.readtoken().kind != k {
panic(fmt.Errorf("expected %s token, got %s", k, p.t.kind))
}
p.consume()
}
// This parser is strongly based on byuu's modified recursive-descent algorithm
// (particularly the 'depth' parameter.)
// https://github.com/byuu/bsnes/blob/master/nall/string/eval/parser.hpp
func (p *parser) parseexpr(depth int) node {
var n node
unary := func(op unaryop, depth int) {
n = unaryexpr{op: op, n: p.parseexpr(depth)}
}
binary := func(op binaryop, depth int) {
if n == nil {
panic("unexpected binary op")
}
n = binaryexpr{op: op, a: n, b: p.parseexpr(depth)}
}
ternary := func(depth int) {
t := ternaryexpr{}
t.a = n
t.b = p.parseexpr(depth)
p.expect(colontoken)
t.c = p.parseexpr(depth)
n = t
}
switch {
case p.accept(identtoken):
n = newidentnode(p.t)
case p.accept(inttoken):
n = newintnode(p.t)
case p.accept(floattoken):
n = newfloatnode(p.t)
case p.accept(booltoken):
n = newboolnode(p.t)
case p.accept(strtoken):
n = newstrnode(p.t)
case p.accept(runetoken):
n = newrunenode(p.t)
case p.accept(nilkeyword):
n = newnilnode(p.t)
case p.accept(leftparentoken):
n = p.parseexpr(1)
default:
}
for {
if depth >= 8 {
break
}
if n != nil && p.accept(periodtoken) {
binary(binarymember, 8)
continue
}
if n != nil && p.accept(leftparentoken) {
binary(binarycall, 1)
continue
}
if n != nil && p.accept(leftbrackettoken) {
binary(binarysubscript, 1)
continue
}
if n == nil && p.accept(addtoken) {
unary(unaryplus, 7)
}
if n == nil && p.accept(subtoken) {
unary(unarynegate, 7)
}
if n == nil && p.accept(nottoken) {
unary(unarynot, 7)
}
if n == nil && p.accept(xortoken) {
unary(unarybitnot, 7)
}
if n == nil && p.accept(multoken) {
unary(unaryderef, 7)
}
if n == nil && p.accept(andtoken) {
unary(unaryref, 7)
}
if depth >= 7 {
break
}
if p.accept(multoken) {
binary(binarymul, 7)
continue
}
if p.accept(quotoken) {
binary(binarydiv, 7)
continue
}
if p.accept(remtoken) {
binary(binaryrem, 7)
continue
}
if p.accept(shltoken) {
binary(binarylsh, 7)
continue
}
if p.accept(shrtoken) {
binary(binaryrsh, 7)
continue
}
if p.accept(andtoken) {
binary(binaryand, 7)
continue
}
if depth >= 6 {
break
}
if p.accept(addtoken) {
binary(binaryadd, 6)
continue
}
if p.accept(subtoken) {
binary(binarysub, 6)
continue
}
if p.accept(ortoken) {
binary(binaryor, 6)
continue
}
if p.accept(xortoken) {
binary(binaryxor, 6)
continue
}
if depth >= 5 {
break
}
if p.accept(equaltoken) {
binary(binaryequal, 5)
continue
}
if p.accept(notequaltoken) {
binary(binarynotequal, 5)
continue
}
if p.accept(lessertoken) {
binary(binarylesser, 5)
continue
}
if p.accept(lesserequaltoken) {
binary(binarylesserequal, 5)
continue
}
if p.accept(greatertoken) {
binary(binarygreater, 5)
continue
}
if p.accept(greaterequaltoken) {
binary(binarygreaterequal, 5)
continue
}
if depth >= 4 {
break
}
if p.accept(logicalandtoken) {
binary(binarylogicaland, 4)
continue
}
if depth >= 3 {
break
}
if p.accept(logicalortoken) {
binary(binarylogicalor, 3)
continue
}
if p.accept(ternarytoken) {
ternary(3)
continue
}
if depth >= 2 {
break
}
if p.accept(commatoken) {
binary(binarygroup, 2)
continue
}
if depth >= 1 && (p.accept(rightparentoken) || p.accept(rightbrackettoken)) {
break
}
p.expect(eoftoken)
break
}
return n
}
func (p *parser) parse() node {
return p.parseexpr(0)
} | sgx-tools/vendor/github.com/go-restruct/restruct/expr/parse.go | 0.628863 | 0.434761 | parse.go | starcoder |
package evaluator
import (
"fmt"
"github.com/bradford-hamilton/monkey-lang/ast"
"github.com/bradford-hamilton/monkey-lang/object"
)
// No need to create new true/false or null objects every time we encounter one, they will
// be the same. Let's reference them instead
var (
True = &object.Boolean{Value: true}
False = &object.Boolean{Value: false}
Null = &object.Null{}
)
// Eval takes an ast.Node (starting with the RootNode) and traverses the AST.
// It switches on the node's type and recursively evaluates them appropriately
func Eval(node ast.Node, env *object.Environment) object.Object {
switch node := node.(type) {
// Statements
case *ast.RootNode:
return evalRootNode(node, env)
case *ast.BlockStatement:
return evalBlockStmt(node, env)
case *ast.ExpressionStatement:
return Eval(node.Expression, env)
case *ast.ReturnStatement:
val := Eval(node.ReturnValue, env)
if isError(val) {
return val
}
return &object.ReturnValue{Value: val}
case *ast.LetStatement:
val := Eval(node.Value, env)
if isError(val) {
return val
}
env.Set(node.Name.Value, val)
case *ast.ConstStatement:
val := Eval(node.Value, env)
if isError(val) {
return val
}
env.Set(node.Name.Value, val)
// Expressions
case *ast.IntegerLiteral:
return &object.Integer{Value: node.Value}
case *ast.StringLiteral:
return &object.String{Value: node.Value}
case *ast.Boolean:
return nativeBoolToBooleanObj(node.Value)
case *ast.PrefixExpression:
right := Eval(node.Right, env)
if isError(right) {
return right
}
return evalPrefixExpr(node.Operator, right, node.Token.Line)
case *ast.InfixExpression:
left := Eval(node.Left, env)
if isError(left) {
return left
}
right := Eval(node.Right, env)
if isError(right) {
return right
}
return evalInfixExpr(node.Operator, left, right, node.Token.Line)
case *ast.PostfixExpression:
return evalPostfixExpr(env, node.Operator, node)
case *ast.IfExpression:
return evalIfExpr(node, env)
case *ast.Identifier:
return evalIdentifier(node, env)
case *ast.FunctionLiteral:
params := node.Parameters
body := node.Body
return &object.Function{
Parameters: params,
Body: body,
Env: env,
}
case *ast.CallExpression:
fn := Eval(node.Function, env)
if isError(fn) {
return fn
}
args := evalExprs(node.Arguments, env)
if len(args) == 1 && isError(args[0]) {
return args[0]
}
return applyFunction(fn, args, node.Token.Line)
case *ast.ArrayLiteral:
elements := evalExprs(node.Elements, env)
if len(elements) == 1 && isError(elements[0]) {
return elements[0]
}
return &object.Array{Elements: elements}
case *ast.IndexExpression:
left := Eval(node.Left, env)
if isError(left) {
return left
}
index := Eval(node.Index, env)
if isError(index) {
return index
}
return evalIndexExpr(left, index, node.Token.Line)
case *ast.HashLiteral:
return evalHashLiteral(node, env)
}
return nil
}
func evalRootNode(rootNode *ast.RootNode, env *object.Environment) object.Object {
var result object.Object
for _, stmt := range rootNode.Statements {
result = Eval(stmt, env)
switch result := result.(type) {
case *object.ReturnValue:
return result.Value
case *object.Error:
return result
}
}
return result
}
func evalBlockStmt(block *ast.BlockStatement, env *object.Environment) object.Object {
var result object.Object
for _, stmt := range block.Statements {
result = Eval(stmt, env)
if result != nil {
rt := result.Type()
if rt == object.ReturnValueObj || rt == object.ErrorObj {
return result
}
}
}
return result
}
func nativeBoolToBooleanObj(input bool) *object.Boolean {
if input {
return True
}
return False
}
// Coerce our different object types to booleans for truthy/falsey values
func coerceObjToNativeBool(o object.Object) bool {
if rv, ok := o.(*object.ReturnValue); ok {
o = rv.Value
}
switch obj := o.(type) {
case *object.Boolean:
return obj.Value
case *object.String:
return obj.Value != ""
case *object.Null:
return false
case *object.Integer:
return obj.Value != 0
case *object.Array:
return len(obj.Elements) > 0
case *object.Hash:
return len(obj.Pairs) > 0
default:
return true
}
}
func evalPrefixExpr(operator string, right object.Object, line int) object.Object {
switch operator {
case "!":
return evalBangOperatorExpr(right)
case "-":
return evalMinusPrefixOperatorExpr(right, line)
default:
return newError("Line %d: Unknown operator: %s%s", line, operator, right.Type())
}
}
func evalBangOperatorExpr(right object.Object) object.Object {
switch right {
case True:
return False
case False:
return True
case Null:
return True
default:
return False
}
}
func evalMinusPrefixOperatorExpr(right object.Object, line int) object.Object {
if right.Type() != object.IntegerObj {
return newError("Line %d: Unknown operator: -%s", line, right.Type())
}
value := right.(*object.Integer).Value
return &object.Integer{Value: -value}
}
func evalPostfixExpr(env *object.Environment, operator string, node *ast.PostfixExpression) object.Object {
switch operator {
case "++":
val, ok := env.Get(node.Token.Literal)
if !ok {
return newError("Line %d: Token literal %s is unknown", node.Token.Line, node.Token.Literal)
}
arg, ok := val.(*object.Integer)
if !ok {
return newError("Line %d: Invalid left-hand side expression in postfix operation", node.Token.Line)
}
v := arg.Value
env.Set(node.Token.Literal, &object.Integer{Value: v + 1})
return arg
case "--":
val, ok := env.Get(node.Token.Literal)
if !ok {
return newError("Line %d: Token literal %s is unknown", node.Token.Line, node.Token.Literal)
}
arg, ok := val.(*object.Integer)
if !ok {
return newError("Line %d: Invalid left-hand side expression in postfix operation", node.Token.Line)
}
v := arg.Value
env.Set(node.Token.Literal, &object.Integer{Value: v - 1})
return arg
default:
return newError("Line %d: Unknown operator: %s", node.Token.Line, operator)
}
}
func evalInfixExpr(operator string, left, right object.Object, line int) object.Object {
switch {
case left.Type() == object.IntegerObj && right.Type() == object.IntegerObj:
return evalIntegerInfixExpr(operator, left, right, line)
case left.Type() == object.StringObj && right.Type() == object.StringObj:
return evalStringInfixExpr(operator, left, right, line)
case operator == "==":
return nativeBoolToBooleanObj(left == right)
case operator == "!=":
return nativeBoolToBooleanObj(left != right)
case operator == "&&":
return nativeBoolToBooleanObj(coerceObjToNativeBool(left) && coerceObjToNativeBool(right))
case operator == "||":
return nativeBoolToBooleanObj(coerceObjToNativeBool(left) || coerceObjToNativeBool(right))
case left.Type() != right.Type():
return newError("Line %d: Type mismatch: %s %s %s", line, left.Type(), operator, right.Type())
default:
fmt.Printf("%s %s %s", left.Type(), operator, right.Type())
return newError("Line %d: Unknown operator: %s %s %s", line, left.Type(), operator, right.Type())
}
}
func evalIfExpr(ifExpr *ast.IfExpression, env *object.Environment) object.Object {
condition := Eval(ifExpr.Condition, env)
if isError(condition) {
return condition
}
if isTruthy(condition) {
return Eval(ifExpr.Consequence, env)
} else if ifExpr.Alternative != nil {
return Eval(ifExpr.Alternative, env)
} else {
return Null
}
}
func isTruthy(obj object.Object) bool {
switch obj {
case Null:
return false
case True:
return true
case False:
return false
default:
return true
}
}
func evalIntegerInfixExpr(operator string, left, right object.Object, line int) object.Object {
leftVal := left.(*object.Integer).Value
rightVal := right.(*object.Integer).Value
switch operator {
case "+":
return &object.Integer{Value: leftVal + rightVal}
case "-":
return &object.Integer{Value: leftVal - rightVal}
case "*":
return &object.Integer{Value: leftVal * rightVal}
case "/":
return &object.Integer{Value: leftVal / rightVal}
case "%":
return &object.Integer{Value: leftVal % rightVal}
case "<":
return nativeBoolToBooleanObj(leftVal < rightVal)
case ">":
return nativeBoolToBooleanObj(leftVal > rightVal)
case "<=":
return nativeBoolToBooleanObj(leftVal <= rightVal)
case ">=":
return nativeBoolToBooleanObj(leftVal >= rightVal)
case "==":
return nativeBoolToBooleanObj(leftVal == rightVal)
case "!=":
return nativeBoolToBooleanObj(leftVal != rightVal)
default:
return newError("Line %d: Unknown operator: %s %s %s", line, left.Type(), operator, right.Type())
}
}
func evalStringInfixExpr(operator string, left, right object.Object, line int) object.Object {
leftVal := left.(*object.String).Value
rightVal := right.(*object.String).Value
switch operator {
case "+":
return &object.String{Value: leftVal + rightVal}
case "==":
return nativeBoolToBooleanObj(leftVal == rightVal)
case "!=":
return nativeBoolToBooleanObj(leftVal != rightVal)
default:
return newError("Line %d: Unknown operator: %s %s %s", line, left.Type(), operator, right.Type())
}
}
func evalIdentifier(node *ast.Identifier, env *object.Environment) object.Object {
if val, ok := env.Get(node.Value); ok {
return val
}
if builtinFn, ok := builtinFunctions[node.Value]; ok {
return builtinFn
}
return newError("Line %d: Identifier not found: %s", node.Token.Line, node.Value)
}
func evalIndexExpr(left, index object.Object, line int) object.Object {
switch {
case left.Type() == object.ArrayObj && index.Type() == object.IntegerObj:
return evalArrayIndexExpr(left, index)
case left.Type() == object.HashObj:
return evalHashIndexExpr(left, index, line)
default:
return newError("Line %d: Index operator not supported: %s", line, left.Type())
}
}
func evalArrayIndexExpr(array, index object.Object) object.Object {
arrayObj := array.(*object.Array)
idx := index.(*object.Integer).Value
max := int64(len(arrayObj.Elements) - 1)
if idx < 0 || idx > max {
return Null
}
return arrayObj.Elements[idx]
}
func evalHashIndexExpr(hash, index object.Object, line int) object.Object {
hashObj := hash.(*object.Hash)
key, ok := index.(object.Hashable)
if !ok {
return newError("Line %d: Unusable as a hash key: %s", line, index.Type())
}
pair, ok := hashObj.Pairs[key.HashKey()]
if !ok {
return Null
}
return pair.Value
}
func evalHashLiteral(node *ast.HashLiteral, env *object.Environment) object.Object {
pairs := make(map[object.HashKey]object.HashPair)
for keyNode, valueNode := range node.Pairs {
key := Eval(keyNode, env)
if isError(key) {
return key
}
hashKey, ok := key.(object.Hashable)
if !ok {
return newError("Line %d: Unusable as a hash key: %s", node.Token.Line, key.Type())
}
value := Eval(valueNode, env)
if isError(value) {
return value
}
hashed := hashKey.HashKey()
pairs[hashed] = object.HashPair{Key: key, Value: value}
}
return &object.Hash{Pairs: pairs}
}
func evalExprs(exprs []ast.Expression, env *object.Environment) []object.Object {
var result []object.Object
for _, expr := range exprs {
evaluated := Eval(expr, env)
if isError(evaluated) {
return []object.Object{evaluated}
}
result = append(result, evaluated)
}
return result
}
func applyFunction(function object.Object, args []object.Object, line int) object.Object {
switch fn := function.(type) {
case *object.Function:
extendedEnv := extendFunctionEnv(fn, args)
evaluated := Eval(fn.Body, extendedEnv)
return unwrapReturnValue(evaluated)
case *object.Builtin:
if result := fn.Fn(args...); result != nil {
return result
}
return Null
default:
return newError("Line %d: Not a function: %s", line, function.Type())
}
}
func extendFunctionEnv(fn *object.Function, args []object.Object) *object.Environment {
env := object.NewEnclosedEnvironment(fn.Env)
for i, param := range fn.Parameters {
env.Set(param.Value, args[i])
}
return env
}
func unwrapReturnValue(obj object.Object) object.Object {
if returnValue, ok := obj.(*object.ReturnValue); ok {
return returnValue.Value
}
return obj
}
func newError(msgWithFormatVerbs string, values ...interface{}) *object.Error {
return &object.Error{Message: fmt.Sprintf(msgWithFormatVerbs, values...)}
}
func isError(obj object.Object) bool {
if obj != nil {
return obj.Type() == object.ErrorObj
}
return false
} | evaluator/evaluator.go | 0.702734 | 0.439928 | evaluator.go | starcoder |
package blockchain
import (
"encoding/hex"
"fmt"
"github.com/parallelcointeam/pod/chaincfg/chainhash"
"github.com/parallelcointeam/pod/fork"
"math"
"math/big"
"math/rand"
"time"
)
var (
scryptPowLimit = func() big.Int {
mplb, _ := hex.DecodeString("000000039fcaa04ac30b6384471f337748ef5c87c7aeffce5e51770ce6283137,")
return *big.NewInt(0).SetBytes(mplb) //AllOnes.Rsh(&AllOnes, 0)
}()
ScryptPowLimit = scryptPowLimit
ScryptPowLimitBits = BigToCompact(&scryptPowLimit)
// bigOne is 1 represented as a big.Int. It is defined here to avoid the overhead of creating it multiple times.
bigOne = big.NewInt(1)
// oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid the overhead of creating it multiple times.
oneLsh256 = new(big.Int).Lsh(bigOne, 256)
)
// HashToBig converts a chainhash.Hash into a big.Int that can be used to perform math comparisons.
func HashToBig(hash *chainhash.Hash) *big.Int {
// A Hash is in little-endian, but the big package wants the bytes in big-endian, so reverse them.
buf := *hash
blen := len(buf)
for i := 0; i < blen/2; i++ {
buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i]
}
return new(big.Int).SetBytes(buf[:])
}
// CompactToBig converts a compact representation of a whole number N to an unsigned 32-bit number. The representation is similar to IEEE754 floating
// point numbers.
// Like IEEE754 floating point, there are three basic components: the sign, the exponent, and the mantissa. They are broken out as follows:
// * the most significant 8 bits represent the unsigned base 256 exponent
// * bit 23 (the 24th bit) represents the sign bit
// * the least significant 23 bits represent the mantissa
// -------------------------------------------------
// | Exponent | Sign | Mantissa |
// -------------------------------------------------
// | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] |
// -------------------------------------------------
// The formula to calculate N is:
// N = (-1^sign) * mantissa * 256^(exponent-3)
// This compact form is only used in bitcoin to encode unsigned 256-bit numbers which represent difficulty targets, thus there really is not a need for a sign bit, but it is implemented here to stay consistent with bitcoind.
func CompactToBig(compact uint32) *big.Int {
// Extract the mantissa, sign bit, and exponent.
mantissa := compact & 0x007fffff
isNegative := compact&0x00800000 != 0
exponent := uint(compact >> 24)
// Since the base for the exponent is 256, the exponent can be treated as the number of bytes to represent the full 256-bit number. So, treat the exponent as the number of bytes and shift the mantissa right or left accordingly. This is equivalent to: N = mantissa * 256^(exponent-3)
var bn *big.Int
if exponent <= 3 {
mantissa >>= 8 * (3 - exponent)
bn = big.NewInt(int64(mantissa))
} else {
bn = big.NewInt(int64(mantissa))
bn.Lsh(bn, 8*(exponent-3))
}
// Make it negative if the sign bit is set.
if isNegative {
bn = bn.Neg(bn)
}
return bn
}
// BigToCompact converts a whole number N to a compact representation using an unsigned 32-bit number. The compact representation only provides 23 bits of precision, so values larger than (2^23 - 1) only encode the most significant digits of the number. See CompactToBig for details.
func BigToCompact(n *big.Int) uint32 {
// No need to do any work if it's zero.
if n.Sign() == 0 {
return 0
}
// Since the base for the exponent is 256, the exponent can be treated as the number of bytes. So, shift the number right or left accordingly. This is equivalent to: mantissa = mantissa / 256^(exponent-3)
var mantissa uint32
exponent := uint(len(n.Bytes()))
if exponent <= 3 {
mantissa = uint32(n.Bits()[0])
mantissa <<= 8 * (3 - exponent)
} else {
// Use a copy to avoid modifying the caller's original number.
tn := new(big.Int).Set(n)
mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0])
}
// When the mantissa already has the sign bit set, the number is too large to fit into the available 23-bits, so divide the number by 256 and increment the exponent accordingly.
if mantissa&0x00800000 != 0 {
mantissa >>= 8
exponent++
}
// Pack the exponent, sign bit, and mantissa into an unsigned 32-bit int and return it.
compact := uint32(exponent<<24) | mantissa
if n.Sign() < 0 {
compact |= 0x00800000
}
return compact
}
// CalcWork calculates a work value from difficulty bits. Bitcoin increases the difficulty for generating a block by decreasing the value which the generated hash must be less than. This difficulty target is stored in each block header using a compact representation as described in the documentation for CompactToBig. The main chain is selected by choosing the chain that has the most proof of work (highest difficulty). Since a lower target difficulty value equates to higher actual difficulty, the work value which will be accumulated must be the inverse of the difficulty. Also, in order to avoid potential division by zero and really small floating point numbers, the result adds 1 to the denominator and multiplies the numerator by 2^256.
func CalcWork(bits uint32, height int32, algover int32) *big.Int {
// Return a work value of zero if the passed difficulty bits represent a negative number. Note this should not happen in practice with valid blocks, but an invalid block could trigger it.
difficultyNum := CompactToBig(bits)
// To make the difficulty values correlate to number of hash operations, multiply this difficulty base by the nanoseconds/hash figures in the fork algorithms list
current := fork.GetCurrent(height)
algoname := fork.List[current].AlgoVers[algover]
difficultyNum = new(big.Int).Mul(difficultyNum, big.NewInt(fork.List[current].Algos[algoname].NSperOp))
difficultyNum = new(big.Int).Quo(difficultyNum, big.NewInt(fork.List[current].WorkBase))
if difficultyNum.Sign() <= 0 {
return big.NewInt(0)
}
denominator := new(big.Int).Add(difficultyNum, bigOne)
r := new(big.Int).Div(oneLsh256, denominator)
return r
}
// calcEasiestDifficulty calculates the easiest possible difficulty that a block can have given starting difficulty bits and a duration. It is mainly used to verify that claimed proof of work by a block is sane as compared to a known good checkpoint.
func (b *BlockChain) calcEasiestDifficulty(bits uint32, duration time.Duration) uint32 {
// Convert types used in the calculations below.
durationVal := int64(duration / time.Second)
adjustmentFactor := big.NewInt(b.chainParams.RetargetAdjustmentFactor)
// Since easier difficulty equates to higher numbers, the easiest difficulty for a given duration is the largest value possible given the number of retargets for the duration and starting difficulty multiplied by the max adjustment factor.
newTarget := CompactToBig(bits)
for durationVal > 0 && newTarget.Cmp(b.chainParams.PowLimit) < 0 {
newTarget.Mul(newTarget, adjustmentFactor)
durationVal -= b.maxRetargetTimespan
}
// Limit new value to the proof of work limit.
if newTarget.Cmp(b.chainParams.PowLimit) > 0 {
newTarget.Set(b.chainParams.PowLimit)
}
return BigToCompact(newTarget)
}
// findPrevTestNetDifficulty returns the difficulty of the previous block which did not have the special testnet minimum difficulty rule applied. This function MUST be called with the chain state lock held (for writes).
func (b *BlockChain) findPrevTestNetDifficulty(startNode *blockNode) uint32 {
// Search backwards through the chain for the last block without the special rule applied.
iterNode := startNode
for iterNode != nil && iterNode.height%b.blocksPerRetarget != 0 &&
iterNode.bits == b.chainParams.PowLimitBits {
iterNode = iterNode.parent
}
// Return the found difficulty or the minimum difficulty if no appropriate block was found.
lastBits := b.chainParams.PowLimitBits
if iterNode != nil {
lastBits = iterNode.bits
}
return lastBits
}
// calcNextRequiredDifficulty calculates the required difficulty for the block after the passed previous block node based on the difficulty retarget rules. This function differs from the exported CalcNextRequiredDifficulty in that the exported version uses the current best chain as the previous block node while this function accepts any block node.
func (b *BlockChain) calcNextRequiredDifficulty(lastNode *blockNode, newBlockTime time.Time, algoname string, l bool) (newTargetBits uint32, err error) {
switch fork.GetCurrent(lastNode.height + 1) {
case 0:
nH := lastNode.height + 1
algo := fork.GetAlgoVer(algoname, nH)
newTargetBits = fork.GetMinBits(algoname, nH)
if lastNode == nil {
return newTargetBits, nil
}
prevNode := lastNode
if prevNode.version != algo {
prevNode = prevNode.GetPrevWithAlgo(algo)
}
firstNode := prevNode.GetPrevWithAlgo(algo)
for i := int64(1); firstNode != nil && i < b.chainParams.AveragingInterval; i++ {
firstNode = firstNode.RelativeAncestor(1).GetPrevWithAlgo(algo)
}
if firstNode == nil {
return newTargetBits, nil
}
actualTimespan := prevNode.timestamp - firstNode.timestamp
adjustedTimespan := actualTimespan
if actualTimespan < b.chainParams.MinActualTimespan {
adjustedTimespan = b.chainParams.MinActualTimespan
} else if actualTimespan > b.chainParams.MaxActualTimespan {
adjustedTimespan = b.chainParams.MaxActualTimespan
}
oldTarget := CompactToBig(prevNode.bits)
newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan))
newTarget = newTarget.Div(newTarget, big.NewInt(b.chainParams.AveragingTargetTimespan))
if newTarget.Cmp(CompactToBig(newTargetBits)) > 0 {
newTarget.Set(CompactToBig(newTargetBits))
}
newTargetBits = BigToCompact(newTarget)
log.Debugf("Difficulty retarget at block height %d, old %08x new %08x", lastNode.height+1, prevNode.bits, newTargetBits)
log.Tracef("Old %08x New %08x", prevNode.bits, oldTarget, newTargetBits, CompactToBig(newTargetBits))
log.Tracef("Actual timespan %v, adjusted timespan %v, target timespan %v",
actualTimespan,
adjustedTimespan,
b.chainParams.AveragingTargetTimespan)
return newTargetBits, nil
case 1: // Plan 9 from Crypto Space
if lastNode.height == 0 {
return fork.FirstPowLimitBits, nil
}
nH := lastNode.height + 1
algo := fork.GetAlgoVer(algoname, nH)
newTargetBits = fork.GetMinBits(algoname, nH)
last := lastNode
// find the most recent block of the same algo
if last.version != algo {
l := last.RelativeAncestor(1)
l = l.GetPrevWithAlgo(algo)
// ignore the first block as its time is not a normal timestamp
if l.height < 1 {
break
}
last = l
}
counter := 1
var timestamps []float64
timestamps = append(timestamps, float64(last.timestamp))
pb := last
// collect the timestamps of all the blocks of the same algo until we pass genesis block or get AveragingInterval blocks
for ; counter < int(b.chainParams.AveragingInterval) && pb.height > 1; counter++ {
p := pb.RelativeAncestor(1)
if p != nil {
if p.height == 0 {
return fork.SecondPowLimitBits, nil
}
pb = p.GetPrevWithAlgo(algo)
} else {
break
}
if pb != nil && pb.height > 0 {
// only add the timestamp if is not the same as the previous
if float64(pb.timestamp) != timestamps[len(timestamps)-1] {
timestamps = append(timestamps, float64(pb.timestamp))
}
} else {
break
}
}
allTimeAverage, trailTimeAverage := float64(b.chainParams.TargetTimePerBlock), float64(b.chainParams.TargetTimePerBlock)
startHeight := fork.List[1].ActivationHeight
if b.chainParams.Name == "testnet" {
startHeight = 1
}
trailHeight := int32(int64(lastNode.height) - b.chainParams.AveragingInterval*int64(len(fork.List[1].Algos)))
if trailHeight < 0 {
trailHeight = 1
}
firstBlock, _ := b.BlockByHeight(startHeight)
trailBlock, _ := b.BlockByHeight(trailHeight)
lastTime := lastNode.timestamp
if firstBlock != nil {
firstTime := firstBlock.MsgBlock().Header.Timestamp.Unix()
allTimeAverage = (float64(lastTime) - float64(firstTime)) / (float64(lastNode.height) - float64(firstBlock.Height()))
}
if trailBlock != nil {
trailTime := trailBlock.MsgBlock().Header.Timestamp.Unix()
trailTimeAverage = (float64(lastTime) - float64(trailTime)) / (float64(lastNode.height) - float64(trailBlock.Height()))
}
if len(timestamps) < 2 {
return fork.SecondPowLimitBits, nil
}
var adjusted, targetAdjusted, adjustment float64
if len(timestamps) > 1 {
numalgos := int64(len(fork.List[1].Algos))
target := b.chainParams.TargetTimePerBlock * numalgos
adjustment = 1.0
counter = 0
for i := 0; i < len(timestamps)-1; i++ {
factor := 0.9
if i == 0 {
f := factor
for j := 0; j < i; j++ {
f = f * factor
}
factor = f
} else {
factor = 1.0
}
adjustment = timestamps[i] - timestamps[i+1]
adjustment = adjustment * factor
switch {
case math.IsNaN(adjustment):
break
case adjustment == 0.0:
break
}
adjusted += adjustment
targetAdjusted += float64(target) * factor
counter++
}
} else {
targetAdjusted = 100
adjusted = 100
}
ttpb := float64(b.chainParams.TargetTimePerBlock)
allTimeDivergence := allTimeAverage / ttpb
trailTimeDivergence := trailTimeAverage / ttpb
weighted := adjusted / targetAdjusted
adjustment = (weighted*weighted*weighted + allTimeDivergence + trailTimeDivergence) / 3.0
if adjustment < 0 {
fmt.Println("negative weight adjustment")
adjustment = allTimeDivergence
}
// d := adjustment - 1.0
// adjustment = 1.0 + (d*d*d+d+d*d)
if math.IsNaN(adjustment) {
return lastNode.bits, nil
}
bigadjustment := big.NewFloat(adjustment)
bigoldtarget := big.NewFloat(1.0).SetInt(CompactToBig(last.bits))
bigfnewtarget := big.NewFloat(1.0).Mul(bigadjustment, bigoldtarget)
newtarget, _ := bigfnewtarget.Int(nil)
if newtarget == nil {
return newTargetBits, nil
}
mintarget := CompactToBig(newTargetBits)
var delay uint16
if newtarget.Cmp(mintarget) < 0 {
newTargetBits = BigToCompact(newtarget)
if b.chainParams.Name == "testnet" {
rand.Seed(time.Now().UnixNano())
delay = uint16(rand.Int()) >> 6
// fmt.Printf("%s testnet delay %dms algo %s\n", time.Now().Format("2006-01-02 15:04:05.000000"), delay, algoname)
time.Sleep(time.Millisecond * time.Duration(delay))
}
if l {
log.Debugf("mining %d, old %08x new %08x average %3.2f trail %3.2f weighted %3.2f blocks in window: %d adjustment %0.1f%% algo %s delayed %dms",
lastNode.height+1, last.bits, newTargetBits, allTimeAverage, trailTimeAverage, weighted*ttpb, counter, (1-adjustment)*100, fork.List[1].AlgoVers[algo], delay)
if b.chainParams.Name == "testnet" && int64(lastNode.height) < b.chainParams.TargetTimePerBlock+1 && lastNode.height > 0 {
time.Sleep(time.Second * time.Duration(b.chainParams.TargetTimePerBlock))
}
}
}
return newTargetBits, nil
}
nH := lastNode.height + 1
// algo := fork.GetAlgoVer(algoname, nH)
return fork.GetMinBits(algoname, nH), nil
}
// CalcNextRequiredDifficulty calculates the required difficulty for the block after the end of the current best chain based on the difficulty retarget rules. This function is safe for concurrent access.
func (b *BlockChain) CalcNextRequiredDifficulty(timestamp time.Time, algo string) (difficulty uint32, err error) {
b.chainLock.Lock()
difficulty, err = b.calcNextRequiredDifficulty(b.bestChain.Tip(), timestamp, algo, true)
b.chainLock.Unlock()
return
} | blockchain/difficulty.go | 0.708112 | 0.486575 | difficulty.go | starcoder |
package tuile
import (
"image/color"
"math"
)
// HBlank is a callback invoked at the start of each scan line (horizontal
// blank), before the line is rendered.
type HBlank func(line int)

// Plot writes a single RGBA pixel at (x, y) on the output surface.
type Plot func(x, y int, r, g, b, a byte)

// Engine structure. It renders tile-based layers one scan line at a time,
// delegating pixel output to the user-supplied plot callback.
type Engine struct {
	hBlank          HBlank      // optional per-line callback
	backgroundColor color.Color // per-line fill color; nil disables the fill
	width           int         // output surface width in pixels
	height          int         // output surface height in pixels
	plot            Plot        // pixel writer used by DrawFrame
	layers          []*Layer    // layers drawn in insertion order
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
// NewEngine instantiates a new tuile engine rendering a width x height
// surface. SetPlot must be called before DrawFrame is used.
func NewEngine(width, height int) *Engine {
	return &Engine{
		width:  width,
		height: height,
	}
}
// SetHBlank registers a callback invoked at the start of every scan line.
func (t *Engine) SetHBlank(hBlank HBlank) {
	t.hBlank = hBlank
}

// SetPlot registers the pixel-writing callback used by DrawFrame.
func (t *Engine) SetPlot(plot Plot) {
	t.plot = plot
}

// SetBackgroundColor sets the color each line is filled with before layers
// are drawn; nil disables the background fill.
func (t *Engine) SetBackgroundColor(color color.Color) {
	t.backgroundColor = color
}
// DrawFrame renders one full frame: for each scan line it fires the hBlank
// callback, fills the background, then draws every enabled layer (using the
// affine path for transformed layers).
func (t *Engine) DrawFrame() {
	for line := 0; line < t.height; line++ {
		if t.hBlank != nil {
			t.hBlank(line)
		}
		if t.backgroundColor != nil {
			t.fillBackgroundLine(line, t.backgroundColor, t.width)
		}
		for _, layer := range t.layers {
			if layer.disabled {
				continue
			}
			if layer.transformed {
				t.drawLayerLineAffine(line, layer)
			} else {
				t.drawLayerLine(line, layer)
			}
		}
	}
}
// fillBackgroundLine paints one full scan line with the given color at full
// opacity. color.Color.RGBA returns 16-bit components in [0, 0xffff], so the
// high byte (>>8) is taken to obtain 8-bit channel values; the previous
// byte(r) truncation kept the low byte, which is only correct for colors
// whose 16-bit channels happen to be v*0x101.
func (t *Engine) fillBackgroundLine(line int, color color.Color, width int) {
	r, g, b, _ := color.RGBA()
	for x := 0; x < width; x++ {
		t.plot(x, line, byte(r>>8), byte(g>>8), byte(b>>8), math.MaxUint8)
	}
}
// AddLayer appends one or more layers; layers are drawn in the order added.
func (t *Engine) AddLayer(layer ...*Layer) {
	t.layers = append(t.layers, layer...)
}
// drawLayerLine renders one scan line of an untransformed layer. Coordinates
// outside the layer are skipped, or wrapped when layer.repeat is set, and
// pixels are copied tile row by tile row from the layer's paletted image.
func (t *Engine) drawLayerLine(line int, layer *Layer) {
	yTile := layer.origin.Y + line
	if yTile < 0 || yTile >= layer.pixelHeight {
		if !layer.repeat {
			return
		}
		// https://maurobringolf.ch/2017/12/a-neat-trick-to-compute-modulo-of-negative-numbers/
		yTile = (yTile%layer.pixelHeight + layer.pixelHeight) % layer.pixelHeight
	}
	for x := 0; x < t.width; {
		xTile := layer.origin.X + x
		if xTile < 0 || xTile >= layer.pixelWidth {
			if !layer.repeat {
				x++
				continue
			}
			// https://maurobringolf.ch/2017/12/a-neat-trick-to-compute-modulo-of-negative-numbers/
			xTile = (xTile%layer.pixelWidth + layer.pixelWidth) % layer.pixelWidth
		}
		tile := layer.tiles[yTile/layer.tileHeight*layer.width+xTile/layer.tileWidth]
		if tile.Nil {
			x++
			continue
		}
		// Locate this tile's pixel data inside the tile-set image.
		yImage := int(tile.ID) / layer.tileSet.Columns
		yImage *= layer.tileHeight
		yImage += yTile % layer.tileHeight
		xImage := int(tile.ID) % layer.tileSet.Columns
		xImage *= layer.tileWidth
		// Copy the rest of this tile row, stopping at the screen edge.
		for xx := xTile % layer.tileWidth; xx < layer.tileWidth && x < t.width; xx, x = xx+1, x+1 {
			var src int
			if tile.HorizontalFlip {
				src = layer.Image.PixOffset(xImage+layer.tileWidth-1-xx, yImage)
			} else {
				src = layer.Image.PixOffset(xImage+xx, yImage)
			}
			r, g, b, a := layer.Image.Palette[layer.Image.Pix[src]].RGBA()
			if a == 0 {
				continue // fully transparent pixel
			}
			// NOTE(review): RGBA() yields 16-bit components; byte(r) keeps the
			// low byte, which matches the high byte only for 8-bit-derived
			// palette colors — confirm whether byte(r>>8) is intended.
			t.plot(x, line, byte(r), byte(g), byte(b), math.MaxUint8)
		}
	}
}
// drawLayerLineAffine renders one scan line of a transformed layer by mapping
// the line's endpoints through the layer transform and stepping linearly
// between them in layer space.
func (t *Engine) drawLayerLineAffine(line int, layer *Layer) {
	left, right := layer.transform(
		VInt(layer.origin.X, layer.origin.Y+line),
		VInt(layer.origin.X+t.width, layer.origin.Y+line),
	)
	x1, y1 := left.X, left.Y
	x2, y2 := right.X, right.Y
	// Per-pixel step in layer space.
	dx := (x2 - x1) / float64(t.width)
	dy := (y2 - y1) / float64(t.width)
	for x := 0; x < t.width; x, x1, y1 = x+1, x1+dx, y1+dy {
		if !layer.repeat && (x1 < 0 || int(x1) >= layer.pixelWidth || y1 < 0 || int(y1) >= layer.pixelHeight) {
			continue
		}
		// Wrap coordinates into layer bounds (repeat mode).
		xTile := abs(int(x1)+layer.pixelWidth) % layer.pixelWidth
		yTile := abs(int(y1)+layer.pixelHeight) % layer.pixelHeight
		tile := layer.tiles[yTile/layer.tileHeight*layer.width+xTile/layer.tileWidth]
		if tile.Nil {
			continue
		}
		// Locate this tile's pixel data inside the tile-set image.
		yImage := int(tile.ID) / layer.tileSet.Columns
		yImage *= layer.tileHeight
		yImage += yTile % layer.tileHeight
		xImage := int(tile.ID) % layer.tileSet.Columns
		xImage *= layer.tileWidth
		var src int
		if tile.HorizontalFlip {
			src = layer.Image.PixOffset(xImage+layer.tileWidth-1-(xTile%layer.tileWidth), yImage)
		} else {
			src = layer.Image.PixOffset(xImage+xTile%layer.tileWidth, yImage)
		}
		r, g, b, a := layer.Image.Palette[layer.Image.Pix[src]].RGBA()
		if a == 0 {
			continue // fully transparent pixel
		}
		t.plot(x, line, byte(r), byte(g), byte(b), math.MaxUint8)
	}
}
package chapter09
import "reflect"
// Apply takes a slice of type []T and a function of type func(T) T. (If the
// input conditions are not satisfied, Apply panics.) It returns a newly
// allocated slice where each element is the result of calling the function on
// successive elements of the slice.
func Apply(slice, function interface{}) interface{} {
	return apply(slice, function, false)
}

// ApplyInPlace is like Apply, but overwrites the slice rather than returning a
// newly allocated slice.
func ApplyInPlace(slice, function interface{}) {
	apply(slice, function, true)
}
// Choose takes a slice of type []T and a function of type func(T) bool. (If
// the input conditions are not satisfied, Choose panics.) It returns a newly
// allocated slice containing only those elements of the input slice that
// satisfy the function.
func Choose(slice, function interface{}) interface{} {
	out, _ := chooseOrDrop(slice, function, false, true)
	return out
}

// Drop is the complement of Choose: it returns a newly allocated slice
// containing only those elements of the input slice that do NOT satisfy the
// function.
func Drop(slice, function interface{}) interface{} {
	out, _ := chooseOrDrop(slice, function, false, false)
	return out
}

// ChooseInPalce is like Choose, but filters the slice in place through the
// given pointer to slice. NOTE(review): the exported name is misspelled
// ("Palce"); it is kept as-is for backward compatibility with callers.
func ChooseInPalce(pointerToSlice, function interface{}) {
	chooseOrDropInplace(pointerToSlice, function, true)
}

// DropInPlace is like Drop, but filters the slice in place through the given
// pointer to slice.
func DropInPlace(pointerToSlice, function interface{}) {
	chooseOrDropInplace(pointerToSlice, function, false)
}
// apply implements Apply and ApplyInPlace. It calls function on every element
// of slice, writing the results either into a newly allocated slice (inPlace
// == false) or back over the input (inPlace == true), and returns the output
// slice. It panics if slice is not a slice or function has the wrong type.
func apply(slice, function interface{}, inPlace bool) interface{} {
	// Special case for strings, very common.
	if strSlice, ok := slice.([]string); ok {
		if strFn, ok := function.(func(string) string); ok {
			r := strSlice
			if !inPlace {
				r = make([]string, len(strSlice))
			}
			for i, s := range strSlice {
				r[i] = strFn(s)
			}
			// Return here so the fast path is actually taken. Previously the
			// result was discarded and execution fell through to the
			// reflection path, calling the function a second time per element.
			return r
		}
	}
	in := reflect.ValueOf(slice)
	if in.Kind() != reflect.Slice {
		panic("apply: not slice")
	}
	fn := reflect.ValueOf(function)
	elemType := in.Type().Elem()
	if !goodFunc(fn, elemType, nil) {
		panic("apply:function must be type func(" + in.Type().Elem().String() + ") outputElemType")
	}
	out := in
	if !inPlace {
		// The output element type follows the function's result type.
		out = reflect.MakeSlice(reflect.SliceOf(fn.Type().Out(0)), in.Len(), in.Cap())
	}
	var ins [1]reflect.Value // Outside the loop to avoid one allocation.
	for i := 0; i < in.Len(); i++ {
		ins[0] = in.Index(i)
		out.Index(i).Set(fn.Call(ins[:])[0])
	}
	return out.Interface()
}
// chooseOrDropInplace filters the slice pointed to by slice in place, keeping
// elements whose predicate result equals truth, then shrinks the slice length
// to the number of survivors. Panics if slice is not a pointer.
func chooseOrDropInplace(slice, function interface{}, truth bool) {
	inp := reflect.ValueOf(slice)
	if inp.Kind() != reflect.Ptr {
		panic("choose or drop: not pointer to slice")
	}
	// Run the shared filter in place, then truncate to the surviving length.
	_, n := chooseOrDrop(inp.Elem().Interface(), function, true, truth)
	inp.Elem().SetLen(n)
}
// boolType caches the reflect.Type of bool for predicate signature checks.
var boolType = reflect.ValueOf(true).Type()

// chooseOrDrop is the shared implementation of Choose/Drop and their in-place
// variants. It keeps elements whose predicate result equals truth, either
// compacting the input slice (inPlace) or returning a newly allocated one,
// and also returns the number of kept elements.
func chooseOrDrop(slice, function interface{}, inPlace, truth bool) (interface{}, int) {
	// Special case for strings, very common
	if strSlice, ok := slice.([]string); ok {
		if strFn, ok := function.(func(string) bool); ok {
			var r []string
			if inPlace {
				r = strSlice[:0] // reuse the input's backing array
			}
			for _, s := range strSlice {
				if strFn(s) == truth {
					r = append(r, s)
				}
			}
			return r, len(r)
		}
	}
	in := reflect.ValueOf(slice)
	if in.Kind() != reflect.Slice {
		panic("choose or Drop: not slice")
	}
	fn := reflect.ValueOf(function)
	elemType := in.Type().Elem()
	if !goodFunc(fn, elemType, boolType) {
		panic("choose/drop: function must be of type func(" + elemType.String() + ") bool")
	}
	// First pass: record the indices of elements to keep.
	var which []int
	var ins [1]reflect.Value // Outside the loop to avoid one allocation.
	for i := 0; i < in.Len(); i++ {
		ins[0] = in.Index(i)
		if fn.Call(ins[:])[0].Bool() == truth {
			which = append(which, i)
		}
	}
	out := in
	if !inPlace {
		out = reflect.MakeSlice(in.Type(), len(which), len(which))
	}
	// Second pass: move the kept elements to the front of out.
	for i := range which {
		out.Index(i).Set(in.Index(which[i]))
	}
	return out.Interface(), len(which)
}
// goodFunc verifies that the function satisfies the signature, represented as a slice of types.
func goodFunc(fn reflect.Value, types ...reflect.Type) bool {
if fn.Kind() != reflect.Func {
return false
}
// last type is return ,the rest are ins.
if fn.Type().NumIn() != len(types)-1 || fn.Type().NumOut() != 1 {
return false
}
for i := 0; i < len(types)-1; i++ {
if fn.Type().In(i) != types[i] {
return false
}
}
outType := types[len(types)-1]
if outType != nil && fn.Type().Out(0) != outType {
return false
}
return true
} | chapter09/apply.go | 0.688783 | 0.49109 | apply.go | starcoder |
package selector
import (
"strings"
)
// Selector represents a field or label selector that declares one or more
// operations. A set matches the selector only if it matches every operation.
type Selector struct {
	Operations []Operation
}
// Matches returns the logical intersection (AND) of the evaluations of each
// of the operations in s against the given set.
func (s *Selector) Matches(set map[string]string) bool {
	for _, op := range s.Operations {
		if !matches(op, set) {
			return false
		}
	}
	return true
}
// matches determines whether a single operation matches the given set.
// Note the deliberate fallthroughs: when the r-values name a key of the set,
// `in`/`notin` compare against that key's (split) value; otherwise they
// degrade to plain (in)equality checks.
func matches(r Operation, set map[string]string) bool {
	switch r.Operator {
	case InOperator:
		// Verify if we have an l-value in the r-values.
		// e.g. linux in (check.subscriptions)
		if hasKeysInValues(set, r.RValues) {
			return hasValue(r.LValue, split(set[r.RValues[0]]))
		}
		// We are not dealing with a key in the values so we can follow the same
		// logic as the equal operator
		fallthrough
	case DoubleEqualSignOperator:
		// Make sure the r-value set has the specified l-value
		if !hasKey(set, r.LValue) {
			return false
		}
		// Make sure the r-value set's value for the operation's l-value exists in the
		// operation r-values.
		return hasValue(set[r.LValue], r.RValues)
	case NotInOperator:
		// Verify if we have an l-value in the r-values.
		// e.g. linux notin (check.subscriptions)
		if hasKeysInValues(set, r.RValues) {
			return !hasValue(r.LValue, split(set[r.RValues[0]]))
		}
		fallthrough
	case NotEqualOperator:
		// Make sure the r-value set has the specified l-value.
		if !hasKey(set, r.LValue) {
			return true
		}
		// Make sure the set's value for the operation's l-value does not exists in
		// the operation r-values.
		return !hasValue(set[r.LValue], r.RValues)
	case MatchesOperator:
		// Make sure the r-value set has the specified l-value
		if !hasKey(set, r.LValue) {
			return false
		}
		// Make sure the set's value for the operation's l-value matches
		// the operation r-values
		return matchesValue(set[r.LValue], r.RValues)
	default:
		// Unknown operator: never matches.
		return false
	}
}
// hasKey reports whether the set contains an entry for the given key.
func hasKey(set map[string]string, key string) bool {
	_, found := set[key]
	return found
}
// hasValue reports whether value appears in values.
func hasValue(value string, values []string) bool {
	for _, candidate := range values {
		if candidate == value {
			return true
		}
	}
	return false
}
// matchesValue reports whether any of the operation values is a substring of
// the set value.
func matchesValue(value string, values []string) bool {
	for _, sub := range values {
		if strings.Contains(value, sub) {
			return true
		}
	}
	return false
}
// hasKeysInValues determines whether values consists of exactly one entry and
// that entry is an actual key of the set. Only a single value is supported,
// e.g. `linux in (check.subscriptions)`.
func hasKeysInValues(set map[string]string, values []string) bool {
	if len(values) != 1 {
		return false
	}
	_, present := set[values[0]]
	return present
}
// split slices input into comma-separated substrings, dropping surrounding
// '[' / ']' characters and any whitespace around each element.
func split(input string) []string {
	parts := strings.Split(strings.Trim(input, "[]"), ",")
	out := make([]string, len(parts))
	for i, p := range parts {
		out[i] = strings.TrimSpace(p)
	}
	return out
}
func Merge(selectors ...*Selector) *Selector {
var selector Selector
for _, s := range selectors {
if s == nil {
continue
}
selector.Operations = append(selector.Operations, s.Operations...)
}
return &selector
} | backend/selector/selector.go | 0.674158 | 0.561816 | selector.go | starcoder |
package lexnum
import (
"fmt"
"strconv"
)
// an Encoder may be used to encode or decode an integer as a string. The
// produced strings will have the property that any set of numbers will have
// the same lexical sorting and numeric sorting.
type Encoder struct {
	pos rune // marker prefixed to positive numbers; must rank above '9'
	neg rune // marker prefixed to negative numbers; must rank below '0'
}
// NewEncoder creates a new lexnum Encoder. The sort property is achieved by
// prefixing numbers with marker runes, so pos must rank above the decimal
// digits and neg below them; the constructor panics otherwise.
func NewEncoder(pos rune, neg rune) *Encoder {
	if pos < neg {
		panic("positive lexnum rune must be of higher rank than negative lexnum rune")
	}
	if neg >= '0' {
		panic("negative prefix must be lexically less than '0'")
	}
	if pos <= '9' {
		panic("positive prefix must be lexically greather than '9'")
	}
	return &Encoder{pos: pos, neg: neg}
}
// EncodeInt encodes an integer as a lexically sortable string.
func (l Encoder) EncodeInt(i int) string {
	switch {
	case i > 0:
		return l.encodePos(i)
	case i < 0:
		return l.encodeNeg(i)
	}
	return "0"
}
// encodePos encodes a strictly positive integer: the positive marker rune, a
// recursively encoded digit count (when more than one digit), then the
// decimal digits themselves.
func (l Encoder) encodePos(i int) string {
	digits := strconv.Itoa(i)
	if len(digits) > 1 {
		return fmt.Sprintf("%c%s%s", l.pos, l.encodePos(len(digits)), digits)
	}
	return fmt.Sprintf("%c%s", l.pos, digits)
}
// encodeNeg encodes a strictly negative integer. Digits are nine's-
// complemented so that more-negative numbers sort first, and the digit-count
// prefix is itself encoded recursively.
func (l Encoder) encodeNeg(i int) string {
	if i < 0 {
		i = -i
	}
	runes := []rune(strconv.Itoa(i))
	// Reuse the shared helper instead of duplicating the flip loop inline.
	l.flipInPlace(runes)
	if len(runes) == 1 {
		return fmt.Sprintf("%c%s", l.neg, string(runes))
	}
	return fmt.Sprintf("%c%s%s", l.neg, l.encodeNeg(len(runes)), string(runes))
}
// flip maps a decimal digit rune to its nine's complement ('0'↔'9',
// '1'↔'8', …). It panics on any rune outside '0'..'9'.
func (l Encoder) flip(r rune) rune {
	if r < '0' || r > '9' {
		panic(fmt.Sprintf("can't flip illegal rune %c", r))
	}
	return '0' + '9' - r
}
// flipInPlace nine's-complements every digit rune in runes, in place.
func (l Encoder) flipInPlace(runes []rune) {
	for i := range runes {
		runes[i] = l.flip(runes[i])
	}
}

// isDigit reports whether r is a decimal digit rune.
func (l Encoder) isDigit(r rune) bool {
	return r >= '0' && r <= '9'
}
// prefixCount returns the number of leading sign-marker runes (l.neg or
// l.pos) at the start of runes.
func (l Encoder) prefixCount(runes []rune) int {
	n := 0
	for n < len(runes) && (runes[n] == l.neg || runes[n] == l.pos) {
		n++
	}
	return n
}
// DecodeInt decodes a lexnum string, returning its original integer
// representation, or an error for inputs that are not valid encodings.
func (l Encoder) DecodeInt(s string) (int, error) {
	if s == "" {
		return 0, fmt.Errorf("illegal Lexnum decode of empty string")
	}
	runes := []rune(s)
	if len(runes) == 1 {
		// The only legal single-rune encoding is zero.
		if runes[0] == '0' {
			return 0, nil
		}
		return 0, fmt.Errorf("illegal Lexnum decode of non-zero unit string: %s", s)
	}
	// The leading marker rune selects the sign-specific decoder.
	switch runes[0] {
	case l.neg:
		return l.decodeNeg(runes)
	case l.pos:
		return l.decodePos(runes)
	default:
		return 0, fmt.Errorf("illegal Lexnum decode of string without %c or %c as initial rune: %s", l.neg, l.pos, s)
	}
}
// decodePos decodes a positive encoding: skip the marker prefix, then walk
// the nested digit-count groups.
func (l Encoder) decodePos(runes []rune) (int, error) {
	return l._decodePos(runes, 1, l.prefixCount(runes))
}
// _decodePos decodes the run of digit groups starting at index, where the
// current group is size runes long and holds either the final value or the
// length of the next group. It returns an error for malformed input.
func (l Encoder) _decodePos(runes []rune, size int, index int) (int, error) {
	// Validate bounds before slicing: the check previously ran after the
	// slice expression, which panicked on truncated input and made the error
	// branch unreachable.
	if index+size > len(runes) {
		return 0, fmt.Errorf("illegal Lexnum decode of abnormally long string %s", string(runes))
	}
	n, err := strconv.ParseInt(string(runes[index:index+size]), 10, 64)
	if err != nil {
		return 0, err
	}
	if index+size == len(runes) {
		return int(n), nil
	}
	return l._decodePos(runes, int(n), index+size)
}
// decodeNeg decodes a negative encoding: un-flip the nine's-complemented
// digits in place, decode as a positive number, then negate.
func (l Encoder) decodeNeg(runes []rune) (int, error) {
	p := l.prefixCount(runes)
	l.flipInPlace(runes[p:len(runes)])
	n, err := l._decodePos(runes, 1, p)
	if err != nil {
		return 0, err
	}
	return -n, nil
}
package stats
import (
"math"
"time"
)
// timeseries holds the history of a changing value over a predefined period of
// time, stored as a circular buffer of per-resolution slots.
type timeseries struct {
	size       int           // The number of time slots. Equivalent to len(slots).
	resolution time.Duration // The time resolution of each slot.
	stepCount  int64         // The number of intervals seen since creation.
	head       int           // The position of the current time in slots.
	time       time.Time     // The time at the beginning of the current time slot.
	slots      []int64       // A circular buffer of time slots.
}
// newTimeSeries returns a newly allocated timeseries that covers the requested
// period with the given resolution.
func newTimeSeries(initialTime time.Time, period, resolution time.Duration) *timeseries {
size := int(period.Nanoseconds()/resolution.Nanoseconds()) + 1
return ×eries{
size: size,
resolution: resolution,
stepCount: 1,
time: initialTime,
slots: make([]int64, size),
}
}
// advanceTimeWithFill moves the timeseries forward to time t and fills in any
// slots that get skipped in the process with the given value. Values older than
// the timeseries period are lost.
func (ts *timeseries) advanceTimeWithFill(t time.Time, value int64) {
	advanceTo := t.Truncate(ts.resolution)
	if !advanceTo.After(ts.time) {
		// This is shortcut for the most common case of a busy counter
		// where updates come in many times per ts.resolution.
		ts.time = advanceTo
		return
	}
	steps := int(advanceTo.Sub(ts.time).Nanoseconds() / ts.resolution.Nanoseconds())
	ts.stepCount += int64(steps)
	if steps > ts.size {
		// Anything older than the window is lost; filling more than size
		// slots would just rewrite the same ring entries repeatedly.
		steps = ts.size
	}
	for steps > 0 {
		ts.head = (ts.head + 1) % ts.size
		ts.slots[ts.head] = value
		steps--
	}
	ts.time = advanceTo
}
// advanceTime moves the timeseries forward to time t and fills in any slots
// that get skipped in the process with the head value. Values older than the
// timeseries period are lost.
func (ts *timeseries) advanceTime(t time.Time) {
	ts.advanceTimeWithFill(t, ts.slots[ts.head])
}

// set sets the current value of the timeseries.
func (ts *timeseries) set(value int64) {
	ts.slots[ts.head] = value
}

// incr adds delta to the current value of the timeseries.
func (ts *timeseries) incr(delta int64) {
	ts.slots[ts.head] += delta
}
// headValue returns the latest value from the timeseries.
func (ts *timeseries) headValue() int64 {
	return ts.slots[ts.head]
}

// headTime returns the time of the latest value from the timeseries.
func (ts *timeseries) headTime() time.Time {
	return ts.time
}

// tailValue returns the oldest value from the timeseries.
func (ts *timeseries) tailValue() int64 {
	if ts.stepCount < int64(ts.size) {
		// The ring has not wrapped yet: the oldest value is the initial zero.
		return 0
	}
	return ts.slots[(ts.head+1)%ts.size]
}

// tailTime returns the time of the oldest value from the timeseries.
func (ts *timeseries) tailTime() time.Time {
	size := int64(ts.size)
	if ts.stepCount < size {
		size = ts.stepCount
	}
	return ts.time.Add(-time.Duration(size-1) * ts.resolution)
}

// delta returns the difference between the newest and oldest values from the
// timeseries.
func (ts *timeseries) delta() int64 {
	return ts.headValue() - ts.tailValue()
}
// rate returns the rate of change between the oldest and newest values from
// the timeseries in units per second. It returns 0 when the series spans no
// time, avoiding a division by zero.
func (ts *timeseries) rate() float64 {
	deltaTime := ts.headTime().Sub(ts.tailTime()).Seconds()
	if deltaTime == 0 {
		return 0
	}
	return float64(ts.delta()) / deltaTime
}
// min returns the smallest value from the timeseries, skipping the tail slot
// (the slot about to be overwritten).
func (ts *timeseries) min() int64 {
	to := ts.size
	if ts.stepCount < int64(ts.size) {
		// Ring not full yet: only scan the populated prefix.
		to = ts.head + 1
	}
	tail := (ts.head + 1) % ts.size
	min := int64(math.MaxInt64)
	for b := 0; b < to; b++ {
		if b != tail && ts.slots[b] < min {
			min = ts.slots[b]
		}
	}
	return min
}

// max returns the largest value from the timeseries, skipping the tail slot
// (the slot about to be overwritten).
func (ts *timeseries) max() int64 {
	to := ts.size
	if ts.stepCount < int64(ts.size) {
		// Ring not full yet: only scan the populated prefix.
		to = ts.head + 1
	}
	tail := (ts.head + 1) % ts.size
	max := int64(math.MinInt64)
	for b := 0; b < to; b++ {
		if b != tail && ts.slots[b] > max {
			max = ts.slots[b]
		}
	}
	return max
}
// reset resets the timeseries to an empty state anchored at time t.
func (ts *timeseries) reset(t time.Time) {
	ts.head = 0
	ts.time = t
	ts.stepCount = 1
	ts.slots = make([]int64, ts.size)
}
package gofpdf
import "math"
// bezierSampleCardinality is the number of polyline samples used to
// approximate arc lengths.
const bezierSampleCardinality = 10000

// BezierCurve stores one cubic Bézier segment as polynomial coefficients
// (Cx* for x(t), Cy* for y(t)) plus its approximate arc Length.
type BezierCurve struct {
	Cx1, Cx2, Cx3, Cx4, Cy1, Cy2, Cy3, Cy4, Length float64
}

// BezierSpline is a sequence of connected Bézier curve segments.
type BezierSpline []BezierCurve

// BezierSplineSample holds, per curve, the t parameters of an arc-length
// spaced sample of the spline.
type BezierSplineSample [][]float64

// BezierPoint pairs a point on a curve with the direction of its normal.
type BezierPoint struct {
	pt        Point
	normaldir float64
}
// NewBezierCurve builds a BezierCurve from endpoint (x0,y0), control points
// (cx0,cy0) and (cx1,cy1), and endpoint (x1,y1), precomputing the polynomial
// coefficients and the curve's arc length.
func NewBezierCurve(x0, y0, cx0, cy0, cx1, cy1, x1, y1 float64) BezierCurve {
	Cx1, Cx2, Cx3, Cx4 := Coefficients(x0, cx0, cx1, x1)
	Cy1, Cy2, Cy3, Cy4 := Coefficients(y0, cy0, cy1, y1)
	bc := BezierCurve{Cx1, Cx2, Cx3, Cx4, Cy1, Cy2, Cy3, Cy4, 0.0}
	bc.Length = CurveLength(bc)
	return bc
}
// NewBezierSpline builds a spline from a flat list of control points: each
// group of 4 points (endpoint, two controls, endpoint) forms one segment, and
// consecutive segments share their boundary endpoint.
func NewBezierSpline(cp []Point) BezierSpline {
	var bs []BezierCurve
	// Consume groups of 4 points to create curve segments
	for len(cp) >= 4 {
		x0, y0 := cp[0].XY()
		cx0, cy0 := cp[1].XY()
		cx1, cy1 := cp[2].XY()
		x1, y1 := cp[3].XY()
		bs = append(bs, NewBezierCurve(x0, y0, cx0, cy0, cx1, cy1, x1, y1))
		cp = cp[3:] // Each curve's tail is also the previous curve's tip
	}
	return bs
}
// Coefficients converts the four Bézier control values p0..p3 into the
// polynomial coefficients of the cubic C1*t³ + C2*t² + C3*t + C4.
func Coefficients(p0, p1, p2, p3 float64) (C1, C2, C3, C4 float64) {
	C1 = p3 - (3.0 * p2) + (3.0 * p1) - p0
	C2 = (3.0 * p2) - (6.0 * p1) + (3.0 * p0)
	C3 = (3.0 * p1) - (3.0 * p0)
	C4 = p0
	return C1, C2, C3, C4
}
// At evaluates the curve's cubic polynomials at parameter t (0..1) and
// returns the corresponding point.
func (bc BezierCurve) At(t float64) Point {
	x := bc.Cx1*t*t*t + bc.Cx2*t*t + bc.Cx3*t + bc.Cx4
	y := bc.Cy1*t*t*t + bc.Cy2*t*t + bc.Cy3*t + bc.Cy4
	return Point{x, y}
}

// Curve fills p with len(p) points along the curve and returns p.
func (bc BezierCurve) Curve(p []Point) []Point {
	// Returns a uniform sample with respect to the parameter t
	for i, nf, l := 0, float64(len(p)-1), len(p); i < l; i++ {
		p[i] = bc.At(float64(i) / nf)
	}
	return p
}
// CurveLength approximates the arc length of bc by sampling it as a dense
// polyline and summing the segment lengths.
func CurveLength(bc BezierCurve) float64 {
	samples := make([]Point, bezierSampleCardinality)
	bc.Curve(samples)
	total := 0.0
	for i := 1; i < len(samples); i++ {
		total += Distance(samples[i-1], samples[i])
	}
	return total
}
// Distance returns the Euclidean distance between p0 and p1.
func Distance(p0, p1 Point) float64 {
	// Multiply directly instead of calling math.Pow for squared terms.
	dx := p1.X - p0.X
	dy := p1.Y - p0.Y
	return math.Sqrt(dy*dy + dx*dx)
}
// SampleByArcLength fills sample with t parameters that are (approximately)
// evenly spaced by arc length along the curve: the curve is approximated by a
// polyline, then walked in equal-distance strides, interpolating t within
// each polyline segment.
func (bc BezierCurve) SampleByArcLength(sample []float64) []float64 {
	n := len(sample)
	d := 0.0
	curve := make([]Point, n)
	// Approximate the curve by a polyline
	bc.Curve(curve)
	polyline := curve
	distances := make([]float64, n-1)
	for len(curve) > 1 {
		distances[n-len(curve)] = Distance(curve[0], curve[1])
		d += distances[n-len(curve)]
		curve = curve[1:]
	}
	// dd is the target arc-length spacing between consecutive samples.
	dd := d / float64(n-1)
	// Walk the polyline with even steps
	stride := dd
	sample[0] = 0.0
	i := 1
	for len(polyline) > 1 {
		if distances[0] >= stride {
			// The stride ends inside this segment: interpolate t linearly.
			frac := stride / distances[0]
			t0 := float64(n-len(polyline)) / float64(n-1)
			t1 := float64(n-len(polyline)+1) / float64(n-1)
			t := t0 + frac*(t1-t0)
			sample[i] = t
			i++
			distances[0] -= stride
			stride = dd
		} else {
			// Consume the rest of this segment and move to the next one.
			stride -= distances[0]
			polyline = polyline[1:]
			distances = distances[1:]
		}
	}
	// Pad any remaining slots (floating-point shortfall) with t = 1.
	for i < len(sample) {
		sample[i] = 1.0
		i++
	}
	return sample
}
// Tangent returns the curve's derivative vector (dx/dt, dy/dt) at t.
func (bc BezierCurve) Tangent(t float64) Point {
	dx := bc.Dx(t)
	dy := bc.Dy(t)
	return Point{dx, dy}
}

// NormalDegrees returns the direction of the curve's normal at t, in degrees.
func (bc BezierCurve) NormalDegrees(t float64) float64 {
	tan := bc.Tangent(t)
	// Rotate the tangent by -90° to obtain the normal vector.
	normal := Point{tan.Y, -1 * tan.X}
	return (math.Atan2(normal.Y, normal.X) * -180.0 / math.Pi) - 90.0
}

// Dx returns dx/dt at t.
func (bc BezierCurve) Dx(t float64) float64 {
	return 3.0*bc.Cx1*t*t + 2.0*bc.Cx2*t + bc.Cx3
}

// Dy returns dy/dt at t.
func (bc BezierCurve) Dy(t float64) float64 {
	return 3.0*bc.Cy1*t*t + 2.0*bc.Cy2*t + bc.Cy3
}
// SampleByArcLength distributes n sample points across the spline's curves in
// proportion to each curve's arc length, then samples each curve evenly by
// arc length. Interior curve boundaries are not duplicated.
func (bs BezierSpline) SampleByArcLength(n int) BezierSplineSample {
	totalLength := bs.Length()
	clens := make([]int, len(bs))
	for i, bc := range bs {
		if i == len(bs)-1 {
			// The last curve takes whatever sample budget remains.
			clens[i] = n
			break
		}
		// Extra point here is the endpoint which will be removed
		clens[i] = int((bc.Length/totalLength)*float64(n)) + 1
		n -= clens[i] - 1
		totalLength -= bc.Length
	}
	csamples := make([][]float64, len(bs))
	for i, cn := range clens {
		curve := make([]float64, cn)
		curve = bs[i].SampleByArcLength(curve)
		if i < len(clens)-1 && len(curve) > 0 {
			// Omit the final point of each but the last curve
			curve = curve[:len(curve)-1]
		}
		csamples[i] = curve
	}
	return csamples
}
// At maps a flat sample index k to (curve index, t parameter) within the
// spline sample. Indexes past the end clamp to t=1 on the last curve.
func (bss BezierSplineSample) At(k int) (int, float64) {
	rem := k
	for ci, curve := range bss {
		if rem < len(curve) {
			return ci, curve[rem]
		}
		rem -= len(curve)
	}
	return len(bss) - 1, 1.0
}
func (bs BezierSpline) Length() float64 {
length := 0.0
for _, bc := range bs {
length += bc.Length
}
return length
} | bezier.go | 0.800341 | 0.474022 | bezier.go | starcoder |
// This package implements a parser for the subset of the CommonMark spec necessary for us to do
// server-side processing. It is not a full implementation and lacks many features. But it is
// complete enough to efficiently and accurately allow us to do what we need to like rewrite image
// URLs for proxying.
package markdown
import (
"strings"
)
// isEscapable reports whether c is an ASCII punctuation character (the
// CommonMark backslash-escapable set). The nested comparisons implement
// "printable ASCII above space, excluding alphanumerics" as one
// short-circuiting expression.
func isEscapable(c rune) bool {
	return c > ' ' && (c < '0' || (c > '9' && (c < 'A' || (c > 'Z' && (c < 'a' || (c > 'z' && c <= '~'))))))
}

// isEscapableByte is the byte-oriented form of isEscapable.
func isEscapableByte(c byte) bool {
	return isEscapable(rune(c))
}
// isWhitespace reports whether c is one of the CommonMark whitespace
// characters: space, tab, newline, vertical tab, form feed, carriage return.
func isWhitespace(c rune) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\u000b' || c == '\u000c' || c == '\r'
}

// isWhitespaceByte is the byte-oriented form of isWhitespace.
func isWhitespaceByte(c byte) bool {
	return isWhitespace(rune(c))
}
// isNumeric reports whether c is an ASCII decimal digit.
func isNumeric(c rune) bool {
	return '0' <= c && c <= '9'
}

// isNumericByte is the byte-oriented form of isNumeric.
func isNumericByte(c byte) bool {
	return isNumeric(rune(c))
}

// isHex reports whether c is an ASCII hexadecimal digit.
func isHex(c rune) bool {
	switch {
	case isNumeric(c):
		return true
	case 'a' <= c && c <= 'f':
		return true
	case 'A' <= c && c <= 'F':
		return true
	}
	return false
}

// isHexByte is the byte-oriented form of isHex.
func isHexByte(c byte) bool {
	return isHex(rune(c))
}
// isAlphanumeric reports whether c is an ASCII letter or decimal digit.
func isAlphanumeric(c rune) bool {
	switch {
	case '0' <= c && c <= '9', 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z':
		return true
	}
	return false
}

// isAlphanumericByte is the byte-oriented form of isAlphanumeric.
func isAlphanumericByte(c byte) bool {
	return isAlphanumeric(rune(c))
}
// nextNonWhitespace returns the index of the first non-whitespace byte at or
// after position, or len(markdown) if none remains.
func nextNonWhitespace(markdown string, position int) int {
	for i := position; i < len(markdown); i++ {
		if !isWhitespaceByte(markdown[i]) {
			return i
		}
	}
	return len(markdown)
}
// nextLine returns the position just past the next line ending (handling
// "\r\n", "\r", and "\n"), or len(markdown) if there is none, along with
// whether any non-whitespace byte was skipped along the way.
func nextLine(markdown string, position int) (linePosition int, skippedNonWhitespace bool) {
	for i := position; i < len(markdown); i++ {
		c := markdown[i]
		if c == '\r' {
			// Treat "\r\n" as a single line ending.
			if i+1 < len(markdown) && markdown[i+1] == '\n' {
				return i + 2, skippedNonWhitespace
			}
			return i + 1, skippedNonWhitespace
		} else if c == '\n' {
			return i + 1, skippedNonWhitespace
		} else if !isWhitespaceByte(c) {
			skippedNonWhitespace = true
		}
	}
	return len(markdown), skippedNonWhitespace
}
// countIndentation measures the leading indentation of the given range,
// returning its width in columns (tabs count as 4) and the number of bytes it
// occupies.
func countIndentation(markdown string, r Range) (spaces, bytes int) {
	for i := r.Position; i < r.End; i++ {
		if markdown[i] == ' ' {
			spaces++
			bytes++
		} else if markdown[i] == '\t' {
			// A tab advances the indentation by 4 columns but is one byte.
			spaces += 4
			bytes++
		} else {
			break
		}
	}
	return
}
// trimLeftSpace narrows r by advancing its start position past any leading
// whitespace. The previous version subtracted the trimmed byte count from
// r.End (a copy of trimRightSpace's return), which kept the leading
// whitespace and dropped bytes from the end instead.
func trimLeftSpace(markdown string, r Range) Range {
	s := markdown[r.Position:r.End]
	trimmed := strings.TrimLeftFunc(s, isWhitespace)
	return Range{r.Position + (len(s) - len(trimmed)), r.End}
}
// trimRightSpace narrows r by moving its end back past any trailing
// whitespace.
func trimRightSpace(markdown string, r Range) Range {
	s := markdown[r.Position:r.End]
	trimmed := strings.TrimRightFunc(s, isWhitespace)
	return Range{r.Position, r.End - (len(s) - len(trimmed))}
}
// relativeToAbsolutePosition converts a position relative to the concatenated
// content of ranges into an absolute position in the underlying string.
// Positions past the end clamp to the end of the last range (or 0 when there
// are no ranges).
func relativeToAbsolutePosition(ranges []Range, position int) int {
	rem := position
	for _, r := range ranges {
		l := r.End - r.Position
		if rem < l {
			return r.Position + rem
		}
		rem -= l
	}
	if len(ranges) == 0 {
		return 0
	}
	return ranges[len(ranges)-1].End
}
// trimBytesFromRanges drops the first `bytes` bytes of content from the
// concatenated ranges, narrowing or removing leading ranges as needed.
func trimBytesFromRanges(ranges []Range, bytes int) (result []Range) {
	rem := bytes
	for _, r := range ranges {
		if rem == 0 {
			result = append(result, r)
			continue
		}
		l := r.End - r.Position
		if rem < l {
			// This range is only partially consumed: keep its tail.
			result = append(result, Range{r.Position + rem, r.End})
			rem = 0
			continue
		}
		// This range is consumed entirely and omitted from the result.
		rem -= l
	}
	return
}
// Parse splits the document into lines and then parses them into a block
// tree, returning the document and any link reference definitions found.
func Parse(markdown string) (*Document, []*ReferenceDefinition) {
	lines := ParseLines(markdown)
	return ParseBlocks(markdown, lines)
}
package main
// Problem link: https://leetcode-cn.com/problems/cousins-in-binary-tree/
// this is a recursion solution
// isCousins reports whether the nodes holding values x and y are cousins:
// same depth but different parents.
func isCousins(root *TreeNode, x int, y int) bool {
	if root == nil {
		return false
	}
	xLevel, yLevel := getLevel993(root, x, 1), getLevel993(root, y, 1)
	if xLevel != yLevel {
		return false
	}
	return !hasSameParent993(root, x, y)
}

// hasSameParent993 checks whether the values x and y are the two children of
// a single node anywhere in the tree.
func hasSameParent993(root *TreeNode, x int, y int) bool {
	if root == nil {
		return false
	}
	if root.Left != nil && root.Right != nil {
		if (root.Left.Val == x && root.Right.Val == y) || (root.Left.Val == y && root.Right.Val == x) {
			return true
		}
	}
	return hasSameParent993(root.Left, x, y) || hasSameParent993(root.Right, x, y)
}

// getLevel993 returns the level (starting at the given level for root) of the
// node holding val, or 0 if val is not in the tree.
func getLevel993(root *TreeNode, val int, level int) int {
	if root == nil {
		return 0
	}
	if root.Val == val {
		return level
	}
	leftLevel, rightLevel := getLevel993(root.Left, val, level+1), getLevel993(root.Right, val, level+1)
	if leftLevel == 0 {
		return rightLevel
	}
	return leftLevel
}
// isCousins1 is a DFS solution: record each target value's depth and parent
// in one traversal per value, then compare the two records.
func isCousins1(root *TreeNode, x int, y int) bool {
	if root == nil {
		return false
	}
	var xDepth, yDepth, xParent, yParent int
	dfs993(root, x, 0, -1, &xParent, &xDepth)
	dfs993(root, y, 0, -1, &yParent, &yDepth)
	// xDepth > 1 also filters out values that were not found (depth left at
	// its zero value) and direct children of the root.
	return xDepth > 1 && xDepth == yDepth && xParent != yParent
}

// dfs993 searches the tree for val; when found, it stores the node's parent
// value in *parent and its 0-based depth in *depthRes.
func dfs993(root *TreeNode, val int, depth int, last int, parent *int, depthRes *int) {
	if root == nil {
		return
	}
	if root.Val == val {
		*depthRes = depth
		*parent = last
	}
	depth++
	dfs993(root.Left, val, depth, root.Val, parent, depthRes)
	dfs993(root.Right, val, depth, root.Val, parent, depthRes)
}
// node993 records a node's parent value and depth for the BFS solution.
type node993 struct {
	parent int
	depth  int
}

// isCousins2 is a BFS (level-order) solution: record every node's parent and
// depth while traversing, then compare the recorded entries for x and y.
func isCousins2(root *TreeNode, x int, y int) bool {
	if root == nil {
		return false
	}
	visitedNodes := make(map[int]node993, 100)
	visitedNodes[root.Val] = node993{
		parent: -1,
		depth:  0,
	}
	queue := make([]*TreeNode, 0)
	queue = append(queue, root)
	level := 0
	for len(queue) != 0 {
		size := len(queue)
		// Process exactly one tree level per outer iteration.
		for i := 0; i < size; i++ {
			n := queue[0]
			queue = queue[1:]
			if n.Left != nil {
				queue = append(queue, n.Left)
				visitedNodes[n.Left.Val] = node993{
					parent: n.Val,
					depth:  level + 1,
				}
			}
			if n.Right != nil {
				queue = append(queue, n.Right)
				visitedNodes[n.Right.Val] = node993{
					parent: n.Val,
					depth:  level + 1,
				}
			}
		}
		level++
	}
	// depth > 1 also filters out values never visited (zero-value entries).
	xNode, yNode := visitedNodes[x], visitedNodes[y]
	return xNode.parent != yNode.parent && xNode.depth > 1 && xNode.depth == yNode.depth
}
package indicators
import (
"errors"
"github.com/thetruetrade/gotrade"
)
// A Chaikin Oscillator Indicator (ChaikinOsc), no storage, for use in other indicators
type ChaikinOscWithoutStorage struct {
	*baseIndicatorWithFloatBounds
	// private variables
	fastTimePeriod    int // period of the fast EMA
	slowTimePeriod    int // period of the slow EMA
	adl               *AdlWithoutStorage
	emaFast           float64
	emaSlow           float64
	emaFastMultiplier float64 // EMA smoothing factor 2/(fastTimePeriod+1)
	emaSlowMultiplier float64 // EMA smoothing factor 2/(slowTimePeriod+1)
	periodCounter     int
	isInitialised     bool
}
// NewChaikinOscWithoutStorage creates a Chaikin Oscillator Indicator (ChaikinOsc) without storage
// This should be as simple as EMA(Adl,3) - EMA(Adl,10), however it seems the TA-Lib emas are intialised with the
// first adl value and not offset like the macd to conincide, they are both calculated from the 2nd bar and used before their
// lookback period is reached - so the emas are calculated inline and not using the general EmaWithoutStorage
func NewChaikinOscWithoutStorage(fastTimePeriod int, slowTimePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *ChaikinOscWithoutStorage, err error) {

	// an indicator without storage MUST have a value available action
	if valueAvailableAction == nil {
		return nil, ErrValueAvailableActionIsNil
	}

	// the minimum fastTimePeriod for a Chaikin Oscillator Indicator is 2
	if fastTimePeriod < 2 {
		return nil, errors.New("fastTimePeriod is less than the minimum (2)")
	}

	// the minimum slowTimePeriod for a Chaikin Oscillator Indicator is 2
	if slowTimePeriod < 2 {
		return nil, errors.New("slowTimePeriod is less than the minimum (2)")
	}

	// check the maximum fastTimePeriod
	if fastTimePeriod > MaximumLookbackPeriod {
		return nil, errors.New("fastTimePeriod is greater than the maximum (100000)")
	}

	// check the maximum slowTimePeriod
	if slowTimePeriod > MaximumLookbackPeriod {
		return nil, errors.New("slowTimePeriod is greater than the maximum (100000)")
	}

	lookback := slowTimePeriod - 1
	ind := ChaikinOscWithoutStorage{
		baseIndicatorWithFloatBounds: newBaseIndicatorWithFloatBounds(lookback, valueAvailableAction),
		slowTimePeriod:               slowTimePeriod,
		fastTimePeriod:               fastTimePeriod,
		emaFastMultiplier:            2.0 / float64(fastTimePeriod+1),
		emaSlowMultiplier:            2.0 / float64(slowTimePeriod+1),
		periodCounter:                slowTimePeriod * -1,
		isInitialised:                false,
	}

	ind.adl, err = NewAdlWithoutStorage(func(dataItem float64, streamBarIndex int) {
		ind.periodCounter += 1

		// seed both EMAs with the very first ADL value (TA-Lib behaviour,
		// see note above)
		if !ind.isInitialised {
			ind.emaFast = dataItem
			ind.emaSlow = dataItem
			ind.isInitialised = true
		}

		// both EMAs advance on every bar; the original code duplicated this
		// update in two mutually-exclusive periodCounter branches, this is
		// the single equivalent update
		ind.emaFast = (dataItem-ind.emaFast)*ind.emaFastMultiplier + ind.emaFast
		ind.emaSlow = (dataItem-ind.emaSlow)*ind.emaSlowMultiplier + ind.emaSlow

		// results are only published once the lookback period has elapsed
		if ind.periodCounter >= 0 {
			ind.UpdateIndicatorWithNewValue(ind.emaFast-ind.emaSlow, streamBarIndex)
		}
	})

	return &ind, err
}
// A Chaikin Oscillator Indicator (ChaikinOsc)
type ChaikinOsc struct {
	*ChaikinOscWithoutStorage

	// public variables
	// Data holds each computed oscillator value (fast EMA - slow EMA of the
	// ADL), appended in stream order.
	Data []float64
}
// NewChaikinOsc creates a Chaikin Oscillator (ChaikinOsc) for online usage
func NewChaikinOsc(fastTimePeriod int, slowTimePeriod int) (indicator *ChaikinOsc, err error) {
	osc := &ChaikinOsc{}

	// capture each published oscillator value into the indicator's storage
	store := func(dataItem float64, streamBarIndex int) {
		osc.Data = append(osc.Data, dataItem)
	}

	osc.ChaikinOscWithoutStorage, err = NewChaikinOscWithoutStorage(fastTimePeriod, slowTimePeriod, store)
	return osc, err
}
// NewDefaultChaikinOsc creates a Chaikin Oscillator (ChaikinOsc) for online usage with default parameters
//	- fastTimePeriod: 3
//	- slowTimePeriod: 10
func NewDefaultChaikinOsc() (indicator *ChaikinOsc, err error) {
	const (
		defaultFastTimePeriod = 3
		defaultSlowTimePeriod = 10
	)
	return NewChaikinOsc(defaultFastTimePeriod, defaultSlowTimePeriod)
}
// NewChaikinOscWithSrcLen creates a Chaikin Oscillator (ChaikinOsc) for offline usage,
// pre-sizing the Data storage for sourceLength input bars.
func NewChaikinOscWithSrcLen(sourceLength uint, fastTimePeriod int, slowTimePeriod int) (indicator *ChaikinOsc, err error) {
	ind, err := NewChaikinOsc(fastTimePeriod, slowTimePeriod)
	if err != nil {
		// the embedded indicator is nil on error, so GetLookbackPeriod below
		// would panic - bail out early instead
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it.
	// Compare before subtracting: sourceLength is unsigned, and the original
	// sourceLength-lookback expression wrapped around to a huge capacity when
	// the source was shorter than the lookback period.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}

	return ind, nil
}
// NewDefaultChaikinOscWithSrcLen creates a Chaikin Oscillator (ChaikinOsc) for offline usage
// with default parameters, pre-sizing the Data storage for sourceLength input bars.
func NewDefaultChaikinOscWithSrcLen(sourceLength uint) (indicator *ChaikinOsc, err error) {
	ind, err := NewDefaultChaikinOsc()
	if err != nil {
		// defensive: keep the error path consistent with NewChaikinOscWithSrcLen
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it.
	// Compare before subtracting: sourceLength is unsigned, and the original
	// sourceLength-lookback expression wrapped around to a huge capacity when
	// the source was shorter than the lookback period.
	if lookback := uint(ind.GetLookbackPeriod()); sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}

	return ind, nil
}
// NewChaikinOscForStream creates a Chaikin Oscillator (ChaikinOsc) for online usage with a source data stream
func NewChaikinOscForStream(priceStream gotrade.DOHLCVStreamSubscriber, fastTimePeriod int, slowTimePeriod int) (indicator *ChaikinOsc, err error) {
	osc, err := NewChaikinOsc(fastTimePeriod, slowTimePeriod)
	// subscribe the indicator so it receives each tick from the stream
	priceStream.AddTickSubscription(osc)
	return osc, err
}
// NewDefaultChaikinOscForStream creates a Chaikin Oscillator (ChaikinOsc) for online usage with a source data stream
func NewDefaultChaikinOscForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *ChaikinOsc, err error) {
	osc, err := NewDefaultChaikinOsc()
	// subscribe the indicator so it receives each tick from the stream
	priceStream.AddTickSubscription(osc)
	return osc, err
}
// NewChaikinOscForStreamWithSrcLen creates a Chaikin Oscillator (ChaikinOsc) for offline usage with a source data stream
func NewChaikinOscForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, fastTimePeriod int, slowTimePeriod int) (indicator *ChaikinOsc, err error) {
	osc, err := NewChaikinOscWithSrcLen(sourceLength, fastTimePeriod, slowTimePeriod)
	// subscribe the indicator so it receives each tick from the stream
	priceStream.AddTickSubscription(osc)
	return osc, err
}
// NewDefaultChaikinOscForStreamWithSrcLen creates a Chaikin Oscillator (ChaikinOsc) for offline usage with a source data stream
func NewDefaultChaikinOscForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *ChaikinOsc, err error) {
	osc, err := NewDefaultChaikinOscWithSrcLen(sourceLength)
	// subscribe the indicator so it receives each tick from the stream
	priceStream.AddTickSubscription(osc)
	return osc, err
}
// ReceiveDOHLCVTick consumes a source data DOHLCV price tick by forwarding it
// to the underlying accumulation/distribution line indicator, whose callback
// in turn drives the oscillator's EMA calculations.
func (ind *ChaikinOsc) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
	ind.adl.ReceiveDOHLCVTick(tickData, streamBarIndex)
}
package protoutil
import (
"fmt"
"reflect"
"time"
structpb "github.com/golang/protobuf/ptypes/struct"
log "github.com/sirupsen/logrus"
)
// StructSet wraps value as a protobuf Value and stores it in s under key.
// The struct's Fields map must already be non-nil.
func StructSet(s *structpb.Struct, key string, value interface{}) {
	s.Fields[key] = WrapValue(value)
}
// WrapValue takes a value and turns it into a protobuf structpb Value.
// Reflection drives the conversion: strings, all int/uint/float widths and
// bools map to the scalar kinds; slices and arrays recurse into a ListValue;
// maps recurse into a StructValue whose keys are stringified with %v.
// Pointers/structs are only handled for *structpb.Value (returned as-is),
// *structpb.Struct and time.Time (formatted via String()). A nil input
// becomes a NullValue; any other type is logged and yields nil.
func WrapValue(value interface{}) *structpb.Value {
	if value == nil {
		return &structpb.Value{Kind: &structpb.Value_NullValue{}}
	}
	v := reflect.ValueOf(value)
	switch v.Kind() {
	case reflect.String:
		return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: v.String()}}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// NOTE: int64 values above 2^53 lose precision in the float64 NumberValue
		return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: float64(v.Int())}}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: float64(v.Uint())}}
	case reflect.Float32, reflect.Float64:
		return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: v.Float()}}
	case reflect.Bool:
		return &structpb.Value{Kind: &structpb.Value_BoolValue{BoolValue: v.Bool()}}
	case reflect.Array, reflect.Slice:
		// wrap each element recursively into a ListValue
		o := make([]*structpb.Value, v.Len())
		for i := 0; i < v.Len(); i++ {
			wv := WrapValue(v.Index(i).Interface())
			o[i] = wv
		}
		return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: o}}}
	case reflect.Map:
		// map keys of any type are rendered with %v to form struct field names
		keys := v.MapKeys()
		o := &structpb.Struct{Fields: map[string]*structpb.Value{}}
		for _, key := range keys {
			k := fmt.Sprintf("%v", key.Interface())
			wv := WrapValue(v.MapIndex(key).Interface())
			o.Fields[k] = wv
		}
		return &structpb.Value{Kind: &structpb.Value_StructValue{StructValue: o}}
	case reflect.Ptr, reflect.Struct:
		switch val := value.(type) {
		case *structpb.Value:
			// already wrapped - pass through unchanged
			return val
		case *structpb.Struct:
			return &structpb.Value{Kind: &structpb.Value_StructValue{StructValue: val}}
		case time.Time:
			return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: val.String()}}
		default:
			log.Errorf("wrap unknown pointer data type: %T", value)
		}
	default:
		log.Errorf("wrap unknown data type: %T", value)
	}
	return nil
}
// UnWrapValue takes a protobuf structpb Value and returns the equivalent
// native Go value: string, float64, bool, nil, map[string]interface{} for
// structs, or []interface{} for lists (unwrapped recursively). Unknown kinds
// are logged and yield nil.
func UnWrapValue(value *structpb.Value) interface{} {
	switch kind := value.GetKind().(type) {
	case *structpb.Value_StringValue:
		return kind.StringValue
	case *structpb.Value_NumberValue:
		return kind.NumberValue
	case *structpb.Value_StructValue:
		return AsMap(kind.StructValue)
	case *structpb.Value_ListValue:
		elems := kind.ListValue.Values
		unwrapped := make([]interface{}, len(elems))
		for i, e := range elems {
			unwrapped[i] = UnWrapValue(e)
		}
		return unwrapped
	case *structpb.Value_BoolValue:
		return kind.BoolValue
	case *structpb.Value_NullValue:
		return nil
	default:
		log.Errorf("unwrap unknown data type: %T", value.GetKind())
	}
	return nil
}
// CopyToStructSub copies only the listed keys from the values map into the
// protobuf struct s; keys missing from the map are stored as null values.
func CopyToStructSub(s *structpb.Struct, keys []string, values map[string]interface{}) {
	for _, key := range keys {
		StructSet(s, key, values[key])
	}
}
// CopyToStruct copies every entry of the values map into the protobuf struct s.
func CopyToStruct(s *structpb.Struct, values map[string]interface{}) {
	for key, value := range values {
		StructSet(s, key, value)
	}
}
// CopyStructToStruct copies every field of the src protobuf struct into dst.
func CopyStructToStruct(dst *structpb.Struct, src *structpb.Struct) {
	for key, value := range src.Fields {
		StructSet(dst, key, value)
	}
}
// CopyStructToStructSub copies only the listed keys from the src protobuf
// struct into dst; keys absent from src are stored as null values.
func CopyStructToStructSub(dst *structpb.Struct, keys []string, src *structpb.Struct) {
	for _, key := range keys {
		StructSet(dst, key, src.Fields[key])
	}
}
// AsMap takes a protobuf Struct and converts it into a Go map, unwrapping
// each field recursively. A nil struct yields a nil map.
func AsMap(src *structpb.Struct) map[string]interface{} {
	if src == nil {
		return nil
	}
	result := make(map[string]interface{}, len(src.Fields))
	for key, field := range src.Fields {
		result[key] = UnWrapValue(field)
	}
	return result
}
// AsStruct takes a Go map and converts it into a protobuf Struct, wrapping
// each value recursively.
func AsStruct(src map[string]interface{}) *structpb.Struct {
	dst := &structpb.Struct{Fields: make(map[string]*structpb.Value, len(src))}
	for key, value := range src {
		StructSet(dst, key, value)
	}
	return dst
}
// AsStringList takes a protobuf ListValue and converts it into a []string;
// non-string elements become empty strings (GetStringValue's zero value).
func AsStringList(src *structpb.ListValue) []string {
	result := make([]string, len(src.Values))
	for i, v := range src.Values {
		result[i] = v.GetStringValue()
	}
	return result
}
// AsListValue takes a go []string and converts it into a protobuf ListValue
func AsListValue(str []string) *structpb.ListValue {
v := make([]*structpb.Value, len(str))
for i := range str {
v[i] = &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: str[i]}}
}
o := structpb.ListValue{Values: v}
return &o
} | protoutil/protoutil.go | 0.609292 | 0.481759 | protoutil.go | starcoder |
package semantic
import (
"errors"
"fmt"
"strconv"
"time"
"github.com/influxdata/flux/ast"
)
// New creates a semantic graph from the provided AST by recursively
// resolving every statement and expression in the program body.
func New(prog *ast.Program) (*Program, error) {
	return analyzeProgram(prog)
}
// analyzeProgram resolves each top-level statement of the AST program into
// its semantic form, preserving order, and stops at the first error.
func analyzeProgram(prog *ast.Program) (*Program, error) {
	out := &Program{
		loc:  loc(prog.Location()),
		Body: make([]Statement, len(prog.Body)),
	}
	for i, stmt := range prog.Body {
		resolved, err := analyzeStatment(stmt)
		if err != nil {
			return nil, err
		}
		out.Body[i] = resolved
	}
	return out, nil
}
// analyzeNode dispatches a generic AST node to the statement or expression
// analyzer; any other node type is an error.
func analyzeNode(n ast.Node) (Node, error) {
	if stmt, ok := n.(ast.Statement); ok {
		return analyzeStatment(stmt)
	}
	if expr, ok := n.(ast.Expression); ok {
		return analyzeExpression(expr)
	}
	return nil, fmt.Errorf("unsupported node %T", n)
}
// analyzeStatment dispatches an AST statement to the matching analyzer.
// (The misspelled name is kept for compatibility with existing callers.)
func analyzeStatment(s ast.Statement) (Statement, error) {
	switch stmt := s.(type) {
	case *ast.BlockStatement:
		return analyzeBlockStatement(stmt)
	case *ast.OptionStatement:
		return analyzeOptionStatement(stmt)
	case *ast.ExpressionStatement:
		return analyzeExpressionStatement(stmt)
	case *ast.ReturnStatement:
		return analyzeReturnStatement(stmt)
	case *ast.VariableDeclaration:
		// Expect a single declaration
		if n := len(stmt.Declarations); n != 1 {
			return nil, fmt.Errorf("only single variable declarations are supported, found %d declarations", n)
		}
		return analyzeVariableDeclaration(stmt.Declarations[0])
	default:
		return nil, fmt.Errorf("unsupported statement %T", s)
	}
}
// analyzeBlockStatement resolves every statement in the block and verifies
// that the block ends with a return statement.
func analyzeBlockStatement(block *ast.BlockStatement) (*BlockStatement, error) {
	b := &BlockStatement{
		loc:  loc(block.Location()),
		Body: make([]Statement, len(block.Body)),
	}
	for i, s := range block.Body {
		n, err := analyzeStatment(s)
		if err != nil {
			return nil, err
		}
		b.Body[i] = n
	}
	// An empty block trivially lacks a return; checking the length first also
	// avoids indexing b.Body[-1], which panicked in the original code.
	if len(b.Body) == 0 {
		return nil, errors.New("missing return statement in block")
	}
	if _, ok := b.Body[len(b.Body)-1].(*ReturnStatement); !ok {
		return nil, errors.New("missing return statement in block")
	}
	return b, nil
}
// analyzeOptionStatement resolves the option's variable declaration and wraps
// it in a semantic OptionStatement.
func analyzeOptionStatement(option *ast.OptionStatement) (*OptionStatement, error) {
	decl, err := analyzeVariableDeclaration(option.Declaration)
	if err != nil {
		return nil, err
	}
	out := &OptionStatement{
		loc:         loc(option.Location()),
		Declaration: decl,
	}
	return out, nil
}
// analyzeExpressionStatement resolves the wrapped expression and produces a
// semantic ExpressionStatement.
func analyzeExpressionStatement(expr *ast.ExpressionStatement) (*ExpressionStatement, error) {
	inner, err := analyzeExpression(expr.Expression)
	if err != nil {
		return nil, err
	}
	out := &ExpressionStatement{
		loc:        loc(expr.Location()),
		Expression: inner,
	}
	return out, nil
}
// analyzeReturnStatement resolves the returned expression and produces a
// semantic ReturnStatement.
func analyzeReturnStatement(ret *ast.ReturnStatement) (*ReturnStatement, error) {
	argument, err := analyzeExpression(ret.Argument)
	if err != nil {
		return nil, err
	}
	out := &ReturnStatement{
		loc:      loc(ret.Location()),
		Argument: argument,
	}
	return out, nil
}
// analyzeVariableDeclaration resolves a single declarator's identifier and
// initializer into a NativeVariableDeclaration.
func analyzeVariableDeclaration(decl *ast.VariableDeclarator) (*NativeVariableDeclaration, error) {
	ident, err := analyzeIdentifier(decl.ID)
	if err != nil {
		return nil, err
	}
	initializer, err := analyzeExpression(decl.Init)
	if err != nil {
		return nil, err
	}
	return &NativeVariableDeclaration{
		loc:        loc(decl.Location()),
		Identifier: ident,
		Init:       initializer,
	}, nil
}
// analyzeExpression dispatches an AST expression to the matching analyzer;
// literals are handled by analyzeLiteral.
func analyzeExpression(expr ast.Expression) (Expression, error) {
	switch e := expr.(type) {
	case *ast.ArrowFunctionExpression:
		return analyzeArrowFunctionExpression(e)
	case *ast.CallExpression:
		return analyzeCallExpression(e)
	case *ast.MemberExpression:
		return analyzeMemberExpression(e)
	case *ast.IndexExpression:
		return analyzeIndexExpression(e)
	case *ast.PipeExpression:
		return analyzePipeExpression(e)
	case *ast.BinaryExpression:
		return analyzeBinaryExpression(e)
	case *ast.UnaryExpression:
		return analyzeUnaryExpression(e)
	case *ast.LogicalExpression:
		return analyzeLogicalExpression(e)
	case *ast.ObjectExpression:
		return analyzeObjectExpression(e)
	case *ast.ArrayExpression:
		return analyzeArrayExpression(e)
	case *ast.Identifier:
		return analyzeIdentifierExpression(e)
	case ast.Literal:
		return analyzeLiteral(e)
	default:
		return nil, fmt.Errorf("unsupported expression %T", expr)
	}
}
// analyzeLiteral dispatches an AST literal to the matching analyzer. A pipe
// literal is only legal as a parameter default and is rejected here.
func analyzeLiteral(lit ast.Literal) (Literal, error) {
	switch l := lit.(type) {
	case *ast.StringLiteral:
		return analyzeStringLiteral(l)
	case *ast.BooleanLiteral:
		return analyzeBooleanLiteral(l)
	case *ast.FloatLiteral:
		return analyzeFloatLiteral(l)
	case *ast.IntegerLiteral:
		return analyzeIntegerLiteral(l)
	case *ast.UnsignedIntegerLiteral:
		return analyzeUnsignedIntegerLiteral(l)
	case *ast.RegexpLiteral:
		return analyzeRegexpLiteral(l)
	case *ast.DurationLiteral:
		return analyzeDurationLiteral(l)
	case *ast.DateTimeLiteral:
		return analyzeDateTimeLiteral(l)
	case *ast.PipeLiteral:
		return nil, errors.New("a pipe literal may only be used as a default value for an argument in a function definition")
	default:
		return nil, fmt.Errorf("unsupported literal %T", lit)
	}
}
// analyzeArrowFunctionExpression resolves an arrow function literal into a
// semantic FunctionExpression. Parameter default values are collected into a
// separate ObjectExpression (Defaults), and at most one parameter may be
// declared with a pipe-literal default, which marks it as the pipe argument.
func analyzeArrowFunctionExpression(arrow *ast.ArrowFunctionExpression) (*FunctionExpression, error) {
	var parameters *FunctionParameters
	var defaults *ObjectExpression
	if len(arrow.Params) > 0 {
		pipedCount := 0
		parameters = &FunctionParameters{
			loc: loc(arrow.Location()),
		}
		parameters.List = make([]*FunctionParameter, len(arrow.Params))
		for i, p := range arrow.Params {
			key, err := analyzeIdentifier(p.Key)
			if err != nil {
				return nil, err
			}
			var def Expression
			var piped bool
			if p.Value != nil {
				if _, ok := p.Value.(*ast.PipeLiteral); ok {
					// Special case the PipeLiteral
					piped = true
					pipedCount++
					if pipedCount > 1 {
						return nil, errors.New("only a single argument may be piped")
					}
				} else {
					d, err := analyzeExpression(p.Value)
					if err != nil {
						return nil, err
					}
					def = d
				}
			}
			parameters.List[i] = &FunctionParameter{
				loc: loc(p.Location()),
				Key: key,
			}
			if def != nil {
				// Lazily create the Defaults object on the first parameter
				// that carries a (non-pipe) default value.
				if defaults == nil {
					defaults = &ObjectExpression{
						loc:        loc(arrow.Location()),
						Properties: make([]*Property, 0, len(arrow.Params)),
					}
				}
				defaults.Properties = append(defaults.Properties, &Property{
					loc:   loc(p.Location()),
					Key:   key,
					Value: def,
				})
			}
			if piped {
				parameters.Pipe = key
			}
		}
	}
	// The body may be either an expression or a block statement.
	b, err := analyzeNode(arrow.Body)
	if err != nil {
		return nil, err
	}
	f := &FunctionExpression{
		loc:      loc(arrow.Location()),
		Defaults: defaults,
		Block: &FunctionBlock{
			loc:        loc(arrow.Location()),
			Parameters: parameters,
			Body:       b,
		},
	}
	return f, nil
}
// analyzeCallExpression resolves the callee and the call's arguments, which
// must be absent or a single object expression of named parameters.
func analyzeCallExpression(call *ast.CallExpression) (*CallExpression, error) {
	callee, err := analyzeExpression(call.Callee)
	if err != nil {
		return nil, err
	}
	var args *ObjectExpression
	switch len(call.Arguments) {
	case 0:
		// no arguments: use an empty object expression
		args = &ObjectExpression{
			loc: loc(call.Location()),
		}
	case 1:
		obj, ok := call.Arguments[0].(*ast.ObjectExpression)
		if !ok {
			return nil, fmt.Errorf("arguments not an object expression")
		}
		args, err = analyzeObjectExpression(obj)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("arguments are not a single object expression %v", args)
	}
	return &CallExpression{
		loc:       loc(call.Location()),
		Callee:    callee,
		Arguments: args,
	}, nil
}
// analyzeMemberExpression resolves the object being accessed and extracts the
// property name, which may be written as an identifier or a string literal.
func analyzeMemberExpression(member *ast.MemberExpression) (*MemberExpression, error) {
	object, err := analyzeExpression(member.Object)
	if err != nil {
		return nil, err
	}
	var property string
	switch p := member.Property.(type) {
	case *ast.Identifier:
		property = p.Name
	case *ast.StringLiteral:
		property = p.Value
	}
	return &MemberExpression{
		loc:      loc(member.Location()),
		Object:   object,
		Property: property,
	}, nil
}
// analyzeIndexExpression resolves the array and index sub-expressions of an
// indexing operation.
func analyzeIndexExpression(e *ast.IndexExpression) (Expression, error) {
	arr, err := analyzeExpression(e.Array)
	if err != nil {
		return nil, err
	}
	idx, err := analyzeExpression(e.Index)
	if err != nil {
		return nil, err
	}
	return &IndexExpression{
		loc:   loc(e.Location()),
		Array: arr,
		Index: idx,
	}, nil
}
// analyzePipeExpression resolves a |> expression by analyzing the call on the
// right-hand side and attaching the piped argument to it.
func analyzePipeExpression(pipe *ast.PipeExpression) (*CallExpression, error) {
	call, err := analyzeCallExpression(pipe.Call)
	if err != nil {
		return nil, err
	}
	piped, err := analyzeExpression(pipe.Argument)
	if err != nil {
		return nil, err
	}
	call.Pipe = piped
	return call, nil
}
// analyzeBinaryExpression resolves both operands of a binary expression and
// carries the operator through unchanged.
func analyzeBinaryExpression(binary *ast.BinaryExpression) (*BinaryExpression, error) {
	lhs, err := analyzeExpression(binary.Left)
	if err != nil {
		return nil, err
	}
	rhs, err := analyzeExpression(binary.Right)
	if err != nil {
		return nil, err
	}
	return &BinaryExpression{
		loc:      loc(binary.Location()),
		Operator: binary.Operator,
		Left:     lhs,
		Right:    rhs,
	}, nil
}
// analyzeUnaryExpression resolves the operand of a unary expression and
// carries the operator through unchanged.
func analyzeUnaryExpression(unary *ast.UnaryExpression) (*UnaryExpression, error) {
	argument, err := analyzeExpression(unary.Argument)
	if err != nil {
		return nil, err
	}
	return &UnaryExpression{
		loc:      loc(unary.Location()),
		Operator: unary.Operator,
		Argument: argument,
	}, nil
}
// analyzeLogicalExpression resolves both operands of an and/or expression and
// carries the operator through unchanged.
func analyzeLogicalExpression(logical *ast.LogicalExpression) (*LogicalExpression, error) {
	lhs, err := analyzeExpression(logical.Left)
	if err != nil {
		return nil, err
	}
	rhs, err := analyzeExpression(logical.Right)
	if err != nil {
		return nil, err
	}
	return &LogicalExpression{
		loc:      loc(logical.Location()),
		Operator: logical.Operator,
		Left:     lhs,
		Right:    rhs,
	}, nil
}
// analyzeObjectExpression resolves each property of an object literal in
// order, stopping at the first error.
func analyzeObjectExpression(obj *ast.ObjectExpression) (*ObjectExpression, error) {
	out := &ObjectExpression{
		loc:        loc(obj.Location()),
		Properties: make([]*Property, len(obj.Properties)),
	}
	for i, prop := range obj.Properties {
		resolved, err := analyzeProperty(prop)
		if err != nil {
			return nil, err
		}
		out.Properties[i] = resolved
	}
	return out, nil
}
// analyzeArrayExpression resolves each element of an array literal in order,
// stopping at the first error.
func analyzeArrayExpression(array *ast.ArrayExpression) (*ArrayExpression, error) {
	out := &ArrayExpression{
		loc:      loc(array.Location()),
		Elements: make([]Expression, len(array.Elements)),
	}
	for i, elem := range array.Elements {
		resolved, err := analyzeExpression(elem)
		if err != nil {
			return nil, err
		}
		out.Elements[i] = resolved
	}
	return out, nil
}
// analyzeIdentifier converts an AST identifier into its semantic form.
func analyzeIdentifier(ident *ast.Identifier) (*Identifier, error) {
	out := &Identifier{
		loc:  loc(ident.Location()),
		Name: ident.Name,
	}
	return out, nil
}
// analyzeIdentifierExpression converts an AST identifier used in expression
// position into a semantic IdentifierExpression.
func analyzeIdentifierExpression(ident *ast.Identifier) (*IdentifierExpression, error) {
	out := &IdentifierExpression{
		loc:  loc(ident.Location()),
		Name: ident.Name,
	}
	return out, nil
}
// analyzeProperty resolves the key identifier and value expression of a
// single object property.
func analyzeProperty(property *ast.Property) (*Property, error) {
	k, err := analyzeIdentifier(property.Key)
	if err != nil {
		return nil, err
	}
	v, err := analyzeExpression(property.Value)
	if err != nil {
		return nil, err
	}
	return &Property{
		loc:   loc(property.Location()),
		Key:   k,
		Value: v,
	}, nil
}
// analyzeDateTimeLiteral converts an AST date-time literal to semantic form.
func analyzeDateTimeLiteral(lit *ast.DateTimeLiteral) (*DateTimeLiteral, error) {
	out := &DateTimeLiteral{loc: loc(lit.Location()), Value: lit.Value}
	return out, nil
}
// analyzeDurationLiteral sums the literal's component durations (e.g. the
// parts of "1h30m") into a single time.Duration value.
func analyzeDurationLiteral(lit *ast.DurationLiteral) (*DurationLiteral, error) {
	var total time.Duration
	for _, part := range lit.Values {
		d, err := toDuration(part)
		if err != nil {
			return nil, err
		}
		total += d
	}
	out := &DurationLiteral{loc: loc(lit.Location()), Value: total}
	return out, nil
}
// analyzeFloatLiteral converts an AST float literal to semantic form.
func analyzeFloatLiteral(lit *ast.FloatLiteral) (*FloatLiteral, error) {
	out := &FloatLiteral{loc: loc(lit.Location()), Value: lit.Value}
	return out, nil
}
// analyzeIntegerLiteral converts an AST integer literal to semantic form.
func analyzeIntegerLiteral(lit *ast.IntegerLiteral) (*IntegerLiteral, error) {
	out := &IntegerLiteral{loc: loc(lit.Location()), Value: lit.Value}
	return out, nil
}
// analyzeUnsignedIntegerLiteral converts an AST unsigned-integer literal to
// semantic form.
func analyzeUnsignedIntegerLiteral(lit *ast.UnsignedIntegerLiteral) (*UnsignedIntegerLiteral, error) {
	out := &UnsignedIntegerLiteral{loc: loc(lit.Location()), Value: lit.Value}
	return out, nil
}
// analyzeStringLiteral converts an AST string literal to semantic form.
func analyzeStringLiteral(lit *ast.StringLiteral) (*StringLiteral, error) {
	out := &StringLiteral{loc: loc(lit.Location()), Value: lit.Value}
	return out, nil
}
// analyzeBooleanLiteral converts an AST boolean literal to semantic form.
func analyzeBooleanLiteral(lit *ast.BooleanLiteral) (*BooleanLiteral, error) {
	out := &BooleanLiteral{loc: loc(lit.Location()), Value: lit.Value}
	return out, nil
}
// analyzeRegexpLiteral converts an AST regular-expression literal to semantic
// form.
func analyzeRegexpLiteral(lit *ast.RegexpLiteral) (*RegexpLiteral, error) {
	out := &RegexpLiteral{loc: loc(lit.Location()), Value: lit.Value}
	return out, nil
}
func toDuration(lit ast.Duration) (time.Duration, error) {
// TODO: This is temporary code until we have proper duration type that takes different months, DST, etc into account
var dur time.Duration
var err error
mag := lit.Magnitude
unit := lit.Unit
switch unit {
case "y":
mag *= 12
unit = "mo"
fallthrough
case "mo":
const weeksPerMonth = 365.25 / 12 / 7
mag = int64(float64(mag) * weeksPerMonth)
unit = "w"
fallthrough
case "w":
mag *= 7
unit = "d"
fallthrough
case "d":
mag *= 24
unit = "h"
fallthrough
default:
// ParseDuration will handle h, m, s, ms, us, ns.
dur, err = time.ParseDuration(strconv.FormatInt(mag, 10) + unit)
}
return dur, err
} | semantic/analyze.go | 0.671471 | 0.511168 | analyze.go | starcoder |
package pyg
// #include "utils.h"
// static inline void incref(PyObject *obj) { Py_INCREF(obj); }
// static inline void decref(PyObject *obj) { Py_DECREF(obj); }
// static inline void xdecref(PyObject *obj) { Py_XDECREF(obj); }
import "C"
import "fmt"
// Error represents a Python exception as a Go struct that implements the
// error interface. It allows Go code to handle Python exceptions in an
// idiomatic Go fashion.
type Error struct {
	Kind  Object // the exception type (class or other kind object)
	Value Object // the exception value; may be unnormalized (see Normalize)
	tb    *C.PyObject // associated Python traceback, may be nil
}
// Error() returns a string representation of the Python exception represented
// by the Error e. This is the same as the final line of the Python output from
// an uncaught exception.
func (e *Error) Error() string {
	ts := ""
	// excName resolves the exception kind to a (module, class) name pair; a
	// nil class name means the kind is not a recognised exception class, in
	// which case we fall back to its str() representation.
	en := C.excName(c(e.Kind))
	if en.c == nil {
		ts = stringify(e.Kind)
	} else {
		// prefix with the module name when one is available
		if en.m != nil {
			ts = C.GoString(en.m) + "."
		}
		ts += C.GoString(en.c)
	}
	s := stringify(e.Value)
	return fmt.Sprintf("%s: %s", ts, s)
}
// Matches returns true if e.Kind matches the exception in exc. If exc is a
// Class, then true is returned if e.Kind is an instance. If exc is a Tuple,
// then all elements (and recursively for sub elements) are searched for a
// match. The comparison is delegated to Python's
// PyErr_GivenExceptionMatches, which implements those rules.
func (e *Error) Matches(exc Object) bool {
	return C.PyErr_GivenExceptionMatches(c(e.Kind), c(exc)) != 0
}
// Normalize adjusts e.Kind/e.Value in the case that the values aren't
// normalized to start with. It's possible that an Error returned from Python
// might have e.Kind be a Class, with e.Value not being an instance of that
// class, Normalize will fix this. The separate normalization is implemented in
// Python to improve performance.
func (e *Error) Normalize() {
	exc := c(e.Kind)
	val := c(e.Value)
	tb := e.tb
	C.PyErr_NormalizeException(&exc, &val, &tb)
	// PyErr_NormalizeException may replace any of the three pointers; only
	// re-wrap those that actually changed.
	if exc != c(e.Kind) {
		e.Kind = newObject(exc)
	}
	if val != c(e.Value) {
		e.Value = newObject(val)
	}
	e.tb = tb
}
// NewErrorV returns a new Error of the specified kind and with the given
// value. Both objects have their reference counts incremented, since the
// returned Error holds on to them.
func NewErrorV(kind Object, value Object) *Error {
	Incref(kind)
	Incref(value)
	return &Error{Kind: kind, Value: value}
}
// NewError returns a new Error of the specified kind, with the value being a
// new String containing the message built from the given format and args.
// The kind's reference count is incremented since the Error holds on to it.
func NewError(kind Object, format string, args ...interface{}) *Error {
	Incref(kind)
	value, _ := NewUnicode(fmt.Sprintf(format, args...))
	return &Error{Kind: kind, Value: value}
}
// exceptionRaised reports whether a Python exception is currently pending in
// the interpreter's error state.
func exceptionRaised() bool {
	return C.PyErr_Occurred() != nil
}
func exception() error {
if C.PyErr_Occurred() == nil {
return nil
}
var t, v, tb *C.PyObject
defer C.xdecref(v)
C.PyErr_Fetch(&t, &v, &tb)
return &Error{newObject(t), newObject(v), tb}
}
// raise sets the Python error state from a Go error. When err is a *Error the
// original exception kind and value are restored; otherwise a generic
// Exception is raised whose value is the err.Error() message.
func raise(err error) {
	var val *C.PyObject
	var exc = C.PyExc_Exception
	e, ok := err.(*Error)
	if ok {
		exc = c(e.Kind)
		val = c(e.Value)
	} else {
		// not a Python-originated error: carry the Go error message across
		v, _ := NewUnicode(err.Error())
		val = c(v)
	}
	C.PyErr_SetObject(exc, val)
}
// int2Err maps a CPython int return code to a Go error: negative codes
// indicate a pending exception, which is fetched and returned.
func int2Err(i C.int) error {
	if i >= 0 {
		return nil
	}
	return exception()
}
// int2BoolErr maps a CPython tri-state int return code to (bool, error):
// negative means a pending exception, zero means false, positive means true.
func int2BoolErr(i C.int) (bool, error) {
	if i >= 0 {
		return i > 0, nil
	}
	return false, exception()
}
// ssize_t2Int64Err maps a CPython Py_ssize_t return value to (int64, error):
// a negative value indicates a pending exception.
func ssize_t2Int64Err(s C.Py_ssize_t) (int64, error) {
	if s >= 0 {
		return int64(s), nil
	}
	return 0, exception()
}
func obj2ObjErr(obj *C.PyObject) (Object, error) {
if obj == nil {
return nil, exception()
}
return newObject(obj), nil
} | err.go | 0.680666 | 0.400105 | err.go | starcoder |
package resolver
import (
"github.com/google/gapid/gapil/ast"
"github.com/google/gapid/gapil/semantic"
)
// block resolves an AST block (which may be nil) into a semantic Block,
// resolving its statements with a void expected type and registering the
// AST→semantic mapping.
func block(rv *resolver, in *ast.Block, owner semantic.Node) *semantic.Block {
	out := &semantic.Block{AST: in}
	if in == nil {
		return out
	}
	rv.with(semantic.VoidType, func() {
		// statements resolved inside the callback are appended here
		rv.scope.block = &out.Statements
		if ret := body(rv, in.Statements, owner); ret != nil {
			rv.addStatement(ret)
		}
	})
	rv.mappings.Add(in, out)
	return out
}
// body is a resolve function that processes a list of statements and injects them
// into the context's current block.
// the final return statement, if present, is not injected, but returned from the
// function, as it often needs special handling depending on the owner of the
// statements
func body(rv *resolver, in []ast.Node, owner semantic.Node) *semantic.Return {
	f, isFunction := owner.(*semantic.Function)
	var returnStatement *ast.Return
	// we need to check and strip the "return" if the function is supposed to have one
	if isFunction && !isVoid(f.Return.Type) {
		if len(in) == 0 {
			rv.errorf(f.AST, "Missing return statement")
		} else if r, ok := in[len(in)-1].(*ast.Return); !ok {
			rv.errorf(f.AST, "Last statement must be a return")
		} else {
			// strip the trailing return from the list; it is resolved
			// separately below so the return parameter is in scope
			in = in[0 : len(in)-1]
			returnStatement = r
		}
	}
	// now process the non return statements
	for _, s := range in {
		rv.addStatement(statement(rv, s))
	}
	// and special case the return statement allowing access to the return parameter
	if returnStatement != nil {
		out := return_(rv, returnStatement, f)
		rv.mappings.Add(returnStatement, out)
		return out
	}
	return nil
}
// statement resolves a single AST statement node into its semantic form,
// dispatching on the concrete node type. Errors are reported through
// rv.errorf and yield semantic.Invalid rather than aborting resolution.
// Note: the early-return error paths (bad call statements, stray returns)
// deliberately skip the final rv.mappings.Add.
func statement(rv *resolver, in ast.Node) semantic.Statement {
	var out semantic.Statement
	switch in := in.(type) {
	case *ast.Assign:
		out = assign(rv, in)
	case *ast.DeclareLocal:
		out = declareLocal(rv, in)
	case *ast.Delete:
		out = delete_(rv, in)
	case *ast.Clear:
		out = clear_(rv, in)
	case *ast.Branch:
		out = branch(rv, in)
	case *ast.Switch:
		out = switch_(rv, in)
	case *ast.Iteration:
		out = iteration(rv, in)
	case *ast.MapIteration:
		out = mapIteration(rv, in)
	case *ast.Call:
		c := call(rv, in)
		// a call used as a statement must not produce a value
		if e, ok := c.(semantic.Expression); ok {
			if ty := e.ExpressionType(); !isVoid(ty) && !isInvalid(ty) {
				rv.errorf(in, "function with return type as statement not allowed")
				return semantic.Invalid{}
			}
		}
		s, ok := c.(semantic.Statement)
		if !ok {
			rv.errorf(in, "expected statement, got %T", c)
			return semantic.Invalid{}
		}
		out = s
	case *ast.Return:
		// returns are handled by body(); one here is misplaced
		rv.errorf(in, "unexpected return")
		return semantic.Invalid{}
	case *ast.Abort:
		out = &semantic.Abort{AST: in, Function: rv.scope.function}
	case *ast.Fence:
		out = &semantic.Fence{AST: in, Explicit: true}
	case *ast.Generic, *ast.Member:
		// a bare expression in statement position; keep the partial result
		rv.errorf(in, "incomplete statement")
		out = semantic.Invalid{Partial: expression(rv, in)}
	case *ast.Invalid:
		out = semantic.Invalid{}
	default:
		rv.errorf(in, "not a statement (%T)", in)
		out = semantic.Invalid{}
	}
	rv.mappings.Add(in, out)
	return out
}
// assign resolves an assignment statement, producing the semantic node that
// matches the shape of the left-hand side (array/map/slice index assignment,
// or a plain Assign for globals, ignores and members). Type errors are
// reported via rv.errorf but still yield a usable node.
func assign(rv *resolver, in *ast.Assign) semantic.Statement {
	lhs := expression(rv, in.LHS)
	var rhs semantic.Expression
	// resolve the RHS with the LHS's type as the expected type, so untyped
	// values can be inferred from the assignment target
	rv.with(lhs.ExpressionType(), func() {
		rhs = expression(rv, in.RHS)
	})
	var out semantic.Statement
	inferUnknown(rv, lhs, rhs)
	lt := lhs.ExpressionType()
	rt := rhs.ExpressionType()
	if !assignable(lt, rt) {
		rv.errorf(in, "cannot assign %s to %s", typename(rt), typename(lt))
	}
	switch lhs := lhs.(type) {
	case semantic.Invalid:
		out = semantic.Invalid{}
	case *semantic.ArrayIndex:
		out = &semantic.ArrayAssign{AST: in, To: lhs, Value: rhs, Operator: in.Operator}
	case *semantic.MapIndex:
		out = &semantic.MapAssign{AST: in, To: lhs, Value: rhs, Operator: in.Operator}
	case *semantic.SliceIndex:
		out = &semantic.SliceAssign{AST: in, To: lhs, Value: rhs, Operator: in.Operator}
	case *semantic.Global, *semantic.Ignore, *semantic.Member:
		out = &semantic.Assign{AST: in, LHS: lhs, Operator: in.Operator, RHS: rhs}
	case *semantic.Local:
		rv.errorf(in, "Cannot assign to '%v' - locals are immutable", lhs.Name())
	default:
		rv.icef(in, "Unexpected LHS type for assignment: %T", lhs)
	}
	// the Local and default branches above report an error without setting
	// out; fall back to a generic Assign so resolution can continue
	if out == nil {
		out = &semantic.Assign{AST: in, LHS: lhs, Operator: in.Operator, RHS: rhs}
	}
	rv.mappings.Add(in, out)
	return out
}
// delete_ resolves an ast.Delete statement into a semantic.MapRemove,
// checking that the target is a map and that the key type is comparable with
// the map's key type.
func delete_(rv *resolver, in *ast.Delete) *semantic.MapRemove {
	k := semantic.Expression(semantic.Invalid{})
	m := expression(rv, in.Map)
	mty, ok := m.ExpressionType().(*semantic.Map)
	if ok {
		// resolve the key with the map's key type as the expected type
		rv.with(mty.KeyType, func() {
			k = expression(rv, in.Key)
		})
		if !comparable(k.ExpressionType(), mty.KeyType) {
			rv.errorf(in.Key, "Cannot use %s as key to %s",
				typename(k.ExpressionType()), typename(m.ExpressionType()))
		}
	} else {
		rv.errorf(in.Map, "delete's first argument must be a map, got %s", typename(m.ExpressionType()))
	}
	out := &semantic.MapRemove{AST: in, Type: mty, Map: m, Key: k}
	// Register the AST→semantic mapping; this was missing and made delete_
	// inconsistent with clear_ and every other statement resolver here.
	rv.mappings.Add(in, out)
	return out
}
// clear_ resolves an ast.Clear statement into a semantic.MapClear, reporting
// an error when the argument is not a map.
func clear_(rv *resolver, in *ast.Clear) *semantic.MapClear {
	target := expression(rv, in.Map)
	mty, ok := target.ExpressionType().(*semantic.Map)
	if !ok {
		rv.errorf(in.Map, "clear's argument must be a map, got %s", typename(target.ExpressionType()))
	}
	out := &semantic.MapClear{AST: in, Type: mty, Map: target}
	rv.mappings.Add(in, out)
	return out
}
func addLocal(rv *resolver, in *ast.DeclareLocal, name string, value semantic.Expression) *semantic.DeclareLocal {
out := &semantic.DeclareLocal{AST: in}
out.Local = &semantic.Local{
Declaration: out,
Named: semantic.Named(name),
Value: value,
Type: value.ExpressionType(),
}
if isVoid(out.Local.Type) {
rv.errorf(in, "void in local declaration")
}
rv.addNamed(out.Local)
if in != nil {
rv.mappings.Add(in, out)
rv.mappings.Add(in.Name, out.Local)
}
return out
}
func declareLocal(rv *resolver, in *ast.DeclareLocal) *semantic.DeclareLocal {
out := addLocal(rv, in, in.Name.Value, expression(rv, in.RHS))
rv.mappings.Add(in, out)
return out
}
func branch(rv *resolver, in *ast.Branch) *semantic.Branch {
out := &semantic.Branch{AST: in}
out.Condition = expression(rv, in.Condition)
ct := out.Condition.ExpressionType()
if ct == nil {
rv.errorf(in, "condition was not valid")
return out
}
if !equal(ct, semantic.BoolType) {
rv.errorf(in, "if condition must be boolean (got %s)", typename(ct))
}
out.True = block(rv, in.True, out)
if in.False != nil {
out.False = block(rv, in.False, out)
}
rv.mappings.Add(in, out)
return out
}
func switch_(rv *resolver, in *ast.Switch) *semantic.Switch {
out := &semantic.Switch{AST: in}
out.Value = expression(rv, in.Value)
vt := out.Value.ExpressionType()
for _, c := range in.Cases {
out.Cases = append(out.Cases, case_(rv, c, vt))
}
if in.Default != nil {
out.Default = block(rv, in.Default.Block, out)
}
rv.mappings.Add(in, out)
return out
}
// case_ translates Case in to a switch Case.
// vt is the resolved type of the switch value being compared against, and can
// be used to infer the case condition type.
func case_(rv *resolver, in *ast.Case, vt semantic.Type) *semantic.Case {
out := &semantic.Case{AST: in}
rv.with(vt, func() {
for _, cond := range in.Conditions {
exp := expression(rv, cond)
out.Conditions = append(out.Conditions, exp)
ct := exp.ExpressionType()
if !comparable(vt, ct) {
rv.errorf(cond, "switch value %s is not comparable with case condition %s", typename(vt), typename(ct))
}
}
})
out.Annotations = annotations(rv, in.Annotations)
out.Block = block(rv, in.Block, out)
rv.mappings.Add(in, out)
return out
}
func iteration(rv *resolver, in *ast.Iteration) semantic.Statement {
v := &semantic.Local{Named: semantic.Named(in.Variable.Value)}
rv.mappings.Add(in.Variable, v)
iterable := expression(rv, in.Iterable)
b, ok := iterable.(*semantic.BinaryOp)
if !ok {
rv.errorf(in, "iterable can only be range operator, got %T", b)
return semantic.Invalid{}
} else if b.Operator != ast.OpRange {
rv.errorf(in, "iterable can only be range operator, got %s\n", b.Operator)
}
rv.mappings.Remove(b) // The binary op is no longer referenced directly.
out := &semantic.Iteration{AST: in, Iterator: v, From: b.LHS, To: b.RHS}
v.Type = iterable.ExpressionType()
rv.with(semantic.VoidType, func() {
rv.addNamed(v)
out.Block = block(rv, in.Block, out)
})
rv.mappings.Add(in, out)
return out
}
func mapIteration(rv *resolver, in *ast.MapIteration) *semantic.MapIteration {
i := &semantic.Local{Named: semantic.Named(in.IndexVariable.Value)}
k := &semantic.Local{Named: semantic.Named(in.KeyVariable.Value)}
v := &semantic.Local{Named: semantic.Named(in.ValueVariable.Value)}
rv.mappings.Add(in.IndexVariable, i)
rv.mappings.Add(in.KeyVariable, k)
rv.mappings.Add(in.ValueVariable, v)
out := &semantic.MapIteration{AST: in, IndexIterator: i, KeyIterator: k, ValueIterator: v}
out.Map = expression(rv, in.Map)
if m, ok := out.Map.ExpressionType().(*semantic.Map); ok {
i.Type = semantic.Int32Type
k.Type = m.KeyType
v.Type = m.ValueType
} else {
rv.errorf(in, "key value iteration can only be done on a map, got %T", out.Map.ExpressionType())
i.Type = semantic.InvalidType
k.Type = semantic.InvalidType
v.Type = semantic.InvalidType
}
rv.with(semantic.VoidType, func() {
rv.addNamed(i)
rv.addNamed(k)
rv.addNamed(v)
out.Block = block(rv, in.Block, out)
})
rv.mappings.Add(in, out)
return out
}
func return_(rv *resolver, in *ast.Return, f *semantic.Function) *semantic.Return {
out := &semantic.Return{AST: in}
out.Function = f
rv.with(f.Return.Type, func() {
out.Value = expression(rv, in.Value)
})
inferUnknown(rv, f.Return, out.Value)
rt := out.Value.ExpressionType()
if !assignable(f.Return.Type, rt) {
rv.errorf(in, "cannot assign %s to %s", typename(rt), typename(f.Return.Type))
}
rv.mappings.Add(in, out)
return out
} | gapil/resolver/statement.go | 0.580233 | 0.409693 | statement.go | starcoder |
package json
import (
"encoding/binary"
"fmt"
"math"
time2 "time"
"github.com/yorkie-team/yorkie/pkg/document/time"
)
type ValueType int
const (
Null ValueType = iota
Boolean
Integer
Long
Double
String
Bytes
Date
)
// ValueFromBytes parses the given bytes into value.
func ValueFromBytes(valueType ValueType, value []byte) interface{} {
switch valueType {
case Boolean:
if value[0] == 1 {
return true
}
return false
case Integer:
val := int32(binary.LittleEndian.Uint32(value))
return int(val)
case Long:
return int64(binary.LittleEndian.Uint64(value))
case Double:
return math.Float64frombits(binary.LittleEndian.Uint64(value))
case String:
return string(value)
case Bytes:
return value
case Date:
v := int64(binary.LittleEndian.Uint64(value))
return time2.Unix(v, 0)
}
panic("unsupported type")
}
// Primitive represents JSON primitive data type including logical lock.
type Primitive struct {
valueType ValueType
value interface{}
createdAt *time.Ticket
movedAt *time.Ticket
removedAt *time.Ticket
}
// NewPrimitive creates a new instance of Primitive.
func NewPrimitive(value interface{}, createdAt *time.Ticket) *Primitive {
switch val := value.(type) {
case bool:
return &Primitive{
valueType: Boolean,
value: val,
createdAt: createdAt,
}
case int:
if val > math.MaxInt32 || val < math.MinInt32 {
return &Primitive{
valueType: Long,
value: int64(val),
createdAt: createdAt,
}
} else {
return &Primitive{
valueType: Integer,
value: val,
createdAt: createdAt,
}
}
case int64:
return &Primitive{
valueType: Long,
value: val,
createdAt: createdAt,
}
case float64:
return &Primitive{
valueType: Double,
value: val,
createdAt: createdAt,
}
case string:
return &Primitive{
valueType: String,
value: val,
createdAt: createdAt,
}
case []byte:
return &Primitive{
valueType: Bytes,
value: val,
createdAt: createdAt,
}
case time2.Time:
return &Primitive{
valueType: Date,
value: val,
createdAt: createdAt,
}
}
panic("unsupported type")
}
// Bytes creates an array representing the value.
func (p *Primitive) Bytes() []byte {
switch val := p.value.(type) {
case bool:
if val {
return []byte{1}
}
return []byte{0}
case int:
bytes := [4]byte{}
binary.LittleEndian.PutUint32(bytes[:], uint32(val))
return bytes[:]
case int64:
bytes := [8]byte{}
binary.LittleEndian.PutUint64(bytes[:], uint64(val))
return bytes[:]
case float64:
bytes := [8]byte{}
binary.LittleEndian.PutUint64(bytes[:], math.Float64bits(val))
return bytes[:]
case string:
return []byte(val)
case []byte:
return val
case time2.Time:
bytes := [8]byte{}
binary.LittleEndian.PutUint64(bytes[:], uint64(val.UTC().Unix()))
return bytes[:]
}
panic("unsupported type")
}
// Marshal returns the JSON encoding of the value.
func (p *Primitive) Marshal() string {
switch p.valueType {
case Boolean:
return fmt.Sprintf("%t", p.value)
case Integer:
return fmt.Sprintf("%d", p.value)
case Long:
return fmt.Sprintf("%d", p.value)
case Double:
return fmt.Sprintf("%f", p.value)
case String:
return fmt.Sprintf("\"%s\"", p.value)
case Bytes:
// TODO: JSON.stringify({a: new Uint8Array([1,2]), b: 2})
// {"a":{"0":1,"1":2},"b":2}
return fmt.Sprintf("\"%s\"", p.value)
case Date:
return p.value.(time2.Time).Format(time2.RFC3339)
}
panic("unsupported type")
}
// DeepCopy copies itself deeply.
func (p *Primitive) DeepCopy() Element {
primitive := *p
return &primitive
}
// CreatedAt returns the creation time.
func (p *Primitive) CreatedAt() *time.Ticket {
return p.createdAt
}
// MovedAt returns the move time of this element.
func (p *Primitive) MovedAt() *time.Ticket {
return p.movedAt
}
// SetMovedAt sets the move time of this element.
func (p *Primitive) SetMovedAt(movedAt *time.Ticket) {
p.movedAt = movedAt
}
// RemovedAt returns the removal time of this element.
func (p *Primitive) RemovedAt() *time.Ticket {
return p.removedAt
}
// Remove removes this element.
func (p *Primitive) Remove(removedAt *time.Ticket) bool {
if p.removedAt == nil || removedAt.After(p.removedAt) {
p.removedAt = removedAt
return true
}
return false
}
// ValueType returns the type of the value.
func (p *Primitive) ValueType() ValueType {
return p.valueType
}
// IsNumericType checks for numeric types.
func (p *Primitive) IsNumericType() bool {
t := p.valueType
return t == Integer || t == Long || t == Double
} | pkg/document/json/primitive.go | 0.687525 | 0.407216 | primitive.go | starcoder |
package eaprm
import (
"fmt"
"time"
)
// Hour returns a two dimensional array of 1 or zero values, where
// the first dimension of the array is of length 24 and corresponds
// to the hours of the day, and the second dimension of the array is
// the same length as t. Values in the returned array [i,j] will be
// 1 if the hour of day in t[j] equals i, and zero otherwise.
func Hour(t []time.Time) [][]float64 {
o := make([][]float64, 24)
for i := range o {
o[i] = make([]float64, len(t))
}
for j, tt := range t {
o[tt.Hour()][j] = 1
}
return o
}
// Month returns a two dimensional array of 1 or zero values, where
// the first dimension of the array is of length 12 and corresponds
// to the months of the year, and the second dimension of the array is
// the same length as t. Values in the returned array [i,j] will be
// 1 if the month in t[j] equals i, and zero otherwise. January is month 0.
func Month(t []time.Time) [][]float64 {
o := make([][]float64, 12)
for i := range o {
o[i] = make([]float64, len(t))
}
for j, tt := range t {
o[int(tt.Month())-1][j] = 1
}
return o
}
// Weekend returns an array of 1 or zero values, with 1 values for
// members of t that are on a weekend (i.e., Saturday or Sunday) and
// zero otherwise.
func Weekend(t []time.Time) []float64 {
o := make([]float64, len(t))
for j, tt := range t {
if w := tt.Weekday(); w == time.Saturday || w == time.Sunday {
o[j] = 1
}
}
return o
}
// Year returns a two dimensional array of 1 or zero values, where
// the first dimension of the array is of length (end - start), where
// start and end are the first and last years considered, and corresponds
// to the year, and the second dimension of the array is
// the same length as t. Values in the returned array [i,j] will be
// 1 if the year in t[j] equals i, and zero otherwise.
func Year(t []time.Time, start, end int) ([][]float64, error) {
o := make([][]float64, end-start+1)
for i := range o {
o[i] = make([]float64, len(t))
}
for j, tt := range t {
y := tt.Year()
if !(start <= y && y <= end) {
return nil, fmt.Errorf("eaprm: year %d out of range", y)
}
o[y-start][j] = 1
}
return o, nil
} | time.go | 0.675978 | 0.799481 | time.go | starcoder |
package data
import (
"fmt"
"math"
"math/big"
"math/rand"
"strconv"
"github.com/kode4food/ale/types"
"github.com/kode4food/ale/types/basic"
)
type (
// Float represents a 64-bit floating point number
Float float64
// Ratio represents a number having a numerator and denominator
Ratio big.Rat
)
var (
rationalHash = rand.Uint64()
one = big.NewInt(1)
)
// Error messages
const (
ErrExpectedFloat = "value is not a float: %s"
ErrExpectedRatio = "value is not a ratio: %s"
)
// ParseFloat attempts to parse a string representing a float
func ParseFloat(s string) (Number, error) {
res, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, fmt.Errorf(ErrExpectedFloat, s)
}
return Float(res), nil
}
// MustParseFloat forcefully parses a string representing a float
func MustParseFloat(s string) Number {
if res, err := ParseFloat(s); err != nil {
panic(err)
} else {
return res
}
}
// Cmp compares this Float to another Number
func (l Float) Cmp(r Number) Comparison {
if math.IsNaN(float64(l)) {
return Incomparable
}
if rf, ok := r.(Float); ok {
if math.IsNaN(float64(rf)) {
return Incomparable
}
if l > rf {
return GreaterThan
}
if l < rf {
return LessThan
}
return EqualTo
}
pl, pr := purify(l, r)
return pl.Cmp(pr)
}
// Add adds this Float to another Number
func (l Float) Add(r Number) Number {
if rf, ok := r.(Float); ok {
return l + rf
}
pl, pr := purify(l, r)
return pl.Add(pr)
}
// Sub subtracts another Number from this Float
func (l Float) Sub(r Number) Number {
if rf, ok := r.(Float); ok {
return l - rf
}
pl, pr := purify(l, r)
return pl.Sub(pr)
}
// Mul multiplies this Float by another Number
func (l Float) Mul(r Number) Number {
if rf, ok := r.(Float); ok {
return l * rf
}
pl, pr := purify(l, r)
return pl.Mul(pr)
}
// Div divides this Float by another Number
func (l Float) Div(r Number) Number {
if rf, ok := r.(Float); ok {
return l / rf
}
pl, pr := purify(l, r)
return pl.Div(pr)
}
// Mod calculates the remainder of dividing this Float by another Number
func (l Float) Mod(r Number) Number {
if rf, ok := r.(Float); ok {
return Float(math.Mod(float64(l), float64(rf)))
}
pl, pr := purify(l, r)
return pl.Mod(pr)
}
// IsNaN returns whether this Float is not a number
func (l Float) IsNaN() bool {
return math.IsNaN(float64(l))
}
// IsPosInf returns whether this Float represents positive infinity
func (l Float) IsPosInf() bool {
return math.IsInf(float64(l), 1)
}
// IsNegInf returns whether this Float represents negative infinity
func (l Float) IsNegInf() bool {
return math.IsInf(float64(l), -1)
}
// Equal compares this Float to another for equality
func (l Float) Equal(r Value) bool {
if r, ok := r.(Float); ok {
return l == r
}
return false
}
// String converts this Float to a string
func (l Float) String() string {
i := int64(l)
if Float(i) == l {
return fmt.Sprintf("%d.0", i)
}
return fmt.Sprintf("%g", l)
}
// Type returns the Type for this Float Value
func (Float) Type() types.Type {
return basic.Number
}
// HashCode returns a hash code for this Float
func (l Float) HashCode() uint64 {
return rationalHash * uint64(l)
}
// ParseRatio attempts to parse a string representing a ratio
func ParseRatio(s string) (Number, error) {
if res, ok := new(big.Rat).SetString(s); ok {
return maybeWhole(res), nil
}
return nil, fmt.Errorf(ErrExpectedRatio, s)
}
// MustParseRatio forcefully parses a string representing a ratio
func MustParseRatio(s string) Number {
if res, err := ParseRatio(s); err != nil {
panic(err)
} else {
return res
}
}
// Cmp compares this Ratio to another Number
func (l *Ratio) Cmp(r Number) Comparison {
if rr, ok := r.(*Ratio); ok {
lb := (*big.Rat)(l)
rb := (*big.Rat)(rr)
return Comparison(lb.Cmp(rb))
}
pl, pr := purify(l, r)
return pl.Cmp(pr)
}
// Add adds this Ratio to another Number
func (l *Ratio) Add(r Number) Number {
if rr, ok := r.(*Ratio); ok {
lb := (*big.Rat)(l)
rb := (*big.Rat)(rr)
res := new(big.Rat).Add(lb, rb)
return maybeWhole(res)
}
pl, pr := purify(l, r)
return pl.Add(pr)
}
// Sub subtracts another Number from this Ratio
func (l *Ratio) Sub(r Number) Number {
if rr, ok := r.(*Ratio); ok {
lb := (*big.Rat)(l)
rb := (*big.Rat)(rr)
res := new(big.Rat).Sub(lb, rb)
return maybeWhole(res)
}
pl, pr := purify(l, r)
return pl.Sub(pr)
}
// Mul multiplies this Ratio by another Number
func (l *Ratio) Mul(r Number) Number {
if rr, ok := r.(*Ratio); ok {
lb := (*big.Rat)(l)
rb := (*big.Rat)(rr)
res := new(big.Rat).Mul(lb, rb)
return maybeWhole(res)
}
pl, pr := purify(l, r)
return pl.Mul(pr)
}
// Div divides this Ratio by another Number
func (l *Ratio) Div(r Number) Number {
if rr, ok := r.(*Ratio); ok {
lb := (*big.Rat)(l)
rb := (*big.Rat)(rr)
res := new(big.Rat).Quo(lb, rb)
return maybeWhole(res)
}
pl, pr := purify(l, r)
return pl.Div(pr)
}
// Mod calculates the remainder of dividing this Ratio by another Number
func (l *Ratio) Mod(r Number) Number {
if rr, ok := r.(*Ratio); ok {
lb := (*big.Rat)(l)
rb := (*big.Rat)(rr)
n := new(big.Int).Mul(lb.Num(), rb.Denom())
d := new(big.Int).Mul(lb.Denom(), rb.Num())
res := new(big.Rat).SetFrac(new(big.Int).Div(n, d), one)
res = res.Mul(res, rb)
res = res.Sub(lb, res)
return maybeWhole(res)
}
pl, pr := purify(l, r)
return pl.Mod(pr)
}
// IsNaN returns whether this Ratio is not a number
func (*Ratio) IsNaN() bool {
return false
}
// IsPosInf returns whether this Ratio represents positive infinity
func (*Ratio) IsPosInf() bool {
return false
}
// IsNegInf returns whether this Ratio represents negative infinity
func (*Ratio) IsNegInf() bool {
return false
}
// Equal compares this Ratio to another for equality
func (l *Ratio) Equal(r Value) bool {
if l == r {
return true
}
if r, ok := r.(*Ratio); ok {
lb := (*big.Rat)(l)
rb := (*big.Rat)(r)
return lb.Cmp(rb) == 0
}
return false
}
// String converts this Ratio to a string
func (l *Ratio) String() string {
return (*big.Rat)(l).String()
}
// Type returns the Type for this Ratio Value
func (*Ratio) Type() types.Type {
return basic.Number
}
// HashCode returns a hash code for this Ratio
func (l *Ratio) HashCode() uint64 {
br := (*big.Rat)(l)
return rationalHash * br.Num().Uint64() * br.Denom().Uint64()
}
func (l *Ratio) float() Float {
f, _ := (*big.Rat)(l).Float64()
return Float(f)
}
func maybeWhole(r *big.Rat) Number {
if r.IsInt() {
return maybeInteger(r.Num())
}
return (*Ratio)(r)
} | data/rational.go | 0.770637 | 0.40439 | rational.go | starcoder |
package pvoc
import(
"math"
)
var gOmegaPiImag []float64 = make([]float64, 31, 31)
var gOmegaPiReal []float64 = make([]float64, 31, 31)
func init() {
var N uint32 = 2
for i := 0; i < 31; i++ {
NFloat := float64(N)
gOmegaPiImag[i] = math.Sin(twoPi / NFloat)
gOmegaPiReal[i] = -2 * math.Sin(pi / NFloat) * math.Sin(pi / NFloat)
N <<= 1
}
}
// rearranges data array in bit-reversal order in-place
// NOTE: the data array contains data in pairs, so the
// bitreversal operates on data pairs (element i and i + 1)
// and is not exactly a straight bitreversal, if our array is:
// [0, 1, 2, 3, 4, 5, 6, 7] <- array data
// ____ ____ ____ ____
// 0 1 2 3 <- indexes to bit reverse
func bitReverse(data []float64) {
var m int
for i, j := 0, 0; i < len(data); i, j = i + 2, j + m {
if j > i {
realTemp := data[j]
imagTemp := data[j + 1]
data[j] = data[i]
data[j + 1] = data[i + 1]
data[i] = realTemp
data[i + 1] = imagTemp
}
for m = len(data) / 2; m >= 2 && j >= m; m /= 2 {
j -= m
}
}
}
func FFT(data []float64, direction int) {
bitReverse(data)
numberData := len(data)
halfPoints := numberData / 2
var twoMMax int
n := 0
for mMax := 2; mMax < numberData; mMax = twoMMax {
twoMMax = mMax * 2
omegaPiReal := gOmegaPiReal[n]
var omegaPiImag float64
if direction == Time2Freq {
omegaPiImag = gOmegaPiImag[n]
} else {
omegaPiImag = -gOmegaPiImag[n]
}
n++
omegaReal := 1.0
omegaImag := 0.0
for m := 0; m < mMax; m += 2 {
var imagTemp, realTemp float64
for i := m; i < numberData; i += twoMMax {
j := i + mMax
realTemp = omegaReal * data[j] - omegaImag * data[j + 1]
imagTemp = omegaReal * data[j + 1] + omegaImag * data[j]
data[j] = data[i] - realTemp
data[j + 1] = data[i + 1] - imagTemp
data[i] += realTemp
data[i + 1] += imagTemp
}
realTemp = omegaReal
omegaReal = omegaReal * omegaPiReal - omegaImag * omegaPiImag + omegaReal
omegaImag = omegaImag * omegaPiReal + realTemp * omegaPiImag + omegaImag
}
}
if (direction == Freq2Time) {
scale := 1.0 / float64(halfPoints);
for i := 0; i < numberData; i++ {
data[i] *= scale;
}
}
}
// Comment from SoundHack:
// RealFFT - performs fft with only real values and positive frequencies
func RealFFT(data []float64, direction int) {
points := len(data)
halfPoints := points / 2
twoPiOmmax := pi / float64(halfPoints)
omegaReal := 1.0
omegaImag := 0.0
c1 := 0.5
var c2, xr, xi float64
if direction == Time2Freq {
c2 = -0.5
FFT(data, direction)
xr = data[0]
xi = data[1]
} else {
c2 = 0.5
twoPiOmmax = -twoPiOmmax
xr = data[1]
xi = 0.0
data[1] = 0.0
}
temp := math.Sin(0.5 * twoPiOmmax)
omegaPiReal := -2.0 * temp * temp
omegaPiImag := math.Sin(twoPiOmmax)
N2p1 := points + 1;
for i := 0; i <= halfPoints / 2; i++ {
i1 := i * 2
i2 := i1 + 1
i3 := N2p1 - i2
i4 := i3 + 1
if i == 0 {
h1r := c1 * (data[i1] + xr)
h1i := c1 * (data[i2] - xi)
h2r := -c2 * (data[i2] + xi)
h2i := c2 * (data[i1] - xr)
data[i1] = h1r + omegaReal * h2r - omegaImag * h2i
data[i2] = h1i + omegaReal * h2i + omegaImag * h2r
xr = h1r - omegaReal * h2r + omegaImag * h2i
xi = -h1i + omegaReal * h2i + omegaImag * h2r
} else {
h1r := c1 * (data[i1] + data[i3])
h1i := c1 * (data[i2] - data[i4])
h2r := -c2 * (data[i2] + data[i4])
h2i := c2 * (data[i1] - data[i3])
data[i1] = h1r + omegaReal * h2r - omegaImag * h2i
data[i2] = h1i + omegaReal * h2i + omegaImag * h2r
data[i3] = h1r - omegaReal * h2r + omegaImag * h2i
data[i4] = -h1i + omegaReal * h2i + omegaImag * h2r
}
temp = omegaReal
omegaReal = omegaReal * omegaPiReal - omegaImag * omegaPiImag + omegaReal
omegaImag = omegaImag * omegaPiReal + temp * omegaPiImag + omegaImag
}
if direction == Time2Freq {
data[1] = xr
} else {
FFT(data, direction)
}
} | pvoc/fft.go | 0.626353 | 0.494812 | fft.go | starcoder |
package swagger
// It includes links to several endpoints (e.g. /oauth2/token) and exposes information on supported signature algorithms among others.
type WellKnown struct {
// URL of the OP's OAuth 2.0 Authorization Endpoint.
AuthorizationEndpoint string `json:"authorization_endpoint"`
// Boolean value specifying whether the OP supports use of the claims parameter, with true indicating support.
ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
// JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list.
ClaimsSupported []string `json:"claims_supported,omitempty"`
// JSON array containing a list of the OAuth 2.0 Grant Type values that this OP supports.
GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
// JSON array containing a list of the JWS signing algorithms (alg values) supported by the OP for the ID Token to encode the Claims in a JWT.
IdTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"`
// URL using the https scheme with no query or fragment component that the OP asserts as its IssuerURL Identifier. If IssuerURL discovery is supported , this value MUST be identical to the issuer value returned by WebFinger. This also MUST be identical to the iss Claim value in ID Tokens issued from this IssuerURL.
Issuer string `json:"issuer"`
// URL of the OP's JSON Web Key Set [JWK] document. This contains the signing key(s) the RP uses to validate signatures from the OP. The JWK Set MAY also contain the Server's encryption key(s), which are used by RPs to encrypt requests to the Server. When both signing and encryption keys are made available, a use (Key Use) parameter value is REQUIRED for all keys in the referenced JWK Set to indicate each key's intended usage. Although some algorithms allow the same key to be used for both signatures and encryption, doing so is NOT RECOMMENDED, as it is less secure. The JWK x5c parameter MAY be used to provide X.509 representations of keys provided. When used, the bare key values MUST still be present and MUST match those in the certificate.
JwksUri string `json:"jwks_uri"`
// URL of the OP's Dynamic Client Registration Endpoint.
RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
// Boolean value specifying whether the OP supports use of the request parameter, with true indicating support.
RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
// Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support.
RequestUriParameterSupported bool `json:"request_uri_parameter_supported,omitempty"`
// Boolean value specifying whether the OP requires any request_uri values used to be pre-registered using the request_uris registration parameter.
RequireRequestUriRegistration bool `json:"require_request_uri_registration,omitempty"`
// JSON array containing a list of the OAuth 2.0 response_mode values that this OP supports.
ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
// JSON array containing a list of the OAuth 2.0 response_type values that this OP supports. Dynamic OpenID Providers MUST support the code, id_token, and the token id_token Response Type values.
ResponseTypesSupported []string `json:"response_types_supported"`
// SON array containing a list of the OAuth 2.0 [RFC6749] scope values that this server supports. The server MUST support the openid scope value. Servers MAY choose not to advertise some supported scope values even when this parameter is used
ScopesSupported []string `json:"scopes_supported,omitempty"`
// JSON array containing a list of the Subject Identifier types that this OP supports. Valid types include pairwise and public.
SubjectTypesSupported []string `json:"subject_types_supported"`
// URL of the OP's OAuth 2.0 Token Endpoint
TokenEndpoint string `json:"token_endpoint"`
// JSON array containing a list of Client Authentication methods supported by this Token Endpoint. The options are client_secret_post, client_secret_basic, client_secret_jwt, and private_key_jwt, as described in Section 9 of OpenID Connect Core 1.0
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
// URL of the OP's UserInfo Endpoint.
UserinfoEndpoint string `json:"userinfo_endpoint,omitempty"`
// JSON array containing a list of the JWS [JWS] signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT [JWT].
UserinfoSigningAlgValuesSupported []string `json:"userinfo_signing_alg_values_supported,omitempty"`
} | sdk/go/hydra/swagger/well_known.go | 0.849347 | 0.486332 | well_known.go | starcoder |
package collection
import (
"errors"
"reflect"
"runtime"
"sync"
)
// Enumerable offers a means of easily converting into a channel. It is most
// useful for types where mutability is not in question.
type Enumerable interface {
	// Enumerate begins producing values on the returned Enumerator.
	// Closing cancel asks the producer to stop early; producers select on
	// it between sends so an abandoned consumer does not leak a goroutine.
	Enumerate(cancel <-chan struct{}) Enumerator
}

// Enumerator exposes a new syntax for querying familiar data structures.
// It is a receive-only channel: consuming it is a one-shot operation.
type Enumerator <-chan interface{}

// Predicate defines an interface for funcs that make some logical test.
type Predicate func(interface{}) bool

// Transform defines a function which takes a value, and returns some value based on the original.
type Transform func(interface{}) interface{}

// Unfolder defines a function which takes a single value, and exposes many of them as an Enumerator.
type Unfolder func(interface{}) Enumerator

// emptyEnumerable backs the exported Empty value; its Enumerate yields nothing.
type emptyEnumerable struct{}
var (
	// errNoElements signals that an enumeration produced no values when at
	// least one was expected.
	errNoElements = errors.New("enumerator encountered no elements")
	// errMultipleElements signals that an enumeration produced more than
	// one value when at most one was expected.
	errMultipleElements = errors.New("enumerator encountered multiple elements")
)

// IsErrorNoElements determines whether or not the given error is the result of no values being
// returned when one or more were expected.
//
// errors.Is is used instead of a plain equality check so that wrapped
// instances of the sentinel (e.g. produced with fmt.Errorf's %w verb) are
// recognized as well; for unwrapped errors the behavior is identical.
func IsErrorNoElements(err error) bool {
	return errors.Is(err, errNoElements)
}

// IsErrorMultipleElements determines whether or not the given error is the result of multiple values
// being returned when one or zero were expected.
func IsErrorMultipleElements(err error) bool {
	return errors.Is(err, errMultipleElements)
}
// Identity is a trivial Transform which applies no operation on the value.
// It is handy as a default when a caller must supply some Transform.
var Identity Transform = func(value interface{}) interface{} {
	return value
}

// Empty is an Enumerable that has no elements, and will never have any elements.
// Enumerating it yields an already-closed channel.
var Empty Enumerable = &emptyEnumerable{}
// Enumerate implements Enumerable by handing back a channel that is closed
// before it is returned, so every receive completes immediately with no values.
func (emptyEnumerable) Enumerate(_ <-chan struct{}) Enumerator {
	out := make(chan interface{})
	close(out)
	return out
}
// All reports whether every element produced by subject satisfies p.
// Enumeration is cancelled (via the deferred close) as soon as the
// answer is known, so the producer does not run to completion needlessly.
func All(subject Enumerable, p Predicate) bool {
	cancel := make(chan struct{})
	defer close(cancel)

	result := subject.Enumerate(cancel).All(p)
	return result
}
// All reports whether every value received from iter satisfies p.
// It stops reading as soon as a failing value is seen, leaving the
// remainder of the stream unconsumed; callers that need the producer
// to stop should enumerate with a cancel channel.
func (iter Enumerator) All(p Predicate) bool {
	result := true
	for candidate := range iter {
		if p(candidate) {
			continue
		}
		result = false
		break
	}
	return result
}
// Any reports whether iterator produces at least one element.
// A single receive answers the question: ok is false only when the
// channel was closed without delivering a value.
func Any(iterator Enumerable) bool {
	stop := make(chan struct{})
	defer close(stop)

	_, ok := <-iterator.Enumerate(stop)
	return ok
}
// Anyp reports whether iterator produces at least one element that
// satisfies p. Enumeration is cancelled once a match is found.
func Anyp(iterator Enumerable, p Predicate) bool {
	stop := make(chan struct{})
	defer close(stop)

	found := false
	for candidate := range iterator.Enumerate(stop) {
		if p(candidate) {
			found = true
			break
		}
	}
	return found
}
// enumerableSlice adapts a plain slice of values to the Enumerable interface.
type enumerableSlice []interface{}

// Enumerate streams each element of the slice, in order, until either the
// slice is exhausted or cancel is closed, whichever comes first.
func (f enumerableSlice) Enumerate(cancel <-chan struct{}) Enumerator {
	out := make(chan interface{})
	go func() {
		defer close(out)
		for _, item := range f {
			select {
			case <-cancel:
				return
			case out <- item:
			}
		}
	}()
	return out
}
// enumerableValue adapts a reflect.Value holding a slice or array to the
// Enumerable interface, letting AsEnumerable accept typed slices.
type enumerableValue struct {
	reflect.Value
}

// Enumerate streams each element of the underlying slice or array, in index
// order, stopping early when cancel is closed.
func (v enumerableValue) Enumerate(cancel <-chan struct{}) Enumerator {
	out := make(chan interface{})
	go func() {
		defer close(out)
		for i, n := 0, v.Len(); i < n; i++ {
			select {
			case <-cancel:
				return
			case out <- v.Index(i).Interface():
			}
		}
	}()
	return out
}
// AsEnumerable allows for easy conversion of a slice to a re-usable
// Enumerable object. When called with exactly one argument that is itself a
// slice or array, its elements become the enumerated values (via
// reflection); in every other case the arguments themselves are the values.
func AsEnumerable(entries ...interface{}) Enumerable {
	if len(entries) == 1 {
		val := reflect.ValueOf(entries[0])
		switch val.Kind() {
		case reflect.Slice, reflect.Array:
			return enumerableValue{Value: val}
		}
	}
	return enumerableSlice(entries)
}
// AsEnumerable stores the results of an Enumerator so the results can be enumerated over repeatedly.
// NOTE(review): this relies on ToSlice (defined elsewhere in this file) reading
// iter to exhaustion, so the receiver is consumed and cannot be reused.
func (iter Enumerator) AsEnumerable() Enumerable {
	return enumerableSlice(iter.ToSlice())
}
// Count enumerates iter to completion and returns the number of elements
// that satisfy p. No cancel channel is supplied because every value is read.
func Count(iter Enumerable, p Predicate) int {
	results := iter.Enumerate(nil)
	return results.Count(p)
}
// Count consumes iter and reports how many of its values satisfy p.
func (iter Enumerator) Count(p Predicate) int {
	total := 0
	for candidate := range iter {
		if !p(candidate) {
			continue
		}
		total++
	}
	return total
}
// CountAll enumerates iter to completion and returns the total number of
// elements produced. No cancel channel is supplied because every value is read.
func CountAll(iter Enumerable) int {
	results := iter.Enumerate(nil)
	return results.CountAll()
}
// CountAll consumes iter and reports how many values it produced in total.
func (iter Enumerator) CountAll() int {
	seen := 0
	for range iter {
		seen++
	}
	return seen
}
// Discard reads an enumerator to the end but does nothing with it.
// This method should be used in circumstances when it doesn't make sense to explicitly cancel the Enumeration.
// It blocks until the producing goroutine closes the channel.
func (iter Enumerator) Discard() {
	for range iter {
		// Intentionally Left Blank
	}
}
// ElementAt retrieves the item at zero-based position n in iter's stream,
// cancelling the enumeration once it has been read.
func ElementAt(iter Enumerable, n uint) interface{} {
	cancel := make(chan struct{})
	defer close(cancel)

	result := iter.Enumerate(cancel).ElementAt(n)
	return result
}
// ElementAt retrieves the item at zero-based position n, consuming n+1
// values from iter. If the stream closes before position n is reached,
// the zero value (nil) is returned, because receives on a closed channel
// yield the element type's zero value.
func (iter Enumerator) ElementAt(n uint) (retval interface{}) {
	for i := uint(0); i <= n; i++ {
		retval = <-iter
	}
	return
}
// First retrieves just the first item produced by subject, or returns
// errNoElements when the enumeration finishes without yielding anything.
// The enumeration is cancelled after the single receive.
func First(subject Enumerable) (retval interface{}, err error) {
	done := make(chan struct{})
	defer close(done)

	var ok bool
	retval, ok = <-subject.Enumerate(done)
	if !ok {
		err = errNoElements
	}
	return
}
// Last retrieves the item logically behind all other elements in the list.
// The whole stream is consumed, so no cancel channel is supplied.
func Last(iter Enumerable) interface{} {
	results := iter.Enumerate(nil)
	return results.Last()
}
// Last drains iter and returns the final value received; it returns nil
// when the stream closes without producing anything.
func (iter Enumerator) Last() interface{} {
	var last interface{}
	for entry := range iter {
		last = entry
	}
	return last
}
// merger funnels the output of several Enumerables into a single stream.
type merger struct {
	originals []Enumerable
}

// Enumerate implements Enumerable by concurrently draining each original and
// forwarding every value into one channel, which is closed once all sources
// finish. The interleaving of values from different sources is
// non-deterministic.
//
// Each forwarding send selects on cancel: previously a goroutine could block
// forever on `retval <- value` when the consumer cancelled and stopped
// reading, leaking one goroutine per source.
func (m merger) Enumerate(cancel <-chan struct{}) Enumerator {
	retval := make(chan interface{})

	var wg sync.WaitGroup
	wg.Add(len(m.originals))

	for _, item := range m.originals {
		go func(input Enumerable) {
			defer wg.Done()
			for value := range input.Enumerate(cancel) {
				select {
				case retval <- value:
				case <-cancel:
					// Consumer gave up; stop forwarding instead of
					// blocking on a channel nobody reads.
					return
				}
			}
		}(item)
	}

	go func() {
		wg.Wait()
		close(retval)
	}()
	return retval
}
// Merge takes the results as it receives them from several channels and directs
// them into a single channel.
func Merge(channels ...Enumerable) Enumerable {
return merger{
originals: channels,
}
}
// Merge takes the results of this Enumerator and others, and funnels them into
// a single Enumerator. The order of in which they will be combined is non-deterministic.
func (iter Enumerator) Merge(others ...Enumerator) Enumerator {
retval := make(chan interface{})
var wg sync.WaitGroup
wg.Add(len(others) + 1)
funnel := func(prevResult Enumerator) {
for entry := range prevResult {
retval <- entry
}
wg.Done()
}
go funnel(iter)
for _, item := range others {
go funnel(item)
}
go func() {
wg.Wait()
close(retval)
}()
return retval
}
type parallelSelecter struct {
original Enumerable
operation Transform
}
func (ps parallelSelecter) Enumerate(cancel <-chan struct{}) Enumerator {
return ps.original.Enumerate(cancel).ParallelSelect(ps.operation)
}
// ParallelSelect creates an Enumerable which will use all logically available CPUs to
// execute a Transform.
func ParallelSelect(original Enumerable, operation Transform) Enumerable {
return parallelSelecter{
original: original,
operation: operation,
}
}
// ParallelSelect will execute a Transform across all logical CPUs available to the current process.
func (iter Enumerator) ParallelSelect(operation Transform) Enumerator {
if cpus := runtime.NumCPU(); cpus != 1 {
intermediate := iter.splitN(operation, uint(cpus))
return intermediate[0].Merge(intermediate[1:]...)
}
return iter
}
type reverser struct {
original Enumerable
}
// Reverse will enumerate all values of an enumerable, store them in a Stack, then replay them all.
func Reverse(original Enumerable) Enumerable {
return reverser{
original: original,
}
}
func (r reverser) Enumerate(cancel <-chan struct{}) Enumerator {
return r.original.Enumerate(cancel).Reverse()
}
// Reverse returns items in the opposite order it encountered them in.
func (iter Enumerator) Reverse() Enumerator {
cache := NewStack()
for entry := range iter {
cache.Push(entry)
}
retval := make(chan interface{})
go func() {
for !cache.IsEmpty() {
val, _ := cache.Pop()
retval <- val
}
close(retval)
}()
return retval
}
type selecter struct {
original Enumerable
transform Transform
}
func (s selecter) Enumerate(cancel <-chan struct{}) Enumerator {
return s.original.Enumerate(cancel).Select(s.transform)
}
// Select creates a reusable stream of transformed values.
func Select(subject Enumerable, transform Transform) Enumerable {
return selecter{
original: subject,
transform: transform,
}
}
// Select iterates over a list and returns a transformed item.
func (iter Enumerator) Select(transform Transform) Enumerator {
retval := make(chan interface{})
go func() {
for item := range iter {
retval <- transform(item)
}
close(retval)
}()
return retval
}
type selectManyer struct {
original Enumerable
toMany Unfolder
}
func (s selectManyer) Enumerate(cancel <-chan struct{}) Enumerator {
return s.original.Enumerate(cancel).SelectMany(s.toMany)
}
// SelectMany allows for unfolding of values.
func SelectMany(subject Enumerable, toMany Unfolder) Enumerable {
return selectManyer{
original: subject,
toMany: toMany,
}
}
// SelectMany allows for flattening of data structures.
func (iter Enumerator) SelectMany(lister Unfolder) Enumerator {
retval := make(chan interface{})
go func() {
for parent := range iter {
for child := range lister(parent) {
retval <- child
}
}
close(retval)
}()
return retval
}
// Single retreives the only element from a list, or returns nil and an error.
func Single(iter Enumerable) (retval interface{}, err error) {
done := make(chan struct{})
defer close(done)
err = errNoElements
firstPass := true
for entry := range iter.Enumerate(done) {
if firstPass {
retval = entry
err = nil
} else {
retval = nil
err = errMultipleElements
break
}
firstPass = false
}
return
}
// Singlep retrieces the only element from a list that matches a criteria. If
// no match is found, or two or more are found, `Singlep` returns nil and an
// error.
func Singlep(iter Enumerable, pred Predicate) (retval interface{}, err error) {
iter = Where(iter, pred)
return Single(iter)
}
type skipper struct {
original Enumerable
skipCount uint
}
func (s skipper) Enumerate(cancel <-chan struct{}) Enumerator {
return s.original.Enumerate(cancel).Skip(s.skipCount)
}
// Skip creates a reusable stream which will skip the first `n` elements before iterating
// over the rest of the elements in an Enumerable.
func Skip(subject Enumerable, n uint) Enumerable {
return skipper{
original: subject,
skipCount: n,
}
}
// Skip retreives all elements after the first 'n' elements.
func (iter Enumerator) Skip(n uint) Enumerator {
results := make(chan interface{})
go func() {
defer close(results)
i := uint(0)
for entry := range iter {
if i < n {
i++
continue
}
results <- entry
}
}()
return results
}
// splitN creates N Enumerators, each will be a subset of the original Enumerator and will have
// distinct populations from one another.
func (iter Enumerator) splitN(operation Transform, n uint) []Enumerator {
results, cast := make([]chan interface{}, n, n), make([]Enumerator, n, n)
for i := uint(0); i < n; i++ {
results[i] = make(chan interface{})
cast[i] = results[i]
}
go func() {
for i := uint(0); i < n; i++ {
go func(addr uint) {
defer close(results[addr])
for {
read, ok := <-iter
if !ok {
return
}
results[addr] <- operation(read)
}
}(i)
}
}()
return cast
}
type taker struct {
original Enumerable
n uint
}
func (t taker) Enumerate(cancel <-chan struct{}) Enumerator {
return t.original.Enumerate(cancel).Take(t.n)
}
// Take retreives just the first `n` elements from an Enumerable.
func Take(subject Enumerable, n uint) Enumerable {
return taker{
original: subject,
n: n,
}
}
// Take retreives just the first 'n' elements from an Enumerator.
func (iter Enumerator) Take(n uint) Enumerator {
results := make(chan interface{})
go func() {
defer close(results)
i := uint(0)
for entry := range iter {
if i >= n {
return
}
i++
results <- entry
}
}()
return results
}
type takeWhiler struct {
original Enumerable
criteria func(interface{}, uint) bool
}
func (tw takeWhiler) Enumerate(cancel <-chan struct{}) Enumerator {
return tw.original.Enumerate(cancel).TakeWhile(tw.criteria)
}
// TakeWhile creates a reusable stream which will halt once some criteria is no longer met.
func TakeWhile(subject Enumerable, criteria func(interface{}, uint) bool) Enumerable {
return takeWhiler{
original: subject,
criteria: criteria,
}
}
// TakeWhile continues returning items as long as 'criteria' holds true.
func (iter Enumerator) TakeWhile(criteria func(interface{}, uint) bool) Enumerator {
results := make(chan interface{})
go func() {
defer close(results)
i := uint(0)
for entry := range iter {
if !criteria(entry, i) {
return
}
i++
results <- entry
}
}()
return results
}
// Tee creates two Enumerators which will have identical contents as one another.
func (iter Enumerator) Tee() (Enumerator, Enumerator) {
left, right := make(chan interface{}), make(chan interface{})
go func() {
for entry := range iter {
left <- entry
right <- entry
}
close(left)
close(right)
}()
return left, right
}
// ToSlice places all iterated over values in a Slice for easy consumption.
func ToSlice(iter Enumerable) []interface{} {
return iter.Enumerate(nil).ToSlice()
}
// ToSlice places all iterated over values in a Slice for easy consumption.
func (iter Enumerator) ToSlice() []interface{} {
retval := make([]interface{}, 0)
for entry := range iter {
retval = append(retval, entry)
}
return retval
}
type wherer struct {
original Enumerable
filter Predicate
}
func (w wherer) Enumerate(cancel <-chan struct{}) Enumerator {
retval := make(chan interface{})
go func() {
defer close(retval)
for entry := range w.original.Enumerate(cancel) {
if w.filter(entry) {
retval <- entry
}
}
}()
return retval
}
// Where creates a reusable means of filtering a stream.
func Where(original Enumerable, p Predicate) Enumerable {
return wherer{
original: original,
filter: p,
}
}
// Where iterates over a list and returns only the elements that satisfy a
// predicate.
func (iter Enumerator) Where(predicate Predicate) Enumerator {
retval := make(chan interface{})
go func() {
for item := range iter {
if predicate(item) {
retval <- item
}
}
close(retval)
}()
return retval
}
// UCount iterates over a list and keeps a running tally of the number of elements
// satisfy a predicate.
func UCount(iter Enumerable, p Predicate) uint {
return iter.Enumerate(nil).UCount(p)
}
// UCount iterates over a list and keeps a running tally of the number of elements
// satisfy a predicate.
func (iter Enumerator) UCount(p Predicate) uint {
tally := uint(0)
for entry := range iter {
if p(entry) {
tally++
}
}
return tally
}
// UCountAll iterates over a list and keeps a running tally of how many it's seen.
func UCountAll(iter Enumerable) uint {
return iter.Enumerate(nil).UCountAll()
}
// UCountAll iterates over a list and keeps a running tally of how many it's seen.
func (iter Enumerator) UCountAll() uint {
tally := uint(0)
for range iter {
tally++
}
return tally
} | query.go | 0.68941 | 0.479565 | query.go | starcoder |
package canvas
import (
"image/color"
"github.com/tfriedel6/canvas/backend/backendbase"
)
// LinearGradient is a gradient with any number of
// stops and any number of colors. The gradient will
// be drawn such that each point on the gradient
// will correspond to a straight line
type LinearGradient struct {
cv *Canvas
from, to vec
created bool
loaded bool
deleted bool
opaque bool
grad backendbase.LinearGradient
data backendbase.Gradient
}
// RadialGradient is a gradient with any number of
// stops and any number of colors. The gradient will
// be drawn such that each point on the gradient
// will correspond to a circle
type RadialGradient struct {
cv *Canvas
from, to vec
radFrom float64
radTo float64
created bool
loaded bool
deleted bool
opaque bool
grad backendbase.RadialGradient
data backendbase.Gradient
}
// CreateLinearGradient creates a new linear gradient with
// the coordinates from where to where the gradient
// will apply on the canvas
func (cv *Canvas) CreateLinearGradient(x0, y0, x1, y1 float64) *LinearGradient {
return &LinearGradient{
cv: cv,
opaque: true,
from: vec{x0, y0},
to: vec{x1, y1},
data: make(backendbase.Gradient, 0, 20),
}
}
// CreateRadialGradient creates a new radial gradient with
// the coordinates and the radii for two circles. The
// gradient will apply from the first to the second
// circle
func (cv *Canvas) CreateRadialGradient(x0, y0, r0, x1, y1, r1 float64) *RadialGradient {
return &RadialGradient{
cv: cv,
opaque: true,
from: vec{x0, y0},
to: vec{x1, y1},
radFrom: r0,
radTo: r1,
data: make(backendbase.Gradient, 0, 20),
}
}
// Delete explicitly deletes the gradient
func (lg *LinearGradient) Delete() {
if lg.deleted {
return
}
lg.grad.Delete()
lg.grad = nil
lg.deleted = true
}
// Delete explicitly deletes the gradient
func (rg *RadialGradient) Delete() {
if rg.deleted {
return
}
rg.grad.Delete()
rg.grad = nil
rg.deleted = true
}
func (lg *LinearGradient) load() {
if lg.loaded || len(lg.data) < 1 || lg.deleted {
return
}
if !lg.created {
lg.grad = lg.cv.b.LoadLinearGradient(lg.data)
} else {
lg.grad.Replace(lg.data)
}
lg.created = true
lg.loaded = true
}
func (rg *RadialGradient) load() {
if rg.loaded || len(rg.data) < 1 || rg.deleted {
return
}
if !rg.created {
rg.grad = rg.cv.b.LoadRadialGradient(rg.data)
} else {
rg.grad.Replace(rg.data)
}
rg.created = true
rg.loaded = true
}
// AddColorStop adds a color stop to the gradient. The stops
// don't have to be added in order, they are sorted into the
// right place
func (lg *LinearGradient) AddColorStop(pos float64, stopColor ...interface{}) {
var c color.RGBA
lg.data, c = addColorStop(lg.data, pos, stopColor...)
if c.A < 255 {
lg.opaque = false
}
lg.loaded = false
}
// AddColorStop adds a color stop to the gradient. The stops
// don't have to be added in order, they are sorted into the
// right place
func (rg *RadialGradient) AddColorStop(pos float64, stopColor ...interface{}) {
var c color.RGBA
rg.data, c = addColorStop(rg.data, pos, stopColor...)
if c.A < 255 {
rg.opaque = false
}
rg.loaded = false
}
func addColorStop(stops backendbase.Gradient, pos float64, stopColor ...interface{}) (backendbase.Gradient, color.RGBA) {
c, _ := parseColor(stopColor...)
insert := len(stops)
for i, stop := range stops {
if stop.Pos > pos {
insert = i
break
}
}
stops = append(stops, backendbase.GradientStop{})
if insert < len(stops)-1 {
copy(stops[insert+1:], stops[insert:len(stops)-1])
}
stops[insert] = backendbase.GradientStop{Pos: pos, Color: c}
return stops, c
} | gradients.go | 0.771241 | 0.535706 | gradients.go | starcoder |
package static
// Countries contains a static mapping of two-letter country codes to their
// geographic center. Source: https://developers.google.com/public-data/docs/canonical/countries_csv
var Countries = map[string]string{
"AD": "42.546245,1.601554",
"AE": "23.424076,53.847818",
"AF": "33.93911,67.709953",
"AG": "17.060816,-61.796428",
"AI": "18.220554,-63.068615",
"AL": "41.153332,20.168331",
"AM": "40.069099,45.038189",
"AN": "12.226079,-69.060087",
"AO": "-11.202692,17.873887",
"AQ": "-75.250973,-0.071389",
"AR": "-38.416097,-63.616672",
"AS": "-14.270972,-170.132217",
"AT": "47.516231,14.550072",
"AU": "-25.274398,133.775136",
"AW": "12.52111,-69.968338",
"AZ": "40.143105,47.576927",
"BA": "43.915886,17.679076",
"BB": "13.193887,-59.543198",
"BD": "23.684994,90.356331",
"BE": "50.503887,4.469936",
"BF": "12.238333,-1.561593",
"BG": "42.733883,25.48583",
"BH": "25.930414,50.637772",
"BI": "-3.373056,29.918886",
"BJ": "9.30769,2.315834",
"BM": "32.321384,-64.75737",
"BN": "4.535277,114.727669",
"BO": "-16.290154,-63.588653",
"BR": "-14.235004,-51.92528",
"BS": "25.03428,-77.39628",
"BT": "27.514162,90.433601",
"BV": "-54.423199,3.413194",
"BW": "-22.328474,24.684866",
"BY": "53.709807,27.953389",
"BZ": "17.189877,-88.49765",
"CA": "56.130366,-106.346771",
"CC": "-12.164165,96.870956",
"CD": "-4.038333,21.758664",
"CF": "6.611111,20.939444",
"CG": "-0.228021,15.827659",
"CH": "46.818188,8.227512",
"CI": "7.539989,-5.54708",
"CK": "-21.236736,-159.777671",
"CL": "-35.675147,-71.542969",
"CM": "7.369722,12.354722",
"CN": "35.86166,104.195397",
"CO": "4.570868,-74.297333",
"CR": "9.748917,-83.753428",
"CU": "21.521757,-77.781167",
"CV": "16.002082,-24.013197",
"CX": "-10.447525,105.690449",
"CY": "35.126413,33.429859",
"CZ": "49.817492,15.472962",
"DE": "51.165691,10.451526",
"DJ": "11.825138,42.590275",
"DK": "56.26392,9.501785",
"DM": "15.414999,-61.370976",
"DO": "18.735693,-70.162651",
"DZ": "28.033886,1.659626",
"EC": "-1.831239,-78.183406",
"EE": "58.595272,25.013607",
"EG": "26.820553,30.802498",
"EH": "24.215527,-12.885834",
"ER": "15.179384,39.782334",
"ES": "40.463667,-3.74922",
"ET": "9.145,40.489673",
"FI": "61.92411,25.748151",
"FJ": "-16.578193,179.414413",
"FK": "-51.796253,-59.523613",
"FM": "7.425554,150.550812",
"FO": "61.892635,-6.911806",
"FR": "46.227638,2.213749",
"GA": "-0.803689,11.609444",
"GB": "55.378051,-3.435973",
"GD": "12.262776,-61.604171",
"GE": "42.315407,43.356892",
"GF": "3.933889,-53.125782",
"GG": "49.465691,-2.585278",
"GH": "7.946527,-1.023194",
"GI": "36.137741,-5.345374",
"GL": "71.706936,-42.604303",
"GM": "13.443182,-15.310139",
"GN": "9.945587,-9.696645",
"GP": "16.995971,-62.067641",
"GQ": "1.650801,10.267895",
"GR": "39.074208,21.824312",
"GS": "-54.429579,-36.587909",
"GT": "15.783471,-90.230759",
"GU": "13.444304,144.793731",
"GW": "11.803749,-15.180413",
"GY": "4.860416,-58.93018",
"GZ": "31.354676,34.308825",
"HK": "22.396428,114.109497",
"HM": "-53.08181,73.504158",
"HN": "15.199999,-86.241905",
"HR": "45.1,15.2",
"HT": "18.971187,-72.285215",
"HU": "47.162494,19.503304",
"ID": "-0.789275,113.921327",
"IE": "53.41291,-8.24389",
"IL": "31.046051,34.851612",
"IM": "54.236107,-4.548056",
"IN": "20.593684,78.96288",
"IO": "-6.343194,71.876519",
"IQ": "33.223191,43.679291",
"IR": "32.427908,53.688046",
"IS": "64.963051,-19.020835",
"IT": "41.87194,12.56738",
"JE": "49.214439,-2.13125",
"JM": "18.109581,-77.297508",
"JO": "30.585164,36.238414",
"JP": "36.204824,138.252924",
"KE": "-0.023559,37.906193",
"KG": "41.20438,74.766098",
"KH": "12.565679,104.990963",
"KI": "-3.370417,-168.734039",
"KM": "-11.875001,43.872219",
"KN": "17.357822,-62.782998",
"KP": "40.339852,127.510093",
"KR": "35.907757,127.766922",
"KW": "29.31166,47.481766",
"KY": "19.513469,-80.566956",
"KZ": "48.019573,66.923684",
"LA": "19.85627,102.495496",
"LB": "33.854721,35.862285",
"LC": "13.909444,-60.978893",
"LI": "47.166,9.555373",
"LK": "7.873054,80.771797",
"LR": "6.428055,-9.429499",
"LS": "-29.609988,28.233608",
"LT": "55.169438,23.881275",
"LU": "49.815273,6.129583",
"LV": "56.879635,24.603189",
"LY": "26.3351,17.228331",
"MA": "31.791702,-7.09262",
"MC": "43.750298,7.412841",
"MD": "47.411631,28.369885",
"ME": "42.708678,19.37439",
"MG": "-18.766947,46.869107",
"MH": "7.131474,171.184478",
"MK": "41.608635,21.745275",
"ML": "17.570692,-3.996166",
"MM": "21.913965,95.956223",
"MN": "46.862496,103.846656",
"MO": "22.198745,113.543873",
"MP": "17.33083,145.38469",
"MQ": "14.641528,-61.024174",
"MR": "21.00789,-10.940835",
"MS": "16.742498,-62.187366",
"MT": "35.937496,14.375416",
"MU": "-20.348404,57.552152",
"MV": "3.202778,73.22068",
"MW": "-13.254308,34.301525",
"MX": "23.634501,-102.552784",
"MY": "4.210484,101.975766",
"MZ": "-18.665695,35.529562",
"NA": "-22.95764,18.49041",
"NC": "-20.904305,165.618042",
"NE": "17.607789,8.081666",
"NF": "-29.040835,167.954712",
"NG": "9.081999,8.675277",
"NI": "12.865416,-85.207229",
"NL": "52.132633,5.291266",
"NO": "60.472024,8.468946",
"NP": "28.394857,84.124008",
"NR": "-0.522778,166.931503",
"NU": "-19.054445,-169.867233",
"NZ": "-40.900557,174.885971",
"OM": "21.512583,55.923255",
"PA": "8.537981,-80.782127",
"PE": "-9.189967,-75.015152",
"PF": "-17.679742,-149.406843",
"PG": "-6.314993,143.95555",
"PH": "12.879721,121.774017",
"PK": "30.375321,69.345116",
"PL": "51.919438,19.145136",
"PM": "46.941936,-56.27111",
"PN": "-24.703615,-127.439308",
"PR": "18.220833,-66.590149",
"PS": "31.952162,35.233154",
"PT": "39.399872,-8.224454",
"PW": "7.51498,134.58252",
"PY": "-23.442503,-58.443832",
"QA": "25.354826,51.183884",
"RE": "-21.115141,55.536384",
"RO": "45.943161,24.96676",
"RS": "44.016521,21.005859",
"RU": "61.52401,105.318756",
"RW": "-1.940278,29.873888",
"SA": "23.885942,45.079162",
"SB": "-9.64571,160.156194",
"SC": "-4.679574,55.491977",
"SD": "12.862807,30.217636",
"SE": "60.128161,18.643501",
"SG": "1.352083,103.819836",
"SH": "-24.143474,-10.030696",
"SI": "46.151241,14.995463",
"SJ": "77.553604,23.670272",
"SK": "48.669026,19.699024",
"SL": "8.460555,-11.779889",
"SM": "43.94236,12.457777",
"SN": "14.497401,-14.452362",
"SO": "5.152149,46.199616",
"SR": "3.919305,-56.027783",
"ST": "0.18636,6.613081",
"SV": "13.794185,-88.89653",
"SY": "34.802075,38.996815",
"SZ": "-26.522503,31.465866",
"TC": "21.694025,-71.797928",
"TD": "15.454166,18.732207",
"TF": "-49.280366,69.348557",
"TG": "8.619543,0.824782",
"TH": "15.870032,100.992541",
"TJ": "38.861034,71.276093",
"TK": "-8.967363,-171.855881",
"TL": "-8.874217,125.727539",
"TM": "38.969719,59.556278",
"TN": "33.886917,9.537499",
"TO": "-21.178986,-175.198242",
"TR": "38.963745,35.243322",
"TT": "10.691803,-61.222503",
"TV": "-7.109535,177.64933",
"TW": "23.69781,120.960515",
"TZ": "-6.369028,34.888822",
"UA": "48.379433,31.16558",
"UG": "1.373333,32.290275",
"US": "37.09024,-95.712891",
"UY": "-32.522779,-55.765835",
"UZ": "41.377491,64.585262",
"VA": "41.902916,12.453389",
"VC": "12.984305,-61.287228",
"VE": "6.42375,-66.58973",
"VG": "18.420695,-64.639968",
"VI": "18.335765,-64.896335",
"VN": "14.058324,108.277199",
"VU": "-15.376706,166.959158",
"WF": "-13.768752,-177.156097",
"WS": "-13.759029,-172.104629",
"XK": "42.602636,20.902977",
"YE": "15.552727,48.516388",
"YT": "-12.8275,45.166244",
"ZA": "-30.559482,22.937506",
"ZM": "-13.133897,27.849332",
"ZW": "-19.015438,29.154857",
} | static/countries.go | 0.513912 | 0.439026 | countries.go | starcoder |
package mixins
import (
"io"
"github.com/ipld/go-ipld-prime/datamodel"
)
type ListTraits struct {
PkgName string
TypeName string // see doc in kindTraitsGenerator
TypeSymbol string // see doc in kindTraitsGenerator
}
func (ListTraits) Kind() datamodel.Kind {
return datamodel.Kind_List
}
func (g ListTraits) EmitNodeMethodKind(w io.Writer) {
doTemplate(`
func ({{ .TypeSymbol }}) Kind() datamodel.Kind {
return datamodel.Kind_List
}
`, w, g)
}
func (g ListTraits) EmitNodeMethodLookupByString(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodLookupByString(w)
}
func (g ListTraits) EmitNodeMethodLookupBySegment(w io.Writer) {
doTemplate(`
func (n {{ .TypeSymbol }}) LookupBySegment(seg datamodel.PathSegment) (datamodel.Node, error) {
i, err := seg.Index()
if err != nil {
return nil, datamodel.ErrInvalidSegmentForList{TypeName: "{{ .PkgName }}.{{ .TypeName }}", TroubleSegment: seg, Reason: err}
}
return n.LookupByIndex(i)
}
`, w, g)
}
func (g ListTraits) EmitNodeMethodMapIterator(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodMapIterator(w)
}
func (g ListTraits) EmitNodeMethodIsAbsent(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodIsAbsent(w)
}
func (g ListTraits) EmitNodeMethodIsNull(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodIsNull(w)
}
func (g ListTraits) EmitNodeMethodAsBool(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodAsBool(w)
}
func (g ListTraits) EmitNodeMethodAsInt(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodAsInt(w)
}
func (g ListTraits) EmitNodeMethodAsFloat(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodAsFloat(w)
}
func (g ListTraits) EmitNodeMethodAsString(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodAsString(w)
}
func (g ListTraits) EmitNodeMethodAsBytes(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodAsBytes(w)
}
func (g ListTraits) EmitNodeMethodAsLink(w io.Writer) {
kindTraitsGenerator{g.PkgName, g.TypeName, g.TypeSymbol, datamodel.Kind_List}.emitNodeMethodAsLink(w)
}
type ListAssemblerTraits struct {
PkgName string
TypeName string // see doc in kindAssemblerTraitsGenerator
AppliedPrefix string // see doc in kindAssemblerTraitsGenerator
}
func (ListAssemblerTraits) Kind() datamodel.Kind {
return datamodel.Kind_List
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodBeginMap(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodBeginMap(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodAssignNull(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodAssignNull(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodAssignBool(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodAssignBool(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodAssignInt(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodAssignInt(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodAssignFloat(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodAssignFloat(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodAssignString(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodAssignString(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodAssignBytes(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodAssignBytes(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodAssignLink(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodAssignLink(w)
}
func (g ListAssemblerTraits) EmitNodeAssemblerMethodPrototype(w io.Writer) {
kindAssemblerTraitsGenerator{g.PkgName, g.TypeName, g.AppliedPrefix, datamodel.Kind_List}.emitNodeAssemblerMethodPrototype(w)
} | schema/gen/go/mixins/listGenMixin.go | 0.520009 | 0.437523 | listGenMixin.go | starcoder |
package wl
const DisplayErrorSinceVersion = 1
const DisplayDeleteIdSinceVersion = 1
const DisplaySyncSinceVersion = 1
const DisplayGetRegistrySinceVersion = 1
const RegistryGlobalSinceVersion = 1
const RegistryGlobalRemoveSinceVersion = 1
const RegistryBindSinceVersion = 1
const CallbackDoneSinceVersion = 1
const CompositorCreateSurfaceSinceVersion = 1
const CompositorCreateRegionSinceVersion = 1
const ShmPoolCreateBufferSinceVersion = 1
const ShmPoolDestroySinceVersion = 1
const ShmPoolResizeSinceVersion = 1
const ShmFormatSinceVersion = 1
const ShmCreatePoolSinceVersion = 1
const BufferReleaseSinceVersion = 1
const BufferDestroySinceVersion = 1
const DataOfferOfferSinceVersion = 1
const DataOfferSourceActionsSinceVersion = 3
const DataOfferActionSinceVersion = 3
const DataOfferAcceptSinceVersion = 1
const DataOfferReceiveSinceVersion = 1
const DataOfferDestroySinceVersion = 1
const DataOfferFinishSinceVersion = 3
const DataOfferSetActionsSinceVersion = 3
const DataSourceTargetSinceVersion = 1
const DataSourceSendSinceVersion = 1
const DataSourceCancelledSinceVersion = 1
const DataSourceDndDropPerformedSinceVersion = 3
const DataSourceDndFinishedSinceVersion = 3
const DataSourceActionSinceVersion = 3
const DataSourceOfferSinceVersion = 1
const DataSourceDestroySinceVersion = 1
const DataSourceSetActionsSinceVersion = 3
const DataDeviceDataOfferSinceVersion = 1
const DataDeviceEnterSinceVersion = 1
const DataDeviceLeaveSinceVersion = 1
const DataDeviceMotionSinceVersion = 1
const DataDeviceDropSinceVersion = 1
const DataDeviceSelectionSinceVersion = 1
const DataDeviceStartDragSinceVersion = 1
const DataDeviceSetSelectionSinceVersion = 1
const DataDeviceReleaseSinceVersion = 2
const DataDeviceManagerCreateDataSourceSinceVersion = 1
const DataDeviceManagerGetDataDeviceSinceVersion = 1
const ShellGetShellSurfaceSinceVersion = 1
const ShellSurfacePingSinceVersion = 1
const ShellSurfaceConfigureSinceVersion = 1
const ShellSurfacePopupDoneSinceVersion = 1
const ShellSurfacePongSinceVersion = 1
const ShellSurfaceMoveSinceVersion = 1
const ShellSurfaceResizeSinceVersion = 1
const ShellSurfaceSetToplevelSinceVersion = 1
const ShellSurfaceSetTransientSinceVersion = 1
const ShellSurfaceSetFullscreenSinceVersion = 1
const ShellSurfaceSetPopupSinceVersion = 1
const ShellSurfaceSetMaximizedSinceVersion = 1
const ShellSurfaceSetTitleSinceVersion = 1
const ShellSurfaceSetClassSinceVersion = 1
const SurfaceEnterSinceVersion = 1
const SurfaceLeaveSinceVersion = 1
const SurfaceDestroySinceVersion = 1
const SurfaceAttachSinceVersion = 1
const SurfaceDamageSinceVersion = 1
const SurfaceFrameSinceVersion = 1
const SurfaceSetOpaqueRegionSinceVersion = 1
const SurfaceSetInputRegionSinceVersion = 1
const SurfaceCommitSinceVersion = 1
const SurfaceSetBufferTransformSinceVersion = 2
const SurfaceSetBufferScaleSinceVersion = 3
const SurfaceDamageBufferSinceVersion = 4
const SeatCapabilitiesSinceVersion = 1
const SeatNameSinceVersion = 2
const SeatGetPointerSinceVersion = 1
const SeatGetKeyboardSinceVersion = 1
const SeatGetTouchSinceVersion = 1
const SeatReleaseSinceVersion = 5
const PointerAxisSourceWheelTiltSinceVersion = 6
const PointerEnterSinceVersion = 1
const PointerLeaveSinceVersion = 1
const PointerMotionSinceVersion = 1
const PointerButtonSinceVersion = 1
const PointerAxisSinceVersion = 1
const PointerFrameSinceVersion = 5
const PointerAxisSourceSinceVersion = 5
const PointerAxisStopSinceVersion = 5
const PointerAxisDiscreteSinceVersion = 5
const PointerSetCursorSinceVersion = 1
const PointerReleaseSinceVersion = 3
const KeyboardKeymapSinceVersion = 1
const KeyboardEnterSinceVersion = 1
const KeyboardLeaveSinceVersion = 1
const KeyboardKeySinceVersion = 1
const KeyboardModifiersSinceVersion = 1
const KeyboardRepeatInfoSinceVersion = 4
const KeyboardReleaseSinceVersion = 3
const TouchDownSinceVersion = 1
const TouchUpSinceVersion = 1
const TouchMotionSinceVersion = 1
const TouchFrameSinceVersion = 1
const TouchCancelSinceVersion = 1
const TouchShapeSinceVersion = 6
const TouchOrientationSinceVersion = 6
const TouchReleaseSinceVersion = 3
const OutputGeometrySinceVersion = 1
const OutputModeSinceVersion = 1
const OutputDoneSinceVersion = 2
const OutputScaleSinceVersion = 2
const OutputReleaseSinceVersion = 3
const RegionDestroySinceVersion = 1
const RegionAddSinceVersion = 1
const RegionSubtractSinceVersion = 1
const SubcompositorDestroySinceVersion = 1
const SubcompositorGetSubsurfaceSinceVersion = 1
const SubsurfaceDestroySinceVersion = 1
const SubsurfaceSetPositionSinceVersion = 1
const SubsurfacePlaceAboveSinceVersion = 1
const SubsurfacePlaceBelowSinceVersion = 1
const SubsurfaceSetSyncSinceVersion = 1
const SubsurfaceSetDesyncSinceVersion = 1 | wl/constants.go | 0.5 | 0.400398 | constants.go | starcoder |
package diff
import (
"fmt"
"reflect"
"github.com/arr-ai/wbnf/parser"
)
// Report is the common interface implemented by every diff result in this
// package. Equal reports whether the compared values were found identical.
type Report interface {
	Equal() bool
}
// InterfaceDiff records a pair of values that were compared for equality.
// The zero value represents "no difference found".
type InterfaceDiff struct {
	A, B interface{}
}

// Equal reports whether the two recorded values compare equal with ==.
func (d InterfaceDiff) Equal() bool {
	return d.A == d.B
}

// diffInterfaces compares a and b, returning a populated InterfaceDiff when
// they differ and the zero InterfaceDiff when they are equal.
func diffInterfaces(a, b interface{}) InterfaceDiff {
	d := InterfaceDiff{A: a, B: b}
	if d.Equal() {
		return InterfaceDiff{}
	}
	return d
}
//-----------------------------------------------------------------------------
// GrammarDiff describes how two grammars differ: the rules unique to each
// side, plus a per-rule TermDiff for shared rules whose terms differ.
type GrammarDiff struct {
	OnlyInA []parser.Rule
	OnlyInB []parser.Rule
	Prods   map[parser.Rule]TermDiff
}

// Equal reports whether no differences were recorded on either side.
func (d GrammarDiff) Equal() bool {
	if len(d.OnlyInA) > 0 || len(d.OnlyInB) > 0 {
		return false
	}
	return len(d.Prods) == 0
}
// Grammars computes the difference between two grammars. Rules present in
// only one grammar are listed in OnlyInA/OnlyInB; rules present in both are
// compared term-by-term and recorded in Prods only when they differ.
//
// As a sanity check, the result is cross-validated against
// reflect.DeepEqual; a mismatch indicates a bug in the diff logic and
// triggers a panic.
func Grammars(a, b parser.Grammar) GrammarDiff {
	result := GrammarDiff{Prods: map[parser.Rule]TermDiff{}}
	for rule, aTerm := range a {
		bTerm, ok := b[rule]
		if !ok {
			result.OnlyInA = append(result.OnlyInA, rule)
			continue
		}
		if td := Terms(aTerm, bTerm); !td.Equal() {
			result.Prods[rule] = td
		}
	}
	for rule := range b {
		if _, found := a[rule]; !found {
			result.OnlyInB = append(result.OnlyInB, rule)
		}
	}
	// Self-check: the diff's notion of equality must agree with DeepEqual.
	if result.Equal() != reflect.DeepEqual(a, b) {
		panic(fmt.Sprintf(
			"diff.Equal() == %v != %v == reflect.DeepEqual(a, b): %#v\n%#v\n%#v",
			result.Equal(), reflect.DeepEqual(a, b), result, a, b))
	}
	return result
}
//-----------------------------------------------------------------------------
// TermDiff is a Report describing the difference between two parser terms.
type TermDiff interface {
	Report
}
// TypesDiffer is a TermDiff reporting that the two compared terms had
// different dynamic types; the embedded InterfaceDiff holds the type names.
type TypesDiffer struct {
	InterfaceDiff
}

// Equal always reports false: terms of differing types are never equal.
func (TypesDiffer) Equal() bool {
	return false
}
// Terms compares two parser terms and returns a TermDiff describing how they
// differ. If the terms have different dynamic types, a TypesDiffer carrying
// the two type names is returned immediately; otherwise the comparison is
// dispatched to the diff function for that term type.
//
// Panics if it encounters a term type it does not know about.
func Terms(a, b parser.Term) TermDiff {
	if reflect.TypeOf(a) != reflect.TypeOf(b) {
		// NOTE(review): reflect.TypeOf returns nil for a nil interface, so a
		// single nil term would panic on .String() here — assumed not to
		// occur in practice; confirm against callers.
		return TypesDiffer{
			InterfaceDiff: diffInterfaces(
				reflect.TypeOf(a).String(),
				reflect.TypeOf(b).String(),
			),
		}
	}
	switch a := a.(type) {
	case parser.Rule:
		return diffRules(a, b.(parser.Rule))
	case parser.S:
		return diffSes(a, b.(parser.S))
	case parser.RE:
		return diffREs(a, b.(parser.RE))
	case parser.Seq:
		return diffSeqs(a, b.(parser.Seq))
	case parser.Oneof:
		return diffOneofs(a, b.(parser.Oneof))
	case parser.Stack:
		return diffTowers(a, b.(parser.Stack))
	case parser.Delim:
		return diffDelims(a, b.(parser.Delim))
	case parser.Quant:
		return diffQuants(a, b.(parser.Quant))
	case parser.Named:
		return diffNameds(a, b.(parser.Named))
	case parser.ScopedGrammar:
		return diffScopedGrammars(a, b.(parser.ScopedGrammar))
	case parser.CutPoint:
		// CutPoint is transparent for diffing: compare the wrapped terms.
		return Terms(a.Term, b.(parser.CutPoint).Term)
	case parser.ExtRef:
		// Bug fix: previously both sides of this comparison used `a`, so two
		// distinct ExtRefs always diffed as equal. Compare a against b.
		return diffSes(parser.S(string(a)), parser.S(string(b.(parser.ExtRef))))
	case parser.REF:
		return diffRefs(a, b.(parser.REF))
	default:
		panic(fmt.Errorf("unknown term type: %v %[1]T", a))
	}
}
//-----------------------------------------------------------------------------
// RuleDiff records a pair of rule names to be compared.
type RuleDiff struct {
	A, B parser.Rule
}
// Equal reports whether the two rule names are identical.
func (d RuleDiff) Equal() bool {
	return d.A == d.B
}
// diffRules pairs two rules; the comparison itself happens in Equal.
func diffRules(a, b parser.Rule) RuleDiff {
	return RuleDiff{A: a, B: b}
}
//-----------------------------------------------------------------------------
// SDiff records a pair of string-literal terms to be compared.
type SDiff struct {
	A, B parser.S
}
// Equal reports whether the two string terms are identical.
func (d SDiff) Equal() bool {
	return d.A == d.B
}
// diffSes pairs two string terms; the comparison itself happens in Equal.
func diffSes(a, b parser.S) SDiff {
	return SDiff{A: a, B: b}
}
//-----------------------------------------------------------------------------
// REDiff records a pair of regular-expression terms to be compared.
type REDiff struct {
	A, B parser.RE
}
// Equal reports whether the two regexp terms are identical.
func (d REDiff) Equal() bool {
	return d.A == d.B
}
// diffREs pairs two regexp terms; the comparison itself happens in Equal.
func diffREs(a, b parser.RE) REDiff {
	return REDiff{A: a, B: b}
}
//-----------------------------------------------------------------------------
// RefDiff records a pair of grammar references to be compared.
type RefDiff struct {
	A, B parser.REF
}
// Equal reports whether the two references agree: same identifier and
// equal default terms, where two nil defaults count as equal.
func (d RefDiff) Equal() bool {
	if d.A.Ident != d.B.Ident {
		return false
	}
	// Handle the nil cases explicitly before calling Terms: passing a
	// single nil Default would reach the TypesDiffer branch, where
	// reflect.TypeOf(nil).String() panics on the nil reflect.Type.
	if d.A.Default == nil || d.B.Default == nil {
		return d.A.Default == nil && d.B.Default == nil
	}
	return Terms(d.A.Default, d.B.Default).Equal()
}
// diffRefs pairs two REF terms; the comparison itself happens in Equal.
func diffRefs(a, b parser.REF) RefDiff {
	return RefDiff{A: a, B: b}
}
//-----------------------------------------------------------------------------
// termsesDiff records the difference between two term slices: a diff of
// their lengths plus the element-wise diffs that came out unequal.
type termsesDiff struct {
	Len InterfaceDiff
	Terms []TermDiff
}
// Equal reports whether the slices had equal length and no unequal
// elements (Terms stays nil when nothing differed).
func (d termsesDiff) Equal() bool {
	return d.Len.Equal() && d.Terms == nil
}
// diffTermses compares two term slices element-wise. When the lengths
// differ, the longer slice is truncated so only the common prefix is
// compared; the length mismatch itself is captured in Len.
func diffTermses(a, b []parser.Term) termsesDiff {
	var tsd termsesDiff
	tsd.Len = diffInterfaces(len(a), len(b))
	lenDiff := len(a) - len(b)
	switch {
	case lenDiff < 0:
		b = b[:len(a)]
	case lenDiff > 0:
		a = a[:len(b)]
	}
	for i, term := range a {
		if td := Terms(term, b[i]); !td.Equal() {
			tsd.Terms = append(tsd.Terms, td)
		}
	}
	return tsd
}
// SeqDiff is a termsesDiff over the elements of two sequence terms.
type SeqDiff termsesDiff
// Equal reports whether the two sequences were element-wise equal.
func (d SeqDiff) Equal() bool {
	return (termsesDiff(d)).Equal()
}
// diffSeqs compares two sequence terms element-wise.
func diffSeqs(a, b parser.Seq) SeqDiff {
	return SeqDiff(diffTermses(a, b))
}
// OneofDiff is a termsesDiff over the alternatives of two one-of terms.
type OneofDiff termsesDiff
// Equal reports whether the two one-of terms were element-wise equal.
func (d OneofDiff) Equal() bool {
	return (termsesDiff(d)).Equal()
}
// diffOneofs compares two one-of terms element-wise.
func diffOneofs(a, b parser.Oneof) OneofDiff {
	return OneofDiff(diffTermses(a, b))
}
// TowerDiff is a termsesDiff over the levels of two stack terms.
type TowerDiff termsesDiff
// Equal reports whether the two stacks were element-wise equal.
func (d TowerDiff) Equal() bool {
	return (termsesDiff(d)).Equal()
}
// diffTowers compares two stack terms element-wise.
func diffTowers(a, b parser.Stack) TowerDiff {
	return TowerDiff(diffTermses(a, b))
}
//-----------------------------------------------------------------------------
// DelimDiff records the field-by-field difference between two delimited
// (separator-interleaved) terms.
type DelimDiff struct {
	Term TermDiff
	Sep TermDiff
	Assoc InterfaceDiff
	CanStartWithSep InterfaceDiff
	CanEndWithSep InterfaceDiff
}
// Equal reports whether every field of the two Delim terms was equal.
func (d DelimDiff) Equal() bool {
	return d.Term.Equal() &&
		d.Sep.Equal() &&
		d.Assoc.Equal() &&
		d.CanStartWithSep.Equal() &&
		d.CanEndWithSep.Equal()
}
// diffDelims compares two Delim terms field by field.
func diffDelims(a, b parser.Delim) DelimDiff {
	return DelimDiff{
		Term: Terms(a.Term, b.Term),
		Sep: Terms(a.Sep, b.Sep),
		Assoc: diffInterfaces(a.Assoc, b.Assoc),
		CanStartWithSep: diffInterfaces(a.CanStartWithSep, b.CanStartWithSep),
		CanEndWithSep: diffInterfaces(a.CanEndWithSep, b.CanEndWithSep),
	}
}
//-----------------------------------------------------------------------------
// QuantDiff records the difference between two quantified terms: the
// quantified term itself plus the min/max repetition bounds.
type QuantDiff struct {
	Term TermDiff
	Min InterfaceDiff
	Max InterfaceDiff
}
// Equal reports whether term and both bounds were equal.
func (d QuantDiff) Equal() bool {
	return d.Term.Equal() && d.Min.Equal() && d.Max.Equal()
}
// diffQuants compares two Quant terms field by field.
func diffQuants(a, b parser.Quant) QuantDiff {
	return QuantDiff{
		Term: Terms(a.Term, b.Term),
		Min: diffInterfaces(a.Min, b.Min),
		Max: diffInterfaces(a.Max, b.Max),
	}
}
//-----------------------------------------------------------------------------
// NamedDiff records the difference between two named terms: the name
// and the wrapped term.
type NamedDiff struct {
	Name InterfaceDiff
	Term TermDiff
}
// Equal reports whether both name and term were equal.
func (d NamedDiff) Equal() bool {
	return d.Name.Equal() && d.Term.Equal()
}
// diffNameds compares two Named terms field by field.
func diffNameds(a, b parser.Named) NamedDiff {
	return NamedDiff{
		Name: diffInterfaces(a.Name, b.Name),
		Term: Terms(a.Term, b.Term),
	}
}
//-----------------------------------------------------------------------------
// ScopedGrammarDiff records the difference between two scoped grammars:
// their root terms and their embedded grammars.
type ScopedGrammarDiff struct {
	Term TermDiff
	Grammar GrammarDiff
}
// Equal reports whether both the root terms and the grammars were equal.
func (d ScopedGrammarDiff) Equal() bool {
	return d.Term.Equal() && d.Grammar.Equal()
}
func diffScopedGrammars(a, b parser.ScopedGrammar) ScopedGrammarDiff {
return ScopedGrammarDiff{
Term: Terms(a.Term, b.Term),
Grammar: Grammars(a.Grammar, b.Grammar),
}
} | parser/diff/terms.go | 0.604632 | 0.461017 | terms.go | starcoder |
package layer
import tf "github.com/galeone/tensorflow/tensorflow/go"
// LSpatialDropout1D holds the configuration of a Keras SpatialDropout1D
// layer, built up via the chained Set* methods and serialized by
// GetKerasLayerConfig.
type LSpatialDropout1D struct {
	dtype DataType // element dtype (defaults to Float32)
	inputs []Layer // upstream layers wired into this one
	name string // layer name (defaults to a generated unique name)
	noiseShape interface{} // optional noise shape forwarded to Keras
	rate float64 // fraction of 1D feature maps to drop
	seed interface{} // optional RNG seed forwarded to Keras
	shape tf.Shape // tensor shape, when known
	trainable bool // whether the layer participates in training
	layerWeights []*tf.Tensor // pretrained weights, if any
}
// SpatialDropout1D returns a new SpatialDropout1D layer config with the
// given drop rate and defaults (Float32 dtype, generated unique name,
// trainable); override individual settings via the Set* methods.
func SpatialDropout1D(rate float64) *LSpatialDropout1D {
	return &LSpatialDropout1D{
		dtype: Float32,
		name: UniqueName("spatial_dropout1d"),
		noiseShape: nil,
		rate: rate,
		seed: nil,
		trainable: true,
	}
}
// SetDtype overrides the element dtype; returns the layer for chaining.
func (l *LSpatialDropout1D) SetDtype(dtype DataType) *LSpatialDropout1D {
	l.dtype = dtype
	return l
}
// SetName overrides the auto-generated layer name.
func (l *LSpatialDropout1D) SetName(name string) *LSpatialDropout1D {
	l.name = name
	return l
}
// SetNoiseShape sets the optional noise shape forwarded to Keras.
func (l *LSpatialDropout1D) SetNoiseShape(noiseShape interface{}) *LSpatialDropout1D {
	l.noiseShape = noiseShape
	return l
}
// SetSeed sets the optional RNG seed forwarded to Keras.
func (l *LSpatialDropout1D) SetSeed(seed interface{}) *LSpatialDropout1D {
	l.seed = seed
	return l
}
// SetShape records the layer's tensor shape.
func (l *LSpatialDropout1D) SetShape(shape tf.Shape) *LSpatialDropout1D {
	l.shape = shape
	return l
}
// SetTrainable toggles whether the layer participates in training.
func (l *LSpatialDropout1D) SetTrainable(trainable bool) *LSpatialDropout1D {
	l.trainable = trainable
	return l
}
// SetLayerWeights sets pretrained weights to load into the layer.
func (l *LSpatialDropout1D) SetLayerWeights(layerWeights []*tf.Tensor) *LSpatialDropout1D {
	l.layerWeights = layerWeights
	return l
}
// GetShape returns the configured tensor shape.
func (l *LSpatialDropout1D) GetShape() tf.Shape {
	return l.shape
}
// GetDtype returns the configured element dtype.
func (l *LSpatialDropout1D) GetDtype() DataType {
	return l.dtype
}
// SetInputs wires the given layers as this layer's inputs and returns
// the receiver as a generic Layer.
func (l *LSpatialDropout1D) SetInputs(inputs ...Layer) Layer {
	l.inputs = inputs
	return l
}
// GetInputs returns the wired input layers.
func (l *LSpatialDropout1D) GetInputs() []Layer {
	return l.inputs
}
// GetName returns the layer name.
func (l *LSpatialDropout1D) GetName() string {
	return l.name
}
// GetLayerWeights returns any pretrained weights set on the layer.
func (l *LSpatialDropout1D) GetLayerWeights() []*tf.Tensor {
	return l.layerWeights
}
// jsonConfigLSpatialDropout1D mirrors the JSON layout Keras uses to
// serialize a layer within a model config.
type jsonConfigLSpatialDropout1D struct {
	ClassName string `json:"class_name"`
	Name string `json:"name"`
	Config map[string]interface{} `json:"config"`
	InboundNodes [][][]interface{} `json:"inbound_nodes"`
}
// GetKerasLayerConfig returns the Keras-compatible JSON configuration
// for this layer, including its inbound-node wiring.
func (l *LSpatialDropout1D) GetKerasLayerConfig() interface{} {
	// One inbound node entry per wired input layer.
	nodes := [][]interface{}{}
	for _, input := range l.inputs {
		node := []interface{}{
			input.GetName(),
			0,
			0,
			map[string]bool{},
		}
		nodes = append(nodes, node)
	}
	config := map[string]interface{}{
		"dtype": l.dtype.String(),
		"name": l.name,
		"noise_shape": l.noiseShape,
		"rate": l.rate,
		"seed": l.seed,
		"trainable": l.trainable,
	}
	return jsonConfigLSpatialDropout1D{
		ClassName: "SpatialDropout1D",
		Name: l.name,
		Config: config,
		InboundNodes: [][][]interface{}{nodes},
	}
}
func (l *LSpatialDropout1D) GetCustomLayerDefinition() string {
return ``
} | layer/SpatialDropout1D.go | 0.67104 | 0.402833 | SpatialDropout1D.go | starcoder |
package main
import (
reflect "reflect"
groo "github.com/grolang/gro/ops"
assert "github.com/grolang/gro/assert"
time "time"
)
import ops "github.com/grolang/gro/ops"
// Package-level var/const/type declarations — presumably fixtures for
// the transpiler's output tests (none are referenced in this chunk).
var v int
const c = 2
type t int
// init checks the groo operator package's numeric typing rules: Go
// literals keep their native types, groo operators promote to int64,
// big-int multiplication yields BigInt, division yields BigRat, and
// float overflow saturates to inf. Basic arithmetic follows.
func init() {
	{
		assert.AssertTrue(groo.IsEqual(groo.Mod(reflect.TypeOf(7), groo.MakeText("v")), groo.MakeText("int")))
		assert.AssertTrue(groo.IsEqual(groo.Mod(reflect.TypeOf(groo.Identity(7)), groo.MakeText("v")), groo.MakeText("int64")))
		assert.AssertTrue(groo.IsEqual(groo.Mod(reflect.TypeOf(groo.Negate(7)), groo.MakeText("v")), groo.MakeText("int64")))
		var a = 0x100000000
		assert.AssertTrue(groo.IsEqual(groo.Mod(reflect.TypeOf(groo.Identity(a)), groo.MakeText("v")), groo.MakeText("int64")))
		assert.AssertTrue(groo.IsEqual(groo.Mod(reflect.TypeOf(groo.Mult(a, a)), groo.MakeText("v")), groo.MakeText("ops.BigInt")))
		assert.AssertTrue(groo.IsEqual(groo.Mod(reflect.TypeOf(groo.Divide(7, 9)), groo.MakeText("v")), groo.MakeText("ops.BigRat")))
		assert.AssertTrue(groo.IsEqual(groo.Mod(reflect.TypeOf(1.1e250), groo.MakeText("v")), groo.MakeText("float64")))
		assert.AssertTrue(groo.IsEqual(groo.Mult(1.1e250, 1.1e250), inf))
	}
	assert.AssertTrue(groo.IsEqual(groo.Plus(1, 3), 4))
	assert.AssertTrue(groo.IsEqual(groo.Plus(3, 4), 7))
	assert.AssertTrue(groo.IsEqual(groo.Minus(9, 2), 7))
	assert.AssertTrue(groo.IsEqual(groo.Plus(4, 5), 9))
}
// retFloat returns the constant 1.234 boxed in an empty interface.
func retFloat() any {
	var result any = 1.234
	return result
}
//unary reflect-operator
func init() {
assert.AssertTrue(groo.IsEqual(retFloat(), 1.234))
assert.AssertTrue(groo.IsEqual(groo.Divide(37, 23), groo.Divide(37, (groo.Plus(20, 3)))))
assert.AssertTrue(groo.IsEqual(groo.Negate(0x80), groo.Minus(groo.Negate(0x40), 0x40)))
assert.AssertTrue(groo.IsEqual(0xff, 0xff))
assert.AssertTrue(groo.IsEqual(123456789, groo.Plus(groo.Plus(groo.Mult(123, 1000000), groo.Mult(456, 1000)), 789)))
assert.AssertTrue(groo.IsEqual(groo.Mod(7, 4), 3))
assert.AssertTrue(groo.IsEqual(groo.Plus(1.2, 3.4i), groo.Plus(1.2, 3.4i)))
func() {
assert.AssertTrue(groo.IsEqual((groo.And(true, func() interface{} {
return false
})), false))
assert.AssertTrue(groo.IsEqual(groo.Negate(true), false))
assert.AssertTrue(groo.IsEqual(groo.Negate(false), true))
assert.AssertTrue(groo.IsEqual(groo.Negate(nil), nil))
}()
func() {
assert.AssertTrue(groo.IsEqual((groo.And(true, func() interface{} {
return false
})), false))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Plus(false, false), false))
assert.AssertTrue(groo.IsEqual(groo.Plus(true, false), true))
assert.AssertTrue(groo.IsEqual(groo.Plus(false, true), true))
assert.AssertTrue(groo.IsEqual(groo.Plus(true, true), true))
assert.AssertTrue(groo.IsEqual(groo.Minus(false, false), true))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Mult(false, false), false))
assert.AssertTrue(groo.IsEqual(groo.Mult(true, false), false))
assert.AssertTrue(groo.IsEqual(groo.Mult(false, true), false))
assert.AssertTrue(groo.IsEqual(groo.Mult(true, true), true))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Divide(1, nil), nil))
assert.AssertTrue(groo.IsEqual(groo.Divide(1, true), false))
assert.AssertTrue(groo.IsEqual(groo.Divide(1, false), true))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Divide(false, false), false))
assert.AssertTrue(groo.IsEqual(groo.Divide(true, false), true))
assert.AssertTrue(groo.IsEqual(groo.Divide(false, true), false))
assert.AssertTrue(groo.IsEqual(groo.Divide(true, true), false))
}()
func() {
assert.AssertTrue(groo.IsEqual(inf, inf))
assert.AssertTrue(groo.IsEqual(groo.Divide(1, 0), inf))
assert.AssertTrue(groo.IsEqual(groo.Divide(1, 1), 1))
assert.AssertTrue(groo.IsEqual(0, 0))
assert.AssertTrue(groo.IsEqual(groo.Divide(1, inf), 0))
assert.AssertTrue(groo.IsEqual(groo.Divide(nil, nil), nil))
assert.AssertTrue(groo.IsEqual(groo.Divide(0, 0), nil))
assert.AssertTrue(groo.IsEqual(groo.Divide(inf, inf), nil))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod(4, 0), inf))
assert.AssertTrue(groo.IsEqual(groo.Mod(4, inf), 4))
assert.AssertTrue(groo.IsEqual(groo.Mod(0, 0), inf))
assert.AssertTrue(groo.IsEqual(groo.Mod(inf, inf), inf))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Plus(time.Date(2017, 12, 12, 0, 0, 0, 0, time.UTC), 7), time.Date(2017, 12, 19, 0, 0, 0, 0, time.UTC)))
assert.AssertTrue(groo.IsNotEqual(groo.Plus(time.Date(2017, 12, 12, 0, 0, 0, 0, time.UTC), 7), time.Date(2017, 12, 20, 0, 0, 0, 0, time.UTC)))
assert.AssertTrue(groo.IsEqual(groo.Minus(time.Date(2017, 12, 12, 0, 0, 0, 0, time.UTC), 5), time.Date(2017, 12, 7, 0, 0, 0, 0, time.UTC)))
assert.AssertTrue(groo.IsEqual(groo.Minus(time.Date(2017, 12, 12, 0, 0, 0, 0, time.UTC), 5), time.Date(2017, 12, 07, 0, 0, 0, 0, time.UTC)))
assert.AssertTrue(groo.IsEqual(groo.Plus(7, time.Date(2017, 12, 12, 0, 0, 0, 0, time.UTC)), time.Date(2017, 12, 19, 0, 0, 0, 0, time.UTC)))
assert.AssertTrue(groo.IsGreaterThan(time.Date(2017, 12, 12, 0, 0, 0, 0, time.UTC), time.Date(2003, 8, 29, 0, 0, 0, 0, time.UTC)))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.NewPair(0, 5), groo.NewPair(0, 5)))
assert.AssertTrue(groo.IsNotEqual(groo.NewPair(0, 5), groo.NewPair(2, 13)))
assert.AssertTrue(groo.IsNotEqual(groo.NewPair(0, 5), groo.NewPair(0, 13)))
assert.AssertTrue(groo.IsNotEqual(groo.NewPair(0, 5), groo.NewPair(2, 5)))
assert.AssertTrue(groo.IsEqual(groo.NewPair(0, 20), groo.NewPair(0, 20)))
assert.AssertTrue(groo.IsEqual(groo.NewPair(12, groo.Inf), groo.NewPair(12, inf)))
assert.AssertTrue(groo.IsEqual(groo.NewPair(0, groo.Inf), groo.NewPair(0, inf)))
}()
assert.AssertTrue(groo.IsEqual(groo.Plus(groo.MakeText(""), groo.Mod(groo.Runex("\\t"), groo.MakeText("v"))), groo.MakeText("\t")))
assert.AssertTrue(groo.IsEqual(groo.Plus(groo.MakeText(""), groo.Mod(groo.Runex("\\t"), groo.MakeText("v"))), groo.MakeText(` `)))
assert.AssertTrue(groo.IsEqual(groo.MakeText(` `), groo.Mod(groo.Runex("\\t"), groo.MakeText("v"))))
assert.AssertTrue(groo.IsEqual(ops.Sprf(groo.MakeText("Hey, you!")), groo.MakeText("Hey, you!")))
assert.AssertTrue(groo.IsEqual(groo.Plus(groo.Plus(groo.Plus(groo.MakeText("Hello, "), groo.Mod(groo.MakeText("to"), groo.MakeText("s "))), groo.Mod(groo.MakeText("the"), groo.MakeText("s "))), groo.Mod(groo.MakeText("world"), groo.MakeText("s!"))), groo.MakeText("Hello, to the world!")))
assert.AssertTrue(groo.IsEqual(groo.Plus(groo.MakeText("there are "), groo.Mod(6, groo.MakeText("d green bottles"))), groo.MakeText("there are 6 green bottles")))
assert.AssertTrue(groo.IsEqual(groo.Mod(123, groo.MakeText("5d has type %[1]T.")), groo.MakeText(" 123 has type int64.")))
assert.AssertTrue(groo.IsEqual(groo.Plus(groo.Mod(123, groo.MakeText("5d has type %[1]T, and ")), groo.Mod(true, groo.MakeText("t is true."))), groo.MakeText(" 123 has type int64, and true is true.")))
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Runex("a"), groo.MakeText("v")), groo.MakeText("a")))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Runex("ab"), groo.MakeText("v")), groo.MakeText("[ab]")))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.RightShift(groo.MakeText("a"), groo.MakeText("a|b"))), groo.MakeText("v")), groo.MakeText("{{{ 0 1 a}}}")))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(0, groo.Inf), groo.Runex("a"))), groo.MakeText("v")), groo.MakeText(`a*`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.Runex("a"), groo.NewPair(0, groo.Inf))), groo.MakeText("v")), groo.MakeText(`a*?`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(0, groo.Inf), groo.MakeText("a"))), groo.MakeText("v")), groo.MakeText(`(?:\Qa\E)*`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.MakeText("a"), groo.NewPair(0, groo.Inf))), groo.MakeText("v")), groo.MakeText(`(?:\Qa\E)*?`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(0, groo.Inf), groo.Runex("a-z"))), groo.MakeText("v")), groo.MakeText(`[a-z]*`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(0, groo.Inf), groo.MakeText("abc"))), groo.MakeText("v")), groo.MakeText(`(?:\Qabc\E)*`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(0, groo.Inf), (groo.Alt(groo.Runex("a"), groo.Runex("b"))))), groo.MakeText("v")), groo.MakeText(`[ab]*`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(0, groo.Inf), (groo.Alt(groo.Runex("a"), groo.MakeText("bc"))))), groo.MakeText("v")), groo.MakeText(`(?:a|(?:\Qbc\E))*`)))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(1, groo.Inf), groo.Runex("a"))), groo.MakeText("v")), groo.MakeText(`a+`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.Runex("a"), groo.NewPair(1, groo.Inf))), groo.MakeText("v")), groo.MakeText(`a+?`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(0, 1), groo.Runex("a"))), groo.MakeText("v")), groo.MakeText(`a?`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(3, groo.Runex("a"))), groo.MakeText("v")), groo.MakeText(`a{3}`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.Runex("a"), 3)), groo.MakeText("v")), groo.MakeText(`a{3}?`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(3, 5), groo.Runex("a"))), groo.MakeText("v")), groo.MakeText(`a{3,5}`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.NewPair(3, groo.Inf), groo.Runex("a"))), groo.MakeText("v")), groo.MakeText(`a{3,}`)))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Not(groo.Runex("a")), groo.MakeText("v")), groo.MakeText(`[^a]`)))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Not(groo.Runex("2a-c")), groo.MakeText("v")), groo.MakeText(`[^2a-c]`)))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Not(groo.Runex("^2a-c")), groo.MakeText("v")), groo.MakeText(`[2a-c]`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Alt(groo.Runex("a"), groo.Runex("b"))), groo.MakeText("v")), groo.MakeText(`[ab]`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Alt(groo.Runex("a"), groo.MakeText("fg"))), groo.MakeText("v")), groo.MakeText(`a|(?:\Qfg\E)`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Alt(groo.Not(groo.Runex("a-e")), groo.Not(groo.Runex("g-j")))), groo.MakeText("v")), groo.MakeText(`[^a-eg-j]`)))
}()
p := ops.Spr
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Seq(groo.Runex("a"), groo.Runex("b"))), groo.MakeText("v")), groo.MakeText(`ab`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Seq(groo.Runex("a-e"), groo.MakeText("fgh"))), groo.MakeText("v")), groo.MakeText(`[a-e]fgh`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Seq((groo.Alt(groo.Runex("f"), groo.Runex("g"))), (groo.Alt(groo.Runex("J"), groo.Runex("k"))))), groo.MakeText("v")), groo.MakeText(`[fg][Jk]`)))
assert.AssertTrue(groo.IsEqual(p(groo.Seq((groo.Alt(groo.Runex("f"), groo.Runex("g"))), (groo.Alt(groo.Runex("J"), groo.Runex("k"))))), groo.MakeText(`[fg][Jk]`)))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Identity([]int{1, 2, 3}), groo.MakeText("T")), groo.MakeText("ops.Slice")))
assert.AssertTrue(groo.IsEqual(groo.Mod([]int{1, 2, 3}, groo.MakeText("T")), groo.MakeText("ops.Slice")))
assert.AssertTrue(groo.IsEqual(groo.MakeText("ops.Slice"), groo.Mod([]int{1, 2, 3}, groo.MakeText("T"))))
assert.AssertTrue(groo.IsEqual(groo.Mod([]int{1, 2, 3}, groo.MakeText("v")), groo.MakeText("{1, 2, 3}")))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Plus([]int{1, 2, 3}, []any{groo.MakeText("4"), groo.MakeText("5")})), groo.MakeText("v")), groo.MakeText(`{1, 2, 3, 4, 5}`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.LeftShift([]int{1, 2, 3}, groo.MakeText("456"))), groo.MakeText("v")), groo.MakeText(`{1, 2, 3, 456}`)))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.LeftShift([]int{1, 2, 3}, []any{groo.MakeText("4"), groo.MakeText("5")})), groo.MakeText("v")), groo.MakeText(`{1, 2, 3, {4, 5}}`)))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.LeftShift(groo.InitMap(groo.NewPair(11, groo.MakeText("Hey")), groo.NewPair(12, groo.MakeText("Wow")), groo.NewPair(13, groo.MakeText("Dude"))), groo.NewPair(14, groo.MakeText("Man"))), ops.InitMap(groo.NewPair(11, groo.MakeText("Hey")), groo.NewPair(12, groo.MakeText("Wow")), groo.NewPair(13, groo.MakeText("Dude")), groo.NewPair(14, groo.MakeText("Man")))))
assert.AssertTrue(groo.IsEqual(groo.LeftShift(groo.InitMap(groo.NewPair(11, groo.MakeText("Hey")), groo.NewPair(12, groo.MakeText("Wow")), groo.NewPair(13, groo.MakeText("Dude"))), groo.NewPair(14, groo.MakeText("Man"))), groo.InitMap(groo.NewPair(11, groo.MakeText("Hey")), groo.NewPair(12, groo.MakeText("Wow")), groo.NewPair(13, groo.MakeText("Dude")), groo.NewPair(14, groo.MakeText("Man")))))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.LeftShift(groo.InitMap(groo.NewPair(11, groo.MakeText("Hey")), groo.NewPair(12, groo.MakeText("Wow")), groo.NewPair(13, groo.MakeText("Dude"))), groo.NewPair(14, groo.MakeText("Man"))), groo.MakeText("v")), groo.MakeText("{11: Hey, 12: Wow, 13: Dude, 14: Man}")))
assert.AssertTrue(groo.IsEqual(groo.Plus(groo.InitMap(groo.NewPair(11, groo.MakeText("Hey")), groo.NewPair(12, groo.MakeText("Wow")), groo.NewPair(13, groo.MakeText("Dude"))), groo.InitMap(groo.NewPair(14, groo.MakeText("Man")), groo.NewPair(12, groo.MakeText("NewWow")), groo.NewPair(8, groo.MakeText("OldMan")))), groo.InitMap(groo.NewPair(11, groo.MakeText("Hey")), groo.NewPair(13, groo.MakeText("Dude")), groo.NewPair(14, groo.MakeText("Man")), groo.NewPair(12, groo.MakeText("NewWow")), groo.NewPair(8, groo.MakeText("OldMan")))))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod(7, groo.MakeText("v %[1]T")), groo.MakeText("7 int64")))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Reflect(7), groo.MakeText("v %[1]T")), groo.MakeText("7 reflect.Value")))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.Reflect(groo.Reflect(7)), groo.MakeText("v %[1]T")), groo.MakeText("7 int64")))
}()
{
a := groo.Identity([]any{11, 12, 13})
groo.LeftShiftAssign(&a, 14)
assert.AssertTrue(groo.IsEqual(groo.Mod(a, groo.MakeText("v")), groo.MakeText("{11, 12, 13, 14}")))
}
{
a := groo.Identity([]any{11, 12, 13})
a = groo.LeftShift(a, 14)
assert.AssertTrue(groo.IsEqual(groo.Mod(a, groo.MakeText("v")), groo.MakeText("{11, 12, 13, 14}")))
}
{
a := groo.Identity([]any{groo.MakeText("Hey"), groo.MakeText("Wow"), groo.MakeText("Pal"), groo.MakeText("Mate"), groo.MakeText("Dude")})
assert.AssertTrue(groo.IsEqual(groo.Mod((*groo.GetIndex(&a, 2)), groo.MakeText("v")), groo.MakeText("Pal")))
assert.AssertTrue(groo.IsEqual(groo.Mod((*groo.GetIndex(&a, 2, 4)), groo.MakeText("v")), groo.MakeText("{Pal, Mate}")))
}
{
a := groo.MakeText("Hey there!")
assert.AssertTrue(groo.IsEqual(groo.Mod((*groo.GetIndex(&a, 4)), groo.MakeText("v")), groo.MakeText("t")))
assert.AssertTrue(groo.IsEqual(groo.Mod((*groo.GetIndex(&a, 4)), groo.MakeText("v %[1]T")), groo.MakeText("t utf88.Codepoint")))
assert.AssertTrue(groo.IsEqual(groo.Mod((*groo.GetIndex(&a, 4, 7)), groo.MakeText("v")), groo.MakeText("the")))
}
{
x := (groo.RightShift(groo.MakeText("a"), groo.MakeText("a|b")))
assert.AssertTrue(groo.IsEqual(groo.Mod((*groo.GetIndex(&(*groo.GetIndex(&x, 0)), 0)).(ops.RegexMatch).Match, groo.MakeText("v")), groo.MakeText("a")))
}
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.Mult(groo.MakeText("abc"), 3)), groo.MakeText("v %[1]T")), groo.MakeText(`(?:\Qabc\E){3}? ops.Regex`)))
}()
func() {
assert.AssertTrue(groo.IsEqual(func(groo_it ...interface{}) interface{} {
h := groo.MakeText("H")
I := groo.MakeText("i")
return groo.Plus(h, I)
}(), groo.MakeText("Hi")))
assert.AssertTrue(groo.IsEqual(groo.Mod(func(groo_it ...interface{}) interface{} {
return groo.Mult(groo_it[0], groo_it[1])
}(3, 4), groo.MakeText("d")), groo.MakeText("12")))
}()
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.RightShift(groo.MakeText("abc"), groo.Runex("a"))), groo.MakeText("v")), groo.MakeText("{{{ 0 1 a}}}")))
assert.AssertTrue(groo.IsEqual(groo.Mod((groo.RightShift(groo.MakeText("bcd"), groo.Runex("a"))), groo.MakeText("v")), groo.MakeText("{}")))
}()
{
{
letterA := func(groo_it ...interface{}) interface{} {
return groo.Runex("a")
}
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("abc"), letterA), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("bcd"), letterA), nil))
}
}
{
{
letterA := func(groo_it ...interface{}) interface{} {
return func(groo_it ...interface{}) interface{} {
return groo.Runex("a")
}
}
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("abc"), letterA), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("defg"), letterA), nil))
}
}
{
var expr func(...any) any
paren := func(groo_it ...interface{}) interface{} {
return groo.LeftSeq(groo.RightSeq(groo.MakeText("("), func(groo_it ...interface{}) interface{} {
return expr
}), groo.Runex(")"))
} //call &> or <& on a func returning a func returning a Parser
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), paren())
} //call | on a Parser
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("a"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("(a)"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("((a))"), expr()), groo.Runex("a")))
}()
}
{
var expr func(...any) any
paren := func(groo_it ...interface{}) interface{} {
return groo.LeftSeq(groo.RightSeq(groo.MakeText("("), expr), groo.Runex(")"))
} //call &> or <& on a func returning a Parser
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), paren())
} //call | on a Parser
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("a"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("(a)"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("((a))"), expr()), groo.Runex("a")))
}()
}
{
var expr func(...any) any
paren := func(groo_it ...interface{}) interface{} {
return groo.LeftSeq(groo.RightSeq(groo.MakeText("("), func(groo_it ...interface{}) interface{} {
return expr
}), groo.Runex(")"))
}
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), paren)
} //call | on func returning a Parser
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("a"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("(a)"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("((a))"), expr()), groo.Runex("a")))
}()
}
{
var expr func(...any) any
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), (groo.LeftSeq(groo.RightSeq(groo.MakeText("("), func(groo_it ...interface{}) interface{} {
return expr
}), groo.Runex(")"))))
}
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("a"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("(a)"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("((a))"), expr()), groo.Runex("a")))
}()
}
{
var expr func(...any) any
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), (groo.LeftSeq(groo.RightSeq(groo.MakeText("("), expr), groo.Runex(")"))))
} //call &> or <& on a func returning a Parser
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("a"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("(a)"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("((a))"), expr()), groo.Runex("a")))
}()
}
{
var expr func(...any) any
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), (groo.LeftSeq(groo.RightSeq(groo.MakeText("("), func(groo_it ...interface{}) interface{} {
return expr
}), groo.Runex(")"))))
}
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("a"), expr), groo.Runex("a")))
}()
}
{
var expr func(...any) any
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), groo.Mult(groo.NewPair(2, 2), (groo.LeftSeq(groo.RightSeq(groo.MakeText("("), func(groo_it ...interface{}) interface{} {
return expr
}), groo.Runex(")")))))
} //call * on a Parser
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("aa"), expr()), groo.Runex("a")))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.RightShift(groo.MakeText("(a)(a)"), expr()), groo.MakeText("v")), groo.MakeText("{a, a}")))
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.RightShift(groo.MakeText("((a)(a))((a)(a))"), expr()), groo.MakeText("v")), groo.MakeText("{{a, a}, {a, a}}")))
}()
}
{
var expr func(...any) any
expr = func(groo_it ...interface{}) interface{} {
return groo.Alt(groo.Runex("a"), groo.Mult(groo.NewPair(2, 2), func(groo_it ...interface{}) interface{} {
return groo.LeftSeq(groo.RightSeq(groo.MakeText("("), func(groo_it ...interface{}) interface{} {
return expr
}), groo.Runex(")"))
}))
} //call * on func returning Parser
func() {
assert.AssertTrue(groo.IsEqual(groo.RightShift(groo.MakeText("aa"), expr()), groo.Runex("a")))
}()
}
{
{
manyA := groo.Mult(groo.NewPair(3, 3), func(groo_it ...interface{}) interface{} {
return groo.Runex("a")
}) //call * on func returning codepoint
func() {
assert.AssertTrue(groo.IsEqual(groo.Mod(groo.RightShift(groo.MakeText("aaab"), manyA), groo.MakeText("v")), groo.MakeText("{a, a, a}")))
}()
}
}
}
// main is empty: all checks in this file run from init functions.
func main() {}
// Aliases matching the Gro transpiler's builtin type names.
type (
	any = interface{}
	void = struct{}
)
// inf is shorthand for the ops package's infinity value.
var inf = groo.Inf
func init() {
groo.UseUtf88 = true
} | dynamic/basic.go | 0.575827 | 0.699158 | basic.go | starcoder |
package convert
import (
"github.com/hashicorp/go-cty/cty"
)
// conversion is an internal variant of Conversion that carries around
// a cty.Path to be used in error responses, so failures can report
// exactly where inside a nested value the conversion went wrong.
type conversion func(cty.Value, cty.Path) (cty.Value, error)
// getConversion returns a conversion from in to out, or nil if no such
// conversion is possible. The type-pair-specific conversion from
// getConversionKnown is wrapped with shared handling for marked,
// unknown, and null values so individual conversions need not repeat it.
func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion {
	conv := getConversionKnown(in, out, unsafe)
	if conv == nil {
		return nil
	}
	// Wrap the conversion in some standard checks that we don't want to
	// have to repeat in every conversion function.
	var ret conversion
	ret = func(in cty.Value, path cty.Path) (cty.Value, error) {
		if in.IsMarked() {
			// We must unmark during the conversion and then re-apply the
			// same marks to the result. Recursing through ret (not conv)
			// means the unknown/null checks below also see the unmarked
			// value.
			in, inMarks := in.Unmark()
			v, err := ret(in, path)
			if v != cty.NilVal {
				v = v.WithMarks(inMarks)
			}
			return v, err
		}
		if out == cty.DynamicPseudoType {
			// Conversion to DynamicPseudoType always just passes through verbatim.
			return in, nil
		}
		if !in.IsKnown() {
			// An unknown input yields an unknown of the target type.
			return cty.UnknownVal(out), nil
		}
		if in.IsNull() {
			// We'll pass through nulls, albeit type converted, and let
			// the caller deal with whatever handling they want to do in
			// case null values are considered valid in some applications.
			return cty.NullVal(out), nil
		}
		return conv(in, path)
	}
	return ret
}
// getConversionKnown returns a conversion between the two given types,
// dispatching on the kind of each, or nil when no conversion between
// the pair is possible under the given safety mode. Callers go through
// getConversion, which adds the shared mark/unknown/null handling.
func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion {
	switch {
	case out == cty.DynamicPseudoType:
		// Conversion *to* DynamicPseudoType means that the caller wishes
		// to allow any type in this position, so we'll produce a do-nothing
		// conversion that just passes through the value as-is.
		return dynamicPassthrough
	case unsafe && in == cty.DynamicPseudoType:
		// Conversion *from* DynamicPseudoType means that we have a value
		// whose type isn't yet known during type checking. For these we will
		// assume that conversion will succeed and deal with any errors that
		// result (which is why we can only do this when "unsafe" is set).
		return dynamicFixup(out)
	case in.IsPrimitiveType() && out.IsPrimitiveType():
		// Primitive-to-primitive conversions come from lookup tables;
		// the unsafe table is only consulted in unsafe mode.
		conv := primitiveConversionsSafe[in][out]
		if conv != nil {
			return conv
		}
		if unsafe {
			return primitiveConversionsUnsafe[in][out]
		}
		return nil
	case out.IsObjectType() && in.IsObjectType():
		return conversionObjectToObject(in, out, unsafe)
	case out.IsTupleType() && in.IsTupleType():
		return conversionTupleToTuple(in, out, unsafe)
	case out.IsListType() && (in.IsListType() || in.IsSetType()):
		inEty := in.ElementType()
		outEty := out.ElementType()
		if inEty.Equals(outEty) {
			// This indicates that we're converting from set to list with
			// the same element type, so we don't need an element converter.
			return conversionCollectionToList(outEty, nil)
		}
		convEty := getConversion(inEty, outEty, unsafe)
		if convEty == nil {
			return nil
		}
		return conversionCollectionToList(outEty, convEty)
	case out.IsSetType() && (in.IsListType() || in.IsSetType()):
		if in.IsListType() && !unsafe {
			// Conversion from list to set is unsafe because it will lose
			// information: the ordering will not be preserved, and any
			// duplicate elements will be conflated.
			return nil
		}
		inEty := in.ElementType()
		outEty := out.ElementType()
		convEty := getConversion(inEty, outEty, unsafe)
		if inEty.Equals(outEty) {
			// This indicates that we're converting from list to set with
			// the same element type, so we don't need an element converter.
			return conversionCollectionToSet(outEty, nil)
		}
		if convEty == nil {
			return nil
		}
		return conversionCollectionToSet(outEty, convEty)
	case out.IsMapType() && in.IsMapType():
		inEty := in.ElementType()
		outEty := out.ElementType()
		convEty := getConversion(inEty, outEty, unsafe)
		if convEty == nil {
			return nil
		}
		return conversionCollectionToMap(outEty, convEty)
	case out.IsListType() && in.IsTupleType():
		outEty := out.ElementType()
		return conversionTupleToList(in, outEty, unsafe)
	case out.IsSetType() && in.IsTupleType():
		outEty := out.ElementType()
		return conversionTupleToSet(in, outEty, unsafe)
	case out.IsMapType() && in.IsObjectType():
		outEty := out.ElementType()
		return conversionObjectToMap(in, outEty, unsafe)
	case out.IsObjectType() && in.IsMapType():
		if !unsafe {
			// Converting a map to an object is an "unsafe" conversion,
			// because we don't know if all the map keys will correspond to
			// object attributes.
			return nil
		}
		return conversionMapToObject(in, out, unsafe)
	case in.IsCapsuleType() || out.IsCapsuleType():
		if !unsafe {
			// Capsule types can only participate in "unsafe" conversions,
			// because we don't know enough about their conversion behaviors
			// to be sure that they will always be safe.
			return nil
		}
		if in.Equals(out) {
			// conversion to self is never allowed
			return nil
		}
		if out.IsCapsuleType() {
			if fn := out.CapsuleOps().ConversionTo; fn != nil {
				return conversionToCapsule(in, out, fn)
			}
		}
		if in.IsCapsuleType() {
			if fn := in.CapsuleOps().ConversionFrom; fn != nil {
				return conversionFromCapsule(in, out, fn)
			}
		}
		// No conversion operation is available, then.
		return nil
	default:
		return nil
	}
}
// retConversion wraps a conversion (internal type) so it can be returned
// as a Conversion (public type).
func retConversion(conv conversion) Conversion {
	// Preserve nil-ness: a nil internal conversion means "no conversion
	// available", and callers test the public Conversion against nil too.
	if conv == nil {
		return nil
	}
	// Adapt the internal signature (which carries a cty.Path for error
	// reporting) by starting the conversion with an empty path.
	return func(in cty.Value) (cty.Value, error) {
		return conv(in, cty.Path(nil))
	}
} | vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go | 0.640074 | 0.427337 | conversion.go | starcoder
// Package deepequalerrors defines an Analyzer that checks for the use
// of reflect.DeepEqual with error values.
package deepequalerrors
import (
"go/ast"
"go/types"
"github.com/kdy1/tools/go/analysis"
"github.com/kdy1/tools/go/analysis/passes/inspect"
"github.com/kdy1/tools/go/ast/inspector"
"github.com/kdy1/tools/go/types/typeutil"
)
// Doc is the user-facing documentation for this analyzer (shown by
// `go vet -help` and `analysistool help`).
const Doc = `check for calls of reflect.DeepEqual on error values
The deepequalerrors checker looks for calls of the form:
reflect.DeepEqual(err1, err2)
where err1 and err2 are errors. Using reflect.DeepEqual to compare
errors is discouraged.`
// Analyzer reports uses of reflect.DeepEqual on error values. It requires
// the inspect pass, which provides a pre-built AST inspector.
var Analyzer = &analysis.Analyzer{
	Name: "deepequalerrors",
	Doc: Doc,
	Requires: []*analysis.Analyzer{inspect.Analyzer},
	Run: run,
}
// run implements the analysis: it walks every call expression and reports
// calls of reflect.DeepEqual whose both arguments may contain errors.
func run(pass *analysis.Pass) (interface{}, error) {
	ins := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)

	filter := []ast.Node{
		(*ast.CallExpr)(nil),
	}
	ins.Preorder(filter, func(n ast.Node) {
		call := n.(*ast.CallExpr)
		fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func)
		if !ok || fn.FullName() != "reflect.DeepEqual" {
			return
		}
		if hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) {
			pass.ReportRangef(call, "avoid using reflect.DeepEqual with errors")
		}
	})
	return nil, nil
}
var errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
// hasError reports whether the type of e contains the type error.
// See containsError, below, for the meaning of "contains".
// hasError reports whether the type of e contains the type error.
// See containsError, below, for the meaning of "contains".
func hasError(pass *analysis.Pass, e ast.Expr) bool {
	if tv, ok := pass.TypesInfo.Types[e]; ok {
		return containsError(tv.Type)
	}
	// No type information recorded for this expression; assume it is fine.
	return false
}
// Report whether any type that typ could store and that could be compared is the
// error type. This includes typ itself, as well as the types of struct field, slice
// and array elements, map keys and elements, and pointers. It does not include
// channel types (incomparable), arg and result types of a Signature (not stored), or
// methods of a named or interface type (not stored).
// containsError reports whether typ, or any type reachable from typ through
// comparable storage (struct fields, slice/array elements, map keys and
// values, pointers, named underlyings), is the error interface type.
func containsError(typ types.Type) bool {
	// Track types being processed, to avoid infinite recursion.
	// Using types as keys here is OK because we are checking for the identical pointer, not
	// type identity. See analysis/passes/printf/types.go.
	inProgress := make(map[types.Type]bool)
	var check func(t types.Type) bool
	check = func(t types.Type) bool {
		if t == errorType {
			return true
		}
		if inProgress[t] {
			return false
		}
		// Mark before descending so cyclic types (e.g. recursive structs
		// via pointers) terminate.
		inProgress[t] = true
		switch t := t.(type) {
		case *types.Pointer:
			return check(t.Elem())
		case *types.Slice:
			return check(t.Elem())
		case *types.Array:
			return check(t.Elem())
		case *types.Map:
			return check(t.Key()) || check(t.Elem())
		case *types.Struct:
			for i := 0; i < t.NumFields(); i++ {
				if check(t.Field(i).Type()) {
					return true
				}
			}
		case *types.Named:
			return check(t.Underlying())
		// We list the remaining valid type kinds for completeness.
		case *types.Basic:
		case *types.Chan: // channels store values, but they are not comparable
		case *types.Signature:
		case *types.Tuple: // tuples are only part of signatures
		case *types.Interface:
		}
		return false
	}
	return check(typ)
} | go/analysis/passes/deepequalerrors/deepequalerrors.go | 0.712132 | 0.476762 | deepequalerrors.go | starcoder
package assert
// The example in the package doc above can't be demonstrated by an Example test file.
import (
"fmt"
"reflect"
"testing"
)
//================================================================================
// T is a struct extending the one from standard library package testing with assertion capabilities.
type T struct {
	*testing.T
	AssertPrintSwitch bool // set by PrintValue; makes LogAsserts mark the test failed so output is shown
	AssertCounter int      // number of assertions executed so far
}
//--------------------------------------------------------------------------------
// AssertTrue panics unless a is exactly the boolean value true
// (compared with reflect.DeepEqual, so non-bool values always panic).
//
// Bug fix: the panic message used the %x verb, which cannot format a
// bool (it printed "%!x(bool=false)"); %v renders any value correctly.
func AssertTrue(a interface{}) {
	if !reflect.DeepEqual(a, true) {
		panic(fmt.Sprintf("assert.AssertTrue: assertion of %v is false", a))
	}
}
//--------------------------------------------------------------------------------
// isEqual reports whether a and b are deeply equal, delegating to
// reflect.DeepEqual (so types must match, not just values).
func isEqual(a, b interface{}) bool {
	equal := reflect.DeepEqual(a, b)
	return equal
}
//--------------------------------------------------------------------------------
// LogAsserts is the wrapper function within which calls to the various tester utility functions must be embedded.
// LogAsserts itself must be embedded within a function that will be called by the go testing package.
// LogAsserts is the wrapper function within which calls to the various tester
// utility functions must be embedded. LogAsserts itself must be embedded
// within a function that will be called by the go testing package.
func LogAsserts(s string, t *testing.T, runner func(*T)) {
	wrapped := T{T: t, AssertPrintSwitch: false, AssertCounter: 0}
	runner(&wrapped)
	// PrintValue flips AssertPrintSwitch; fail the test so its output shows.
	if wrapped.AssertPrintSwitch {
		wrapped.Fail()
	}
	fmt.Println(wrapped.AssertCounter, "asserts run for", s)
}
//--------------------------------------------------------------------------------
// Assert will increment the counter, and throw a testing error if the assertion is false.
// Assert increments the assertion counter and records a test failure when
// the supplied condition is false.
func (tt *T) Assert(b bool) {
	tt.AssertCounter++
	if b {
		return
	}
	tt.Errorf("assert %d failed.\n"+
		"....found:%v (%[2]T)\n", tt.AssertCounter, b)
}
// AssertEqual will increment the counter, and throw a testing error if the deep equality is false.
// AssertEqual increments the assertion counter and records a test failure
// when a and b are not deeply equal.
func (tt *T) AssertEqual(a, b interface{}) {
	tt.AssertCounter++
	if isEqual(a, b) {
		return
	}
	tt.Errorf("assert %d failed.\n"+
		"......found:%v (%[2]T)\n"+
		"...expected:%v (%[3]T)\n", tt.AssertCounter, a, b)
}
// AssertPanic will increment the counter, and throw a testing error if the supplied function doesn't throw the specified panic.
// AssertPanic increments the assertion counter and records a test failure
// unless calling g panics with a value whose %v rendering equals msg.
// (If g does not panic, recover() yields nil, which renders as "<nil>".)
func (tt *T) AssertPanic(g func(), msg string) {
	tt.AssertCounter++
	defer func() {
		got := fmt.Sprintf("%v", recover())
		if got != msg {
			tt.Errorf("assert %d failed.\n"+
				"....found recover:%v\n"+
				"...expected panic:%v\n", tt.AssertCounter, got, msg)
		}
	}()
	g()
}
// AssertAnyPanic will increment the counter, and throw a testing error if the supplied function doesn't throw a panic.
// AssertAnyPanic increments the assertion counter and records a test failure
// unless calling g panics (with any value).
func (tt *T) AssertAnyPanic(g func()) {
	tt.AssertCounter++
	defer func() {
		x := recover()
		if x != nil {
			return // panicked as expected
		}
		tt.Errorf("assert %d failed. %v\n"+
			"....found nil recover\n", tt.AssertCounter, x)
	}()
	g()
}
// AssertEqualN will throw a testing error if the deep equality is false, using the supplied integer as the error number.
// AssertEqualN records a test failure when a and b are not deeply equal,
// using the supplied integer n as the reported assertion number (the shared
// counter is not touched).
func (tt *T) AssertEqualN(a, b interface{}, n int) {
	if isEqual(a, b) {
		return
	}
	tt.Errorf("assert %d failed.\n"+
		"......found:%v (%[2]T)\n"+
		"...expected:%v (%[3]T)\n", n, a, b)
}
// PrintValue will print the supplied value by throwing a testing failure.
// PrintValue prints the supplied value and flags the run as failed so that
// LogAsserts surfaces the printed output.
//
// Cleanup: the original guarded the assignment with
// `if !tt.AssertPrintSwitch { ... = true }`, which is redundant —
// unconditionally setting the flag is equivalent.
func (tt *T) PrintValue(a interface{}) {
	tt.AssertPrintSwitch = true
	fmt.Printf("%v(%[1]T)\n", a)
}
//================================================================================ | assert/assert.go | 0.61555 | 0.659254 | assert.go | starcoder |
package login
import (
"context"
"testing"
"github.com/bxcodec/faker/v3"
"github.com/gofrs/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ory/kratos/identity"
"github.com/ory/kratos/selfservice/flow"
"github.com/ory/kratos/selfservice/form"
"github.com/ory/kratos/x"
)
type (
	// FlowPersister is the storage contract for login flows.
	FlowPersister interface {
		UpdateLoginFlow(context.Context, *Flow) error
		CreateLoginFlow(context.Context, *Flow) error
		GetLoginFlow(context.Context, uuid.UUID) (*Flow, error)
		// UpdateLoginFlowMethod upserts a single method on an existing flow.
		UpdateLoginFlowMethod(context.Context, uuid.UUID, identity.CredentialsType, *FlowMethod) error
		// ForceLoginFlow marks the flow as forced (re-authentication).
		ForceLoginFlow(ctx context.Context, id uuid.UUID) error
	}
	// FlowPersistenceProvider exposes a FlowPersister to the registry.
	FlowPersistenceProvider interface {
		LoginFlowPersister() FlowPersister
	}
)
// TestFlowPersister returns a conformance test suite that any FlowPersister
// implementation must pass. Call it from a backend-specific test.
func TestFlowPersister(p FlowPersister) func(t *testing.T) {
	// clearids zeroes all IDs so the persister must assign fresh ones.
	var clearids = func(r *Flow) {
		r.ID = uuid.UUID{}
		for k := range r.Methods {
			r.Methods[k].ID = uuid.UUID{}
		}
	}
	ctx := context.Background()
	return func(t *testing.T) {
		t.Run("case=should error when the login flow does not exist", func(t *testing.T) {
			_, err := p.GetLoginFlow(ctx, x.NewUUID())
			require.Error(t, err)
		})
		// newFlow builds a fake flow with zeroed IDs and at least one method.
		var newFlow = func(t *testing.T) *Flow {
			var r Flow
			require.NoError(t, faker.FakeData(&r))
			clearids(&r)
			methods := len(r.Methods)
			assert.NotZero(t, methods)
			return &r
		}
		t.Run("case=should create with set ids", func(t *testing.T) {
			var r Flow
			require.NoError(t, faker.FakeData(&r))
			require.NoError(t, p.CreateLoginFlow(ctx, &r))
		})
		t.Run("case=should create a new login flow and properly set IDs", func(t *testing.T) {
			r := newFlow(t)
			methods := len(r.Methods)
			err := p.CreateLoginFlow(ctx, r)
			require.NoError(t, err, "%#v", err)
			assert.Nil(t, r.MethodsRaw)
			assert.NotEqual(t, uuid.Nil, r.ID)
			for _, m := range r.Methods {
				assert.NotEqual(t, uuid.Nil, m.ID)
			}
			assert.Len(t, r.Methods, methods)
		})
		t.Run("case=should create and fetch a login flow", func(t *testing.T) {
			expected := newFlow(t)
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Empty(t, actual.MethodsRaw)
			assert.EqualValues(t, expected.ID, actual.ID)
			x.AssertEqualTime(t, expected.IssuedAt, actual.IssuedAt)
			x.AssertEqualTime(t, expected.ExpiresAt, actual.ExpiresAt)
			assert.EqualValues(t, expected.RequestURL, actual.RequestURL)
			assert.EqualValues(t, expected.Active, actual.Active)
			require.Equal(t, len(expected.Methods), len(actual.Methods), "expected:\t%s\nactual:\t%s", expected.Methods, actual.Methods)
		})
		t.Run("case=should properly set the flow type", func(t *testing.T) {
			expected := newFlow(t)
			expected.Forced = true
			expected.Type = flow.TypeAPI
			expected.Methods = map[identity.CredentialsType]*FlowMethod{
				identity.CredentialsTypeOIDC: {
					Method: identity.CredentialsTypeOIDC,
					Config: &FlowMethodConfig{FlowMethodConfigurator: form.NewHTMLForm(string(identity.CredentialsTypeOIDC))},
				},
				identity.CredentialsTypePassword: {
					Method: identity.CredentialsTypePassword,
					Config: &FlowMethodConfig{FlowMethodConfigurator: form.NewHTMLForm(string(identity.CredentialsTypePassword))},
				},
			}
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Equal(t, flow.TypeAPI, actual.Type)
			// Replacing the methods map entirely must persist on update.
			actual.Methods = map[identity.CredentialsType]*FlowMethod{identity.CredentialsTypeOIDC: {
				Method: identity.CredentialsTypeOIDC,
				Config: &FlowMethodConfig{FlowMethodConfigurator: form.NewHTMLForm("ory-sh")},
			}}
			actual.Type = flow.TypeBrowser
			actual.Forced = true
			require.NoError(t, p.UpdateLoginFlow(ctx, actual))
			actual, err = p.GetLoginFlow(ctx, actual.ID)
			require.NoError(t, err)
			assert.Equal(t, flow.TypeBrowser, actual.Type)
			assert.True(t, actual.Forced)
			require.Len(t, actual.Methods, 1)
			assert.Equal(t, "ory-sh",
				actual.Methods[identity.CredentialsTypeOIDC].Config.
					FlowMethodConfigurator.(*form.HTMLForm).Action)
		})
		t.Run("case=should properly update a flow", func(t *testing.T) {
			expected := newFlow(t)
			expected.Type = flow.TypeAPI
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Equal(t, flow.TypeAPI, actual.Type)
		})
		t.Run("case=should update a login flow", func(t *testing.T) {
			expected := newFlow(t)
			delete(expected.Methods, identity.CredentialsTypeOIDC)
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Len(t, actual.Methods, 1)
			// Upserting two methods one-by-one must yield both on re-fetch.
			require.NoError(t, p.UpdateLoginFlowMethod(ctx, expected.ID, identity.CredentialsTypeOIDC, &FlowMethod{
				Method: identity.CredentialsTypeOIDC,
				Config: &FlowMethodConfig{FlowMethodConfigurator: form.NewHTMLForm(string(identity.CredentialsTypeOIDC))},
			}))
			require.NoError(t, p.UpdateLoginFlowMethod(ctx, expected.ID, identity.CredentialsTypePassword, &FlowMethod{
				Method: identity.CredentialsTypePassword,
				Config: &FlowMethodConfig{FlowMethodConfigurator: form.NewHTMLForm(string(identity.CredentialsTypePassword))},
			}))
			actual, err = p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			require.Len(t, actual.Methods, 2)
			assert.EqualValues(t, identity.CredentialsTypePassword, actual.Active)
			assert.Equal(t, string(identity.CredentialsTypePassword), actual.Methods[identity.CredentialsTypePassword].Config.FlowMethodConfigurator.(*form.HTMLForm).Action)
			assert.Equal(t, string(identity.CredentialsTypeOIDC), actual.Methods[identity.CredentialsTypeOIDC].Config.FlowMethodConfigurator.(*form.HTMLForm).Action)
		})
		t.Run("case=should not cause data loss when updating a request without changes", func(t *testing.T) {
			expected := newFlow(t)
			err := p.CreateLoginFlow(ctx, expected)
			require.NoError(t, err)
			actual, err := p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Len(t, actual.Methods, 2)
			require.NoError(t, p.UpdateLoginFlow(ctx, actual))
			actual, err = p.GetLoginFlow(ctx, expected.ID)
			require.NoError(t, err)
			require.Len(t, actual.Methods, 2)
			assert.Equal(t,
				expected.Methods[identity.CredentialsTypePassword].Config.FlowMethodConfigurator.(*form.HTMLForm).Action,
				actual.Methods[identity.CredentialsTypePassword].Config.FlowMethodConfigurator.(*form.HTMLForm).Action,
			)
			assert.Equal(t,
				expected.Methods[identity.CredentialsTypeOIDC].Config.FlowMethodConfigurator.(*form.HTMLForm).Action,
				actual.Methods[identity.CredentialsTypeOIDC].Config.FlowMethodConfigurator.(*form.HTMLForm).Action,
			)
		})
	}
} | selfservice/flow/login/persistence.go | 0.512693 | 0.569045 | persistence.go | starcoder
// Package kinesisiface provides an interface for the Amazon Kinesis.
package kinesisiface
import (
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/kinesis"
)
// KinesisAPI is the interface type for kinesis.Kinesis.
type KinesisAPI interface {
	// Each operation is exposed twice: an XxxRequest form returning the
	// raw request object for customization, and a blocking Xxx form.
	// The method set mirrors kinesis.Kinesis (see the var check below).
	AddTagsToStreamRequest(*kinesis.AddTagsToStreamInput) (*request.Request, *kinesis.AddTagsToStreamOutput)
	AddTagsToStream(*kinesis.AddTagsToStreamInput) (*kinesis.AddTagsToStreamOutput, error)
	CreateStreamRequest(*kinesis.CreateStreamInput) (*request.Request, *kinesis.CreateStreamOutput)
	CreateStream(*kinesis.CreateStreamInput) (*kinesis.CreateStreamOutput, error)
	DecreaseStreamRetentionPeriodRequest(*kinesis.DecreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.DecreaseStreamRetentionPeriodOutput)
	DecreaseStreamRetentionPeriod(*kinesis.DecreaseStreamRetentionPeriodInput) (*kinesis.DecreaseStreamRetentionPeriodOutput, error)
	DeleteStreamRequest(*kinesis.DeleteStreamInput) (*request.Request, *kinesis.DeleteStreamOutput)
	DeleteStream(*kinesis.DeleteStreamInput) (*kinesis.DeleteStreamOutput, error)
	DescribeStreamRequest(*kinesis.DescribeStreamInput) (*request.Request, *kinesis.DescribeStreamOutput)
	DescribeStream(*kinesis.DescribeStreamInput) (*kinesis.DescribeStreamOutput, error)
	// XxxPages variants iterate paginated results, invoking the callback
	// per page until it returns false or pages are exhausted.
	DescribeStreamPages(*kinesis.DescribeStreamInput, func(*kinesis.DescribeStreamOutput, bool) bool) error
	DisableEnhancedMonitoringRequest(*kinesis.DisableEnhancedMonitoringInput) (*request.Request, *kinesis.EnhancedMonitoringOutput)
	DisableEnhancedMonitoring(*kinesis.DisableEnhancedMonitoringInput) (*kinesis.EnhancedMonitoringOutput, error)
	EnableEnhancedMonitoringRequest(*kinesis.EnableEnhancedMonitoringInput) (*request.Request, *kinesis.EnhancedMonitoringOutput)
	EnableEnhancedMonitoring(*kinesis.EnableEnhancedMonitoringInput) (*kinesis.EnhancedMonitoringOutput, error)
	GetRecordsRequest(*kinesis.GetRecordsInput) (*request.Request, *kinesis.GetRecordsOutput)
	GetRecords(*kinesis.GetRecordsInput) (*kinesis.GetRecordsOutput, error)
	GetShardIteratorRequest(*kinesis.GetShardIteratorInput) (*request.Request, *kinesis.GetShardIteratorOutput)
	GetShardIterator(*kinesis.GetShardIteratorInput) (*kinesis.GetShardIteratorOutput, error)
	IncreaseStreamRetentionPeriodRequest(*kinesis.IncreaseStreamRetentionPeriodInput) (*request.Request, *kinesis.IncreaseStreamRetentionPeriodOutput)
	IncreaseStreamRetentionPeriod(*kinesis.IncreaseStreamRetentionPeriodInput) (*kinesis.IncreaseStreamRetentionPeriodOutput, error)
	ListStreamsRequest(*kinesis.ListStreamsInput) (*request.Request, *kinesis.ListStreamsOutput)
	ListStreams(*kinesis.ListStreamsInput) (*kinesis.ListStreamsOutput, error)
	ListStreamsPages(*kinesis.ListStreamsInput, func(*kinesis.ListStreamsOutput, bool) bool) error
	ListTagsForStreamRequest(*kinesis.ListTagsForStreamInput) (*request.Request, *kinesis.ListTagsForStreamOutput)
	ListTagsForStream(*kinesis.ListTagsForStreamInput) (*kinesis.ListTagsForStreamOutput, error)
	MergeShardsRequest(*kinesis.MergeShardsInput) (*request.Request, *kinesis.MergeShardsOutput)
	MergeShards(*kinesis.MergeShardsInput) (*kinesis.MergeShardsOutput, error)
	PutRecordRequest(*kinesis.PutRecordInput) (*request.Request, *kinesis.PutRecordOutput)
	PutRecord(*kinesis.PutRecordInput) (*kinesis.PutRecordOutput, error)
	PutRecordsRequest(*kinesis.PutRecordsInput) (*request.Request, *kinesis.PutRecordsOutput)
	PutRecords(*kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error)
	RemoveTagsFromStreamRequest(*kinesis.RemoveTagsFromStreamInput) (*request.Request, *kinesis.RemoveTagsFromStreamOutput)
	RemoveTagsFromStream(*kinesis.RemoveTagsFromStreamInput) (*kinesis.RemoveTagsFromStreamOutput, error)
	SplitShardRequest(*kinesis.SplitShardInput) (*request.Request, *kinesis.SplitShardOutput)
	SplitShard(*kinesis.SplitShardInput) (*kinesis.SplitShardOutput, error)
}
var _ KinesisAPI = (*kinesis.Kinesis)(nil) | Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/kinesis/kinesisiface/interface.go | 0.610221 | 0.407392 | interface.go | starcoder |
package iterator
import (
"context"
"github.com/cayleygraph/cayley/graph"
"github.com/cayleygraph/quad"
)
// Compile-time check that *Count implements graph.IteratorFuture.
var _ graph.IteratorFuture = &Count{}
// Count iterator returns one element with size of underlying iterator.
type Count struct {
	it *count // the shape-based implementation this legacy wrapper delegates to
	graph.Iterator
}
// NewCount creates a new iterator to count a number of results from a provided subiterator.
// qs may be nil - it's used to check if count Contains (is) a given value.
func NewCount(sub graph.Iterator, qs graph.Namer) *Count {
	it := &Count{
		it: newCount(graph.AsShape(sub), qs),
	}
	it.Iterator = graph.NewLegacy(it.it, it)
	return it
}
// AsShape closes the legacy wrapper and returns the underlying shape.
func (it *Count) AsShape() graph.IteratorShape {
	it.Close()
	return it.it
}
// Compile-time check that *count implements graph.IteratorShapeCompat.
var _ graph.IteratorShapeCompat = &count{}
// Count iterator returns one element with size of underlying iterator.
type count struct {
	it graph.IteratorShape
	qs graph.Namer
}
// NewCount creates a new iterator to count a number of results from a provided subiterator.
// qs may be nil - it's used to check if count Contains (is) a given value.
func newCount(it graph.IteratorShape, qs graph.Namer) *count {
	return &count{
		it: it, qs: qs,
	}
}
// Iterate returns a scanner producing the single count result.
func (it *count) Iterate() graph.Scanner {
	return newCountNext(it.it)
}
// Lookup returns an index that checks values against the computed count.
func (it *count) Lookup() graph.Index {
	return newCountContains(it.it, it.qs)
}
// AsLegacy wraps this shape back into the legacy Iterator interface.
func (it *count) AsLegacy() graph.Iterator {
	it2 := &Count{it: it}
	it2.Iterator = graph.NewLegacy(it, it2)
	return it2
}
// SubIterators returns a slice of the sub iterators.
func (it *count) SubIterators() []graph.IteratorShape {
	return []graph.IteratorShape{it.it}
}
// Optimize delegates to the subiterator and keeps the optimized version.
func (it *count) Optimize(ctx context.Context) (graph.IteratorShape, bool) {
	sub, optimized := it.it.Optimize(ctx)
	it.it = sub
	return it, optimized
}
// Stats reports exactly one result; when the subiterator's size is inexact,
// Next must enumerate it fully, so NextCost scales with its estimated size.
func (it *count) Stats(ctx context.Context) (graph.IteratorCosts, error) {
	stats := graph.IteratorCosts{
		NextCost: 1,
		Size: graph.Size{
			Size:  1,
			Exact: true,
		},
	}
	if sub, err := it.it.Stats(ctx); err == nil && !sub.Size.Exact {
		stats.NextCost = sub.NextCost * sub.Size.Size
	}
	stats.ContainsCost = stats.NextCost
	return stats, nil
}
func (it *count) String() string { return "Count" }
// countNext is the scanner for count: it yields exactly one value — the
// number of results in the underlying iterator — and then stops.
type countNext struct {
	it     graph.IteratorShape
	done   bool       // true once the single result has been produced
	result quad.Value // the computed count, as a quad.Int
	err    error
}
// newCountNext creates the single-result scanner over the given shape.
func newCountNext(it graph.IteratorShape) *countNext {
	return &countNext{
		it: it,
	}
}
func (it *countNext) TagResults(dst map[string]graph.Ref) {}
// Next counts a number of results in underlying iterator.
func (it *countNext) Next(ctx context.Context) bool {
	if it.done {
		return false
	}
	// TODO(dennwc): this most likely won't include the NextPath
	st, err := it.it.Stats(ctx)
	if err != nil {
		it.err = err
		return false
	}
	// If the stats are inexact, enumerate the subiterator to count exactly.
	if !st.Size.Exact {
		sit := it.it.Iterate()
		defer sit.Close()
		for st.Size.Size = 0; sit.Next(ctx); st.Size.Size++ {
			// TODO(dennwc): it's unclear if we should call it here or not
			for ; sit.NextPath(ctx); st.Size.Size++ {
			}
		}
		it.err = sit.Err()
	}
	it.result = quad.Int(st.Size.Size)
	it.done = true
	return true
}
func (it *countNext) Err() error {
	return it.err
}
func (it *countNext) Result() graph.Ref {
	if it.result == nil {
		return nil
	}
	return graph.PreFetched(it.result)
}
func (it *countNext) NextPath(ctx context.Context) bool {
	return false
}
func (it *countNext) Close() error {
	return nil
}
func (it *countNext) String() string { return "CountNext" }
// countContains is the index side of count: Contains reports whether a given
// value equals the computed count of the underlying iterator.
type countContains struct {
	it *countNext
	qs graph.Namer // may be nil; used to resolve non-prefetched values
}
// newCountContains builds the index over the given shape; qs may be nil.
func newCountContains(it graph.IteratorShape, qs graph.Namer) *countContains {
	return &countContains{
		it: newCountNext(it),
		qs: qs,
	}
}
func (it *countContains) TagResults(dst map[string]graph.Ref) {}
func (it *countContains) Err() error {
	return it.it.Err()
}
func (it *countContains) Result() graph.Ref {
	return it.it.Result()
}
// Contains lazily computes the count on first use, then compares val to it.
func (it *countContains) Contains(ctx context.Context, val graph.Ref) bool {
	if !it.it.done {
		it.it.Next(ctx)
	}
	if v, ok := val.(graph.PreFetchedValue); ok {
		return v.NameOf() == it.it.result
	}
	if it.qs != nil {
		return it.qs.NameOf(val) == it.it.result
	}
	// Without a Namer we cannot resolve val, so report non-membership.
	return false
}
func (it *countContains) NextPath(ctx context.Context) bool {
	return false
}
func (it *countContains) Close() error {
	return it.it.Close()
}
func (it *countContains) String() string { return "CountContains" } | graph/iterator/count.go | 0.765418 | 0.462534 | count.go | starcoder
package common
import (
"math"
"math/rand"
)
// Distribution provides an interface to model a statistical distribution.
type Distribution interface {
	// Advance draws/computes the next value and caches it internally.
	Advance()
	// Get returns the most recently computed value.
	Get() float64 // should be idempotent
}
// NormalDistribution models a normal (Gaussian) distribution.
type NormalDistribution struct {
	Mean float64
	StdDev float64
	value float64 // last drawn sample, cached for Get
}
// ND constructs a NormalDistribution with the given mean and standard deviation.
func ND(mean, stddev float64) *NormalDistribution {
	return &NormalDistribution{Mean: mean, StdDev: stddev}
}
// Unsynchronized random source (not safe for concurrent use) with a fixed
// default seed of 1, so runs are reproducible by default.
var localRand = rand.New(rand.NewSource(1))
// Seed uses the provided seed value to initialize the generator to a deterministic state.
// NOTE(review): only distributions that draw from localRand are affected by
// Seed; verify that all distributions in this file use localRand.
func Seed(seed int64) {
	localRand.Seed(seed)
}
// Advance advances this distribution. Since a normal distribution is
// stateless, this just overwrites the internal cache value. It draws from
// localRand, so Seed makes the sequence deterministic.
func (d *NormalDistribution) Advance() {
	d.value = localRand.NormFloat64()*d.StdDev + d.Mean
}
// Get returns the last computed value for this distribution.
func (d *NormalDistribution) Get() float64 {
	return d.value
}
// UniformDistribution models a uniform distribution.
type UniformDistribution struct {
Low float64
High float64
value float64
}
func UD(low, high float64) *UniformDistribution {
return &UniformDistribution{Low: low, High: high}
}
// Advance advances this distribution. Since a uniform distribution is
// stateless, this is just overwrites the internal cache value.
func (d *UniformDistribution) Advance() {
x := rand.Float64() // uniform
x *= d.High - d.Low
x += d.Low
d.value = x
}
// Get computes and returns the next value in the distribution.
func (d *UniformDistribution) Get() float64 {
return d.value
}
// RandomWalkDistribution is a stateful random walk. Initialize it with an
// underlying distribution, which is used to compute the new step value.
type RandomWalkDistribution struct {
	Step Distribution
	State float64 // optional
}
// WD constructs a RandomWalkDistribution with the given step distribution
// and starting state.
func WD(step Distribution, state float64) *RandomWalkDistribution {
	return &RandomWalkDistribution{Step: step, State: state}
}
// Advance computes the next value of this distribution and stores it.
func (d *RandomWalkDistribution) Advance() {
	d.Step.Advance()
	d.State += d.Step.Get()
}
// Get returns the last computed value for this distribution.
func (d *RandomWalkDistribution) Get() float64 {
	return d.State
}
// ClampedRandomWalkDistribution is a stateful random walk whose state is
// kept within the inclusive bounds [Min, Max]. The Step distribution
// supplies each increment.
type ClampedRandomWalkDistribution struct {
	Step  Distribution
	Min   float64
	Max   float64
	State float64 // optional starting point
}

// CWD constructs a ClampedRandomWalkDistribution.
func CWD(step Distribution, min, max, state float64) *ClampedRandomWalkDistribution {
	return &ClampedRandomWalkDistribution{
		Step:  step,
		Min:   min,
		Max:   max,
		State: state,
	}
}

// Advance draws a step, adds it to the state, and clamps the result to
// [Min, Max] (upper bound applied first, matching the original ordering).
func (d *ClampedRandomWalkDistribution) Advance() {
	d.Step.Advance()
	next := d.State + d.Step.Get()
	d.State = math.Max(math.Min(next, d.Max), d.Min)
}

// Get returns the last computed value for this distribution.
func (d *ClampedRandomWalkDistribution) Get() float64 {
	return d.State
}
// MonotonicRandomWalkDistribution is a stateful random walk that only
// increases. Initialize it with a Start and an underlying distribution,
// which is used to compute the new step value. The sign of any value of the
// u.d. is always made positive.
type MonotonicRandomWalkDistribution struct {
	Step Distribution
	State float64
}
// Advance computes the next value of this distribution and stores it.
// The absolute value of the step guarantees monotonic growth.
func (d *MonotonicRandomWalkDistribution) Advance() {
	d.Step.Advance()
	d.State += math.Abs(d.Step.Get())
}
// Get returns the last computed value for this distribution.
func (d *MonotonicRandomWalkDistribution) Get() float64 {
	return d.State
}
// MWD constructs a MonotonicRandomWalkDistribution with the given step
// distribution and starting state.
func MWD(step Distribution, state float64) *MonotonicRandomWalkDistribution {
	return &MonotonicRandomWalkDistribution{Step: step, State: state}
}
// MonotonicUpDownRandomWalkDistribution is a stateful random walk that continually
// increases and decreases. Initialize it with State, Min and Max and an underlying
// distribution, which is used to compute the new step value.
type MonotonicUpDownRandomWalkDistribution struct {
	Step Distribution
	State float64
	Min float64
	Max float64
	direction int //1 (walking up) or -1 (walking down)
}
// Advance computes the next value of this distribution and stores it.
// On hitting a bound, the state is clamped to the bound and the walk
// direction reverses for subsequent steps.
func (d *MonotonicUpDownRandomWalkDistribution) Advance() {
	d.Step.Advance()
	d.State += d.Step.Get() * float64(d.direction)
	if d.State < d.Min {
		d.State = d.Min
		d.direction = 1
	} else if d.State > d.Max {
		d.State = d.Max
		d.direction = -1
	}
}
// Get returns the last computed value for this distribution.
func (d *MonotonicUpDownRandomWalkDistribution) Get() float64 {
	return d.State
}
// MUDWD constructs a MonotonicUpDownRandomWalkDistribution; the initial walk
// direction is up unless the starting state is already at or above max.
func MUDWD(step Distribution, min float64, max float64, state float64) *MonotonicUpDownRandomWalkDistribution {
	direction := -1
	if state < max {
		direction = 1
	}
	return &MonotonicUpDownRandomWalkDistribution{Step: step, Min: min, Max: max, State: state, direction: direction}
}
}
// ConstantDistribution always yields the same fixed value, State.
type ConstantDistribution struct {
	State float64
}

// Advance is a no-op: a constant never changes.
func (d *ConstantDistribution) Advance() {}

// Get returns the constant value.
func (d *ConstantDistribution) Get() float64 {
	return d.State
}
//TwoStateDistribution randomly chooses state from two values
type TwoStateDistribution struct {
Low float64
High float64
State float64
}
func (d *TwoStateDistribution) Advance() {
d.State = d.Low
if rand.Float64() > 0.5 {
d.State = d.High
}
}
func (d *TwoStateDistribution) Get() float64 {
return d.State
}
func TSD(low float64, high float64, state float64) *TwoStateDistribution {
return &TwoStateDistribution{Low: low, High: high, State: state}
}
// RandChoice returns a uniformly random element of choices.
// Panics if choices is empty (Int63n panics for n <= 0).
// NOTE(review): this draws from the package-global math/rand source, not
// localRand, so Seed() does not make it deterministic — confirm intent.
func RandChoice(choices [][]byte) []byte {
	idx := rand.Int63n(int64(len(choices)))
	return choices[idx]
} | bulk_data_gen/common/distribution.go | 0.908537 | 0.626895 | distribution.go | starcoder
package distuv
import (
"math"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/mathext"
"gonum.org/v1/gonum/stat/combin"
)
// Binomial implements the binomial distribution, a discrete probability distribution
// that expresses the probability of a given number of successful Bernoulli trials
// out of a total of n, each with success probability p.
// The binomial distribution has the density function:
// f(k) = (n choose k) p^k (1-p)^(n-k)
// For more information, see https://en.wikipedia.org/wiki/Binomial_distribution.
type Binomial struct {
// N is the total number of Bernoulli trials. N must be greater than 0.
N float64
// P is the probability of success in any given trial. P must be in [0, 1].
P float64
Src rand.Source
}
// CDF computes the value of the cumulative distribution function at x.
func (b Binomial) CDF(x float64) float64 {
if x < 0 {
return 0
}
if x >= b.N {
return 1
}
x = math.Floor(x)
return mathext.RegIncBeta(b.N-x, x+1, 1-b.P)
}
// ExKurtosis returns the excess kurtosis of the distribution.
func (b Binomial) ExKurtosis() float64 {
v := b.P * (1 - b.P)
return (1 - 6*v) / (b.N * v)
}
// LogProb computes the natural logarithm of the value of the probability
// density function at x.
func (b Binomial) LogProb(x float64) float64 {
if x < 0 || x > b.N || math.Floor(x) != x {
return math.Inf(-1)
}
lb := combin.LogGeneralizedBinomial(b.N, x)
return lb + x*math.Log(b.P) + (b.N-x)*math.Log(1-b.P)
}
// Mean returns the mean of the probability distribution.
func (b Binomial) Mean() float64 {
return b.N * b.P
}
// NumParameters returns the number of parameters in the distribution.
func (Binomial) NumParameters() int {
return 2
}
// Prob computes the value of the probability density function at x.
func (b Binomial) Prob(x float64) float64 {
return math.Exp(b.LogProb(x))
}
// Rand returns a random sample drawn from the distribution.
func (b Binomial) Rand() float64 {
	// NUMERICAL RECIPES IN C: THE ART OF SCIENTIFIC COMPUTING (ISBN 0-521-43108-5)
	// p. 295-6
	// http://www.aip.de/groups/soe/local/numres/bookcpdf/c7-3.pdf
	// Use the package-level RNG unless an explicit source is configured.
	runif := rand.Float64
	rexp := rand.ExpFloat64
	if b.Src != nil {
		rnd := rand.New(b.Src)
		runif = rnd.Float64
		rexp = rnd.ExpFloat64
	}
	// Work with p <= 0.5; the result is mirrored (n - x) at the end if p was flipped.
	p := b.P
	if p > 0.5 {
		p = 1 - p
	}
	am := b.N * p // mean of the (possibly mirrored) distribution
	if b.N < 25 {
		// Use direct method: count successes over N Bernoulli trials.
		bnl := 0.0
		for i := 0; i < int(b.N); i++ {
			if runif() < p {
				bnl++
			}
		}
		if p != b.P {
			// Undo the p -> 1-p mirroring.
			return b.N - bnl
		}
		return bnl
	}
	if am < 1 {
		// Use rejection method with Poisson proposal.
		const logM = 2.6e-2 // constant for rejection sampling (https://en.wikipedia.org/wiki/Rejection_sampling)
		var bnl float64
		z := -p
		pclog := (1 + 0.5*z) * z / (1 + (1+1.0/6*z)*z) // Padé approximant of log(1 + x)
		for {
			// Sample a Poisson-like count by summing exponential inter-arrival times.
			bnl = 0.0
			t := 0.0
			for i := 0; i < int(b.N); i++ {
				t += rexp()
				if t >= am {
					break
				}
				bnl++
			}
			bnlc := b.N - bnl
			z = -bnl / b.N
			log1p := (1 + 0.5*z) * z / (1 + (1+1.0/6*z)*z)
			t = (bnlc+0.5)*log1p + bnl - bnlc*pclog + 1/(12*bnlc) - am + logM // Uses Stirling's expansion of log(n!)
			// Accept when an exponential variate exceeds the log acceptance ratio.
			if rexp() >= t {
				break
			}
		}
		if p != b.P {
			return b.N - bnl
		}
		return bnl
	}
	// Original algorithm samples from a Poisson distribution with the
	// appropriate expected value. However, the Poisson approximation is
	// asymptotic such that the absolute deviation in probability is O(1/n).
	// Rejection sampling produces exact variates with at worst less than 3%
	// rejection with minimal additional computation.
	// Use rejection method with Cauchy proposal.
	g, _ := math.Lgamma(b.N + 1)
	plog := math.Log(p)
	pclog := math.Log1p(-p)
	sq := math.Sqrt(2 * am * (1 - p))
	for {
		var em, y float64
		for {
			// Draw from a Cauchy centered at the mean; reject values outside [0, N].
			y = math.Tan(math.Pi * runif())
			em = sq*y + am
			if em >= 0 && em < b.N+1 {
				break
			}
		}
		em = math.Floor(em)
		lg1, _ := math.Lgamma(em + 1)
		lg2, _ := math.Lgamma(b.N - em + 1)
		// Acceptance ratio: binomial pmf over the Cauchy proposal density.
		t := 1.2 * sq * (1 + y*y) * math.Exp(g-lg1-lg2+em*plog+(b.N-em)*pclog)
		if runif() <= t {
			if p != b.P {
				return b.N - em
			}
			return em
		}
	}
}
// Skewness returns the skewness of the distribution.
func (b Binomial) Skewness() float64 {
	sd := b.StdDev()
	return (1 - 2*b.P) / sd
}
// StdDev returns the standard deviation of the probability distribution.
func (b Binomial) StdDev() float64 {
	variance := b.Variance()
	return math.Sqrt(variance)
}
// Survival returns the survival function (complementary CDF) at x.
func (b Binomial) Survival(x float64) float64 {
	cdf := b.CDF(x)
	return 1 - cdf
}
// Variance returns the variance of the probability distribution.
func (b Binomial) Variance() float64 {
return b.N * b.P * (1 - b.P)
} | stat/distuv/binomial.go | 0.871543 | 0.590691 | binomial.go | starcoder |
package engine
// Cropped quantity refers to a cut-out piece of a large quantity
import (
"fmt"
data "github.com/seeder-research/uMagNUS/data"
opencl "github.com/seeder-research/uMagNUS/opencl"
util "github.com/seeder-research/uMagNUS/util"
)
// init registers the crop functions with the scripting engine so they can be
// called from input scripts.
func init() {
	DeclFunc("Crop", Crop, "Crops a quantity to cell ranges [x1,x2[, [y1,y2[, [z1,z2[")
	DeclFunc("CropX", CropX, "Crops a quantity to cell ranges [x1,x2[")
	DeclFunc("CropY", CropY, "Crops a quantity to cell ranges [y1,y2[")
	DeclFunc("CropZ", CropZ, "Crops a quantity to cell ranges [z1,z2[")
	DeclFunc("CropLayer", CropLayer, "Crops a quantity to a single layer")
	DeclFunc("CropRegion", CropRegion, "Crops a quantity to a region")
}
// cropped is a view on the rectangular sub-box [x1,x2[ x [y1,y2[ x [z1,z2[
// (in cells) of a parent quantity.
type cropped struct {
	parent                 Quantity // quantity being cropped
	name                   string   // derived name encoding the cropped ranges
	x1, x2, y1, y2, z1, z2 int      // half-open cell index ranges
}
// CropRegion crops a quantity to the smallest box enclosing the given region.
// Used to output a region of interest, even if the region is non-rectangular.
func CropRegion(parent Quantity, region int) *cropped {
	n := MeshOf(parent).Size()
	r := regions.HostArray()
	target := byte(region)
	// Bounding box indices; -1 means no cell of the region has been seen yet.
	x1, y1, z1 := -1, -1, -1
	x2, y2, z2 := -1, -1, -1
	for zi := 0; zi < n[Z]; zi++ {
		for yi := 0; yi < n[Y]; yi++ {
			for xi := 0; xi < n[X]; xi++ {
				if r[zi][yi][xi] != target {
					continue
				}
				if x1 == -1 {
					// First matching cell: start the box here.
					x1, y1, z1 = xi, yi, zi
					x2, y2, z2 = xi, yi, zi
				}
				if xi < x1 {
					x1 = xi
				}
				if yi < y1 {
					y1 = yi
				}
				if zi < z1 {
					z1 = zi
				}
				if xi > x2 {
					x2 = xi
				}
				if yi > y2 {
					y2 = yi
				}
				if zi > z2 {
					z2 = zi
				}
			}
		}
	}
	return Crop(parent, x1, x2+1, y1, y2+1, z1, z2+1)
}
// CropLayer restricts a quantity to a single z-layer (cell range [layer, layer+1[).
func CropLayer(parent Quantity, layer int) *cropped {
	size := MeshOf(parent).Size()
	return Crop(parent, 0, size[X], 0, size[Y], layer, layer+1)
}
// CropX restricts a quantity to cell range [x1,x2[ along X, keeping the full
// extent along Y and Z.
func CropX(parent Quantity, x1, x2 int) *cropped {
	size := MeshOf(parent).Size()
	return Crop(parent, x1, x2, 0, size[Y], 0, size[Z])
}
// CropY restricts a quantity to cell range [y1,y2[ along Y, keeping the full
// extent along X and Z.
func CropY(parent Quantity, y1, y2 int) *cropped {
	size := MeshOf(parent).Size()
	return Crop(parent, 0, size[X], y1, y2, 0, size[Z])
}
// CropZ restricts a quantity to cell range [z1,z2[ along Z, keeping the full
// extent along X and Y.
func CropZ(parent Quantity, z1, z2 int) *cropped {
	size := MeshOf(parent).Size()
	return Crop(parent, 0, size[X], 0, size[Y], z1, z2)
}
// Crop returns a view on the cell sub-box [x1,x2[ x [y1,y2[ x [z1,z2[ of
// parent. Ranges must be non-empty and lie within the parent mesh.
func Crop(parent Quantity, x1, x2, y1, y2, z1, z2 int) *cropped {
	size := MeshOf(parent).Size()
	util.Argument(x1 < x2 && y1 < y2 && z1 < z2)
	util.Argument(x1 >= 0 && y1 >= 0 && z1 >= 0)
	util.Argument(x2 <= size[X] && y2 <= size[Y] && z2 <= size[Z])
	// Encode only the restricted axes in the name so output files stay unambiguous.
	name := NameOf(parent) + "_"
	if x1 != 0 || x2 != size[X] {
		name += "xrange" + rangeStr(x1, x2)
	}
	if y1 != 0 || y2 != size[Y] {
		name += "yrange" + rangeStr(y1, y2)
	}
	if z1 != 0 || z2 != size[Z] {
		name += "zrange" + rangeStr(z1, z2)
	}
	return &cropped{parent, name, x1, x2, y1, y2, z1, z2}
}
// rangeStr formats a half-open cell range for use in a quantity name.
// A single-cell range prints only the start index. The trailing underscore
// separates the range from any subsequent autosave number.
func rangeStr(a, b int) string {
	if b == a+1 {
		return fmt.Sprint(a, "_")
	}
	return fmt.Sprint(a, "-", b, "_")
}
// NComp returns the number of components of the parent quantity.
func (q *cropped) NComp() int { return q.parent.NComp() }

// Name returns the derived name, which encodes the cropped ranges.
func (q *cropped) Name() string { return q.name }

// Unit returns the unit of the parent quantity.
func (q *cropped) Unit() string { return UnitOf(q.parent) }

// EvalTo evaluates the cropped quantity into dst.
func (q *cropped) EvalTo(dst *data.Slice) { EvalTo(q, dst) }
// Mesh returns a mesh with the cropped cell counts and the parent's cell size.
func (q *cropped) Mesh() *data.Mesh {
	cell := MeshOf(q.parent).CellSize()
	nx, ny, nz := q.x2-q.x1, q.y2-q.y1, q.z2-q.z1
	return data.NewMesh(nx, ny, nz, cell[X], cell[Y], cell[Z])
}
// average delegates to qAverageUniverse — presumably averaging over the full
// grid rather than only the cropped box (per the helper's name); confirm against callers.
func (q *cropped) average() []float64 { return qAverageUniverse(q) } // needed for table

// Average is the exported, script-facing alias of average.
func (q *cropped) Average() []float64 { return q.average() } // handy for script
func (q *cropped) Slice() (*data.Slice, bool) {
src := ValueOf(q.parent)
defer opencl.Recycle(src)
dst := opencl.Buffer(q.NComp(), q.Mesh().Size())
opencl.Crop(dst, src, q.x1, q.y1, q.z1)
return dst, true
} | engine/crop.go | 0.667473 | 0.500061 | crop.go | starcoder |
// Package h28 contains the data structures for HL7 v2.8.
package h28
// Registry implements the required interface for unmarshalling data.
var Registry = registry{}

// registry is a stateless implementation backed by the package-level lookup tables.
type registry struct{}

// Version returns the HL7 version string implemented by this package.
func (registry) Version() string {
	return Version
}

// ControlSegment returns the file/batch control segment lookup table.
func (registry) ControlSegment() map[string]any {
	return ControlSegmentRegistry
}

// Segment returns the segment lookup table.
func (registry) Segment() map[string]any {
	return SegmentRegistry
}

// Trigger returns the trigger (message structure) lookup table.
func (registry) Trigger() map[string]any {
	return TriggerRegistry
}

// DataType returns the data type lookup table.
func (registry) DataType() map[string]any {
	return DataTypeRegistry
}

// Version of this HL7 package.
var Version = `2.8`
// Segments specific to file and batch control.
// Keys are three-letter segment IDs; values are zero-value segment structs.
var ControlSegmentRegistry = map[string]any{
	"BHS": BHS{},
	"BTS": BTS{},
	"FHS": FHS{},
	"FTS": FTS{},
	"DSC": DSC{},
	"OVR": OVR{},
	"ADD": ADD{},
	"SFT": SFT{},
	"ARV": ARV{},
	"UAC": UAC{},
}
// Segment lookup by ID.
var SegmentRegistry = map[string]any{
"ABS": ABS{},
"ACC": ACC{},
"ADD": ADD{},
"ADJ": ADJ{},
"AFF": AFF{},
"AIG": AIG{},
"AIL": AIL{},
"AIP": AIP{},
"AIS": AIS{},
"AL1": AL1{},
"APR": APR{},
"ARQ": ARQ{},
"ARV": ARV{},
"AUT": AUT{},
"BHS": BHS{},
"BLC": BLC{},
"BLG": BLG{},
"BPO": BPO{},
"BPX": BPX{},
"BTS": BTS{},
"BTX": BTX{},
"BUI": BUI{},
"CDM": CDM{},
"CDO": CDO{},
"CER": CER{},
"CM0": CM0{},
"CM1": CM1{},
"CM2": CM2{},
"CNS": CNS{},
"CON": CON{},
"CSP": CSP{},
"CSR": CSR{},
"CSS": CSS{},
"CTD": CTD{},
"CTI": CTI{},
"DB1": DB1{},
"DG1": DG1{},
"DMI": DMI{},
"DON": DON{},
"DRG": DRG{},
"DSC": DSC{},
"DSP": DSP{},
"ECD": ECD{},
"ECR": ECR{},
"EDU": EDU{},
"EQP": EQP{},
"EQU": EQU{},
"ERR": ERR{},
"EVN": EVN{},
"FHS": FHS{},
"FT1": FT1{},
"FTS": FTS{},
"GOL": GOL{},
"GP1": GP1{},
"GP2": GP2{},
"GT1": GT1{},
"Hxx": Hxx{},
"IAM": IAM{},
"IAR": IAR{},
"IIM": IIM{},
"ILT": ILT{},
"IN1": IN1{},
"IN2": IN2{},
"IN3": IN3{},
"INV": INV{},
"IPC": IPC{},
"IPR": IPR{},
"ISD": ISD{},
"ITM": ITM{},
"IVC": IVC{},
"IVT": IVT{},
"LAN": LAN{},
"LCC": LCC{},
"LCH": LCH{},
"LDP": LDP{},
"LOC": LOC{},
"LRL": LRL{},
"MFA": MFA{},
"MFE": MFE{},
"MFI": MFI{},
"MRG": MRG{},
"MSA": MSA{},
"MSH": MSH{},
"NCK": NCK{},
"NDS": NDS{},
"NK1": NK1{},
"NPU": NPU{},
"NSC": NSC{},
"NST": NST{},
"NTE": NTE{},
"OBR": OBR{},
"OBX": OBX{},
"ODS": ODS{},
"ODT": ODT{},
"OM1": OM1{},
"OM2": OM2{},
"OM3": OM3{},
"OM4": OM4{},
"OM5": OM5{},
"OM6": OM6{},
"OM7": OM7{},
"ORC": ORC{},
"ORG": ORG{},
"OVR": OVR{},
"PAC": PAC{},
"PCE": PCE{},
"PCR": PCR{},
"PD1": PD1{},
"PDA": PDA{},
"PEO": PEO{},
"PES": PES{},
"PID": PID{},
"PKG": PKG{},
"PMT": PMT{},
"PR1": PR1{},
"PRA": PRA{},
"PRB": PRB{},
"PRC": PRC{},
"PRD": PRD{},
"PRT": PRT{},
"PSG": PSG{},
"PSL": PSL{},
"PSS": PSS{},
"PTH": PTH{},
"PV1": PV1{},
"PV2": PV2{},
"PYE": PYE{},
"QAK": QAK{},
"QID": QID{},
"QPD": QPD{},
"QRD": QRD{},
"QRF": QRF{},
"QRI": QRI{},
"RCP": RCP{},
"RDF": RDF{},
"RDT": RDT{},
"REL": REL{},
"RF1": RF1{},
"RFI": RFI{},
"RGS": RGS{},
"RMI": RMI{},
"ROL": ROL{},
"RQ1": RQ1{},
"RQD": RQD{},
"RXA": RXA{},
"RXC": RXC{},
"RXD": RXD{},
"RXE": RXE{},
"RXG": RXG{},
"RXO": RXO{},
"RXR": RXR{},
"RXV": RXV{},
"SAC": SAC{},
"SCD": SCD{},
"SCH": SCH{},
"SCP": SCP{},
"SDD": SDD{},
"SFT": SFT{},
"SHP": SHP{},
"SID": SID{},
"SLT": SLT{},
"SPM": SPM{},
"STF": STF{},
"STZ": STZ{},
"TCC": TCC{},
"TCD": TCD{},
"TQ1": TQ1{},
"TQ2": TQ2{},
"TXA": TXA{},
"UAC": UAC{},
"UB1": UB1{},
"UB2": UB2{},
"URD": URD{},
"URS": URS{},
"VAR": VAR{},
"VND": VND{},
}
// Trigger lookup by ID.
var TriggerRegistry = map[string]any{
"ACK": ACK{},
"ADT_A01": ADT_A01{},
"ADT_A02": ADT_A02{},
"ADT_A03": ADT_A03{},
"ADT_A04": ADT_A04{},
"ADT_A05": ADT_A05{},
"ADT_A06": ADT_A06{},
"ADT_A07": ADT_A07{},
"ADT_A08": ADT_A08{},
"ADT_A09": ADT_A09{},
"ADT_A10": ADT_A10{},
"ADT_A11": ADT_A11{},
"ADT_A12": ADT_A12{},
"ADT_A13": ADT_A13{},
"ADT_A14": ADT_A14{},
"ADT_A15": ADT_A15{},
"ADT_A16": ADT_A16{},
"ADT_A17": ADT_A17{},
"ADT_A20": ADT_A20{},
"ADT_A21": ADT_A21{},
"ADT_A22": ADT_A22{},
"ADT_A23": ADT_A23{},
"ADT_A24": ADT_A24{},
"ADT_A25": ADT_A25{},
"ADT_A26": ADT_A26{},
"ADT_A27": ADT_A27{},
"ADT_A28": ADT_A28{},
"ADT_A29": ADT_A29{},
"ADT_A31": ADT_A31{},
"ADT_A32": ADT_A32{},
"ADT_A33": ADT_A33{},
"ADT_A37": ADT_A37{},
"ADT_A38": ADT_A38{},
"ADT_A40": ADT_A40{},
"ADT_A41": ADT_A41{},
"ADT_A42": ADT_A42{},
"ADT_A43": ADT_A43{},
"ADT_A44": ADT_A44{},
"ADT_A45": ADT_A45{},
"ADT_A47": ADT_A47{},
"ADT_A49": ADT_A49{},
"ADT_A50": ADT_A50{},
"ADT_A51": ADT_A51{},
"ADT_A52": ADT_A52{},
"ADT_A53": ADT_A53{},
"ADT_A54": ADT_A54{},
"ADT_A55": ADT_A55{},
"ADT_A60": ADT_A60{},
"ADT_A61": ADT_A61{},
"ADT_A62": ADT_A62{},
"BAR_P01": BAR_P01{},
"BAR_P02": BAR_P02{},
"BAR_P05": BAR_P05{},
"BAR_P06": BAR_P06{},
"BAR_P10": BAR_P10{},
"BAR_P12": BAR_P12{},
"BPS_O29": BPS_O29{},
"BRP_O30": BRP_O30{},
"BRT_O32": BRT_O32{},
"BTS_O31": BTS_O31{},
"CCF_I22": CCF_I22{},
"CCI_I22": CCI_I22{},
"CCM_I21": CCM_I21{},
"CCQ_I19": CCQ_I19{},
"CCR_I16": CCR_I16{},
"CCR_I17": CCR_I17{},
"CCR_I18": CCR_I18{},
"CCU_I20": CCU_I20{},
"CQU_I19": CQU_I19{},
"CRM_C01": CRM_C01{},
"CRM_C02": CRM_C02{},
"CRM_C03": CRM_C03{},
"CRM_C04": CRM_C04{},
"CRM_C05": CRM_C05{},
"CRM_C06": CRM_C06{},
"CRM_C07": CRM_C07{},
"CRM_C08": CRM_C08{},
"CSU_C09": CSU_C09{},
"CSU_C10": CSU_C10{},
"CSU_C11": CSU_C11{},
"CSU_C12": CSU_C12{},
"DBC_O41": DBC_O41{},
"DBU_O42": DBU_O42{},
"DEL_O46": DEL_O46{},
"DEO_O45": DEO_O45{},
"DER_O44": DER_O44{},
"DFT_P03": DFT_P03{},
"DFT_P11": DFT_P11{},
"DPR_O48": DPR_O48{},
"DRC_O47": DRC_O47{},
"DRG_O43": DRG_O43{},
"EAC_U07": EAC_U07{},
"EAN_U09": EAN_U09{},
"EAR_U08": EAR_U08{},
"EHC_E01": EHC_E01{},
"EHC_E02": EHC_E02{},
"EHC_E04": EHC_E04{},
"EHC_E10": EHC_E10{},
"EHC_E12": EHC_E12{},
"EHC_E13": EHC_E13{},
"EHC_E15": EHC_E15{},
"EHC_E20": EHC_E20{},
"EHC_E21": EHC_E21{},
"EHC_E24": EHC_E24{},
"ESR_U02": ESR_U02{},
"ESU_U01": ESU_U01{},
"INR_U06": INR_U06{},
"INU_U05": INU_U05{},
"LSR_U13": LSR_U13{},
"LSU_U12": LSU_U12{},
"MDM_T01": MDM_T01{},
"MDM_T02": MDM_T02{},
"MDM_T03": MDM_T03{},
"MDM_T04": MDM_T04{},
"MDM_T05": MDM_T05{},
"MDM_T06": MDM_T06{},
"MDM_T07": MDM_T07{},
"MDM_T08": MDM_T08{},
"MDM_T09": MDM_T09{},
"MDM_T10": MDM_T10{},
"MDM_T11": MDM_T11{},
"MFK_M02": MFK_M02{},
"MFK_M04": MFK_M04{},
"MFK_M05": MFK_M05{},
"MFK_M06": MFK_M06{},
"MFK_M07": MFK_M07{},
"MFK_M08": MFK_M08{},
"MFK_M09": MFK_M09{},
"MFK_M10": MFK_M10{},
"MFK_M11": MFK_M11{},
"MFK_M12": MFK_M12{},
"MFK_M13": MFK_M13{},
"MFK_M14": MFK_M14{},
"MFK_M15": MFK_M15{},
"MFK_M16": MFK_M16{},
"MFK_M17": MFK_M17{},
"MFN_M02": MFN_M02{},
"MFN_M04": MFN_M04{},
"MFN_M05": MFN_M05{},
"MFN_M06": MFN_M06{},
"MFN_M07": MFN_M07{},
"MFN_M08": MFN_M08{},
"MFN_M09": MFN_M09{},
"MFN_M10": MFN_M10{},
"MFN_M11": MFN_M11{},
"MFN_M12": MFN_M12{},
"MFN_M13": MFN_M13{},
"MFN_M14": MFN_M14{},
"MFN_M15": MFN_M15{},
"MFN_M16": MFN_M16{},
"MFN_M17": MFN_M17{},
"NMD_N02": NMD_N02{},
"OMB_O27": OMB_O27{},
"OMD_O03": OMD_O03{},
"OMG_O19": OMG_O19{},
"OMI_O23": OMI_O23{},
"OML_O21": OML_O21{},
"OML_O33": OML_O33{},
"OML_O35": OML_O35{},
"OML_O39": OML_O39{},
"OMN_O07": OMN_O07{},
"OMP_O09": OMP_O09{},
"OMQ_O42": OMQ_O42{},
"OMS_O05": OMS_O05{},
"OPL_O37": OPL_O37{},
"OPR_O38": OPR_O38{},
"OPU_R25": OPU_R25{},
"ORA_R33": ORA_R33{},
"ORA_R41": ORA_R41{},
"ORB_O28": ORB_O28{},
"ORD_O04": ORD_O04{},
"ORG_O20": ORG_O20{},
"ORI_O24": ORI_O24{},
"ORL_O22": ORL_O22{},
"ORL_O34": ORL_O34{},
"ORL_O36": ORL_O36{},
"ORL_O40": ORL_O40{},
"ORN_O08": ORN_O08{},
"ORP_O10": ORP_O10{},
"ORS_O06": ORS_O06{},
"ORU_R01": ORU_R01{},
"ORU_R30": ORU_R30{},
"ORU_R31": ORU_R31{},
"ORU_R32": ORU_R32{},
"ORU_R40": ORU_R40{},
"ORX_O43": ORX_O43{},
"OSM_R26": OSM_R26{},
"OSU_O41": OSU_O41{},
"OUL_R22": OUL_R22{},
"OUL_R23": OUL_R23{},
"OUL_R24": OUL_R24{},
"PEX_P07": PEX_P07{},
"PEX_P08": PEX_P08{},
"PGL_PC6": PGL_PC6{},
"PGL_PC7": PGL_PC7{},
"PGL_PC8": PGL_PC8{},
"PIN_I07": PIN_I07{},
"PMU_B01": PMU_B01{},
"PMU_B02": PMU_B02{},
"PMU_B03": PMU_B03{},
"PMU_B04": PMU_B04{},
"PMU_B05": PMU_B05{},
"PMU_B06": PMU_B06{},
"PMU_B07": PMU_B07{},
"PMU_B08": PMU_B08{},
"PPG_PCG": PPG_PCG{},
"PPG_PCH": PPG_PCH{},
"PPG_PCJ": PPG_PCJ{},
"PPP_PCB": PPP_PCB{},
"PPP_PCC": PPP_PCC{},
"PPP_PCD": PPP_PCD{},
"PPR_PC1": PPR_PC1{},
"PPR_PC2": PPR_PC2{},
"PPR_PC3": PPR_PC3{},
"QBP_E03": QBP_E03{},
"QBP_E22": QBP_E22{},
"QBP_Q11": QBP_Q11{},
"QBP_Q13": QBP_Q13{},
"QBP_Q15": QBP_Q15{},
"QBP_Q21": QBP_Q21{},
"QBP_Q22": QBP_Q22{},
"QBP_Q23": QBP_Q23{},
"QBP_Q24": QBP_Q24{},
"QBP_Q25": QBP_Q25{},
"QBP_Q31": QBP_Q31{},
"QBP_Q32": QBP_Q32{},
"QBP_Q33": QBP_Q33{},
"QBP_Q34": QBP_Q34{},
"QBP_Z73": QBP_Z73{},
"QBP_Z75": QBP_Z75{},
"QBP_Z77": QBP_Z77{},
"QBP_Z79": QBP_Z79{},
"QBP_Z81": QBP_Z81{},
"QBP_Z85": QBP_Z85{},
"QBP_Z87": QBP_Z87{},
"QBP_Z89": QBP_Z89{},
"QBP_Z91": QBP_Z91{},
"QBP_Z93": QBP_Z93{},
"QBP_Z95": QBP_Z95{},
"QBP_Z97": QBP_Z97{},
"QBP_Z99": QBP_Z99{},
"QBP_Znn": QBP_Znn{},
"QCN_J01": QCN_J01{},
"QSB_Q16": QSB_Q16{},
"QSB_Z83": QSB_Z83{},
"QSX_J02": QSX_J02{},
"QVR_Q17": QVR_Q17{},
"RAS_O17": RAS_O17{},
"RDE_O11": RDE_O11{},
"RDE_O25": RDE_O25{},
"RDR_RDR": RDR_RDR{},
"RDS_O13": RDS_O13{},
"RDY_K15": RDY_K15{},
"RDY_Z80": RDY_Z80{},
"RDY_Z98": RDY_Z98{},
"REF_I12": REF_I12{},
"REF_I13": REF_I13{},
"REF_I14": REF_I14{},
"REF_I15": REF_I15{},
"RGV_O15": RGV_O15{},
"RPA_I08": RPA_I08{},
"RPA_I09": RPA_I09{},
"RPA_I10": RPA_I10{},
"RPA_I11": RPA_I11{},
"RPI_I01": RPI_I01{},
"RPI_I04": RPI_I04{},
"RPL_I02": RPL_I02{},
"RPR_I03": RPR_I03{},
"RQA_I08": RQA_I08{},
"RQA_I09": RQA_I09{},
"RQA_I10": RQA_I10{},
"RQA_I11": RQA_I11{},
"RQI_I01": RQI_I01{},
"RQI_I02": RQI_I02{},
"RQI_I03": RQI_I03{},
"RQP_I04": RQP_I04{},
"RRA_O18": RRA_O18{},
"RRD_O14": RRD_O14{},
"RRE_O12": RRE_O12{},
"RRE_O26": RRE_O26{},
"RRG_O16": RRG_O16{},
"RRI_I12": RRI_I12{},
"RRI_I13": RRI_I13{},
"RRI_I14": RRI_I14{},
"RRI_I15": RRI_I15{},
"RSP_E03": RSP_E03{},
"RSP_E22": RSP_E22{},
"RSP_K11": RSP_K11{},
"RSP_K21": RSP_K21{},
"RSP_K22": RSP_K22{},
"RSP_K23": RSP_K23{},
"RSP_K24": RSP_K24{},
"RSP_K25": RSP_K25{},
"RSP_K31": RSP_K31{},
"RSP_K32": RSP_K32{},
"RSP_K33": RSP_K33{},
"RSP_K34": RSP_K34{},
"RSP_Z82": RSP_Z82{},
"RSP_Z84": RSP_Z84{},
"RSP_Z86": RSP_Z86{},
"RSP_Z88": RSP_Z88{},
"RSP_Z90": RSP_Z90{},
"RTB_K13": RTB_K13{},
"RTB_Z74": RTB_Z74{},
"RTB_Z76": RTB_Z76{},
"RTB_Z78": RTB_Z78{},
"RTB_Z92": RTB_Z92{},
"RTB_Z94": RTB_Z94{},
"RTB_Z96": RTB_Z96{},
"SCN_S37": SCN_S37{},
"SDN_S36": SDN_S36{},
"SDR_S31": SDR_S31{},
"SIU_S12": SIU_S12{},
"SIU_S13": SIU_S13{},
"SIU_S14": SIU_S14{},
"SIU_S15": SIU_S15{},
"SIU_S16": SIU_S16{},
"SIU_S17": SIU_S17{},
"SIU_S18": SIU_S18{},
"SIU_S19": SIU_S19{},
"SIU_S20": SIU_S20{},
"SIU_S21": SIU_S21{},
"SIU_S22": SIU_S22{},
"SIU_S23": SIU_S23{},
"SIU_S24": SIU_S24{},
"SIU_S26": SIU_S26{},
"SIU_S27": SIU_S27{},
"SLN_S34": SLN_S34{},
"SLN_S35": SLN_S35{},
"SLR_S28": SLR_S28{},
"SLR_S29": SLR_S29{},
"SMD_S32": SMD_S32{},
"SRM_S01": SRM_S01{},
"SRM_S02": SRM_S02{},
"SRM_S03": SRM_S03{},
"SRM_S04": SRM_S04{},
"SRM_S05": SRM_S05{},
"SRM_S06": SRM_S06{},
"SRM_S07": SRM_S07{},
"SRM_S08": SRM_S08{},
"SRM_S09": SRM_S09{},
"SRM_S10": SRM_S10{},
"SRM_S11": SRM_S11{},
"SRR_S01": SRR_S01{},
"SRR_S02": SRR_S02{},
"SRR_S03": SRR_S03{},
"SRR_S04": SRR_S04{},
"SRR_S05": SRR_S05{},
"SRR_S06": SRR_S06{},
"SRR_S07": SRR_S07{},
"SRR_S08": SRR_S08{},
"SRR_S09": SRR_S09{},
"SRR_S10": SRR_S10{},
"SRR_S11": SRR_S11{},
"SSR_U04": SSR_U04{},
"SSU_U03": SSU_U03{},
"STC_S33": STC_S33{},
"STI_S30": STI_S30{},
"TCR_U11": TCR_U11{},
"TCU_U10": TCU_U10{},
"UDM_Q05": UDM_Q05{},
"VXU_V04": VXU_V04{},
}
// Data Type lookup by ID.
var DataTypeRegistry = map[string]any{
"AUI": *(new(AUI)),
"CCD": *(new(CCD)),
"CNE": *(new(CNE)),
"CNN": *(new(CNN)),
"CP": *(new(CP)),
"CQ": *(new(CQ)),
"CWE": *(new(CWE)),
"CX": *(new(CX)),
"DDI": *(new(DDI)),
"DIN": *(new(DIN)),
"DLD": *(new(DLD)),
"DLN": *(new(DLN)),
"DLT": *(new(DLT)),
"DR": *(new(DR)),
"DT": *(new(DT)),
"DTM": *(new(DTM)),
"DTN": *(new(DTN)),
"ED": *(new(ED)),
"EI": *(new(EI)),
"EIP": *(new(EIP)),
"ERL": *(new(ERL)),
"FC": *(new(FC)),
"FN": *(new(FN)),
"FT": *(new(FT)),
"GTS": *(new(GTS)),
"HD": *(new(HD)),
"ICD": *(new(ICD)),
"ID": *(new(ID)),
"IS": *(new(IS)),
"JCC": *(new(JCC)),
"MO": *(new(MO)),
"MOC": *(new(MOC)),
"MOP": *(new(MOP)),
"MSG": *(new(MSG)),
"NA": *(new(NA)),
"NDL": *(new(NDL)),
"NM": *(new(NM)),
"NR": *(new(NR)),
"OCD": *(new(OCD)),
"OSP": *(new(OSP)),
"PIP": *(new(PIP)),
"PL": *(new(PL)),
"PLN": *(new(PLN)),
"PPN": *(new(PPN)),
"PRL": *(new(PRL)),
"PT": *(new(PT)),
"PTA": *(new(PTA)),
"RCD": *(new(RCD)),
"RFR": *(new(RFR)),
"RI": *(new(RI)),
"RMC": *(new(RMC)),
"RPT": *(new(RPT)),
"SAD": *(new(SAD)),
"SCV": *(new(SCV)),
"SI": *(new(SI)),
"SN": *(new(SN)),
"SNM": *(new(SNM)),
"SPD": *(new(SPD)),
"SRT": *(new(SRT)),
"ST": *(new(ST)),
"TM": *(new(TM)),
"TX": *(new(TX)),
"UVC": *(new(UVC)),
"VH": *(new(VH)),
"VID": *(new(VID)),
"XAD": *(new(XAD)),
"XCN": *(new(XCN)),
"XON": *(new(XON)),
"XPN": *(new(XPN)),
"XTN": *(new(XTN)),
"varies": *(new(varies)),
}
package nmea
import (
"fmt"
"github.com/martinlindhe/unit"
)
const (
	// TypeMWD is the sentence type identifier for MWD sentences.
	TypeMWD = "MWD"
)
// Sentence info:
//  1    Wind direction, 0.0 to 359.9 degrees True, to the nearest 0.1 degree
//  2    T: True
//  3    Wind direction, 0.0 to 359.9 degrees Magnetic, to the nearest 0.1 degree
//  4    M: Magnetic
//  5    Wind speed, knots, to the nearest 0.1 knot.
//  6    N: Knots
//  7    Wind speed, meters/second, to the nearest 0.1 m/s.
//  8    M: Meters/second

// MWD - Wind Direction & Speed
type MWD struct {
	BaseSentence
	WindDirectionTrue          Float64 // degrees true (field 1)
	WindDirectionMagnetic      Float64 // degrees magnetic (field 3)
	WindSpeedInKnots           Float64 // knots (field 5)
	WindSpeedInMetersPerSecond Float64 // meters/second (field 7)
}
// newMWD parses the fields of an MWD base sentence. Parser errors are
// accumulated and returned via p.Err().
func newMWD(s BaseSentence) (MWD, error) {
	p := NewParser(s)
	p.AssertType(TypeMWD)
	m := MWD{
		BaseSentence: s,
		// Parser field indices are zero-based; the unit-indicator fields
		// (T, M, N, M) at odd indices are skipped.
		WindDirectionTrue:          p.Float64(0, "WindDirectionTrue"),
		WindDirectionMagnetic:      p.Float64(2, "WindDirectionMagnetic"),
		WindSpeedInKnots:           p.Float64(4, "WindSpeedInKnots"),
		WindSpeedInMetersPerSecond: p.Float64(6, "WindSpeedInMetersPerSecond"),
	}
	return m, p.Err()
}
// GetTrueWindDirection retrieves the true wind direction from the sentence,
// converted from degrees to radians.
func (s MWD) GetTrueWindDirection() (float64, error) {
	v, err := s.WindDirectionTrue.GetValue()
	if err != nil {
		return 0, fmt.Errorf("value is unavailable")
	}
	return (unit.Angle(v) * unit.Degree).Radians(), nil
}
// GetMagneticWindDirection retrieves the magnetic wind direction from the
// sentence, converted from degrees to radians.
func (s MWD) GetMagneticWindDirection() (float64, error) {
	if v, err := s.WindDirectionMagnetic.GetValue(); err == nil {
		return (unit.Angle(v) * unit.Degree).Radians(), nil
	}
	return 0, fmt.Errorf("value is unavailable")
}
// GetWindSpeed retrieves wind speed from the sentence
func (s MWD) GetWindSpeed() (float64, error) {
if v, err := s.WindSpeedInMetersPerSecond.GetValue(); err == nil {
return v, nil
}
if v, err := s.WindSpeedInKnots.GetValue(); err == nil {
return (unit.Speed(v) * unit.Knot).MetersPerSecond(), nil
}
return 0, fmt.Errorf("value is unavailable")
} | mwd.go | 0.669637 | 0.439386 | mwd.go | starcoder |
package graphics
import (
"fmt"
"github.com/go-gl/gl/v4.1-core/gl"
"github.com/go-gl/mathgl/mgl32"
"github.com/go-gl/mathgl/mgl64"
"github.com/maxfish/gojira2d/pkg/utils"
)
const (
	// Float32Size is the size (in bytes) of a float32
	Float32Size = 4
)

// ModelMatrix matrix representing the primitive transformation
type ModelMatrix struct {
	mgl64.Mat4              // composed model matrix (translation·rotation·scale·anchor·size)
	mat32       mgl32.Mat4  // float32 copy of Mat4, kept for uploading to the GPU
	size        mgl64.Mat4  // scale matrix holding the primitive's pixel size
	translation mgl64.Mat4  // translation to the primitive's position
	rotation    mgl64.Mat4  // rotation around the Z axis
	scale       mgl64.Mat4  // user scale, with flip signs applied
	anchor      mgl64.Mat4  // negative translation to the anchor point
	dirty       bool        // true when Mat4/mat32 must be recomposed
}

// Primitive2D a drawing primitive on the XY plane
type Primitive2D struct {
	Primitive
	position    mgl64.Vec3 // X,Y position; Z is used for draw order
	scale       mgl64.Vec2
	size        mgl64.Vec2 // size in pixels
	anchor      mgl64.Vec2 // point of the primitive placed at position
	angle       float64    // rotation around Z, in radians
	flipX       bool
	flipY       bool
	color       Color
	modelMatrix ModelMatrix
}
// SetPosition sets the X,Y,Z position of the primitive. Z is used for the drawing order
func (p *Primitive2D) SetPosition(position mgl64.Vec3) {
	p.position = position
	p.modelMatrix.translation = mgl64.Translate3D(p.position.X(), p.position.Y(), p.position.Z())
	p.modelMatrix.dirty = true
}

// Position gets X,Y,Z of the primitive.
func (p *Primitive2D) Position() mgl64.Vec3 {
	return p.position
}

// SetAnchor sets the anchor point of the primitive, this will be the point placed at Position
func (p *Primitive2D) SetAnchor(anchor mgl64.Vec2) {
	p.anchor = anchor
	// The anchor is a negative translation so rotation and scale pivot around it.
	p.modelMatrix.anchor = mgl64.Translate3D(-p.anchor.X(), -p.anchor.Y(), 0)
	p.modelMatrix.dirty = true
}
// SetAnchorToCenter places the anchor at the geometric center of the primitive.
func (p *Primitive2D) SetAnchorToCenter() {
	half := mgl64.Vec2{p.size.X() / 2, p.size.Y() / 2}
	p.SetAnchor(half)
}
// Angle returns the current rotation around the Z axis, in radians.
func (p *Primitive2D) Angle() float64 {
	return p.angle
}

// SetAngle sets the rotation angle around the Z axis
func (p *Primitive2D) SetAngle(radians float64) {
	p.angle = radians
	p.modelMatrix.rotation = mgl64.HomogRotate3DZ(p.angle)
	p.modelMatrix.dirty = true
}
// Size returns the primitive's size in pixels.
func (p *Primitive2D) Size() mgl64.Vec2 {
	// mgl64.Vec2 is an array type, so returning it yields a copy.
	return p.size
}
// SetSize sets the size (in pixels) of the current primitive
func (p *Primitive2D) SetSize(size mgl64.Vec2) {
	p.size = size
	p.modelMatrix.size = mgl64.Scale3D(p.size.X(), p.size.Y(), 1)
	p.modelMatrix.dirty = true
}

// SetSizeFromTexture sets the size of the current primitive to the pixel size of the texture
func (p *Primitive2D) SetSizeFromTexture() {
	// No texture bound: keep the current size.
	if p.texture == nil {
		return
	}
	p.SetSize(mgl64.Vec2{float64(p.texture.width), float64(p.texture.height)})
}

// SetScale sets the scaling factor on X and Y for the primitive. The scaling respects the anchor and the rotation
func (p *Primitive2D) SetScale(scale mgl64.Vec2) {
	p.scale = scale
	p.rebuildScaleMatrix()
}

// SetFlipX flips the primitive around the Y axis
func (p *Primitive2D) SetFlipX(flipX bool) {
	p.flipX = flipX
	p.rebuildScaleMatrix()
}

// SetFlipY flips the primitive around the X axis
func (p *Primitive2D) SetFlipY(flipY bool) {
	p.flipY = flipY
	p.rebuildScaleMatrix()
}

// SetColor sets the color passed to the shader
func (p *Primitive2D) SetColor(color Color) {
	p.color = color
}
// SetUniforms sets the shader's uniform variables
func (p *Primitive2D) SetUniforms() {
	p.shaderProgram.SetUniform("color", &p.color)
	p.shaderProgram.SetUniform("model", p.ModelMatrix32())
}

// Draw draws the primitive, binding its own texture and shader program.
func (p *Primitive2D) Draw(context *Context) {
	shaderID := p.shaderProgram.ID()
	context.BindTexture(p.texture)
	gl.UseProgram(shaderID)
	cameraMatrix := context.Camera2D.ProjectionMatrix32()
	p.shaderProgram.SetUniform("projection", &cameraMatrix)
	p.SetUniforms()
	gl.BindVertexArray(p.vaoId)
	gl.DrawArrays(p.arrayMode, 0, p.arraySize)
}

// DrawInBatch draws the primitive assuming that the correct texture and shader are already bound
func (p *Primitive2D) DrawInBatch(context *Context) {
	p.SetUniforms()
	gl.BindVertexArray(p.vaoId)
	gl.DrawArrays(p.arrayMode, 0, p.arraySize)
}

// rebuildMatrices recomputes every component matrix from the current state
// and marks the composed model matrix dirty.
func (p *Primitive2D) rebuildMatrices() {
	p.modelMatrix.translation = mgl64.Translate3D(p.position.X(), p.position.Y(), p.position.Z())
	p.modelMatrix.anchor = mgl64.Translate3D(-p.anchor.X(), -p.anchor.Y(), 0)
	p.modelMatrix.rotation = mgl64.HomogRotate3DZ(p.angle)
	p.modelMatrix.size = mgl64.Scale3D(p.size.X(), p.size.Y(), 1)
	p.rebuildScaleMatrix()
	p.modelMatrix.dirty = true
}
// rebuildScaleMatrix recomputes the scale matrix, negating an axis for each
// active flip flag, and marks the model matrix dirty.
func (p *Primitive2D) rebuildScaleMatrix() {
	sx, sy := p.scale.X(), p.scale.Y()
	if p.flipX {
		sx = -sx
	}
	if p.flipY {
		sy = -sy
	}
	p.modelMatrix.scale = mgl64.Scale3D(sx, sy, 1)
	p.modelMatrix.dirty = true
}
// rebuildModelMatrix lazily recomposes the model matrix when marked dirty.
func (p *Primitive2D) rebuildModelMatrix() {
	if p.modelMatrix.dirty {
		// Compose as translation * rotation * scale * anchor * size — the order matters.
		p.modelMatrix.Mat4 = p.modelMatrix.translation.Mul4(p.modelMatrix.rotation).Mul4(p.modelMatrix.scale).Mul4(p.modelMatrix.anchor).Mul4(p.modelMatrix.size)
		// updates the float32 version
		p.modelMatrix.mat32 = utils.Mat4From64to32Bits(p.modelMatrix.Mat4)
		p.modelMatrix.dirty = false
	}
}

// ModelMatrix returns the current model matrix
func (p *Primitive2D) ModelMatrix() *mgl64.Mat4 {
	p.rebuildModelMatrix()
	return &p.modelMatrix.Mat4
}

// ModelMatrix32 returns the current model matrix as mgl32.Mat4
func (p *Primitive2D) ModelMatrix32() *mgl32.Mat4 {
	p.rebuildModelMatrix()
	return &p.modelMatrix.mat32
}
// NewQuadPrimitive creates a textured rectangular primitive at the given
// position and pixel size.
func NewQuadPrimitive(position mgl64.Vec3, size mgl64.Vec2) *Primitive2D {
	q := &Primitive2D{
		position: position,
		size:     size,
		scale:    mgl64.Vec2{1, 1},
	}
	q.shaderProgram = NewShaderProgram(VertexShaderBase, "", FragmentShaderTexture)
	q.rebuildMatrices()
	q.arrayMode = gl.TRIANGLE_FAN
	q.arraySize = 4
	// Build the VAO: unit quad with matching UV coordinates.
	q.SetVertices([]float32{0, 0, 0, 1, 1, 1, 1, 0})
	q.SetUVCoords([]float32{0, 0, 0, 1, 1, 1, 1, 0})
	return q
}
// NewRegularPolygonPrimitive creates a solid-color primitive approximating a
// circle of the given radius with numSegments segments around center.
// NOTE(review): on invalid input it prints the error and returns nil — callers
// must check for nil.
func NewRegularPolygonPrimitive(center mgl64.Vec3, radius float64, numSegments int, filled bool) *Primitive2D {
	circlePoints, err := utils.CircleToPolygon(mgl64.Vec2{0, 0}, radius, numSegments, 0)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	q := &Primitive2D{
		position: center,
		size:     mgl64.Vec2{1, 1},
		scale:    mgl64.Vec2{1, 1},
	}
	q.shaderProgram = NewShaderProgram(VertexShaderBase, "", FragmentShaderSolidColor)
	q.rebuildMatrices()
	// Vertices
	vertices := make([]float32, 0, numSegments*2)
	for _, v := range circlePoints {
		vertices = append(vertices, float32(v[0]), float32(v[1]))
	}
	// Add one vertex for the last line
	vertices = append(vertices, float32(circlePoints[0][0]), float32(circlePoints[0][1]))
	if filled {
		q.arrayMode = gl.TRIANGLE_FAN
	} else {
		q.arrayMode = gl.LINE_STRIP
	}
	q.SetVertices(vertices)
	return q
}
// NewTriangles creates a primitive as a collection of triangles using a
// caller-supplied texture and shader program.
func NewTriangles(
	vertices []float32,
	uvCoords []float32,
	texture *Texture,
	position mgl64.Vec3,
	size mgl64.Vec2,
	shaderProgram *ShaderProgram,
) *Primitive2D {
	p := &Primitive2D{
		position: position,
		size:     size,
		scale:    mgl64.Vec2{1, 1},
	}
	p.arrayMode = gl.TRIANGLES
	p.arraySize = int32(len(vertices) / 2) // two floats (x, y) per vertex
	p.texture = texture
	p.shaderProgram = shaderProgram
	p.rebuildMatrices()
	gl.GenVertexArrays(1, &p.vaoId)
	gl.BindVertexArray(p.vaoId)
	p.SetVertices(vertices)
	p.SetUVCoords(uvCoords)
	gl.BindVertexArray(0)
	return p
}
// NewPolylinePrimitive creates a line-strip primitive from a sequence of
// points. Point coordinates are relative to center, which becomes the
// primitive's position. When closed is true the first point is repeated at
// the end to close the loop.
func NewPolylinePrimitive(center mgl64.Vec3, points []mgl64.Vec2, closed bool) *Primitive2D {
	primitive := &Primitive2D{
		position: center,
		size:     mgl64.Vec2{1, 1},
		scale:    mgl64.Vec2{1, 1},
	}
	primitive.shaderProgram = NewShaderProgram(VertexShaderBase, "", FragmentShaderSolidColor)
	primitive.rebuildMatrices()
	// Flatten the points into the float32 (x, y) vertex layout; reserve room
	// for the optional closing vertex so a closed loop never reallocates.
	numVertices := int32(len(points))
	vertices := make([]float32, 0, (numVertices+1)*2)
	for _, pt := range points {
		vertices = append(vertices, float32(pt[0]), float32(pt[1]))
	}
	if closed && len(vertices) >= 2 {
		// Repeat the first point to close the loop. The length guard avoids a
		// panic on an empty point list.
		vertices = append(vertices, vertices[0], vertices[1])
		numVertices++
	}
	primitive.arrayMode = gl.LINE_STRIP
	primitive.arraySize = numVertices
	primitive.SetVertices(vertices)
	return primitive
}
// SetVertices uploads a new set of vertices into the OpenGL buffer, lazily
// creating the VAO/VBO on first use. arraySize is updated to the vertex count.
func (p *Primitive2D) SetVertices(vertices []float32) {
	if p.vaoId == 0 {
		gl.GenVertexArrays(1, &p.vaoId)
	}
	gl.BindVertexArray(p.vaoId)
	if p.vboVertices == 0 {
		gl.GenBuffers(1, &p.vboVertices)
	}
	gl.BindBuffer(gl.ARRAY_BUFFER, p.vboVertices)
	gl.BufferData(gl.ARRAY_BUFFER, len(vertices)*Float32Size, gl.Ptr(vertices), gl.STATIC_DRAW)
	gl.EnableVertexAttribArray(0)
	gl.VertexAttribPointer(0, 2, gl.FLOAT, false, 0, gl.PtrOffset(0))
	// Two floats (x, y) per vertex.
	p.arraySize = int32(len(vertices) / 2)
	gl.BindVertexArray(0)
}
// SetUVCoords uploads new UV coordinates
func (p *Primitive2D) SetUVCoords(uvCoords []float32) {
if p.vaoId == 0 {
gl.GenVertexArrays(1, &p.vaoId)
}
gl.BindVertexArray(p.vaoId)
if p.vboUVCoords == 0 {
gl.GenBuffers(1, &p.vboUVCoords)
}
gl.BindBuffer(gl.ARRAY_BUFFER, p.vboUVCoords)
gl.BufferData(gl.ARRAY_BUFFER, len(uvCoords)*Float32Size, gl.Ptr(uvCoords), gl.STATIC_DRAW)
gl.EnableVertexAttribArray(1)
gl.VertexAttribPointer(1, 2, gl.FLOAT, false, 0, gl.PtrOffset(0))
gl.BindVertexArray(0)
} | pkg/graphics/primitive_2d.go | 0.81772 | 0.620334 | primitive_2d.go | starcoder |
package cryptypes
import (
	"database/sql/driver"
	"fmt"
)
// EncryptedInt supports encrypting Int data
type EncryptedInt struct {
	Field
	Raw int // plaintext value; encrypted transparently on save
}

// Scan converts the value from the DB into a usable EncryptedInt value.
// A non-[]byte driver value returns an error instead of panicking on the
// type assertion, per the sql.Scanner contract.
func (s *EncryptedInt) Scan(value interface{}) error {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into EncryptedInt", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized EncryptedInt value into a value that can safely be stored in the DB
func (s EncryptedInt) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}
// NullEncryptedInt supports encrypting nullable Int data
type NullEncryptedInt struct {
	Field
	Raw   int  // plaintext value; zero when Empty
	Empty bool // true when the DB column was NULL
}

// Scan converts the value from the DB into a usable NullEncryptedInt value.
// A NULL column sets Empty. A non-[]byte driver value returns an error
// instead of panicking on the type assertion.
func (s *NullEncryptedInt) Scan(value interface{}) error {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullEncryptedInt", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized NullEncryptedInt value into a value that can
// safely be stored in the DB. An Empty value is stored as SQL NULL.
func (s NullEncryptedInt) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}
// SignedInt supports signing Int data
type SignedInt struct {
	Field
	Raw   int  // plaintext value
	Valid bool // true when the stored signature verified correctly
}

// Scan converts the value from the DB into a usable SignedInt value,
// verifying the stored signature. A non-[]byte driver value returns an error
// instead of panicking on the type assertion.
func (s *SignedInt) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedInt", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized SignedInt value into a value that can safely be stored in the DB
func (s SignedInt) Value() (driver.Value, error) {
	return sign(s.Raw)
}
// NullSignedInt supports signing nullable Int data
type NullSignedInt struct {
	Field
	Raw   int  // plaintext value; zero when Empty
	Empty bool // true when the DB column was NULL
	Valid bool // true when the stored signature verified (NULL counts as valid)
}

// Scan converts the value from the DB into a usable NullSignedInt value,
// verifying the stored signature. A NULL column sets Empty and Valid.
// A non-[]byte driver value returns an error instead of panicking.
func (s *NullSignedInt) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullSignedInt", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized NullSignedInt value into a value that can
// safely be stored in the DB. An Empty value is stored as SQL NULL.
func (s NullSignedInt) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}
// SignedEncryptedInt supports signing and encrypting Int data
type SignedEncryptedInt struct {
	Field
	Raw   int  // plaintext value
	Valid bool // true when the stored signature verified correctly
}

// Scan converts the value from the DB into a usable SignedEncryptedInt value,
// decrypting and verifying the stored signature. A non-[]byte driver value
// returns an error instead of panicking on the type assertion.
func (s *SignedEncryptedInt) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedEncryptedInt", value)
	}
	s.Valid, err = decryptVerify(b, &s.Raw)
	return
}

// Value converts an initialized SignedEncryptedInt value into a value that can safely be stored in the DB
func (s SignedEncryptedInt) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}
// NullSignedEncryptedInt supports signing and encrypting nullable Int data
type NullSignedEncryptedInt struct {
Field
Raw int
Empty bool
Valid bool
}
// Scan converts the value from the DB into a usable NullSignedEncryptedInt value
func (s *NullSignedEncryptedInt) Scan(value interface{}) (err error) {
if value == nil {
s.Raw = 0
s.Empty = true
s.Valid = true
return nil
}
s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
return
}
// Value converts an initialized NullSignedEncryptedInt value into a value that can safely be stored in the DB
func (s NullSignedEncryptedInt) Value() (driver.Value, error) {
if s.Empty {
return nil, nil
}
return encryptSign(s.Raw)
} | cryptypes/type_int.go | 0.8059 | 0.585131 | type_int.go | starcoder |
package base36
// Simplified code based on https://godoc.org/github.com/mr-tron/base58
// which in turn is based on https://github.com/trezor/trezor-crypto/commit/89a7d7797b806fac
import (
"fmt"
)
// UcAlphabet is the upper-case base36 digit alphabet.
const UcAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"

// LcAlphabet is the lower-case base36 digit alphabet.
const LcAlphabet = "0123456789abcdefghijklmnopqrstuvwxyz"

const maxDigitOrdinal = 'z' // highest byte value that can be a base36 digit
const maxDigitValueB36 = 35 // numeric value of the highest base36 digit

// revAlphabet maps a digit byte (either case) back to its numeric value;
// entries greater than maxDigitValueB36 mark invalid digit bytes.
var revAlphabet [maxDigitOrdinal + 1]byte
func init() {
for i := range revAlphabet {
revAlphabet[i] = maxDigitValueB36 + 1
}
for i, c := range UcAlphabet {
revAlphabet[byte(c)] = byte(i)
if c > '9' {
revAlphabet[byte(c)+32] = byte(i)
}
}
}
// EncodeToStringUc encodes the given byte-buffer as base36 using [0-9A-Z] as
// the digit-alphabet.
func EncodeToStringUc(b []byte) string { return encode(b, UcAlphabet) }
// EncodeToStringLc encodes the given byte-buffer as base36 using [0-9a-z] as
// the digit-alphabet.
func EncodeToStringLc(b []byte) string { return encode(b, LcAlphabet) }
// encode converts inBuf to a big-endian base36 string drawn from the
// 36-character alphabet al. Each leading zero byte of the input is preserved
// as one leading zero digit in the output, mirroring base58-style encoders.
func encode(inBuf []byte, al string) string {
	// Count leading zero bytes; they are carried through verbatim.
	zeros := 0
	for zeros < len(inBuf) && inBuf[zeros] == 0 {
		zeros++
	}
	// 277/179 is an integer over-approximation of log(256)/log(36), so the
	// significant digits always fit in the slots after the zero prefix.
	capacity := zeros + (len(inBuf)-zeros)*277/179 + 1
	digits := make([]byte, capacity) // base36 digit values, big-endian
	// high tracks the left-most slot that may already hold a nonzero digit.
	high := capacity - 1
	for _, b := range inBuf[zeros:] {
		// Multiply the accumulated number by 256 and add the next byte,
		// rippling the carry leftwards through the digit array.
		carry := uint32(b)
		i := capacity - 1
		for ; i > high || carry != 0; i-- {
			carry += 256 * uint32(digits[i])
			digits[i] = byte(carry % 36)
			carry /= 36
		}
		high = i
	}
	// Skip zero digits beyond the preserved prefix to find the first
	// significant digit.
	start := zeros
	for start < capacity && digits[start] == 0 {
		start++
	}
	// Render: one al[0] per leading zero byte, then the significant digits.
	res := make([]byte, zeros+capacity-start)
	for i := range res {
		if i < zeros {
			res[i] = al[0]
		} else {
			res[i] = al[digits[start+i-zeros]]
		}
	}
	return string(res)
}
// DecodeString takes a base36 encoded string and returns a slice of the decoded
// bytes. Leading '0' digits are preserved as leading zero bytes. Both upper-
// and lower-case digits are accepted (revAlphabet maps both).
func DecodeString(s string) ([]byte, error) {
if len(s) == 0 {
return nil, fmt.Errorf("can not decode zero-length string")
}
// Count leading zero digits; they map one-to-one to leading zero bytes.
zcnt := 0
for zcnt < len(s) && s[zcnt] == '0' {
zcnt++
}
// the 32bit algo stretches the result up to 2 times
binu := make([]byte, 2*(((len(s))*179/277)+1)) // no more than 84 bytes when len(s) <= 64
outi := make([]uint32, (len(s)+3)/4)           // no more than 16 bytes when len(s) <= 64
for _, r := range s {
// Reject anything outside the base36 digit set.
if r > maxDigitOrdinal || revAlphabet[r] > maxDigitValueB36 {
return nil, fmt.Errorf("invalid base36 character (%q)", r)
}
c := uint64(revAlphabet[r])
// Multiply the big number held in outi (base 2^32 limbs) by 36 and add
// the new digit, propagating the carry toward the most significant limb.
for j := len(outi) - 1; j >= 0; j-- {
t := uint64(outi[j])*36 + c
c = (t >> 32)
outi[j] = uint32(t & 0xFFFFFFFF)
}
}
// mask selects which bytes of the top limb are meaningful, based on how
// many input digits there were modulo the 4-digits-per-limb packing.
mask := (uint(len(s)%4) * 8)
if mask == 0 {
mask = 32
}
mask -= 8
outidx := 0
// Serialize the limbs most-significant byte first.
for j := 0; j < len(outi); j++ {
for mask < 32 { // loop relies on uint overflow
binu[outidx] = byte(outi[j] >> mask)
mask -= 8
outidx++
}
mask = 24
}
// find the most significant byte post-decode, if any
for msb := zcnt; msb < outidx; msb++ {
if binu[msb] > 0 {
// Keep zcnt zero bytes in front of the significant payload.
return binu[msb-zcnt : outidx : outidx], nil
}
}
// it's all zeroes
return binu[:outidx:outidx], nil
} | base36.go | 0.690037 | 0.443661 | base36.go | starcoder
package main
/* Day 10 part A
For a given sequence of lengths (input) apply the following rules to a circular
list of size 256 ints (numbered 0 to 255):
Starting with the first item in the list of numbers reverse the order of the
first n digits where n is the first length (input).
Once the reversal is complete move forward (skip) that many places in the list.
Repeat this process for each input.
Once the input is processed multiply the first two digits in the list and
provide the answer.
Part B:
Step 1:
Treat input as a string of bytes instead of numbers. Convert characters to bytes
using their ASCII codes. Append 17,31,73,47,23 to the end of each input
sequence. ex 1,2,3 -> 49,44,50,44,51,17,31,73,47,23 (note: `,` gets converted,
too). The ASCII codes (base 10) are the new numbers to use in place of treating
the provided input as a comma-separated list of numbers.
Step 2:
The part A solution is merely one "round" of the overall computation. For part
B, apply 64 rounds in total, using the same length sequence ("input") in each
round. The current position and skip size should be preserved between rounds.
Step 3:
Once the rounds have been completed the remaining (0..255) is called a sparse
hash. Reduce that list to one, of only 16 numbers (called the dense hash). To do
this, use numeric bitwise XOR to combine each consecutive block of 16 numbers
in the sparse hash (there are 16 such blocks in a list of 256 numbers). So, the
first element in the dense hash is the first sixteen elements of the sparse
hash XOR'd together, the second element in the dense hash is the second sixteen
elements of the sparse hash XOR'd together, etc.
Step 4:
Perform this operation on each of the 16 blocks of 16 numbers in the sparse
hash to determine the 16 numbers in the dense hash.
Step 5:
Represent the dense hash as a hex string. Convert each number to hex with
leading zero if necessary.
example hashes:
input = hash
"" = a2582a3a0e66e6e86e3812dcb672a272
AoC 2017 = 33efeb34ea91902bb2f59c9920caa6cd.
1,2,3 = 3efbe78a8d82f29979031a4aa0b16a9d.
1,2,4 = 63960835bcdc130f0b66d7ff4f6a5a8e
*/
import (
"container/ring"
"flag"
"fmt"
"os"
"strconv"
"strings"
)
// Command-line configuration: puzzle input, list size, debug switches, and
// which part of the puzzle to solve.
var input = flag.String("input", "3,4,1,5", "Input for day 10")
var listLen = flag.Int("listLen", 5, "Length of the list")
var debug = flag.Bool("debug", false, "Debug output")
var debugHashComputation = flag.Bool("debugHashComputation", false, "Debug hash computations")
var partB = flag.Bool("partB", false, "Perform part B solution?")
// Append this to the input after it has been converted to ASCII for part B.
var partBSuffix = []int{17, 31, 73, 47, 23}
// ComputeDenseHash reduces the 256-entry sparse hash held in the ring to the
// 16-byte dense hash of part B (steps 3-4): each output byte is the XOR of
// one consecutive block of 16 ring values.
//
// The parameter was previously named "ring", shadowing the imported
// container/ring package inside the body; it is renamed to r, and the
// intermediate []int is replaced by a single accumulator per chunk.
func ComputeDenseHash(r *ring.Ring) []byte {
	dense := make([]byte, 16)
	for chunk := 0; chunk < 16; chunk++ {
		if *debugHashComputation {
			fmt.Printf("ComputeDenseHash chunk %d\n", chunk)
		}
		// Seed the chunk with the first of its 16 values for ^=.
		acc := r.Value.(int)
		if *debugHashComputation {
			fmt.Printf(" * (digit=0) = %d\n", acc)
		}
		r = r.Next()
		for digit := 1; digit < 16; digit++ {
			// Fold the digit-th value of this chunk into the accumulator.
			if *debugHashComputation {
				fmt.Printf(" * (digit=%d) %d ^ %d == %d\n", digit, acc, r.Value.(int), acc^r.Value.(int))
			}
			acc ^= r.Value.(int)
			r = r.Next()
		} // finished 16 digits
		if *debugHashComputation {
			fmt.Printf("Chunk %d: %d=%x\n", chunk, acc, acc)
		}
		dense[chunk] = byte(acc)
	} // done with the chunks
	return dense
}
// PrintRing writes the ring's values on one line as "a->b->c", terminated by
// a newline. Any element whose value equals highlight is wrapped in
// parentheses; pass -1 (or any absent value) to highlight nothing.
func PrintRing(r *ring.Ring, highlight int) {
	n := r.Len()
	for i := 0; i < n; i++ {
		if r.Value == highlight {
			fmt.Printf("(%d)", r.Value)
		} else {
			fmt.Printf("%d", r.Value)
		}
		if i == n-1 {
			fmt.Printf("\n")
		} else {
			fmt.Printf("->")
		}
		r = r.Next()
	}
}
// PrintRingFrom prints the ring starting at the first element whose value
// equals startAt. A negative startAt, or a startAt that does not occur in the
// ring, prints from the current position instead.
//
// The previous implementation copied the ring and then searched it with an
// unbounded loop (`for tempRing.Value != startAt || ...`), which spun forever
// when startAt was absent. The search is now bounded to one full revolution,
// and the copy is unnecessary: advancing a local *ring.Ring variable never
// disturbs the caller's position.
func PrintRingFrom(r *ring.Ring, startAt int) {
	if startAt < 0 {
		PrintRing(r, -1)
		return
	}
	start := r
	for i := 0; i < r.Len(); i++ {
		if start.Value == startAt {
			break
		}
		start = start.Next()
	}
	// If startAt was not found, start has wrapped back around to r, so we
	// simply print from the original position.
	PrintRing(start, -1)
}
// ReverseRingSlice returns a new ring of the same length in which the first
// sliceLen values (starting at r's current position) appear reversed and the
// remaining values keep their original order. The caller's ring is read but
// never mutated.
//
// The original version ended with a loop that rotated the result by its full
// length — a no-op — which has been removed.
func ReverseRingSlice(r *ring.Ring, sliceLen int) *ring.Ring {
	if sliceLen <= 1 {
		// Reversing zero or one element changes nothing.
		return r
	}
	returnRing := ring.New(r.Len())
	// Walk the slice backwards (from its last element) into a scratch ring,
	// producing the reversed run.
	r = r.Move(sliceLen - 1)
	newRing := ring.New(sliceLen)
	for i := 0; i < sliceLen; i++ {
		newRing.Value = r.Value
		newRing = newRing.Next()
		r = r.Prev()
	}
	if *debug {
		fmt.Printf("Set up temporary ring (len=%d): ", newRing.Len())
		PrintRing(newRing, -1)
	}
	// Reposition r just past the reversed run so it yields the untouched
	// remainder in order; +1 undoes the final Prev() above.
	r = r.Move(sliceLen + 1)
	// Build the result: the reversed run first, then the remainder.
	for i := 0; i < returnRing.Len(); i++ {
		if i < newRing.Len() {
			returnRing.Value = newRing.Value
			newRing = newRing.Next()
		} else {
			returnRing.Value = r.Value
			r = r.Next()
		}
		returnRing = returnRing.Next()
	}
	// After a full pass returnRing is back at the start of the reversed run.
	return returnRing
}
/*
return a slice of input numbers.
For part A this will treat `input` as a comma-separated list.
For part B this will treat each character as something to convert to its ASCII
representation, then append the standard part-B suffix (17,31,73,47,23).
*/
func userInputToLengths(input *string) []int {
ret := make([]int, 0)
if *partB {
// Part B: every character (commas included) becomes its ASCII code.
for _, char := range strings.Split(*input, "") {
ret = append(ret, int(char[0]))
}
// Then the fixed suffix required by the puzzle.
for i := 0; i < len(partBSuffix); i++ {
ret = append(ret, partBSuffix[i])
}
} else {
// Part A: parse the input as a comma-separated list of integers.
for _, numberString := range strings.Split(*input, ",") {
number, err := strconv.Atoi(numberString)
if err != nil {
fmt.Printf("Couldn't convert %s to a number: %s\n", numberString, err)
os.Exit(1)
}
ret = append(ret, number)
}
}
return ret
}
/* Performs a single round of the knot hash: for each input length, reverse
that many elements of the ring, then advance by length+skipSize. skipSize and
totalSkips are shared across rounds via pointers: skipSize grows by one per
length, and totalSkips accumulates every advance so main can rewind to the
logical start afterwards. Returns the ring at its new current position. */
func doRound(inputLengths []int, skipSize, totalSkips *int, ring *ring.Ring) *ring.Ring {
if *debug {
fmt.Printf("doRound inputLengths: %v, skipSize=%d, totalSkips=%d\n", inputLengths, *skipSize, *totalSkips)
fmt.Printf("Input ring ")
PrintRingFrom(ring, -1)
}
for number := 0; number < len(inputLengths); number++ {
if *debug {
fmt.Printf(" Reversing %d digits\n", inputLengths[number])
}
ring = ReverseRingSlice(ring, inputLengths[number])
if *debug {
fmt.Printf(" Reversed portion : ")
PrintRingFrom(ring, -1)
}
// Current position moves forward by length + skipSize
if *debug {
fmt.Printf("Skipping main ring by length=%d+skipsize=%d total=%d\n", inputLengths[number], *skipSize, inputLengths[number]+*skipSize)
}
ring = ring.Move(inputLengths[number] + *skipSize)
if *debug {
fmt.Printf("Input (%d/%d) Skip ring: ", number+1, len(inputLengths))
PrintRingFrom(ring, -1)
}
// Save the total number of skips for later rewinding
*totalSkips += inputLengths[number] + *skipSize
// Then increase skipSize
*skipSize += 1
} // done with input
return ring
}
// main wires the whole puzzle together: build the 0..listLen-1 ring, parse
// the input into lengths, run the configured number of rounds, rewind to the
// logical start, then report either the part A product or the part B hash.
func main() {
flag.Parse()
skipSize := 0
totalSkips := 0
var rounds int
// NOTE(review): this local shadows the container/ring package for the rest
// of main; it works because the package name is not needed again.
ring := ring.New(*listLen)
for i := 0; i < *listLen; i++ {
ring.Value = i
ring = ring.Next()
}
// Step 1
inputLengths := *input
if *partB {
// Part B always runs 64 rounds; the suffix is appended later inside
// userInputToLengths.
rounds = 64
// inputLengths += partBSuffix
} else {
rounds = 1
}
if *debug {
fmt.Printf("rounds: %d\n", rounds)
}
numbers := userInputToLengths(&inputLengths)
if *debug {
fmt.Printf("Input lengths: %d, literal=%s\n", numbers, inputLengths)
}
//Step 1 complete
// Step 2 - Perform rounds
for i := 0; i < rounds; i++ {
if *debug {
fmt.Printf("Running round %d. skipSize=%d, totalSkips=%d, numbers=%v\n",
i+1, skipSize, totalSkips, numbers)
}
ring = doRound(numbers, &skipSize, &totalSkips, ring)
if *debug {
fmt.Printf("Round %d is over. skipSize=%d, totalSkips=%d\n", i+1, skipSize, totalSkips)
fmt.Printf("Ring: ")
PrintRing(ring, -1)
fmt.Println()
}
}
//part A only?
// NOTE(review): `true ||` makes this branch unconditional — the rewind now
// runs for both parts; the condition is leftover debugging.
if true || !*partB {
// Go back to the "beginning," undoing skipping from rounds.
if *debug {
fmt.Printf("Moving back %d spots to get back to the 'start' of the ring\n", -1*totalSkips)
}
ring = ring.Move(-1 * totalSkips)
}
if *debug {
fmt.Printf("Ring after rounds: ")
PrintRing(ring, -1)
}
// Steps 3-4
dense := ComputeDenseHash(ring)
// Step 5
if *partB {
fmt.Printf("Dense hash: %02x\n", dense)
} else {
fmt.Printf("Product of first two %d * %d = %d\n", ring.Value, ring.Next().Value, (ring.Value.(int))*(ring.Next().Value.(int)))
}
} | 2017/day10.go | 0.770594 | 0.768168 | day10.go | starcoder
package expression
import (
"fmt"
"reflect"
)
// Handler signatures used by the op/bop dispatchers below: one per operand
// type (numbers, strings, booleans, DateTimes), plus a fallback that runs
// when no typed handler matches.
type numberOp func(x float64, y float64) interface{}
type stringOp func(x string, y string) interface{}
type boolOp func(x bool, y bool) interface{}
type timeOp func(x *DateTime, y *DateTime) interface{}
type opOther func() (interface{}, error)
// op dispatches a binary operator by operand type: float64 pairs go to no,
// string pairs to so, *DateTime pairs to to, and anything else to the
// fallback oe. A handler is consulted only when it is non-nil and BOTH
// operands match its type; a mixed-type pair always falls through to oe.
func op(x interface{}, y interface{}, no numberOp, so stringOp, to timeOp, oe opOther) (interface{}, error) {
	if no != nil {
		if xn, xOK := x.(float64); xOK {
			if yn, yOK := y.(float64); yOK {
				return no(xn, yn), nil
			}
		}
	}
	if so != nil {
		if xs, xOK := x.(string); xOK {
			if ys, yOK := y.(string); yOK {
				return so(xs, ys), nil
			}
		}
	}
	if to != nil {
		if xt, xOK := x.(*DateTime); xOK {
			if yt, yOK := y.(*DateTime); yOK {
				return to(xt, yt), nil
			}
		}
	}
	return oe()
}
// bop dispatches a boolean binary operator: when both operands are bools and
// a bool handler is supplied, the handler is applied; otherwise the fallback
// oe decides the outcome.
func bop(x interface{}, y interface{}, bo boolOp, oe opOther) (interface{}, error) {
	xb, xOK := x.(bool)
	yb, yOK := y.(bool)
	if bo != nil && xOK && yOK {
		return bo(xb, yb), nil
	}
	return oe()
}
// add implements '+': numeric addition for float64 pairs, concatenation for
// string pairs. Any other combination falls back to stringifying both
// operands with %v and concatenating the results, so add never errors.
func add(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x + y
}, func(x string, y string) interface{} {
return x + y
}, nil, func() (interface{}, error) {
xs := fmt.Sprintf("%v", x)
ys := fmt.Sprintf("%v", y)
return xs + ys, nil
})
}
// subtract implements '-' for float64 pairs; other types are an error.
func subtract(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x - y
}, nil, nil, func() (interface{}, error) {
return nil, fmt.Errorf("cannot subtract %v from %v", y, x)
})
}
// multiply implements '*' for float64 pairs; other types are an error.
func multiply(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x * y
}, nil, nil, func() (interface{}, error) {
return nil, fmt.Errorf("cannot multiply %v and %v", x, y)
})
}
// divide implements '/' for float64 pairs; other types are an error.
// Division by zero follows Go's float64 rules (yielding +/-Inf or NaN)
// rather than failing.
func divide(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x / y
}, nil, nil, func() (interface{}, error) {
return nil, fmt.Errorf("cannot divide %v by %v", x, y)
})
}
// rem implements '%' with truncated integer semantics: both operands are
// converted to int64 before the remainder is taken. A divisor that truncates
// to zero is reported as an error up front, where it previously caused a
// runtime integer-divide-by-zero panic inside the number handler.
func rem(x interface{}, y interface{}) (interface{}, error) {
	if xf, xOK := x.(float64); xOK {
		if yf, yOK := y.(float64); yOK && int64(yf) == 0 {
			_ = xf
			return nil, fmt.Errorf("cannot divide %v by %v (remainder)", x, y)
		}
	}
	return op(x, y, func(x float64, y float64) interface{} {
		return float64(int64(x) % int64(y))
	}, nil, nil, func() (interface{}, error) {
		return nil, fmt.Errorf("cannot divide %v by %v (remainder)", x, y)
	})
}
// Equal reports deep (structural) equality of the two operands; never errors.
func Equal(x interface{}, y interface{}) (interface{}, error) {
return reflect.DeepEqual(x, y), nil
}
// NotEqual is the negation of Equal; never errors.
func NotEqual(x interface{}, y interface{}) (interface{}, error) {
return !reflect.DeepEqual(x, y), nil
}
func Less(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x < y
}, func(x string, y string) interface{} {
return x < y
}, func(x *DateTime, y *DateTime) interface{} {
if x.Time != nil && y.Time != nil {
return x.Time.Before(*y.Time)
}
return fmt.Errorf("cannot compare file time instances")
}, func() (interface{}, error) {
return nil, fmt.Errorf("cannot compare %v and %v", x, y)
})
}
func Greater(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x > y
}, func(x string, y string) interface{} {
return x > y
}, func(x *DateTime, y *DateTime) interface{} {
if x.Time != nil && y.Time != nil {
return x.Time.After(*y.Time)
}
return fmt.Errorf("cannot compare file time instances")
}, func() (interface{}, error) {
return nil, fmt.Errorf("cannot compare %v and %v", x, y)
})
}
func LessOrEq(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x <= y
}, func(x string, y string) interface{} {
return x <= y
}, func(x *DateTime, y *DateTime) interface{} {
if x.Time != nil && y.Time != nil {
return x.Time.Before(*y.Time) || *x.Time == *y.Time
}
return fmt.Errorf("cannot compare file time instances")
}, func() (interface{}, error) {
return nil, fmt.Errorf("cannot compare %v and %v", x, y)
})
}
func GreaterOrEq(x interface{}, y interface{}) (interface{}, error) {
return op(x, y, func(x float64, y float64) interface{} {
return x >= y
}, func(x string, y string) interface{} {
return x >= y
}, func(x *DateTime, y *DateTime) interface{} {
if x.Time != nil && y.Time != nil {
return x.Time.After(*y.Time) || *x.Time == *y.Time
}
return fmt.Errorf("cannot compare file time instances")
}, func() (interface{}, error) {
return nil, fmt.Errorf("cannot compare %v and %v", x, y)
})
}
// and implements logical AND for bool pairs; other types are an error.
func and(x interface{}, y interface{}) (interface{}, error) {
return bop(x, y, func(x bool, y bool) interface{} {
return x && y
}, func() (interface{}, error) {
return nil, fmt.Errorf("cannot AND %v and %v", x, y)
})
}
// or implements a truthiness-based coalescing OR (like JavaScript's ||):
// if x is "falsy" (nil, 0, false, or the empty string) the result is y,
// otherwise the result is x itself. It never errors.
func or(x interface{}, y interface{}) (interface{}, error) {
if x == nil || x == float64(0) || x == false || x == "" {
return y, nil
}
return x, nil
}
// not implements logical negation for bools; other types are an error.
// It reuses bop with a dummy second operand.
func not(x interface{}) (interface{}, error) {
return bop(x, true, func(x bool, y bool) interface{} {
return !x
}, func() (interface{}, error) {
return nil, fmt.Errorf("cannot negate %v", x)
})
}
// minus implements unary '-' for float64; other types are an error.
func minus(x interface{}) (interface{}, error) {
return op(x, float64(0), func(x float64, y float64) interface{} {
return -x
}, nil, nil, func() (interface{}, error) {
return nil, fmt.Errorf("cannot use '-' on %v", x)
})
}
// plus implements unary '+' (identity) for float64; other types are an error.
func plus(x interface{}) (interface{}, error) {
return op(x, float64(0), func(x float64, y float64) interface{} {
return x
}, nil, nil, func() (interface{}, error) {
return nil, fmt.Errorf("cannot use '+' on %v", x)
})
} | mg/expression/operators.go | 0.690037 | 0.502747 | operators.go | starcoder
package primitive
import (
"fmt"
"math"
"math/rand"
"github.com/fogleman/gg"
)
// Rectangle is an axis-aligned rectangle shape defined by two (unordered)
// corners inside a W x H canvas.
type Rectangle struct {
W, H int // canvas dimensions, used to clamp mutations
X1, Y1 int // first corner (not necessarily top-left)
X2, Y2 int // second corner
}
// NewRandomRectangle returns a Rectangle with both corners drawn uniformly
// at random from the w x h canvas.
func NewRandomRectangle(w, h int) *Rectangle {
x1 := rand.Intn(w)
y1 := rand.Intn(h)
x2 := rand.Intn(w)
y2 := rand.Intn(h)
return &Rectangle{w, h, x1, y1, x2, y2}
}
// bounds returns the rectangle's corners normalized so that (x1, y1) is the
// minimum corner and (x2, y2) the maximum, regardless of how the stored
// corners are ordered.
func (r *Rectangle) bounds() (x1, y1, x2, y2 int) {
	x1, x2 = r.X1, r.X2
	y1, y2 = r.Y1, r.Y2
	if x2 < x1 {
		x1, x2 = x2, x1
	}
	if y2 < y1 {
		y1, y2 = y2, y1
	}
	return
}
// Draw renders the rectangle onto the drawing context. The +1 makes the
// drawn extent inclusive of both corner pixels, matching Rasterize.
func (r *Rectangle) Draw(dc *gg.Context) {
x1, y1, x2, y2 := r.bounds()
dc.DrawRectangle(float64(x1), float64(y1), float64(x2-x1+1), float64(y2-y1+1))
}
// SVG returns an SVG <rect> element for the rectangle; attrs is spliced in
// verbatim (e.g. fill/opacity attributes supplied by the caller).
func (r *Rectangle) SVG(attrs string) string {
x1, y1, x2, y2 := r.bounds()
w := x2 - x1 + 1
h := y2 - y1 + 1
return fmt.Sprintf(
"<rect %s x=\"%d\" y=\"%d\" width=\"%d\" height=\"%d\" />",
attrs, x1, y1, w, h)
}
// Copy returns an independent copy of the rectangle (value copy is enough:
// the struct holds no reference types).
func (r *Rectangle) Copy() Shape {
a := *r
return &a
}
// Mutate randomly perturbs one of the two corners by up to +/-10 pixels in
// each axis, clamped to stay on the canvas.
func (r *Rectangle) Mutate() {
switch rand.Intn(2) {
case 0:
r.X1 = clampInt(r.X1+rand.Intn(21)-10, 0, r.W-1)
r.Y1 = clampInt(r.Y1+rand.Intn(21)-10, 0, r.H-1)
case 1:
r.X2 = clampInt(r.X2+rand.Intn(21)-10, 0, r.W-1)
r.Y2 = clampInt(r.Y2+rand.Intn(21)-10, 0, r.H-1)
}
}
// Rasterize converts the rectangle into horizontal scanlines, one per row
// from y1 to y2 inclusive, each spanning x1..x2.
func (r *Rectangle) Rasterize() []Scanline {
x1, y1, x2, y2 := r.bounds()
lines := make([]Scanline, y2-y1+1)
i := 0
for y := y1; y <= y2; y++ {
lines[i] = Scanline{y, x1, x2}
i++
}
return lines
}
// RotatedRectangle is a rectangle centered at (X, Y) with side lengths
// (Sx, Sy), rotated by Angle degrees, inside a W x H canvas.
type RotatedRectangle struct {
W, H int // canvas dimensions, used to clamp mutations
X, Y int // center position
Sx, Sy int // side lengths
Angle int // rotation in degrees
}
// NewRandomRotatedRectangle returns a random rotated rectangle; the trailing
// Mutate call also enforces the aspect-ratio constraint via Valid.
func NewRandomRotatedRectangle(w, h int) *RotatedRectangle {
x := rand.Intn(w)
y := rand.Intn(h)
sx := rand.Intn(w / 2)
sy := rand.Intn(h / 2)
a := rand.Intn(360)
r := &RotatedRectangle{w, h, x, y, sx, sy, a}
r.Mutate()
return r
}
// Draw renders the rotated rectangle by translating/rotating the context and
// drawing an axis-aligned rectangle centered on the origin; Push/Pop restore
// the context's transform afterwards.
func (r *RotatedRectangle) Draw(dc *gg.Context) {
sx, sy := float64(r.Sx), float64(r.Sy)
dc.Push()
dc.Translate(float64(r.X), float64(r.Y))
dc.Rotate(radians(float64(r.Angle)))
dc.DrawRectangle(-sx/2, -sy/2, sx, sy)
dc.Pop()
}
// SVG returns a unit <rect> wrapped in a <g> whose transform applies the
// translation, rotation and scale; attrs is spliced in verbatim.
func (r *RotatedRectangle) SVG(attrs string) string {
return fmt.Sprintf(
"<g transform=\"translate(%d %d) rotate(%d) scale(%d %d)\"><rect %s x=\"-0.5\" y=\"-0.5\" width=\"1\" height=\"1\" /></g>",
r.X, r.Y, r.Angle, r.Sx, r.Sy, attrs)
}
// Copy returns an independent copy of the shape (value copy suffices).
func (r *RotatedRectangle) Copy() Shape {
a := *r
return &a
}
// Mutate randomly perturbs the center, the side lengths, or the angle, then
// keeps re-jittering the side lengths until the aspect-ratio constraint in
// Valid is satisfied.
func (r *RotatedRectangle) Mutate() {
switch rand.Intn(3) {
case 0:
r.X = clampInt(r.X+rand.Intn(21)-10, 0, r.W-1)
r.Y = clampInt(r.Y+rand.Intn(21)-10, 0, r.H-1)
case 1:
r.Sx = clampInt(r.Sx+rand.Intn(21)-10, 0, r.W-1)
r.Sy = clampInt(r.Sy+rand.Intn(21)-10, 0, r.H-1)
case 2:
r.Angle = r.Angle + rand.Intn(41) - 20
}
// Repair loop: re-roll the sides until the shape passes Valid.
for !r.Valid() {
r.Sx = clampInt(r.Sx+rand.Intn(21)-10, 0, r.W-1)
r.Sy = clampInt(r.Sy+rand.Intn(21)-10, 0, r.H-1)
}
}
// Valid reports whether the rectangle's aspect ratio (longer side divided by
// shorter side) is at most 5, rejecting degenerate sliver shapes.
func (r *RotatedRectangle) Valid() bool {
	long, short := r.Sx, r.Sy
	if long < short {
		long, short = short, long
	}
	return float64(long)/float64(short) <= 5
}
// Rasterize converts the rotated rectangle into horizontal scanlines by
// rotating the four corners, then sampling points densely along each edge to
// record, for every covered row, the leftmost and rightmost x hit. Rows are
// clipped to the canvas.
func (r *RotatedRectangle) Rasterize() []Scanline {
sx, sy := float64(r.Sx), float64(r.Sy)
angle := radians(float64(r.Angle))
// Rotate the four corners about the center, then translate to (X, Y).
rx1, ry1 := rotate(-sx/2, -sy/2, angle)
rx2, ry2 := rotate(sx/2, -sy/2, angle)
rx3, ry3 := rotate(sx/2, sy/2, angle)
rx4, ry4 := rotate(-sx/2, sy/2, angle)
x1, y1 := int(rx1)+r.X, int(ry1)+r.Y
x2, y2 := int(rx2)+r.X, int(ry2)+r.Y
x3, y3 := int(rx3)+r.X, int(ry3)+r.Y
x4, y4 := int(rx4)+r.X, int(ry4)+r.Y
miny := minInt(y1, minInt(y2, minInt(y3, y4)))
maxy := maxInt(y1, maxInt(y2, maxInt(y3, y4)))
n := maxy - miny + 1
// min/max hold per-row horizontal extents; min starts at W so any sample
// will shrink it.
min := make([]int, n)
max := make([]int, n)
for i := range min {
min[i] = r.W
}
// Close the polygon by repeating the first corner at the end.
xs := []int{x1, x2, x3, x4, x1}
ys := []int{y1, y2, y3, y4, y1}
// TODO: this could be better probably
// (edge sampling at 2 points per unit length instead of exact
// line rasterization)
for i := 0; i < 4; i++ {
x, y := float64(xs[i]), float64(ys[i])
dx, dy := float64(xs[i+1]-xs[i]), float64(ys[i+1]-ys[i])
count := int(math.Sqrt(dx*dx+dy*dy)) * 2
for j := 0; j < count; j++ {
t := float64(j) / float64(count-1)
xi := int(x + dx*t)
yi := int(y+dy*t) - miny
min[yi] = minInt(min[yi], xi)
max[yi] = maxInt(max[yi], xi)
}
}
// Emit one scanline per covered row, clipped to the canvas bounds.
lines := make([]Scanline, 0, n)
for i := 0; i < n; i++ {
y := miny + i
if y < 0 || y >= r.H {
continue
}
a := maxInt(min[i], 0)
b := minInt(max[i], r.W-1)
lines = append(lines, Scanline{y, a, b})
}
return lines
} | primitive/rectangle.go | 0.716913 | 0.489076 | rectangle.go | starcoder
package config
import (
"math"
"math/rand"
"github.com/paulwrubel/photolum/config/geometry"
)
// A Camera holds information about the scene's camera
// and facilitates the casting of Rays into the scene
type Camera struct {
EyeLocation geometry.Point // where the camera sits
TargetLocation geometry.Point // the point the camera looks at
UpVector geometry.Vector // world-space "up"; normalized by Setup
VerticalFOV float64 // vertical field of view, in degrees
AspectRatio float64 // width/height; derived from Parameters in Setup
Aperture float64 // lens diameter; larger values blur out-of-focus objects
FocusDistance float64 // distance from the eye to the focal plane
// Derived values, populated by Setup:
lensRadius float64 // Aperture / 2
theta float64 // VerticalFOV converted to radians
halfWidth float64
halfHeight float64
w geometry.Vector // unit vector from target toward the eye
u geometry.Vector // camera-right basis vector
v geometry.Vector // camera-up basis vector
lowerLeftCorner geometry.Point // lower-left corner of the focal plane
horizonal geometry.Vector // full horizontal span of the focal plane (sic)
verical geometry.Vector // full vertical span of the focal plane (sic)
}
// Setup is called after allocating the Camera struct and filling the exported fields
// It fills the unexported fields, such as derived vectors and measures:
// the orthonormal camera basis (u, v, w), the focal-plane corner, and the
// focal-plane span vectors used by GetRay.
func (c *Camera) Setup(p *Parameters) error {
c.UpVector = c.UpVector.Unit()
c.AspectRatio = float64(p.ImageWidth) / float64(p.ImageHeight)
c.lensRadius = c.Aperture / 2.0
// Convert the vertical FOV to radians and derive the half-extents of the
// view plane at unit distance; the aspect ratio scales the width.
c.theta = c.VerticalFOV * math.Pi / 180.0
c.halfHeight = math.Tan(c.theta / 2.0)
c.halfWidth = c.AspectRatio * c.halfHeight
// Build the right-handed camera basis: w points from target to eye.
c.w = c.TargetLocation.To(c.EyeLocation).Unit()
c.u = c.UpVector.Cross(c.w)
c.v = c.w.Cross(c.u)
// The focal plane sits FocusDistance in front of the eye; walk from the
// eye to its lower-left corner.
c.lowerLeftCorner = c.EyeLocation.SubVector(
c.u.MultScalar(c.halfWidth * c.FocusDistance)).SubVector(
c.v.MultScalar(c.halfHeight * c.FocusDistance)).SubVector(
c.w.MultScalar(c.FocusDistance))
c.horizonal = c.u.MultScalar(2.0 * c.halfWidth * c.FocusDistance)
c.verical = c.v.MultScalar(2.0 * c.halfHeight * c.FocusDistance)
return nil
}
// GetRay returns a Ray from the eye location to a point on the view plane u% across and v% up.
// The ray origin is jittered on the lens disk (scaled by lensRadius) to
// produce depth-of-field; the direction is corrected by the same offset so
// the ray still passes through the chosen focal-plane point.
func (c *Camera) GetRay(u float64, v float64, rng *rand.Rand) geometry.Ray {
randomOnLens := geometry.RandomOnUnitDisk(rng).MultScalar(c.lensRadius)
offset := c.u.MultScalar(randomOnLens.X).Add(c.v.MultScalar(randomOnLens.Y))
return geometry.Ray{
Origin: c.EyeLocation.AddVector(offset),
Direction: c.lowerLeftCorner.AddVector(
c.horizonal.MultScalar(u)).AddVector(
c.verical.MultScalar(v)).From(
c.EyeLocation).Sub(
offset).Unit(),
}
} | config/camera.go | 0.792384 | 0.708994 | camera.go | starcoder
package chart
import (
"strings"
util "github.com/beevee/go-chart/util"
)
// TextHorizontalAlign is an enum for the horizontal alignment options.
type TextHorizontalAlign int
const (
// TextHorizontalAlignUnset is the unset state for text horizontal alignment.
TextHorizontalAlignUnset TextHorizontalAlign = 0
// TextHorizontalAlignLeft aligns a string horizontally so that it's left ligature starts at horizontal pixel 0.
TextHorizontalAlignLeft TextHorizontalAlign = 1
// TextHorizontalAlignCenter aligns a string horizontally so that there are equal pixels
// to the left and to the right of a string within a box.
TextHorizontalAlignCenter TextHorizontalAlign = 2
// TextHorizontalAlignRight right aligns a string horizontally so that the right ligature ends at the right-most pixel
// of a box.
TextHorizontalAlignRight TextHorizontalAlign = 3
)
// TextWrap is an enum for the word wrap options.
type TextWrap int
const (
// TextWrapUnset is the unset state for text wrap options.
TextWrapUnset TextWrap = 0
// TextWrapNone will spill text past horizontal boundaries.
TextWrapNone TextWrap = 1
// TextWrapWord will split a string on words (i.e. spaces) to fit within a horizontal boundary.
TextWrapWord TextWrap = 2
// TextWrapRune will split a string on a rune (i.e. utf-8 codepage) to fit within a horizontal boundary.
TextWrapRune TextWrap = 3
)
// TextVerticalAlign is an enum for the vertical alignment options.
type TextVerticalAlign int
const (
// TextVerticalAlignUnset is the unset state for vertical alignment options.
TextVerticalAlignUnset TextVerticalAlign = 0
// TextVerticalAlignBaseline aligns text according to the "baseline" of the string, or where a normal ascender begins.
TextVerticalAlignBaseline TextVerticalAlign = 1
// TextVerticalAlignBottom aligns the text according to the lowest pixel of any of the ligatures (ex. g or q both extend below the baseline).
TextVerticalAlignBottom TextVerticalAlign = 2
// TextVerticalAlignMiddle aligns the text so that there is an equal amount of space above and below the top and bottom of the ligatures.
TextVerticalAlignMiddle TextVerticalAlign = 3
// TextVerticalAlignMiddleBaseline aligns the text vertically so that there is an equal number of pixels above and below the baseline of the string.
TextVerticalAlignMiddleBaseline TextVerticalAlign = 4
// TextVerticalAlignTop aligns the text so that the top of the ligatures are at y-pixel 0 in the container.
TextVerticalAlignTop TextVerticalAlign = 5
)
var (
// Text contains utilities for text.
Text = &text{}
)
// TextStyle encapsulates text style options.
type TextStyle struct {
HorizontalAlign TextHorizontalAlign
VerticalAlign TextVerticalAlign
Wrap TextWrap
}
// text is the receiver type behind the package-level Text helper; it carries
// no state, only methods.
type text struct{}
// WrapFit breaks value into lines no wider than width pixels, using the wrap
// mode carried in style. Unset or TextWrapNone modes return the value as a
// single unbroken line.
func (t text) WrapFit(r Renderer, value string, width int, style Style) []string {
	if style.TextWrap == TextWrapRune {
		return t.WrapFitRune(r, value, width, style)
	}
	if style.TextWrap == TextWrapWord {
		return t.WrapFitWord(r, value, width, style)
	}
	return []string{value}
}
// WrapFitWord word-wraps value so every returned line measures at most width
// pixels under the renderer's current style. Explicit newlines force a break.
// It accumulates a committed line plus a pending word; when line+word+next
// rune would overflow, the line is committed and the word carries over.
// NOTE(review): a single word wider than width is still committed whole, so
// an individual line can exceed width in that case.
func (t text) WrapFitWord(r Renderer, value string, width int, style Style) []string {
style.WriteToRenderer(r)
var output []string
var line string
var word string
var textBox Box
for _, c := range value {
if c == rune('\n') { // commit the line to output
output = append(output, t.Trim(line+word))
line = ""
word = ""
continue
}
// Measure what the line would be if we appended the pending word and
// this rune; overflow commits the current line.
textBox = r.MeasureText(line + word + string(c))
if textBox.Width() >= width {
output = append(output, t.Trim(line))
line = word
word = string(c)
continue
}
// Whitespace ends the pending word and folds it into the line.
if c == rune(' ') || c == rune('\t') {
line = line + word + string(c)
word = ""
continue
}
word = word + string(c)
}
// Flush whatever is left after the final rune.
return append(output, t.Trim(line+word))
}
// WrapFitRune wraps value rune-by-rune: a line is committed as soon as adding
// the next rune would reach the pixel width. Explicit newlines force a break.
// Unlike WrapFitWord, words may be split mid-way.
func (t text) WrapFitRune(r Renderer, value string, width int, style Style) []string {
style.WriteToRenderer(r)
var output []string
var line string
var textBox Box
for _, c := range value {
if c == rune('\n') {
output = append(output, line)
line = ""
continue
}
textBox = r.MeasureText(line + string(c))
if textBox.Width() >= width {
// Commit the full line; the overflowing rune starts the next one.
output = append(output, line)
line = string(c)
continue
}
line = line + string(c)
}
// Attach the trailing partial line to the output.
return t.appendLast(output, line)
}
// Trim strips leading and trailing whitespace (spaces, tabs, newlines,
// carriage returns) from value.
func (t text) Trim(value string) string {
return strings.Trim(value, " \t\n\r")
}
// MeasureLines measures a multi-line block of text: the widest line sets the
// box's right edge, and the line heights plus inter-line spacing accumulate
// into the bottom edge. (A stray unary plus — `+= +spacing` — has been
// removed; behavior is unchanged.)
func (t text) MeasureLines(r Renderer, lines []string, style Style) Box {
	style.WriteTextOptionsToRenderer(r)
	var output Box
	for index, line := range lines {
		lineBox := r.MeasureText(line)
		output.Right = util.Math.MaxInt(lineBox.Right, output.Right)
		output.Bottom += lineBox.Height()
		// Spacing sits between lines only, so skip it after the last line.
		if index < len(lines)-1 {
			output.Bottom += style.GetTextLineSpacing()
		}
	}
	return output
}
func (t text) appendLast(lines []string, text string) []string {
if len(lines) == 0 {
return []string{text}
}
lastLine := lines[len(lines)-1]
lines[len(lines)-1] = lastLine + text
return lines
} | text.go | 0.585457 | 0.428712 | text.go | starcoder |
package openapi
// CheckDetailAddendumB struct for CheckDetailAddendumB.
// Generated OpenAPI model: field comments below come from the X9/image
// cash letter specification and should track the generator's output.
type CheckDetailAddendumB struct {
// CheckDetailAddendumB ID
ID string `json:"ID,omitempty"`
// ImageReferenceKeyIndicator identifies whether ImageReferenceKeyLength contains a variable value within the allowable range, or contains a defined value and the content is ItemReferenceKey. * `0` - ImageReferenceKeyIndicator has Defined Value of 0034 and ImageReferenceKey contains the Image Reference Key. * `1`- ImageReferenceKeyIndicator contains a value other than Value 0034; or ImageReferenceKeyIndicator contains Value 0034, which is not a Defined Value, and the content of ImageReferenceKey has no special significance with regards to an Image Reference Key; or ImageReferenceKeyIndicator is 0000, meaning the ImageReferenceKey is not present.
ImageReferenceKeyIndicator int32 `json:"imageReferenceKeyIndicator,omitempty"`
// microfilmArchiveSequenceNumber is a number that identifies the item in the microfilm archive system; it may be different than the Check.ECEInstitutionItemSequenceNumber and from the ImageReferenceKey.
MicrofilmArchiveSequenceNumber string `json:"microfilmArchiveSequenceNumber"`
// MicrofilmArchiveSequenceNumber A number that identifies the item in the microfilm archive system; it may be different than the Check.ECEInstitutionItemSequenceNumber and from the ImageReferenceKey. * `0034` - ImageReferenceKey contains the ImageReferenceKey (ImageReferenceKeyIndicator is 0). * `0000` - ImageReferenceKey not present (ImageReferenceKeyIndicator is 1). * `0001` - 9999: May include Value 0034, and ImageReferenceKey has no special significance to Image Reference Key (ImageReferenceKey is 1).
LengthImageReferenceKey string `json:"lengthImageReferenceKey,omitempty"`
// ImageReferenceKey is used to find the image of the item in the image data system. Size is variable based on lengthImageReferenceKey. The position within the file is variable based on the lengthImageReferenceKey.
ImageReferenceKey string `json:"imageReferenceKey,omitempty"`
// Descript describes the transaction. The position within the file is variable based on the lengthImageReferenceKey.
Descript string `json:"descript,omitempty"`
// UserField identifies a field used at the discretion of users of the standard.
UserField string `json:"userField,omitempty"`
} | client/model_check_detail_addendum_b.go | 0.678114 | 0.414721 | model_check_detail_addendum_b.go | starcoder
package pgmodel
import (
"fmt"
"github.com/timescale/timescale-prometheus/pkg/prompb"
)
const (
// MetricNameLabelName is the reserved Prometheus label that carries the
// metric name.
MetricNameLabelName = "__name__"
)
var (
// ErrNoMetricName is returned when a series has no __name__ label.
ErrNoMetricName = fmt.Errorf("metric name missing")
)
// SeriesID represents a globally unique id for the series. This should be equivalent
// to the PostgreSQL type in the series table (currently BIGINT).
type SeriesID int64
// inserter is responsible for inserting label, series and data into the storage.
// InsertNewData returns the number of rows written; CompleteMetricCreation
// finalizes any pending metric-table creation; Close releases resources.
type inserter interface {
InsertNewData(rows map[string][]samplesInfo) (uint64, error)
CompleteMetricCreation() error
Close()
}
// seriesWithCallback pairs a label set with a callback invoked once the
// series has been assigned its database id.
type seriesWithCallback struct {
Series Labels
Callback func(l Labels, id SeriesID) error
}
// SeriesCache provides a caching mechanism for labels and series.
type SeriesCache interface {
GetSeries(lset Labels) (SeriesID, error)
SetSeries(lset Labels, id SeriesID) error
NumElements() int
Capacity() int
}
// samplesInfo carries one series' samples toward insertion; seriesID starts
// at -1 (unresolved) until the series is looked up or created.
type samplesInfo struct {
labels *Labels
seriesID SeriesID
samples []prompb.Sample
}
// DBIngestor ingests the TimeSeries data into the Timescale database via the
// configured inserter.
type DBIngestor struct {
db inserter
}
// Ingest transforms the protobuf time series into per-metric sample groups
// and inserts them into the Timescale database. It returns the number of
// samples actually inserted; a partial insert (fewer rows than parsed) is
// reported as an error alongside the count so callers never silently lose
// data. The error string now follows Go conventions (lowercase, no
// punctuation) instead of the previous "Failed to insert all the data!".
func (i *DBIngestor) Ingest(tts []prompb.TimeSeries, req *prompb.WriteRequest) (uint64, error) {
	data, totalRows, err := i.parseData(tts, req)
	if err != nil {
		return 0, err
	}
	rowsInserted, err := i.db.InsertNewData(data)
	if err != nil {
		return rowsInserted, err
	}
	if int(rowsInserted) != totalRows {
		return rowsInserted, fmt.Errorf("failed to insert all the data: expected %d, got %d", totalRows, rowsInserted)
	}
	return rowsInserted, nil
}
// CompleteMetricCreation delegates to the underlying inserter to finalize
// any pending metric creation work.
func (i *DBIngestor) CompleteMetricCreation() error {
	return i.db.CompleteMetricCreation()
}
// parseData converts the protobuf time series into per-metric samplesInfo
// batches. It returns the batches keyed by metric name along with the total
// number of samples. req is freed before returning, so the caller must not
// reuse it afterwards.
func (i *DBIngestor) parseData(tts []prompb.TimeSeries, req *prompb.WriteRequest) (map[string][]samplesInfo, int, error) {
	dataSamples := make(map[string][]samplesInfo)
	rows := 0

	for i := range tts {
		t := &tts[i]
		if len(t.Samples) == 0 {
			continue
		}

		seriesLabels, metricName, err := labelProtosToLabels(t.Labels)
		if err != nil {
			return nil, rows, err
		}
		if metricName == "" {
			return nil, rows, ErrNoMetricName
		}
		sample := samplesInfo{
			seriesLabels,
			-1, // sentinel marking the seriesId as unset
			t.Samples,
		}
		rows += len(t.Samples)

		dataSamples[metricName] = append(dataSamples[metricName], sample)
		// we're going to free req after this, but we still need the samples,
		// so nil the field
		t.Samples = nil
	}

	FinishWriteRequest(req)
	return dataSamples, rows, nil
}
// Close closes the ingestor, releasing the underlying inserter's resources.
func (i *DBIngestor) Close() {
	i.db.Close()
} | pkg/pgmodel/ingestor.go | 0.654564 | 0.428951 | ingestor.go | starcoder |
package store
import (
"math"
enc "github.com/KoddiDev/sketches-go/ddsketch/encoding"
)
// CollapsingLowestDenseStore is a dynamically growing contiguous (non-sparse) store.
// The lower bins get combined so that the total number of bins do not exceed maxNumBins.
type CollapsingLowestDenseStore struct {
	DenseStore
	maxNumBins  int  // upper bound on the number of allocated bins
	isCollapsed bool // true once the lowest bins have been merged together
}

// NewCollapsingLowestDenseStore returns an empty store whose bin count is
// bounded by maxNumBins.
func NewCollapsingLowestDenseStore(maxNumBins int) *CollapsingLowestDenseStore {
	// Bins are not allocated until values are added.
	// When the first value is added, a small number of bins are allocated. The number of bins will
	// grow as needed up to maxNumBins.
	return &CollapsingLowestDenseStore{
		DenseStore:  DenseStore{minIndex: math.MaxInt32, maxIndex: math.MinInt32},
		maxNumBins:  maxNumBins,
		isCollapsed: false,
	}
}
// Add increments the counter at index by one.
func (s *CollapsingLowestDenseStore) Add(index int) {
	s.AddWithCount(index, float64(1))
}

// AddBin adds the bin's count at the bin's index; empty bins are ignored.
func (s *CollapsingLowestDenseStore) AddBin(bin Bin) {
	index := bin.Index()
	count := bin.Count()
	if count == 0 {
		return
	}
	s.AddWithCount(index, count)
}

// AddWithCount adds count at the given index, collapsing the lowest bins if
// the store would otherwise exceed maxNumBins.
func (s *CollapsingLowestDenseStore) AddWithCount(index int, count float64) {
	if count == 0 {
		return
	}
	arrayIndex := s.normalize(index)
	s.bins[arrayIndex] += count
	s.count += count
}
// normalize translates index into an offset within s.bins, growing or
// collapsing the store as needed so that the target counter exists. Once the
// store has collapsed, any index below minIndex maps to bin 0.
// (Idiom fix only: the original used an else branch after a return; behavior
// is unchanged.)
func (s *CollapsingLowestDenseStore) normalize(index int) int {
	if index < s.minIndex {
		if s.isCollapsed {
			return 0
		}
		s.extendRange(index, index)
		// extendRange may itself have collapsed the lowest bins.
		if s.isCollapsed {
			return 0
		}
	} else if index > s.maxIndex {
		s.extendRange(index, index)
	}
	return index - s.offset
}

// getNewLength caps the underlying DenseStore's growth at maxNumBins.
func (s *CollapsingLowestDenseStore) getNewLength(newMinIndex, newMaxIndex int) int {
	return min(s.DenseStore.getNewLength(newMinIndex, newMaxIndex), s.maxNumBins)
}
// extendRange grows the store so that the union of the current range and
// [newMinIndex, newMaxIndex] can be addressed, allocating at most maxNumBins
// bins and collapsing via adjust when the requested range is wider than that.
func (s *CollapsingLowestDenseStore) extendRange(newMinIndex, newMaxIndex int) {
	newMinIndex = min(newMinIndex, s.minIndex)
	newMaxIndex = max(newMaxIndex, s.maxIndex)
	if s.IsEmpty() {
		initialLength := s.getNewLength(newMinIndex, newMaxIndex)
		s.bins = append(s.bins, make([]float64, initialLength)...)
		s.offset = newMinIndex
		s.minIndex = newMinIndex
		s.maxIndex = newMaxIndex
		s.adjust(newMinIndex, newMaxIndex)
	} else if newMinIndex >= s.offset && newMaxIndex < s.offset+len(s.bins) {
		// The new range already fits inside the allocated bins.
		s.minIndex = newMinIndex
		s.maxIndex = newMaxIndex
	} else {
		// To avoid shifting too often when nearing the capacity of the array,
		// we may grow it before we actually reach the capacity.
		newLength := s.getNewLength(newMinIndex, newMaxIndex)
		if newLength > len(s.bins) {
			s.bins = append(s.bins, make([]float64, newLength-len(s.bins))...)
		}
		s.adjust(newMinIndex, newMaxIndex)
	}
}
// adjust repositions bins, offset, minIndex and maxIndex, without resizing the
// bins slice, so that the specified range fits. If the range is wider than the
// slice, the lowest buckets are merged into the lowest remaining bucket and
// the store is marked as collapsed.
func (s *CollapsingLowestDenseStore) adjust(newMinIndex, newMaxIndex int) {
	if newMaxIndex-newMinIndex+1 > len(s.bins) {
		// The range of indices is too wide, buckets of lowest indices need to be collapsed.
		newMinIndex = newMaxIndex - len(s.bins) + 1
		if newMinIndex >= s.maxIndex {
			// There will be only one non-empty bucket.
			s.bins = make([]float64, len(s.bins))
			s.offset = newMinIndex
			s.minIndex = newMinIndex
			s.bins[0] = s.count
		} else {
			shift := s.offset - newMinIndex
			if shift < 0 {
				// Collapse the buckets.
				n := float64(0)
				for i := s.minIndex; i < newMinIndex; i++ {
					n += s.bins[i-s.offset]
				}
				s.resetBins(s.minIndex, newMinIndex-1)
				s.bins[newMinIndex-s.offset] += n
				s.minIndex = newMinIndex
				// Shift the buckets to make room for newMaxIndex.
				s.shiftCounts(shift)
			} else {
				// Shift the buckets to make room for newMinIndex.
				s.shiftCounts(shift)
				s.minIndex = newMinIndex
			}
		}
		s.maxIndex = newMaxIndex
		s.isCollapsed = true
	} else {
		s.centerCounts(newMinIndex, newMaxIndex)
	}
}
// MergeWith adds all counts of other into s; other's bins that fall below
// s's (possibly collapsed) minimum index are folded into bin 0.
func (s *CollapsingLowestDenseStore) MergeWith(other Store) {
	if other.IsEmpty() {
		return
	}
	o, ok := other.(*CollapsingLowestDenseStore)
	if !ok {
		// Generic bin-by-bin merge for any other Store implementation.
		for bin := range other.Bins() {
			s.AddBin(bin)
		}
		return
	}
	if o.minIndex < s.minIndex || o.maxIndex > s.maxIndex {
		s.extendRange(o.minIndex, o.maxIndex)
	}
	idx := o.minIndex
	// Counts below s.minIndex have been collapsed into bin 0.
	for ; idx < s.minIndex && idx <= o.maxIndex; idx++ {
		s.bins[0] += o.bins[idx-o.offset]
	}
	for ; idx < o.maxIndex; idx++ {
		s.bins[idx-s.offset] += o.bins[idx-o.offset]
	}
	// This is a separate test so that the comparison in the previous loop is strict (<) and handles
	// store.maxIndex = Integer.MAX_VALUE.
	if idx == o.maxIndex {
		s.bins[idx-s.offset] += o.bins[idx-o.offset]
	}
	s.count += o.count
}
// Copy returns a deep copy of the store.
func (s *CollapsingLowestDenseStore) Copy() Store {
	bins := make([]float64, len(s.bins))
	copy(bins, s.bins)
	return &CollapsingLowestDenseStore{
		DenseStore: DenseStore{
			bins:     bins,
			count:    s.count,
			offset:   s.offset,
			minIndex: s.minIndex,
			maxIndex: s.maxIndex,
		},
		maxNumBins:  s.maxNumBins,
		isCollapsed: s.isCollapsed,
	}
}

// Clear empties the store and resets the collapsed flag.
func (s *CollapsingLowestDenseStore) Clear() {
	s.DenseStore.Clear()
	s.isCollapsed = false
}

// DecodeAndMergeWith merges the encoded store read from r into s.
func (s *CollapsingLowestDenseStore) DecodeAndMergeWith(r *[]byte, encodingMode enc.SubFlag) error {
	return DecodeAndMergeWith(s, r, encodingMode)
}

// Compile-time check that the Store interface is satisfied.
var _ Store = (*CollapsingLowestDenseStore)(nil)
// max returns the larger of the two given integers.
func max(x, y int) int {
	if x < y {
		return y
	}
	return x
}
// min returns the smaller of the two given integers.
func min(x, y int) int {
	if x < y {
		return x
	}
	return y
} | ddsketch/store/collapsing_lowest_dense_store.go | 0.699357 | 0.435181 | collapsing_lowest_dense_store.go | starcoder |
package manual
// syntax returns the "Syntax" chapter of the in-program manual as one
// preformatted raw string. The string content is user-facing output and is
// reproduced verbatim; do not edit it casually.
func syntax() string {
	return `
Comments
Comments are nonsensical remarks that accompany code but are ignored by
the compiler. Comments start with a '#' hash symbol (or pound for some
Americans) and ends at the end of the line.
# Write whatever you want here
# Put two or more comment lines together for longer descriptions
Variables & Types
Variables are symbols used to represent a value which can be changed
through assignment. A value can be simple or complex entities from numbers
and strings of characters to lists and even functions. Variables are
extremely useful as they can hold values we cannot possibly know at the
time of coding. Variable names must start with a letter and then any
unbroken series of letters and underscores providing it is not a keyword.
Good: 'x'
'playerName'
'enemy_health'
Bad: '_x' First character must be a letter
'player name' Spaces are not allowed
'player123' Numbers are not allowed
The format and interaction rules of each value in Scarlet are defined by
their data type. There are three intrinsic types but new ones can be added
fairly easily. Specifing types is not required but programmers are
required to know a variables value data type as different operations limit
the types that can be used as operands.
Assignments to variables are performed in classic imperative fashion
allowing for multiple assignments using a single assignment operator.
The type information of a value stays solely with the value so variables
can be reassigned values of a different type.
alive := true
playerName := "bob"
answer := 6 * 7
answer := "42"
x := 2 * 3 Valid: multiplying two numbers
x := 2 * "hello" Invalid: multiplying a string is nonsensical
Operations
Intrinsic data types
Bool: Holds one of two possible values, 'true' or 'false'.
Num: Holds an arbitrary length floating point number. The standard
numeric operations can be perform on two numbers such as addition
and multiplication. When an operation or spell requires an integer,
the integer part of the number passed will be used, i.e. no
rounding will occur.
Str: Holds a sequance of UTF-8 characters. Scarlet is very high level
and does not intrinsically deal with byte data so string
manipulation is done purely in UTF-8.
These are the operations available with their precedence, a higher number
means greater precedence and those of equal precedence are prioritised by
first come first computed.
(6) Num * Num
Num / Num
Num % Num
(5) Num + Num
Num - Num
(4) Num < Num
Num > Num
Num <= Num
Num >= Num
(3) Any == Any
Any != Any
(2) Bool && Bool
(1) Bool || Bool
Guards
Guards are used to provide conditional code execution such as printing
a number only if a specific condition is meet.
# Code within the curly brackets is only executed if 'x' is greater
# than zero.
[x > 0] {
... # Some conditional code
}
Loops
Loops (while) are guards that are repeated until the guard condition
is false.
# A simple example that will only loop once
exit := false
loop [exit] {
exit := true
}
# This example loops 5 times printing the number held by 'i' on each
# iteration.
i := 1
loop [i < 6] {
@Print(i, " ")
i := i - 1
}
Spells
Spells are the central concept on which Scarlet was built. A less
glamorous name would be 'inbuilt functions', but where's the fun in that.
"Any sufficiently advanced technology is indistinguishable from magic"
- [One of] 'Clarke's three laws' by <NAME>
Spells are always prefixed with an at sign '@' followed by their name
and accept arguments in the same manner as iconic C-style functions.
Unlike variable names, spell names may contain dots '.' to mimic
namespaces. This can make spells more readable, better convey their
usage, and are easier to mass search-and-replace. A registered spell
name may have as many namespace segments as the coder likes but they
should strive to create names that are short and meaningful.
Usage: @spell_name([argument...])
@Println("6 * 7 = ", 6 * 7)
@Exit(0)`
} | _manual/syntax.go | 0.619126 | 0.72577 | syntax.go | starcoder |
package float128ppc
import (
"math"
"math/big"
)
const (
	// precision specifies the number of bits in the mantissa (including the
	// implicit lead bit) of the double-double format.
	precision = 106
)
// Positive and negative Not-a-Number, infinity and zero.
var (
	// +NaN
	NaN = Float{high: math.NaN(), low: 0}
	// -NaN
	NegNaN = Float{high: -math.NaN(), low: 0}
	// +Inf
	Inf = Float{high: math.Inf(1), low: 0}
	// -Inf
	//
	// BUG FIX: the original initializer was -math.Inf(-1), which negates
	// negative infinity and therefore yields POSITIVE infinity.
	// math.Inf(-1) is the correct negative infinity.
	NegInf = Float{high: math.Inf(-1), low: 0}
	// +zero
	Zero = Float{high: 0, low: 0}
	// -zero
	NegZero = Float{high: math.Copysign(0, -1), low: 0}
)
// Float is a floating-point number in double-double format: a value is
// regarded as the exact sum of two double-precision components, giving at
// least 106 bits of precision.
type Float struct {
	high float64 // most-significant component
	low  float64 // residual; high + low is the represented value
}

// NewFromBits returns the floating-point number corresponding to the
// double-double representation, where a and b are the IEEE 754 bit patterns
// of the high and low components respectively.
func NewFromBits(a, b uint64) Float {
	high := math.Float64frombits(a)
	low := math.Float64frombits(b)
	return Float{
		high: high,
		low:  low,
	}
}
// NewFromFloat32 returns the nearest double-double precision floating-point
// number for x and the accuracy of the conversion.
func NewFromFloat32(x float32) (Float, big.Accuracy) {
	f, acc := NewFromFloat64(float64(x))
	if acc == big.Exact {
		// Exactness must also hold for the round trip back to float32.
		_, acc = f.Float32()
	}
	return f, acc
}

// NewFromFloat64 returns the nearest double-double precision floating-point
// number for x and the accuracy of the conversion.
func NewFromFloat64(x float64) (Float, big.Accuracy) {
	// +-NaN
	switch {
	case math.IsNaN(x):
		if math.Signbit(x) {
			// -NaN
			return NegNaN, big.Exact
		}
		// +NaN
		return NaN, big.Exact
	}
	// Any float64 is exactly representable as a double-double; the
	// accuracy still comes from the big.Float round trip.
	r := Float{high: x, low: 0}
	br, _ := r.Big()
	return r, br.Acc()
}
// NewFromBig returns the nearest double-double floating-point number for x and
// the accuracy of the conversion.
func NewFromBig(x *big.Float) (Float, big.Accuracy) {
	// +-Inf
	zero := big.NewFloat(0).SetPrec(precision)
	switch {
	case x.IsInf():
		if x.Signbit() {
			// -Inf
			return NegInf, big.Exact
		}
		// +Inf
		return Inf, big.Exact
	// +-zero
	case x.Cmp(zero) == 0:
		if x.Signbit() {
			// -zero
			return NegZero, big.Exact
		}
		// +zero
		return Zero, big.Exact
	}
	// set precision of x.
	x.SetPrec(precision).SetMode(big.ToNearestEven)
	// get high part of the double-double floating-point value.
	high, _ := x.Float64()
	h := big.NewFloat(high).SetPrec(precision).SetMode(big.ToNearestEven)
	// compute low part by subtracting high from x.
	l := big.NewFloat(0).SetPrec(precision).SetMode(big.ToNearestEven)
	l.Sub(x, h)
	low, _ := l.Float64()
	// check accuracy of results: compare high+low against the input.
	result := big.NewFloat(0).SetPrec(precision).SetMode(big.ToNearestEven)
	result.Add(h, l)
	acc := big.Accuracy(x.Cmp(result))
	return Float{high: high, low: low}, acc
}
// Bits returns the double-double binary representation of f: the IEEE 754
// bit patterns of the high and low components.
func (f Float) Bits() (a, b uint64) {
	return math.Float64bits(f.high), math.Float64bits(f.low)
}

// Float32 returns the float32 representation of f and the accuracy of the
// conversion.
func (f Float) Float32() (float32, big.Accuracy) {
	x, nan := f.Big()
	if nan {
		// big.Float cannot hold NaN; reconstruct a signed NaN directly.
		if x.Signbit() {
			return float32(-math.NaN()), big.Exact
		}
		return float32(math.NaN()), big.Exact
	}
	return x.Float32()
}

// Float64 returns the float64 representation of f and the accuracy of the
// conversion.
func (f Float) Float64() (float64, big.Accuracy) {
	x, nan := f.Big()
	if nan {
		if x.Signbit() {
			return -math.NaN(), big.Exact
		}
		return math.NaN(), big.Exact
	}
	return x.Float64()
}
// Big returns the multi-precision floating-point number representation of f and
// a boolean indicating whether f is Not-a-Number (big.Float cannot represent
// NaN, so it is signalled separately).
func (f Float) Big() (x *big.Float, nan bool) {
	x = big.NewFloat(0)
	x.SetPrec(precision)
	x.SetMode(big.ToNearestEven)
	if f.IsNaN() {
		return x, true
	}
	h := big.NewFloat(f.high).SetPrec(precision)
	l := big.NewFloat(f.low).SetPrec(precision)
	x.Add(h, l)
	zero := big.NewFloat(0).SetPrec(precision)
	if x.Cmp(zero) == 0 && math.Signbit(f.high) {
		// -zero: preserve the sign carried by the high component.
		if !x.Signbit() {
			x.Neg(x)
		}
	}
	return x, false
}
// IsNaN reports whether f is Not-a-Number, i.e. whether either component is NaN.
func (f Float) IsNaN() bool {
	// NaN + NaN should be NaN in consideration
	return math.IsNaN(f.high) || math.IsNaN(f.low)
} | float128ppc/float128ppc.go | 0.869424 | 0.627466 | float128ppc.go | starcoder |
package specs
import (
"reflect"
"testing"
"time"
"github.com/Fs02/grimoire"
"github.com/Fs02/grimoire/changeset"
"github.com/Fs02/grimoire/params"
"github.com/stretchr/testify/assert"
)
// Insert tests insert specifications. It exercises single and multiple
// inserts through a changeset built from params, against both the users and
// addresses collections.
func Insert(t *testing.T, repo grimoire.Repo) {
	user := User{}
	repo.From(users).MustSave(&user)

	tests := []struct {
		query  grimoire.Query
		record interface{}
		input  params.Params
	}{
		{repo.From(users), &User{}, params.Map{}},
		{repo.From(users), &User{}, params.Map{"name": "insert", "age": 100}},
		{repo.From(users), &User{}, params.Map{"name": "insert", "age": 100, "note": "note"}},
		{repo.From(users), &User{}, params.Map{"note": "note"}},
		{repo.From(addresses), &Address{}, params.Map{}},
		{repo.From(addresses), &Address{}, params.Map{"address": "address"}},
		{repo.From(addresses), &Address{}, params.Map{"user_id": user.ID}},
		{repo.From(addresses), &Address{}, params.Map{"address": "address", "user_id": user.ID}},
	}

	for _, test := range tests {
		ch := changeset.Cast(test.record, test.input, []string{"name", "age", "note", "address", "user_id"})
		// The built SQL statement doubles as the subtest name.
		statement, _ := builder.Insert(test.query.Collection, ch.Changes())

		t.Run("Insert|"+statement, func(t *testing.T) {
			assert.Nil(t, ch.Error())
			assert.Nil(t, test.query.Insert(nil, ch))
			assert.Nil(t, test.query.Insert(test.record, ch))

			// multiple insert
			assert.Nil(t, test.query.Insert(nil, ch, ch, ch))
		})
	}
}
// InsertAll tests inserting multiple records at once through a changeset.
// NOTE(review): the schema column passes User{} by value for users rows but
// &Address{} (a pointer) for addresses rows — presumably changeset.Cast
// accepts both; confirm the asymmetry is intentional.
func InsertAll(t *testing.T, repo grimoire.Repo) {
	user := User{}
	repo.From(users).MustSave(&user)

	tests := []struct {
		query  grimoire.Query
		schema interface{}
		record interface{}
		params params.Params
	}{
		{repo.From(users), User{}, &[]User{}, params.Map{}},
		{repo.From(users), User{}, &[]User{}, params.Map{"name": "insert", "age": 100}},
		{repo.From(users), User{}, &[]User{}, params.Map{"name": "insert", "age": 100, "note": "note"}},
		{repo.From(users), User{}, &[]User{}, params.Map{"note": "note"}},
		{repo.From(addresses), &Address{}, &[]Address{}, params.Map{}},
		{repo.From(addresses), &Address{}, &[]Address{}, params.Map{"address": "address"}},
		{repo.From(addresses), &Address{}, &[]Address{}, params.Map{"user_id": user.ID}},
		{repo.From(addresses), &Address{}, &[]Address{}, params.Map{"address": "address", "user_id": user.ID}},
	}

	for _, test := range tests {
		ch := changeset.Cast(test.schema, test.params, []string{"name", "age", "note", "address", "user_id"})
		statement, _ := builder.Insert(test.query.Collection, ch.Changes())

		t.Run("InsertAll|"+statement, func(t *testing.T) {
			assert.Nil(t, ch.Error())

			// multiple insert: three changesets must yield three records.
			assert.Nil(t, test.query.Insert(test.record, ch, ch, ch))
			assert.Equal(t, 3, reflect.ValueOf(test.record).Elem().Len())
		})
	}
}
// InsertSet tests insert specifications that are built only from Set query
// mutations (no changeset), for both the users and addresses collections.
func InsertSet(t *testing.T, repo grimoire.Repo) {
	user := User{}
	repo.From(users).MustSave(&user)

	now := time.Now()

	tests := []struct {
		query  grimoire.Query
		record interface{}
	}{
		{repo.From(users).Set("created_at", now).Set("updated_at", now).Set("name", "insert set"), &User{}},
		{repo.From(users).Set("created_at", now).Set("updated_at", now).Set("name", "insert set").Set("age", 100), &User{}},
		{repo.From(users).Set("created_at", now).Set("updated_at", now).Set("name", "insert set").Set("age", 100).Set("note", "note"), &User{}},
		{repo.From(users).Set("created_at", now).Set("updated_at", now).Set("note", "note"), &User{}},
		{repo.From(addresses).Set("created_at", now).Set("updated_at", now).Set("address", "address"), &Address{}},
		{repo.From(addresses).Set("created_at", now).Set("updated_at", now).Set("address", "address").Set("user_id", user.ID), &Address{}},
		{repo.From(addresses).Set("created_at", now).Set("updated_at", now).Set("user_id", user.ID), &Address{}},
	}

	for _, test := range tests {
		statement, _ := builder.Insert(test.query.Collection, test.query.Changes)

		t.Run("InsertSet|"+statement, func(t *testing.T) {
			assert.Nil(t, test.query.Insert(nil))
			assert.Nil(t, test.query.Insert(test.record))
		})
	}
} | adapter/specs/insert.go | 0.545286 | 0.502686 | insert.go | starcoder |
package plotter
import (
"image/color"
"code.google.com/p/plotinum/plot"
"code.google.com/p/plotinum/vg"
)
// Line implements the Plotter interface, drawing a line.
type Line struct {
	// XYs is a copy of the points for this line.
	XYs

	// LineStyle is the style of the line connecting
	// the points.
	plot.LineStyle

	// ShadeColor is the color of the shaded area.
	ShadeColor *color.Color
}

// NewLine returns a Line that uses the default line style and
// does not draw glyphs. The point data is copied so later mutation
// of xys does not affect the plot.
func NewLine(xys XYer) (*Line, error) {
	data, err := CopyXYs(xys)
	if err != nil {
		return nil, err
	}
	return &Line{
		XYs:       data,
		LineStyle: DefaultLineStyle,
	}, nil
}
// Plot draws the Line, implementing the plot.Plotter
// interface. When ShadeColor is set, the area between the line and
// y = plt.Y.Min is filled first, then the line itself is stroked.
func (pts *Line) Plot(da plot.DrawArea, plt *plot.Plot) {
	trX, trY := plt.Transforms(&da)

	// Transform data coordinates into drawing coordinates.
	ps := make([]plot.Point, len(pts.XYs))
	for i, p := range pts.XYs {
		ps[i].X = trX(p.X)
		ps[i].Y = trY(p.Y)
	}

	if pts.ShadeColor != nil && len(ps) > 0 {
		da.SetColor(*pts.ShadeColor)
		minY := trY(plt.Y.Min)
		// Build a closed path: down at both ends, along the line on top.
		var pa vg.Path
		pa.Move(ps[0].X, minY)
		for i := range pts.XYs {
			pa.Line(ps[i].X, ps[i].Y)
		}
		pa.Line(ps[len(pts.XYs)-1].X, minY)
		pa.Close()
		da.Fill(pa)
	}

	da.StrokeLines(pts.LineStyle, da.ClipLinesXY(ps)...)
}
// DataRange returns the minimum and maximum
// x and y values of the line's points, implementing the
// plot.DataRanger interface.
func (pts *Line) DataRange() (xmin, xmax, ymin, ymax float64) {
	return XYRange(pts)
}
// Thumbnail draws the thumbnail for the Line,
// implementing the plot.Thumbnailer interface. A shaded line is drawn as a
// filled rectangle covering the draw area; an unshaded line is a single
// horizontal stroke through the center.
func (pts *Line) Thumbnail(da *plot.DrawArea) {
	if pts.ShadeColor != nil {
		points := []plot.Point{
			{da.Min.X, da.Min.Y},
			{da.Min.X, da.Max().Y},
			{da.Max().X, da.Max().Y},
			{da.Max().X, da.Min.Y},
		}
		poly := da.ClipPolygonY(points)
		da.FillPolygon(*pts.ShadeColor, poly)
		// NOTE(review): the original code appended the first corner back
		// onto points here without ever using the result — presumably a
		// leftover from an outline-stroking step. The dead append has been
		// removed; confirm whether a StrokeLines outline was intended.
	} else {
		y := da.Center().Y
		da.StrokeLine2(pts.LineStyle, da.Min.X, y, da.Max().X, y)
	}
}
// NewLinePoints returns both a Line and a
// Scatter for the given point data; the two share the same copied points.
func NewLinePoints(xys XYer) (*Line, *Scatter, error) {
	s, err := NewScatter(xys)
	if err != nil {
		return nil, nil, err
	}
	l := &Line{
		XYs:       s.XYs,
		LineStyle: DefaultLineStyle,
	}
	return l, s, nil
} | plotter/line.go | 0.83128 | 0.415195 | line.go | starcoder |
package largest_rectangle_in_histogram
import "container/list"
/*
84. Largest Rectangle in Histogram
https://leetcode-cn.com/problems/largest-rectangle-in-histogram/

Given n non-negative integers representing the heights of bars in a
histogram where each bar has width 1, find the area of the largest
rectangle that can be outlined in the histogram.

For the sample histogram [2,1,5,6,2,3], the largest such rectangle has
area 10.

Example:
  Input:  [2,1,5,6,2,3]
  Output: 10
*/

/*
Naive implementation 1: enumerate every index window [i, j]; the rectangle
formed on that window has area = window width * min(heights[i:j+1]).
Time complexity O(n^3), space complexity O(1).
Times out on some test cases.
*/
func largestRectangleArea01(heights []int) int {
	res := 0
	for i := range heights {
		for j := i; j < len(heights); j++ {
			res = max(res, (j-i+1)*min(heights[i:j+1]))
		}
	}
	return res
}
/*
Naive implementation 2: for every bar, expand left and right while the
neighboring bars are at least as tall; the candidate rectangle has that
bar's height and the expanded width.
Time complexity O(n^2), space complexity O(1).

BUG FIX: the original initialized width to 0, counting only the expansion
and omitting the bar itself — it returned 0 for [2] and 6 (instead of 10)
for [2,1,5,6,2,3]. width now starts at 1 so the bar is included.
*/
func largestRectangleArea02(heights []int) int {
	res := 0
	for i, h := range heights {
		width := 1 // the bar itself
		for left := i; left >= 1 && heights[left-1] >= h; left-- {
			width++
		}
		for right := i; right < len(heights)-1 && heights[right+1] >= h; right++ {
			width++
		}
		if area := width * h; area > res {
			res = area
		}
	}
	return res
}
/*
Main solution, building on naive implementation 2: monotonic stacks find,
for each position, the nearest position on its left/right whose height is
smaller than the current height. The widest rectangle at height h =
heights[i] then spans strictly between those two positions.
Time complexity O(n), space complexity O(n).
*/
func largestRectangleArea(heights []int) int {
	left, right := calLeft(heights), calRight(heights)
	res := 0
	for i, h := range heights {
		res = max(res, (right[i]-left[i]-1)*h)
	}
	return res
}
// calLeft returns, for every position i, the index of the nearest position
// to the left of i whose height is strictly smaller than heights[i], or -1
// when no such position exists. A monotonically increasing stack of indices
// keeps the whole pass O(n).
func calLeft(heights []int) []int {
	out := make([]int, len(heights))
	stack := list.New()
	for pos, height := range heights {
		// Drop stack entries at least as tall as the current bar.
		for stack.Len() > 0 && heights[stack.Back().Value.(int)] >= height {
			stack.Remove(stack.Back())
		}
		if stack.Len() > 0 {
			out[pos] = stack.Back().Value.(int)
		} else {
			out[pos] = -1
		}
		stack.PushBack(pos)
	}
	return out
}
// calRight returns, for every position i, the index of the nearest position
// to the right of i whose height is strictly smaller than heights[i], or
// len(heights) when no such position exists. It mirrors calLeft, scanning
// right-to-left with a monotonic stack in O(n).
func calRight(heights []int) []int {
	n := len(heights)
	out := make([]int, n)
	stack := list.New()
	for pos := n - 1; pos >= 0; pos-- {
		// Drop stack entries at least as tall as the current bar.
		for stack.Len() > 0 && heights[stack.Back().Value.(int)] >= heights[pos] {
			stack.Remove(stack.Back())
		}
		if stack.Len() > 0 {
			out[pos] = stack.Back().Value.(int)
		} else {
			out[pos] = n
		}
		stack.PushBack(pos)
	}
	return out
}
// min returns the smallest value in arr; arr must be non-empty.
func min(arr []int) int {
	lowest := arr[0]
	for _, v := range arr[1:] {
		if v < lowest {
			lowest = v
		}
	}
	return lowest
}
// max returns the larger of a and b.
func max(a, b int) int {
	if a > b {
		return a
	}
	return b
} | solutions/largest-rectangle-in-histogram/d.go | 0.60743 | 0.594698 | d.go | starcoder |
package stat
import "math"
// SummaryStatistics keeps track of the count, the sum, the min and the max of
// recorded values. We use a compensated sum to avoid accumulating rounding
// errors (see https://en.wikipedia.org/wiki/Kahan_summation_algorithm).
type SummaryStatistics struct {
count float64
sum float64
sumCompensation float64
simpleSum float64
min float64
max float64
}
func NewSummaryStatistics() *SummaryStatistics {
return &SummaryStatistics{
count: 0,
sum: 0,
sumCompensation: 0,
simpleSum: 0,
min: math.Inf(1),
max: math.Inf(-1),
}
}
// Count returns the total weight of recorded values.
func (s *SummaryStatistics) Count() float64 {
	return s.count
}

// Sum returns the compensated sum of recorded values.
func (s *SummaryStatistics) Sum() float64 {
	// Better error bounds to add both terms as the final sum
	tmp := s.sum + s.sumCompensation
	if math.IsNaN(tmp) && math.IsInf(s.simpleSum, 0) {
		// If the compensated sum is spuriously NaN from accumulating one or more same-signed infinite
		// values, return the correctly-signed infinity stored in simpleSum.
		return s.simpleSum
	} else {
		return tmp
	}
}

// Min returns the smallest recorded value (+Inf when nothing was recorded).
func (s *SummaryStatistics) Min() float64 {
	return s.min
}

// Max returns the largest recorded value (-Inf when nothing was recorded).
func (s *SummaryStatistics) Max() float64 {
	return s.max
}
// Add records value with the given weight, updating count, sum, min and max.
func (s *SummaryStatistics) Add(value, count float64) {
	s.AddToCount(count)
	s.AddToSum(value * count)
	if value < s.min {
		s.min = value
	}
	if value > s.max {
		s.max = value
	}
}

// AddToCount increases the total weight by addend without recording a value.
func (s *SummaryStatistics) AddToCount(addend float64) {
	s.count += addend
}

// AddToSum adds addend to both the compensated sum and the simple sum.
func (s *SummaryStatistics) AddToSum(addend float64) {
	s.sumWithCompensation(addend)
	s.simpleSum += addend
}
// MergeWith folds the statistics of o into s.
func (s *SummaryStatistics) MergeWith(o *SummaryStatistics) {
	s.count += o.count
	s.sumWithCompensation(o.sum)
	s.sumWithCompensation(o.sumCompensation)
	s.simpleSum += o.simpleSum
	if o.min < s.min {
		s.min = o.min
	}
	if o.max > s.max {
		s.max = o.max
	}
}

// sumWithCompensation adds value to the running sum using Kahan summation,
// accumulating the rounding error in sumCompensation.
func (s *SummaryStatistics) sumWithCompensation(value float64) {
	tmp := value - s.sumCompensation
	velvel := s.sum + tmp // little wolf of rounding error
	s.sumCompensation = velvel - s.sum - tmp
	s.sum = velvel
}
// Reweight adjusts the statistics so that they are equal to what they would
// have been if AddWithCount had been called with counts multiplied by factor.
func (s *SummaryStatistics) Reweight(factor float64) {
	s.count *= factor
	s.sum *= factor
	s.sumCompensation *= factor
	s.simpleSum *= factor
	if factor == 0 {
		// All weight gone: the statistics are empty again.
		s.min = math.Inf(1)
		s.max = math.Inf(-1)
	}
}

// Rescale adjusts the statistics so that they are equal to what they would have
// been if AddWithCount had been called with values multiplied by factor.
func (s *SummaryStatistics) Rescale(factor float64) {
	s.sum *= factor
	s.sumCompensation *= factor
	s.simpleSum *= factor
	if factor > 0 {
		s.min *= factor
		s.max *= factor
	} else if factor < 0 {
		// A negative factor swaps the roles of min and max.
		tmp := s.max * factor
		s.max = s.min * factor
		s.min = tmp
	} else if s.count != 0 {
		// factor == 0: every recorded value becomes 0.
		s.min = 0
		s.max = 0
	}
}
// Clear resets the statistics to their initial empty state.
func (s *SummaryStatistics) Clear() {
	s.count = 0
	s.sum = 0
	s.sumCompensation = 0
	s.simpleSum = 0
	s.min = math.Inf(1)
	s.max = math.Inf(-1)
}

// Copy returns an independent copy of the statistics.
func (s *SummaryStatistics) Copy() *SummaryStatistics {
	return &SummaryStatistics{
		count:           s.count,
		sum:             s.sum,
		sumCompensation: s.sumCompensation,
		simpleSum:       s.simpleSum,
		min:             s.min,
		max:             s.max,
	}
} | ddsketch/stat/summary.go | 0.802556 | 0.633509 | summary.go | starcoder |
package k2tree
import (
"fmt"
"math/bits"
"github.com/barakmich/k2tree/bytearray"
)
// byteArray is a bitarray backed by a bytearray.ByteArray. Bit 0 of each
// byte is its most-significant bit.
type byteArray struct {
	bytes  bytearray.ByteArray // backing byte storage
	length int                 // number of addressable bits
	total  int                 // cached number of bits currently set to 1
}

// Compile-time check that the bitarray interface is satisfied.
var _ bitarray = (*byteArray)(nil)

// newByteArray returns an empty byteArray over the given backing storage.
func newByteArray(bytes bytearray.ByteArray) *byteArray {
	return &byteArray{
		bytes:  bytes,
		length: 0,
		total:  0,
	}
}
// Len returns the number of bits in the array.
func (b *byteArray) Len() int {
	return b.length
}

// Set writes val at bit position at, keeping the cached popcount (total) in
// sync. It panics when at is beyond the current length.
func (b *byteArray) Set(at int, val bool) {
	if at >= b.length {
		panic("can't set a bit beyond the size of the array")
	}
	off := at >> 3               // byte offset
	bit := byte(at & 0x07)       // bit position within the byte
	t := byte(0x01 << (7 - bit)) // mask; bit 0 is the high bit
	orig := b.bytes.Get(off)
	var newbyte byte
	if val {
		newbyte = orig | t
	} else {
		newbyte = orig &^ t
	}
	b.bytes.Set(off, newbyte)
	if newbyte != orig {
		// Only adjust total when the bit actually flipped.
		if val {
			b.total++
		} else {
			b.total--
		}
	}
}
// Count returns the number of set bits in the half-open range [from, to).
// The bounds may be given in either order; both must be within the array.
func (b *byteArray) Count(from, to int) int {
	if from > to {
		from, to = to, from
	}
	if from > b.length || to > b.length {
		panic("out of range")
	}
	if from == to {
		return 0
	}
	c := 0
	startoff := from >> 3
	startbit := byte(from & 0x07)
	endoff := to >> 3
	endbit := byte(to & 0x07)
	if startoff == endoff {
		// The whole range lies within one byte; mask both ends.
		abit := byte(0xFF >> startbit)
		bbit := byte(0xFF >> endbit)
		return bits.OnesCount8(b.bytes.Get(startoff) & (abit &^ bbit))
	}
	if startbit != 0 {
		// Partial leading byte.
		c += bits.OnesCount8(b.bytes.Get(startoff) & (0xFF >> startbit))
		startoff++
	}
	if endbit != 0 {
		// Partial trailing byte.
		c += bits.OnesCount8(b.bytes.Get(endoff) & (0xFF &^ (0xFF >> endbit)))
	}
	// Whole bytes in between are counted by the backing store.
	c += int(b.bytes.PopCount(startoff, endoff))
	return c
}
// Total returns the total number of set bits in the array.
func (b *byteArray) Total() int {
	return b.total
}

// Get reports whether the bit at position at is set.
func (b *byteArray) Get(at int) bool {
	off := at >> 3
	lowb := byte(at & 0x07)
	mask := byte(0x01 << (7 - lowb))
	return !(b.bytes.Get(off)&mask == 0x00)
}

// String summarizes the array as its bit length and popcount.
func (b *byteArray) String() string {
	str := fmt.Sprintf("L%d T%d ", b.length, b.total)
	return str
}

// debug returns the same summary as String.
func (b *byteArray) debug() string {
	return b.String()
}
// Insert makes room for n new zero bits starting at bit offset at, shifting
// existing bits upward. at must be a multiple of 4; n must be 4 or a
// multiple of 4 (multiples of 8 take the whole-byte fast path).
func (b *byteArray) Insert(n, at int) (err error) {
	if at > b.length {
		panic("can't extend starting at a too large offset")
	}
	if n == 0 {
		return nil
	}
	if at%4 != 0 {
		panic("can only insert a sliceArray at offset multiples of 4")
	}
	if n%8 == 0 {
		err = b.insertEight(n, at)
	} else if n == 4 {
		err = b.insertFour(at)
	} else if n%4 == 0 {
		// Mixed case: insert the byte-aligned part, then a single nibble.
		mult8 := (n >> 3) << 3
		err = b.insertEight(mult8, at)
		if err != nil {
			return err
		}
		err = b.insertFour(at)
	} else {
		panic("can only extend a sliceArray by nibbles or multiples of 8")
	}
	if err != nil {
		return err
	}
	b.length = b.length + n
	return nil
}
// insertFour shifts everything from bit offset at onward by one nibble
// (4 bits), growing the backing storage by one byte when needed.
func (b *byteArray) insertFour(at int) error {
	if b.length%8 == 0 {
		// We need more space
		b.bytes.Insert(b.bytes.Len(), []byte{0x00})
	}
	off := at >> 3
	var inbyte byte
	if at%8 != 0 {
		// Keep the split byte's high nibble in place; carry its low nibble.
		inbyte = b.bytes.Get(off)
		b.bytes.Set(off, inbyte&0xF0)
		off++
	}
	inbyte = inbyte << 4
	// Ripple the carried nibble through the remaining bytes.
	for i := off; i < b.bytes.Len(); i++ {
		t := b.bytes.Get(i)
		b.bytes.Set(i, t>>4|inbyte)
		inbyte = t << 4
	}
	if inbyte != 0x00 {
		panic("Overshot")
	}
	return nil
}
// insertEight inserts n zero bits (n a multiple of 8) at bit offset at by
// splicing whole bytes into the backing storage.
func (b *byteArray) insertEight(n, at int) error {
	nBytes := n >> 3
	newbytes := make([]byte, nBytes)
	if at == b.length {
		// Appending at the very end needs no splitting.
		b.bytes.Insert(b.bytes.Len(), newbytes)
		return nil
	}
	off := at >> 3
	if at%8 == 0 {
		b.bytes.Insert(off, newbytes)
	} else {
		// Split the byte at off: its low nibble moves past the new bytes.
		b.bytes.Insert(off+1, newbytes)
		oldoff := b.bytes.Get(off)
		b.bytes.Set(off+nBytes, oldoff&0x0F)
		b.bytes.Set(off, oldoff&0xF0)
	}
	return nil
} | bytearray.go | 0.576661 | 0.426322 | bytearray.go | starcoder |
package okclient
import (
"encoding/json"
)
// RelationDTO is the transfer object for a relation between two CIs,
// identified by the CI ids it connects and the predicate between them.
type RelationDTO struct {
	Id          string        `json:"id"`
	FromCIID    string        `json:"fromCIID"`
	ToCIID      string        `json:"toCIID"`
	PredicateID string        `json:"predicateID"`
	State       RelationState `json:"state"`
}

// NewRelationDTO instantiates a new RelationDTO object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewRelationDTO(id string, fromCIID string, toCIID string, predicateID string, state RelationState) *RelationDTO {
	this := RelationDTO{}
	this.Id = id
	this.FromCIID = fromCIID
	this.ToCIID = toCIID
	this.PredicateID = predicateID
	this.State = state
	return &this
}

// NewRelationDTOWithDefaults instantiates a new RelationDTO object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewRelationDTOWithDefaults() *RelationDTO {
	this := RelationDTO{}
	return &this
}
// GetId returns the Id field value
func (o *RelationDTO) GetId() string {
	if o == nil {
		// Generated accessors tolerate nil receivers and return the zero value.
		var ret string
		return ret
	}
	return o.Id
}

// GetIdOk returns a tuple with the Id field value
// and a boolean to check if the value has been set.
func (o *RelationDTO) GetIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Id, true
}

// SetId sets field value
func (o *RelationDTO) SetId(v string) {
	o.Id = v
}

// GetFromCIID returns the FromCIID field value
func (o *RelationDTO) GetFromCIID() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.FromCIID
}

// GetFromCIIDOk returns a tuple with the FromCIID field value
// and a boolean to check if the value has been set.
func (o *RelationDTO) GetFromCIIDOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.FromCIID, true
}

// SetFromCIID sets field value
func (o *RelationDTO) SetFromCIID(v string) {
	o.FromCIID = v
}

// GetToCIID returns the ToCIID field value
func (o *RelationDTO) GetToCIID() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.ToCIID
}

// GetToCIIDOk returns a tuple with the ToCIID field value
// and a boolean to check if the value has been set.
func (o *RelationDTO) GetToCIIDOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.ToCIID, true
}

// SetToCIID sets field value
func (o *RelationDTO) SetToCIID(v string) {
	o.ToCIID = v
}

// GetPredicateID returns the PredicateID field value
func (o *RelationDTO) GetPredicateID() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.PredicateID
}

// GetPredicateIDOk returns a tuple with the PredicateID field value
// and a boolean to check if the value has been set.
func (o *RelationDTO) GetPredicateIDOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.PredicateID, true
}

// SetPredicateID sets field value
func (o *RelationDTO) SetPredicateID(v string) {
	o.PredicateID = v
}

// GetState returns the State field value
func (o *RelationDTO) GetState() RelationState {
	if o == nil {
		var ret RelationState
		return ret
	}
	return o.State
}

// GetStateOk returns a tuple with the State field value
// and a boolean to check if the value has been set.
func (o *RelationDTO) GetStateOk() (*RelationState, bool) {
	if o == nil {
		return nil, false
	}
	return &o.State, true
}

// SetState sets field value
func (o *RelationDTO) SetState(v RelationState) {
	o.State = v
}
func (o RelationDTO) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if true {
toSerialize["id"] = o.Id
}
if true {
toSerialize["fromCIID"] = o.FromCIID
}
if true {
toSerialize["toCIID"] = o.ToCIID
}
if true {
toSerialize["predicateID"] = o.PredicateID
}
if true {
toSerialize["state"] = o.State
}
return json.Marshal(toSerialize)
}
type NullableRelationDTO struct {
value *RelationDTO
isSet bool
}
func (v NullableRelationDTO) Get() *RelationDTO {
return v.value
}
func (v *NullableRelationDTO) Set(val *RelationDTO) {
v.value = val
v.isSet = true
}
func (v NullableRelationDTO) IsSet() bool {
return v.isSet
}
func (v *NullableRelationDTO) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableRelationDTO(val *RelationDTO) *NullableRelationDTO {
return &NullableRelationDTO{value: val, isSet: true}
}
func (v NullableRelationDTO) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableRelationDTO) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | model_relation_dto.go | 0.738009 | 0.429908 | model_relation_dto.go | starcoder |
package resourcetree
import (
"reflect"
)
const (
v1TimeType = "v1.Time"
volatileTimeType = "apis.VolatileTime"
)
// StructKindNode represents nodes in the resource tree of type reflect.Kind.Struct
type StructKindNode struct {
NodeData
}
// GetData returns node data
func (s *StructKindNode) GetData() NodeData {
return s.NodeData
}
func (s *StructKindNode) initialize(field string, parent NodeInterface, t reflect.Type, rt *ResourceTree) {
s.NodeData.initialize(field, parent, t, rt)
}
func (s *StructKindNode) buildChildNodes(t reflect.Type) {
// For types that are part of the standard package, we treat them as leaf nodes and don't expand further.
// https://golang.org/pkg/reflect/#StructField.
if len(s.FieldType.PkgPath()) == 0 {
s.LeafNode = true
return
}
for i := 0; i < t.NumField(); i++ {
var childNode NodeInterface
if s.isTimeNode(t.Field(i).Type) {
childNode = new(TimeTypeNode)
childNode.initialize(t.Field(i).Name, s, t.Field(i).Type, s.Tree)
} else {
childNode = s.Tree.createNode(t.Field(i).Name, s, t.Field(i).Type)
}
s.Children[t.Field(i).Name] = childNode
childNode.buildChildNodes(t.Field(i).Type)
}
}
func (s *StructKindNode) isTimeNode(t reflect.Type) bool {
if t.Kind() == reflect.Struct {
return t.String() == v1TimeType || t.String() == volatileTimeType
} else if t.Kind() == reflect.Ptr {
return t.Elem().String() == v1TimeType || t.String() == volatileTimeType
} else {
return false
}
}
func (s *StructKindNode) updateCoverage(v reflect.Value) {
if v.IsValid() {
s.Covered = true
if !s.LeafNode {
for i := 0; i < v.NumField(); i++ {
s.Children[v.Type().Field(i).Name].updateCoverage(v.Field(i))
}
}
}
}
func (s *StructKindNode) buildCoverageData(coverageHelper coverageDataHelper) {
if len(s.Children) == 0 {
return
}
coverage := s.Tree.Forest.getConnectedNodeCoverage(s.FieldType, coverageHelper.fieldRules, coverageHelper.ignoredFields)
*coverageHelper.typeCoverage = append(*coverageHelper.typeCoverage, coverage)
// Adding the type to covered fields so as to avoid revisiting the same node in other parts of the resource tree.
coverageHelper.coveredTypes[s.FieldType.PkgPath()+"."+s.FieldType.Name()] = true
for field := range coverage.Fields {
node := s.Children[field]
if !coverage.Fields[field].Ignored && node.GetData().Covered && coverageHelper.nodeRules.Apply(node) {
// Check to see if the type has already been covered.
if ok, _ := coverageHelper.coveredTypes[node.GetData().FieldType.PkgPath()+"."+node.GetData().FieldType.Name()]; !ok {
node.buildCoverageData(coverageHelper)
}
}
}
}
func (s *StructKindNode) getValues() map[string]bool {
return nil
} | tools/webhook-apicoverage/resourcetree/structkindnode.go | 0.671255 | 0.470493 | structkindnode.go | starcoder |
package ro
import "github.com/MaxSlyugrov/cldr"
var calendar = cldr.Calendar{
Formats: cldr.CalendarFormats{
Date: cldr.CalendarDateFormat{Full: "EEEE, d MMMM y", Long: "d MMMM y", Medium: "d MMM y", Short: "dd.MM.y"},
Time: cldr.CalendarDateFormat{Full: "HH:mm:ss zzzz", Long: "HH:mm:ss z", Medium: "HH:mm:ss", Short: "HH:mm"},
DateTime: cldr.CalendarDateFormat{Full: "{1}, {0}", Long: "{1}, {0}", Medium: "{1}, {0}", Short: "{1}, {0}"},
},
FormatNames: cldr.CalendarFormatNames{
Months: cldr.CalendarMonthFormatNames{
Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "ian.", Feb: "feb.", Mar: "mar.", Apr: "apr.", May: "mai", Jun: "iun.", Jul: "iul.", Aug: "aug.", Sep: "sept.", Oct: "oct.", Nov: "nov.", Dec: "dec."},
Narrow: cldr.CalendarMonthFormatNameValue{Jan: "I", Feb: "F", Mar: "M", Apr: "A", May: "M", Jun: "I", Jul: "I", Aug: "A", Sep: "S", Oct: "O", Nov: "N", Dec: "D"},
Short: cldr.CalendarMonthFormatNameValue{},
Wide: cldr.CalendarMonthFormatNameValue{Jan: "Ianuarie", Feb: "Februarie", Mar: "Martie", Apr: "Aprilie", May: "Mai", Jun: "Iunie", Jul: "Iulie", Aug: "August", Sep: "Septembrie", Oct: "Octombrie", Nov: "Noiembrie", Dec: "Decembrie"},
},
Days: cldr.CalendarDayFormatNames{
Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "Dum", Mon: "Lun", Tue: "Mar", Wed: "Mie", Thu: "Joi", Fri: "Vin", Sat: "Sâm"},
Narrow: cldr.CalendarDayFormatNameValue{Sun: "D", Mon: "L", Tue: "M", Wed: "M", Thu: "J", Fri: "V", Sat: "S"},
Short: cldr.CalendarDayFormatNameValue{Sun: "Du", Mon: "Lu", Tue: "Ma", Wed: "Mi", Thu: "Jo", Fri: "Vi", Sat: "Sâ"},
Wide: cldr.CalendarDayFormatNameValue{Sun: "Duminică", Mon: "Luni", Tue: "Marți", Wed: "Miercuri", Thu: "Joi", Fri: "Vineri", Sat: "Sâmbătă"},
},
Periods: cldr.CalendarPeriodFormatNames{
Abbreviated: cldr.CalendarPeriodFormatNameValue{},
Narrow: cldr.CalendarPeriodFormatNameValue{AM: "a.m.", PM: "p.m."},
Short: cldr.CalendarPeriodFormatNameValue{},
Wide: cldr.CalendarPeriodFormatNameValue{AM: "a.m.", PM: "p.m."},
},
},
} | resources/locales/ro/calendar.go | 0.503906 | 0.416322 | calendar.go | starcoder |
package main
import (
"bufio"
"fmt"
"log"
"math"
"os"
"strconv"
"strings"
)
type point struct {
X, Y int
}
type wirePoint struct {
direction string
length int
}
type wire struct {
Path []point
}
func newPoint(x int, y int) *point {
return &point{X: x, Y: y}
}
func (p1 point) addPoint(p2 point) point {
return *newPoint(p1.X+p2.X, p1.Y+p2.Y)
}
func (p1 point) equals(p2 point) bool {
return p1.X == p2.X && p1.Y == p2.Y
}
func (p1 point) stringify() string {
return strconv.Itoa(p1.X) + "-" + strconv.Itoa(p1.Y)
}
func (wp wirePoint) generateCoveredPoints(referencePoint point) []point {
var result []point
pointDelta := newPoint(0, 0)
if wp.direction == "R" {
pointDelta.X = 1
} else if wp.direction == "U" {
pointDelta.Y = -1
} else if wp.direction == "D" {
pointDelta.Y = 1
} else {
pointDelta.X = -1
}
prevPoint := referencePoint
for i := 0; i < wp.length; i++ {
result = append(result, prevPoint.addPoint(*pointDelta))
prevPoint = result[len(result)-1]
}
return result
}
func (w *wire) addPointsToPath(points []point) {
for _, p := range points {
w.Path = append(w.Path, p)
}
}
func abs(x int) int {
if x < 0 {
x = -x
}
return x
}
func manhattanDistance(p1 point, p2 point) int {
return abs(p1.X-p2.X) + abs(p1.Y-p2.Y)
}
func newWire() *wire {
return &wire{}
}
func newWirePoint(direction string, length int) *wirePoint {
return &wirePoint{direction, length}
}
func constructWireWithPath(wirePath []wirePoint, referencePoint point) wire {
wire := newWire()
wire.addPointsToPath([]point{referencePoint})
for _, currentWirePoint := range wirePath {
coveredPoints := currentWirePoint.generateCoveredPoints(referencePoint)
wire.addPointsToPath(coveredPoints)
referencePoint = coveredPoints[len(coveredPoints)-1]
}
return *wire
}
func findStepsToPoint(points []point, targetPoint point) int {
for i, point := range points {
if point.equals(targetPoint) {
return i
}
}
return -1
}
func findMinimumSignalDelay(wire1 wire, wire2 wire, intersectionPoints []point) int {
minimumSignalDelay := math.MaxInt16
for _, point := range intersectionPoints {
currentSignalDelay := findSignalDelay(wire1, wire2, point)
if currentSignalDelay < minimumSignalDelay {
minimumSignalDelay = currentSignalDelay
}
}
return minimumSignalDelay
}
func findSignalDelay(wire1 wire, wire2 wire, intersectionPoint point) int {
return findStepsToPoint(wire1.Path, intersectionPoint) + findStepsToPoint(wire2.Path, intersectionPoint)
}
func findNearestPointToReference(pointsToSearch []point, referencePoint point) (point, int) {
nearestIntersectionDistance := math.MaxInt16
var nearestIntersectionPoint point
for _, point := range pointsToSearch {
distance := manhattanDistance(point, centralPortPoint)
if distance < nearestIntersectionDistance {
nearestIntersectionDistance = distance
nearestIntersectionPoint = point
}
}
return nearestIntersectionPoint, nearestIntersectionDistance
}
func findIntersectingPoints(points1 []point, points2 []point) []point {
var result []point
ht := make(map[string]bool)
for _, p1 := range points1 {
key := p1.stringify()
ht[key] = true
}
for _, p2 := range points2 {
key := p2.stringify()
_, ok := ht[key]
if ok && !p2.equals(centralPortPoint) {
result = append(result, p2)
}
}
return result
}
var (
centralPortPoint = *newPoint(0, 0)
)
func main() {
file, err := os.Open("input.txt")
if err != nil {
log.Fatal(err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
var wirePaths [][]wirePoint
for scanner.Scan() {
text := scanner.Text()
wirePath := make([]wirePoint, 0)
for _, pathPoint := range strings.Split(strings.TrimRight(text, "\n"), ",") {
direction := string(pathPoint[0])
distance, _ := strconv.Atoi(pathPoint[1:])
wirePath = append(wirePath, *newWirePoint(direction, distance))
}
wirePaths = append(wirePaths, wirePath)
}
wire1 := constructWireWithPath(wirePaths[0], centralPortPoint)
wire2 := constructWireWithPath(wirePaths[1], centralPortPoint)
intersectingPoints := findIntersectingPoints(wire1.Path, wire2.Path)
nearestIntersectionPoint, nearestIntersectionDistance := findNearestPointToReference(intersectingPoints, centralPortPoint)
fmt.Println("Nearest intersection distance is ", nearestIntersectionDistance)
fmt.Println("Nearest intersection point is ", nearestIntersectionPoint)
fmt.Println("Minimum signal delay is ", findMinimumSignalDelay(wire1, wire2, intersectingPoints))
} | 2019/day3/day_3.go | 0.604632 | 0.5047 | day_3.go | starcoder |
package sdl
// #include "includes.h"
import "C"
// Header file for SDL_rect definition and management functions.
// The structure that defines a point.
//
// See also: SDL_EnclosePoints
//
// See also: SDL_PointInRect
//
// ↪ https://wiki.libsdl.org/SDL_Point
type Point struct {
X int
Y int
}
func fromC2Point(s C.SDL_Point) Point {
return Point{int(s.x), int(s.y)}
}
func toCFromPoint(s Point) (d C.SDL_Point) {
d.x = C.int(s.X)
d.y = C.int(s.Y)
return
}
// A rectangle, with the origin at the upper left.
//
// See also: SDL_RectEmpty
//
// See also: SDL_RectEquals
//
// See also: SDL_HasIntersection
//
// See also: SDL_IntersectRect
//
// See also: SDL_UnionRect
//
// See also: SDL_EnclosePoints
//
// ↪ https://wiki.libsdl.org/SDL_Rect
type Rect struct {
X int
Y int
W int
H int
}
func fromC2Rect(s C.SDL_Rect) Rect {
return Rect{int(s.x), int(s.y), int(s.w), int(s.h)}
}
func toCFromRect(s Rect) (d C.SDL_Rect) {
d.x = C.int(s.X)
d.y = C.int(s.Y)
d.w = C.int(s.W)
d.h = C.int(s.H)
return
}
// Returns true if point resides inside a rectangle.
// ↪ https://wiki.libsdl.org/SDL_PointInRect
func PointInRect(p Point, r Rect) (retval bool) {
tmp_p := toCFromPoint(p)
tmp_r := toCFromRect(r)
retval = C.SDL_TRUE==(C.SDL_PointInRect((*C.SDL_Point)(&tmp_p), (*C.SDL_Rect)(&tmp_r)))
return
}
// Returns true if the rectangle has no area.
// ↪ https://wiki.libsdl.org/SDL_RectEmpty
func RectEmpty(r Rect) (retval bool) {
tmp_r := toCFromRect(r)
retval = C.SDL_TRUE==(C.SDL_RectEmpty((*C.SDL_Rect)(&tmp_r)))
return
}
// Returns true if the two rectangles are equal.
// ↪ https://wiki.libsdl.org/SDL_RectEquals
func RectEquals(a Rect, b Rect) (retval bool) {
tmp_a := toCFromRect(a)
tmp_b := toCFromRect(b)
retval = C.SDL_TRUE==(C.SDL_RectEquals((*C.SDL_Rect)(&tmp_a), (*C.SDL_Rect)(&tmp_b)))
return
}
// Determine whether two rectangles intersect.
//
// Returns: SDL_TRUE if there is an intersection, SDL_FALSE otherwise.
//
// ↪ https://wiki.libsdl.org/SDL_HasIntersection
func HasIntersection(A Rect, B Rect) (retval bool) {
tmp_A := toCFromRect(A)
tmp_B := toCFromRect(B)
retval = C.SDL_TRUE==(C.SDL_HasIntersection((*C.SDL_Rect)(&tmp_A), (*C.SDL_Rect)(&tmp_B)))
return
}
// Calculate the intersection of two rectangles.
//
// Returns: SDL_TRUE if there is an intersection, SDL_FALSE otherwise.
//
// ↪ https://wiki.libsdl.org/SDL_IntersectRect
func IntersectRect(A Rect, B Rect) (retval bool, result Rect) {
tmp_A := toCFromRect(A)
tmp_B := toCFromRect(B)
tmp_result := new(C.SDL_Rect)
retval = C.SDL_TRUE==(C.SDL_IntersectRect((*C.SDL_Rect)(&tmp_A), (*C.SDL_Rect)(&tmp_B), (*C.SDL_Rect)(tmp_result)))
result = fromC2Rect(*(tmp_result))
return
}
// Calculate the union of two rectangles.
// ↪ https://wiki.libsdl.org/SDL_UnionRect
func UnionRect(A Rect, B Rect) (result Rect) {
tmp_A := toCFromRect(A)
tmp_B := toCFromRect(B)
tmp_result := new(C.SDL_Rect)
C.SDL_UnionRect((*C.SDL_Rect)(&tmp_A), (*C.SDL_Rect)(&tmp_B), (*C.SDL_Rect)(tmp_result))
result = fromC2Rect(*(tmp_result))
return
}
// Calculate a minimal rectangle enclosing a set of points.
//
// Returns: SDL_TRUE if any points were within the clipping rect
//
// ↪ https://wiki.libsdl.org/SDL_EnclosePoints
func EnclosePoints(points []Point, clip Rect) (retval bool, result Rect) {
var tmp_points *C.SDL_Point
if len(points) > 0 {
sl_tmp_points := make([]C.SDL_Point, len(points))
for i := range points {
sl_tmp_points[i] = toCFromPoint(points[i])
}
tmp_points = &(sl_tmp_points[0])
}
tmp_count := len(points)
tmp_clip := toCFromRect(clip)
tmp_result := new(C.SDL_Rect)
retval = C.SDL_TRUE==(C.SDL_EnclosePoints((tmp_points), C.int(tmp_count), (*C.SDL_Rect)(&tmp_clip), (*C.SDL_Rect)(tmp_result)))
result = fromC2Rect(*(tmp_result))
return
} | sdl/SDL_rect.h.go | 0.713931 | 0.737678 | SDL_rect.h.go | starcoder |
package render
import (
"strings"
"github.com/weaveworks/scope/probe/kubernetes"
"github.com/weaveworks/scope/report"
)
// KubernetesVolumesRenderer is a Renderer which combines all Kubernetes
// volumes components such as stateful Pods, Persistent Volume, Persistent Volume Claim, Storage Class.
var KubernetesVolumesRenderer = MakeReduce(
VolumesRenderer,
PodToVolumeRenderer,
PVCToStorageClassRenderer,
PVToControllerRenderer,
)
// VolumesRenderer is a Renderer which produces a renderable kubernetes PV & PVC
// graph by merging the pods graph and the Persistent Volume topology.
var VolumesRenderer = volumesRenderer{}
// volumesRenderer is a Renderer to render PV & PVC nodes.
type volumesRenderer struct{}
// Render renders PV & PVC nodes along with adjacency
func (v volumesRenderer) Render(rpt report.Report) Nodes {
nodes := make(report.Nodes)
for id, n := range rpt.PersistentVolumeClaim.Nodes {
volume, _ := n.Latest.Lookup(kubernetes.VolumeName)
for pvNodeID, p := range rpt.PersistentVolume.Nodes {
volumeName, _ := p.Latest.Lookup(kubernetes.Name)
if volume == volumeName {
n.Adjacency = n.Adjacency.Add(p.ID)
n.Children = n.Children.Add(p)
}
nodes[pvNodeID] = p
}
nodes[id] = n
}
return Nodes{Nodes: nodes}
}
// PodToVolumeRenderer is a Renderer which produces a renderable kubernetes Pod
// graph by merging the pods graph and the Persistent Volume Claim topology.
// Pods having persistent volumes are rendered.
var PodToVolumeRenderer = podToVolumesRenderer{}
// VolumesRenderer is a Renderer to render Pods & PVCs.
type podToVolumesRenderer struct{}
// Render renders the Pod nodes having volumes adjacency.
func (v podToVolumesRenderer) Render(rpt report.Report) Nodes {
nodes := make(report.Nodes)
for podID, podNode := range rpt.Pod.Nodes {
ClaimName, _ := podNode.Latest.Lookup(kubernetes.VolumeClaim)
for _, pvcNode := range rpt.PersistentVolumeClaim.Nodes {
pvcName, _ := pvcNode.Latest.Lookup(kubernetes.Name)
if pvcName == ClaimName {
podNode.Adjacency = podNode.Adjacency.Add(pvcNode.ID)
podNode.Children = podNode.Children.Add(pvcNode)
}
}
nodes[podID] = podNode
}
return Nodes{Nodes: nodes}
}
// PVCToStorageClassRenderer is a Renderer which produces a renderable kubernetes PVC
// & Storage class graph.
var PVCToStorageClassRenderer = pvcToStorageClassRenderer{}
// pvcToStorageClassRenderer is a Renderer to render PVC & StorageClass.
type pvcToStorageClassRenderer struct{}
// Render renders the PVC & Storage Class nodes with adjacency.
func (v pvcToStorageClassRenderer) Render(rpt report.Report) Nodes {
nodes := make(report.Nodes)
for scID, scNode := range rpt.StorageClass.Nodes {
storageClass, _ := scNode.Latest.Lookup(kubernetes.Name)
for _, pvcNode := range rpt.PersistentVolumeClaim.Nodes {
storageClassName, _ := pvcNode.Latest.Lookup(kubernetes.StorageClassName)
if storageClassName == storageClass {
scNode.Adjacency = scNode.Adjacency.Add(pvcNode.ID)
scNode.Children = scNode.Children.Add(pvcNode)
}
}
nodes[scID] = scNode
}
return Nodes{Nodes: nodes}
}
//PVToControllerRenderer is a Renderer which produces a renderable kubernetes PVC
var PVToControllerRenderer = pvTocontrollerRenderer{}
//pvTocontrollerRenderer is a Renderer to render PV & Controller.
type pvTocontrollerRenderer struct{}
//Render renders the PV & Controller nodes with adjacency.
func (v pvTocontrollerRenderer) Render(rpt report.Report) Nodes {
nodes := make(report.Nodes)
for pvNodeID, p := range rpt.PersistentVolume.Nodes {
volumeName, _ := p.Latest.Lookup(kubernetes.Name)
for _, podNode := range rpt.Pod.Nodes {
Controller, _ := podNode.Latest.Lookup(kubernetes.Name)
if strings.Contains(Controller, "-ctrl-") {
podName := strings.Split(Controller, "-ctrl-")
Controller = podName[0]
if volumeName == Controller {
p.Adjacency = p.Adjacency.Add(podNode.ID)
p.Children = p.Children.Add(podNode)
}
}
}
nodes[pvNodeID] = p
}
return Nodes{Nodes: nodes}
} | render/persistentvolume.go | 0.685002 | 0.452899 | persistentvolume.go | starcoder |
package decoder
import (
"github.com/rqme/neat"
"github.com/rqme/neat/network"
)
// Helper that decodes the genome into a neural network
type Classic struct{}
// Decodes the genome into a phenome
func (d Classic) Decode(g neat.Genome) (p neat.Phenome, err error) {
// Return the phenome
net, e := d.decode(g)
if e != nil {
err = e
}
p = Phenome{
Genome: g,
Network: net,
}
return
}
func (d Classic) decode(g neat.Genome) (net neat.Network, err error) {
// Identify the genes
nodes, conns := g.GenesByPosition()
// Create the neurons
nmap := make(map[int]int)
neurons := make([]network.Neuron, len(nodes))
for i, ng := range nodes {
nmap[ng.Innovation] = i
neurons[i] = network.Neuron{NeuronType: ng.NeuronType, ActivationType: ng.ActivationType, X: ng.X, Y: ng.Y}
}
// Create the synapses
//forward := true // Keep track of conenctions to determine if this is a feed-forward only network
synapses := make([]network.Synapse, 0, len(conns))
for _, cg := range conns {
if cg.Enabled {
//src, tgt := nodes[nmap[cg.Source]], nodes[nmap[cg.Target]]
//forward = forward && src.Y < tgt.Y
synapses = append(synapses, network.Synapse{
Source: nmap[cg.Source],
Target: nmap[cg.Target],
Weight: cg.Weight,
})
}
}
net, err = network.New(neurons, synapses)
return
}
// Removed recurrent functionality 2015-09-15 (BSH) to simplify and improve performance. Leaving this for now in case I bring it back.
func calcIters(neurons []network.Neuron, synapses []network.Synapse) int {
a := make(map[float64]bool, 10)
b := make(map[float64]bool, 10)
for _, s := range synapses {
src := neurons[s.Source]
tgt := neurons[s.Target]
a[tgt.Y] = true
if tgt.Y <= src.Y {
b[src.Y] = true
}
}
return len(a) + len(b)
}
type sortnodes []neat.Node
func (s sortnodes) Len() int { return len(s) }
func (s sortnodes) Less(i, j int) bool {
if s[i].Y == s[j].Y {
return s[i].X < s[j].X
} else {
return s[i].Y < s[j].Y
}
}
func (s sortnodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
type sortconns struct {
nodes map[int]int
conns []neat.Connection
}
func (s sortconns) Len() int { return len(s.conns) }
func (s sortconns) Less(i, j int) bool {
si := s.nodes[s.conns[i].Source]
ti := s.nodes[s.conns[i].Target]
sj := s.nodes[s.conns[j].Source]
tj := s.nodes[s.conns[j].Target]
if ti == tj {
return si < sj
} else {
return ti < tj
}
}
func (s sortconns) Swap(i, j int) { s.conns[i], s.conns[j] = s.conns[j], s.conns[i] } | decoder/classic.go | 0.700997 | 0.467575 | classic.go | starcoder |
package main
var schemas = `
{
"API": {
"createAsset": {
"description": "Create an asset. One argument, a JSON encoded event. AssetID is required with zero or more writable properties. Establishes an initial asset state.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "A set of fields that constitute the writable fields in an asset's state. AssetID is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"assetID": {
"description": "The ID of a managed asset.In this case, the uniqie ID of the case machine.",
"type": "string"
},
"actiontype": {
"description": "A String with one of three values is expected: InitialBalance, Deposit or Withdraw",
"type": "string"
},
"amount": {
"description": "The transaction amount.",
"type": "number"
},
"timestamp": {
"description": "Current timestamp. If not sent in, the transaction time is set",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "createAsset function",
"enum": [
"createAsset"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"deleteAsset": {
"description": "Delete an asset and its history. Argument is a JSON encoded string containing only an assetID.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "An object containing only an assetID for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "deleteAsset function",
"enum": [
"deleteAsset"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
},
"init": {
"description": "Initializes the contract when started, either by deployment or by peer restart.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "event sent to init on deployment",
"properties": {
"version": {
"description": "The ID of a managed asset, the cash machine.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "init function",
"enum": [
"init"
],
"type": "string"
},
"method": "deploy"
},
"type": "object"
},
"readAsset": {
"description": "Returns the state an asset. Argument is a JSON encoded string. AssetID is the only accepted property.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "An object containing only an assetID for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "readAsset function",
"enum": [
"readAsset"
],
"type": "string"
},
"method": "query",
"result": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"assetID": {
"description": "The ID of a managed asset.In this case, the uniqie ID of the case machine.",
"type": "string"
},
"actiontype": {
"description": "The last transaction: InitialBalance, Deposit or Withdraw",
"type": "string"
},
"amount": {
"description": "The last transaction amount.",
"type": "number"
},
"balance": {
"description": "The current balance of the asset.",
"type": "number"
},
"timestamp": {
"description": "Current timestamp. If not sent in, the transaction time is set",
"type": "string"
}
},
"type": "object"
}
},
"type": "object"
},
"readAssetHistory": {
"description": "Requests a specified number of history states for an assets. Returns an array of states sorted with the most recent first. AssetID is required and count is optional. A missing count, a count of zero, or too large a count returns all existing history states.",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "Requested assetID",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "readAssetHistory function",
"enum": [
"readAssetHistory"
],
"type": "string"
},
"method": "query",
"result": {
"description": "an array of states for one asset sorted by timestamp with the most recent entry first",
"items": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"assetID": {
"description": "The ID of a managed asset.In this case, the uniqie ID of the case machine.",
"type": "string"
},
"actiontype": {
"description": "The last transaction: InitialBalance, Deposit or Withdraw",
"type": "string"
},
"amount": {
"description": "The last transaction amount.",
"type": "number"
},
"balance": {
"description": "The current balance of the asset.",
"type": "number"
},
"timestamp": {
"description": "Current timestamp. If not sent in, the transaction time is set",
"type": "string"
}
},
"type": "object"
},
"minItems": 0,
"type": "array"
}
},
"type": "object"
},
"updateAsset": {
"description": "Update the state of an asset. The one argument is a JSON encoded event. AssetID is required along with one or more writable properties. Establishes the next asset state. ",
"properties": {
"args": {
"description": "args are JSON encoded strings",
"items": {
"description": "A set of fields that constitute the writable fields in an asset's state. AssetID is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"assetID": {
"description": "The ID of a managed asset.In this case, the uniqie ID of the case machine.",
"type": "string"
},
"actiontype": {
"description": "A String with one of three values is expected: InitialBalance, Deposit or Withdraw",
"type": "string"
},
"amount": {
"description": "The transaction amount.",
"type": "number"
},
"timestamp": {
"description": "Current timestamp. If not sent in, the transaction time is set",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"maxItems": 1,
"minItems": 1,
"type": "array"
},
"function": {
"description": "updateAsset function",
"enum": [
"updateAsset"
],
"type": "string"
},
"method": "invoke"
},
"type": "object"
}
},
"objectModelSchemas": {
"assetIDKey": {
"description": "An object containing only an assetID for use as an argument to read or delete.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"type": "object"
},
"assetIDandCount": {
"description": "Requested assetID with item count.",
"properties": {
"assetID": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
},
"count": {
"type": "integer"
}
},
"required": [
"assetID"
],
"type": "object"
},
"event": {
"description": "A set of fields that constitute the writable fields in an asset's state. AssetID is mandatory along with at least one writable field. In this contract pattern, a partial state is used as an event.",
"properties": {
"assetID": {
"description": "The ID of a managed asset.In this case, the uniqie ID of the case machine.",
"type": "string"
},
"actiontype": {
"description": "A String with one of three values is expected: InitialBalance, Deposit or Withdraw",
"type": "string"
},
"amount": {
"description": "The transaction amount.",
"type": "number"
},
"timestamp": {
"description": "Current timestamp. If not sent in, the transaction time is set",
"type": "string"
}
},
"required": [
"assetID"
],
"type": "object"
},
"initEvent": {
"description": "event sent to init on deployment",
"properties": {
"version": {
"description": "The ID of a managed asset. The resource focal point for a smart contract.",
"type": "string"
}
},
"required": [
"version"
],
"type": "object"
},
"state": {
"description": "A set of fields that constitute the complete asset state.",
"properties": {
"assetID": {
"description": "The ID of a managed asset.In this case, the uniqie ID of the case machine.",
"type": "string"
},
"actiontype": {
"description": "A String with one of three values is expected: InitialBalance, Deposit or Withdraw",
"type": "string"
},
"amount": {
"description": "The transaction amount.",
"type": "number"
},
"balance": {
"description": "The balance on the asset.",
"type": "number"
},
"timestamp": {
"description": "Current timestamp. If not sent in, the transaction time is set",
"type": "string"
}
},
"type": "object"
}
}
}` | contracts/industry/cashMachine/schemas.go | 0.885903 | 0.567098 | schemas.go | starcoder |
package main
// Move contains the information needed to transition from one Position to another.
type Move struct {
From Square
To Square // invariant: not equal to From
Piece Piece // the moving Piece; invariant: not None
CapturePiece Piece // the Piece being captured, or else None
EP bool // whether the Move is an en passant capture
PromotePiece Piece // the Piece being promoted to, or else None
}
// IsCapture reports whether m captures a piece.
func (m Move) IsCapture() bool {
	return m.CapturePiece != None
}
// IsPromotion reports whether m promotes a pawn.
func (m Move) IsPromotion() bool {
	return m.PromotePiece != None
}
// IsDouble reports whether m is an initial two-square pawn push,
// i.e. a pawn moving from rank 1 to rank 3 or from rank 6 to rank 4.
func (m Move) IsDouble() bool {
	if m.Piece != Pawn {
		return false
	}
	lowDouble := m.From.Rank() == 1 && m.To.Rank() == 3
	highDouble := m.From.Rank() == 6 && m.To.Rank() == 4
	return lowDouble || highDouble
}
// IsCastle reports whether m castles and to which side.
// If ok is true, side reports the Side to which m castles.
func (m Move) IsCastle() (side Side, ok bool) {
	if m.Piece != King {
		return
	}
	// A castling king moves exactly two squares along its rank:
	// two squares toward a1/a8 is queenside, two toward h1/h8 is kingside.
	if m.From-m.To == 2 {
		return QS, true
	}
	if m.To-m.From == 2 {
		return KS, true
	}
	return
}
// Make applies m to pos and reports the resulting Position.
// Behavior is undefined when m is illegal in pos.
//
// The function updates, in a carefully fixed order: en passant Zobrist state,
// piece bitboards, castling rights, halfmove/fullmove counters, the side to
// move, and finally the new en passant state.
func Make(pos Position, m Move) Position {
	// update changes pos.z and the Boards of c when a Piece of type p moves to or from s.
	// The XOR makes it self-inverse: one call toggles occupancy of s for (c, p).
	update := func(c Color, p Piece, s Square) {
		b := s.Board()
		pos.b[c][p] ^= b
		pos.b[c][All] ^= b
		pos.z.xorPiece(c, p, s)
	}
	// Remove en passant capturing rights from the Zobrist bitstring.
	// In the event of an en passant capture, this must be done before the pawn bitboard is changed.
	if pos.ep != 0 {
		if a, b := eligibleEPCapturers(pos); a != 0 {
			pos.z.xor(canEPCaptureZobrist[a.File()])
			if b != 0 {
				pos.z.xor(canEPCaptureZobrist[b.File()])
			}
		}
	}
	// Move the piece: toggle it off its source square and onto its destination.
	update(pos.ToMove, m.Piece, m.From)
	update(pos.ToMove, m.Piece, m.To)
	if m.IsCapture() {
		captureSquare := m.To
		if m.EP {
			// The captured pawn is one square centerward of To.
			// (XOR with 8 flips to the adjacent rank.)
			captureSquare ^= 8
		}
		update(pos.Opp(), m.CapturePiece, captureSquare)
		// Lose the relevant castling right when the opponent's rook square is captured.
		switch {
		case (pos.ToMove == Black && m.To == a1) || (pos.ToMove == White && m.To == a8):
			if pos.Castle[pos.Opp()][QS] {
				pos.z.xor(castleZobrist[pos.Opp()][QS])
			}
			pos.Castle[pos.Opp()][QS] = false
		case (pos.ToMove == Black && m.To == h1) || (pos.ToMove == White && m.To == h8):
			if pos.Castle[pos.Opp()][KS] {
				pos.z.xor(castleZobrist[pos.Opp()][KS])
			}
			pos.Castle[pos.Opp()][KS] = false
		}
	}
	switch side, ok := m.IsCastle(); {
	case ok && side == QS:
		// Move the castling rook to the far side of the king's destination.
		rookFrom, rookTo := m.From-4, m.From-1
		update(pos.ToMove, Rook, rookFrom)
		update(pos.ToMove, Rook, rookTo)
	case ok && side == KS:
		// Move the castling rook
		rookFrom, rookTo := m.From+3, m.From+1
		update(pos.ToMove, Rook, rookFrom)
		update(pos.ToMove, Rook, rookTo)
	}
	if m.IsPromotion() {
		// Replace the Pawn with PromotePiece on the destination square.
		update(pos.ToMove, Pawn, m.To)
		update(pos.ToMove, m.PromotePiece, m.To)
	}
	switch m.Piece {
	case King:
		// Update KingSquare and forfeit all castling rights
		pos.KingSquare[pos.ToMove] = m.To
		if pos.Castle[pos.ToMove][QS] {
			pos.z.xor(castleZobrist[pos.ToMove][QS])
		}
		pos.Castle[pos.ToMove][QS] = false
		if pos.Castle[pos.ToMove][KS] {
			pos.z.xor(castleZobrist[pos.ToMove][KS])
		}
		pos.Castle[pos.ToMove][KS] = false
	case Rook:
		// Forfeit the relevant castling right when a rook leaves its home square.
		switch {
		case (pos.ToMove == White && m.From == a1) || (pos.ToMove == Black && m.From == a8):
			if pos.Castle[pos.ToMove][QS] {
				pos.z.xor(castleZobrist[pos.ToMove][QS])
			}
			pos.Castle[pos.ToMove][QS] = false
		case (pos.ToMove == White && m.From == h1) || (pos.ToMove == Black && m.From == h8):
			if pos.Castle[pos.ToMove][KS] {
				pos.z.xor(castleZobrist[pos.ToMove][KS])
			}
			pos.Castle[pos.ToMove][KS] = false
		}
	}
	// The halfmove counter resets on any pawn move or capture.
	if m.Piece == Pawn || m.IsCapture() {
		pos.HalfMove = 0
	} else {
		pos.HalfMove++
	}
	if pos.ToMove == Black {
		pos.FullMove++
	}
	pos.ToMove = pos.Opp()
	pos.z.xor(blackToMoveZobrist)
	if m.IsDouble() {
		// The en passant target square is midway between From and To.
		pos.ep = (m.From + m.To) / 2
		// Add en passant capturing rights to the Zobrist bitstring.
		// This must be done after the side to move is changed.
		if a, b := eligibleEPCapturers(pos); a != 0 {
			pos.z.xor(canEPCaptureZobrist[a.File()])
			if b != 0 {
				pos.z.xor(canEPCaptureZobrist[b.File()])
			}
		}
	} else {
		pos.ep = 0
	}
	return pos
}
// PseudoLegalMoves returns all pseudo-legal Moves in pos, ordered so that
// winning captures come first, then equal captures, losing captures, and
// finally non-captures.
// (This terminology anticipates that the captured piece is defended and the
// capturing piece is liable to be captured in exchange.)
func PseudoLegalMoves(pos Position) []Move {
	moves := make([]Move, 0, 100)
	moves = append(moves, PawnMoves(pos)...)
	moves = append(moves, KnightMoves(pos)...)
	moves = append(moves, BishopMoves(pos)...)
	moves = append(moves, RookMoves(pos)...)
	moves = append(moves, QueenMoves(pos)...)
	moves = append(moves, KingMoves(pos)...)
	const (
		winning = iota
		equal
		losing
		noncapture
	)
	// moveType buckets a move for the counting sort below.
	moveType := func(m Move) int {
		switch {
		case !m.IsCapture():
			return noncapture
		case m.Piece == m.CapturePiece ||
			(m.Piece == Bishop && m.CapturePiece == Knight) ||
			(m.Piece == Knight && m.CapturePiece == Bishop):
			return equal
		case m.Piece < m.CapturePiece:
			return winning
		default:
			return losing
		}
	}
	// Counting sort: tally each bucket, turn the tallies into start offsets
	// with a single prefix-sum pass, then place each move at its offset.
	bins := make([]int, 4)
	for _, m := range moves {
		bins[moveType(m)]++
	}
	index := make([]int, len(bins))
	offset := 0
	for i, n := range bins {
		index[i] = offset
		offset += n
	}
	sorted := make([]Move, len(moves))
	for _, m := range moves {
		mt := moveType(m)
		sorted[index[mt]] = m
		index[mt]++
	}
	return sorted
}
// LegalMoves returns all legal Moves in pos.
func LegalMoves(pos Position) []Move {
	candidates := PseudoLegalMoves(pos)
	legal := make([]Move, 0, len(candidates))
	for _, m := range candidates {
		// Keep only moves whose resulting Position is legal.
		if IsLegal(Make(pos, m)) {
			legal = append(legal, m)
		}
	}
	return legal
}
// hasLegalMove reports whether pos has at least one legal move.
func hasLegalMove(pos Position) bool {
	generators := []func(Position) []Move{
		PawnMoves,
		KnightMoves,
		BishopMoves,
		RookMoves,
		QueenMoves,
		KingMoves,
	}
	// Generate the pseudo-legal moves one piece type at a time and return
	// as soon as any of them turns out to be legal.
	for _, gen := range generators {
		for _, m := range gen(pos) {
			if IsLegal(Make(pos, m)) {
				return true
			}
		}
	}
	return false
}
// rangeBits applies f sequentially to each set bit in board,
// from least significant bit upward.
func rangeBits(board Board, f func(Board, Square)) {
	for ; board != 0; board = ResetLS1B(board) {
		f(LS1B(board), LS1BIndex(board))
	}
}
// PawnMoves returns a slice of all pseudo-legal Moves that pawns can make in pos,
// including promotions and en passant captures.
func PawnMoves(pos Position) []Move {
	moves := make([]Move, 0, 8*2*4) // cap: all pawns are on the 7th rank and can promote via capture to either side
	empty := ^pos.b[White][All] & ^pos.b[Black][All]
	// Pawn movesets are asymmetrical and their capture and non-capture movesets are
	// disjoint, so advances and attacks use separate, color-specific functions.
	var pawnAdv, pawnAtk func(Board, Board) Board
	var promoteRank byte
	switch pos.ToMove {
	case White:
		pawnAdv, pawnAtk, promoteRank = whitePawnAdvances, whitePawnAttacks, 7
	case Black:
		pawnAdv, pawnAtk, promoteRank = blackPawnAdvances, blackPawnAttacks, 0
	}
	rangeBits(pos.b[pos.ToMove][Pawn], func(f Board, from Square) {
		// Non-capturing advances (single and double pushes).
		rangeBits(pawnAdv(f, empty), func(_ Board, to Square) {
			if to.Rank() == promoteRank {
				// A pawn reaching the last rank must promote; emit one move per choice.
				for _, pp := range []Piece{Queen, Rook, Bishop, Knight} {
					moves = append(moves, Move{From: from, To: to, Piece: Pawn, PromotePiece: pp})
				}
				return
			}
			moves = append(moves, Move{From: from, To: to, Piece: Pawn})
		})
		// Captures: attacked squares that hold an enemy piece.
		rangeBits(pawnAtk(f, empty)&pos.b[pos.Opp()][All], func(_ Board, to Square) {
			_, cp := pos.PieceOn(to)
			if to.Rank() == promoteRank {
				for _, pp := range []Piece{Queen, Rook, Bishop, Knight} {
					moves = append(moves, Move{From: from, To: to, Piece: Pawn, CapturePiece: cp, PromotePiece: pp})
				}
				return
			}
			moves = append(moves, Move{From: from, To: to, Piece: Pawn, CapturePiece: cp})
		})
	})
	if pos.ep != 0 {
		// Double pawn push occurred on the previous move.
		// The capturable pawn sits one rank centerward of the target square (XOR 8);
		// only our pawns immediately west or east of it may capture en passant.
		epcs := pos.ep ^ 8
		epSources := west(epcs.Board()) | east(epcs.Board())
		rangeBits(epSources&pos.b[pos.ToMove][Pawn], func(_ Board, s Square) {
			moves = append(moves, Move{From: s, To: pos.ep, Piece: Pawn, CapturePiece: Pawn, EP: true})
		})
	}
	return moves
}
// The following movers are thin wrappers around pMoves; pawn moves and
// castling are handled separately by PawnMoves and KingMoves.

// KnightMoves returns a slice of all pseudo-legal Moves that knights can make in pos.
func KnightMoves(pos Position) []Move { return pMoves(pos, Knight) }

// BishopMoves returns a slice of all pseudo-legal Moves that bishops can make in pos.
func BishopMoves(pos Position) []Move { return pMoves(pos, Bishop) }

// RookMoves returns a slice of all pseudo-legal Moves that rooks can make in pos.
func RookMoves(pos Position) []Move { return pMoves(pos, Rook) }

// QueenMoves returns a slice of all pseudo-legal Moves that queens can make in pos.
func QueenMoves(pos Position) []Move { return pMoves(pos, Queen) }
// KingMoves returns a slice of all pseudo-legal Moves that the king can make in pos,
// including any available castling moves (the king travels two squares).
func KingMoves(pos Position) []Move {
	moves := pMoves(pos, King)
	from := pos.KingSquare[pos.ToMove]
	castles := []struct {
		side Side
		to   Square
	}{
		{QS, from - 2},
		{KS, from + 2},
	}
	for _, c := range castles {
		if canCastle(pos, c.side) {
			moves = append(moves, Move{From: from, To: c.to, Piece: King})
		}
	}
	return moves
}
// canCastle returns whether castling to side is legal in pos.
func canCastle(pos Position, side Side) bool {
	switch {
	case !pos.Castle[pos.ToMove][side]:
		// The castling right has been lost.
		return false
	case CastleEmptySquares[pos.ToMove][side]&(pos.b[White][All]|pos.b[Black][All]) != 0:
		// A piece stands on a square that must be empty.
		return false
	}
	// None of the king's squares may be attacked by the opponent.
	var attacked bool
	rangeBits(CastleKingSquares[pos.ToMove][side], func(_ Board, s Square) {
		if !attacked {
			attacked = IsAttacked(pos, s, pos.Opp())
		}
	})
	return !attacked
}
// pMoves returns a slice of all pseudo-legal Moves that non-pawn pieces of type p
// can make in pos, excluding castling.
// p must be one of Knight, Bishop, Rook, Queen, or King; pawns have asymmetric
// movesets and are generated by PawnMoves instead.
func pMoves(pos Position, p Piece) []Move {
	moves := make([]Move, 0, 28) // two bishops, two rooks, or one queen can have 28 moves
	empty := ^pos.b[White][All] & ^pos.b[Black][All]
	var pAttacks func(Board, Board) Board
	switch p {
	case Knight:
		pAttacks = knightAttacks
	case Bishop:
		pAttacks = bishopAttacks
	case Rook:
		pAttacks = rookAttacks
	case Queen:
		pAttacks = queenAttacks
	case King:
		pAttacks = kingAttacks
	default:
		// Previously an unsupported piece type caused a bare nil-dereference
		// below; fail loudly with a diagnosable message instead.
		panic("pMoves: unsupported piece type")
	}
	rangeBits(pos.b[pos.ToMove][p], func(f Board, from Square) {
		// Moves go to attacked squares not occupied by our own pieces.
		rangeBits(pAttacks(f, empty)&^pos.b[pos.ToMove][All], func(t Board, to Square) {
			m := Move{From: from, To: to, Piece: p}
			if t&pos.b[pos.Opp()][All] != 0 {
				_, capturePiece := pos.PieceOn(to)
				m.CapturePiece = capturePiece
			}
			moves = append(moves, m)
		})
	})
	return moves
}
// Precomputed attack tables indexed by square: kAttacks[s] holds the squares a
// king on s attacks, nAttacks[s] the same for a knight.
var (
	kAttacks = make([]Board, 64)
	nAttacks = make([]Board, 64)
)

// init fills the king and knight attack tables. The one-step direction
// helpers mask out file wrap-around, so edge squares get correctly
// truncated attack sets.
func init() {
	for s := a1; s <= h8; s++ {
		b := s.Board()
		// +7 +8 +9
		// -1  K +1
		// -9 -8 -7
		kAttacks[s] = southwest(b) | south(b) | southeast(b) | west(b) | east(b) | northwest(b) | north(b) | northeast(b)
		//  +15  +17
		// +6      +10
		//      N
		// -10     -6
		//  -17  -15
		nAttacks[s] = southwest(south(b)) | southeast(south(b)) | southwest(west(b)) | southeast(east(b)) | northwest(west(b)) | northeast(east(b)) | northwest(north(b)) | northeast(north(b))
	}
}
// whitePawnAdvances returns a Board of all squares to which a white pawn at p can
// advance when there are no pieces at empty.
// The double-push term is masked to the lower 32 bits via <<32>>32, so a
// two-square advance survives only when it lands in the lower board half —
// i.e. only pawns starting on their initial rank produce a double push.
func whitePawnAdvances(p, empty Board) Board {
	return (north(north(p)&empty)&empty)<<32>>32 | (north(p) & empty)
}

// blackPawnAdvances mirrors whitePawnAdvances; the >>32<<32 mask keeps only
// double pushes landing in the upper board half.
func blackPawnAdvances(p, empty Board) Board {
	return (south(south(p)&empty)&empty)>>32<<32 | (south(p) & empty)
}

// whitePawnAttacks returns a Board of all squares attacked by a white pawn at p.
// (The second parameter is unused; it exists so all movers share one signature.)
func whitePawnAttacks(p, _ Board) Board { return northwest(p) | northeast(p) }

// blackPawnAttacks returns a Board of all squares attacked by a black pawn at p.
func blackPawnAttacks(p, _ Board) Board { return southwest(p) | southeast(p) }

// knightAttacks returns a Board of all squares attacked by a knight at p.
// Only p's least significant set bit is considered.
func knightAttacks(p, _ Board) Board { return nAttacks[LS1BIndex(p)] }

// bishopAttacks returns a Board of all squares attacked by a bishop at p when there
// are no pieces at empty; the first blocker along each diagonal is included.
func bishopAttacks(p, empty Board) Board {
	return attackFill(p, empty, southwest) | attackFill(p, empty, southeast) | attackFill(p, empty, northwest) | attackFill(p, empty, northeast)
}

// rookAttacks returns a Board of all squares attacked by a rook at p when there are
// no pieces at empty; the first blocker along each rank/file is included.
func rookAttacks(p, empty Board) Board {
	return attackFill(p, empty, south) | attackFill(p, empty, west) | attackFill(p, empty, east) | attackFill(p, empty, north)
}

// queenAttacks returns a Board of all squares attacked by a queen at p when there are no pieces at empty.
func queenAttacks(p, empty Board) Board { return rookAttacks(p, empty) | bishopAttacks(p, empty) }

// kingAttacks returns a Board of all squares attacked by a king at p.
func kingAttacks(p, _ Board) Board { return kAttacks[LS1BIndex(p)] }
// IsAttacked returns whether s is attacked by any piece of Color c in pos.
func IsAttacked(pos Position, s Square, c Color) bool {
	b := s.Board()
	empty := ^pos.b[White][All] & ^pos.b[Black][All]
	// Pawn attacks are asymmetric: project the opposite color's pawn attack
	// pattern from s and test whether it lands on one of c's pawns.
	var pawnHits Board
	switch c {
	case White:
		pawnHits = blackPawnAttacks(b, empty) & pos.b[c][Pawn]
	case Black:
		pawnHits = whitePawnAttacks(b, empty) & pos.b[c][Pawn]
	}
	if pawnHits != 0 {
		return true
	}
	// All other piece attacks are symmetric, so project from s and intersect.
	return rookAttacks(b, empty)&(pos.b[c][Rook]|pos.b[c][Queen]) != 0 ||
		bishopAttacks(b, empty)&(pos.b[c][Bishop]|pos.b[c][Queen]) != 0 ||
		knightAttacks(b, empty)&pos.b[c][Knight] != 0 ||
		kingAttacks(b, empty)&pos.b[c][King] != 0
}
// attackFill returns a Board showing all of the squares attacked by the input Board
// in the direction represented by shift.
// It flood-fills through empty squares, then shifts once more so that the
// result includes the first blocking piece but not the sliding piece itself.
func attackFill(piece, empty Board, shift func(Board) Board) Board {
	var fill Board
	for piece != 0 {
		fill, piece = fill|piece, shift(piece)&empty
	}
	return shift(fill) // Include the blocking piece and not the sliding piece
}

// One-step shifts. East/west mask off the A/H file so moves cannot wrap
// around the board edge; north/south shifts simply fall off the Board.
func south(b Board) Board     { return b >> 8 }
func west(b Board) Board      { return b >> 1 &^ HFile }
func east(b Board) Board      { return b << 1 &^ AFile }
func north(b Board) Board     { return b << 8 }
func southwest(b Board) Board { return west(south(b)) }
func southeast(b Board) Board { return east(south(b)) }
func northwest(b Board) Board { return west(north(b)) }
func northeast(b Board) Board { return east(north(b)) }
// eligibleEPCapturers returns the Squares of the pawns, if any, that may pseudo-legally capture en passant in a Position.
// If only one pawn can capture en passant, its Square is the first return value and the second is 0.
func eligibleEPCapturers(pos Position) (Square, Square) {
var a, b Square
if pos.ep == 0 {
return 0, 0
}
if pos.ep.File() != 0 {
westcs := pos.ep ^ 8 - 1
if c, p := pos.PieceOn(westcs); c == pos.ToMove && p == Pawn {
a = westcs
}
}
if pos.ep.File() != 7 {
eastcs := pos.ep ^ 8 + 1
if c, p := pos.PieceOn(eastcs); c == pos.ToMove && p == Pawn {
if a != 0 {
b = eastcs
} else {
a = eastcs
}
}
}
return a, b
} | move.go | 0.699973 | 0.608187 | move.go | starcoder |
package types
import (
"github.com/vron/compute/glbind/input"
)
// Types is a registry of GLSL types.
// m indexes the types by their GLSL name; l preserves registration order.
type Types struct {
	m map[string]*GlslType
	l []*GlslType
}
// New builds a Types registry for inp: the basic and complex built-in types
// plus one type per user-declared struct. After alignments are calculated,
// every type reachable from the input arguments is marked as exported.
func New(inp input.Input) *Types {
	ts := &Types{
		m: map[string]*GlslType{},
		l: []*GlslType{},
	}
	ts.createBasicBuiltinTypes()
	ts.createComplexBuiltinTypes()
	for _, s := range inp.Structs {
		ts.createUserTypes(s)
	}
	ts.calculateAlignments()
	for _, arg := range inp.Arguments {
		ts.exportType(ts.Get(arg.Ty).C)
	}
	return ts
}
// put registers t under its name, panicking on a duplicate registration.
func (ts *Types) put(t GlslType) {
	if _, dup := ts.m[t.Name]; dup {
		panic("trying to create type with same name: " + t.Name)
	}
	// Link the C representation back to its (stored) GLSL type.
	t.C.GlslType = &t
	ts.m[t.Name] = &t
	ts.l = append(ts.l, &t)
}

// Get returns the type registered under name, panicking when it is unknown.
func (ts *Types) Get(name string) *GlslType {
	t, known := ts.m[name]
	if !known {
		panic("trying to get type: " + name)
	}
	return t
}
// ListExportedTypes returns, in registration order, all types marked for export.
func (ts *Types) ListExportedTypes() []*GlslType {
	var exported []*GlslType
	for _, t := range ts.l {
		if t.Export {
			exported = append(exported, t)
		}
	}
	return exported
}

// ListAllTypes returns every registered type in registration order.
func (tt *Types) ListAllTypes() []*GlslType {
	return tt.l
}
// createBasicBuiltinTypes registers the GLSL scalar, vector, and matrix
// built-ins together with their byte sizes and alignments.
// NOTE(review): the sizes follow GLSL layout rules (3-component vectors
// occupy 16 bytes, matN is laid out as N column vectors) — presumably the
// std430 rules; confirm against the consuming C code before relying on this
// for std140 buffers.
func (ts *Types) createBasicBuiltinTypes() {
	ts.put(GlslType{Builtin: true, Name: "uint8_t", C: &CType{Basic: CBasicType{Name: "uint8_t"}, Size: align(1, 1)}})
	// GLSL bools are stored as 32-bit integers on the C side.
	ts.put(GlslType{Builtin: true, Name: "Bool", C: &CType{Basic: CBasicType{Name: "int32_t"}, Size: align(4, 4)}})
	ts.put(GlslType{Builtin: true, Name: "int32_t", C: &CType{Basic: CBasicType{Name: "int32_t"}, Size: align(4, 4)}})
	ts.put(GlslType{Builtin: true, Name: "uint32_t", C: &CType{Basic: CBasicType{Name: "uint32_t"}, Size: align(4, 4)}})
	ts.put(GlslType{Builtin: true, Name: "float", C: &CType{Basic: CBasicType{Name: "float"}, Size: align(4, 4)}})
	ts.put(GlslType{Builtin: true, Name: "vec2", C: &CType{Vector: CVector{Len: 2, Basic: ts.Get("float").C}, Size: align(8, 8)}})
	ts.put(GlslType{Builtin: true, Name: "vec3", C: &CType{Vector: CVector{Len: 3, Basic: ts.Get("float").C}, Size: align(16, 16)}})
	ts.put(GlslType{Builtin: true, Name: "vec4", C: &CType{Vector: CVector{Len: 4, Basic: ts.Get("float").C}, Size: align(16, 16)}})
	ts.put(GlslType{Builtin: true, Name: "ivec2", C: &CType{Vector: CVector{Len: 2, Basic: ts.Get("int32_t").C}, Size: align(8, 8)}})
	ts.put(GlslType{Builtin: true, Name: "ivec3", C: &CType{Vector: CVector{Len: 3, Basic: ts.Get("int32_t").C}, Size: align(16, 16)}})
	ts.put(GlslType{Builtin: true, Name: "ivec4", C: &CType{Vector: CVector{Len: 4, Basic: ts.Get("int32_t").C}, Size: align(16, 16)}})
	ts.put(GlslType{Builtin: true, Name: "uvec2", C: &CType{Vector: CVector{Len: 2, Basic: ts.Get("uint32_t").C}, Size: align(8, 8)}})
	ts.put(GlslType{Builtin: true, Name: "uvec3", C: &CType{Vector: CVector{Len: 3, Basic: ts.Get("uint32_t").C}, Size: align(16, 16)}})
	ts.put(GlslType{Builtin: true, Name: "uvec4", C: &CType{Vector: CVector{Len: 4, Basic: ts.Get("uint32_t").C}, Size: align(16, 16)}})
	// Matrices are structs of column vectors; mat3 columns are vec3s padded
	// to 16 bytes, hence the 16-byte stride between columns.
	ts.put(GlslType{Builtin: true, Name: "mat2", C: &CType{
		Struct: CStruct{Fields: []CField{
			{Name: "column0", CType: ts.Get("vec2").C, ByteOffset: 0},
			{Name: "column1", CType: ts.Get("vec2").C, ByteOffset: 8},
		}},
		Size: align(16, 8)}})
	ts.put(GlslType{Builtin: true, Name: "mat3", C: &CType{
		Struct: CStruct{Fields: []CField{
			{Name: "column0", CType: ts.Get("vec3").C, ByteOffset: 0},
			{Name: "column1", CType: ts.Get("vec3").C, ByteOffset: 16},
			{Name: "column2", CType: ts.Get("vec3").C, ByteOffset: 16 * 2},
		}},
		Size: align(16*3, 16)}})
	ts.put(GlslType{Builtin: true, Name: "mat4", C: &CType{
		Struct: CStruct{Fields: []CField{
			{Name: "column0", CType: ts.Get("vec4").C, ByteOffset: 0},
			{Name: "column1", CType: ts.Get("vec4").C, ByteOffset: 16},
			{Name: "column2", CType: ts.Get("vec4").C, ByteOffset: 16 * 2},
			{Name: "column3", CType: ts.Get("vec4").C, ByteOffset: 16 * 3},
		}},
		Size: align(16*4, 16)}})
}
// createComplexBuiltinTypes registers the image types, each backed by a
// flexible data array plus a width field.
// NOTE(review): data is an unsized array (length -1) and width sits at byte
// offset 28 inside a 32-byte struct — this mirrors a C-side layout that is
// not visible here; verify against the generated C header before changing.
func (ts *Types) createComplexBuiltinTypes() {
	ts.put(GlslType{Builtin: true, Name: "image2Drgba32f", C: &CType{Struct: CStruct{
		Fields: []CField{
			{Name: "data", CType: CreateArray(ts.Get("float").C, []int{-1}), ByteOffset: 0},
			{Name: "width", CType: ts.Get("int32_t").C, ByteOffset: 28},
		}}, Size: align(32, 8)}})
	ts.put(GlslType{Builtin: true, Name: "image2Drgba8", C: &CType{Struct: CStruct{
		Fields: []CField{
			{Name: "data", CType: CreateArray(ts.Get("uint8_t").C, []int{-1}), ByteOffset: 0},
			{Name: "width", CType: ts.Get("int32_t").C, ByteOffset: 28},
		}}, Size: align(32, 8)}})
}
// createUserTypes registers a GLSL struct type for the user-declared struct
// str and marks it (and every type it references) as exported.
func (ts *Types) createUserTypes(str input.InputStruct) {
	fields := []CField{}
	for _, f := range str.Fields {
		field := CField{
			Name:  f.Name,
			CType: CreateArray(ts.Get(f.Ty).C, f.Arrno),
		}
		fields = append(fields, field)
	}
	gt := GlslType{
		Name: str.Name,
		C: &CType{
			Struct: CStruct{Fields: fields},
		},
	}
	ts.put(gt)
	ts.exportType(gt.C)
}
func (ts *Types) exportType(t *CType) {
if t.GlslType != nil {
if t.GlslType.Export {
return
}
t.GlslType.Export = true
}
if t.IsArray() {
ts.exportType(t.Array.CType)
} else if t.IsStruct() {
for _, f := range t.Struct.Fields {
ts.exportType(f.CType)
}
}
} | glbind/types/types.go | 0.502197 | 0.54698 | types.go | starcoder |
package strings
import (
"unicode/utf8"
)
// NumericLess compares strings with respect to values of positive integer groups.
// For example, 'a9z' is considered less than 'a11z', because 9 < 11.
// If two numbers with leading zeroes have the same value, the shortest of them is considered less, i.e. 12 < 012.
// Digits and non-digits are compared lexicographically, i.e. ' ' (space) < 5 < 'a'.
func NumericLess(a, b string) bool {
	// nextA/nextB consume and return the next rune of a/b; a returned size of
	// 0 signals that the string is exhausted.
	nextA := func() (rune, int) {
		r, size := utf8.DecodeRuneInString(a)
		a = a[size:]
		return r, size
	}
	nextB := func() (rune, int) {
		r, size := utf8.DecodeRuneInString(b)
		b = b[size:]
		return r, size
	}
	for {
		runeA, offsetA := nextA()
		if offsetA == 0 {
			// a ended; a < b iff b still has characters.
			return b != ""
		}
		runeB, offsetB := nextB()
		if offsetB == 0 {
			return false
		}
		if digitA, digitB := isDigit(runeA), isDigit(runeB); digitA != digitB {
			// Digit vs non-digit: plain lexicographic comparison decides.
			return runeA < runeB
		} else if digitA {
			// Both sides start a number group. zeroBalance counts skipped
			// leading zeros (positive: a had more, negative: b had more);
			// it is the final tie-breaker so that 12 < 012.
			zeroBalance := 0
			// digitCmp records the first difference between the aligned
			// digit runs (-1 when a's digit was smaller, 1 when larger).
			digitCmp := 0
			for runeA == '0' {
				zeroBalance++
				runeA, offsetA = nextA()
			}
			for runeB == '0' {
				zeroBalance--
				runeB, offsetB = nextB()
			}
			if offsetA == 0 {
				return offsetB != 0 || zeroBalance < 0
			}
			if offsetB == 0 {
				return false
			}
			if digitA, digitB = isDigit(runeA), isDigit(runeB); !digitA && !digitB {
				// Both numbers were all zeros; fewer zeros sorts first,
				// otherwise fall through on the following characters.
				if zeroBalance != 0 {
					return zeroBalance < 0
				} else if runeA != runeB {
					return runeA < runeB
				}
			} else if digitA != digitB {
				// The side that still has digits has the longer (larger) number.
				return digitB
			} else {
				// Walk both digit runs in lockstep.
				for {
					if digitCmp == 0 && runeA != runeB {
						if runeA < runeB {
							digitCmp = -1
						} else {
							digitCmp = 1
						}
					}
					runeA, offsetA = nextA()
					runeB, offsetB = nextB()
					if digitA, digitB = isDigit(runeA), isDigit(runeB); digitA != digitB {
						// Unequal run lengths: the longer run is the larger number.
						return digitB
					} else if !digitA {
						// Both runs ended at the same length: decide by the
						// first differing digit, then the zero balance, then
						// continue with the characters after the number.
						if digitCmp != 0 {
							return digitCmp < 0
						}
						if zeroBalance != 0 {
							return zeroBalance < 0
						}
						if offsetA == 0 {
							return offsetB != 0
						}
						if offsetB == 0 {
							return false
						}
						if runeA != runeB {
							return runeA < runeB
						}
						break
					}
				}
			}
		} else if runeA != runeB {
			return runeA < runeB
		}
	}
}
// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return r >= '0' && r <= '9'
}
package dda
import "gonum.org/v1/gonum/graph"
func graphDegeneracy(g graph.Undirected) int {
nodes := graph.NodesOf(g.Nodes())
// The algorithm used here is essentially as described at
// http://en.wikipedia.org/w/index.php?title=Degeneracy_%28graph_theory%29&oldid=640308710
// Initialize an output list L in return parameters.
// Compute a number d_v for each vertex v in G,
// the number of neighbors of v that are not already in L.
// Initially, these numbers are just the degrees of the vertices.
dv := make(map[int64]int, len(nodes))
var (
maxDegree int
neighbours = make(map[int64][]graph.Node)
)
for _, n := range nodes {
id := n.ID()
adj := graph.NodesOf(g.From(id))
neighbours[id] = adj
dv[id] = len(adj)
if len(adj) > maxDegree {
maxDegree = len(adj)
}
}
// Initialize an array D such that D[i] contains a list of the
// vertices v that are not already in L for which d_v = i.
d := make([][]graph.Node, maxDegree+1)
for _, n := range nodes {
deg := dv[n.ID()]
d[deg] = append(d[deg], n)
}
// Initialize k to 0.
k := 0
// Repeat n times:
s := []int{0}
for range nodes {
// Scan the array cells D[0], D[1], ... until
// finding an i for which D[i] is nonempty.
var (
i int
di []graph.Node
)
for i, di = range d {
if len(di) != 0 {
break
}
}
// Set k to max(k,i).
if i > k {
k = i
s = append(s, make([]int, k-len(s)+1)...)
}
// Select a vertex v from D[i]. Add v to the
// beginning of L and remove it from D[i].
var v graph.Node
v, d[i] = di[len(di)-1], di[:len(di)-1]
var l []graph.Node
l = append(l, v)
s[k]++
delete(dv, v.ID())
// For each neighbor w of v not already in L,
// subtract one from d_w and move w to the
// cell of D corresponding to the new value of d_w.
for _, w := range neighbours[v.ID()] {
dw, ok := dv[w.ID()]
if !ok {
continue
}
for i, n := range d[dw] {
if n.ID() == w.ID() {
d[dw][i], d[dw] = d[dw][len(d[dw])-1], d[dw][:len(d[dw])-1]
dw--
d[dw] = append(d[dw], w)
break
}
}
dv[w.ID()] = dw
}
}
return k
} | dda/degeneracy.go | 0.62395 | 0.467818 | degeneracy.go | starcoder |
package dither
import (
"image"
"image/color"
)
var white = color.Gray{Y: 255}
var black = color.Gray{Y: 0}
func threshold(pixel color.Gray) color.Gray {
if pixel.Y > 123 {
return white
}
return black
}
func Threshold(input *image.Gray) *image.Gray {
bounds := input.Bounds()
dithered := image.NewGray(bounds)
dx := bounds.Dx()
dy := bounds.Dy()
for x := 0; x < dx; x++ {
for y := 0; y < dy; y++ {
dithered.Set(x, y, threshold(input.GrayAt(x, y)))
}
}
return dithered
}
func Grayscale(input image.Image) *image.Gray {
bounds := input.Bounds()
gray := image.NewGray(bounds)
dx := bounds.Dx()
dy := bounds.Dy()
for x := bounds.Min.X; x < dx; x++ {
for y := bounds.Min.Y; y < dy; y++ {
gray.Set(x, y, input.At(x, y))
}
}
return gray
}
// Color applies error-diffusion dithering to input, quantizing each RGB
// channel to 0 or 255 and spreading the per-channel quantization error to
// neighboring pixels using the coefficient matrix supplied by m, scaled by
// errorMultiplier.
// NOTE(review): the copy loop below runs from bounds.Min up to
// bounds.Dx()/Dy() (the width/height) and the error buffers are indexed from
// 0 — both assume a zero-origin image; confirm before passing sub-images.
func Color(m Matrixer, input image.Image, errorMultiplier float32) image.Image {
	bounds := input.Bounds()
	img := image.NewRGBA(bounds)
	// Copy the source so the error-diffusion pass can read and write in place.
	for x := bounds.Min.X; x < bounds.Dx(); x++ {
		for y := bounds.Min.Y; y < bounds.Dy(); y++ {
			pixel := input.At(x, y)
			img.Set(x, y, pixel)
		}
	}
	dx, dy := bounds.Dx(), bounds.Dy()
	// Prepopulate multidimensional slices holding the accumulated error per channel.
	redErrors := make([][]float32, dx)
	greenErrors := make([][]float32, dx)
	blueErrors := make([][]float32, dx)
	for x := 0; x < dx; x++ {
		redErrors[x] = make([]float32, dy)
		greenErrors[x] = make([]float32, dy)
		blueErrors[x] = make([]float32, dy)
		for y := 0; y < dy; y++ {
			redErrors[x][y] = 0
			greenErrors[x][y] = 0
			blueErrors[x][y] = 0
		}
	}
	// Diffuse error in two dimensions. ydim/xdim describe the reach of the
	// coefficient matrix around the current pixel.
	matrix := m.Matrix()
	ydim := matrix.Rows() - 1
	xdim := matrix.Cols() / 2
	// qrr/qrg/qrb hold the current pixel's quantization error per channel.
	var qrr, qrg, qrb float32
	for x := 0; x < dx; x++ {
		for y := 0; y < dy; y++ {
			r32, g32, b32, a := img.At(x, y).RGBA()
			// Truncate the 16-bit channels returned by RGBA back to 8 bits.
			r, g, b := float32(uint8(r32)), float32(uint8(g32)), float32(uint8(b32))
			// Apply the error accumulated from previously processed pixels.
			r -= redErrors[x][y] * errorMultiplier
			g -= greenErrors[x][y] * errorMultiplier
			b -= blueErrors[x][y] * errorMultiplier
			// Quantize each channel to 0 or 255 and record the rounding error,
			// which is then diffused to the neighboring pixels below.
			if r < 128 {
				qrr = -r
				r = 0
			} else {
				qrr = 255 - r
				r = 255
			}
			if g < 128 {
				qrg = -g
				g = 0
			} else {
				qrg = 255 - g
				g = 255
			}
			if b < 128 {
				qrb = -b
				b = 0
			} else {
				qrb = 255 - b
				b = 255
			}
			img.Set(x, y, color.RGBA{uint8(r), uint8(g), uint8(b), uint8(a)})
			for xx := 0; xx < ydim+1; xx++ {
				for yy := -xdim; yy <= xdim-1; yy++ {
					if y+yy < 0 || dy <= y+yy || x+xx < 0 || dx <= x+xx {
						continue
					}
					// Adds the error of the previous pixel to the current pixel
					factor := matrix.Get(yy+ydim, xx)
					redErrors[x+xx][y+yy] += qrr * factor
					greenErrors[x+xx][y+yy] += qrg * factor
					blueErrors[x+xx][y+yy] += qrb * factor
				}
			}
		}
	}
	return img
}
// Monochrome applies error-diffusion dithering to input, quantizing each
// pixel's luminance to 0 or 255 and spreading the quantization error to
// neighboring pixels using the coefficient matrix supplied by m, scaled by
// errorMultiplier.
// NOTE(review): like Color, the copy loop uses bounds.Dx()/Dy() as limits and
// the error matrix is 0-indexed — this assumes a zero-origin image; confirm
// before passing sub-images.
func Monochrome(m Matrixer, input image.Image, errorMultiplier float32) image.Image {
	bounds := input.Bounds()
	img := image.NewGray(bounds)
	// Copy the source so the error-diffusion pass can read and write in place.
	for x := bounds.Min.X; x < bounds.Dx(); x++ {
		for y := bounds.Min.Y; y < bounds.Dy(); y++ {
			pixel := input.At(x, y)
			img.Set(x, y, pixel)
		}
	}
	dx, dy := bounds.Dx(), bounds.Dy()
	// Prepopulate multidimensional slice holding the accumulated error.
	errors := NewMatrix(dx, dy)
	matrix := m.Matrix()
	ydim := matrix.Rows() - 1
	xdim := matrix.Cols() / 2
	for x := 0; x < dx; x++ {
		for y := 0; y < dy; y++ {
			pix := float32(img.GrayAt(x, y).Y)
			// Apply the error accumulated from previously processed pixels.
			pix -= errors.Get(x, y) * errorMultiplier
			var quantError float32
			// Quantize to 0 or 255 and record the rounding error, which is
			// then diffused to the neighboring pixels below.
			if pix < 128 {
				quantError = -pix
				pix = 0
			} else {
				quantError = 255 - pix
				pix = 255
			}
			img.SetGray(x, y, color.Gray{Y: uint8(pix)})
			// Diffuse error in two dimension
			for xx := 0; xx < ydim+1; xx++ {
				for yy := -xdim; yy <= xdim-1; yy++ {
					if y+yy < 0 || dy <= y+yy || x+xx < 0 || dx <= x+xx {
						continue
					}
					// Adds the error of the previous pixel to the current pixel
					prev := errors.Get(x+xx, y+yy)
					delta := quantError * matrix.Get(yy+ydim, xx)
					errors.Set(x+xx, y+yy, prev+delta)
				}
			}
		}
	}
	return img
}
package cmd
import (
"fmt"
"strconv"
"strings"
"github.com/bpicode/fritzctl/fritz"
"github.com/bpicode/fritzctl/logger"
"github.com/spf13/cobra"
)
// temperatureCmd declares the "temperature" subcommand: it accepts a value
// (absolute °C, on/off, sav/comf, or a +/- adjustment) followed by one or
// more device/group names, and delegates to changeTemperature.
var temperatureCmd = &cobra.Command{
	Use:   "temperature [value in °C, on, off, sav, comf] [device/group names]",
	Short: "Set the temperature of HKR devices/groups or turn them on/off",
	Long: "Change the temperature of HKR devices/groups by supplying the desired value in °C. " +
		"When turning HKR devices on/off, replace the value by 'on'/'off' respectively." +
		"To reset each devices to its comfort/saving temperature, replace the value by 'comf'/'sav'." +
		"To increase/decrease temperatures relative to the current goal, supply '+' or '-' followed by space.",
	Example: `fritzctl temperature 21.0 HKR_1 HKR_2
fritzctl temperature off HKR_1
fritzctl temperature on HKR_2
fritzctl temperature comf HK1 HKR_2
fritzctl temperature sav HK1 HKR_2
fritzctl temperature + 1.5 HK1
fritzctl temperature - 2 HK1
`,
	RunE: changeTemperature,
}

// init registers the subcommand with the root command at startup.
func init() {
	RootCmd.AddCommand(temperatureCmd)
}
// changeTemperature is the RunE handler: it dispatches args[0] to the
// matching action and applies it to the remaining device names.
func changeTemperature(_ *cobra.Command, args []string) error {
	assertMinLen(args, 2, "insufficient input: at least two parameters expected (run with --help for more details)")
	value, targets := args[0], args[1:]
	changeAction(value)(value, targets...)
	logger.Info("It may take a few minutes until the changes propagate to the end device(s)")
	return nil
}

// changeAction picks the handler appropriate for the first CLI argument:
// saving/comfort presets, relative adjustment ("+"/"-"), or an absolute value.
func changeAction(s string) func(val string, args ...string) {
	switch {
	case strings.EqualFold(s, "sav"), strings.EqualFold(s, "saving"):
		return changeToSav
	case strings.EqualFold(s, "comf"), strings.EqualFold(s, "comfort"):
		return changeToComf
	case s == "+", s == "-":
		return changeBy
	default:
		return changeTo
	}
}
// changeToSav resets each named device to its configured saving temperature.
func changeToSav(_ string, args ...string) {
	changeByCallback(func(t fritz.Thermostat) string { return t.FmtSavingTemperature() }, args...)
}

// changeToComf resets each named device to its configured comfort temperature.
func changeToComf(_ string, args ...string) {
	changeByCallback(func(t fritz.Thermostat) string { return t.FmtComfortTemperature() }, args...)
}

// changeBy adjusts each named device relative to its current goal temperature.
// val is "+" or "-" and args[0] holds the magnitude, e.g. "temperature + 1.5 HK1".
func changeBy(val string, args ...string) {
	assertMinLen(args, 2, "insufficient input: expected [+ or -] [amount] [devices]")
	delta, err := strconv.ParseFloat(val+args[0], 64)
	assertNoErr(err, "cannot parse temperature adjustment")
	adjust := func(t fritz.Thermostat) string {
		goal, err := strconv.ParseFloat(t.FmtGoalTemperature(), 64)
		assertNoErr(err, "unable to parse the current temperature goal '%s'", t.FmtGoalTemperature())
		return strconv.FormatFloat(goal+delta, 'f', -1, 64)
	}
	changeByCallback(adjust, args[1:]...)
}

// changeTo sets each named device to the absolute temperature val.
func changeTo(val string, devs ...string) {
	changeByValue(nil, val, devs...)
}
// changeByCallback lists the available devices once and, for every requested
// name, applies the temperature produced by supplier for that device's thermostat.
func changeByCallback(supplier func(t fritz.Thermostat) string, names ...string) {
	c := homeAutoClient(fritz.Caching(true))
	devices, err := c.List()
	assertNoErr(err, "cannot list available devices")
	thermostats := devices.Thermostats()
	for _, name := range names {
		device, err := deviceWithName(name, thermostats)
		assertNoErr(err, "unable extract device named '%s'", name)
		changeByValue(c, supplier(device.Thermostat), name)
	}
}

// deviceWithName returns a pointer to a copy of the device in list whose name
// equals name, or an error when no such device exists.
func deviceWithName(name string, list []fritz.Device) (*fritz.Device, error) {
	for i := range list {
		if list[i].Name == name {
			found := list[i]
			return &found, nil
		}
	}
	return nil, fmt.Errorf("device with name '%s' not found", name)
}
// changeByValue parses val and applies the resulting temperature to the named
// devices, lazily creating a client when none was supplied.
func changeByValue(c fritz.HomeAuto, val string, names ...string) {
	temp, err := parseTemperature(val)
	assertNoErr(err, "cannot parse temperature value")
	if c == nil {
		c = homeAutoClient()
	}
	assertNoErr(c.Temp(temp, names...), "error setting temperature")
}
// Out-of-band values understood by the FRITZ!Box HKR interface for turning
// a radiator off and on (presumably per the AVM AHA HTTP API — confirm there).
const (
	hkrOffValue = 126.5
	hkrOnValue  = 127.0
)

// parseTemperature translates the CLI temperature argument into the numeric
// value expected by the API: "off"/"on" (case-insensitive) map to the special
// sentinel values above; anything else is parsed as a float value in °C.
func parseTemperature(s string) (float64, error) {
	switch {
	case strings.EqualFold(s, "off"):
		return hkrOffValue, nil
	case strings.EqualFold(s, "on"):
		return hkrOnValue, nil
	}
	return strconv.ParseFloat(s, 64)
}
package solution
import "sort"
/*
leetcode: https://leetcode.com/problems/design-search-autocomplete-system/
*/
/*
We build trie data structure.
Each trie will keep freq times and 3 hot sentences all trie below it and itself.
In AutocompleteSystem struct, we have root trie.
We also need Cursor and Buff to keep track our search.
when input have '#', we just insert word from Buff and reset cursor and buffer.
Time complexity:
Constructor O(N*M) where N is len(sentences) and M is len of sentences[i]
Input: O(len(buff))
Insert O(len(buffer))
UpdateHotSentences O(1)
Search O(1)
Space complexity: O(N*M + len(buff))
*/
// AutocompleteSystem implements the streaming autocomplete:
// Root is the trie of all known sentences, Cursor tracks the trie node
// matching the characters typed so far (nil once the prefix is unknown),
// and Buff accumulates the current partial sentence.
type AutocompleteSystem struct {
	Root   *Trie
	Cursor *Trie
	Buff   []byte
}

// Constructor builds the system from the historical sentences and their
// frequencies. (The function name is fixed by the LeetCode harness.)
func Constructor(sentences []string, times []int) AutocompleteSystem {
	root := &Trie{Children: make([]*Trie, 27)}
	for i := 0; i < len(sentences); i++ {
		root.Insert(sentences[i], times[i])
	}
	return AutocompleteSystem{Root: root}
}
// Input feeds one character into the stream. A '#' terminates the current
// sentence: it is inserted into the trie (with its frequency incremented by
// one if it was already known), the buffer and cursor are reset, and nil is
// returned. Any other character extends the current prefix and the up-to-three
// hottest sentences matching it are returned (nil when no sentence matches).
func (this *AutocompleteSystem) Input(c byte) []string {
	if c == '#' {
		// Re-inserting records the just-typed sentence with frequency old+1.
		freq := 1
		if this.Cursor != nil && this.Cursor.IsWord {
			freq += this.Cursor.Freq
		}
		word := string(this.Buff)
		this.Root.Insert(word, freq)
		this.Cursor = nil
		this.Buff = nil
		return nil
	}
	// Advance the cursor one trie level. Once it becomes nil no sentence
	// matches the prefix, and it stays nil until the next '#' resets it.
	if len(this.Buff) == 0 {
		this.Cursor = this.Root.Search(c)
	} else if this.Cursor != nil {
		this.Cursor = this.Cursor.Search(c)
	}
	this.Buff = append(this.Buff, c)
	if this.Cursor == nil {
		return nil
	}
	return this.Cursor.Hot.ListString()
}
// Trie is a 27-way trie node ('a'-'z' plus space). Each node caches in Hot
// the (at most three) hottest sentences among all words in its subtree, so a
// prefix query is answered in O(1) once the cursor is positioned.
type Trie struct {
	Children []*Trie
	Freq     int  // frequency of the sentence ending here; meaningful when IsWord
	IsWord   bool // whether a complete sentence ends at this node
	Hot      HotSentences
}

// Insert adds word with the given frequency, refreshing the Hot cache of
// every node along the path, including the terminal node.
func (t *Trie) Insert(word string, freq int) {
	curr := t
	sentence := Sentence{Val: word, Freq: freq}
	for i := 0; i < len(word); i++ {
		idx := getIndex(word[i])
		if curr.Children[idx] == nil {
			curr.Children[idx] = &Trie{Children: make([]*Trie, 27)}
		}
		// Update the hot list before descending so every prefix node sees the word.
		curr.Hot = UpdateHotSentences(curr.Hot, sentence)
		curr = curr.Children[idx]
	}
	curr.Hot = UpdateHotSentences(curr.Hot, sentence)
	curr.IsWord = true
	curr.Freq = freq
}

// Search returns the child for input byte c, or nil when no sentence
// continues with c. c must not be '#' (getIndex would return -1).
func (t *Trie) Search(c byte) *Trie {
	idx := getIndex(c)
	return t.Children[idx]
}
// getIndex maps an input byte to its child-slot index: 'a'..'z' -> 0..25,
// ' ' -> 26, and the terminator '#' -> -1 (no slot).
func getIndex(c byte) int {
	switch c {
	case '#':
		return -1
	case ' ':
		return 26
	}
	return int(c - 'a')
}
// UpdateHotSentences merges s into h (replacing an existing entry with the
// same text, otherwise appending), re-sorts by hotness, and truncates the
// result to the three hottest sentences.
func UpdateHotSentences(h HotSentences, s Sentence) HotSentences {
	replaced := false
	for i := range h {
		if h[i].Val == s.Val {
			h[i] = s
			replaced = true
			break
		}
	}
	if !replaced {
		h = append(h, s)
	}
	sort.Sort(h)
	if len(h) > 3 {
		h = h[:3]
	}
	return h
}

// Sentence pairs a stored sentence with its hotness (frequency) count.
type Sentence struct {
	Val  string
	Freq int
}

// HotSentences is a collection of sentences sortable by hotness.
type HotSentences []Sentence

func (h HotSentences) Len() int { return len(h) }

// Less orders by descending frequency, breaking ties by ascending ASCII order.
func (h HotSentences) Less(i, j int) bool {
	a, b := h[i], h[j]
	if a.Freq != b.Freq {
		return a.Freq > b.Freq
	}
	return a.Val < b.Val
}

func (h HotSentences) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

// ListString returns just the sentence strings, in order.
func (h HotSentences) ListString() []string {
	var result []string
	for _, s := range h {
		result = append(result, s.Val)
	}
	return result
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.