code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package cmp
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp/internal/flags"
"github.com/google/go-cmp/cmp/internal/value"
)
const (
	// pointerDelimPrefix and pointerDelimSuffix are the mathematical double
	// angle brackets (U+27EA / U+27EB) used to enclose formatted pointer
	// addresses and reference IDs in diff output.
	pointerDelimPrefix = "⟪"
	pointerDelimSuffix = "⟫"
)
// formatPointer renders the address held by p, optionally enclosed in the
// pointer delimiters.
func formatPointer(p value.Pointer, withDelims bool) string {
	addr := p.Uintptr()
	if flags.Deterministic {
		// A fixed fake address keeps golden-test output stable across runs.
		addr = 0xdeadf00f
	}
	s := formatHex(uint64(addr))
	if !withDelims {
		return s
	}
	return pointerDelimPrefix + s + pointerDelimSuffix
}
// pointerReferences is a stack of pointers visited so far.
// Each element holds the pointer pair taken from the x and y values of a
// single diff step (see PushPair); Push records a single pointer in both
// slots.
type pointerReferences [][2]value.Pointer
// PushPair records the pointer pair for (vx, vy) on the stack and returns it.
// When deref is set, valid values are replaced by their addresses before the
// pointers are taken. Only the sides present for the given diff mode are
// recorded; the other slot remains the zero Pointer.
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
	if deref {
		if vx.IsValid() {
			vx = vx.Addr()
		}
		if vy.IsValid() {
			vy = vy.Addr()
		}
	}
	switch d {
	case diffUnknown, diffIdentical:
		pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
	case diffRemoved:
		// Only the x side exists; pp[1] stays the zero Pointer.
		pp[0] = value.PointerOf(vx)
	case diffInserted:
		// Only the y side exists; pp[0] stays the zero Pointer.
		pp[1] = value.PointerOf(vy)
	}
	*ps = append(*ps, pp)
	return pp
}
// Push records the pointer for v on the stack, reporting whether that pointer
// already appears in any previously pushed pair (i.e., it was visited before).
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
	p = value.PointerOf(v)
	for _, pair := range *ps {
		if p == pair[0] || p == pair[1] {
			return p, true
		}
	}
	// A single pointer occupies both slots of its stack entry.
	*ps = append(*ps, [2]value.Pointer{p, p})
	return p, false
}
// Pop removes the most recently pushed pointer pair from the stack.
func (ps *pointerReferences) Pop() {
	last := len(*ps) - 1
	*ps = (*ps)[:last]
}
// trunkReferences is metadata for a textNode indicating that the sub-tree
// represents the value for either pointer in a pair of references
// (one pointer from the x value, one from the y value).
type trunkReferences struct{ pp [2]value.Pointer }

// trunkReference is metadata for a textNode indicating that the sub-tree
// represents the value for the given single pointer reference.
type trunkReference struct{ p value.Pointer }

// leafReference is metadata for a textNode indicating that the value is
// truncated as it refers to another part of the tree (i.e., a trunk).
type leafReference struct{ p value.Pointer }
// wrapTrunkReferences tags s as the trunk for the pair of pointers in pp.
// The metadata collapses to a single trunkReference when either pointer is
// nil or when both pointers are identical; otherwise the pair is preserved.
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
	if pp[0].IsNil() {
		return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
	}
	if pp[1].IsNil() || pp[0] == pp[1] {
		return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
	}
	return &textWrap{Value: s, Metadata: trunkReferences{pp}}
}
// wrapTrunkReference tags s as the trunk for pointer p, optionally prefixing
// the node with the delimited pointer address.
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
	node := &textWrap{Value: s, Metadata: trunkReference{p}}
	if printAddress {
		node.Prefix = formatPointer(p, true)
	}
	return node
}
// makeLeafReference produces a truncated "(...)" placeholder node for a value
// that refers back to the trunk identified by p, optionally prefixed with the
// delimited pointer address.
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
	node := &textWrap{
		Value:    &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"},
		Metadata: leafReference{p},
	}
	if printAddress {
		node.Prefix = formatPointer(p, true)
	}
	return node
}
// resolveReferences walks the textNode tree searching for any leaf reference
// metadata and resolves each against the corresponding trunk references.
// Since pointer addresses in memory are not particularly readable to the user,
// it replaces each pointer value with an arbitrary and unique reference ID.
//
// IDs are assigned in trunk-traversal order, so output is deterministic for a
// given tree shape regardless of the actual addresses involved.
func resolveReferences(s textNode) {
	// walkNodes visits every node of the tree depth-first, calling f on each.
	var walkNodes func(textNode, func(textNode))
	walkNodes = func(s textNode, f func(textNode)) {
		f(s)
		switch s := s.(type) {
		case *textWrap:
			walkNodes(s.Value, f)
		case textList:
			for _, r := range s {
				walkNodes(r.Value, f)
			}
		}
	}

	// Collect all trunks and leaves with reference metadata.
	var trunks, leaves []*textWrap
	walkNodes(s, func(s textNode) {
		if s, ok := s.(*textWrap); ok {
			switch s.Metadata.(type) {
			case leafReference:
				leaves = append(leaves, s)
			case trunkReference, trunkReferences:
				trunks = append(trunks, s)
			}
		}
	})

	// No leaf references to resolve.
	if len(leaves) == 0 {
		return
	}

	// Collect the set of all leaf references to resolve.
	// Only trunks matching one of these pointers need an ID printed.
	leafPtrs := make(map[value.Pointer]bool)
	for _, leaf := range leaves {
		leafPtrs[leaf.Metadata.(leafReference).p] = true
	}

	// Collect the set of trunk pointers that are always paired together.
	// This allows us to assign a single ID to both pointers for brevity.
	// If a pointer in a pair ever occurs by itself or as a different pair,
	// then the pair is broken.
	pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
	unpair := func(p value.Pointer) {
		if !pairedTrunkPtrs[p].IsNil() {
			pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
		}
		pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
	}
	for _, trunk := range trunks {
		switch p := trunk.Metadata.(type) {
		case trunkReference:
			unpair(p.p) // standalone pointer cannot be part of a pair
		case trunkReferences:
			p0, ok0 := pairedTrunkPtrs[p.pp[0]]
			p1, ok1 := pairedTrunkPtrs[p.pp[1]]
			switch {
			case !ok0 && !ok1:
				// Register the newly seen pair.
				pairedTrunkPtrs[p.pp[0]] = p.pp[1]
				pairedTrunkPtrs[p.pp[1]] = p.pp[0]
			case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
				// Exact pair already seen; do nothing.
			default:
				// Pair conflicts with some other pair; break all pairs.
				unpair(p.pp[0])
				unpair(p.pp[1])
			}
		}
	}

	// Correlate each pointer referenced by leaves to a unique identifier,
	// and print the IDs for each trunk that matches those pointers.
	var nextID uint
	ptrIDs := make(map[value.Pointer]uint)
	newID := func() uint {
		id := nextID
		nextID++
		return id
	}
	for _, trunk := range trunks {
		switch p := trunk.Metadata.(type) {
		case trunkReference:
			if print := leafPtrs[p.p]; print {
				id, ok := ptrIDs[p.p]
				if !ok {
					id = newID()
					ptrIDs[p.p] = id
				}
				trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
			}
		case trunkReferences:
			print0 := leafPtrs[p.pp[0]]
			print1 := leafPtrs[p.pp[1]]
			if print0 || print1 {
				id0, ok0 := ptrIDs[p.pp[0]]
				id1, ok1 := ptrIDs[p.pp[1]]
				isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
				if isPair {
					// Both halves of a stable pair share one reference ID.
					var id uint
					assert(ok0 == ok1) // must be seen together or not at all
					if ok0 {
						assert(id0 == id1) // must have the same ID
						id = id0
					} else {
						id = newID()
						ptrIDs[p.pp[0]] = id
						ptrIDs[p.pp[1]] = id
					}
					trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
				} else {
					// Broken pair: assign (or reuse) IDs per pointer and print
					// only the sides that leaves actually refer to.
					if print0 && !ok0 {
						id0 = newID()
						ptrIDs[p.pp[0]] = id0
					}
					if print1 && !ok1 {
						id1 = newID()
						ptrIDs[p.pp[1]] = id1
					}
					switch {
					case print0 && print1:
						trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
					case print0:
						trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
					case print1:
						trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
					}
				}
			}
		}
	}

	// Update all leaf references with the unique identifier.
	for _, leaf := range leaves {
		if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
			leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
		}
	}
}
// formatReference renders a reference ID (e.g., "ref#3") that stands in for a
// raw pointer address in the output.
func formatReference(id uint) string {
	return "ref#" + fmt.Sprint(id)
}
func updateReferencePrefix(prefix, ref string) string {
if prefix == "" {
return pointerDelimPrefix + ref + pointerDelimSuffix
}
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
return pointerDelimPrefix + ref + ": " + suffix
} | vendor/github.com/google/go-cmp/cmp/report_references.go | 0.703855 | 0.419172 | report_references.go | starcoder |
package three
//go:generate go run geometry_method_generator/main.go -geometryType CylinderGeometry -geometrySlug cylinder_geometry
import (
"math"
"github.com/gopherjs/gopherjs/js"
)
// CylinderGeometry is a class for generating cylinder geometries.
// It wraps the JavaScript object constructed via three.Get("CylinderGeometry")
// (see NewCylinderGeometry); each tagged field proxies the corresponding JS
// property of that object.
type CylinderGeometry struct {
	*js.Object
	// RadiusTop is the radius of the cylinder at the top.
	RadiusTop float64 `js:"radiusTop"`
	// RadiusBottom is the radius of the cylinder at the bottom.
	RadiusBottom float64 `js:"radiusBottom"`
	// Height is the height of the cylinder.
	Height float64 `js:"height"`
	// RadialSegments is the number of segmented faces around the circumference.
	RadialSegments int `js:"radialSegments"`
	// HeightSegments is the number of rows of faces along the height.
	HeightSegments int `js:"heightSegments"`
	// OpenEnded indicates whether the ends of the cylinder are open or capped.
	OpenEnded bool `js:"openEnded"`
	// ThetaStart is the start angle for the first segment.
	ThetaStart float64 `js:"thetaStart"`
	// ThetaLength is the central angle of the circular sector.
	ThetaLength float64 `js:"thetaLength"`
}
// CylinderGeometryParameters holds the constructor arguments for
// NewCylinderGeometry. Zero-valued fields are replaced by defaults there.
type CylinderGeometryParameters struct {
	// Radius of the cylinder at the top. Default is 1.
	RadiusTop float64
	// Radius of the cylinder at the bottom. Default is 1.
	RadiusBottom float64
	// Height of the cylinder. Default is 1.
	Height float64
	// Number of segmented faces around the circumference of the cylinder. Default is 8
	RadialSegments int
	// Number of rows of faces along the height of the cylinder. Default is 1.
	HeightSegments int
	// A Boolean indicating whether the ends of the cylinder are open or capped. Default is false, meaning capped.
	OpenEnded bool
	// Start angle for first segment, default = 0 (three o'clock position).
	ThetaStart float64
	// The central angle, often called theta, of the circular sector. The default is 2*Pi, which makes for a complete cylinder.
	ThetaLength float64
}
// NewCylinderGeometry creates a new BoxGeometry. Set ThetaLength to NaN to create empty geometry.
func NewCylinderGeometry(params *CylinderGeometryParameters) CylinderGeometry {
if params == nil {
params = &CylinderGeometryParameters{}
}
// Make sure all are defined to prevent unclear code. This could be changed though...
if params.Height == 0 || params.RadiusTop == 0 || params.RadiusBottom == 0 {
params.Height = 1
params.RadiusTop = 1
params.RadiusBottom = 1
}
// Probably don't want no cylinder.
if params.ThetaLength == 0 {
params.ThetaLength = 2 * math.Pi
}
if math.IsNaN(params.ThetaLength) {
params.ThetaLength = 0
}
if params.RadialSegments == 0 {
params.RadialSegments = 8
}
if params.HeightSegments == 0 {
params.HeightSegments = 1
}
obj := three.Get("CylinderGeometry").New(
params.RadiusTop,
params.RadiusBottom,
params.Height,
params.RadialSegments,
params.HeightSegments,
params.OpenEnded,
params.ThetaStart,
params.ThetaLength,
)
// js.Global.Get("console").Call("log", obj.Interface(), params)
return CylinderGeometry{
Object: obj,
}
} | geometries_cylinder_geometry.go | 0.77081 | 0.490663 | geometries_cylinder_geometry.go | starcoder |
package aesr
// Round32sle mixes the input values with the AES encryption lookup tables
// (T-tables) and returns the result. Each output word XORs one table entry
// selected by a different byte of each input word, rotating which input
// supplies which byte position across the four outputs.
func Round32sle(x0, x1, x2, x3 uint32) (uint32, uint32, uint32, uint32) {
	y0 := (kAes0[x0&0xFF] ^
		kAes1[(x1>>8)&0xFF] ^
		kAes2[(x2>>16)&0xFF] ^
		kAes3[(x3>>24)&0xFF])
	y1 := (kAes0[x1&0xFF] ^
		kAes1[(x2>>8)&0xFF] ^
		kAes2[(x3>>16)&0xFF] ^
		kAes3[(x0>>24)&0xFF])
	y2 := (kAes0[x2&0xFF] ^
		kAes1[(x3>>8)&0xFF] ^
		kAes2[(x0>>16)&0xFF] ^
		kAes3[(x1>>24)&0xFF])
	y3 := (kAes0[x3&0xFF] ^
		kAes1[(x0>>8)&0xFF] ^
		kAes2[(x1>>16)&0xFF] ^
		kAes3[(x2>>24)&0xFF])
	return y0, y1, y2, y3
}

// Round32ble mixes the input values with the AES encryption lookup tables and
// returns the result, additionally XORing the round keys k0..k3 into the
// respective output words (a keyed variant of Round32sle).
func Round32ble(x0, x1, x2, x3, k0, k1, k2, k3 uint32) (uint32, uint32, uint32, uint32) {
	y0 := (kAes0[x0&0xFF] ^
		kAes1[(x1>>8)&0xFF] ^
		kAes2[(x2>>16)&0xFF] ^
		kAes3[(x3>>24)&0xFF] ^ k0)
	y1 := (kAes0[x1&0xFF] ^
		kAes1[(x2>>8)&0xFF] ^
		kAes2[(x3>>16)&0xFF] ^
		kAes3[(x0>>24)&0xFF] ^ k1)
	y2 := (kAes0[x2&0xFF] ^
		kAes1[(x3>>8)&0xFF] ^
		kAes2[(x0>>16)&0xFF] ^
		kAes3[(x1>>24)&0xFF] ^ k2)
	y3 := (kAes0[x3&0xFF] ^
		kAes1[(x0>>8)&0xFF] ^
		kAes2[(x1>>16)&0xFF] ^
		kAes3[(x2>>24)&0xFF] ^ k3)
	return y0, y1, y2, y3
}

// kAes0 is the base AES encryption T-table. The redundant uint32(...)
// conversions of the original were dropped (the array element type already
// fixes the type of each literal).
var kAes0 = [256]uint32{
	0xA56363C6, 0x847C7CF8, 0x997777EE, 0x8D7B7BF6,
	0x0DF2F2FF, 0xBD6B6BD6, 0xB16F6FDE, 0x54C5C591,
	0x50303060, 0x03010102, 0xA96767CE, 0x7D2B2B56,
	0x19FEFEE7, 0x62D7D7B5, 0xE6ABAB4D, 0x9A7676EC,
	0x45CACA8F, 0x9D82821F, 0x40C9C989, 0x877D7DFA,
	0x15FAFAEF, 0xEB5959B2, 0xC947478E, 0x0BF0F0FB,
	0xECADAD41, 0x67D4D4B3, 0xFDA2A25F, 0xEAAFAF45,
	0xBF9C9C23, 0xF7A4A453, 0x967272E4, 0x5BC0C09B,
	0xC2B7B775, 0x1CFDFDE1, 0xAE93933D, 0x6A26264C,
	0x5A36366C, 0x413F3F7E, 0x02F7F7F5, 0x4FCCCC83,
	0x5C343468, 0xF4A5A551, 0x34E5E5D1, 0x08F1F1F9,
	0x937171E2, 0x73D8D8AB, 0x53313162, 0x3F15152A,
	0x0C040408, 0x52C7C795, 0x65232346, 0x5EC3C39D,
	0x28181830, 0xA1969637, 0x0F05050A, 0xB59A9A2F,
	0x0907070E, 0x36121224, 0x9B80801B, 0x3DE2E2DF,
	0x26EBEBCD, 0x6927274E, 0xCDB2B27F, 0x9F7575EA,
	0x1B090912, 0x9E83831D, 0x742C2C58, 0x2E1A1A34,
	0x2D1B1B36, 0xB26E6EDC, 0xEE5A5AB4, 0xFBA0A05B,
	0xF65252A4, 0x4D3B3B76, 0x61D6D6B7, 0xCEB3B37D,
	0x7B292952, 0x3EE3E3DD, 0x712F2F5E, 0x97848413,
	0xF55353A6, 0x68D1D1B9, 0x00000000, 0x2CEDEDC1,
	0x60202040, 0x1FFCFCE3, 0xC8B1B179, 0xED5B5BB6,
	0xBE6A6AD4, 0x46CBCB8D, 0xD9BEBE67, 0x4B393972,
	0xDE4A4A94, 0xD44C4C98, 0xE85858B0, 0x4ACFCF85,
	0x6BD0D0BB, 0x2AEFEFC5, 0xE5AAAA4F, 0x16FBFBED,
	0xC5434386, 0xD74D4D9A, 0x55333366, 0x94858511,
	0xCF45458A, 0x10F9F9E9, 0x06020204, 0x817F7FFE,
	0xF05050A0, 0x443C3C78, 0xBA9F9F25, 0xE3A8A84B,
	0xF35151A2, 0xFEA3A35D, 0xC0404080, 0x8A8F8F05,
	0xAD92923F, 0xBC9D9D21, 0x48383870, 0x04F5F5F1,
	0xDFBCBC63, 0xC1B6B677, 0x75DADAAF, 0x63212142,
	0x30101020, 0x1AFFFFE5, 0x0EF3F3FD, 0x6DD2D2BF,
	0x4CCDCD81, 0x140C0C18, 0x35131326, 0x2FECECC3,
	0xE15F5FBE, 0xA2979735, 0xCC444488, 0x3917172E,
	0x57C4C493, 0xF2A7A755, 0x827E7EFC, 0x473D3D7A,
	0xAC6464C8, 0xE75D5DBA, 0x2B191932, 0x957373E6,
	0xA06060C0, 0x98818119, 0xD14F4F9E, 0x7FDCDCA3,
	0x66222244, 0x7E2A2A54, 0xAB90903B, 0x8388880B,
	0xCA46468C, 0x29EEEEC7, 0xD3B8B86B, 0x3C141428,
	0x79DEDEA7, 0xE25E5EBC, 0x1D0B0B16, 0x76DBDBAD,
	0x3BE0E0DB, 0x56323264, 0x4E3A3A74, 0x1E0A0A14,
	0xDB494992, 0x0A06060C, 0x6C242448, 0xE45C5CB8,
	0x5DC2C29F, 0x6ED3D3BD, 0xEFACAC43, 0xA66262C4,
	0xA8919139, 0xA4959531, 0x37E4E4D3, 0x8B7979F2,
	0x32E7E7D5, 0x43C8C88B, 0x5937376E, 0xB76D6DDA,
	0x8C8D8D01, 0x64D5D5B1, 0xD24E4E9C, 0xE0A9A949,
	0xB46C6CD8, 0xFA5656AC, 0x07F4F4F3, 0x25EAEACF,
	0xAF6565CA, 0x8E7A7AF4, 0xE9AEAE47, 0x18080810,
	0xD5BABA6F, 0x887878F0, 0x6F25254A, 0x722E2E5C,
	0x241C1C38, 0xF1A6A657, 0xC7B4B473, 0x51C6C697,
	0x23E8E8CB, 0x7CDDDDA1, 0x9C7474E8, 0x211F1F3E,
	0xDD4B4B96, 0xDCBDBD61, 0x868B8B0D, 0x858A8A0F,
	0x907070E0, 0x423E3E7C, 0xC4B5B571, 0xAA6666CC,
	0xD8484890, 0x05030306, 0x01F6F6F7, 0x120E0E1C,
	0xA36161C2, 0x5F35356A, 0xF95757AE, 0xD0B9B969,
	0x91868617, 0x58C1C199, 0x271D1D3A, 0xB99E9E27,
	0x38E1E1D9, 0x13F8F8EB, 0xB398982B, 0x33111122,
	0xBB6969D2, 0x70D9D9A9, 0x898E8E07, 0xA7949433,
	0xB69B9B2D, 0x221E1E3C, 0x92878715, 0x20E9E9C9,
	0x49CECE87, 0xFF5555AA, 0x78282850, 0x7ADFDFA5,
	0x8F8C8C03, 0xF8A1A159, 0x80898909, 0x170D0D1A,
	0xDABFBF65, 0x31E6E6D7, 0xC6424284, 0xB86868D0,
	0xC3414182, 0xB0999929, 0x772D2D5A, 0x110F0F1E,
	0xCBB0B07B, 0xFC5454A8, 0xD6BBBB6D, 0x3A16162C,
}

// kAes1, kAes2, and kAes3 are the standard AES T-table variants: every entry
// equals the corresponding kAes0 entry rotated left by 8, 16, and 24 bits
// respectively. Deriving them at package initialization replaces ~192 lines
// of duplicated literal data (and removes the risk of a mistyped entry).
// This also fixes the stray dataset metadata that had been fused onto the
// closing brace of the last table.
var (
	kAes1 = rotateTable(&kAes0, 8)
	kAes2 = rotateTable(&kAes0, 16)
	kAes3 = rotateTable(&kAes0, 24)
)

// rotateTable returns a copy of t with every entry rotated left by r bits
// (0 < r < 32).
func rotateTable(t *[256]uint32, r uint) [256]uint32 {
	var out [256]uint32
	for i, v := range t {
		out[i] = v<<r | v>>(32-r)
	}
	return out
}
package taxreturn
import (
"fmt"
"time"
)
const (
	// parseLayout and stringLayout are Go reference-time layouts (YYYY-MM-DD).
	// They currently share the same value but are kept as separate names so
	// input parsing and display formatting can diverge independently.
	parseLayout  string = "2006-01-02"
	stringLayout string = "2006-01-02"
)
// Period describes the interface of a generic time period.
type Period interface {
	// Start returns the first day of the period.
	Start() time.Time
	// End returns the last day of the period.
	End() time.Time
	// String returns a human-readable "start - end" representation.
	String() string
}
// BillPeriod describes a date period. Both the start and end dates are
// counted as part of the period (see Days).
type BillPeriod struct {
	start time.Time
	end   time.Time
}
// NewBillPeriod creates a new bill period; start and end dates are in format YYYY-MM-DD.
func NewBillPeriod(periodStart, periodEnd string) (BillPeriod, error) {
	start, err := time.Parse(parseLayout, periodStart)
	if err != nil {
		return BillPeriod{}, err
	}
	end, err := time.Parse(parseLayout, periodEnd)
	if err != nil {
		return BillPeriod{}, err
	}
	// NOTE(review): the constructor does not verify that start <= end;
	// callers are trusted to pass an ordered pair — confirm this is intended.
	return BillPeriod{start: start, end: end}, nil
}
// Days calculates the amount of days in the period. Start and end dates are
// counted as part of the period, hence the +1 on the exclusive-end day count.
func (p BillPeriod) Days() int {
	return DaysInPeriod(p.Start(), p.End()) + 1
}
// Start returns the first day of the bill period.
func (p BillPeriod) Start() time.Time {
	return p.start
}
// End returns the last day of the bill period.
func (p BillPeriod) End() time.Time {
	return p.end
}
// String renders the bill period as "YYYY-MM-DD - YYYY-MM-DD".
func (p BillPeriod) String() string {
	return fmt.Sprintf("%s - %s",
		p.Start().Format(stringLayout),
		p.End().Format(stringLayout))
}
// FinancialYear describes a financial year, running 1 July through 30 June.
type FinancialYear struct {
	start time.Time // 1 July of the starting calendar year
	end   time.Time // 30 June of the following calendar year
}
// FinancialYearStarting creates a new financial year by its starting calendar
// year: the year runs from 1 July of `year` to 30 June of `year+1` (UTC).
func FinancialYearStarting(year int) FinancialYear {
	start := time.Date(year, time.July, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(year+1, time.June, 30, 0, 0, 0, 0, time.UTC)
	return FinancialYear{start, end}
}
// FinancialYearEnding creates a new financial year by its ending calendar
// year: the year runs from 1 July of `year-1` to 30 June of `year` (UTC).
func FinancialYearEnding(year int) FinancialYear {
	start := time.Date(year-1, time.July, 1, 0, 0, 0, 0, time.UTC)
	end := time.Date(year, time.June, 30, 0, 0, 0, 0, time.UTC)
	return FinancialYear{start, end}
}
// Start start date of financial year.
func (fy FinancialYear) Start() time.Time {
return fy.start
}
// End end date of financial year.
func (fy FinancialYear) End() time.Time {
return fy.end
}
func (fy FinancialYear) String() string {
start := fy.Start().Format(stringLayout)
end := fy.End().Format(stringLayout)
return fmt.Sprintf("%s - %s", start, end)
}
// DaysInPeriod calculates amount of days between start and end date (end date excluded).
func DaysInPeriod(start, end time.Time) int {
days := end.Sub(start).Hours() / 24
return int(days)
}
// PeriodWithin returns true when period a is within period b, including the
// period start and end dates.
func PeriodWithin(a, b Period) bool {
	return (a.Start().After(b.Start()) || a.Start().Equal(b.Start())) &&
		(a.End().Before(b.End()) || a.End().Equal(b.End()))
}
// PeriodOutside returns true when period a is entirely outside period b,
// i.e. the two periods do not share any day.
func PeriodOutside(a, b Period) bool {
	return a.Start().After(b.End()) || a.End().Before(b.Start())
}
// PeriodOverlapsStart returns true when period a overlaps with the start of
// period b: a begins strictly before b and ends on or after b's first day.
func PeriodOverlapsStart(a, b Period) bool {
	return a.Start().Before(b.Start()) && (a.End().After(b.Start()) || a.End().Equal(b.Start()))
}
// PeriodOverlapsEnd retuns true when period a ovelaps with the end of period b.
func PeriodOverlapsEnd(a, b Period) bool {
return (a.Start().Before(b.End()) || a.Start().Equal(b.End())) && a.End().After(b.End())
} | period.go | 0.800653 | 0.46952 | period.go | starcoder |
package script64
import (
"fmt"
"go/token"
"reflect"
data "github.com/seeder-research/uMagNUS/data64"
)
// typeConv converts in to an expression of type outT.
// It also serves as a type check (not convertible == type error).
// pos is used for the error message on an impossible conversion.
func typeConv(pos token.Pos, in Expr, outT reflect.Type) Expr {
	inT := in.Type()
	switch {
	default:
		panic(err(pos, "type mismatch: can not use type", inT, "as", outT))
	// treat 'void' (type nil) separately:
	case inT == nil && outT != nil:
		panic(err(pos, "void used as value"))
	case inT != nil && outT == nil:
		panic("script internal bug: void input type")
	// strict go conversions:
	case inT == outT:
		return in
	case inT.AssignableTo(outT):
		return in
	// extra conversions for ease-of-use:
	// int -> float64
	case outT == float64_t && inT == int_t:
		return &intToFloat64{in}
	// float64 -> int
	case outT == int_t && inT == float64_t:
		return &float64ToInt{in}
	// scalar quantity used as float64
	case outT == float64_t && inT.AssignableTo(ScalarIf_t):
		return &getScalar{in.Eval().(ScalarIf)}
	// vector quantity; NOTE(review): this arm matches outT == float64_t even
	// though getVector yields a data.Vector — confirm the condition should
	// not read outT == vector_t instead.
	case outT == float64_t && inT.AssignableTo(VectorIf_t):
		return &getVector{in.Eval().(VectorIf)}
	// magical expression -> function conversions
	case inT == float64_t && outT.AssignableTo(ScalarFunction_t):
		return &scalFn{in}
	case inT == int_t && outT.AssignableTo(ScalarFunction_t):
		return &scalFn{&intToFloat64{in}}
	case inT == vector_t && outT.AssignableTo(VectorFunction_t):
		return &vecFn{in}
	case inT == bool_t && outT == func_bool_t:
		return &boolToFunc{in}
	}
}
// inputType reports the type an expression consumes. Usually this equals the
// expression's own return type, unless the expression provides an
// InputType() reflect.Type method to override it.
func inputType(e Expr) reflect.Type {
	type inputTyper interface {
		InputType() reflect.Type
	}
	if it, ok := e.(inputTyper); ok {
		return it.InputType()
	}
	return e.Type()
}
// common type definitions
var (
float64_t = reflect.TypeOf(float64(0))
bool_t = reflect.TypeOf(false)
func_float64_t = reflect.TypeOf(func() float64 { panic(0) })
func_bool_t = reflect.TypeOf(func() bool { panic(0) })
int_t = reflect.TypeOf(int(0))
string_t = reflect.TypeOf("")
vector_t = reflect.TypeOf(data.Vector{})
func_vector_t = reflect.TypeOf(func() data.Vector { panic(0) })
ScalarFunction_t = reflect.TypeOf(dummy_f).In(0)
VectorFunction_t = reflect.TypeOf(dummy_f3).In(0)
ScalarIf_t = reflect.TypeOf(dummy_scalarif).In(0)
VectorIf_t = reflect.TypeOf(dummy_vectorif).In(0)
)
// maneuvers to get interface type of Func (simpler way?)
func dummy_f(ScalarFunction) {}
func dummy_f3(VectorFunction) {}
func dummy_scalarif(ScalarIf) {}
func dummy_vectorif(VectorIf) {}
// converts int to float64
type intToFloat64 struct{ in Expr }
func (c *intToFloat64) Eval() interface{} { return float64(c.in.Eval().(int)) }
func (c *intToFloat64) Type() reflect.Type { return float64_t }
func (c *intToFloat64) Child() []Expr { return []Expr{c.in} }
func (c *intToFloat64) Fix() Expr { return &intToFloat64{in: c.in.Fix()} }
// converts float64 to int
type float64ToInt struct{ in Expr }
func (c *float64ToInt) Eval() interface{} { return safe_int(c.in.Eval().(float64)) }
func (c *float64ToInt) Type() reflect.Type { return int_t }
func (c *float64ToInt) Child() []Expr { return []Expr{c.in} }
func (c *float64ToInt) Fix() Expr { return &float64ToInt{in: c.in.Fix()} }
type boolToFunc struct{ in Expr }
func (c *boolToFunc) Eval() interface{} { return func() bool { return c.in.Eval().(bool) } }
func (c *boolToFunc) Type() reflect.Type { return func_bool_t }
func (c *boolToFunc) Child() []Expr { return []Expr{c.in} }
func (c *boolToFunc) Fix() Expr { return &boolToFunc{in: c.in.Fix()} }
type getScalar struct{ in ScalarIf }
type getVector struct{ in VectorIf }
func (c *getScalar) Eval() interface{} { return c.in.Get() }
func (c *getScalar) Type() reflect.Type { return float64_t }
func (c *getScalar) Child() []Expr { return nil }
func (c *getScalar) Fix() Expr { return NewConst(c) }
func (c *getVector) Eval() interface{} { return c.in.Get() }
func (c *getVector) Type() reflect.Type { return vector_t }
func (c *getVector) Child() []Expr { return nil }
func (c *getVector) Fix() Expr { return NewConst(c) }
// safe_int converts x to an int and panics when x cannot be represented
// exactly as an int (i.e. it has a fractional part or is out of range).
func safe_int(x float64) int {
	truncated := int(x)
	if x == float64(truncated) {
		return truncated
	}
	panic(fmt.Errorf("can not use %v as int", x))
}
// ScalarIf is implemented by any quantity that can produce a float64 value.
type ScalarIf interface {
	Get() float64
} // TODO: Scalar
// VectorIf is implemented by any quantity that can produce a data.Vector.
type VectorIf interface {
	Get() data.Vector
} // TODO: Vector
package schema
// Power aggregates the power-related metrics of a chassis: its voltage
// sensors and its power supplies.
type Power struct {
	// The type of a resource. [RO]
	OdataType string `json:"@odata.type"`
	// The identifier that uniquely identifies the Resource within
	// the collection of similar Resources. [RO]
	Id string `json:"Id"`
	// The name of the Resource or array member. [RO]
	Name string `json:"Name"`
	// The set of voltage sensors for this chassis. [RW]
	Voltages []PowerVoltage `json:"Voltages"`
	// The set of power supplies for this chassis. [RW]
	PowerSupplies []PowerPowerSupply `json:"PowerSupplies"`
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
}
// PowerVoltage describes a single voltage sensor reading with its
// warning/critical/fatal thresholds.
type PowerVoltage struct {
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
	// The identifier for the member within the collection. [RO]
	MemberId string `json:"MemberId"`
	// The voltage sensor name. [RO]
	Name string `json:"Name,omitempty"`
	// The numerical identifier of the voltage sensor. [RO]
	SensorNumber int64 `json:"SensorNumber,omitempty"`
	// The status and health of a Resource and its children. [RW]
	Status CommonStatus `json:"Status"`
	// The reading of the voltage sensor. [RO]
	ReadingVolts float64 `json:"ReadingVolts"`
	// The value at which the reading is above normal range. [RO]
	UpperThresholdNonCritical float64 `json:"UpperThresholdNonCritical,omitempty"`
	// The value at which the reading is above normal range but not yet fatal. [RO]
	UpperThresholdCritical float64 `json:"UpperThresholdCritical,omitempty"`
	// The value at which the reading is above normal range and fatal. [RO]
	UpperThresholdFatal float64 `json:"UpperThresholdFatal,omitempty"`
	// The value at which the reading is below normal range. [RO]
	LowerThresholdNonCritical float64 `json:"LowerThresholdNonCritical,omitempty"`
	// The value at which the reading is below normal range but not yet fatal. [RO]
	LowerThresholdCritical float64 `json:"LowerThresholdCritical,omitempty"`
	// The value at which the reading is below normal range and fatal. [RO]
	LowerThresholdFatal float64 `json:"LowerThresholdFatal,omitempty"`
	// Minimum value for this sensor. [RO]
	// NOTE(review): field name says "Temp" although this is a voltage sensor;
	// kept as-is for wire compatibility.
	MinReadingRangeTemp float64 `json:"MinReadingRangeTemp,omitempty"`
	// Maximum value for this sensor. [RO]
	MaxReadingRangeTemp float64 `json:"MaxReadingRangeTemp,omitempty"`
	// The area or device to which this voltage measurement applies. [RO]
	// Valid values:
	// ACInput: An AC input.
	// ACMaintenanceBypassInput: An AC maintenance bypass input.
	// ACOutput: An AC output.
	// ACStaticBypassInput: An AC static bypass input.
	// ACUtilityInput: An AC utility input.
	// ASIC: An ASIC device, such as a networking chip or chipset component.
	// Accelerator: An accelerator.
	// Back: The back of the chassis.
	// Backplane: A backplane within the chassis.
	// CPU: A processor (CPU).
	// CPUSubsystem: The entire processor (CPU) subsystem.
	// Chassis: The entire chassis.
	// ComputeBay: Within a compute bay.
	// CoolingSubsystem: The entire cooling, or air and liquid, subsystem.
	// DCBus: A DC bus.
	// Exhaust: The air exhaust point or points or region of the chassis.
	// ExpansionBay: Within an expansion bay.
	// FPGA: An FPGA.
	// Fan: A fan.
	// Front: The front of the chassis.
	// GPU: A graphics processor (GPU).
	// GPUSubsystem: The entire graphics processor (GPU) subsystem.
	// Intake: The air intake point or points or region of the chassis.
	// LiquidInlet: The liquid inlet point of the chassis.
	// LiquidOutlet: The liquid outlet point of the chassis.
	// Lower: The lower portion of the chassis.
	// Memory: A memory device.
	// MemorySubsystem: The entire memory subsystem.
	// Motor: A motor.
	// NetworkBay: Within a networking bay.
	// NetworkingDevice: A networking device.
	// PowerSubsystem: The entire power subsystem.
	// PowerSupply: A power supply.
	// PowerSupplyBay: Within a power supply bay.
	// Rectifier: A rectifier device.
	// Room: The room.
	// StorageBay: Within a storage bay.
	// StorageDevice: A storage device.
	// SystemBoard: The system board (PCB).
	// Transformer: A transformer.
	// Upper: The upper portion of the chassis.
	// VoltageRegulator: A voltage regulator device.
	PhysicalContext string `json:"PhysicalContext"`
	// The areas or devices to which this voltage reading applies. [RO]
	RelatedItem []CommonOid
}
// PowerPowerSupply describes a single power supply unit: its identity,
// input/output electrical readings, and supported input ranges.
type PowerPowerSupply struct {
	// The unique identifier for a resource. [RO]
	OdataId string `json:"@odata.id"`
	// The identifier for the member within the collection. [RO]
	MemberId string `json:"MemberId"`
	// The power supply name. [RO]
	Name string `json:"Name,omitempty"`
	// The numerical identifier of this sensor. [RO]
	SensorNumber int64 `json:"SensorNumber,omitempty"`
	// The status and health of a Resource and its children. [RW]
	Status CommonStatus `json:"Status"`
	// The power supply type (AC or DC). [RO]
	PowerSupplyType string `json:"PowerSupplyType"`
	// The line voltage type supported as an input to this power supply. [RO]
	//Valid values:
	//AC120V AC 120V nominal input.
	//AC240V AC 240V nominal input.
	//AC277V AC 277V nominal input.
	//ACHighLine 277V AC input.
	//ACLowLine 100-127V AC input.
	//ACMidLine 200-240V AC input.
	//ACWideRange Wide range AC input.
	//ACandDCWideRange Wide range AC or DC input.
	//DC240V DC 240V nominal input.
	//DC380V High Voltage DC input (380V).
	//DCNeg48V -48V DC input.
	//Unknown The power supply line input voltage type cannot be determined.
	LineInputVoltageType string `json:"LineInputVoltageType"`
	// The line input voltage at which the power supply is operating. [RO]
	LineInputVoltage float64 `json:"LineInputVoltage"`
	// The maximum capacity of this power supply. [RO]
	PowerCapacityWatts float64 `json:"PowerCapacityWatts"`
	// The average power output of this power supply. [RO]
	LastPowerOutputWatts float64 `json:"LastPowerOutputWatts"`
	// The power input of this power supply. [RO]
	PowerInputWatts float64 `json:"PowerInputWatts"`
	// The power output of this power supply. [RO]
	PowerOutputWatts float64 `json:"PowerOutputWatts"`
	// The model number for this power supply. [RO]
	Model string `json:"Model"`
	// The manufacturer of this power supply. [RO]
	Manufacturer string `json:"Manufacturer"`
	// The firmware version for this power supply. [RO]
	FirmwareVersion string `json:"FirmwareVersion"`
	// The serial number for this power supply. [RO]
	SerialNumber string `json:"SerialNumber"`
	// The part number for this power supply. [RO]
	PartNumber string `json:"PartNumber"`
	// The spare part number for this power supply. [RO]
	SparePartNumber string `json:"SparePartNumber"`
	// The input ranges that the power supply can use. [RW]
	InputRanges []PowerSupplyInputRange `json:"InputRanges"`
	// The areas or devices to which this power supply applies. [RO]
	RelatedItem []CommonOid
}
// PowerSupplyInputRange describes one input range a power supply can operate in.
type PowerSupplyInputRange struct {
	// The Input type (AC or DC). [RO]
	InputType string `json:"InputType"`
	// The minimum line input voltage at which this power supply input range is effective. [RO]
	MinimumVoltage float64 `json:"MinimumVoltage"`
	// The maximum line input voltage at which this power supply input range is effective. [RO]
	MaximumVoltage float64 `json:"MaximumVoltage"`
	// The minimum input frequency. [RO]
	MinimumFrequencyHz float64 `json:"MinimumFrequencyHz"`
	// The maximum input frequency. [RO]
	MaximumFrequencyHz float64 `json:"MaximumFrequencyHz"`
	// The maximum capacity of this power supply when operating in this input range. [RO]
	OutputWattage float64 `json:"OutputWattage"`
}
package msh
import (
"gosl/utl"
)
// EdgeKey holds 3 sorted numbers to identify an edge
type EdgeKey struct {
A int // id of one vertex on edge
B int // id of another vertex on edge
C int // id of a third vertex on edge or the number of mesh vertices if edge has only 2 vertices
}
// Edge holds the vertices and cells attached to an edge
type Edge struct {
Verts VertexSet // vertices on edge
Bdata BoundaryDataSet // cells attached to edge, including which local edge id of cell is attached
}
// EdgesMap is a map of edges
type EdgesMap map[EdgeKey]*Edge
// ExtractEdges finds all edges in the mesh, returning a map from the sorted
// vertex triple identifying each edge to the Edge data (its vertices and the
// cells attached to it).
func (o *Mesh) ExtractEdges() (edges EdgesMap) {

	// new map
	edges = make(map[EdgeKey]*Edge)

	// loop over cells
	var edgeKey EdgeKey
	for _, cell := range o.Cells {

		// loop over edges of cell
		for localEdgeID, localVids := range EdgeLocalVerts[cell.TypeIndex] {

			// set edge key as triple of vertices; edges with only two vertices
			// fill the third slot with len(o.Verts), a sentinel that cannot
			// collide with any real vertex id
			nVertsOnEdge := len(localVids)
			edgeKey.A = cell.V[localVids[0]]
			edgeKey.B = cell.V[localVids[1]]
			if nVertsOnEdge > 2 {
				edgeKey.C = cell.V[localVids[2]]
			} else {
				edgeKey.C = len(o.Verts) // indicator of not-available
			}
			// sort the triple so the key is independent of edge orientation
			utl.IntSort3(&edgeKey.A, &edgeKey.B, &edgeKey.C)

			// append this cell to list of shared cells of edge
			if edge, ok := edges[edgeKey]; ok {
				edge.Bdata = append(edge.Bdata, &BoundaryData{localEdgeID, cell})

				// new edge
			} else {
				edge = new(Edge)
				edge.Verts = make([]*Vertex, nVertsOnEdge)
				edge.Bdata = []*BoundaryData{{localEdgeID, cell}}
				for j, lvid := range localVids {
					edge.Verts[j] = o.Verts[cell.V[lvid]]
				}
				edges[edgeKey] = edge
			}
		}
	}
	return
}
// Split splits map into two sets: internal and boundary edges
// NOTE: boundary edge is determined by checking if edge is shared by only cell only
func (o *EdgesMap) Split() (internal, boundary EdgesMap) {
internal = make(map[EdgeKey]*Edge)
boundary = make(map[EdgeKey]*Edge)
for ekey, edge := range *o {
if len(edge.Bdata) == 1 {
boundary[ekey] = edge
} else {
internal[ekey] = edge
}
}
return
} | gm/msh/topology.go | 0.588889 | 0.701189 | topology.go | starcoder |
package models
import (
"fmt"
"strings"
"github.com/SKF/go-utility/v2/uuid"
grpcapi "github.com/SKF/proto/v2/hierarchy"
)
// MeasurementPoint describes an assets measurement points
type MeasurementPoint struct {
// Bearing number on this asset
Bearing int32 `json:"bearing" example:"1"`
// Orientation of measurement
Angular Orientation `json:"orientation" swaggertype:"string" example:"vertical" enums:"axial,radial,radial90,horizontal,vertical,unknown"`
// Type of measurement
MeasurementType MeasurementType `json:"measurementType" swaggertype:"string" example:"acceleration" enums:"displacement,acceleration,velocity,temperature,dc_gap,ampl_phase,box,speed,envelope_2,envelope_3,unknown"`
// Identifier of shaft that this measurement point belongs to
Shaft string `json:"shaft" example:"C"`
// Which side of the given shaft this measurement point belongs to
ShaftSide ShaftSide `json:"shaftSide" swaggertype:"string" example:"nde" enums:"de,nde"`
// Speed in RPM if this shaft has a fixed speed
FixedSpeedRPM float64 `json:"fixedSpeedRpm,omitempty" example:"150"`
// ID of measurement point location
LocationId *uuid.UUID `json:"locationId,omitempty" swaggertype:"string" format:"uuid"`
// Type of device used to take measurements on this point
DADType string `json:"dadType,omitempty"`
}
// Orientation describes a measurement points orientation
type Orientation string
// Valid values of measurement points orientations
const (
Axial Orientation = "axial"
Horizontal Orientation = "horizontal"
Vertical Orientation = "vertical"
Radial Orientation = "radial"
Radial90 Orientation = "radial90"
UnknownOrientation Orientation = "unknown"
)
var orientations = []Orientation{
Axial, Horizontal, Vertical, Radial, Radial90, UnknownOrientation,
}
// MeasurementType is measurement type unit
type MeasurementType string
// Valid measurement type values
const (
Displacement MeasurementType = "displacement"
Acceleration MeasurementType = "acceleration"
Velocity MeasurementType = "velocity"
Temperature MeasurementType = "temperature"
DCGAP MeasurementType = "dc_gap"
AMPLPHASE MeasurementType = "ampl_phase"
BOV MeasurementType = "bov"
Speed MeasurementType = "speed"
Envelope3 MeasurementType = "envelope_3"
Envelope2 MeasurementType = "envelope_2"
UnknownMeasurementType MeasurementType = "unknown"
)
var measurementTypes = []MeasurementType{
Displacement, Acceleration, Velocity, Temperature, DCGAP, AMPLPHASE, BOV, Speed, Envelope3, Envelope2, UnknownMeasurementType,
}
// ShaftSide describes on what side of a shaft the measurement point is located
type ShaftSide string
// Valid shaft side values
const (
DE ShaftSide = "de"
NDE ShaftSide = "nde"
UnknownShaftSide ShaftSide = "unknown"
)
var shaftSides = []ShaftSide{
DE, NDE, UnknownShaftSide,
}
// ParseShaftSide maps a raw string onto a valid ShaftSide value; anything
// unrecognized becomes UnknownShaftSide.
func ParseShaftSide(shaftSide string) ShaftSide {
	candidate := ShaftSide(shaftSide)
	for _, known := range shaftSides {
		if candidate == known {
			return known
		}
	}
	return UnknownShaftSide
}
// ParseOrientation maps a raw string onto a valid Orientation value; anything
// unrecognized becomes UnknownOrientation.
func ParseOrientation(orientation string) Orientation {
	candidate := Orientation(orientation)
	for _, known := range orientations {
		if candidate == known {
			return known
		}
	}
	return UnknownOrientation
}
// ParseMeasurementType maps a raw string onto a valid MeasurementType value;
// anything unrecognized becomes UnknownMeasurementType.
func ParseMeasurementType(measurementType string) MeasurementType {
	candidate := MeasurementType(measurementType)
	for _, known := range measurementTypes {
		if candidate == known {
			return known
		}
	}
	return UnknownMeasurementType
}
// ToShort returns a short one- or two-letter code for the measurement type,
// e.g. "A" for acceleration or "E2" for envelope_2; unknown types yield "".
func (m MeasurementType) ToShort() string {
	switch m {
	case Displacement, Acceleration, Velocity, Temperature, Speed, BOV:
		// These abbreviate to their upper-cased first letter.
		return strings.ToUpper(m.String()[0:1])
	case DCGAP:
		return "G"
	case AMPLPHASE:
		return "P"
	case Envelope2:
		return "E2"
	case Envelope3:
		return "E3"
	default:
		return ""
	}
}
func (mt MeasurementType) String() string {
return string(mt)
}
func (o Orientation) String() string {
return string(o)
}
func (ss ShaftSide) String() string {
return string(ss)
}
// Validate - validates a MeasurementType
func (mt MeasurementType) Validate() error {
for _, measurementType := range measurementTypes {
if mt == measurementType {
return nil
}
}
return fmt.Errorf("'%s' is not a valid measurement type", mt)
}
// Validate - validates an Orientation
func (o Orientation) Validate() error {
for _, orentation := range orientations {
if o == orentation {
return nil
}
}
return fmt.Errorf("'%s' is not a valid orientation type", o)
}
// Validate - validates a ShaftSide
func (ss ShaftSide) Validate() error {
for _, shiftside := range shaftSides {
if ss == shiftside {
return nil
}
}
return fmt.Errorf("'%s' is not a valid ShaftSide", ss)
}
// Validate - validates a MeasurementPoint
func (meas MeasurementPoint) Validate() error {
if meas.Bearing == 0 {
return fmt.Errorf("Bearing cannot be zero")
}
if err := meas.Angular.Validate(); err != nil {
return err
}
if err := meas.MeasurementType.Validate(); err != nil {
return err
}
if err := meas.ShaftSide.Validate(); err != nil {
return err
}
if meas.FixedSpeedRPM < 0 {
return fmt.Errorf("FixedSpeedRPM cannot be negative")
}
if meas.LocationId != nil {
if err := meas.LocationId.Validate(); err != nil {
return fmt.Errorf("locationId is invalid: %s", meas.LocationId)
}
}
return nil
}
// ToGRPC converts the MeasurementPoint into its gRPC wire representation.
func (meas MeasurementPoint) ToGRPC() *grpcapi.MeasurementPoint {
	out := &grpcapi.MeasurementPoint{
		Bearing:         meas.Bearing,
		Angular:         meas.Angular.String(),
		MeasurementType: meas.MeasurementType.String(),
		Shaft:           meas.Shaft,
		ShaftSide:       meas.ShaftSide.String(),
		FixedSpeedRPM:   meas.FixedSpeedRPM,
		DadType:         meas.DADType,
	}
	if meas.LocationId != nil {
		out.LocationId = meas.LocationId.String()
	}
	return out
}
// FromGRPC - converts to a MeasurementPoint from the gRPC MeasurementPoint struct
func (meas *MeasurementPoint) FromGRPC(measPoint grpcapi.MeasurementPoint) {
meas.Bearing = measPoint.Bearing
meas.Angular = ParseOrientation(measPoint.Angular)
meas.MeasurementType = MeasurementType(measPoint.MeasurementType)
meas.Shaft = measPoint.Shaft
meas.ShaftSide = ParseShaftSide(measPoint.ShaftSide)
meas.FixedSpeedRPM = measPoint.FixedSpeedRPM
if measPoint.LocationId != "" {
meas.LocationId = (*uuid.UUID)(&measPoint.LocationId)
}
meas.DADType = measPoint.DadType
} | v2/services/hierarchy/models/measurement_point.go | 0.79956 | 0.530966 | measurement_point.go | starcoder |
package fp
// IsEmpty reports whether the queue holds no elements; the queue is empty
// only when both its in and out parts are empty.
func (q BoolQueue) IsEmpty() bool {
	return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q StringQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q IntQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Int64Queue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q ByteQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q RuneQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float32Queue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float64Queue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q AnyQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Tuple2Queue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q BoolArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q StringArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q IntArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Int64ArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q ByteArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q RuneArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float32ArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float64ArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q AnyArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Tuple2ArrayQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q BoolOptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q StringOptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q IntOptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Int64OptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q ByteOptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q RuneOptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float32OptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float64OptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q AnyOptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Tuple2OptionQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q BoolListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q StringListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q IntListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Int64ListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q ByteListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q RuneListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float32ListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Float64ListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q AnyListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
}
func (q Tuple2ListQueue) IsEmpty() bool {
return (*q.in).IsEmpty() && (*q.out).IsEmpty()
} | fp/bootstrap_queue_isempty.go | 0.788257 | 0.584716 | bootstrap_queue_isempty.go | starcoder |
package ipld
import (
"crypto/sha256"
"github.com/ipfs/go-cid"
"github.com/tendermint/tendermint/pkg/consts"
"github.com/celestiaorg/celestia-node/ipld/pb"
"github.com/celestiaorg/celestia-node/ipld/plugin"
"github.com/celestiaorg/nmt"
"github.com/celestiaorg/nmt/namespace"
)
const (
// MaxSquareSize is currently the maximum size supported for unerasured data in rsmt2d.ExtendedDataSquare.
MaxSquareSize = consts.MaxSquareSize
// NamespaceSize is a system-wide size for NMT namespaces.
NamespaceSize = consts.NamespaceSize
// ShareSize is a system-wide size of a share, including both data and namespace ID
ShareSize = consts.ShareSize
)
// DefaultRSMT2DCodec sets the default rsmt2d.Codec for shares.
var DefaultRSMT2DCodec = consts.DefaultCodec
// Share contains the raw share data without the corresponding namespace.
// NOTE: Alias for the byte is chosen to keep maximal compatibility, especially with rsmt2d. Ideally, we should define
// reusable type elsewhere and make everyone(Core, rsmt2d, ipld) to rely on it.
type Share = []byte
// ShareID gets the namespace ID from the share.
// It panics if the share is shorter than NamespaceSize bytes.
func ShareID(s Share) namespace.ID {
	return s[:NamespaceSize]
}
// ShareData gets the raw data from the share, i.e. everything after the
// namespace ID prefix. It panics if the share is shorter than NamespaceSize.
func ShareData(s Share) []byte {
	return s[NamespaceSize:]
}
// ShareWithProof contains data with corresponding Merkle Proof
type ShareWithProof struct {
// Share is a full data including namespace
Share
// Proof is a Merkle Proof of current share
Proof *nmt.Proof
}
// NewShareWithProof takes the given leaf and its path, starting from the tree
// root, and computes the nmt.Proof for it.
func NewShareWithProof(index int, share Share, pathToLeaf []cid.Cid) *ShareWithProof {
	// The inclusion proof wants nodes ordered from the leaf upwards, while
	// pathToLeaf is given root-first, so walk the path backwards.
	nodes := make([][]byte, 0, len(pathToLeaf))
	for i := len(pathToLeaf) - 1; i >= 0; i-- {
		nodes = append(nodes, plugin.NamespacedSha256FromCID(pathToLeaf[i]))
	}
	proof := nmt.NewInclusionProof(index, index+1, nodes, true)
	return &ShareWithProof{Share: share, Proof: &proof}
}
// Validate validates inclusion of the share under the given root CID:
// it checks the stored Merkle proof for the share's namespace and data
// against the NMT root hash extracted from the CID.
func (s *ShareWithProof) Validate(root cid.Cid) bool {
	return s.Proof.VerifyInclusion(
		sha256.New(), // TODO(@Wondertan): This should be defined somewhere globally
		ShareID(s.Share),
		ShareData(s.Share),
		plugin.NamespacedSha256FromCID(root),
	)
}
// ShareWithProofToProto converts the share and its proof into the protobuf
// wire representation.
func (s *ShareWithProof) ShareWithProofToProto() *pb.Share {
	proof := &pb.MerkleProof{
		Start:    int64(s.Proof.Start()),
		End:      int64(s.Proof.End()),
		Nodes:    s.Proof.Nodes(),
		LeafHash: s.Proof.LeafHash(),
	}
	return &pb.Share{Data: s.Share, Proof: proof}
}
// ProtoToShare converts a slice of protobuf shares back into shares with
// their inclusion proofs.
func ProtoToShare(protoShares []*pb.Share) []*ShareWithProof {
	out := make([]*ShareWithProof, 0, len(protoShares))
	for _, ps := range protoShares {
		proof := ProtoToProof(ps.Proof)
		out = append(out, &ShareWithProof{ps.Data, &proof})
	}
	return out
}
func ProtoToProof(protoProof *pb.MerkleProof) nmt.Proof {
return nmt.NewInclusionProof(int(protoProof.Start), int(protoProof.End), protoProof.Nodes, true)
} | ipld/share.go | 0.589835 | 0.435421 | share.go | starcoder |
package amdsi
import (
"fmt"
"strings"
)
// Instruction contains all fields of a decoded assembly instruction.
type Instruction struct {
	// InstType is the format class of the instruction (e.g. "SOP2").
	InstType string
	// InstText is the instruction mnemonic (e.g. "s_add_u32").
	InstText string
	// DstRegs lists the destination registers of the instruction.
	DstRegs []*Register
	// SrcRegs lists the source registers of the instruction.
	SrcRegs []*Register
	// VRegs flags per-index vector register usage (see symbolMap for codes).
	VRegs [255]int
	// SRegs flags per-index scalar register usage (see symbolMap for codes).
	SRegs [102]int
	// Raw is the original, unparsed assembly text.
	Raw string
}
// Register usage flags stored in Instruction.SRegs/VRegs (see symbolMap).
const (
	regUsageNone = 0 // register untouched
	regUsageDst  = 1 // written only
	regUsageSrc  = 2 // read only
	regUsageBoth = 3 // both read and written
)

// markReg records that reg is used with the given usage (dst or src),
// upgrading a register that is already marked to "both".
func (inst *Instruction) markReg(reg *Register, usage int) {
	var slot *int
	switch reg.Type {
	case "s":
		slot = &inst.SRegs[reg.Index]
	case "v":
		slot = &inst.VRegs[reg.Index]
	default:
		panic("Invalid")
	}
	if *slot == regUsageNone {
		*slot = usage
	} else {
		*slot = regUsageBoth
	}
}

// NewInstruction parses one line of assembly text and returns the decoded
// instruction, including which scalar/vector registers it reads and writes.
func NewInstruction(asm string) *Instruction {
	inst := new(Instruction)
	inst.Raw = asm

	// The first field is the mnemonic; the remaining fields are operands.
	// By convention the first operand holds the destination registers and
	// the rest hold the source registers.
	fields := strings.FieldsFunc(asm, delimiter)
	if len(fields) > 0 && IsValidInst(fields[0]) {
		inst.InstText = fields[0]
		inst.InstType = GetInstType(inst.InstText)
		for i := 1; i < len(fields); i++ {
			if !IsRegister(fields[i]) {
				continue
			}
			regs := ParseRegisters(fields[i])
			if i == 1 {
				inst.DstRegs = append(inst.DstRegs, regs...)
			} else {
				inst.SrcRegs = append(inst.SrcRegs, regs...)
			}
		}
	}

	// Record register usage. BUG FIX: vector source registers were previously
	// flagged with 1 ("D", destination) instead of 2 ("S", source), unlike
	// the scalar path; both paths now share markReg with the correct flag.
	for _, reg := range inst.DstRegs {
		inst.markReg(reg, regUsageDst)
	}
	for _, reg := range inst.SrcRegs {
		inst.markReg(reg, regUsageSrc)
	}
	return inst
}
// Print instructions
// Print writes the instruction type, mnemonic, and operand registers
// (destinations first, then sources) to standard output on one line.
func (inst *Instruction) Print() {
	fmt.Print(inst.InstType, "\t", inst.InstText, " ")
	printRegs := func(regs []*Register) {
		for _, r := range regs {
			fmt.Print(r.Type, r.Index, " ")
		}
	}
	printRegs(inst.DstRegs)
	printRegs(inst.SrcRegs)
	fmt.Println()
}
var symbolMap = map[int]string{0: "_", 1: "D", 2: "S", 3: "B"}
// PrintSRegs print scalar register usage as a vector
// PrintSRegs print scalar register usage as a vector of symbols
// ("_", "D", "S", "B") for the first max scalar registers.
func (inst *Instruction) PrintSRegs(max int) {
	for _, usage := range inst.SRegs[:max] {
		fmt.Print(symbolMap[usage])
	}
}
// PrintVRegs print vector register usage as a vector
// PrintVRegs print vector register usage as a vector of symbols
// ("_", "D", "S", "B") for the first max vector registers.
func (inst *Instruction) PrintVRegs(max int) {
	for _, usage := range inst.VRegs[:max] {
		fmt.Print(symbolMap[usage])
	}
}
// IsValidInst checks if instruction is a valid instruction
// IsValidInst reports whether inst is a known instruction mnemonic.
func IsValidInst(inst string) bool {
	// Idiomatic comma-ok lookup instead of an if/return-true/return-false chain.
	_, ok := InstType[inst]
	return ok
}
// GetInstType returns the type of an instruction
// GetInstType returns the encoding format name of an instruction mnemonic,
// or "Invalid" when the mnemonic is unknown.
func GetInstType(inst string) string {
	instType, ok := InstType[inst]
	if !ok {
		return "Invalid"
	}
	return instType
}
// delimiter reports whether r separates operand fields in an assembly line
// (space, comma, slash, or tab).
func delimiter(r rune) bool {
	return strings.ContainsRune(" ,/\t", r)
}
// InstType maps every supported instruction mnemonic to the name of its
// encoding format (e.g. "SOP2", "VOP1", "MUBUF"), grouped below by format.
var InstType = map[string]string{
	// SOP2: scalar format with two inputs, one output.
	"s_add_u32": "SOP2",
	"s_sub_u32": "SOP2",
	"s_add_i32": "SOP2",
	"s_sub_i32": "SOP2",
	"s_addc_u32": "SOP2",
	"s_subb_u32": "SOP2",
	"s_min_i32": "SOP2",
	"s_min_u32": "SOP2",
	"s_max_i32": "SOP2",
	"s_max_u32": "SOP2",
	"s_cselect_b32": "SOP2",
	"s_cselect_b64": "SOP2",
	"s_and_b32": "SOP2",
	"s_and_b64": "SOP2",
	"s_or_b32": "SOP2",
	"s_or_b64": "SOP2",
	"s_xor_b32": "SOP2",
	"s_xor_b64": "SOP2",
	"s_andn2_b32": "SOP2",
	"s_andn2_b64": "SOP2",
	"s_orn2_b32": "SOP2",
	"s_orn2_b64": "SOP2",
	"s_nand_b32": "SOP2",
	"s_nand_b64": "SOP2",
	"s_nor_b32": "SOP2",
	"s_nor_b64": "SOP2",
	"s_xnor_b32": "SOP2",
	"s_xnor_b64": "SOP2",
	"s_lshl_b32": "SOP2",
	"s_lshl_b64": "SOP2",
	"s_lshr_b32": "SOP2",
	"s_lshr_b64": "SOP2",
	"s_ashr_i32": "SOP2",
	"s_ashr_i64": "SOP2",
	"s_bfm_b32": "SOP2",
	"s_bfm_b64": "SOP2",
	"s_mul_i32": "SOP2",
	"s_bfe_u32": "SOP2",
	"s_bfe_i32": "SOP2",
	"s_bfe_u64": "SOP2",
	"s_bfe_i64": "SOP2",
	"s_cbranch_g_fork": "SOP2",
	"s_absdiff_i32": "SOP2",
	// SOPK: scalar format with an inline 16-bit constant.
	"s_movk_i32": "SOPK",
	"s_cmovk_i32": "SOPK",
	"s_cmpk_eq_i32": "SOPK",
	"s_cmpk_lg_i32": "SOPK",
	"s_cmpk_gt_i32": "SOPK",
	"s_cmpk_ge_i32": "SOPK",
	"s_cmpk_lt_i32": "SOPK",
	"s_cmpk_le_i32": "SOPK",
	"s_cmpk_eq_u32": "SOPK",
	"s_cmpk_lg_u32": "SOPK",
	"s_cmpk_gt_u32": "SOPK",
	"s_cmpk_ge_u32": "SOPK",
	"s_cmpk_lt_u32": "SOPK",
	"s_cmpk_le_u32": "SOPK",
	"s_addk_i32": "SOPK",
	"s_mulk_i32": "SOPK",
	"s_cbranch_i_fork": "SOPK",
	"s_getreg_b32": "SOPK",
	"s_setreg_b32": "SOPK",
	"s_setreg_imm32_b32": "SOPK",
	// SOP1: scalar format with one input, one output.
	"s_mov_b32": "SOP1",
	"s_mov_b64": "SOP1",
	"s_cmov_b32": "SOP1",
	"s_cmov_b64": "SOP1",
	"s_not_b32": "SOP1",
	"s_not_b64": "SOP1",
	"s_wqm_b32": "SOP1",
	"s_wqm_b64": "SOP1",
	"s_brev_b32": "SOP1",
	"s_brev_b64": "SOP1",
	"s_bcnt0_i32_b32": "SOP1",
	"s_bcnt0_i32_b64": "SOP1",
	"s_bcnt1_i32_b32": "SOP1",
	"s_bcnt1_i32_b64": "SOP1",
	"s_ff0_i32_b32": "SOP1",
	"s_ff0_i32_b64": "SOP1",
	"s_ff1_i32_b32": "SOP1",
	"s_ff1_i32_b64": "SOP1",
	"s_flbit_i32_b32": "SOP1",
	"s_flbit_i32_b64": "SOP1",
	"s_flbit_i32": "SOP1",
	"s_flbit_i32_i64": "SOP1",
	"s_sext_i32_i8": "SOP1",
	"s_sext_i32_i16": "SOP1",
	"s_bitset0_b32": "SOP1",
	"s_bitset0_b64": "SOP1",
	"s_bitset1_b32": "SOP1",
	"s_bitset1_b64": "SOP1",
	"s_getpc_b64": "SOP1",
	"s_setpc_b64": "SOP1",
	"s_swappc_b64": "SOP1",
	"s_rfe_b64": "SOP1",
	"s_and_saveexec_b64": "SOP1",
	"s_or_saveexec_b64": "SOP1",
	"s_xor_saveexec_b64": "SOP1",
	"s_andn2_saveexec_b64": "SOP1",
	"s_orn2_saveexec_b64": "SOP1",
	"s_nand_saveexec_b64": "SOP1",
	"s_nor_saveexec_b64": "SOP1",
	"s_xnor_saveexec_b64": "SOP1",
	"s_quadmask_b32": "SOP1",
	"s_quadmask_b64": "SOP1",
	"s_movrels_b32": "SOP1",
	"s_movrels_b64": "SOP1",
	"s_movreld_b32": "SOP1",
	"s_movreld_b64": "SOP1",
	"s_cbranch_join": "SOP1",
	"s_abs_i32": "SOP1",
	"s_mov_fed_b32": "SOP1",
	// SOPC: scalar compare format.
	"s_cmp_eq_i32": "SOPC",
	"s_cmp_lg_i32": "SOPC",
	"s_cmp_gt_i32": "SOPC",
	"s_cmp_ge_i32": "SOPC",
	"s_cmp_lt_i32": "SOPC",
	"s_cmp_le_i32": "SOPC",
	"s_cmp_eq_u32": "SOPC",
	"s_cmp_lg_u32": "SOPC",
	"s_cmp_gt_u32": "SOPC",
	"s_cmp_ge_u32": "SOPC",
	"s_cmp_lt_u32": "SOPC",
	"s_cmp_le_u32": "SOPC",
	"s_bitcmp0_b32": "SOPC",
	"s_bitcmp1_b32": "SOPC",
	"s_bitcmp0_b64": "SOPC",
	"s_bitcmp1_b64": "SOPC",
	"s_setvskip": "SOPC",
	// SOPP: scalar program-flow/control format.
	"s_nop": "SOPP",
	"s_endpgm": "SOPP",
	"s_branch": "SOPP",
	"s_cbranch_scc0": "SOPP",
	"s_cbranch_scc1": "SOPP",
	"s_cbranch_vccz": "SOPP",
	"s_cbranch_vccnz": "SOPP",
	"s_cbranch_execz": "SOPP",
	"s_cbranch_execnz": "SOPP",
	"s_barrier": "SOPP",
	"s_waitcnt": "SOPP",
	"s_sethalt": "SOPP",
	"s_sleep": "SOPP",
	"s_setprio": "SOPP",
	"s_sendmsg": "SOPP",
	"s_sendmsghalt": "SOPP",
	"s_trap": "SOPP",
	"s_icache_inv": "SOPP",
	"s_incperflevel": "SOPP",
	"s_decperflevel": "SOPP",
	"s_ttracedata": "SOPP",
	// SMRD: scalar memory read format.
	"s_load_dword": "SMRD",
	"s_load_dwordx2": "SMRD",
	"s_load_dwordx4": "SMRD",
	"s_load_dwordx8": "SMRD",
	"s_load_dwordx16": "SMRD",
	"s_buffer_load_dword": "SMRD",
	"s_buffer_load_dwordx2": "SMRD",
	"s_buffer_load_dwordx4": "SMRD",
	"s_buffer_load_dwordx8": "SMRD",
	"s_buffer_load_dwordx16": "SMRD",
	"s_memtime": "SMRD",
	"s_dcache_inv": "SMRD",
	// VOP2: vector format with two inputs, one output.
	"v_cndmask_b32": "VOP2",
	"v_readlane_b32": "VOP2",
	"v_writelane_b32": "VOP2",
	"v_add_f32": "VOP2",
	"v_sub_f32": "VOP2",
	"v_subrev_f32": "VOP2",
	"v_mac_legacy_f32": "VOP2",
	"v_mul_legacy_f32": "VOP2",
	"v_mul_f32": "VOP2",
	"v_mul_i32_i24": "VOP2",
	"v_mul_hi_i32_i24": "VOP2",
	"v_mul_u32_u24": "VOP2",
	"v_mul_hi_u32_u24": "VOP2",
	"v_min_legacy_f32": "VOP2",
	"v_max_legacy_f32": "VOP2",
	"v_min_f32": "VOP2",
	"v_max_f32": "VOP2",
	"v_min_i32": "VOP2",
	"v_max_i32": "VOP2",
	"v_min_u32": "VOP2",
	"v_max_u32": "VOP2",
	"v_lshr_b32": "VOP2",
	"v_lshrrev_b32": "VOP2",
	"v_ashr_i32": "VOP2",
	"v_ashrrev_i32": "VOP2",
	"v_lshl_b32": "VOP2",
	"v_lshlrev_b32": "VOP2",
	"v_and_b32": "VOP2",
	"v_or_b32": "VOP2",
	"v_xor_b32": "VOP2",
	"v_bfm_b32": "VOP2",
	"v_mac_f32": "VOP2",
	"v_madmk_f32": "VOP2",
	"v_madak_f32": "VOP2",
	"v_bcnt_u32_b32": "VOP2",
	"v_mbcnt_lo_u32_b32": "VOP2",
	"v_mbcnt_hi_u32_b32": "VOP2",
	"v_add_i32": "VOP2",
	"v_sub_i32": "VOP2",
	"v_subrev_i32": "VOP2",
	"v_addc_u32": "VOP2",
	"v_subb_u32": "VOP2",
	"v_subbrev_u32": "VOP2",
	"v_ldexp_f32": "VOP2",
	"v_cvt_pkaccum_u8_f32": "VOP2",
	"v_cvt_pknorm_i16_f32": "VOP2",
	"v_cvt_pknorm_u16_f32": "VOP2",
	"v_cvt_pkrtz_f16_f32": "VOP2",
	"v_cvt_pk_u16_u32": "VOP2",
	"v_cvt_pk_i16_i32": "VOP2",
	// VOP1: vector format with one input, one output.
	"v_nop": "VOP1",
	"v_mov_b32": "VOP1",
	"v_readfirstlane_b32": "VOP1",
	"v_cvt_i32_f64": "VOP1",
	"v_cvt_f64_i32": "VOP1",
	"v_cvt_f32_i32": "VOP1",
	"v_cvt_f32_u32": "VOP1",
	"v_cvt_u32_f32": "VOP1",
	"v_cvt_i32_f32": "VOP1",
	"v_mov_fed_b32": "VOP1",
	"v_cvt_f16_f32": "VOP1",
	"v_cvt_f32_f16": "VOP1",
	"v_cvt_rpi_i32_f32": "VOP1",
	"v_cvt_flr_i32_f32": "VOP1",
	"v_cvt_off_f32_i4": "VOP1",
	"v_cvt_f32_f64": "VOP1",
	"v_cvt_f64_f32": "VOP1",
	"v_cvt_f32_ubyte0": "VOP1",
	"v_cvt_f32_ubyte1": "VOP1",
	"v_cvt_f32_ubyte2": "VOP1",
	"v_cvt_f32_ubyte3": "VOP1",
	"v_cvt_u32_f64": "VOP1",
	"v_cvt_f64_u32": "VOP1",
	"v_fract_f32": "VOP1",
	"v_trunc_f32": "VOP1",
	"v_ceil_f32": "VOP1",
	"v_rndne_f32": "VOP1",
	"v_floor_f32": "VOP1",
	"v_exp_f32": "VOP1",
	"v_log_clamp_f32": "VOP1",
	"v_log_f32": "VOP1",
	"v_rcp_clamp_f32": "VOP1",
	"v_rcp_legacy_f32": "VOP1",
	"v_rcp_f32": "VOP1",
	"v_rcp_iflag_f32": "VOP1",
	"v_rsq_clamp_f32": "VOP1",
	"v_rsq_legacy_f32": "VOP1",
	"v_rsq_f32": "VOP1",
	"v_rcp_f64": "VOP1",
	"v_rcp_clamp_f64": "VOP1",
	"v_rsq_f64": "VOP1",
	"v_rsq_clamp_f64": "VOP1",
	"v_sqrt_f32": "VOP1",
	"v_sqrt_f64": "VOP1",
	"v_sin_f32": "VOP1",
	"v_cos_f32": "VOP1",
	"v_not_b32": "VOP1",
	"v_bfrev_b32": "VOP1",
	"v_ffbh_u32": "VOP1",
	"v_ffbl_b32": "VOP1",
	"v_ffbh_i32": "VOP1",
	"v_frexp_exp_i32_f64": "VOP1",
	"v_frexp_mant_f64": "VOP1",
	"v_fract_f64": "VOP1",
	"v_frexp_exp_i32_f32": "VOP1",
	"v_frexp_mant_f32": "VOP1",
	"v_clrexcp": "VOP1",
	"v_movreld_b32": "VOP1",
	"v_movrels_b32": "VOP1",
	"v_movrelsd_b32": "VOP1",
	// VOPC: vector compare format.
	"v_cmp_f_f32": "VOPC",
	"v_cmp_lt_f32": "VOPC",
	"v_cmp_eq_f32": "VOPC",
	"v_cmp_le_f32": "VOPC",
	"v_cmp_gt_f32": "VOPC",
	"v_cmp_lg_f32": "VOPC",
	"v_cmp_ge_f32": "VOPC",
	"v_cmp_u_f32": "VOPC",
	"v_cmp_o_f32": "VOPC",
	"v_cmp_nge_f32": "VOPC",
	"v_cmp_nlg_f32": "VOPC",
	"v_cmp_ngt_f32": "VOPC",
	"v_cmp_nle_f32": "VOPC",
	"v_cmp_neq_f32": "VOPC",
	"v_cmp_nlt_f32": "VOPC",
	"v_cmp_tru_f32": "VOPC",
	"v_cmp_f_f64": "VOPC",
	"v_cmp_lt_f64": "VOPC",
	"v_cmp_eq_f64": "VOPC",
	"v_cmp_le_f64": "VOPC",
	"v_cmp_gt_f64": "VOPC",
	"v_cmp_lg_f64": "VOPC",
	"v_cmp_ge_f64": "VOPC",
	"v_cmp_u_f64": "VOPC",
	"v_cmp_o_f64": "VOPC",
	"v_cmp_nge_f64": "VOPC",
	"v_cmp_nlg_f64": "VOPC",
	"v_cmp_ngt_f64": "VOPC",
	"v_cmp_nle_f64": "VOPC",
	"v_cmp_neq_f64": "VOPC",
	"v_cmp_nlt_f64": "VOPC",
	"v_cmp_tru_f64": "VOPC",
	"v_cmpx_f_f64": "VOPC",
	"v_cmpx_lt_f64": "VOPC",
	"v_cmpx_eq_f64": "VOPC",
	"v_cmpx_le_f64": "VOPC",
	"v_cmpx_gt_f64": "VOPC",
	"v_cmpx_lg_f64": "VOPC",
	"v_cmpx_ge_f64": "VOPC",
	"v_cmpx_u_f64": "VOPC",
	"v_cmpx_o_f64": "VOPC",
	"v_cmpx_nge_f64": "VOPC",
	"v_cmpx_nlg_f64": "VOPC",
	"v_cmpx_ngt_f64": "VOPC",
	"v_cmpx_nle_f64": "VOPC",
	"v_cmpx_neq_f64": "VOPC",
	"v_cmpx_nlt_f64": "VOPC",
	"v_cmpx_tru_f64": "VOPC",
	"v_cmps_f_f32": "VOPC",
	"v_cmps_lt_f32": "VOPC",
	"v_cmps_eq_f32": "VOPC",
	"v_cmps_le_f32": "VOPC",
	"v_cmps_gt_f32": "VOPC",
	"v_cmps_lg_f32": "VOPC",
	"v_cmps_ge_f32": "VOPC",
	"v_cmps_u_f32": "VOPC",
	"v_cmps_o_f32": "VOPC",
	"v_cmps_nge_f32": "VOPC",
	"v_cmps_nlg_f32": "VOPC",
	"v_cmps_ngt_f32": "VOPC",
	"v_cmps_nle_f32": "VOPC",
	"v_cmps_neq_f32": "VOPC",
	"v_cmps_nlt_f32": "VOPC",
	"v_cmps_tru_f32": "VOPC",
	"v_cmpsx_f_f32": "VOPC",
	"v_cmpsx_lt_f32": "VOPC",
	"v_cmpsx_eq_f32": "VOPC",
	"v_cmpsx_le_f32": "VOPC",
	"v_cmpsx_gt_f32": "VOPC",
	"v_cmpsx_lg_f32": "VOPC",
	"v_cmpsx_ge_f32": "VOPC",
	"v_cmpsx_u_f32": "VOPC",
	"v_cmpsx_o_f32": "VOPC",
	"v_cmpsx_nge_f32": "VOPC",
	"v_cmpsx_nlg_f32": "VOPC",
	"v_cmpsx_ngt_f32": "VOPC",
	"v_cmpsx_nle_f32": "VOPC",
	"v_cmpsx_neq_f32": "VOPC",
	"v_cmpsx_nlt_f32": "VOPC",
	"v_cmpsx_tru_f32": "VOPC",
	"v_cmps_f_f64": "VOPC",
	"v_cmps_lt_f64": "VOPC",
	"v_cmps_eq_f64": "VOPC",
	"v_cmps_le_f64": "VOPC",
	"v_cmps_gt_f64": "VOPC",
	"v_cmps_lg_f64": "VOPC",
	"v_cmps_ge_f64": "VOPC",
	"v_cmps_u_f64": "VOPC",
	"v_cmps_o_f64": "VOPC",
	"v_cmps_nge_f64": "VOPC",
	"v_cmps_nlg_f64": "VOPC",
	"v_cmps_ngt_f64": "VOPC",
	"v_cmps_nle_f64": "VOPC",
	"v_cmps_neq_f64": "VOPC",
	"v_cmps_nlt_f64": "VOPC",
	"v_cmps_tru_f64": "VOPC",
	"v_cmpsx_f_f64": "VOPC",
	"v_cmpsx_lt_f64": "VOPC",
	"v_cmpsx_eq_f64": "VOPC",
	"v_cmpsx_le_f64": "VOPC",
	"v_cmpsx_gt_f64": "VOPC",
	"v_cmpsx_lg_f64": "VOPC",
	"v_cmpsx_ge_f64": "VOPC",
	"v_cmpsx_u_f64": "VOPC",
	"v_cmpsx_o_f64": "VOPC",
	"v_cmpsx_nge_f64": "VOPC",
	"v_cmpsx_nlg_f64": "VOPC",
	"v_cmpsx_ngt_f64": "VOPC",
	"v_cmpsx_nle_f64": "VOPC",
	"v_cmpsx_neq_f64": "VOPC",
	"v_cmpsx_nlt_f64": "VOPC",
	"v_cmpsx_tru_f64": "VOPC",
	"v_cmp_f_i32": "VOPC",
	"v_cmp_lt_i32": "VOPC",
	"v_cmp_eq_i32": "VOPC",
	"v_cmp_le_i32": "VOPC",
	"v_cmp_gt_i32": "VOPC",
	"v_cmp_lg_i32": "VOPC",
	"v_cmp_ge_i32": "VOPC",
	"v_cmp_o_i32": "VOPC",
	"v_cmp_u_i32": "VOPC",
	"v_cmp_nge_i32": "VOPC",
	"v_cmp_nlg_i32": "VOPC",
	"v_cmp_ngt_i32": "VOPC",
	"v_cmp_nle_i32": "VOPC",
	"v_cmp_ne_i32": "VOPC",
	"v_cmp_nlt_i32": "VOPC",
	"v_cmp_tru_i32": "VOPC",
	"v_cmpx_f_i32": "VOPC",
	"v_cmpx_lt_i32": "VOPC",
	"v_cmpx_eq_i32": "VOPC",
	"v_cmpx_le_i32": "VOPC",
	"v_cmpx_gt_i32": "VOPC",
	"v_cmpx_lg_i32": "VOPC",
	"v_cmpx_ge_i32": "VOPC",
	"v_cmpx_o_i32": "VOPC",
	"v_cmpx_u_i32": "VOPC",
	"v_cmpx_nge_i32": "VOPC",
	"v_cmpx_nlg_i32": "VOPC",
	"v_cmpx_ngt_i32": "VOPC",
	"v_cmpx_nle_i32": "VOPC",
	"v_cmpx_ne_i32": "VOPC",
	"v_cmpx_nlt_i32": "VOPC",
	"v_cmpx_tru_i32": "VOPC",
	"v_cmp_f_i64": "VOPC",
	"v_cmp_lt_i64": "VOPC",
	"v_cmp_eq_i64": "VOPC",
	"v_cmp_le_i64": "VOPC",
	"v_cmp_gt_i64": "VOPC",
	"v_cmp_lg_i64": "VOPC",
	"v_cmp_ge_i64": "VOPC",
	"v_cmp_o_i64": "VOPC",
	"v_cmp_u_i64": "VOPC",
	"v_cmp_nge_i64": "VOPC",
	"v_cmp_nlg_i64": "VOPC",
	"v_cmp_ngt_i64": "VOPC",
	"v_cmp_nle_i64": "VOPC",
	"v_cmp_ne_i64": "VOPC",
	"v_cmp_nlt_i64": "VOPC",
	"v_cmp_tru_i64": "VOPC",
	"v_cmpx_f_i64": "VOPC",
	"v_cmpx_lt_i64": "VOPC",
	"v_cmpx_eq_i64": "VOPC",
	"v_cmpx_le_i64": "VOPC",
	"v_cmpx_gt_i64": "VOPC",
	"v_cmpx_lg_i64": "VOPC",
	"v_cmpx_ge_i64": "VOPC",
	"v_cmpx_o_i64": "VOPC",
	"v_cmpx_u_i64": "VOPC",
	"v_cmpx_nge_i64": "VOPC",
	"v_cmpx_nlg_i64": "VOPC",
	"v_cmpx_ngt_i64": "VOPC",
	"v_cmpx_nle_i64": "VOPC",
	"v_cmpx_ne_i64": "VOPC",
	"v_cmpx_nlt_i64": "VOPC",
	"v_cmpx_tru_i64": "VOPC",
	"v_cmp_f_u32": "VOPC",
	"v_cmp_lt_u32": "VOPC",
	"v_cmp_eq_u32": "VOPC",
	"v_cmp_le_u32": "VOPC",
	"v_cmp_gt_u32": "VOPC",
	"v_cmp_lg_u32": "VOPC",
	"v_cmp_ge_u32": "VOPC",
	"v_cmp_o_u32": "VOPC",
	"v_cmp_u_u32": "VOPC",
	"v_cmp_nge_u32": "VOPC",
	"v_cmp_nlg_u32": "VOPC",
	"v_cmp_ngt_u32": "VOPC",
	"v_cmp_nle_u32": "VOPC",
	"v_cmp_ne_u32": "VOPC",
	"v_cmp_nlt_u32": "VOPC",
	"v_cmp_tru_u32": "VOPC",
	"v_cmpx_f_u32": "VOPC",
	"v_cmpx_lt_u32": "VOPC",
	"v_cmpx_eq_u32": "VOPC",
	"v_cmpx_le_u32": "VOPC",
	"v_cmpx_gt_u32": "VOPC",
	"v_cmpx_lg_u32": "VOPC",
	"v_cmpx_ge_u32": "VOPC",
	"v_cmpx_o_u32": "VOPC",
	"v_cmpx_u_u32": "VOPC",
	"v_cmpx_nge_u32": "VOPC",
	"v_cmpx_nlg_u32": "VOPC",
	"v_cmpx_ngt_u32": "VOPC",
	"v_cmpx_nle_u32": "VOPC",
	"v_cmpx_ne_u32": "VOPC",
	"v_cmpx_nlt_u32": "VOPC",
	"v_cmpx_tru_u32": "VOPC",
	"v_cmp_f_u64": "VOPC",
	"v_cmp_lt_u64": "VOPC",
	"v_cmp_eq_u64": "VOPC",
	"v_cmp_le_u64": "VOPC",
	"v_cmp_gt_u64": "VOPC",
	"v_cmp_lg_u64": "VOPC",
	"v_cmp_ge_u64": "VOPC",
	"v_cmp_o_u64": "VOPC",
	"v_cmp_u_u64": "VOPC",
	"v_cmp_nge_u64": "VOPC",
	"v_cmp_nlg_u64": "VOPC",
	"v_cmp_ngt_u64": "VOPC",
	"v_cmp_nle_u64": "VOPC",
	"v_cmp_ne_u64": "VOPC",
	"v_cmp_nlt_u64": "VOPC",
	"v_cmp_tru_u64": "VOPC",
	"v_cmpx_f_u64": "VOPC",
	"v_cmpx_lt_u64": "VOPC",
	"v_cmpx_eq_u64": "VOPC",
	"v_cmpx_le_u64": "VOPC",
	"v_cmpx_gt_u64": "VOPC",
	"v_cmpx_lg_u64": "VOPC",
	"v_cmpx_ge_u64": "VOPC",
	"v_cmpx_o_u64": "VOPC",
	"v_cmpx_u_u64": "VOPC",
	"v_cmpx_nge_u64": "VOPC",
	"v_cmpx_nlg_u64": "VOPC",
	"v_cmpx_ngt_u64": "VOPC",
	"v_cmpx_nle_u64": "VOPC",
	"v_cmpx_ne_u64": "VOPC",
	"v_cmpx_nlt_u64": "VOPC",
	"v_cmpx_tru_u64": "VOPC",
	// VOP3A: vector format with up to three inputs.
	"v_mad_legacy_f32": "VOP3A",
	"v_mad_f32": "VOP3A",
	"v_mad_i32_i24": "VOP3A",
	"v_mad_u32_u24": "VOP3A",
	"v_cubeid_f32": "VOP3A",
	"v_cubesc_f32": "VOP3A",
	"v_cubetc_f32": "VOP3A",
	"v_cubema_f32": "VOP3A",
	"v_bfe_u32": "VOP3A",
	"v_bfe_i32": "VOP3A",
	"v_bfi_b32": "VOP3A",
	"v_fma_f32": "VOP3A",
	"v_fma_f64": "VOP3A",
	"v_lerp_u8": "VOP3A",
	"v_alignbit_b32": "VOP3A",
	"v_alignbyte_b32": "VOP3A",
	"v_mullit_f32": "VOP3A",
	"v_min3_f32": "VOP3A",
	"v_min3_i32": "VOP3A",
	"v_min3_u32": "VOP3A",
	"v_max3_f32": "VOP3A",
	"v_max3_i32": "VOP3A",
	"v_max3_u32": "VOP3A",
	"v_med3_f32": "VOP3A",
	"v_med3_i32": "VOP3A",
	"v_med3_u32": "VOP3A",
	"v_sad_u8": "VOP3A",
	"v_sad_hi_u8": "VOP3A",
	"v_sad_u16": "VOP3A",
	"v_sad_u32": "VOP3A",
	"v_cvt_pk_u8_f32": "VOP3A",
	"v_div_fixup_f32": "VOP3A",
	"v_div_fixup_f64": "VOP3A",
	"v_lshl_b64": "VOP3A",
	"v_lshr_b64": "VOP3A",
	"v_ashr_i64": "VOP3A",
	"v_add_f64": "VOP3A",
	"v_mul_f64": "VOP3A",
	"v_min_f64": "VOP3A",
	"v_max_f64": "VOP3A",
	"v_ldexp_f64": "VOP3A",
	"v_mul_lo_u32": "VOP3A",
	"v_mul_hi_u32": "VOP3A",
	"v_mul_lo_i32": "VOP3A",
	"v_mul_hi_i32": "VOP3A",
	"v_div_fmas_f32": "VOP3A",
	"v_div_fmas_f64": "VOP3A",
	"v_msad_u8": "VOP3A",
	"v_qsad_u8": "VOP3A",
	"v_mqsad_u8": "VOP3A",
	"v_trig_preop_f64": "VOP3A",
	// VOP3B: vector three-input format with an extra scalar destination.
	"v_div_scale_f32": "VOP3B",
	"v_div_scale_f64": "VOP3B",
	// VINTRP: vector parameter interpolation format.
	"v_interp_p1_f32": "VINTRP",
	"v_interp_p2_f32": "VINTRP",
	"v_interp_mov_f32": "VINTRP",
	// DS: local/global data share format.
	"ds_add_u32": "DS",
	"ds_sub_u32": "DS",
	"ds_rsub_u32": "DS",
	"ds_inc_u32": "DS",
	"ds_dec_u32": "DS",
	"ds_min_i32": "DS",
	"ds_max_i32": "DS",
	"ds_min_u32": "DS",
	"ds_max_u32": "DS",
	"ds_and_b32": "DS",
	"ds_or_b32": "DS",
	"ds_xor_b32": "DS",
	"ds_mskor_b32": "DS",
	"ds_write_b32": "DS",
	"ds_write2_b32": "DS",
	"ds_write2st64_b32": "DS",
	"ds_cmpst_b32": "DS",
	"ds_cmpst_f32": "DS",
	"ds_min_f32": "DS",
	"ds_max_f32": "DS",
	"ds_gws_init": "DS",
	"ds_gws_sema_v": "DS",
	"ds_gws_sema_br": "DS",
	"ds_gws_sema_p": "DS",
	"ds_gws_barrier": "DS",
	"ds_write_b8": "DS",
	"ds_write_b16": "DS",
	"ds_add_rtn_u32": "DS",
	"ds_sub_rtn_u32": "DS",
	"ds_rsub_rtn_u32": "DS",
	"ds_inc_rtn_u32": "DS",
	"ds_dec_rtn_u32": "DS",
	"ds_min_rtn_i32": "DS",
	"ds_max_rtn_i32": "DS",
	"ds_min_rtn_u32": "DS",
	"ds_max_rtn_u32": "DS",
	"ds_and_rtn_b32": "DS",
	"ds_or_rtn_b32": "DS",
	"ds_xor_rtn_b32": "DS",
	"ds_mskor_rtn_b32": "DS",
	"ds_wrxchg_rtn_b32": "DS",
	"ds_wrxchg2st64_rtn_b32": "DS",
	"ds_cmpst_rtn_b32": "DS",
	"ds_cmpst_rtn_f32": "DS",
	"ds_min_rtn_f32": "DS",
	"ds_max_rtn_f32": "DS",
	"ds_wrxchg2_rtn_b32": "DS",
	"ds_swizzle_b32": "DS",
	"ds_read_b32": "DS",
	"ds_read2_b32": "DS",
	"ds_read2st64_b32": "DS",
	"ds_read_i8": "DS",
	"ds_read_u8": "DS",
	"ds_read_i16": "DS",
	"ds_read_u16": "DS",
	"ds_consume": "DS",
	"ds_append": "DS",
	"ds_ordered_count": "DS",
	"ds_add_u64": "DS",
	"ds_sub_u64": "DS",
	"ds_rsub_u64": "DS",
	"ds_inc_u64": "DS",
	"ds_dec_u64": "DS",
	"ds_min_i64": "DS",
	"ds_max_i64": "DS",
	"ds_min_u64": "DS",
	"ds_max_u64": "DS",
	"ds_and_b64": "DS",
	"ds_or_b64": "DS",
	"ds_xor_b64": "DS",
	"ds_mskor_b64": "DS",
	"ds_write_b64": "DS",
	"ds_write2_b64": "DS",
	"ds_write2st64_b64": "DS",
	"ds_cmpst_b64": "DS",
	"ds_cmpst_f64": "DS",
	"ds_min_f64": "DS",
	"ds_max_f64": "DS",
	"ds_add_rtn_u64": "DS",
	"ds_sub_rtn_u64": "DS",
	"ds_rsub_rtn_u64": "DS",
	"ds_inc_rtn_u64": "DS",
	"ds_dec_rtn_u64": "DS",
	"ds_min_rtn_i64": "DS",
	"ds_max_rtn_i64": "DS",
	"ds_min_rtn_u64": "DS",
	"ds_max_rtn_u64": "DS",
	"ds_and_rtn_b64": "DS",
	"ds_or_rtn_b64": "DS",
	"ds_xor_rtn_b64": "DS",
	"ds_mskor_rtn_b64": "DS",
	"ds_wrxchg_rtn_b64": "DS",
	"ds_wrxchg2_rtn_b64": "DS",
	"ds_wrxchg2st64_rtn_b64": "DS",
	"ds_cmpst_rtn_b64": "DS",
	"ds_cmpst_rtn_f64": "DS",
	"ds_min_rtn_f64": "DS",
	"ds_max_rtn_f64": "DS",
	"ds_read_b64": "DS",
	"ds_read2_b64": "DS",
	"ds_read2st64_b64": "DS",
	"ds_add_src2_u32": "DS",
	"ds_sub_src2_u32": "DS",
	"ds_rsub_src2_u32": "DS",
	"ds_inc_src2_u32": "DS",
	"ds_dec_src2_u32": "DS",
	"ds_min_src2_i32": "DS",
	"ds_max_src2_i32": "DS",
	"ds_min_src2_u32": "DS",
	"ds_max_src2_u32": "DS",
	"ds_and_src2_b32": "DS",
	"ds_or_src2_b32": "DS",
	"ds_xor_src2_b32": "DS",
	"ds_write_src2_b32": "DS",
	"ds_min_src2_f32": "DS",
	"ds_max_src2_f32": "DS",
	"ds_add_src2_u64": "DS",
	"ds_sub_src2_u64": "DS",
	"ds_rsub_src2_u64": "DS",
	"ds_inc_src2_u64": "DS",
	"ds_dec_src2_u64": "DS",
	"ds_min_src2_i64": "DS",
	"ds_max_src2_i64": "DS",
	"ds_min_src2_u64": "DS",
	"ds_max_src2_u64": "DS",
	"ds_and_src2_b64": "DS",
	"ds_or_src2_b64": "DS",
	"ds_xor_src2_b64": "DS",
	"ds_write_src2_b64": "DS",
	"ds_min_src2_f64": "DS",
	"ds_max_src2_f64": "DS",
	// MUBUF: untyped buffer memory format.
	"buffer_load_format_x": "MUBUF",
	"buffer_load_format_xy": "MUBUF",
	"buffer_load_format_xyz": "MUBUF",
	"buffer_load_format_xyzw": "MUBUF",
	"buffer_store_format_x": "MUBUF",
	"buffer_store_format_xy": "MUBUF",
	"buffer_store_format_xyz": "MUBUF",
	"buffer_store_format_xyzw": "MUBUF",
	"buffer_load_ubyte": "MUBUF",
	"buffer_load_sbyte": "MUBUF",
	"buffer_load_ushort": "MUBUF",
	"buffer_load_sshort": "MUBUF",
	"buffer_load_dword": "MUBUF",
	"buffer_load_dwordx2": "MUBUF",
	"buffer_load_dwordx4": "MUBUF",
	"buffer_store_byte": "MUBUF",
	"buffer_store_short": "MUBUF",
	"buffer_store_dword": "MUBUF",
	"buffer_store_dwordx2": "MUBUF",
	"buffer_store_dwordx4": "MUBUF",
	"buffer_atomic_swap": "MUBUF",
	"buffer_atomic_cmpswap": "MUBUF",
	"buffer_atomic_add": "MUBUF",
	"buffer_atomic_sub": "MUBUF",
	"buffer_atomic_rsub": "MUBUF",
	"buffer_atomic_smin": "MUBUF",
	"buffer_atomic_umin": "MUBUF",
	"buffer_atomic_smax": "MUBUF",
	"buffer_atomic_umax": "MUBUF",
	"buffer_atomic_and": "MUBUF",
	"buffer_atomic_or": "MUBUF",
	"buffer_atomic_xor": "MUBUF",
	"buffer_atomic_inc": "MUBUF",
	"buffer_atomic_dec": "MUBUF",
	"buffer_atomic_fcmpswap": "MUBUF",
	"buffer_atomic_fmin": "MUBUF",
	"buffer_atomic_fmax": "MUBUF",
	"buffer_atomic_swap_x2": "MUBUF",
	"buffer_atomic_cmpswap_x2": "MUBUF",
	"buffer_atomic_add_x2": "MUBUF",
	"buffer_atomic_sub_x2": "MUBUF",
	"buffer_atomic_rsub_x2": "MUBUF",
	"buffer_atomic_smin_x2": "MUBUF",
	"buffer_atomic_umin_x2": "MUBUF",
	"buffer_atomic_smax_x2": "MUBUF",
	"buffer_atomic_umax_x2": "MUBUF",
	"buffer_atomic_and_x2": "MUBUF",
	"buffer_atomic_or_x2": "MUBUF",
	"buffer_atomic_xor_x2": "MUBUF",
	"buffer_atomic_inc_x2": "MUBUF",
	"buffer_atomic_dec_x2": "MUBUF",
	"buffer_atomic_fcmpswap_x2": "MUBUF",
	"buffer_atomic_fmin_x2": "MUBUF",
	"buffer_atomic_fmax_x2": "MUBUF",
	"buffer_wbinvl1_sc": "MUBUF",
	"buffer_wbinvl1": "MUBUF",
	// MTBUF: typed buffer memory format.
	"tbuffer_load_format_x": "MTBUF",
	"tbuffer_load_format_xy": "MTBUF",
	"tbuffer_load_format_xyz": "MTBUF",
	"tbuffer_load_format_xyzw": "MTBUF",
	"tbuffer_store_format_x": "MTBUF",
	"tbuffer_store_format_xy": "MTBUF",
	"tbuffer_store_format_xyz": "MTBUF",
	"tbuffer_store_format_xyzw": "MTBUF",
	// MIMG: image memory format.
	"image_load": "MIMG",
	"image_load_mip": "MIMG",
	"image_load_pck": "MIMG",
	"image_load_pck_sgn": "MIMG",
	"image_load_mip_pck": "MIMG",
	"image_load_mip_pck_sgn": "MIMG",
	"image_store": "MIMG",
	"image_store_mip": "MIMG",
	"image_store_pck": "MIMG",
	"image_store_mip_pck": "MIMG",
	"image_atomic_swap": "MIMG",
	"image_atomic_cmpswap": "MIMG",
	"image_atomic_add": "MIMG",
	"image_atomic_sub": "MIMG",
	"image_atomic_rsub": "MIMG",
	"image_atomic_smin": "MIMG",
	"image_atomic_umin": "MIMG",
	"image_atomic_smax": "MIMG",
	"image_atomic_umax": "MIMG",
	"image_atomic_and": "MIMG",
	"image_atomic_or": "MIMG",
	"image_atomic_xor": "MIMG",
	"image_atomic_inc": "MIMG",
	"image_atomic_dec": "MIMG",
	"image_atomic_fcmpswap": "MIMG",
	"image_atomic_fmin": "MIMG",
	"image_atomic_fmax": "MIMG",
	"image_sample": "MIMG",
	"image_sample_cl": "MIMG",
	"image_sample_d": "MIMG",
	"image_sample_d_cl": "MIMG",
	"image_sample_l": "MIMG",
	"image_sample_b": "MIMG",
	"image_sample_b_cl": "MIMG",
	"image_sample_lz": "MIMG",
	"image_sample_c": "MIMG",
	"image_sample_c_cl": "MIMG",
	"image_sample_c_d": "MIMG",
	"image_sample_c_d_cl": "MIMG",
	"image_sample_c_l": "MIMG",
	"image_sample_c_b": "MIMG",
	"image_sample_c_b_cl": "MIMG",
	"image_sample_c_lz": "MIMG",
	"image_sample_o": "MIMG",
	"image_sample_cl_o": "MIMG",
	"image_sample_d_o": "MIMG",
	"image_sample_d_cl_o": "MIMG",
	"image_sample_l_o": "MIMG",
	"image_sample_b_o": "MIMG",
	"image_sample_b_cl_o": "MIMG",
	"image_sample_lz_o": "MIMG",
	"image_sample_c_o": "MIMG",
	"image_sample_c_cl_o": "MIMG",
	"image_sample_c_d_o": "MIMG",
	"image_sample_c_d_cl_o": "MIMG",
	"image_sample_c_l_o": "MIMG",
	"image_sample_c_b_o": "MIMG",
	"image_sample_c_b_cl_o": "MIMG",
	"image_sample_c_lz_o": "MIMG",
	"image_sample_cd": "MIMG",
	"image_sample_cd_cl": "MIMG",
	"image_sample_c_cd": "MIMG",
	"image_sample_c_cd_cl": "MIMG",
	"image_sample_cd_o": "MIMG",
	"image_sample_cd_cl_o": "MIMG",
	"image_sample_c_cd_o": "MIMG",
	"image_sample_c_cd_cl_o": "MIMG",
	"image_gather4": "MIMG",
	"image_gather4_cl": "MIMG",
	"image_gather4_l": "MIMG",
	"image_gather4_b": "MIMG",
	"image_gather4_b_cl": "MIMG",
	"image_gather4_lz": "MIMG",
	"image_gather4_c": "MIMG",
	"image_gather4_c_cl": "MIMG",
	"image_gather4_c_l": "MIMG",
	"image_gather4_c_b": "MIMG",
	"image_gather4_c_b_cl": "MIMG",
	"image_gather4_c_lz": "MIMG",
	"image_gather4_o": "MIMG",
	"image_gather4_cl_o": "MIMG",
	"image_gather4_l_o": "MIMG",
	"image_gather4_b_o": "MIMG",
	"image_gather4_b_cl_o": "MIMG",
	"image_gather4_lz_o": "MIMG",
	"image_gather4_c_o": "MIMG",
	"image_gather4_c_cl_o": "MIMG",
	"image_gather4_c_l_o": "MIMG",
	"image_gather4_c_b_o": "MIMG",
	"image_gather4_c_b_cl_o": "MIMG",
	"image_gather4_c_lz_o": "MIMG",
	"image_get_resinfo": "MIMG",
	"image_get_lod": "MIMG",
	// EXP: export format.
	"export": "EXP",
}
package deep
import (
"fmt"
)
// Neural is a neural network
// Neural is a neural network
type Neural struct {
	Layers []*Layer     // ordered layers, first layer receives the input
	Biases [][]*Synapse // per-layer bias synapses; only populated when Config.Bias is set
	Config *Config      // topology and hyperparameter configuration
}
// Config defines the network topology, activations, losses etc
type Config struct {
// Number of inputs
Inputs int
// Defines topology:
// For instance, [5 3 3] signifies a network with two hidden layers
// containing 5 and 3 nodes respectively, followed an output layer
// containing 3 nodes.
Layout []int
// Activation functions: {ActivationTanh, ActivationReLU, ActivationSigmoid}
Activation ActivationType
// Solver modes: {ModeRegression, ModeBinary, ModeMultiClass, ModeMultiLabel}
Mode Mode
// Initializer for weights: {NewNormal(σ, μ), NewUniform(σ, μ)}
Weight WeightInitializer `json:"-"`
// Loss functions: {LossCrossEntropy, LossBinaryCrossEntropy, LossMeanSquared}
Loss LossType
// Apply bias nodes
Bias bool
}
// NewNeural returns a new neural network
// NewNeural returns a new neural network built from the given configuration.
// Unset configuration values (Weight, Activation, Loss) are filled in with
// defaults before the layers are constructed.
func NewNeural(c *Config) *Neural {
	applyConfigDefaults(c)

	layers := initializeLayers(c)

	var biases [][]*Synapse
	if c.Bias {
		biases = make([][]*Synapse, len(layers))
		lastIdx := len(layers) - 1
		for i, layer := range layers {
			// Regression networks do not get a bias on the output layer.
			if c.Mode == ModeRegression && i == lastIdx {
				continue
			}
			biases[i] = layer.ApplyBias(c.Weight)
		}
	}

	return &Neural{
		Layers: layers,
		Biases: biases,
		Config: c,
	}
}

// applyConfigDefaults fills in defaults for unset configuration values:
// weight initializer, activation function, and a mode-appropriate loss.
func applyConfigDefaults(c *Config) {
	if c.Weight == nil {
		c.Weight = NewUniform(0.5, 0)
	}
	if c.Activation == ActivationNone {
		c.Activation = ActivationSigmoid
	}
	if c.Loss == LossNone {
		switch c.Mode {
		case ModeMultiClass, ModeMultiLabel:
			c.Loss = LossCrossEntropy
		case ModeBinary:
			c.Loss = LossBinaryCrossEntropy
		default:
			c.Loss = LossMeanSquared
		}
	}
}
// initializeLayers constructs the layers described by c.Layout, fully
// connects consecutive layers, and attaches input synapses to the first
// layer's neurons.
func initializeLayers(c *Config) []*Layer {
	lastIdx := len(c.Layout) - 1
	layers := make([]*Layer, len(c.Layout))
	for i, width := range c.Layout {
		activation := c.Activation
		// The output layer may use a mode-specific activation.
		if i == lastIdx && c.Mode != ModeDefault {
			activation = OutputActivation(c.Mode)
		}
		layers[i] = NewLayer(width, activation)
	}

	// Fully connect each layer to its successor.
	for i := 0; i < len(layers)-1; i++ {
		layers[i].Connect(layers[i+1], c.Weight)
	}

	// Attach one input synapse per network input to every first-layer neuron.
	for _, neuron := range layers[0].Neurons {
		inputs := make([]*Synapse, c.Inputs)
		for i := range inputs {
			inputs[i] = NewSynapse(c.Weight())
		}
		neuron.In = inputs
	}

	return layers
}
// fire activates all bias synapses (with a constant input of 1) and then
// propagates values through each layer in order.
func (n *Neural) fire() {
	for _, layerBiases := range n.Biases {
		for _, bias := range layerBiases {
			bias.fire(1)
		}
	}
	for _, layer := range n.Layers {
		layer.fire()
	}
}
// Forward computes a forward pass
// Forward computes a forward pass. It returns an error (without firing the
// network) when the input length does not match the configured number of
// inputs.
func (n *Neural) Forward(input []float64) error {
	if len(input) != n.Config.Inputs {
		return fmt.Errorf("Invalid input dimension - expected: %d got: %d", n.Config.Inputs, len(input))
	}
	// Feed each input value into the matching input synapse of every neuron
	// in the first layer. (The loop variable is named neuron here; the
	// original shadowed the receiver n.)
	for _, neuron := range n.Layers[0].Neurons {
		for i := 0; i < len(input); i++ {
			neuron.In[i].fire(input[i])
		}
	}
	n.fire()
	return nil
}
// Predict computes a forward pass and returns a prediction
// Predict computes a forward pass and returns the output layer's values.
func (n *Neural) Predict(input []float64) []float64 {
	// The forward-pass error (wrong input dimension) is deliberately
	// discarded to keep this convenience signature; callers that need to
	// detect dimension mismatches should call Forward directly.
	_ = n.Forward(input)
	outLayer := n.Layers[len(n.Layers)-1]
	out := make([]float64, len(outLayer.Neurons))
	for i, neuron := range outLayer.Neurons {
		out[i] = neuron.Value
	}
	return out
}
// NumWeights returns the number of weights in the network
// NumWeights returns the total number of input synapses (weights) across
// all neurons in the network.
func (n *Neural) NumWeights() (num int) {
	for _, layer := range n.Layers {
		for _, neuron := range layer.Neurons {
			num += len(neuron.In)
		}
	}
	return num
}
func (n *Neural) String() string {
var s string
for _, l := range n.Layers {
s = fmt.Sprintf("%s\n%s", s, l)
}
return s
} | plugins/data/learn/ml-libs-godeep/neural.go | 0.765155 | 0.500183 | neural.go | starcoder |
package parallel
import (
"encoding/base32"
"strconv"
"strings"
"time"
"github.com/mitchellh/hashstructure/v2"
"github.com/pkg/errors"
"k8s.io/utils/pointer"
execution "github.com/furiko-io/furiko/apis/execution/v1alpha1"
"github.com/furiko-io/furiko/pkg/utils/matrix"
)
// GenerateIndexes generates the indexes for a ParallelismSpec.
// The order of results is guaranteed to be deterministic.
func GenerateIndexes(spec *execution.ParallelismSpec) []execution.ParallelIndex {
if spec == nil {
spec = &execution.ParallelismSpec{}
}
switch {
case spec.WithCount != nil:
indexes := make([]execution.ParallelIndex, *spec.WithCount)
for i := int64(0); i < *spec.WithCount; i++ {
indexes[i] = execution.ParallelIndex{
IndexNumber: pointer.Int64(i),
}
}
return indexes
case len(spec.WithKeys) > 0:
indexes := make([]execution.ParallelIndex, 0, len(spec.WithKeys))
for _, key := range spec.WithKeys {
indexes = append(indexes, execution.ParallelIndex{
IndexKey: key,
})
}
return indexes
case len(spec.WithMatrix) > 0:
combinations := matrix.GenerateMatrixCombinations(spec.WithMatrix)
indexes := make([]execution.ParallelIndex, 0, len(combinations))
for _, combination := range combinations {
indexes = append(indexes, execution.ParallelIndex{
MatrixValues: combination,
})
}
return indexes
}
// Default to single count index.
return []execution.ParallelIndex{GetDefaultIndex()}
}
// GetDefaultIndex returns the default ParallelIndex for a non-parallel job.
func GetDefaultIndex() execution.ParallelIndex {
return execution.ParallelIndex{
IndexNumber: pointer.Int64(0),
}
}
// HashIndex returns a deterministic hash of a ParallelIndex.
// For example, the result of GetDefaultIndex() returns "gezdqo".
func HashIndex(index execution.ParallelIndex) (string, error) {
hashInt, err := hashstructure.Hash(index, hashstructure.FormatV2, nil)
if err != nil {
return "", errors.Wrapf(err, "cannot hash index")
}
hash := base32.StdEncoding.EncodeToString([]byte(strconv.FormatUint(hashInt, 10)))
// NOTE(irvinlim): Use first 6 bytes which should be sufficient for most cases.
return strings.ToLower(hash[:6]), nil
}
// HashIndexes returns mapping of hashes of ParallelIndex. The first maps the
// slice index to the hash, and the second maps the hash to the slice index.
func HashIndexes(indexes []execution.ParallelIndex) (map[int]string, map[string]int, error) {
hashes := make(map[int]string, len(indexes))
hashesIdx := make(map[string]int, len(indexes))
for i, index := range indexes {
hash, err := HashIndex(index)
if err != nil {
return nil, nil, errors.Wrapf(err, "cannot hash index %v", index)
}
hashesIdx[hash] = i
hashes[i] = hash
}
return hashes, hashesIdx, nil
}
// IndexCreationRequest contains an index that should be created, and the earliest time it can be created.
type IndexCreationRequest struct {
ParallelIndex execution.ParallelIndex
RetryIndex int64
Earliest time.Time
}
// ComputeMissingIndexesForCreation returns a list of expected indexes based on taskStatuses.
func ComputeMissingIndexesForCreation(
job *execution.Job,
indexes []execution.ParallelIndex,
) ([]IndexCreationRequest, error) {
foundList := make([]bool, len(indexes))
nextRetryIndex := make(map[string]int64, len(indexes))
latestFinishTimeByIndex := make(map[string]time.Time, len(indexes))
// Hash each index, and store a mapping of hash to index to look up later.
hashes, hashesIdx, err := HashIndexes(indexes)
if err != nil {
return nil, err
}
// Iterate all existing tasks in the Job's status.
for _, task := range job.Status.Tasks {
index := GetDefaultIndex()
if task.ParallelIndex != nil {
index = *task.ParallelIndex
}
hash, err := HashIndex(index)
if err != nil {
return nil, errors.Wrapf(err, "cannot hash index %v", index)
}
// Record the maximum retry index and finish time.
if nextRetryIndex[hash] < task.RetryIndex+1 {
nextRetryIndex[hash] = task.RetryIndex + 1
}
if finish := task.FinishTimestamp; !finish.IsZero() && latestFinishTimeByIndex[hash].Before(finish.Time) {
latestFinishTimeByIndex[hash] = finish.Time
}
// Only handle tasks that are active or successful.
if task.FinishTimestamp.IsZero() || task.Status.Result == execution.TaskSucceeded {
foundList[hashesIdx[hash]] = true
}
}
// Extract out all indexes that are not seen.
requests := make([]IndexCreationRequest, 0, len(indexes))
for i, found := range foundList {
index := indexes[i]
hash := hashes[i]
// We found an active or successful task.
if found {
continue
}
// Cannot create because exceeds maxAttempts.
if nextRetryIndex[hash] >= job.GetMaxAttempts() {
continue
}
// Found a missing index.
requests = append(requests, IndexCreationRequest{
ParallelIndex: index,
RetryIndex: nextRetryIndex[hash],
Earliest: latestFinishTimeByIndex[hash].Add(job.GetRetryDelay()),
})
}
return requests, nil
} | pkg/execution/util/parallel/indexes.go | 0.701304 | 0.435781 | indexes.go | starcoder |
package engine
import (
"fmt"
"math/bits"
)
// bitboard.go contains the implementation of a bitboard datatype for the engine.
// A type representing a bitboard, which is a unsigned 64-bit number. Blunder's
// bitboard representation has the most significant bit being A1 and the least signficanrt
// bit being H8.
type Bitboard uint64
// A constant representing a bitboard with every square set
const FullBB Bitboard = 0xffffffffffffffff
// A global constant where each entry represents a square on the chess board,
// and each entry contains a bitboard with the bit set high at that square.
// An extra entry is given so that the invalid square constant NoSq can be
// indexed into the table without the program crashing.
var SquareBB [65]Bitboard
// Set the bit at given square.
// Index 64 (NoSq) is a no-op since init leaves that entry as the empty bitboard.
func (bitboard *Bitboard) SetBit(sq uint8) {
	*bitboard |= SquareBB[sq]
}
// Clear the bit at given square.
// Clearing an already-clear bit leaves the bitboard unchanged.
func (bitboard *Bitboard) ClearBit(sq uint8) {
	*bitboard &= ^SquareBB[sq]
}
// BitSet reports whether the bit of the given bitboard at the given
// square is set. A1 occupies the most significant bit, so the test mask
// is the top bit shifted down by the square number.
func (bb Bitboard) BitSet(sq uint8) bool {
	mask := Bitboard(0x8000000000000000) >> sq
	return bb&mask != 0
}
// Get the position of the MSB of the given bitboard.
// Returns 64 for the empty bitboard (bits.LeadingZeros64(0) == 64), which
// indexes the NoSq sentinel entry of SquareBB.
func (bitboard Bitboard) Msb() uint8 {
	return uint8(bits.LeadingZeros64(uint64(bitboard)))
}
// Pop the next set square: returns the position of the most significant set
// bit and clears it. Because A1 is the MSB, this is the lowest square number
// currently set. (The historical comment mentioning the LSB was stale — the
// code operates on the MSB end of the word via Msb.)
func (bitboard *Bitboard) PopBit() uint8 {
	sq := bitboard.Msb()
	bitboard.ClearBit(sq)
	return sq
}
// Count the bits in a given bitboard using the standard library's
// population-count intrinsic (math/bits.OnesCount64). (The previous comment
// mentioning a hand-rolled SWAR popcount was stale.)
func (bitboard Bitboard) CountBits() int {
	return bits.OnesCount64(uint64(bitboard))
}
// Return a string representation of the given bitboard: an 8x8 grid with
// rank 8 on top, '.' for clear squares and '1' for set squares, followed by
// a file legend. Since A1 is the MSB, bitstring[sq] is the character for
// square sq, and ranks are emitted from index 56 (rank 8) down to 0 (rank 1).
func (bitboard Bitboard) String() (bitboardAsString string) {
	bitstring := fmt.Sprintf("%064b\n", bitboard)
	bitboardAsString += "\n"
	for rankStartPos := 56; rankStartPos >= 0; rankStartPos -= 8 {
		// Rank label: 56/8+1 = 8 for the top row, 0/8+1 = 1 for the bottom.
		bitboardAsString += fmt.Sprintf("%v | ", (rankStartPos/8)+1)
		for index := rankStartPos; index < rankStartPos+8; index++ {
			squareChar := bitstring[index]
			if squareChar == '0' {
				squareChar = '.'
			}
			bitboardAsString += fmt.Sprintf("%c ", squareChar)
		}
		bitboardAsString += "\n"
	}
	bitboardAsString += " "
	for fileNo := 0; fileNo < 8; fileNo++ {
		bitboardAsString += "--"
	}
	bitboardAsString += "\n "
	for _, file := range "abcdefgh" {
		bitboardAsString += fmt.Sprintf("%c ", file)
	}
	bitboardAsString += "\n"
	return bitboardAsString
}
// Initalize the bitboard constants.
func init() {
var sq uint8
for sq = 0; sq < 65; sq++ {
SquareBB[sq] = 0x8000000000000000 >> sq
}
} | engine/bitboard.go | 0.778186 | 0.436562 | bitboard.go | starcoder |
package chunk
import (
"unsafe"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/types/json"
"github.com/pingcap/tidb/util/hack"
)
// Chunk stores multiple rows of data in Apache Arrow format.
// See https://arrow.apache.org/docs/memory_layout.html
// Values are appended in compact format and can be directly accessed without decoding.
// When the chunk is done processing, we can reuse the allocated memory by resetting it.
type Chunk struct {
	// columns holds one column per field; all columns always have the same length.
	columns []*column
}

// AddFixedLenColumn adds a fixed length column with elemLen and initial data capacity.
func (c *Chunk) AddFixedLenColumn(elemLen, initCap int) {
	c.columns = append(c.columns, &column{
		// A non-nil elemBuf marks the column as fixed-length (see column.isFixed)
		// and doubles as the scratch buffer used to encode one element.
		elemBuf: make([]byte, elemLen),
		data:    make([]byte, 0, initCap),
	})
}

// AddVarLenColumn adds a variable length column with initial data capacity.
func (c *Chunk) AddVarLenColumn(initCap int) {
	c.columns = append(c.columns, &column{
		// offsets[i] is the start of element i in data; the leading 0 seeds
		// the first element and marks the column var-length (see isVarlen).
		offsets: []int32{0},
		data:    make([]byte, 0, initCap),
	})
}

// AddInterfaceColumn adds an interface column which holds element as interface.
func (c *Chunk) AddInterfaceColumn() {
	c.columns = append(c.columns, &column{
		ifaces: []interface{}{},
	})
}

// Reset resets the chunk, so the memory it allocated can be reused.
// Make sure all the data in the chunk is not used anymore before you reuse this chunk.
func (c *Chunk) Reset() {
	for _, c := range c.columns {
		c.reset()
	}
}

// NumCols returns the number of columns in the chunk.
func (c *Chunk) NumCols() int {
	return len(c.columns)
}

// NumRows returns the number of rows in the chunk.
// All columns have the same length, so the first column's length suffices.
func (c *Chunk) NumRows() int {
	if len(c.columns) == 0 {
		return 0
	}
	return c.columns[0].length
}

// GetRow gets the Row in the chunk with the row index.
// The Row is a lightweight view; it stays valid only as long as the chunk
// is not reset.
func (c *Chunk) GetRow(idx int) Row {
	return Row{c: c, idx: idx}
}
// AppendRow appends a row to the chunk, copying the row's columns into this
// chunk's columns starting at column colIdx.
func (c *Chunk) AppendRow(colIdx int, row Row) {
	for i, rowCol := range row.c.columns {
		chkCol := c.columns[colIdx+i]
		// The bitmap records non-null entries, hence the negation.
		chkCol.setNullBitmap(!rowCol.isNull(row.idx))
		if rowCol.isFixed() {
			elemLen := len(rowCol.elemBuf)
			offset := row.idx * elemLen
			chkCol.data = append(chkCol.data, rowCol.data[offset:offset+elemLen]...)
		} else if rowCol.isVarlen() {
			start, end := rowCol.offsets[row.idx], rowCol.offsets[row.idx+1]
			chkCol.data = append(chkCol.data, rowCol.data[start:end]...)
			chkCol.offsets = append(chkCol.offsets, int32(len(chkCol.data)))
		} else {
			chkCol.ifaces = append(chkCol.ifaces, rowCol.ifaces[row.idx])
		}
		chkCol.length++
	}
}

// AppendNull appends a null value to the chunk.
func (c *Chunk) AppendNull(colIdx int) {
	c.columns[colIdx].appendNull()
}

// AppendInt64 appends an int64 value to the chunk.
func (c *Chunk) AppendInt64(colIdx int, i int64) {
	c.columns[colIdx].appendInt64(i)
}

// AppendUint64 appends a uint64 value to the chunk.
func (c *Chunk) AppendUint64(colIdx int, u uint64) {
	c.columns[colIdx].appendUint64(u)
}

// AppendFloat32 appends a float32 value to the chunk.
func (c *Chunk) AppendFloat32(colIdx int, f float32) {
	c.columns[colIdx].appendFloat32(f)
}

// AppendFloat64 appends a float64 value to the chunk.
func (c *Chunk) AppendFloat64(colIdx int, f float64) {
	c.columns[colIdx].appendFloat64(f)
}

// AppendString appends a string value to the chunk.
func (c *Chunk) AppendString(colIdx int, str string) {
	c.columns[colIdx].appendString(str)
}

// AppendBytes appends a bytes value to the chunk.
func (c *Chunk) AppendBytes(colIdx int, b []byte) {
	c.columns[colIdx].appendBytes(b)
}

// AppendTime appends a Time value to the chunk, stored as an interface.
// TODO: change the time structure so it can be directly written to memory.
func (c *Chunk) AppendTime(colIdx int, t types.Time) {
	c.columns[colIdx].appendInterface(t)
}

// AppendDuration appends a Duration value to the chunk.
func (c *Chunk) AppendDuration(colIdx int, dur types.Duration) {
	c.columns[colIdx].appendDuration(dur)
}

// AppendMyDecimal appends a MyDecimal value to the chunk.
func (c *Chunk) AppendMyDecimal(colIdx int, dec *types.MyDecimal) {
	c.columns[colIdx].appendMyDecimal(dec)
}

// AppendEnum appends an Enum value to the chunk, encoded as value+name bytes.
func (c *Chunk) AppendEnum(colIdx int, enum types.Enum) {
	c.columns[colIdx].appendNameValue(enum.Name, enum.Value)
}

// AppendSet appends a Set value to the chunk, encoded as value+name bytes.
func (c *Chunk) AppendSet(colIdx int, set types.Set) {
	c.columns[colIdx].appendNameValue(set.Name, set.Value)
}

// AppendJSON appends a JSON value to the chunk, stored as an interface.
func (c *Chunk) AppendJSON(colIdx int, j json.JSON) {
	c.columns[colIdx].appendInterface(j)
}
// column is the storage for a single column of a Chunk. Exactly one of
// elemBuf (fixed-length), offsets (var-length) or ifaces (interface) is
// non-nil and selects the column's representation.
type column struct {
	length     int    // number of elements appended so far
	nullCount  int    // number of null elements
	nullBitmap []byte // bit i is SET when element i is non-null (see setNullBitmap)
	offsets    []int32
	data       []byte
	elemBuf    []byte
	ifaces     []interface{}
}

func (c *column) isFixed() bool {
	return c.elemBuf != nil
}

func (c *column) isVarlen() bool {
	return c.offsets != nil
}

func (c *column) isInterface() bool {
	return c.ifaces != nil
}
// reset truncates the column's buffers in place so their backing memory can
// be reused by later appends.
//
// For var-length columns the leading 0 offset written by AddVarLenColumn
// must survive a reset: appendNull reads offsets[length] and the Row getters
// slice with offsets[idx]/offsets[idx+1], so truncating offsets to zero
// elements would panic or mis-slice on the first use after a Reset. Keep the
// seed element by truncating to length one instead.
func (c *column) reset() {
	c.length = 0
	c.nullCount = 0
	c.nullBitmap = c.nullBitmap[:0]
	if len(c.offsets) > 0 {
		// Keep the seed 0 offset; fixed/interface columns have nil offsets
		// and skip this branch.
		c.offsets = c.offsets[:1]
	}
	c.data = c.data[:0]
	c.ifaces = c.ifaces[:0]
}
// isNull reports whether element rowIdx is null: its bit in the bitmap is
// clear (bits mark non-null entries, see setNullBitmap).
func (c *column) isNull(rowIdx int) bool {
	nullByte := c.nullBitmap[rowIdx/8]
	return nullByte&(1<<(uint(rowIdx)&7)) == 0
}

// setNullBitmap records the null-ness of the element about to occupy index
// c.length: the bit is set for a non-null value (on == true); for a null one
// the bit is left clear and nullCount is incremented. The bitmap grows one
// byte at a time as needed.
func (c *column) setNullBitmap(on bool) {
	idx := c.length >> 3
	if idx >= len(c.nullBitmap) {
		c.nullBitmap = append(c.nullBitmap, 0)
	}
	if on {
		pos := uint(c.length) & 7
		c.nullBitmap[idx] |= byte((1 << pos))
	} else {
		c.nullCount++
	}
}

// appendNull appends a null element while keeping the column's data layout
// consistent for every representation.
func (c *column) appendNull() {
	c.setNullBitmap(false)
	if c.isFixed() {
		// elemBuf's current contents act as a placeholder; the bytes are
		// never read because the bitmap marks the element null.
		c.data = append(c.data, c.elemBuf...)
	} else if c.isVarlen() {
		// Repeat the previous end offset: a zero-length element.
		c.offsets = append(c.offsets, c.offsets[c.length])
	} else {
		c.ifaces = append(c.ifaces, nil)
	}
	c.length++
}

// finishAppendFixed commits the element just encoded into elemBuf and marks
// it non-null.
func (c *column) finishAppendFixed() {
	c.data = append(c.data, c.elemBuf...)
	c.setNullBitmap(true)
	c.length++
}
// The fixed-length appenders encode the value into elemBuf in native byte
// order via unsafe and then commit it with finishAppendFixed. The column
// must have been created with a matching element length.
func (c *column) appendInt64(i int64) {
	*(*int64)(unsafe.Pointer(&c.elemBuf[0])) = i
	c.finishAppendFixed()
}

func (c *column) appendUint64(u uint64) {
	*(*uint64)(unsafe.Pointer(&c.elemBuf[0])) = u
	c.finishAppendFixed()
}

func (c *column) appendFloat32(f float32) {
	*(*float32)(unsafe.Pointer(&c.elemBuf[0])) = f
	c.finishAppendFixed()
}

func (c *column) appendFloat64(f float64) {
	*(*float64)(unsafe.Pointer(&c.elemBuf[0])) = f
	c.finishAppendFixed()
}

// finishAppendVar marks the element whose bytes were just appended to data
// as non-null and records its end offset.
func (c *column) finishAppendVar() {
	c.setNullBitmap(true)
	c.offsets = append(c.offsets, int32(len(c.data)))
	c.length++
}

func (c *column) appendString(str string) {
	c.data = append(c.data, str...)
	c.finishAppendVar()
}

func (c *column) appendBytes(b []byte) {
	c.data = append(c.data, b...)
	c.finishAppendVar()
}

func (c *column) appendInterface(o interface{}) {
	c.ifaces = append(c.ifaces, o)
	c.setNullBitmap(true)
	c.length++
}

func (c *column) appendDuration(dur types.Duration) {
	*(*types.Duration)(unsafe.Pointer(&c.elemBuf[0])) = dur
	c.finishAppendFixed()
}

func (c *column) appendMyDecimal(dec *types.MyDecimal) {
	*(*types.MyDecimal)(unsafe.Pointer(&c.elemBuf[0])) = *dec
	c.finishAppendFixed()
}

// appendNameValue encodes a name/value pair (Enum or Set) as 8 bytes of
// value followed by the name bytes; getNameValue decodes this layout.
func (c *column) appendNameValue(name string, val uint64) {
	var buf [8]byte
	*(*uint64)(unsafe.Pointer(&buf[0])) = val
	c.data = append(c.data, buf[:]...)
	c.data = append(c.data, name...)
	c.finishAppendVar()
}

// Row represents a row of data, can be used to access values.
type Row struct {
	c   *Chunk
	idx int
}
// GetInt64 returns the int64 value and isNull with the colIdx.
// The column must have been created with an 8-byte element length.
func (r Row) GetInt64(colIdx int) (int64, bool) {
	col := r.c.columns[colIdx]
	return *(*int64)(unsafe.Pointer(&col.data[r.idx*8])), col.isNull(r.idx)
}

// GetUint64 returns the uint64 value and isNull with the colIdx.
func (r Row) GetUint64(colIdx int) (uint64, bool) {
	col := r.c.columns[colIdx]
	return *(*uint64)(unsafe.Pointer(&col.data[r.idx*8])), col.isNull(r.idx)
}
// GetFloat32 returns the float32 value and isNull with the colIdx.
//
// A float32 column stores one 4-byte element per row (appendFloat32 encodes
// exactly 4 bytes), so the element for row r.idx starts at byte offset
// r.idx*4; the previous stride of r.idx*8 read from the middle of the
// following element. This assumes the column was created with elemLen == 4,
// matching the in-memory size of float32.
func (r Row) GetFloat32(colIdx int) (float32, bool) {
	col := r.c.columns[colIdx]
	return *(*float32)(unsafe.Pointer(&col.data[r.idx*4])), col.isNull(r.idx)
}
// GetFloat64 returns the float64 value and isNull with the colIdx.
func (r Row) GetFloat64(colIdx int) (float64, bool) {
	col := r.c.columns[colIdx]
	return *(*float64)(unsafe.Pointer(&col.data[r.idx*8])), col.isNull(r.idx)
}

// GetString returns the string value and isNull with the colIdx.
// The returned string aliases the column's data buffer (hack.String), so it
// is only valid until the chunk is reset.
func (r Row) GetString(colIdx int) (string, bool) {
	col := r.c.columns[colIdx]
	start, end := col.offsets[r.idx], col.offsets[r.idx+1]
	return hack.String(col.data[start:end]), col.isNull(r.idx)
}

// GetBytes returns the bytes value and isNull with the colIdx.
// The returned slice aliases the column's data buffer.
func (r Row) GetBytes(colIdx int) ([]byte, bool) {
	col := r.c.columns[colIdx]
	start, end := col.offsets[r.idx], col.offsets[r.idx+1]
	return col.data[start:end], col.isNull(r.idx)
}

// GetTime returns the Time value and isNull with the colIdx.
// Nullness is derived from a failed type assertion: a null element is stored
// as a nil interface (see appendNull), which does not assert to types.Time.
func (r Row) GetTime(colIdx int) (types.Time, bool) {
	col := r.c.columns[colIdx]
	t, ok := col.ifaces[r.idx].(types.Time)
	return t, !ok
}

// GetDuration returns the Duration value and isNull with the colIdx.
// NOTE(review): the stride assumes unsafe.Sizeof(types.Duration) == 16 —
// confirm this matches the elemLen the column was created with.
func (r Row) GetDuration(colIdx int) (types.Duration, bool) {
	col := r.c.columns[colIdx]
	return *(*types.Duration)(unsafe.Pointer(&col.data[r.idx*16])), col.isNull(r.idx)
}

// getNameValue decodes the layout written by appendNameValue: 8 bytes of
// value followed by the name bytes. An empty byte range means the element is
// null, since a non-null pair always carries at least the 8 value bytes.
func (r Row) getNameValue(colIdx int) (string, uint64, bool) {
	col := r.c.columns[colIdx]
	start, end := col.offsets[r.idx], col.offsets[r.idx+1]
	if start == end {
		return "", 0, true
	}
	val := *(*uint64)(unsafe.Pointer(&col.data[start]))
	name := hack.String(col.data[start+8 : end])
	return name, val, false
}

// GetEnum returns the Enum value and isNull with the colIdx.
func (r Row) GetEnum(colIdx int) (types.Enum, bool) {
	name, val, isNull := r.getNameValue(colIdx)
	return types.Enum{Name: name, Value: val}, isNull
}

// GetSet returns the Set value and isNull with the colIdx.
func (r Row) GetSet(colIdx int) (types.Set, bool) {
	name, val, isNull := r.getNameValue(colIdx)
	return types.Set{Name: name, Value: val}, isNull
}

// GetMyDecimal returns the MyDecimal value and isNull with the colIdx.
// The returned pointer aliases the column's data buffer.
func (r Row) GetMyDecimal(colIdx int) (*types.MyDecimal, bool) {
	col := r.c.columns[colIdx]
	return (*types.MyDecimal)(unsafe.Pointer(&col.data[r.idx*types.MyDecimalStructSize])), col.isNull(r.idx)
}

// GetJSON returns the JSON value and isNull with the colIdx.
// As with GetTime, nullness is derived from a failed type assertion.
func (r Row) GetJSON(colIdx int) (json.JSON, bool) {
	col := r.c.columns[colIdx]
	j, ok := col.ifaces[r.idx].(json.JSON)
	return j, !ok
}
package conf
// Uint8Var defines a uint8 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the flag and/or environment variable.
// NOTE(review): the name is registered with both the env and flag sets;
// which source takes precedence is decided at parse time elsewhere — confirm
// there rather than from the registration order here.
func (c *Configurator) Uint8Var(p *uint8, name string, value uint8, usage string) {
	c.env().Uint8Var(p, name, value, usage)
	c.flag().Uint8Var(p, name, value, usage)
}
// Uint8 defines a uint8 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the flag and/or environment variable.
func (c *Configurator) Uint8(name string, value uint8, usage string) *uint8 {
	var v uint8
	c.Uint8Var(&v, name, value, usage)
	return &v
}
// Uint8VarE defines a uint8 environment variable with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the environment variable.
// (The E suffix means the value is read from the environment only.)
func (c *Configurator) Uint8VarE(p *uint8, name string, value uint8, usage string) {
	c.env().Uint8Var(p, name, value, usage)
}

// Uint8E defines a uint8 environment variable with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the environment variable.
func (c *Configurator) Uint8E(name string, value uint8, usage string) *uint8 {
	p := new(uint8)
	c.Uint8VarE(p, name, value, usage)
	return p
}

// Uint8VarF defines a uint8 flag with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the flag.
// (The F suffix means the value is read from the command-line flags only.)
func (c *Configurator) Uint8VarF(p *uint8, name string, value uint8, usage string) {
	c.flag().Uint8Var(p, name, value, usage)
}

// Uint8F defines a uint8 flag with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the flag.
func (c *Configurator) Uint8F(name string, value uint8, usage string) *uint8 {
	p := new(uint8)
	c.Uint8VarF(p, name, value, usage)
	return p
}
// The package-level variants below delegate to the Global configurator.

// Uint8Var defines a uint8 flag and environment variable with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the flag and/or environment variable.
func Uint8Var(p *uint8, name string, value uint8, usage string) {
	Global.Uint8Var(p, name, value, usage)
}

// Uint8 defines a uint8 flag and environment variable with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the flag and/or environment variable.
func Uint8(name string, value uint8, usage string) *uint8 {
	return Global.Uint8(name, value, usage)
}

// Uint8VarE defines a uint8 environment variable with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the environment variable.
func Uint8VarE(p *uint8, name string, value uint8, usage string) {
	Global.Uint8VarE(p, name, value, usage)
}

// Uint8E defines a uint8 environment variable with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the environment variable.
func Uint8E(name string, value uint8, usage string) *uint8 {
	return Global.Uint8E(name, value, usage)
}

// Uint8VarF defines a uint8 flag with specified name, default value, and usage string.
// The argument p points to a uint8 variable in which to store the value of the flag.
func Uint8VarF(p *uint8, name string, value uint8, usage string) {
	Global.Uint8VarF(p, name, value, usage)
}

// Uint8F defines a uint8 flag with specified name, default value, and usage string.
// The return value is the address of a uint8 variable that stores the value of the flag.
func Uint8F(name string, value uint8, usage string) *uint8 {
	return Global.Uint8F(name, value, usage)
}
// Package tabulator provides a generic interface for tabulating data (to CSV, to tabwriter, etc).
package tabulator
import (
"fmt"
"strconv"
)
// Tabulator is the interface of things that can create tables.
type Tabulator interface {
	// Header adds a header to the current table, if one has not yet been set.
	// Tabulators should silently ignore attempts to install a duplicate header; this means that sub-table functions
	// can call Header even if the parent table calls Header.
	Header(labels ...string)

	RowTabulator

	// Flush commits the table to the underlying writer, returning any error that occurred during tabulation.
	Flush() error
}

// RowTabulator is the interface of things that can tabulate a row.
// Each method should return the current or parent tabulator for method chaining.
type RowTabulator interface {
	// Cell adds some representation of the given cell value to the table's current row.
	Cell(value interface{}) RowTabulator
	// EndRow ends the current row.
	EndRow() Tabulator
}

// LineWriter can write table lines to some underlying flushable writer.
type LineWriter interface {
	// Write writes a single row with the given cells.
	Write(cells []string) error
	// Flush commits any lines written to the writer.
	Flush() error
}
// LineTabulator uses a LineWriter to tabulate a table.
// The first error from the writer is latched in err and makes every later
// operation a no-op; Flush reports it.
type LineTabulator struct {
	w      LineWriter
	ncells int // number of header cells; used to pre-size each row
	err    error
	row    []string // cells accumulated for the row under construction
}

// NewLineTabulator constructs a new tabulator using a LineWriter w.
func NewLineTabulator(w LineWriter) *LineTabulator {
	return &LineTabulator{w: w}
}
// Header adds a header row to the table, if one has not yet been set.
// A repeat call (ncells already non-zero) or a call after an error is
// silently ignored, so sub-table code may call Header even when the parent
// already did.
// NOTE(review): a first call with zero labels leaves ncells at 0, so a later
// call could still install a header — presumably callers never pass an empty
// label set; confirm.
func (t *LineTabulator) Header(labels ...string) {
	if 0 < t.ncells || t.err != nil {
		return
	}
	t.ncells = len(labels)
	t.resetRow()
	for _, l := range labels {
		t.Cell(l)
	}
	_ = t.EndRow()
}
// Cell stringifies the value and appends it to the current row.
func (t *LineTabulator) Cell(value interface{}) RowTabulator {
	return t.stringCell(stringify(value))
}
// stringify renders a cell value as a string. Supported kinds are string,
// int, int64 and fmt.Stringer; any other value renders as "?".
func stringify(value interface{}) string {
	if s, ok := value.(string); ok {
		return s
	}
	if n, ok := value.(int); ok {
		return strconv.Itoa(n)
	}
	if n, ok := value.(int64); ok {
		return strconv.FormatInt(n, 10)
	}
	if s, ok := value.(fmt.Stringer); ok {
		return s.String()
	}
	return "?"
}
// stringCell appends an already-stringified value to the current row.
// It does nothing once an error has been recorded.
func (t *LineTabulator) stringCell(value string) RowTabulator {
	if t.err == nil {
		t.row = append(t.row, value)
	}
	return t
}
// EndRow writes the accumulated row to the underlying writer and starts a
// fresh one. Any write error is latched for Flush to report.
func (t *LineTabulator) EndRow() Tabulator {
	if t.err != nil {
		return t
	}
	t.err = t.w.Write(t.row)
	t.resetRow()
	return t
}

// resetRow replaces the row buffer with an empty one pre-sized for ncells.
func (t *LineTabulator) resetRow() {
	t.row = make([]string, 0, t.ncells)
}
func (t LineTabulator) Flush() error {
if t.err != nil {
return t.err
}
return t.w.Flush()
} | internal/tabulator/tabulator.go | 0.669637 | 0.459622 | tabulator.go | starcoder |
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"errors"
"fmt"
)
// _Equal reports whether two slices are equal: the same length and all
// elements equal. All floating point NaNs are considered equal.
func _SliceEqual[Elem comparable](s1, s2 []Elem) bool {
if len(s1) != len(s2) {
return false
}
for i, v1 := range s1 {
v2 := s2[i]
if v1 != v2 {
isNaN := func(f Elem) bool { return f != f }
if !isNaN(v1) || !isNaN(v2) {
return false
}
}
}
return true
}
// A Graph is a collection of nodes. A node may have an arbitrary number
// of edges. An edge connects two nodes. Both nodes and edges must be
// comparable. This is an undirected simple graph.
type _Graph[_Node _NodeC[_Edge], _Edge _EdgeC[_Node]] struct {
	nodes []_Node
}

// _NodeC is the constraints on a node in a graph, given the _Edge type.
type _NodeC[_Edge any] interface {
	comparable
	Edges() []_Edge
}

// _EdgeC is the constraints on an edge in a graph, given the _Node type.
type _EdgeC[_Node any] interface {
	comparable
	Nodes() (a, b _Node)
}

// _New creates a new _Graph from a collection of Nodes.
func _New[_Node _NodeC[_Edge], _Edge _EdgeC[_Node]](nodes []_Node) *_Graph[_Node, _Edge] {
	return &_Graph[_Node, _Edge]{nodes: nodes}
}
// nodePath holds the path to a node during ShortestPath.
// This should ideally be a type defined inside ShortestPath,
// but the translator tool doesn't support that.
type nodePath[_Node _NodeC[_Edge], _Edge _EdgeC[_Node]] struct {
	node _Node
	path []_Edge // edges taken from the start node to reach node
}
// ShortestPath returns the shortest path between two nodes,
// as an ordered list of edges. If there are multiple shortest paths,
// which one is returned is unpredictable.
//
// The search is breadth-first: each iteration of the outer loop expands one
// frontier (workqueue), so the first time `to` is reached the path is
// minimal in edge count.
// NOTE(review): when from == to this returns the "no path" error rather than
// an empty path — presumably acceptable for this test program; confirm.
func (g *_Graph[_Node, _Edge]) ShortestPath(from, to _Node) ([]_Edge, error) {
	visited := make(map[_Node]bool)
	visited[from] = true
	workqueue := []nodePath[_Node, _Edge]{nodePath[_Node, _Edge]{from, nil}}
	for len(workqueue) > 0 {
		current := workqueue
		workqueue = nil
		for _, np := range current {
			edges := np.node.Edges()
			for _, edge := range edges {
				a, b := edge.Nodes()
				// Orient the edge away from the current node.
				if a == np.node {
					a = b
				}
				if !visited[a] {
					// Copy the path before extending so frontier siblings
					// don't share a backing array.
					ve := append([]_Edge(nil), np.path...)
					ve = append(ve, edge)
					if a == to {
						return ve, nil
					}
					workqueue = append(workqueue, nodePath[_Node, _Edge]{a, ve})
					visited[a] = true
				}
			}
		}
	}
	return nil, errors.New("no path")
}
// direction enumerates the ten exit directions a maze room can have; the
// values double as indexes into mazeRoom.exits.
type direction int

const (
	north direction = iota
	ne
	east
	se
	south
	sw
	west
	nw
	up
	down
)
// String returns the name of the direction, or "direction N" for a value
// outside the named range.
//
// The previous implementation allocated and populated a 10-entry map on
// every call; a switch performs the same lookup with no allocation. The %d
// verb formats the underlying integer and does not re-invoke String, so the
// fallback cannot recurse.
func (dir direction) String() string {
	switch dir {
	case north:
		return "north"
	case ne:
		return "ne"
	case east:
		return "east"
	case se:
		return "se"
	case south:
		return "south"
	case sw:
		return "sw"
	case west:
		return "west"
	case nw:
		return "nw"
	case up:
		return "up"
	case down:
		return "down"
	}
	return fmt.Sprintf("direction %d", dir)
}
// mazeRoom is a graph node: a room with up to one exit per direction.
type mazeRoom struct {
	index int     // the room's number in the zork map
	exits [10]int // exits[d] is the destination room for direction d; 0 means no exit
}

// mazeEdge is a graph edge: an exit connecting two rooms.
type mazeEdge struct {
	from, to int
	dir      direction
}
// Edges returns the exits from the room as graph edges; a zero entry in the
// exits table means "no exit in that direction" and is skipped.
func (m mazeRoom) Edges() []mazeEdge {
	var r []mazeEdge
	for i, exit := range m.exits {
		if exit != 0 {
			r = append(r, mazeEdge{
				from: m.index,
				to:   exit,
				dir:  direction(i),
			})
		}
	}
	return r
}
// Nodes returns the rooms connected by an edge, looked up in the global zork
// map; it panics on a dangling room number (a programmer error in the data).
//
//go:noinline
func (e mazeEdge) Nodes() (mazeRoom, mazeRoom) {
	m1, ok := zork[e.from]
	if !ok {
		panic("bad edge")
	}
	m2, ok := zork[e.to]
	if !ok {
		panic("bad edge")
	}
	return m1, m2
}
// The first maze in Zork. Room indexes based on original Fortran data file.
// You are in a maze of twisty little passages, all alike.
// The index field of each room is filled in at runtime by TestShortestPath.
var zork = map[int]mazeRoom{
	11: {exits: [10]int{north: 11, south: 12, east: 14}}, // west to Troll Room
	12: {exits: [10]int{south: 11, north: 14, east: 13}},
	13: {exits: [10]int{west: 12, north: 14, up: 16}},
	14: {exits: [10]int{west: 13, north: 11, east: 15}},
	15: {exits: [10]int{south: 14}},                   // Dead End
	16: {exits: [10]int{east: 17, north: 13, sw: 18}}, // skeleton, etc.
	17: {exits: [10]int{west: 16}},                    // Dead End
	18: {exits: [10]int{down: 16, east: 19, west: 18, up: 22}},
	19: {exits: [10]int{up: 29, west: 18, ne: 15, east: 20, south: 30}},
	20: {exits: [10]int{ne: 19, west: 20, se: 21}},
	21: {exits: [10]int{north: 20}}, // Dead End
	22: {exits: [10]int{north: 18, east: 24, down: 23, south: 28, west: 26, nw: 22}},
	23: {exits: [10]int{east: 22, west: 28, up: 24}},
	24: {exits: [10]int{ne: 25, down: 23, nw: 28, sw: 26}},
	25: {exits: [10]int{sw: 24}}, // Grating room (up to Clearing)
	26: {exits: [10]int{west: 16, sw: 24, east: 28, up: 22, north: 27}},
	27: {exits: [10]int{south: 26}}, // Dead End
	28: {exits: [10]int{east: 22, down: 26, south: 23, west: 24}},
	29: {exits: [10]int{west: 30, nw: 29, ne: 19, south: 19}},
	30: {exits: [10]int{west: 29, south: 19}}, // ne to Cyclops Room
}
// TestShortestPath checks BFS over the Zork maze: the shortest route from
// room 11 to room 30 must be east, west, up, sw, east, south. It panics on
// failure (this file runs as a standalone program, not under go test).
func TestShortestPath() {
	// The Zork maze is not a proper undirected simple graph,
	// as there are some one way paths (e.g., 19 -> 15),
	// but for this test that doesn't matter.

	// Set the index field in the map. Simpler than doing it in the
	// composite literal.
	for k := range zork {
		r := zork[k]
		r.index = k
		zork[k] = r
	}

	var nodes []mazeRoom
	// NOTE(review): the loop above already stored the index in the map
	// entries, so re-assigning mridx.index here is redundant — harmless,
	// but one of the two assignments could go.
	for idx, room := range zork {
		mridx := room
		mridx.index = idx
		nodes = append(nodes, mridx)
	}
	g := _New[mazeRoom, mazeEdge](nodes)
	path, err := g.ShortestPath(zork[11], zork[30])
	if err != nil {
		panic(fmt.Sprintf("%v", err))
	}
	var steps []direction
	for _, edge := range path {
		steps = append(steps, edge.dir)
	}
	want := []direction{east, west, up, sw, east, south}
	if !_SliceEqual(steps, want) {
		panic(fmt.Sprintf("ShortestPath returned %v, want %v", steps, want))
	}
}
// main runs the single test case; the program panics if the check fails.
func main() {
	TestShortestPath()
}
package storage
import (
"errors"
"fmt"
"github.com/prometheus/prometheus/model/labels"
"github.com/parca-dev/parca/pkg/profile"
"github.com/parca-dev/parca/pkg/storage/chunkenc"
)
// MemRangeSeries is an iterator that only queries certain chunks within the range and
// then only the samples within the range.
type MemRangeSeries struct {
	s    *MemSeries
	mint int64 // lower bound of the time range
	maxt int64 // upper bound of the time range (inclusive, see getIndexRange)
}

// Labels returns the label set of the underlying series.
func (rs *MemRangeSeries) Labels() labels.Labels {
	return rs.s.Labels()
}
// Iterator returns an iterator over the series' samples restricted to
// [rs.mint, rs.maxt]: it selects the chunks overlapping the range, computes
// the in-range sample index window, and seeks every per-field iterator to
// the window's start.
func (rs *MemRangeSeries) Iterator() ProfileSeriesIterator {
	rs.s.mu.RLock()
	defer rs.s.mu.RUnlock()

	var numSamples uint64

	// Select only the timestamp chunks overlapping the requested range and
	// count the samples they hold.
	chunkStart, chunkEnd := rs.s.timestamps.indexRange(rs.mint, rs.maxt)
	timestamps := make([]chunkenc.Chunk, 0, chunkEnd-chunkStart)
	for _, t := range rs.s.timestamps[chunkStart:chunkEnd] {
		numSamples += uint64(t.chunk.NumSamples())
		timestamps = append(timestamps, t.chunk)
	}

	// A throwaway pass over the timestamps determines the in-range window.
	timestampIt := NewMultiChunkIterator(timestamps)
	start, end, err := getIndexRange(timestampIt, numSamples, rs.mint, rs.maxt)
	if err != nil {
		return &MemRangeSeriesIterator{err: err}
	}

	// NOTE(review): rootIt is built and seeked but never handed to the
	// returned iterator — possibly vestigial; confirm against the full file.
	rootIt := NewMultiChunkIterator(rs.s.root[chunkStart:chunkEnd])
	if start != 0 {
		rootIt.Seek(start)
	}

	sampleIterators := make(map[string]*MultiChunksIterator, len(rs.s.samples))
	for key, chunks := range rs.s.samples {
		sampleIterators[key] = NewMultiChunkIterator(chunks)
	}

	// Fresh value iterators, all advanced to the first in-range sample.
	timestampIterator := NewMultiChunkIterator(timestamps)
	durationsIterator := NewMultiChunkIterator(rs.s.durations[chunkStart:chunkEnd])
	periodsIterator := NewMultiChunkIterator(rs.s.periods[chunkStart:chunkEnd])
	if start != 0 {
		timestampIterator.Seek(start)
		durationsIterator.Seek(start)
		periodsIterator.Seek(start)
		for _, sampleIterator := range sampleIterators {
			sampleIterator.Seek(start)
		}
	}

	// NOTE(review): the in-range count is derived from [start, end); the -1
	// presumably compensates for getIndexRange's final increment — confirm.
	if end-start < numSamples {
		numSamples = end - start - 1
	}

	return &MemRangeSeriesIterator{
		s:    rs.s,
		mint: rs.mint,
		maxt: rs.maxt,

		numSamples: numSamples,

		timestampsIterator: timestampIterator,
		durationsIterator:  durationsIterator,
		periodsIterator:    periodsIterator,
		sampleIterators:    sampleIterators,
	}
}
// getIndexRange consumes up to numSamples timestamps from it and returns the
// sample index window covering [mint, maxt]: start counts the samples whose
// timestamp is < mint, end counts the samples read up to and including the
// last one <= maxt (incremented once more when all numSamples fall in
// range). A zero timestamp is treated as the end of valid data.
func getIndexRange(it MemSeriesValuesIterator, numSamples uint64, mint, maxt int64) (uint64, uint64, error) {
	// figure out the index of the first sample > mint and the last sample < maxt
	start := uint64(0)
	end := uint64(0)
	i := uint64(0)
	for it.Next() {
		if i == numSamples {
			// Every sample was consumed without exceeding maxt; bump end so
			// the caller's window math stays consistent.
			end++
			break
		}
		t := it.At()
		// MultiChunkIterator might return sparse values - shouldn't usually happen though.
		if t == 0 {
			break
		}
		if t < mint {
			start++
		}
		if t <= maxt {
			end++
		} else {
			break
		}
		i++
	}
	return start, end, it.Err()
}
// MemSeriesValuesIterator is an abstraction on iterator over values from possible multiple chunks.
// It most likely is an abstraction like the MultiChunksIterator over []chunkenc.Chunk.
type MemSeriesValuesIterator interface {
	// Next iterates to the next value and returns true if there's more.
	Next() bool
	// At returns the current value.
	At() int64
	// Err returns the underlying errors. Next will return false when encountering errors.
	Err() error
	// Read returns how many iterations the iterator has read at any given moment.
	Read() uint64
}
// MemRangeSeriesIterator iterates over the samples of a MemRangeSeries by
// advancing one value iterator per persisted field in lock-step.
type MemRangeSeriesIterator struct {
	s    *MemSeries
	mint int64
	maxt int64

	timestampsIterator MemSeriesValuesIterator
	durationsIterator  MemSeriesValuesIterator
	periodsIterator    MemSeriesValuesIterator
	sampleIterators    map[string]*MultiChunksIterator

	// numSamples counts down the samples left to produce.
	numSamples uint64 // uint16 might not be enough for many chunks (~500+)

	err error
}
// Next advances all per-field iterators in lock-step. It returns false when
// the budgeted number of samples has been produced, after a previous error,
// or when any underlying iterator ends early or falls out of step — any such
// inconsistency is recorded in it.err.
func (it *MemRangeSeriesIterator) Next() bool {
	if it.err != nil || it.numSamples == 0 {
		return false
	}

	it.s.mu.RLock()
	defer it.s.mu.RUnlock()

	if !it.timestampsIterator.Next() {
		it.err = errors.New("unexpected end of timestamps iterator")
		return false
	}
	if it.timestampsIterator.Err() != nil {
		it.err = fmt.Errorf("next timestamp: %w", it.timestampsIterator.Err())
		return false
	}

	if !it.durationsIterator.Next() {
		it.err = errors.New("unexpected end of durations iterator")
		return false
	}
	if it.durationsIterator.Err() != nil {
		it.err = fmt.Errorf("next duration: %w", it.durationsIterator.Err())
		return false
	}

	if !it.periodsIterator.Next() {
		it.err = errors.New("unexpected end of periods iterator")
		return false
	}
	if it.periodsIterator.Err() != nil {
		it.err = fmt.Errorf("next period: %w", it.periodsIterator.Err())
		return false
	}

	// All iterators must have consumed the same number of values; a mismatch
	// indicates an inconsistent series.
	read := it.timestampsIterator.Read()

	if dread := it.durationsIterator.Read(); dread != read {
		it.err = fmt.Errorf("duration iterator in wrong iteration, expected %d got %d", read, dread)
		return false
	}
	if pread := it.periodsIterator.Read(); pread != read {
		it.err = fmt.Errorf("period iterator in wrong iteration, expected %d got %d", read, pread)
		return false
	}

	for _, sit := range it.sampleIterators {
		if !sit.Next() {
			it.err = errors.New("unexpected end of numSamples iterator")
			return false
		}
		if sread := sit.Read(); sread != read {
			it.err = fmt.Errorf("sample iterator in wrong iteration, expected %d got %d", read, sread)
			return false
		}
	}

	it.numSamples--

	return true
}
// At returns an InstantProfile view over the iterators' current positions.
// The view is lazy — it reads the shared iterators when its methods are
// called — so it is only valid until the next call to Next.
func (it *MemRangeSeriesIterator) At() profile.InstantProfile {
	return &MemSeriesInstantProfile{
		PeriodType: it.s.periodType,
		SampleType: it.s.sampleType,

		timestampsIterator: it.timestampsIterator,
		durationsIterator:  it.durationsIterator,
		periodsIterator:    it.periodsIterator,
		sampleIterators:    it.sampleIterators,
	}
}

// Err returns the first error encountered while iterating, if any.
func (it *MemRangeSeriesIterator) Err() error {
	return it.err
}
// MemSeriesInstantProfile is a lazy InstantProfile backed by the iterators
// of a MemRangeSeriesIterator; its methods reflect the iterators' position
// at the time they are called.
type MemSeriesInstantProfile struct {
	PeriodType profile.ValueType
	SampleType profile.ValueType

	timestampsIterator MemSeriesValuesIterator
	durationsIterator  MemSeriesValuesIterator
	periodsIterator    MemSeriesValuesIterator
	sampleIterators    map[string]*MultiChunksIterator
}

// ProfileMeta returns the metadata of the sample at the iterators' current
// position.
func (m MemSeriesInstantProfile) ProfileMeta() profile.InstantProfileMeta {
	return profile.InstantProfileMeta{
		PeriodType: m.PeriodType,
		SampleType: m.SampleType,
		Timestamp:  m.timestampsIterator.At(),
		Duration:   m.durationsIterator.At(),
		Period:     m.periodsIterator.At(),
	}
}

// Samples materializes the per-stacktrace sample values at the iterators'
// current position. A fresh map is allocated on every call.
func (m MemSeriesInstantProfile) Samples() map[string]*profile.Sample {
	samples := make(map[string]*profile.Sample, len(m.sampleIterators))
	for k, it := range m.sampleIterators {
		samples[k] = &profile.Sample{
			Value: it.At(),
		}
	}
	return samples
}
package main
import (
"math/rand"
)
// InitializeClasses returns the per-state population proportions for a
// region, with the entire population starting in the first (susceptible)
// state: S = 1.0 and every other state 0.
func InitializeClasses(numStates int) []float64 {
	classes := make([]float64, numStates)
	classes[0] = 1.0
	return classes
}
// InitializeReciprocity generates one interaction (reciprocity) parameter
// per region, each drawn uniformly from [0, 1) using the global math/rand
// source.
func InitializeReciprocity(numRegions int) []float64 {
	weights := make([]float64, numRegions)
	for i := 0; i < numRegions; i++ {
		weights[i] = rand.Float64()
	}
	return weights
}
// InitializeBoard initialises the board: numRegions cells with randomized
// population ([100000, 1100000)), area ([100, 1100)) and reciprocity
// parameters, everyone susceptible, then an infection seeded (proportion
// 0.1) in one randomly chosen region.
func InitializeBoard(numRegions, numStates int) *Board {
	var b Board
	b.cells = make([]*Cell, numRegions)
	for i := range b.cells {
		var cell Cell
		// set the slice storing class proportions for the length of number of states
		cell.classProportions = make([]float64, numStates)
		// Initialise slice with proportion of susceptible population as 1
		p := InitializeClasses(numStates)
		copy(cell.classProportions, p)
		// Initialise population and area
		cell.population = rand.Intn(1000000) + 100000
		cell.area = rand.Float64()*1000 + 100.0
		// set the slice storing reciprocity for the length of number of regions
		cell.reciprocity = make([]float64, numRegions)
		// Initialise with random reciprocity parameters
		r := InitializeReciprocity(numRegions)
		copy(cell.reciprocity, r)
		// Assign the cell populated to the board
		b.cells[i] = &cell
	}
	// Choose a region to introduce infection
	j := rand.Intn(numRegions)
	// Choose proportion infected
	p := 0.1
	// classProportions[1] is the infected class; reduce the susceptible
	// share so the proportions still sum to 1.
	b.cells[j].classProportions[1] = p
	b.cells[j].classProportions[0] = 1 - p
	return &b
}
// Lambdas generates the per-state lambda parameters: 0 for the first
// (susceptible) state and 0.8 for every other state.
func Lambdas(numStates int) []float64 {
	rates := make([]float64, numStates)
	rates[0] = 0.0 // the susceptible state has no rate
	for i := 1; i < numStates; i++ {
		rates[i] = 0.8
	}
	return rates
}
// Gammas returns the per-state recovery rate vector: a uniform 0.1 for
// every state, including the susceptible state at index 0.
// Requires numStates >= 1 (index 0 is written).
func Gammas(numStates int) []float64 {
	rates := make([]float64, numStates)
	for i := range rates {
		rates[i] = 0.1
	}
	rates[0] = 0.1 // no-op (loop already set it); kept for symmetry with Lambdas
	return rates
}
package main
import "fmt"
// add returns the sum of its two arguments. [func] definition: each
// parameter may carry its own explicit type.
func add(x int, y int) int {
	sum := x + y // [+]
	return sum   // [return]
}
// myMath prints the result of each basic arithmetic operator applied to
// x and y. Parameters sharing a type can be listed together (x, y float64).
// Note: the remainder line truncates both operands to int first.
func myMath(x, y float64) {
	fmt.Println("x + y:", x+y) // [+] sum
	fmt.Println("x - y:", x-y) // [-] difference
	fmt.Println("x * y:", x*y) // [*] product
	fmt.Println("x / y:", x/y) // [/] quotient
	fmt.Println("x % y:", int(x)%int(y)) // [%] remainder (% is integer-only, hence the conversions)
	/*
		Bitwise operators (integers only, not demonstrated here):
		& bitwise AND // [&]
		| bitwise OR // [|]
		^ bitwise XOR // [^]
		&^ bit clear (AND NOT) // [&^]
	*/
}
// sort returns its two arguments in ascending order, demonstrating
// multiple return values.
func sort(x, y int) (int, int) {
	if x > y {
		x, y = y, x // swap so x holds the smaller value
	}
	return x, y // [,]
}
// sort2 returns its arguments in ascending order using named result
// parameters. Named results act like pre-declared variables, and a bare
// "return" yields their current values.
func sort2(x, y int) (first, second int) {
	first, second = x, y
	if first > second {
		first, second = second, first
	}
	return
}
// variadicFunc demonstrates variadic parameters: inside the function,
// arg is an ordinary []int. Each element is printed with its index.
// [...] ...Type: pack operator
func variadicFunc(arg ...int) {
	for i := range arg {
		fmt.Println("Variadic func", i, arg[i])
	}
}
// <POINTER>
// inc increments the integer num points at. Go passes arguments by value,
// so mutating the caller's variable requires passing its address and
// dereferencing with [*].
func inc(num *int) {
	*num = *num + 1 // equivalent to (*num)++
}
// dec decrements the integer num points at, mirroring inc.
func dec(num *int) {
	*num = *num - 1 // equivalent to (*num)--
}
// <METHOD>
// user is a plain struct acting as the receiver ("object") for the
// methods below.
type user struct {
	id                                  int
	age                                 int
	name, firstName, lastName, location string
}

// NewUser is a constructor-like helper returning a ready-to-use *user.
func NewUser(id, age int, firstName, lastName, location string) *user {
	return &user{
		id:        id,
		age:       age,
		firstName: firstName,
		lastName:  lastName,
		location:  location,
	}
}

// Stringer mirrors the ubiquitous interface of the same name from the fmt
// package: a STRINGER is any type that can describe itself as a string.
type Stringer interface {
	String() string
}

// String implements Stringer for *user, so fmt.Println(u) prints this card.
func (u *user) String() string {
	return fmt.Sprintf("--- USER ---\nname:\t\t%v\nid:\t\t%v\nage:\t\t%v\nlocation:\t%v\n------------", u.Name(), u.id, u.age, u.location)
}

// Namer is an interface with a single method; any type defining
// Name() string satisfies it implicitly.
type Namer interface {
	Name() string
}

// Name returns the user's full name, satisfying Namer.
func (u *user) Name() string {
	return fmt.Sprintf("%s %s", u.firstName, u.lastName)
}

// Greetings is a <METHOD>: just a function with a RECEIVER ARGUMENT
// (u *user).
func (u *user) Greetings() string {
	return fmt.Sprintf("Hi %s from %s", u.Name(), u.location)
}

// Birth derives the birth year from the age.
// NOTE(review): the current year is hard-coded as 2022 — confirm whether
// time.Now().Year() should be used instead.
func (u *user) Birth() int {
	return 2022 - u.age // [-]
}
// <GENERIC>
// Generics introduced in Go v1.18.
// printAnySlice prints every element of a slice of any type on one line,
// separated by spaces, then a newline. T is inferred at the call site.
func printAnySlice[T any](s []T) {
	for _, v := range s {
		fmt.Print(v, " ")
	}
	fmt.Println("")
}
// main exercises every example defined above. Statement order is
// deliberate: the printed output reads as a tutorial transcript.
func main() {
	// [func] call
	fmt.Println("add(42, 13): ", add(42, 13))
	fmt.Println("mymath(12.0, 33.0): ")
	myMath(12.0, 33.0)
	a, b := sort(42, 13)
	fmt.Println("sort(42, 13): ", a, b)
	a, b = sort2(42, 13)
	fmt.Println("sort2(42, 13): ", a, b)
	fmt.Println("variadicfunc(1, 2, 3):")
	variadicFunc(1, 2, 3)
	var nums []int = []int{42, 57, 93}
	fmt.Println("variadicFunc(42, 57, 93):")
	variadicFunc(nums...) // [...] Var...: unpack operator
	age := 33
	fmt.Println("age: ", age)
	fmt.Println("inc(&age)")
	inc(&age) // get the <POINTER> of a value, use the & symbol
	fmt.Println("age: ", age)
	fmt.Println("dec(&age)")
	dec(&age) // get the <POINTER> of a value, use the & symbol
	fmt.Println("age: ", age)
	// Methods
	/*
		Instead of:
		me := &user{} //me := new(user)
		me.id = 1
		me.age = 127
		me.firstName = "Gábor"
		me.lastName = "Imolai"
		me.location = "Hungary"
	*/
	// We call "constructor".
	me := NewUser(1, 127, "Gábor", "Imolai", "Hungary")
	fmt.Println(me) // Print via Stringer.
	printAnySlice(nums)
}
package engine
import "math"
// Argument-interpretation modes for the trigonometric built-ins.
const (
	RadianMode = iota // arguments treated as radians (default)
	AngleMode         // arguments treated as degrees (presumed — see expr2Radian)
)

// TrigonometricMode selects the active mode: RadianMode or AngleMode.
// NOTE(review): presumably read by expr2Radian (defined elsewhere) when
// converting function arguments — confirm.
var TrigonometricMode = RadianMode
// defConst maps built-in constant names to their values.
var defConst = map[string]float64{
	"pi": math.Pi,
}

// defFunC maps each built-in function name to its required argument count.
var defFunC = map[string]int{
	"sin":   1,
	"cos":   1,
	"tan":   1,
	"cot":   1,
	"sec":   1,
	"csc":   1,
	"abs":   1,
	"ceil":  1,
	"floor": 1,
	"round": 1,
	"sqrt":  1,
	"cbrt":  1,
	"log":   1,
	"noerr": 1,
	"max":   2,
	"min":   2,
}

// defFunc maps each built-in function name to its implementation. It is
// populated in init rather than with a composite literal — presumably to
// break an initialization cycle between the map and the functions below;
// confirm.
var defFunc map[string]func(expr []ExprAST) float64

func init() {
	defFunc = map[string]func(expr []ExprAST) float64{
		"sin":   defSin,
		"cos":   defCos,
		"tan":   defTan,
		"cot":   defCot,
		"sec":   defSec,
		"csc":   defCsc,
		"abs":   defAbs,
		"ceil":  defCeil,
		"floor": defFloor,
		"round": defRound,
		"sqrt":  defSqrt,
		"cbrt":  defCbrt,
		"log":   defLog,
		"noerr": defNoerr,
		"max":   defMax,
		"min":   defMin,
	}
}
// Implementations of the built-in functions. Each receives its arguments
// as unevaluated ExprAST nodes, evaluates them via ExprASTResult, and
// returns a float64. Trigonometric functions route arguments through
// expr2Radian (defined elsewhere), which presumably honors
// TrigonometricMode — confirm.

// defLog is the natural logarithm. log(1) = 0
func defLog(expr []ExprAST) float64 {
	return math.Log(ExprASTResult(expr[0]))
}

// sin(pi/2) = 1
func defSin(expr []ExprAST) float64 {
	return math.Sin(expr2Radian(expr[0]))
}

// cos(0) = 1
func defCos(expr []ExprAST) float64 {
	return math.Cos(expr2Radian(expr[0]))
}

// tan(pi/4) = 1
func defTan(expr []ExprAST) float64 {
	return math.Tan(expr2Radian(expr[0]))
}

// defCot is the reciprocal of tan. cot(pi/4) = 1
func defCot(expr []ExprAST) float64 {
	return 1 / defTan(expr)
}

// defSec is the reciprocal of cos. sec(0) = 1
func defSec(expr []ExprAST) float64 {
	return 1 / defCos(expr)
}

// defCsc is the reciprocal of sin. csc(pi/2) = 1
func defCsc(expr []ExprAST) float64 {
	return 1 / defSin(expr)
}

// abs(-2) = 2
func defAbs(expr []ExprAST) float64 {
	return math.Abs(ExprASTResult(expr[0]))
}

// ceil(4.2) = ceil(4.8) = 5
func defCeil(expr []ExprAST) float64 {
	return math.Ceil(ExprASTResult(expr[0]))
}

// floor(4.2) = floor(4.8) = 4
func defFloor(expr []ExprAST) float64 {
	return math.Floor(ExprASTResult(expr[0]))
}

// round(4.2) = 4
// round(4.6) = 5
func defRound(expr []ExprAST) float64 {
	return math.Round(ExprASTResult(expr[0]))
}

// sqrt(4) = 2
func defSqrt(expr []ExprAST) float64 {
	return math.Sqrt(ExprASTResult(expr[0]))
}

// cbrt(27) = 3
func defCbrt(expr []ExprAST) float64 {
	return math.Cbrt(ExprASTResult(expr[0]))
}

// max(2, 3) = 3
func defMax(expr []ExprAST) float64 {
	return math.Max(p2(expr))
}

// min(2, 3) = 2
func defMin(expr []ExprAST) float64 {
	return math.Min(p2(expr))
}

// p2 evaluates the first two argument expressions, for the two-argument
// built-ins (max, min).
func p2(expr []ExprAST) (float64, float64) {
	return ExprASTResult(expr[0]), ExprASTResult(expr[1])
}

// defNoerr evaluates its argument but converts any panic raised during
// evaluation (e.g. division by zero in ExprASTResult) into a result of 0.
// The named result r lets the deferred recover overwrite the return value.
// noerr(1/0) = 0
// noerr(2.5/(1-1)) = 0
func defNoerr(expr []ExprAST) (r float64) {
	defer func() {
		if e := recover(); e != nil {
			r = 0
		}
	}()
	return ExprASTResult(expr[0])
}
package trees
import (
"errors"
"github.com/TectusDreamlab/go-common-utils/datastructure/shared"
)
// IndexedPriorityQueue defines an indexed priority queue based on a heap.
// Every value is addressed by a caller-chosen integer index in
// [0, capacity), which allows changing a value's priority or deleting it
// in O(log n). pq is a 1-indexed binary heap holding value indices, and
// qp is its inverse permutation: qp[pq[i]] == i.
type IndexedPriorityQueue struct {
	capacity int
	size int
	pq []int // 1-indexed binary heap of value indices.
	qp []int // Reverse index for pq: qp[pq[i]] == i; -1 when the slot is free.
	values []interface{} // Stored values, addressed directly by caller index.
	heapType HeapType // HeapTypeMin or HeapTypeMax ordering.
	comparator shared.Comparator
}

// NewIndexedPriorityQueue creates a new indexed priority queue with capacity, heapType and value comparator specified
func NewIndexedPriorityQueue(capacity int, heapType HeapType, comparator shared.Comparator) *IndexedPriorityQueue {
	return &IndexedPriorityQueue{
		capacity: capacity,
		size: 0,
		pq: make([]int, capacity+1), // Because it's used as a heap, so we go 1 indexed.
		qp: make([]int, capacity+1), // qp is a reversed index for pq. e.g. if qp[pq[index]] = index
		values: make([]interface{}, capacity),
		heapType: heapType,
		comparator: comparator,
	}
}

// isIndexValid checks whether the index falls into the range [0, capacity).
func (i *IndexedPriorityQueue) isIndexValid(index int) bool {
	return index >= 0 && index < i.capacity
}

// Contains check whether the queue contains a value with the given index.
// NOTE(review): index is not range-checked here, so an out-of-range index
// panics rather than returning false — confirm callers always validate.
func (i *IndexedPriorityQueue) Contains(index int) bool {
	return i.values[index] != nil
}

// Size returns the number of values stored in the queue.
func (i *IndexedPriorityQueue) Size() int {
	return i.size
}

// IsEmpty returns whether the queue is empty.
func (i *IndexedPriorityQueue) IsEmpty() bool {
	return i.size == 0
}

// Insert inserts a value into the queue with a given index; if the index is
// not valid or is already taken, an error is returned. The new entry is
// appended at the bottom of the heap and bubbled up.
func (i *IndexedPriorityQueue) Insert(index int, value interface{}) error {
	if !i.isIndexValid(index) {
		return errors.New("index out of range")
	}
	if i.Contains(index) {
		return errors.New("index is already used")
	}
	i.values[index] = value
	i.size++
	i.pq[i.size] = index
	i.qp[index] = i.size
	i.pqBubbleUp(i.size)
	return nil
}

// Peek peeks the priority queue, it returns the heap top value with it's index. if queue is empty, error will be returned.
func (i *IndexedPriorityQueue) Peek() (index int, value interface{}, err error) {
	if i.IsEmpty() {
		return -1, nil, errors.New("queue is empty")
	}
	return i.pq[1], i.values[i.pq[1]], nil
}

// Pop returns the heap top value of the queue and removes it: the top is
// swapped with the last heap slot, the heap shrinks by one, and the moved
// element is bubbled down. The freed caller index is marked unused
// (qp = -1, value = nil). Error is returned if the queue is empty.
func (i *IndexedPriorityQueue) Pop() (index int, value interface{}, err error) {
	if i.IsEmpty() {
		return -1, nil, errors.New("queue is empty")
	}
	index = i.pq[1]
	value = i.values[index]
	i.pq[1], i.pq[i.size] = i.pq[i.size], i.pq[1]
	i.qp[i.pq[1]] = 1
	i.qp[i.pq[i.size]] = i.size
	i.size--
	i.pqBubbleDown(1)
	i.qp[index] = -1
	i.values[index] = nil
	return
}

// GetValue gets value at a given index, if index is out of range or no value was added, error will be returned.
func (i *IndexedPriorityQueue) GetValue(index int) (interface{}, error) {
	if !i.isIndexValid(index) {
		return nil, errors.New("index out of range")
	}
	if !i.Contains(index) {
		return nil, errors.New("index does not have value")
	}
	return i.values[index], nil
}

// ChangeValue changes the value at a given index and restores heap order.
// Since the new value may compare either higher or lower than the old one,
// the entry is bubbled in both directions (only one will move it).
func (i *IndexedPriorityQueue) ChangeValue(index int, value interface{}) error {
	if !i.isIndexValid(index) {
		return errors.New("index out of range")
	}
	if !i.Contains(index) {
		return errors.New("index does not have value")
	}
	i.values[index] = value
	i.pqBubbleUp(i.qp[index])
	i.pqBubbleDown(i.qp[index])
	return nil
}

// DeleteValue deletes the value at a given index: its heap slot is swapped
// with the last one, the heap shrinks, and the displaced entry is bubbled
// in both directions to restore order. Errors mirror GetValue.
func (i *IndexedPriorityQueue) DeleteValue(index int) error {
	if !i.isIndexValid(index) {
		return errors.New("index out of range")
	}
	if !i.Contains(index) {
		return errors.New("index does not have value")
	}
	pqIndex := i.qp[index]
	i.pq[pqIndex], i.pq[i.size] = i.pq[i.size], i.pq[pqIndex]
	i.qp[i.pq[pqIndex]] = pqIndex
	i.qp[i.pq[i.size]] = i.size
	i.size--
	i.pqBubbleUp(pqIndex)
	i.pqBubbleDown(pqIndex)
	i.values[index] = nil
	i.qp[index] = -1
	return nil
}

// pqBubbleUp moves the heap entry at pq position index toward the root
// while it outranks its parent (position index/2), keeping qp in sync.
// Note: ties (comparator == 0) also swap.
func (i *IndexedPriorityQueue) pqBubbleUp(index int) {
	for index != 1 {
		if (i.heapType == HeapTypeMin && i.comparator(i.values[i.pq[index]], i.values[i.pq[index/2]]) <= 0) ||
			(i.heapType == HeapTypeMax && i.comparator(i.values[i.pq[index]], i.values[i.pq[index/2]]) >= 0) {
			i.pq[index], i.pq[index/2] = i.pq[index/2], i.pq[index]
			i.qp[i.pq[index]] = index
			i.qp[i.pq[index/2]] = index / 2
			index = index / 2
		} else {
			break
		}
	}
}

// pqBubbleDown moves the heap entry at pq position index toward the leaves,
// swapping with the higher-ranking of its two children (2i and 2i+1) until
// heap order holds, keeping qp in sync.
func (i *IndexedPriorityQueue) pqBubbleDown(index int) {
	for index*2 <= i.size {
		left := index * 2
		right := index*2 + 1
		indexToReplace := left
		if right <= i.size {
			if (i.heapType == HeapTypeMin && i.comparator(i.values[i.pq[left]], i.values[i.pq[right]]) >= 0) ||
				(i.heapType == HeapTypeMax && i.comparator(i.values[i.pq[left]], i.values[i.pq[right]]) <= 0) {
				indexToReplace = right
			}
		}
		if (i.heapType == HeapTypeMin && i.comparator(i.values[i.pq[index]], i.values[i.pq[indexToReplace]]) >= 0) ||
			(i.heapType == HeapTypeMax && i.comparator(i.values[i.pq[index]], i.values[i.pq[indexToReplace]]) <= 0) {
			i.pq[index], i.pq[indexToReplace] = i.pq[indexToReplace], i.pq[index]
			i.qp[i.pq[index]] = index
			i.qp[i.pq[indexToReplace]] = indexToReplace
			index = indexToReplace
		} else {
			break
		}
	}
}
package internal
import "github.com/mokiat/gomath/sprec"
// Initial capacities for a Contour's backing slices; sized generously so
// typical contours avoid reallocation.
const (
	initialContourPointCount = 1024
	initialContourSubContourCount = 4
)

// newContour allocates an empty Contour with pre-sized backing slices.
func newContour() *Contour {
	return &Contour{
		points: make([]ContourPoint, 0, initialContourPointCount),
		subContours: make([]SubContour, 0, initialContourSubContourCount),
	}
}

// Contour is a flattened (tessellated) path: a single point buffer shared
// by one or more sub-contours, each sub-contour being a contiguous run of
// points within it.
type Contour struct {
	points []ContourPoint
	subContours []SubContour
}

// Init resets the contour for reuse, keeping the allocated capacity.
func (c *Contour) Init() {
	c.points = c.points[:0]
	c.subContours = c.subContours[:0]
}

// MoveTo starts a new sub-contour at position.
func (c *Contour) MoveTo(position sprec.Vec2, stroke Stroke) {
	c.startSubContour()
	c.addPoint(ContourPoint{
		coords: position,
		stroke: stroke,
	})
}

// LineTo appends a straight segment to position on the current sub-contour.
func (c *Contour) LineTo(position sprec.Vec2, stroke Stroke) {
	c.addPoint(ContourPoint{
		coords: position,
		stroke: stroke,
	})
}

// QuadTo appends a quadratic Bézier segment from the current point through
// control to position, flattened into `tessellation` line segments. The
// point formula control + (1-t)²·(start-control) + t²·(end-control) is
// algebraically the standard quadratic Bézier
// (1-t)²·start + 2t(1-t)·control + t²·end. Stroke attributes are linearly
// interpolated along the curve.
func (c *Contour) QuadTo(control, position sprec.Vec2, stroke Stroke) {
	// TODO: Evaluate tessellation based on curvature and size
	const tessellation = 30
	lastPoint := c.lastPoint()
	vecCS := sprec.Vec2Diff(lastPoint.coords, control)
	vecCE := sprec.Vec2Diff(position, control)
	// start and end are excluded from this loop on purpose:
	// the start point is already in the buffer and the exact end point is
	// appended after the loop.
	for i := 1; i < tessellation; i++ {
		t := float32(i) / float32(tessellation)
		alpha := (1 - t) * (1 - t)
		beta := t * t
		c.addPoint(ContourPoint{
			coords: sprec.Vec2Sum(
				control,
				sprec.Vec2Sum(
					sprec.Vec2Prod(vecCS, alpha),
					sprec.Vec2Prod(vecCE, beta),
				),
			),
			stroke: mixStrokes(lastPoint.stroke, stroke, t),
		})
	}
	c.addPoint(ContourPoint{
		coords: position,
		stroke: stroke,
	})
}

// CubeTo appends a cubic Bézier segment from the current point through
// control1 and control2 to position, flattened into `tessellation` line
// segments using the Bernstein basis weights (1-t)³, 3(1-t)²t, 3(1-t)t²,
// t³. Stroke attributes are linearly interpolated along the curve.
func (c *Contour) CubeTo(control1, control2, position sprec.Vec2, stroke Stroke) {
	// TODO: Evaluate tessellation based on curvature and size
	const tessellation = 30
	lastPoint := c.lastPoint()
	// start and end are excluded from this loop on purpose (see QuadTo).
	for i := 1; i < tessellation; i++ {
		t := float32(i) / float32(tessellation)
		alpha := (1 - t) * (1 - t) * (1 - t)
		beta := 3 * (1 - t) * (1 - t) * t
		gamma := 3 * (1 - t) * t * t
		delta := t * t * t
		c.addPoint(ContourPoint{
			coords: sprec.Vec2Sum(
				sprec.Vec2Sum(
					sprec.Vec2Prod(lastPoint.coords, alpha),
					sprec.Vec2Prod(control1, beta),
				),
				sprec.Vec2Sum(
					sprec.Vec2Prod(control2, gamma),
					sprec.Vec2Prod(position, delta),
				),
			),
			stroke: mixStrokes(lastPoint.stroke, stroke, t),
		})
	}
	c.addPoint(ContourPoint{
		coords: position,
		stroke: stroke,
	})
}

// CloseLoop closes the current sub-contour by appending a copy of its
// first point.
func (c *Contour) CloseLoop() {
	lastSubContour := c.subContours[len(c.subContours)-1]
	c.addPoint(c.points[lastSubContour.pointOffset])
}

// startSubContour begins a new, empty sub-contour at the current end of
// the point buffer.
func (c *Contour) startSubContour() {
	c.subContours = append(c.subContours, SubContour{
		pointOffset: len(c.points),
		pointCount: 0,
	})
}

// addPoint appends a point to the buffer and attributes it to the current
// (last) sub-contour.
func (c *Contour) addPoint(point ContourPoint) {
	c.points = append(c.points, point)
	c.subContours[len(c.subContours)-1].pointCount++
}

// lastPoint returns the most recently added point; it panics on an empty
// contour (callers must MoveTo first).
func (c *Contour) lastPoint() ContourPoint {
	return c.points[len(c.points)-1]
}

// ContourPoint is a single tessellated vertex with its stroke attributes.
type ContourPoint struct {
	coords sprec.Vec2
	stroke Stroke
}

// SubContour identifies a contiguous run of points within Contour.points.
type SubContour struct {
	pointOffset int
	pointCount int
}

// Stroke holds per-point stroke attributes: inner/outer widths and color.
type Stroke struct {
	innerSize float32
	outerSize float32
	color sprec.Vec4
}

// mixStrokes linearly interpolates all stroke attributes between a and b
// by factor alpha in [0, 1] (alpha = 0 yields a, alpha = 1 yields b).
func mixStrokes(a, b Stroke, alpha float32) Stroke {
	return Stroke{
		innerSize: (1-alpha)*a.innerSize + alpha*b.innerSize,
		outerSize: (1-alpha)*a.outerSize + alpha*b.outerSize,
		color: sprec.Vec4Sum(
			sprec.Vec4Prod(a.color, (1-alpha)),
			sprec.Vec4Prod(b.color, alpha),
		),
	}
}
// Package write provides the Point struct
package write
import (
"fmt"
"sort"
"time"
lp "github.com/influxdata/line-protocol"
)
// Point represents an InfluxDB time-series point, holding a measurement
// name, tags, fields and a timestamp.
// NOTE(review): its method set appears to satisfy the line-protocol
// library's metric interface — confirm against that package.
type Point struct {
	measurement string
	tags []*lp.Tag
	fields []*lp.Field
	timestamp time.Time
}

// TagList returns a slice containing tags of a Point.
func (m *Point) TagList() []*lp.Tag {
	return m.tags
}

// FieldList returns a slice containing the fields of a Point.
func (m *Point) FieldList() []*lp.Field {
	return m.fields
}

// SetTime sets the timestamp of a Point and returns the Point for chaining.
func (m *Point) SetTime(timestamp time.Time) *Point {
	m.timestamp = timestamp
	return m
}

// Time is the timestamp of a Point.
func (m *Point) Time() time.Time {
	return m.timestamp
}

// SortTags orders the tags of a point alphanumerically by key.
// This is just here as a helper, to make it easy to keep tags sorted if you are creating a Point manually.
func (m *Point) SortTags() *Point {
	sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key })
	return m
}

// SortFields orders the fields of a point alphanumerically by key.
func (m *Point) SortFields() *Point {
	sort.Slice(m.fields, func(i, j int) bool { return m.fields[i].Key < m.fields[j].Key })
	return m
}

// AddTag adds a tag to a point, overwriting the value if the key already
// exists (found by linear scan). Returns the Point for chaining.
func (m *Point) AddTag(k, v string) *Point {
	for i, tag := range m.tags {
		if k == tag.Key {
			m.tags[i].Value = v
			return m
		}
	}
	m.tags = append(m.tags, &lp.Tag{Key: k, Value: v})
	return m
}

// AddField adds a field to a point, overwriting the value if the key
// already exists. New values are normalized via convertField. Returns the
// Point for chaining.
// NOTE(review): on the overwrite path the raw value is stored without
// convertField — confirm whether that asymmetry is intended.
func (m *Point) AddField(k string, v interface{}) *Point {
	for i, field := range m.fields {
		if k == field.Key {
			m.fields[i].Value = v
			return m
		}
	}
	m.fields = append(m.fields, &lp.Field{Key: k, Value: convertField(v)})
	return m
}

// Name returns the name of measurement of a point.
func (m *Point) Name() string {
	return m.measurement
}

// NewPointWithMeasurement creates a empty Point.
// Use AddTag and AddField to fill point with data.
func NewPointWithMeasurement(measurement string) *Point {
	return &Point{measurement: measurement}
}

// NewPoint creates a Point from measurement name, tags, fields and a
// timestamp. Tags and fields are sorted by key before returning.
func NewPoint(
	measurement string,
	tags map[string]string,
	fields map[string]interface{},
	ts time.Time,
) *Point {
	m := &Point{
		measurement: measurement,
		tags: nil,
		fields: nil,
		timestamp: ts,
	}
	if len(tags) > 0 {
		m.tags = make([]*lp.Tag, 0, len(tags))
		for k, v := range tags {
			m.tags = append(m.tags,
				&lp.Tag{Key: k, Value: v})
		}
	}
	m.fields = make([]*lp.Field, 0, len(fields))
	for k, v := range fields {
		v := convertField(v)
		// NOTE(review): convertField never returns nil (its default branch
		// stringifies), so this guard appears to be dead code — confirm
		// before removing.
		if v == nil {
			continue
		}
		m.fields = append(m.fields, &lp.Field{Key: k, Value: v})
	}
	m.SortFields()
	m.SortTags()
	return m
}
// convertField converts any primitive type to types supported by line protocol
func convertField(v interface{}) interface{} {
switch v := v.(type) {
case bool, int64, string, float64:
return v
case int:
return int64(v)
case uint:
return uint64(v)
case uint64:
return v
case []byte:
return string(v)
case int32:
return int64(v)
case int16:
return int64(v)
case int8:
return int64(v)
case uint32:
return uint64(v)
case uint16:
return uint64(v)
case uint8:
return uint64(v)
case float32:
return float64(v)
case time.Time:
return v.Format(time.RFC3339Nano)
case time.Duration:
return v.String()
default:
return fmt.Sprintf("%v", v)
}
} | vendor/github.com/influxdata/influxdb-client-go/v2/api/write/point.go | 0.792705 | 0.50653 | point.go | starcoder |
package main
/*****************************************************************************************************
*
* Given a reference of a node in a connected undirected graph.
*
* Return a deep copy (clone) of the graph.
*
* Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
*
* class Node {
* public int val;
* public List<Node> neighbors;
* }
*
* Test case format:
*
* For simplicity sake, each node's value is the same as the node's index (1-indexed). For example,
* the first node with val = 1, the second node with val = 2, and so on. The graph is represented in
* the test case using an adjacency list.
*
* Adjacency list is a collection of unordered lists used to represent a finite graph. Each list
* describes the set of neighbors of a node in the graph.
*
* The given node will always be the first node with val = 1. You must return the copy of the given
* node as a reference to the cloned graph.
*
* Example 1:
*
* Input: adjList = [[2,4],[1,3],[2,4],[1,3]]
* Output: [[2,4],[1,3],[2,4],[1,3]]
* Explanation: There are 4 nodes in the graph.
* 1st node (val = 1)'s neighbors are 2nd node (val = 2) and 4th node (val = 4).
* 2nd node (val = 2)'s neighbors are 1st node (val = 1) and 3rd node (val = 3).
* 3rd node (val = 3)'s neighbors are 2nd node (val = 2) and 4th node (val = 4).
* 4th node (val = 4)'s neighbors are 1st node (val = 1) and 3rd node (val = 3).
*
* Example 2:
*
* Input: adjList = [[]]
* Output: [[]]
* Explanation: Note that the input contains one empty list. The graph consists of only one node with
* val = 1 and it does not have any neighbors.
*
* Example 3:
*
* Input: adjList = []
* Output: []
* Explanation: This an empty graph, it does not have any nodes.
*
* Example 4:
*
* Input: adjList = [[2],[1]]
* Output: [[2],[1]]
*
* Constraints:
*
* 1 <= Node.val <= 100
* Node.val is unique for each node.
* Number of Nodes will not exceed 100.
* There is no repeated edges and no self-loops in the graph.
* The Graph is connected and all nodes can be visited starting from the given node.
******************************************************************************************************/
// Node is a vertex of an undirected graph: an integer value plus the list
// of adjacent nodes.
type Node struct {
	Val       int
	Neighbors []*Node
}

// cloneGraph returns a deep copy of the connected, undirected graph
// reachable from node, or nil for a nil input.
//
// Approach: breadth-first traversal. Because the graph may contain cycles,
// a map from original node to its clone doubles as the visited set, so
// every node is copied exactly once and edges into already-cloned nodes
// reuse the existing clone.
// TODO: add a DFS variant (BFS avoids deep recursion but is wordier).
func cloneGraph(node *Node) *Node {
	if node == nil {
		return nil
	}
	clones := map[*Node]*Node{
		node: {Val: node.Val, Neighbors: []*Node{}},
	}
	queue := []*Node{node}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		for _, nb := range cur.Neighbors {
			if _, visited := clones[nb]; !visited {
				clones[nb] = &Node{Val: nb.Val, Neighbors: []*Node{}}
				queue = append(queue, nb)
			}
			clones[cur].Neighbors = append(clones[cur].Neighbors, clones[nb])
		}
	}
	return clones[node]
}
package httpfake
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"strings"
"testing"
)
// assertErrorTemplate is the common format used when reporting a failed
// assertion via testing.TB.Errorf.
const assertErrorTemplate = "assertion error: %s"

// Assertor provides an interface for setting assertions for http requests.
// Assert performs the check, Log announces it, and Error reports a failure.
type Assertor interface {
	Assert(r *http.Request) error
	Log(t testing.TB)
	Error(t testing.TB, err error)
}

// requiredHeaders provides an Assertor for the presence of the provided http header keys.
type requiredHeaders struct {
	Keys []string
}

// Assert runs the required headers assertion against the provided request.
// All missing keys are collected and reported in a single error.
func (h *requiredHeaders) Assert(r *http.Request) error {
	var missingHeaders []string
	for _, key := range h.Keys {
		if value := r.Header.Get(key); len(value) == 0 {
			missingHeaders = append(missingHeaders, key)
		}
	}
	if len(missingHeaders) > 0 {
		return fmt.Errorf("missing required header(s): %s", strings.Join(missingHeaders, ", "))
	}
	return nil
}

// Log prints a testing info log for the requiredHeaders Assertor
func (h *requiredHeaders) Log(t testing.TB) {
	t.Log("Testing request for required headers")
}

// Error prints a testing error for the requiredHeaders Assertor
func (h *requiredHeaders) Error(t testing.TB, err error) {
	t.Errorf(assertErrorTemplate, err)
}

// requiredHeaderValue provides an Assertor for a header and its expected value
type requiredHeaderValue struct {
	Key string
	ExpectedValue string
}

// Assert runs the required header value assertion against the provided
// request (exact, case-sensitive value comparison).
func (h *requiredHeaderValue) Assert(r *http.Request) error {
	if value := r.Header.Get(h.Key); value != h.ExpectedValue {
		return fmt.Errorf("header %s does not have the expected value; expected %s to equal %s",
			h.Key,
			value,
			h.ExpectedValue)
	}
	return nil
}

// Log prints a testing info log for the requiredHeaderValue Assertor
func (h *requiredHeaderValue) Log(t testing.TB) {
	t.Logf("Testing request for a required header value [%s: %s]", h.Key, h.ExpectedValue)
}

// Error prints a testing error for the requiredHeaderValue Assertor
func (h *requiredHeaderValue) Error(t testing.TB, err error) {
	t.Errorf(assertErrorTemplate, err)
}

// requiredQueries provides an Assertor for the presence of the provided query parameter keys
type requiredQueries struct {
	Keys []string
}

// Assert runs the required queries assertion against the provided request.
// All missing parameters are collected and reported in a single error.
func (q *requiredQueries) Assert(r *http.Request) error {
	queryVals := r.URL.Query()
	var missingParams []string
	for _, key := range q.Keys {
		if value := queryVals.Get(key); len(value) == 0 {
			missingParams = append(missingParams, key)
		}
	}
	if len(missingParams) > 0 {
		return fmt.Errorf("missing required query parameter(s): %s", strings.Join(missingParams, ", "))
	}
	return nil
}

// Log prints a testing info log for the requiredQueries Assertor
func (q *requiredQueries) Log(t testing.TB) {
	t.Log("Testing request for required query parameters")
}

// Error prints a testing error for the requiredQueries Assertor
func (q *requiredQueries) Error(t testing.TB, err error) {
	t.Errorf(assertErrorTemplate, err)
}

// requiredQueryValue provides an Assertor for a query parameter and its expected value
type requiredQueryValue struct {
	Key string
	ExpectedValue string
}

// Assert runs the required query value assertion against the provided
// request (exact, case-sensitive value comparison).
func (q *requiredQueryValue) Assert(r *http.Request) error {
	if value := r.URL.Query().Get(q.Key); value != q.ExpectedValue {
		return fmt.Errorf("query %s does not have the expected value; expected %s to equal %s", q.Key, value, q.ExpectedValue)
	}
	return nil
}

// Log prints a testing info log for the requiredQueryValue Assertor
func (q *requiredQueryValue) Log(t testing.TB) {
	t.Logf("Testing request for a required query parameter value [%s: %s]", q.Key, q.ExpectedValue)
}

// Error prints a testing error for the requiredQueryValue Assertor
func (q *requiredQueryValue) Error(t testing.TB, err error) {
	t.Errorf(assertErrorTemplate, err)
}

// requiredBody provides an Assertor for the expected value of the request body
type requiredBody struct {
	ExpectedBody []byte
}

// Assert runs the required body assertion against the provided request.
// Note: this consumes r.Body, so later readers will see an empty body.
// NOTE(review): bytes.EqualFold compares with Unicode case folding, i.e.
// the body match is case-insensitive — confirm that is intended.
func (b *requiredBody) Assert(r *http.Request) error {
	if r.Body == nil {
		return fmt.Errorf("error reading the request body; the request body is nil")
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return fmt.Errorf("error reading the request body: %s", err.Error())
	}
	if !bytes.EqualFold(b.ExpectedBody, body) {
		return fmt.Errorf("request body does not have the expected value; expected %s to equal %s",
			string(body[:]),
			string(b.ExpectedBody[:]))
	}
	return nil
}

// Log prints a testing info log for the requiredBody Assertor
func (b *requiredBody) Log(t testing.TB) {
	t.Log("Testing request for a required body value")
}

// Error prints a testing error for the requiredBody Assertor
func (b *requiredBody) Error(t testing.TB, err error) {
	t.Errorf(assertErrorTemplate, err)
}

// CustomAssertor provides a function signature that implements the Assertor interface. This allows for
// adhoc creation of a custom assertion for use with the AssertCustom assertor.
type CustomAssertor func(r *http.Request) error

// Assert runs the CustomAssertor assertion against the provided request
func (c CustomAssertor) Assert(r *http.Request) error {
	return c(r)
}

// Log prints a testing info log for the CustomAssertor
func (c CustomAssertor) Log(t testing.TB) {
	t.Log("Testing request with a custom assertor")
}

// Error prints a testing error for the CustomAssertor
func (c CustomAssertor) Error(t testing.TB, err error) {
	t.Errorf(assertErrorTemplate, err)
}
package pipe
import (
"github.com/modfin/henry/slicez"
)
// Of wraps a slice in a Pipe so slicez operations can be chained fluently.
func Of[A any](a []A) Pipe[A] {
	return Pipe[A]{
		in: a,
	}
}

// Pipe is a thin wrapper around a slice; every operation delegates to the
// corresponding slicez function and, where applicable, wraps the result in
// a new Pipe for further chaining.
type Pipe[A any] struct {
	in []A
}

// Slice unwraps the pipe, returning the underlying slice.
func (p Pipe[A]) Slice() []A {
	return p.in
}

// Peek applies apply to every element for its side effects and returns the
// pipe unchanged.
func (p Pipe[A]) Peek(apply func(a A)) Pipe[A] {
	slicez.Each(p.in, apply)
	return p
}

// Concat appends the given slices after the pipe's elements.
func (p Pipe[A]) Concat(slices ...[]A) Pipe[A] {
	return Of(slicez.Concat(append([][]A{p.in}, slices...)...))
}

// Tail returns all elements except the first.
func (p Pipe[A]) Tail() Pipe[A] {
	return Of(slicez.Tail(p.in))
}

// Head returns the first element, or an error if the pipe is empty.
func (p Pipe[A]) Head() (A, error) {
	return slicez.Head(p.in)
}

// Last returns the final element, or an error if the pipe is empty.
func (p Pipe[A]) Last() (A, error) {
	return slicez.Last(p.in)
}

// Reverse returns the elements in reverse order.
func (p Pipe[A]) Reverse() Pipe[A] {
	return Of(slicez.Reverse(p.in))
}

// Nth returns the element at position i.
// NOTE(review): out-of-range behavior is whatever slicez.Nth does — confirm.
func (p Pipe[A]) Nth(i int) A {
	return slicez.Nth(p.in, i)
}

// Take keeps the first i elements.
func (p Pipe[A]) Take(i int) Pipe[A] {
	return Of(slicez.Take(p.in, i))
}

// TakeRight keeps the last i elements.
func (p Pipe[A]) TakeRight(i int) Pipe[A] {
	return Of(slicez.TakeRight(p.in, i))
}

// TakeWhile keeps leading elements while take returns true.
func (p Pipe[A]) TakeWhile(take func(a A) bool) Pipe[A] {
	return Of(slicez.TakeWhile(p.in, take))
}

// TakeRightWhile keeps trailing elements while take returns true.
func (p Pipe[A]) TakeRightWhile(take func(a A) bool) Pipe[A] {
	return Of(slicez.TakeRightWhile(p.in, take))
}

// Drop removes the first i elements.
func (p Pipe[A]) Drop(i int) Pipe[A] {
	return Of(slicez.Drop(p.in, i))
}

// DropRight removes the last i elements.
func (p Pipe[A]) DropRight(i int) Pipe[A] {
	return Of(slicez.DropRight(p.in, i))
}

// DropWhile removes leading elements while drop returns true.
func (p Pipe[A]) DropWhile(drop func(a A) bool) Pipe[A] {
	return Of(slicez.DropWhile(p.in, drop))
}

// DropRightWhile removes trailing elements while drop returns true.
func (p Pipe[A]) DropRightWhile(drop func(a A) bool) Pipe[A] {
	return Of(slicez.DropRightWhile(p.in, drop))
}

// Filter keeps only the elements for which include returns true.
func (p Pipe[A]) Filter(include func(a A) bool) Pipe[A] {
	return Of(slicez.Filter(p.in, include))
}

// Reject drops the elements for which exclude returns true.
func (p Pipe[A]) Reject(exclude func(a A) bool) Pipe[A] {
	return Of(slicez.Reject(p.in, exclude))
}

// Map transforms every element. The result element type is fixed to A
// because Go methods cannot declare additional type parameters.
func (p Pipe[A]) Map(f func(a A) A) Pipe[A] {
	return Of(slicez.Map(p.in, f))
}

// Fold reduces the pipe left-to-right, seeded with accumulator.
func (p Pipe[A]) Fold(combined func(accumulator A, val A) A, accumulator A) A {
	return slicez.Fold(p.in, combined, accumulator)
}
// FoldRight reduces the pipe right-to-left (from the last element toward
// the first), seeded with accumulator.
func (p Pipe[A]) FoldRight(combined func(accumulator A, val A) A, accumulator A) A {
	// Bug fix: this previously delegated to slicez.Fold (a left fold),
	// making FoldRight behave identically to Fold.
	return slicez.FoldRight(p.in, combined, accumulator)
}
// Every reports whether predicate holds for all elements.
func (p Pipe[A]) Every(predicate func(a A) bool) bool {
	return slicez.EveryFunc(p.in, predicate)
}

// Some reports whether predicate holds for at least one element.
func (p Pipe[A]) Some(predicate func(a A) bool) bool {
	return slicez.SomeFunc(p.in, predicate)
}

// None reports whether predicate holds for no element.
func (p Pipe[A]) None(predicate func(a A) bool) bool {
	return slicez.NoneFunc(p.in, predicate)
}

// Partition splits the elements into those satisfying predicate and those
// that do not. Note it returns raw slices, not Pipes, ending the chain.
func (p Pipe[A]) Partition(predicate func(a A) bool) (satisfied, notSatisfied []A) {
	return slicez.Partition(p.in, predicate)
}

// Sample returns n randomly chosen elements.
func (p Pipe[A]) Sample(n int) Pipe[A] {
	return Of(slicez.Sample(p.in, n))
}

// Shuffle returns the elements in random order.
func (p Pipe[A]) Shuffle() Pipe[A] {
	return Of(slicez.Shuffle(p.in))
}

// SortFunc sorts the elements using the provided less function.
func (p Pipe[A]) SortFunc(less func(a, b A) bool) Pipe[A] {
	return Of(slicez.SortFunc(p.in, less))
}

// Compact collapses consecutive elements considered equal by the provided
// function into a single element.
func (p Pipe[A]) Compact(equal func(a, b A) bool) Pipe[A] {
	return Of(slicez.CompactFunc(p.in, equal))
}

// Count returns the number of elements in the pipe.
func (p Pipe[A]) Count() int {
	return len(p.in)
}
package graphic
import (
"github.com/kasworld/h4o/geometry"
"github.com/kasworld/h4o/gls"
"github.com/kasworld/h4o/material"
"github.com/kasworld/h4o/math32"
"github.com/kasworld/h4o/node"
"github.com/kasworld/h4o/renderinfo"
)
// Mesh is a Graphic with uniforms for the model, view, projection, and normal matrices.
type Mesh struct {
Graphic // Embedded graphic
uniMm gls.Uniform // Model matrix uniform location cache
uniMVm gls.Uniform // Model view matrix uniform location cache
uniMVPm gls.Uniform // Model view projection matrix uniform cache
uniNm gls.Uniform // Normal matrix uniform cache
}
// NewMesh creates and returns a pointer to a mesh with the specified geometry and material.
// If the mesh has multi materials, the material specified here must be nil and the
// individual materials must be add using "AddMaterial" or AddGroupMaterial".
func NewMesh(igeom geometry.GeometryI, imat material.MaterialI) *Mesh {
m := new(Mesh)
m.Init(igeom, imat)
return m
}
// Init initializes the Mesh as a triangle graphic and sets up its uniform
// location caches. When imat is non-nil it is installed as the single
// material covering all vertices.
func (m *Mesh) Init(igeom geometry.GeometryI, imat material.MaterialI) {
	m.Graphic.Init(m, igeom, gls.TRIANGLES)
	// Initialize the matrix uniform location caches.
	for _, u := range []struct {
		uni  *gls.Uniform
		name string
	}{
		{&m.uniMm, "ModelMatrix"},
		{&m.uniMVm, "ModelViewMatrix"},
		{&m.uniMVPm, "MVP"},
		{&m.uniNm, "NormalMatrix"},
	} {
		u.uni.Init(u.name)
	}
	// A single non-nil material applies to all vertices.
	if imat != nil {
		m.AddMaterial(imat, 0, 0)
	}
}
// SetMaterial clears all materials and adds the specified material for all vertices.
func (m *Mesh) SetMaterial(imat material.MaterialI) {
	m.Graphic.ClearMaterials()
	m.Graphic.AddMaterial(m, imat, 0, 0)
}
// AddMaterial adds a material for the specified subset of vertices,
// starting at vertex index start and spanning count vertices.
func (m *Mesh) AddMaterial(imat material.MaterialI, start, count int) {
	m.Graphic.AddMaterial(m, imat, start, count)
}
// AddGroupMaterial adds a material for the geometry group with index gindex.
func (m *Mesh) AddGroupMaterial(imat material.MaterialI, gindex int) {
	m.Graphic.AddGroupMaterial(m, imat, gindex)
}
// Clone clones the mesh and satisfies the NodeI interface.
// The embedded Graphic is deep-cloned and the uniform caches are
// re-initialized for the new instance.
func (m *Mesh) Clone() node.NodeI {
	clone := new(Mesh)
	clone.Graphic = *m.Graphic.Clone().(*Graphic)
	clone.SetIGraphic(clone)
	// Re-initialize the matrix uniform location caches for the clone.
	for _, u := range []struct {
		uni  *gls.Uniform
		name string
	}{
		{&clone.uniMm, "ModelMatrix"},
		{&clone.uniMVm, "ModelViewMatrix"},
		{&clone.uniMVPm, "MVP"},
		{&clone.uniNm, "NormalMatrix"},
	} {
		u.uni.Init(u.name)
	}
	return clone
}
// RenderSetup is called by the engine before drawing the mesh geometry
// It is responsible to updating the current shader uniforms with
// the model matrices.
func (m *Mesh) RenderSetup(gs *gls.GLS, rinfo *renderinfo.RenderInfo) {
// Transfer uniform for model matrix
mm := m.ModelMatrix()
location := m.uniMm.Location(gs)
gs.UniformMatrix4fv(location, 1, false, &mm[0])
// Transfer uniform for model view matrix
mvm := m.ModelViewMatrix()
location = m.uniMVm.Location(gs)
gs.UniformMatrix4fv(location, 1, false, &mvm[0])
// Transfer uniform for model view projection matrix
mvpm := m.ModelViewProjectionMatrix()
location = m.uniMVPm.Location(gs)
gs.UniformMatrix4fv(location, 1, false, &mvpm[0])
// Calculates normal matrix and transfer uniform
var nm math32.Matrix3
nm.GetNormalMatrix(mvm)
location = m.uniNm.Location(gs)
gs.UniformMatrix3fv(location, 1, false, &nm[0])
} | graphic/mesh.go | 0.778944 | 0.504455 | mesh.go | starcoder |
package envh
import (
"reflect"
)
// StructWalker must be implemented, when using PopulateStruct* functions,
// to be able to set a value for a custom field with an unsupported field (a map for instance),
// to add transformation before setting a field or for custom validation purpose.
// Walk is called while the struct is populated, once per struct field, when a matching is
// made with an EnvTree node. Two parameters are given: tree represents the whole parsed
// tree and keyChain is the path leading to the node in tree.
// Returning true as first parameter bypasses the default walking process for that field,
// and false does not — so it is possible to completely control how some parts of a
// structure are defined, or only to add checking and let the regular process do its job.
type StructWalker interface {
	Walk(tree *EnvTree, keyChain []string) (bypassWalkingProcess bool, err error)
}
// entry is a work item for the iterative struct traversal: a struct type,
// its addressable value, and the environment key chain leading to it.
type entry struct {
	typ reflect.Type // struct type to populate
	value reflect.Value // addressable struct value
	chain []string // env key chain from the root to this struct
}
// populateInt resolves the node addressed by keyChain as an int and stores it
// into val. A lookup failure aborts when forceDefinition is set, or when the
// variable exists but has an incompatible type; otherwise the zero value
// returned by the lookup is stored.
func populateInt(forceDefinition bool, tree *EnvTree, val reflect.Value, keyChain []string) error {
	v, err := tree.FindInt(keyChain...)
	if err != nil {
		if forceDefinition {
			return err
		}
		if _, wrongType := err.(WrongTypeError); wrongType {
			return err
		}
	}
	val.SetInt(int64(v))
	return nil
}
// populateFloat resolves the node addressed by keyChain as a float and stores
// it into val. A lookup failure aborts when forceDefinition is set, or when
// the variable exists but has an incompatible type; otherwise the zero value
// returned by the lookup is stored.
func populateFloat(forceDefinition bool, tree *EnvTree, val reflect.Value, keyChain []string) error {
	v, err := tree.FindFloat(keyChain...)
	if err != nil {
		if forceDefinition {
			return err
		}
		if _, wrongType := err.(WrongTypeError); wrongType {
			return err
		}
	}
	val.SetFloat(float64(v))
	return nil
}
// populateString resolves the node addressed by keyChain as a string and
// stores it into val. A lookup failure aborts only when forceDefinition is
// set; otherwise the empty string returned by the lookup is stored.
// Strings cannot fail type conversion, so no WrongTypeError check is needed.
func populateString(forceDefinition bool, tree *EnvTree, val reflect.Value, keyChain []string) error {
	v, err := tree.FindString(keyChain...)
	if err != nil && forceDefinition {
		return err
	}
	val.SetString(v)
	return nil
}
// populateBool resolves the node addressed by keyChain as a bool and stores
// it into val. A lookup failure aborts when forceDefinition is set, or when
// the variable exists but has an incompatible type; otherwise the zero value
// returned by the lookup is stored.
func populateBool(forceDefinition bool, tree *EnvTree, val reflect.Value, keyChain []string) error {
	v, err := tree.FindBool(keyChain...)
	if err != nil {
		if forceDefinition {
			return err
		}
		if _, wrongType := err.(WrongTypeError); wrongType {
			return err
		}
	}
	val.SetBool(v)
	return nil
}
// populateRegularType populates a single struct field according to its kind.
// Nested structs are queued onto entries so the traversal stays iterative;
// supported scalar kinds are populated immediately. Unsupported kinds yield
// a TypeUnsupported error listing the accepted kinds.
func populateRegularType(entries *[]entry, tree *EnvTree, val reflect.Value, valKeyChain []string, forceDefinition bool) error {
	switch val.Type().Kind() {
	case reflect.Struct:
		// Defer nested structs; they are processed later by populateStruct.
		*entries = append(*entries, entry{val.Type(), val, valKeyChain})
		return nil
	case reflect.Int:
		return populateInt(forceDefinition, tree, val, valKeyChain)
	case reflect.Float32:
		return populateFloat(forceDefinition, tree, val, valKeyChain)
	case reflect.String:
		return populateString(forceDefinition, tree, val, valKeyChain)
	case reflect.Bool:
		return populateBool(forceDefinition, tree, val, valKeyChain)
	default:
		// The supported integer kind is reflect.Int (Go's plain "int"),
		// so the message must say "int", not "int32".
		return TypeUnsupported{val.Type().Kind().String(), "int, float32, string, boolean or struct"}
	}
}
// callStructMethodWalk invokes the StructWalker hook when origStruct
// implements it. When the struct is not a StructWalker, the default
// walking process simply continues (false, nil).
func callStructMethodWalk(origStruct interface{}, tree *EnvTree, keyChain []string) (bool, error) {
	walker, ok := origStruct.(StructWalker)
	if !ok {
		return false, nil
	}
	return walker.Walk(tree, keyChain)
}
// populateStruct processes the first queued entry: it walks that entry's
// struct fields, gives the optional StructWalker hook a chance to handle
// each field, and otherwise populates the field via populateRegularType
// (which queues nested structs back onto entries).
func populateStruct(entries *[]entry, origStruct interface{}, tree *EnvTree, forceDefinition bool) error {
	var err error
	var ok bool
	var val reflect.Value
	var valKeyChain []string
	typ := (*entries)[0].typ
	value := (*entries)[0].value
	chain := (*entries)[0].chain
	// Dequeue the entry being processed (FIFO order).
	(*entries) = append([]entry{}, (*entries)[1:]...)
	for i := 0; i < typ.NumField(); i++ {
		val = value.Field(i)
		// Extend the key chain with the field name; copy so later
		// appends cannot alias a previously stored chain.
		valKeyChain = append([]string{}, append(chain, typ.Field(i).Name)...)
		ok, err = callStructMethodWalk(origStruct, tree, valKeyChain)
		if err != nil {
			return err
		}
		if ok {
			// The walker handled this field entirely; skip default logic.
			continue
		}
		if err = populateRegularType(entries, tree, val, valKeyChain, forceDefinition); err != nil {
			return err
		}
	}
	return nil
}
// isPointerToStruct reports whether data is a pointer to a struct.
func isPointerToStruct(data interface{}) bool {
	t := reflect.TypeOf(data)
	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
func populateStructFromEnvTree(origStruct interface{}, tree *EnvTree, forceDefinition bool) error {
if !isPointerToStruct(origStruct) {
return TypeUnsupported{reflect.TypeOf(origStruct).Kind().String(), "pointer to struct"}
}
entries := []entry{{reflect.TypeOf(origStruct).Elem(), reflect.ValueOf(origStruct).Elem(), []string{reflect.TypeOf(origStruct).Elem().Name()}}}
for {
err := populateStruct(&entries, origStruct, tree, forceDefinition)
if err != nil {
return err
}
if len(entries) == 0 {
return nil
}
}
} | struct.go | 0.684264 | 0.451568 | struct.go | starcoder |
package log
import "strconv"
// FieldType defines the type of a logging field's payload.
type FieldType uint8

// The supported field payload types.
const (
	UnknownFieldType FieldType = iota
	BinaryType
	IntegerType
	FloatType
	StringType
	BoolType
)

// Field encapsulates a single typed logging field. Only the member selected
// by Type carries the field's payload; the remaining members stay at their
// zero values.
type Field struct {
	Key     string    `json:"key"`
	Type    FieldType `json:"type,omitempty"`
	Binary  []byte    `json:"binary,omitempty"`
	Integer int64     `json:"integer,omitempty"`
	Float   float64   `json:"float,omitempty"`
	String  string    `json:"string,omitempty"`
	Bool    bool      `json:"bool,omitempty"`
}

// Value returns the field's payload rendered as a string, or "unknown"
// for an unrecognized Type.
func (f Field) Value() string {
	switch f.Type {
	case BinaryType:
		// string(f.Binary) copies the bytes; the [:] re-slice was redundant.
		return string(f.Binary)
	case IntegerType:
		return strconv.FormatInt(f.Integer, 10)
	case FloatType:
		// Fixed-point with 6 decimals, matching the historical output format.
		return strconv.FormatFloat(f.Float, 'f', 6, 64)
	case StringType:
		return f.String
	case BoolType:
		// strconv.FormatBool replaces the hand-rolled true/false branches.
		return strconv.FormatBool(f.Bool)
	default:
		return "unknown"
	}
}
// Tag returns a keyless field that contains string data.
func Tag(s string) Field {
	return Field{Type: StringType, String: s}
}
// Binary returns a field that contains bytes data.
func Binary(k string, b []byte) Field {
	return Field{Key: k, Type: BinaryType, Binary: b}
}
// String returns a field that contains string data.
func String(k string, s string) Field {
	return Field{Key: k, Type: StringType, String: s}
}
// Bool returns a field that contains boolean data.
func Bool(k string, b bool) Field {
	return Field{Key: k, Type: BoolType, Bool: b}
}
// Int returns a field that contains integer data.
func Int(k string, i int) Field {
	return Int64(k, int64(i))
}
// Int8 returns a field that contains integer data.
func Int8(k string, i int8) Field {
	return Int64(k, int64(i))
}
// Int16 returns a field that contains integer data.
func Int16(k string, i int16) Field {
	return Int64(k, int64(i))
}
// Int32 returns a field that contains integer data.
func Int32(k string, i int32) Field {
	return Int64(k, int64(i))
}
// Int64 returns a field that contains integer data.
func Int64(k string, i int64) Field {
	return Field{Key: k, Type: IntegerType, Integer: i}
}
// Uint returns a field that contains integer data.
func Uint(k string, i uint) Field {
	return Uint64(k, uint64(i))
}
// Uint8 returns a field that contains integer data.
func Uint8(k string, i uint8) Field {
	return Uint64(k, uint64(i))
}
// Uint16 returns a field that contains integer data.
func Uint16(k string, i uint16) Field {
	return Uint64(k, uint64(i))
}
// Uint32 returns a field that contains integer data.
func Uint32(k string, i uint32) Field {
	return Uint64(k, uint64(i))
}
// Uint64 returns a field that contains integer data.
// NOTE(review): the payload is stored as int64, so values above
// math.MaxInt64 wrap to negative numbers — confirm this is acceptable.
func Uint64(k string, i uint64) Field {
	return Int64(k, int64(i))
}
// Float32 returns a field that contains float data.
func Float32(k string, f float32) Field {
	return Float64(k, float64(f))
}
// Float64 returns a field that contains float data.
func Float64(k string, f float64) Field {
	return Field{Key: k, Type: FloatType, Float: f}
}
package economist
import (
"github.com/coschain/contentos-go/app"
"github.com/coschain/contentos-go/common/constants"
. "github.com/coschain/contentos-go/dandelion"
"github.com/coschain/contentos-go/prototype"
"github.com/stretchr/testify/assert"
"math"
"math/big"
"testing"
)
// ReplyTester holds the dandelion accounts shared by the reply economist
// scenarios: acc0/acc1 author posts and vote, acc2 is registered as a
// block producer in the Test* entry points.
type ReplyTester struct {
	acc0,acc1,acc2 *DandelionAccount
}
// Test1 wires up the accounts, registers acc2 as block producer, vests
// acc0/acc1 and runs the basic reply cashout timing scenario.
func (tester *ReplyTester) Test1(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	registerBlockProducer(tester.acc2, t)
	const VEST = 1000
	SelfTransferToVesting([]*DandelionAccount{tester.acc0, tester.acc1}, VEST, t)
	t.Run("normal", d.Test(tester.normal))
}
// Test2 runs the reward accounting scenarios: a single cashout, a cashout
// after another one has already claimed rewards, and multiple simultaneous
// cashouts.
func (tester *ReplyTester) Test2(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	registerBlockProducer(tester.acc2, t)
	const VEST = 1000
	SelfTransferToVesting([]*DandelionAccount{tester.acc0, tester.acc1}, VEST, t)
	t.Run("cashout", d.Test(tester.cashout))
	t.Run("cashout after other cashout", d.Test(tester.cashoutAfterOtherCashout))
	t.Run("mul cashout", d.Test(tester.multiCashout))
}
// Test3 runs the boundary scenarios for the global weighted-vp accumulator:
// an extremely large value and a zero value.
func (tester *ReplyTester) Test3(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	registerBlockProducer(tester.acc2, t)
	const VEST = 1000
	SelfTransferToVesting([]*DandelionAccount{tester.acc0, tester.acc1}, VEST, t)
	t.Run("huge global vp", d.Test(tester.hugeGlobalVp))
	t.Run("zero global vp", d.Test(tester.zeroGlobalVp))
}
// Test4 runs the ticket-vote scenario. Unlike Test1-3 it does not pre-vest
// the accounts here; withTicket performs its own TransferToVest.
func (tester *ReplyTester) Test4(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	registerBlockProducer(tester.acc2, t)
	t.Run("with ticket", d.Test(tester.withTicket))
}
// normal verifies the cashout timing of a reply: the author's vest stays
// unchanged until exactly PostCashOutDelayBlock blocks after the reply,
// grows once at the cashout block, then stays constant again.
func (tester *ReplyTester) normal(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST = 1
	const REPLY = 2
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	post1Block := d.GlobalProps().GetHeadBlockNumber() - 1
	post1Cashout := post1Block + constants.PostCashOutDelayBlock
	a.Equal(post1Cashout, d.Post(POST).GetCashoutBlockNum())
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY, POST, tester.acc0.Name, "content", nil)))
	// Record the author's vest, vote on the reply, then advance to one
	// block before the reply's scheduled cashout.
	vest0 := d.Account(tester.acc0.Name).GetVest().Value
	a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY)))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2))
	a.Equal(d.Account(tester.acc0.Name).GetVest().Value, vest0)
	// The next block performs the cashout: the vest must change exactly once.
	a.NoError(d.ProduceBlocks(1))
	a.NotEqual(d.Account(tester.acc0.Name).GetVest().Value, vest0)
	vest1 := d.Account(tester.acc0.Name).GetVest().Value
	a.NoError(d.ProduceBlocks(1))
	a.Equal(d.Account(tester.acc0.Name).GetVest().Value, vest1)
	// Advance far enough that every post/reply has been cashed out.
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
	a.Equal(d.Post(REPLY).GetCashoutBlockNum(), app.CashoutCompleted)
}
// cashout recomputes the expected reply reward from the global reward pool
// and the (decayed) global weighted vp, and checks the author vest, the post
// record, and the global claimed/pool counters after the cashout block.
func (tester *ReplyTester) cashout(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST = 1
	const REPLY = 2
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	post1Block := d.GlobalProps().GetHeadBlockNumber() - 1
	post1Cashout := post1Block + constants.PostCashOutDelayBlock
	a.Equal(post1Cashout, d.Post(1).GetCashoutBlockNum())
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY, POST, tester.acc0.Name, "content", nil)))
	vest0 := d.Account(tester.acc0.Name).GetVest().Value
	a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY)))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2))
	replyWeight := StringToBigInt(d.Post(REPLY).GetWeightedVp())
	a.NotEqual(replyWeight.Int64(), int64(0))
	globalReplyReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolReplyRewards().Value)
	bigTotalReplyWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsReply())
	decayedReplyWeight := bigDecay(bigTotalReplyWeight)
	exceptNextBlockReplyWeightedVps := decayedReplyWeight.Add(decayedReplyWeight, replyWeight)
	// big.Int.Add mutates its receiver, so bigGlobalReplyReward and
	// globalReplyReward alias the same (pool + per-block) total below.
	bigGlobalReplyReward := globalReplyReward.Add(globalReplyReward, new(big.Int).SetUint64(perBlockReplyReward(d)))
	reward := ProportionAlgorithm(replyWeight, exceptNextBlockReplyWeightedVps, globalReplyReward)
	exceptGlobalClaimRewardAfterCashout := &prototype.Vest{Value: reward.Uint64()}
	exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(bigGlobalReplyReward, reward).Uint64()}
	a.NoError(d.ProduceBlocks(1))
	a.Equal(d.GlobalProps().GetWeightedVpsReply(), exceptNextBlockReplyWeightedVps.String())
	vest1 := d.Account(tester.acc0.Name).GetVest().Value
	realReward := vest1 - vest0
	a.Equal(reward.Uint64(), realReward)
	a.Equal(d.Post(REPLY).GetRewards().Value, realReward)
	a.Equal(d.Post(REPLY).GetCashoutBlockNum(), app.CashoutCompleted)
	a.Equal(d.GlobalProps().GetClaimedReplyRewards(), exceptGlobalClaimRewardAfterCashout)
	a.Equal(d.GlobalProps().GetPoolReplyRewards(), exceptGlobalRewardAfterCashout)
	// Advance far enough that every post/reply has been cashed out.
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
}
// cashoutAfterOtherCashout repeats the cashout check when earlier cashouts
// have already accumulated into ClaimedReplyRewards; the expected claimed
// total is therefore computed on top of the current global counter.
func (tester *ReplyTester) cashoutAfterOtherCashout(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST = 3
	const REPLY = 4
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"2"}, nil)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY, POST, tester.acc0.Name, "content", nil)))
	vest0 := d.Account(tester.acc0.Name).GetVest().Value
	a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY)))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2))
	replyWeight := StringToBigInt(d.Post(REPLY).GetWeightedVp())
	a.NotEqual(replyWeight.Int64(), int64(0))
	globalReplyReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolReplyRewards().Value)
	bigTotalReplyWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsReply())
	decayedReplyWeight := bigDecay(bigTotalReplyWeight)
	exceptNextBlockReplyWeightedVps := decayedReplyWeight.Add(decayedReplyWeight, replyWeight)
	// big.Int.Add mutates its receiver: bigGlobalReplyReward aliases
	// globalReplyReward, both holding pool + per-block reward.
	bigGlobalReplyReward := globalReplyReward.Add(globalReplyReward, new(big.Int).SetUint64(perBlockReplyReward(d)))
	reward := ProportionAlgorithm(replyWeight, exceptNextBlockReplyWeightedVps, globalReplyReward)
	// Expected claimed total builds on rewards claimed by earlier cashouts.
	exceptGlobalClaimRewardAfterCashout := d.GlobalProps().ClaimedReplyRewards.Add(&prototype.Vest{Value: reward.Uint64()})
	exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(bigGlobalReplyReward, reward).Uint64()}
	a.NoError(d.ProduceBlocks(1))
	a.Equal(d.GlobalProps().GetWeightedVpsReply(), exceptNextBlockReplyWeightedVps.String())
	vest1 := d.Account(tester.acc0.Name).GetVest().Value
	realReward := vest1 - vest0
	a.Equal(reward.Uint64(), realReward)
	a.Equal(d.Post(REPLY).GetRewards().Value, realReward)
	a.Equal(d.Post(REPLY).GetCashoutBlockNum(), app.CashoutCompleted)
	a.Equal(d.GlobalProps().GetClaimedReplyRewards(), exceptGlobalClaimRewardAfterCashout)
	a.Equal(d.GlobalProps().GetPoolReplyRewards(), exceptGlobalRewardAfterCashout)
	// Advance far enough that every post/reply has been cashed out.
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
}
// multiCashout creates two replies that cash out in the same block and
// verifies that the pool is split between them proportionally to their
// weighted vp, with both post records and global counters consistent.
func (tester *ReplyTester) multiCashout(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST = 5
	const REPLY1 = 6
	const REPLY2 = 7
	// NOTE(review): BLOCK is unused — confirm whether it can be removed.
	const BLOCK = 100
	a.NoError(tester.acc0.SendTrx(Post(POST, tester.acc0.Name, "title", "content", []string{"3"}, nil)))
	// Produce a separating block so the post cashout does not interleave
	// with the replies' cashout block.
	a.NoError(d.ProduceBlocks(1))
	a.NoError(tester.acc0.SendTrx(Reply(REPLY1, POST, tester.acc0.Name, "content1", nil)))
	a.NoError(tester.acc1.SendTrx(Reply(REPLY2, POST, tester.acc1.Name, "content2", nil)))
	a.NoError(d.ProduceBlocks(1))
	a.NoError(tester.acc0.SendTrx(Vote(tester.acc0.Name, REPLY2)))
	a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY1)))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2))
	vestold1 := d.Account(tester.acc0.Name).GetVest().Value
	vestold2 := d.Account(tester.acc1.Name).GetVest().Value
	// convert to uint64 to make test easier
	// the mul result less than uint64.MAX
	reply1Weight := StringToBigInt(d.Post(REPLY1).GetWeightedVp())
	reply2Weight := StringToBigInt(d.Post(REPLY2).GetWeightedVp())
	a.NotEqual(reply1Weight.Int64(), int64(0))
	a.NotEqual(reply2Weight.Int64(), int64(0))
	globalReplyReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolReplyRewards().Value)
	bigTotalReplyWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsReply())
	repliesWeight := new(big.Int).Add(reply1Weight, reply2Weight)
	decayedReplyWeight := bigDecay(bigTotalReplyWeight)
	exceptNextBlockReplyWeightedVps := decayedReplyWeight.Add(decayedReplyWeight, repliesWeight)
	// big.Int.Add mutates its receiver: bigGlobalReplyReward aliases
	// globalReplyReward, both holding pool + per-block reward.
	bigGlobalReplyReward := globalReplyReward.Add(globalReplyReward, new(big.Int).SetUint64(perBlockReplyReward(d)))
	reply1Reward := ProportionAlgorithm(reply1Weight, exceptNextBlockReplyWeightedVps, bigGlobalReplyReward)
	reply2Reward := ProportionAlgorithm(reply2Weight, exceptNextBlockReplyWeightedVps, bigGlobalReplyReward)
	repliesReward := new(big.Int).Add(reply1Reward, reply2Reward)
	exceptGlobalClaimRewardAfterCashout := d.GlobalProps().ClaimedReplyRewards.Add(&prototype.Vest{Value: repliesReward.Uint64()})
	exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(bigGlobalReplyReward, repliesReward).Uint64()}
	a.NoError(d.ProduceBlocks(1))
	a.Equal(d.GlobalProps().GetWeightedVpsReply(), exceptNextBlockReplyWeightedVps.String())
	vestnew1 := d.Account(tester.acc0.Name).GetVest().Value
	vestnew2 := d.Account(tester.acc1.Name).GetVest().Value
	real1Reward := vestnew1 - vestold1
	real2Reward := vestnew2 - vestold2
	a.Equal(reply1Reward.Uint64(), real1Reward)
	a.Equal(reply2Reward.Uint64(), real2Reward)
	a.Equal(d.Post(REPLY1).GetRewards().Value, real1Reward)
	a.Equal(d.Post(REPLY2).GetRewards().Value, real2Reward)
	a.Equal(d.Post(REPLY1).GetCashoutBlockNum(), app.CashoutCompleted)
	a.Equal(d.Post(REPLY2).GetCashoutBlockNum(), app.CashoutCompleted)
	a.Equal(d.GlobalProps().GetClaimedReplyRewards(), exceptGlobalClaimRewardAfterCashout)
	a.Equal(d.GlobalProps().GetPoolReplyRewards(), exceptGlobalRewardAfterCashout)
	// Advance far enough that every post/reply has been cashed out.
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
}
// hugeGlobalVp seeds the global reply weighted-vp accumulator with a value
// larger than MaxUint64 (10 * MaxUint64) and checks the cashout math still
// matches the big.Int reference computation without overflow.
func (tester *ReplyTester) hugeGlobalVp(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST = 1
	const REPLY = 2
	_ = d.ModifyProps(func(props *prototype.DynamicProperties) {
		maxUint64 := new(big.Int).SetUint64(math.MaxUint64)
		factor := new(big.Int).SetUint64(10)
		replyWeightedVp := new(big.Int).Mul(maxUint64, factor)
		props.WeightedVpsReply = replyWeightedVp.String()
	})
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY, POST, tester.acc0.Name, "content", nil)))
	vest0 := d.Account(tester.acc0.Name).GetVest().Value
	a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY)))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2))
	replyWeight := StringToBigInt(d.Post(REPLY).GetWeightedVp())
	a.NotEqual(replyWeight.Int64(), int64(0))
	globalReplyReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolReplyRewards().Value)
	bigTotalReplyWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsReply())
	decayedReplyWeight := bigDecay(bigTotalReplyWeight)
	exceptNextBlockReplyWeightedVps := decayedReplyWeight.Add(decayedReplyWeight, replyWeight)
	// big.Int.Add mutates its receiver: bigGlobalReplyReward aliases
	// globalReplyReward, both holding pool + per-block reward.
	bigGlobalReplyReward := globalReplyReward.Add(globalReplyReward, new(big.Int).SetUint64(perBlockReplyReward(d)))
	reward := ProportionAlgorithm(replyWeight, exceptNextBlockReplyWeightedVps, globalReplyReward)
	exceptGlobalClaimRewardAfterCashout := &prototype.Vest{Value: reward.Uint64()}
	exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(bigGlobalReplyReward, reward).Uint64()}
	a.NoError(d.ProduceBlocks(1))
	a.Equal(d.GlobalProps().GetWeightedVpsReply(), exceptNextBlockReplyWeightedVps.String() )
	vest1 := d.Account(tester.acc0.Name).GetVest().Value
	realReward := vest1 - vest0
	a.Equal(reward.Uint64(), realReward)
	a.Equal(d.Post(REPLY).GetRewards().Value, realReward)
	a.Equal(d.Post(REPLY).GetCashoutBlockNum(), app.CashoutCompleted)
	a.Equal(d.GlobalProps().GetClaimedReplyRewards(), exceptGlobalClaimRewardAfterCashout)
	a.Equal(d.GlobalProps().GetPoolReplyRewards(), exceptGlobalRewardAfterCashout)
	// Advance far enough that every post/reply has been cashed out.
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
}
// zeroGlobalVp resets the global reply weighted-vp accumulator to zero and
// checks the cashout math still matches the big.Int reference computation.
func (tester *ReplyTester) zeroGlobalVp(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST = 3
	const REPLY = 4
	_ = d.ModifyProps(func(props *prototype.DynamicProperties) {
		replyWeightedVp := new(big.Int).SetUint64(0)
		props.WeightedVpsReply = replyWeightedVp.String()
	})
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY, POST, tester.acc0.Name, "content", nil)))
	vest0 := d.Account(tester.acc0.Name).GetVest().Value
	a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY)))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2))
	replyWeight := StringToBigInt(d.Post(REPLY).GetWeightedVp())
	a.NotEqual(replyWeight.Int64(), int64(0))
	globalReplyReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolReplyRewards().Value)
	bigTotalReplyWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsReply())
	decayedReplyWeight := bigDecay(bigTotalReplyWeight)
	exceptNextBlockReplyWeightedVps := decayedReplyWeight.Add(decayedReplyWeight, replyWeight)
	// big.Int.Add mutates its receiver: bigGlobalReplyReward aliases
	// globalReplyReward, both holding pool + per-block reward.
	bigGlobalReplyReward := globalReplyReward.Add(globalReplyReward, new(big.Int).SetUint64(perBlockReplyReward(d)))
	reward := ProportionAlgorithm(replyWeight, exceptNextBlockReplyWeightedVps, globalReplyReward)
	exceptGlobalClaimRewardAfterCashout := &prototype.Vest{Value: reward.Uint64()}
	exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(bigGlobalReplyReward, reward).Uint64()}
	a.NoError(d.ProduceBlocks(1))
	a.Equal(d.GlobalProps().GetWeightedVpsReply(), exceptNextBlockReplyWeightedVps.String() )
	vest1 := d.Account(tester.acc0.Name).GetVest().Value
	realReward := vest1 - vest0
	a.Equal(reward.Uint64(), realReward)
	a.Equal(d.Post(REPLY).GetRewards().Value, realReward)
	a.Equal(d.Post(REPLY).GetCashoutBlockNum(), app.CashoutCompleted)
	a.Equal(d.GlobalProps().GetClaimedReplyRewards(), exceptGlobalClaimRewardAfterCashout)
	a.Equal(d.GlobalProps().GetPoolReplyRewards(), exceptGlobalRewardAfterCashout)
	// Advance far enough that every post/reply has been cashed out.
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
}
// withTicket verifies that a ticket vote adds PerTicketWeight to the reply's
// weighted vp and that the resulting reward matches the reference
// computation with that extra weight included.
func (tester *ReplyTester) withTicket(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const VEST = 10 * constants.COSTokenDecimals
	const POST = 1
	const REPLY = 2
	a.NoError(tester.acc0.SendTrxAndProduceBlock(TransferToVest(tester.acc0.Name, tester.acc0.Name, VEST, "")))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(AcquireTicket(tester.acc0.Name, 1)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY, POST, tester.acc0.Name, "content", nil)))
	a.NoError(tester.acc0.SendTrx(VoteByTicket(tester.acc0.Name, REPLY, 1)))
	vest0 := d.Account(tester.acc0.Name).GetVest().Value
	a.NoError(tester.acc1.SendTrx(Vote(tester.acc1.Name, REPLY)))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock - 2))
	replyWeight := StringToBigInt(d.Post(REPLY).GetWeightedVp())
	a.NotEqual(replyWeight.Int64(), int64(0))
	// One ticket adds PerTicketWeight to the reply's effective weight.
	replyWeight = new(big.Int).Add(replyWeight, new(big.Int).SetUint64(1 * d.GlobalProps().GetPerTicketWeight()))
	globalReplyReward := new(big.Int).SetUint64(d.GlobalProps().GetPoolReplyRewards().Value)
	bigTotalReplyWeight := StringToBigInt(d.GlobalProps().GetWeightedVpsReply())
	decayedReplyWeight := bigDecay(bigTotalReplyWeight)
	exceptNextBlockReplyWeightedVps := decayedReplyWeight.Add(decayedReplyWeight, replyWeight)
	// big.Int.Add mutates its receiver: bigGlobalReplyReward aliases
	// globalReplyReward, both holding pool + per-block reward.
	bigGlobalReplyReward := globalReplyReward.Add(globalReplyReward, new(big.Int).SetUint64(perBlockReplyReward(d)))
	reward := ProportionAlgorithm(replyWeight, exceptNextBlockReplyWeightedVps, globalReplyReward)
	exceptGlobalClaimRewardAfterCashout := &prototype.Vest{Value: reward.Uint64()}
	exceptGlobalRewardAfterCashout := &prototype.Vest{ Value: new(big.Int).Sub(bigGlobalReplyReward, reward).Uint64()}
	a.NoError(d.ProduceBlocks(1))
	a.Equal(d.GlobalProps().GetWeightedVpsReply(), exceptNextBlockReplyWeightedVps.String() )
	vest1 := d.Account(tester.acc0.Name).GetVest().Value
	realReward := vest1 - vest0
	a.Equal(reward.Uint64(), realReward)
	a.Equal(d.Post(REPLY).GetRewards().Value, realReward)
	a.Equal(d.Post(REPLY).GetCashoutBlockNum(), app.CashoutCompleted)
	a.Equal(d.GlobalProps().GetClaimedReplyRewards(), exceptGlobalClaimRewardAfterCashout)
	a.Equal(d.GlobalProps().GetPoolReplyRewards(), exceptGlobalRewardAfterCashout)
	// Advance far enough that every post/reply has been cashed out.
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
}
package optimisation
import "github.com/gonum/matrix/mat64"
// BatchGradientDescent finds the local minimum of a function.
// See http://en.wikipedia.org/wiki/Gradient_descent for more details.
func BatchGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch int) *mat64.Dense {
m, _ := y.Dims()
// Helper function for scalar multiplication
mult := func(r, c int, v float64) float64 { return v * 1.0 / float64(m) * alpha }
for i := 0; i < epoch; i++ {
grad := mat64.DenseCopyOf(x)
grad.TCopy(grad)
temp := mat64.DenseCopyOf(x)
// Calculate our best prediction, given theta
temp.Mul(temp, theta)
// Calculate our error from the real values
temp.Sub(temp, y)
grad.Mul(grad, temp)
// Multiply by scalar factor
grad.Apply(mult, grad)
// Take a step in gradient direction
theta.Sub(theta, grad)
}
return theta
}
// StochasticGradientDescent updates the parameters of theta on a random row selection from a matrix.
// It is faster as it does not compute the cost function over the entire dataset every time.
// It instead calculates the error parameters over only one row of the dataset at a time.
// In return, there is a trade off for accuracy. This is minimised by running multiple SGD processes
// (the number of goroutines spawned is specified by the procs variable) in parallel and taking an average of the result.
func StochasticGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch, procs int) *mat64.Dense {
m, _ := y.Dims()
resultPipe := make(chan *mat64.Dense)
results := make([]*mat64.Dense, 0)
// Helper function for scalar multiplication
mult := func(r, c int, v float64) float64 { return v * 1.0 / float64(m) * alpha }
for p := 0; p < procs; p++ {
go func() {
// Is this just a pointer to theta?
thetaCopy := mat64.DenseCopyOf(theta)
for i := 0; i < epoch; i++ {
for k := 0; k < m; k++ {
datXtemp := x.RowView(k)
datYtemp := y.RowView(k)
datX := mat64.NewDense(1, len(datXtemp), datXtemp)
datY := mat64.NewDense(1, 1, datYtemp)
grad := mat64.DenseCopyOf(datX)
grad.TCopy(grad)
datX.Mul(datX, thetaCopy)
datX.Sub(datX, datY)
grad.Mul(grad, datX)
// Multiply by scalar factor
grad.Apply(mult, grad)
// Take a step in gradient direction
thetaCopy.Sub(thetaCopy, grad)
}
}
resultPipe <- thetaCopy
}()
}
for {
select {
case d := <-resultPipe:
results = append(results, d)
if len(results) == procs {
return averageTheta(results)
}
}
}
}
func averageTheta(matrices []*mat64.Dense) *mat64.Dense {
if len(matrices) < 2 {
panic("Must provide at least two matrices to average")
}
invLen := 1.0 / float64(len(matrices))
// Helper function for scalar multiplication
mult := func(r, c int, v float64) float64 { return v * invLen}
// Sum matrices
average := matrices[0]
for i := 1; i < len(matrices); i++ {
average.Add(average, matrices[i])
}
// Calculate the average
average.Apply(mult, average)
return average
} | optimisation/gradient_descent.go | 0.882479 | 0.687886 | gradient_descent.go | starcoder |
package risk
import (
"github.com/go-bongo/bongo"
"github.com/golang/glog"
"github.com/toolkits/slice"
"kube-scan/common"
)
// String-based enumerations describing a risk's classification dimensions.
type RiskCategory string
type RiskType string
type RiskFactorCategory string
type RiskCIACategory string
type RiskStatus string
type RiskAttackVector string
type RiskScope string
// Allowed values for each enumeration.
const (
	None = RiskCategory("None")
	Low = RiskCategory("Low")
	Medium = RiskCategory("Medium")
	High = RiskCategory("High")
	FactorNone = RiskFactorCategory("None")
	FactorVeryLow = RiskFactorCategory("VeryLow")
	FactorLow = RiskFactorCategory("Low")
	FactorModerate = RiskFactorCategory("Moderate")
	FactorHigh = RiskFactorCategory("High")
	CIANone = RiskCIACategory("None")
	CIALow = RiskCIACategory("Low")
	CIAHigh = RiskCIACategory("High")
	AttackVectorLocal = RiskAttackVector("Local")
	AttackVectorRemote = RiskAttackVector("Remote")
	ScopeCluster = RiskScope("Cluster")
	ScopeHost = RiskScope("Host")
	ScopeNone = RiskScope("None")
	Basic = RiskType("Basic")
	Remediation = RiskType("Remediation")
	//RiskStatusNone = RiskStatus("None")
	RiskStatusOpen = RiskStatus("Open")
	RiskStatusInProgress = RiskStatus("InProgress")
	RiskStatusAccepted = RiskStatus("Accepted")
)
var (
	// NotNoneScopes lists the scopes that represent an actual blast radius.
	NotNoneScopes = []RiskScope{ScopeHost, ScopeCluster}
)
// getOrder maps a CIA category to its severity rank: None=0, Low=1, High=2.
// An unknown category is logged and ranked -1.
func (cia RiskCIACategory) getOrder() int {
	switch cia {
	case CIAHigh:
		return 2
	case CIALow:
		return 1
	case CIANone:
		return 0
	}
	glog.Errorf("unknown cia category %v", cia)
	return -1
}
// GreaterThan reports whether cia ranks strictly above other in the
// None < Low < High severity order.
func (cia RiskCIACategory) GreaterThan(other RiskCIACategory) bool {
	return cia.getOrder() > other.getOrder()
}
// Minus returns the CIA impact of cia that remains after subtracting other:
// subtracting High cancels everything, subtracting Low downgrades High to
// Low and cancels Low/None, and subtracting None leaves cia unchanged.
func (cia RiskCIACategory) Minus(other RiskCIACategory) RiskCIACategory {
	if other == CIAHigh {
		return CIANone
	}
	if other == CIALow {
		if cia == CIAHigh {
			return CIALow
		}
		return CIANone
	}
	return cia
}
// validate reports whether t equals one of the allowed values in all.
func validate(t interface{}, all ...interface{}) bool {
	return slice.Contains(all, t)
}
// ValidateStatus reports whether status is one of the defined RiskStatus values.
func ValidateStatus(status RiskStatus) bool {
	return validate(status, RiskStatusOpen, RiskStatusInProgress, RiskStatusAccepted)
}
// ValidateScope reports whether scope is one of the defined RiskScope values.
func ValidateScope(scope RiskScope) bool {
	return validate(scope, ScopeCluster, ScopeHost, ScopeNone)
}
// ValidateFactorCategory reports whether rfc is one of the defined RiskFactorCategory values.
func ValidateFactorCategory(rfc RiskFactorCategory) bool {
	return validate(rfc, FactorNone, FactorVeryLow, FactorLow, FactorModerate, FactorHigh)
}
// ValidateCIACategory reports whether cia is one of the defined RiskCIACategory values.
func ValidateCIACategory(cia RiskCIACategory) bool {
	return validate(cia, CIANone, CIALow, CIAHigh)
}
// ValidateAttackVector reports whether av is one of the defined RiskAttackVector values.
func ValidateAttackVector(av RiskAttackVector) bool {
	return validate(av, AttackVectorLocal, AttackVectorRemote)
}
// RiskItem describes a single identified risk finding: its
// classification (category and type), human-readable descriptions, CIA
// impact ratings with their explanations, exploitability, attack
// vector, scope, and an optional numeric score (nil when no score was
// computed). Tags cover JSON (API) and BSON (MongoDB) encodings.
type RiskItem struct {
	Name string `json:"name" bson:"name"`
	RiskCategory RiskCategory `json:"riskCategory" bson:"riskCategory"`
	RiskType RiskType `json:"type" bson:"type"`
	Title string `json:"title" bson:"title"`
	ShortDescription string `json:"shortDescription" bson:"shortDescription"`
	Description string `json:"description" bson:"description"`
	Confidentiality RiskCIACategory `json:"confidentiality" bson:"confidentiality"`
	ConfidentialityDescription string `json:"confidentialityDescription" bson:"confidentialityDescription"`
	Integrity RiskCIACategory `json:"integrity" bson:"integrity"`
	IntegrityDescription string `json:"integrityDescription" bson:"integrityDescription"`
	Availability RiskCIACategory `json:"availability" bson:"availability"`
	AvailabilityDescription string `json:"availabilityDescription" bson:"availabilityDescription"`
	Exploitability RiskFactorCategory `json:"exploitability" bson:"exploitability"`
	AttackVector RiskAttackVector `json:"attackVector" bson:"attackVector"`
	Scope RiskScope `json:"scope" bson:"scope"`
	Score *float64 `json:"score" bson:"score"`
}
// NewRiskItem builds a RiskItem of the given type and category, copying
// all descriptive and rating fields from riskConfig and attaching the
// supplied score (which may be nil).
func NewRiskItem(riskType RiskType, riskConfig ScoreConfig, score *float64, category RiskCategory) RiskItem {
	return RiskItem{
		Name: riskConfig.Name,
		RiskType: riskType,
		RiskCategory: category,
		Title: riskConfig.Title,
		ShortDescription: riskConfig.ShortDescription,
		Description: riskConfig.Description,
		Confidentiality: riskConfig.Confidentiality,
		ConfidentialityDescription: riskConfig.ConfidentialityDescription,
		Integrity: riskConfig.Integrity,
		IntegrityDescription: riskConfig.IntegrityDescription,
		Availability: riskConfig.Availability,
		AvailabilityDescription: riskConfig.AvailabilityDescription,
		Exploitability: riskConfig.Exploitability,
		AttackVector: riskConfig.AttackVector,
		Scope: riskConfig.Scope,
		Score: score,
	}
}
// Risk is an aggregated risk assessment for a workload: an overall
// score, a category, the individual findings, and a triage status.
type Risk struct {
	RiskScore int `json:"riskScore" bson:"riskScore"`
	RiskCategory RiskCategory `json:"riskCategory" bson:"riskCategory"`
	RiskItems []RiskItem `json:"riskItems" bson:"riskItems"`
	RiskStatus RiskStatus `json:"riskStatus" bson:"riskStatus"`
}
// Clone returns a copy of the Risk. The RiskItems slice gets its own
// backing array so that appends or element writes on the clone do not
// affect the original.
func (risk *Risk) Clone() *Risk {
	clonedItems := make([]RiskItem, len(risk.RiskItems))
	copy(clonedItems, risk.RiskItems)
	clone := &Risk{
		RiskScore:    risk.RiskScore,
		RiskCategory: risk.RiskCategory,
		RiskItems:    clonedItems,
		RiskStatus:   risk.RiskStatus,
	}
	return clone
}
// IWorloadRisk is a workload that can carry a Risk assessment.
// NOTE(review): the name is missing a 'k' ("Worload"); it is exported,
// so renaming would break callers and it is left as-is.
type IWorloadRisk interface {
	common.Workload
	SetRisk(r *Risk)
	GetRisk() *Risk
}
// WorkloadRisk embeds an optional risk assessment into a workload.
type WorkloadRisk struct {
	Risk *Risk `json:"risk" bson:"risk"`
}
// GetRisk returns a copy of the workload's risk so callers cannot
// mutate the stored value. It returns nil when no risk has been set;
// previously the unconditional w.Risk.Clone() dereferenced a nil
// pointer (Clone reads risk.RiskItems) and panicked.
func (w *WorkloadRisk) GetRisk() *Risk {
	if w.Risk == nil {
		return nil
	}
	return w.Risk.Clone()
}
// SetRisk stores r as the workload's risk. The pointer is stored
// directly (not cloned), so later mutations of r are visible here.
func (w *WorkloadRisk) SetRisk(r *Risk) {
	w.Risk = r
}
// WorkloadRiskData is the JSON-serializable view of a workload's
// identity (kind/name/namespace/domain) together with its risk.
type WorkloadRiskData struct {
	Kind string `json:"kind"`
	Name string `json:"name"`
	Namespace string `json:"namespace"`
	Domain string `json:"domain"`
	Risk *Risk `json:"risk"`
}
// ToWorkloadRiskData flattens a risk-bearing workload into a
// WorkloadRiskData, pulling identity fields from the workload and the
// risk from GetRisk.
func ToWorkloadRiskData(workload IWorloadRisk) *WorkloadRiskData {
	return &WorkloadRiskData{
		Kind: workload.GetKind(),
		Name: workload.GetName(),
		Namespace: workload.GetNamespace(),
		Domain: workload.GetDomain(),
		Risk: workload.GetRisk(),
	}
}
// WorkloadRiskStatus is the persisted (bongo/MongoDB) record of one
// workload's triage status, keyed by account/domain/namespace/kind/name.
type WorkloadRiskStatus struct {
	bongo.DocumentBase `bson:",inline"`
	Account string `json:"account" bson:"account"`
	Domain string `json:"domain" bson:"domain"`
	Namespace string `json:"namespace" bson:"namespace"`
	Kind string `json:"kind" bson:"kind"`
	Name string `json:"name" bson:"name"`
	RiskStatus RiskStatus `json:"riskStatus" bson:"riskStatus"`
}
// RiskStatusesHolder is an in-memory index of risk statuses for a
// single account. It is not safe for concurrent use.
type RiskStatusesHolder struct {
	Account string
	//domain->namespace->kind->name->status
	cache map[string]map[string]map[string]map[string]RiskStatus
}
// NewRiskStatusesHolder creates an empty status holder for the given
// account, with the nested cache map initialized.
func NewRiskStatusesHolder(account string) *RiskStatusesHolder {
	cache := make(map[string]map[string]map[string]map[string]RiskStatus)
	return &RiskStatusesHolder{
		Account: account,
		cache: cache,
	}
}
// SetStatus records the RiskStatus of one workload in the nested cache,
// creating the intermediate domain -> namespace -> kind maps on demand.
// The freshly created map is assigned to the local variable directly,
// avoiding the previous pattern of writing it into the parent map and
// then reading it back out again.
func (holder *RiskStatusesHolder) SetStatus(status WorkloadRiskStatus) {
	domain, ok := holder.cache[status.Domain]
	if !ok {
		domain = make(map[string]map[string]map[string]RiskStatus)
		holder.cache[status.Domain] = domain
	}
	namespace, ok := domain[status.Namespace]
	if !ok {
		namespace = make(map[string]map[string]RiskStatus)
		domain[status.Namespace] = namespace
	}
	kind, ok := namespace[status.Kind]
	if !ok {
		kind = make(map[string]RiskStatus)
		namespace[status.Kind] = kind
	}
	kind[status.Name] = status.RiskStatus
}
// GetStatus looks up the recorded status for the given workload
// coordinates. It returns (RiskStatusOpen, false) whenever any level of
// the cache is missing, so absent workloads effectively default to Open.
func (holder *RiskStatusesHolder) GetStatus(domain string, namespace string, kind string, name string) (RiskStatus, bool) {
	domainMap, ok := holder.cache[domain]
	if !ok {
		return RiskStatusOpen, false
	}
	namespaceMap, ok := domainMap[namespace]
	if !ok {
		return RiskStatusOpen, false
	}
	kindMap, ok := namespaceMap[kind]
	if !ok {
		return RiskStatusOpen, false
	}
	result, ok := kindMap[name]
	if !ok {
		return RiskStatusOpen, false
	}
	return result, true
}
// WorkloadRiskDataList is a list of per-workload risk entries.
type WorkloadRiskDataList []*WorkloadRiskData
// Sanitized returns the list unchanged; no filtering is performed.
// Presumably this satisfies a sanitization interface used when
// rendering API responses — confirm against the caller.
func (w WorkloadRiskDataList) Sanitized() interface{} {
	return w
} | server/src/risk/types.go | 0.530723 | 0.409752 | types.go | starcoder |
package coin
import (
"math/big"
"strings"
"github.com/digitalbitbox/bitbox-wallet-app/util/errp"
)
// Amount represents an amount in the smallest coin unit (e.g. satoshi).
type Amount struct {
	// n is the amount in the smallest unit; use BigInt() to obtain a
	// mutation-safe copy.
	n *big.Int
}
// NewAmount creates a new amount. The given big.Int is stored directly,
// not copied.
func NewAmount(amount *big.Int) Amount {
	return Amount{n: amount}
}
// NewAmountFromInt64 creates a new amount.
func NewAmountFromInt64(amount int64) Amount {
	return Amount{n: big.NewInt(amount)}
}
// NewAmountFromString parses a user given coin amount, converting it from the default coin unit to
// the smallest unit. It fails on rationals like "2/3", on unparsable
// strings, and on values that are not a whole number of smallest units.
func NewAmountFromString(s string, unit *big.Int) (Amount, error) {
	// big.Rat parsing accepts rationals like "2/3". Exclude those, we only want decimals.
	if strings.ContainsRune(s, '/') {
		return Amount{}, errp.Newf("could not parse %q", s)
	}
	rat, ok := new(big.Rat).SetString(s)
	if !ok {
		return Amount{}, errp.Newf("could not parse %q", s)
	}
	// Scale to the smallest unit; a non-1 denominator afterwards means
	// the input had more fractional precision than the unit allows.
	rat.Mul(rat, new(big.Rat).SetInt(unit))
	if rat.Denom().Cmp(big.NewInt(1)) != 0 {
		return Amount{}, errp.Newf("invalid amount %q", s)
	}
	return Amount{n: rat.Num()}, nil
}
// Int64 returns the int64 representation of amount. If x cannot be represented in an int64, an
// error is returned.
func (amount Amount) Int64() (int64, error) {
	if !amount.n.IsInt64() {
		return 0, errp.Newf("%s overflows int64", amount.n)
	}
	return amount.n.Int64(), nil
}
// BigInt returns a copy of the underlying big integer, so callers may
// mutate the result freely.
func (amount Amount) BigInt() *big.Int {
	return new(big.Int).Set(amount.n)
}
// SendAmount is either a concrete amount, or "all"/"max". The concrete amount is user input and is
// parsed/validated in Amount().
type SendAmount struct {
	// amount is the raw, unvalidated user input; empty for send-all.
	amount string
	// sendAll marks a send-all/max request; amount is ignored then.
	sendAll bool
}
// NewSendAmount creates a new SendAmount based on a concrete amount.
func NewSendAmount(amount string) SendAmount {
	return SendAmount{amount: amount, sendAll: false}
}
// NewSendAmountAll creates a new Sendall-amount.
func NewSendAmountAll() SendAmount {
	return SendAmount{amount: "", sendAll: true}
}
// Amount parses the amount and converts it from the default unit to the smallest unit (e.g. satoshi
// = 1e8). Returns an error if the amount is negative, or depending on allowZero, if it is zero.
// Panics when called on a send-all amount.
func (sendAmount SendAmount) Amount(unit *big.Int, allowZero bool) (Amount, error) {
	if sendAmount.sendAll {
		panic("can only be called if SendAll is false")
	}
	amount, err := NewAmountFromString(sendAmount.amount, unit)
	if err != nil {
		return Amount{}, errp.WithStack(ErrInvalidAmount)
	}
	// Compute the sign once; the previous code called BigInt() (which
	// copies the big.Int) separately for the negative and zero checks.
	sign := amount.BigInt().Sign()
	if sign < 0 || (!allowZero && sign == 0) {
		return Amount{}, errp.WithStack(ErrInvalidAmount)
	}
	return amount, nil
}
// SendAll returns if this represents a send-all input.
func (sendAmount *SendAmount) SendAll() bool {
	return sendAmount.sendAll
} | backend/coins/coin/amount.go | 0.774583 | 0.412826 | amount.go | starcoder |
package message
// http://www.fipa.org/specs/fipa00061/SC00061G.html
// Message represent a message object send between agents
type Message struct {
Type string // Performative :Type of communicative acts. http://www.fipa.org/specs/fipa00037/SC00037J.html
Sender string // Participant in communication
Receiver string // Participant in communication
replyTo string // Participant in communication
Content string // Content of message
language string // Description of Content
encoding string // Description of Content
ontology string // Description of Content
protocol string // Control of conversation
conversationID string // Control of conversation
replyWith string // Control of conversation
inReplyTo string // Control of conversation
replyBy string // Control of conversation
}
// They are listed in the FIPA Communicative Act Library Specification.
const (
	// TypeAcceptProposal The action of accepting a previously submitted propose to perform an action.
	TypeAcceptProposal string = "accept-proposal"
	// TypeAgree is the action of agreeing to perform a requested action made by another agent.
	TypeAgree string = "agree"
	// TypeCancel indicates the agent wants to cancel a previous request.
	TypeCancel string = "cancel"
	// TypeCfp issues a call for proposals with the actions to be carried out and any other terms.
	TypeCfp string = "cfp"
	// TypeConfirm confirms the truth of the content to a receiver believed to be unsure about it.
	TypeConfirm string = "confirm"
	// TypeDisconfirm confirms to the receiver the falsity of the content.
	TypeDisconfirm string = "disconfirm"
	// TypeFailure tells the other agent that a previously requested action failed.
	TypeFailure string = "failure"
	// TypeInform tells another agent something the sender believes to be true. Most used performative.
	TypeInform string = "inform"
	// TypeIinformIf asks, inside a request, whether a statement is true or false.
	// NOTE(review): the name has a doubled 'I' ("Iinform"); it is exported, so it is left as-is.
	TypeIinformIf string = "inform-if"
	// TypeInformRef is like inform-if but asks for the value of the expression.
	TypeInformRef string = "inform-ref"
	// TypeNotUnderstood is sent when the agent did not understand the message.
	TypeNotUnderstood string = "not-understood"
	// TypePropagate asks another agent to forward this same propagate message to others.
	TypePropagate string = "propagate"
	// TypePropose is a response to a cfp: the agent proposes a deal.
	TypePropose string = "propose"
	// TypeProxy asks the receiver to select target agents by description and send them an embedded message.
	TypeProxy string = "proxy"
	// TypeQueryIf asks another agent whether or not a given proposition is true.
	TypeQueryIf string = "query-if"
	// TypeQueryRef asks another agent for the object referred to by a referential expression.
	TypeQueryRef string = "query-ref"
	// TypeRefuse refuses to perform a given action, explaining the reason for the refusal.
	TypeRefuse string = "refuse"
	// TypeRejectProposal rejects a proposal to perform some action during a negotiation.
	TypeRejectProposal string = "reject-proposal"
	// TypeRequest asks the receiver to perform some action, often another communicative act.
	TypeRequest string = "request"
	// TypeRequestWhen asks the receiver to perform an action when a given proposition becomes true.
	TypeRequestWhen string = "request-when"
	// TypeRequestWhenever asks for the action each time the proposition becomes true again.
	TypeRequestWhenever string = "request-whenever"
	// TypeSubscribe requests notification of a reference's value, repeated whenever it changes.
	TypeSubscribe string = "subscribe"
) | message/message.go | 0.535098 | 0.406214 | message.go | starcoder |
package engine
import (
"math"
)
var (
	// BubblePos exposes the bubble core position (m) as a script value.
	BubblePos = NewVectorValue("ext_bubblepos", "m", "Bubble core position", bubblePos)
	// BubbleDist exposes the cumulative traveled distance (m).
	BubbleDist = NewScalarValue("ext_bubbledist", "m", "Bubble traveled distance", bubbleDist)
	// BubbleSpeed exposes the bubble velocity (m/s).
	BubbleSpeed = NewScalarValue("ext_bubblespeed", "m/s", "Bubble velocity", bubbleSpeed)
	// BubbleMz is the expected core polarity; must be 1.0 or -1.0.
	BubbleMz = 1.0
)
// init registers BubbleMz as a user-settable script variable.
func init() {
	DeclVar("ext_BubbleMz", &BubbleMz, "Center magnetization 1.0 or -1.0 (default = 1.0)")
}
// bubblePos locates the bubble core as the magnetization-weighted
// center of mass of mz and returns world coordinates {x, y, 0} in
// meters, including the accumulated simulation-window shift.
// NOTE(review): only slice 0 of the z-component is inspected, so this
// assumes an effectively 2D geometry — confirm for multilayer meshes.
// Panics unless BubbleMz is exactly 1.0 or -1.0.
func bubblePos() []float64 {
	m := M.Buffer()
	n := Mesh().Size()
	c := Mesh().CellSize()
	// Copy the z=0 layer of mz to host memory.
	mz := m.Comp(Z).HostCopy().Scalars()[0]
	posx, posy := 0., 0.
	if BubbleMz != -1.0 && BubbleMz != 1.0 {
		panic("ext_BubbleMz should be 1.0 or -1.0")
	}
	// y coordinate: (mz*polarity+1)/2 maps core cells to weight ~1 and
	// background cells to weight ~0; posy is the weighted mean row.
	{
		var magsum float32
		var weightedsum float32
		for iy := range mz {
			for ix := range mz[0] {
				magsum += ((mz[iy][ix]*float32(BubbleMz) + 1.) / 2.)
				weightedsum += ((mz[iy][ix]*float32(BubbleMz) + 1.) / 2.) * float32(iy)
			}
		}
		posy = float64(weightedsum / magsum)
	}
	// x coordinate: same weighting, weighted mean column.
	{
		var magsum float32
		var weightedsum float32
		for ix := range mz[0] {
			for iy := range mz {
				magsum += ((mz[iy][ix]*float32(BubbleMz) + 1.) / 2.)
				weightedsum += ((mz[iy][ix]*float32(BubbleMz) + 1.) / 2.) * float32(ix)
			}
		}
		posx = float64(weightedsum / magsum)
	}
	// Convert cell indices to world coordinates relative to the mesh
	// center, then add the window shift.
	return []float64{(posx-float64(n[X]/2))*c[X] + GetShiftPos(), (posy-float64(n[Y]/2))*c[Y] + GetShiftYPos(), 0}
}
var (
	// prevBpos is the bubble position at the previous sample; the
	// sentinel value marks "no previous sample yet".
	prevBpos = [2]float64{-1e99, -1e99}
	// bdist accumulates the total traveled distance in meters.
	bdist = 0.0
)
// bubbleDist returns the cumulative distance traveled by the bubble
// core, unwrapping jumps across periodic boundaries. The first call
// only records the reference position and returns 0. Not safe for
// concurrent use: it mutates package-level state.
func bubbleDist() float64 {
	pos := bubblePos()
	if prevBpos == [2]float64{-1e99, -1e99} {
		prevBpos = [2]float64{pos[X], pos[Y]}
		return 0
	}
	w := Mesh().WorldSize()
	dx := pos[X] - prevBpos[X]
	dy := pos[Y] - prevBpos[Y]
	prevBpos = [2]float64{pos[X], pos[Y]}
	// PBC wrap: a displacement larger than half the world size means
	// the bubble crossed a periodic boundary; fold it back.
	if dx > w[X]/2 {
		dx -= w[X]
	}
	if dx < -w[X]/2 {
		dx += w[X]
	}
	if dy > w[Y]/2 {
		dy -= w[Y]
	}
	if dy < -w[Y]/2 {
		dy += w[Y]
	}
	bdist += math.Sqrt(dx*dx + dy*dy)
	return bdist
}
var (
	// prevBdist is the cumulative distance at the previous speed sample.
	prevBdist = 0.0
	// prevBt is the simulation time of the previous sample; a negative
	// value marks "no sample taken yet".
	prevBt = -999.0
)
// bubbleSpeed returns the bubble speed as the finite difference of
// traveled distance over simulation time. The first call only primes
// the previous sample and returns 0.
func bubbleSpeed() float64 {
	dist := bubbleDist()
	if prevBt < 0 {
		prevBdist = dist
		prevBt = Time
		return 0
	}
	v := (dist - prevBdist) / (Time - prevBt)
	prevBt = Time
	prevBdist = dist
	return v
} | engine/ext_bubblepos.go | 0.585931 | 0.521471 | ext_bubblepos.go | starcoder |
package metrics
import (
"regexp"
"time"
"github.com/Masterminds/semver"
)
// AllJobs represents a regex that will collect results from all jobs.
var AllJobs = regexp.MustCompile(".*")
// Phase is a phase of an osde2e run.
type Phase string
// Result is the result of a JUnit test.
type Result string
const (
	// Install phase represents tests that were run after the initial installation of the cluster.
	Install Phase = "install"
	// Upgrade phase represents tests that were run after the upgrade of the cluster.
	Upgrade Phase = "upgrade"
	// UnknownPhase represents tests that were run in a phase that is currently unknown to the metrics library.
	UnknownPhase Phase = "unknown"
	// Passed result represents a JUnitResult that passed acceptably.
	Passed Result = "passed"
	// Failed result represents a JUnitResult that failed.
	Failed Result = "failed"
	// Skipped result represents a JUnitResult that was skipped during a run.
	Skipped Result = "skipped"
	// UnknownResult represents a JUnitResult that is currently unknown to the metrics library.
	UnknownResult Result = "unknown"
)
// Event objects that are recorded by osde2e runs. These typically represent occurrences that are of
// some note. For example, cluster provisioning failure, failure to collect Hive logs, etc.
type Event struct {
	// InstallVersion is the starting install version of the cluster that generated this event.
	InstallVersion *semver.Version
	// UpgradeVersion is the upgrade version of the cluster that generated this event. This can be nil.
	UpgradeVersion *semver.Version
	// CloudProvider is the cluster cloud provider that was used when this event was generated.
	CloudProvider string
	// Environment is the environment that the cluster provider was using during the generation of this event.
	Environment string
	// Event is the name of the event that was recorded.
	Event string
	// ClusterID is the cluster ID of the cluster that was provisioned while generating this event.
	ClusterID string
	// JobName is the name of the job that generated this event.
	JobName string
	// JobID is the job ID number that corresponds to the job that generated this event.
	JobID int64
}
// Equal reports whether two Event objects carry identical field values;
// version pointers are compared nil-safely via versionsEqual.
func (e Event) Equal(that Event) bool {
	return versionsEqual(e.InstallVersion, that.InstallVersion) &&
		versionsEqual(e.UpgradeVersion, that.UpgradeVersion) &&
		e.CloudProvider == that.CloudProvider &&
		e.Environment == that.Environment &&
		e.Event == that.Event &&
		e.ClusterID == that.ClusterID &&
		e.JobName == that.JobName &&
		e.JobID == that.JobID
}
// Metadata objects are numerical values associated with metadata calculated by osde2e.
type Metadata struct {
	// InstallVersion is the starting install version of the cluster that generated this metadata.
	InstallVersion *semver.Version
	// UpgradeVersion is the upgrade version of the cluster that generated this metadata. This can be nil.
	UpgradeVersion *semver.Version
	// CloudProvider is the cluster cloud provider that was used when this metadata was generated.
	CloudProvider string
	// Environment is the environment that the cluster provider was using during the generation of this metadata.
	Environment string
	// MetadataName is the name of the metadata that was recorded.
	MetadataName string
	// ClusterID is the cluster ID of the cluster that was provisioned while generating this metadata.
	ClusterID string
	// JobName is the name of the job that generated this metadata.
	JobName string
	// JobID is the job ID number that corresponds to the job that generated this metadata.
	JobID int64
	// Value is the numerical value associated with this metadata.
	Value float64
}
// Equal reports whether two Metadata objects carry identical field
// values; version pointers are compared nil-safely via versionsEqual.
// Note that Value is compared with ==, so NaN never compares equal.
func (m Metadata) Equal(that Metadata) bool {
	return versionsEqual(m.InstallVersion, that.InstallVersion) &&
		versionsEqual(m.UpgradeVersion, that.UpgradeVersion) &&
		m.CloudProvider == that.CloudProvider &&
		m.Environment == that.Environment &&
		m.MetadataName == that.MetadataName &&
		m.ClusterID == that.ClusterID &&
		m.JobName == that.JobName &&
		m.JobID == that.JobID &&
		m.Value == that.Value
}
// AddonMetadata is numerical data captured by osde2e runs, similar to Metadata. However, this is customizable and
// focused on addon testing.
type AddonMetadata struct {
	Metadata
	// Phase is the test phase where this this metadata was generated in.
	Phase Phase
}
// Equal reports whether two AddonMetadata objects are equal: the
// embedded Metadata must match and the phases must be identical.
func (a AddonMetadata) Equal(that AddonMetadata) bool {
	return a.Metadata.Equal(that.Metadata) && a.Phase == that.Phase
}
// JUnitResult represents an individual test that was run over the course of an osde2e run.
type JUnitResult struct {
	// InstallVersion is the starting install version of the cluster that generated this result.
	InstallVersion *semver.Version
	// UpgradeVersion is the upgrade version of the cluster that generated this result. This can be nil.
	UpgradeVersion *semver.Version
	// CloudProvider is the cluster cloud provider that was used when this result was generated.
	CloudProvider string
	// Environment is the environment that the cluster provider was using during the generation of this result.
	Environment string
	// Suite is the name of the test suite that this test belongs to.
	Suite string
	// TestName is the name of the test that was run.
	TestName string
	// Result is the result of this test.
	Result Result
	// ClusterID is the cluster ID of the cluster that was provisioned while generating this result.
	ClusterID string
	// JobName is the name of the job that generated this result.
	JobName string
	// JobID is the job ID number that corresponds to the job that generated this result.
	JobID int64
	// Phase is the test phase where this this result was generated in.
	Phase Phase
	// Duration is the length of time that this test took to run.
	Duration time.Duration
}
// Equal reports whether two JUnitResult objects carry identical field
// values; version pointers are compared nil-safely via versionsEqual.
func (j JUnitResult) Equal(that JUnitResult) bool {
	return versionsEqual(j.InstallVersion, that.InstallVersion) &&
		versionsEqual(j.UpgradeVersion, that.UpgradeVersion) &&
		j.CloudProvider == that.CloudProvider &&
		j.Environment == that.Environment &&
		j.Suite == that.Suite &&
		j.TestName == that.TestName &&
		j.Result == that.Result &&
		j.ClusterID == that.ClusterID &&
		j.JobName == that.JobName &&
		j.JobID == that.JobID &&
		j.Phase == that.Phase &&
		j.Duration == that.Duration
}
// versionsEqual is a nil-safe semver equality check: two nil pointers
// are equal, nil vs non-nil is unequal, and two non-nil versions are
// compared with semver's Equal.
func versionsEqual(version1, version2 *semver.Version) bool {
	if version1 == nil || version2 == nil {
		return version1 == version2
	}
	return version1.Equal(version2)
} | pkg/metrics/objects.go | 0.821367 | 0.449393 | objects.go | starcoder |
package timespec
import (
"fmt"
"time"
)
// Interval enumerates how often a Spec recurs.
type Interval uint
const (
	// Hourly repeats every hour, at TimeOfHour minutes past the hour.
	Hourly Interval = iota
	// Daily repeats every day at TimeOfDay.
	Daily
	// Weekly repeats on DayOfWeek at TimeOfDay.
	Weekly
	// Monthly repeats either on the Week-th DayOfWeek or on DayOfMonth.
	Monthly
)
// ns is one second in nanoseconds, used to build time.Durations.
const ns = 1000 * 1000 * 1000
// Spec describes a recurring schedule. TimeOfDay is minutes since
// midnight and TimeOfHour is minutes past the hour. For Monthly specs,
// exactly one of Week (the n-th occurrence of DayOfWeek, 1-5) or
// DayOfMonth (1-31) should be non-zero.
type Spec struct {
	Interval Interval
	TimeOfDay int
	TimeOfHour int
	DayOfWeek time.Weekday
	DayOfMonth int
	Week int
}
func roundM(t time.Time) time.Time {
return t.Add(time.Duration(-1 * (ns*t.Second() + t.Nanosecond())))
}
func offsetM(t time.Time, m int) time.Time {
return t.Add(time.Duration(ns * 60 * m))
}
func nthWeek(t time.Time) int {
return int(t.Day()/7) + 1
}
// ord returns the English ordinal suffix for n ("st", "nd", "rd" or
// "th"), treating 11-13 in every hundred as "th".
func ord(n int) string {
	if n%100 >= 11 && n%100 <= 13 {
		return "th"
	}
	switch n % 10 {
	case 1:
		return "st"
	case 2:
		return "nd"
	case 3:
		return "rd"
	default:
		return "th"
	}
}
func weekday(d time.Weekday) string {
switch d {
case time.Sunday:
return "sunday"
case time.Monday:
return "monday"
case time.Tuesday:
return "tuesday"
case time.Wednesday:
return "wednesday"
case time.Thursday:
return "thursday"
case time.Friday:
return "friday"
case time.Saturday:
return "saturday"
}
return "unknown-weekday"
}
// String renders the spec as a human-readable schedule such as
// "daily at 4:30", "tuesdays at 16:00" or "2nd friday at 9:15". It
// returns "<unknown interval>" for combinations it cannot describe.
func (s *Spec) String() string {
	// t is the H:MM rendering of TimeOfDay (minutes since midnight).
	t := fmt.Sprintf("%d:%02d", s.TimeOfDay/60, s.TimeOfDay%60)
	if s.Interval == Hourly && s.TimeOfHour < 60 {
		return fmt.Sprintf("hourly at %d after", s.TimeOfHour)
	}
	if s.Interval == Daily {
		return fmt.Sprintf("daily at %s", t)
	} else if s.Interval == Weekly {
		return fmt.Sprintf("%ss at %s", weekday(s.DayOfWeek), t)
	} else if s.Interval == Monthly && s.Week != 0 {
		return fmt.Sprintf("%d%s %s at %s", s.Week, ord(s.Week), weekday(s.DayOfWeek), t)
	} else if s.Interval == Monthly && s.DayOfMonth != 0 {
		return fmt.Sprintf("monthly at %s on %d%s", t, s.DayOfMonth, ord(s.DayOfMonth))
	}
	return "<unknown interval>"
}
// Next computes the first time strictly after t (rounded down to the
// minute) that satisfies the spec. It returns an error for out-of-range
// Week / DayOfMonth values and for interval combinations it cannot
// handle. Fix: the DayOfMonth range check previously reported
// "Cannot calculate the %dth week in a month" using s.Week; it now
// reports the DayOfMonth value it is actually validating.
func (s *Spec) Next(t time.Time) (time.Time, error) {
	t = roundM(t)
	// midnight is the start of t's day (t minus its hour/minute offset).
	midnight := offsetM(t, -1*(t.Hour()*60+t.Minute()))
	if s.Interval == Hourly && s.TimeOfHour < 60 {
		target := offsetM(t, s.TimeOfHour-t.Minute())
		if target.After(t) {
			return target, nil
		}
		return offsetM(target, 60), nil
	}
	if s.Interval == Daily {
		target := offsetM(midnight, s.TimeOfDay)
		if target.After(t) {
			return target, nil
		}
		return offsetM(target, 1440), nil
	} else if s.Interval == Weekly {
		// Walk forward a day at a time until the weekday matches, then
		// push a full week if that instant is not strictly after t.
		target := offsetM(midnight, s.TimeOfDay)
		for target.Weekday() != s.DayOfWeek {
			target = offsetM(target, 1440)
		}
		if target.Before(t) || target.Equal(t) {
			target = offsetM(target, 7*1440)
		}
		return target, nil
	} else if s.Interval == Monthly && s.Week != 0 {
		if s.Week < 1 || s.Week > 5 {
			return t, fmt.Errorf("Cannot calculate the %dth week in a month", s.Week)
		}
		// Find the next matching weekday after t, then step whole weeks
		// until it is the Week-th occurrence within its month.
		target := offsetM(midnight, s.TimeOfDay)
		for target.Weekday() != s.DayOfWeek || target.Before(t) {
			target = offsetM(target, 1440)
		}
		for nthWeek(target) != s.Week {
			target = offsetM(target, 1440*7)
		}
		return target, nil
	} else if s.Interval == Monthly && s.DayOfMonth != 0 {
		if s.DayOfMonth < 1 || s.DayOfMonth > 31 {
			return t, fmt.Errorf("Cannot calculate the %dth day of a month", s.DayOfMonth)
		}
		// Walk forward a day at a time until the day-of-month matches.
		target := offsetM(midnight, s.TimeOfDay)
		for target.Day() != s.DayOfMonth || target.Before(t) {
			target = offsetM(target, 1440)
		}
		return target, nil
	}
	return t, fmt.Errorf("unhandled Interval for Spec")
} | timespec/spec.go | 0.607896 | 0.487063 | spec.go | starcoder |
package bulletproofs
import (
"bytes"
"errors"
"fmt"
"github.com/sirupsen/logrus"
"io"
"math/big"
)
// Point is a group element of the secp256k1 curve in affine coordinates.
type Point struct {
	X *big.Int
	Y *big.Int
}
// Equals returns true if the given point has the same coordinates.
// Both points are assumed to be in the same (affine) representation.
func (p *Point) Equals(other *Point) bool {
	return p.X.Cmp(other.X) == 0 && p.Y.Cmp(other.Y) == 0
}
// String prints the coordinates of this point as hex, padded to at
// least 32 hex characters each.
func (p *Point) String() string {
	return fmt.Sprintf("{x: %032x, y: %032x}", p.X.Bytes(), p.Y.Bytes())
}
// Read deserializes a compressed elliptic curve point from the reader.
// The wire format is one sign byte (must be 8 or 9; bit 0 selects the
// sign of y) followed by the 32-byte big-endian x coordinate. y is
// recovered from the curve equation y² = x³ + 7.
// NOTE(review): the recovered y is not checked to actually satisfy the
// curve equation (x³+7 could be a non-residue) — confirm callers
// validate points elsewhere.
func (p *Point) Read(r io.Reader) error {
	buf := make([]byte, 32+1)
	if _, err := io.ReadFull(r, buf); err != nil {
		return err
	}
	sign := buf[0]
	x := new(big.Int).SetBytes(buf[1:])
	// Accept only the two Pedersen-commitment sign bytes (8 or 9).
	if (sign & 0xfe) != 8 {
		return errors.New("point is not serialized correctly")
	}
	// Derive the possible y coordinates from the secp256k1 curve
	// y² = x³ + 7.
	x3 := new(big.Int).Mul(x, x)
	x3.Mul(x3, x)
	x3.Add(x3, curve.Params().B)
	// y = ±sqrt(x³ + 7).
	y := ModSqrtFast(x3)
	// Pick which y from the sign encoded in the first byte.
	if (sign & 1) != 0 {
		y = new(big.Int).Sub(curve.P, y)
	}
	p.X = x
	p.Y = y
	return nil
}
// serializedPedersenCommitment is the constant that is encoded to signal that
// the encoded value is a Pedersen commitment, rather than a standard compressed
// curve point.
const serializedPedersenCommitment = byte(9)
// Bytes compresses and serializes the point: one sign byte (9, with bit
// 0 cleared to 8 when y is a quadratic residue) followed by the
// fixed-width 32-byte big-endian x coordinate. This is the inverse of
// Point.Read.
func (p *Point) Bytes() []byte {
	buff := new(bytes.Buffer)
	sign := serializedPedersenCommitment
	if IsQuadraticResidue(p.Y) {
		sign ^= 1
	}
	if err := buff.WriteByte(sign); err != nil {
		logrus.Fatal(err)
	}
	x := GetB32(p.X)
	if _, err := buff.Write(x[:]); err != nil {
		logrus.Fatal(err)
	}
	return buff.Bytes()
}
// isOdd returns true if the given integer is odd.
func isOdd(a *big.Int) bool {
return a.Bit(0) == 1
}
// ModSqrtOrig returns a value v such that v*v = x mod P.
func ModSqrtOrig(x *big.Int) *big.Int {
	return new(big.Int).ModSqrt(x, curve.Params().P)
}
// ModSqrtFast returns a value v such that v*v = x mod P. This is about twice as
// fast as ModSqrtOrig. See: https://bitcointalk.org/index.php?topic=162805.msg1712294#msg1712294
// It relies on P ≡ 3 (mod 4) for secp256k1, so the square root is
// x^((P+1)/4) mod P. The result is meaningful only when x is a
// quadratic residue.
func ModSqrtFast(x *big.Int) *big.Int {
	return new(big.Int).Exp(x, curve.QPlus1Div4(), curve.Params().P)
}
// IsQuadraticResidue returns true if there exists some x such that
// x*x = y mod P. Zero (Jacobi symbol 0) is counted as a residue here.
func IsQuadraticResidue(y *big.Int) bool {
	return big.Jacobi(y, curve.P) >= 0
}
// SerializePoints serializes a batch of points as a bit vector followed
// by the fixed-width 32-byte x coordinates. Bit i is set when point
// i's y is NOT a quadratic residue, which is enough to recover the sign
// of y when decompressing.
func SerializePoints(points []*Point) []byte {
	bitvec := make([]byte, (len(points)+7)/8)
	// Encode whether each y value is a quadratic residue, so when we decompress
	// the points we can determine the sign of the y coordinate.
	for i, point := range points {
		if !IsQuadraticResidue(point.Y) {
			bitvec[i/8] |= 1 << (uint(i) % 8)
		}
	}
	// Now write all the x coordinates as fixed 32-byte integers.
	buff := new(bytes.Buffer)
	for _, point := range points {
		x := GetB32(point.X)
		if _, err := buff.Write(x[:]); err != nil {
			logrus.Fatal(err)
		}
	}
	return append(bitvec, buff.Bytes()...)
}
// DeserializePoints parses num points that have been serialized using
// SerializePoints: a bit vector of y-sign flags followed by num 32-byte
// x coordinates. y is recovered from y² = x³ + 7.
// NOTE(review): buf length is not validated against num, and the
// recovered points are not checked to lie on the curve — confirm
// callers guarantee both. The error return is currently always nil.
func DeserializePoints(buf []byte, num uint) ([]*Point, error) {
	bitvecSize := (num + 7) / 8
	isNonResidue := buf[0:bitvecSize]
	xcoords := buf[bitvecSize:]
	points := make([]*Point, num)
	for i := uint(0); i < num; i++ {
		x := new(big.Int).SetBytes(xcoords[i*32 : (i+1)*32])
		// Derive the possible y coordinates from the secp256k1 curve
		// y² = x³ + 7.
		x3 := new(big.Int).Mul(x, x)
		x3.Mul(x3, x)
		x3.Add(x3, curve.Params().B)
		// y = ±sqrt(x³ + 7).
		y := ModSqrtFast(x3)
		// Pick which y from the bit vector.
		if isNonResidue[i/8]&(1<<(i%8)) != 0 {
			y = new(big.Int).Sub(curve.P, y)
		}
		points[i] = new(Point)
		points[i].Y = y
		points[i].X = x
	}
	return points, nil
}
// ScalarMulPoint multiplies a point by a scalar on the secp256k1 curve.
func ScalarMulPoint(point *Point, scalar *big.Int) *Point {
	x, y := curve.ScalarMult(point.X, point.Y, scalar.Bytes())
	return &Point{x, y}
}
// ScalarMultAll multiplies every point by the same scalar and returns
// the sum of the results, i.e. scalar * (p_0 + p_1 + ... + p_n).
// Panics (index out of range) when called with no points.
func ScalarMultAll(scalar *big.Int, points ...*Point) *Point {
	initial := ScalarMulPoint(points[0], scalar)
	sumx := new(big.Int).Set(initial.X)
	sumy := new(big.Int).Set(initial.Y)
	for i := 1; i < len(points); i++ {
		mult := ScalarMulPoint(points[i], scalar)
		sumx, sumy = curve.Add(sumx, sumy, mult.X, mult.Y)
	}
	return &Point{sumx, sumy}
}
// ScalarMulPoints multiplies each point with the corresponding scalar and sums
// the results (a multi-scalar multiplication: Σ scalar_i * point_i).
// This function will panic if the number of scalars and points differ,
// or if both are empty.
func ScalarMulPoints(scalars []*big.Int, points []*Point) *Point {
	if len(scalars) != len(points) {
		panic("len(scalars) != len(points)")
	}
	initial := ScalarMulPoint(points[0], scalars[0])
	sumx := new(big.Int).Set(initial.X)
	sumy := new(big.Int).Set(initial.Y)
	for i := 1; i < len(points); i++ {
		mult := ScalarMulPoint(points[i], scalars[i])
		sumx, sumy = curve.Add(sumx, sumy, mult.X, mult.Y)
	}
	return &Point{sumx, sumy}
}
// ScalarMul returns the vector that is the result of the scalar multiplication
// of vector and scalar; each element is reduced modulo the group order.
func ScalarMul(vector []*big.Int, scalar *big.Int) []*big.Int {
	result := make([]*big.Int, len(vector))
	for i := range vector {
		result[i] = Mul(vector[i], scalar)
	}
	return result
}
// ScalarMultArray multiplies each point in the vector points by the scalar xi
// and returns them as a vector (element-wise, no summation).
func ScalarMultArray(xi *big.Int, points []*Point) []*Point {
	result := make([]*Point, len(points))
	for i := range points {
		result[i] = ScalarMulPoint(points[i], xi)
	}
	return result
}
// Square computes and returns z*z modulo the group order.
func Square(z *big.Int) *big.Int {
	return Mul(z, z)
}
// AddVectors returns the vector z = a + b (element-wise, modulo the
// group order). This function will panic if the vectors are of
// different length.
func AddVectors(a []*big.Int, b []*big.Int) []*big.Int {
	if len(a) != len(b) {
		panic("vectors must be equal dimension")
	}
	z := make([]*big.Int, len(a))
	for i := range a {
		z[i] = Sum(a[i], b[i])
	}
	return z
}
// AddVectors3 returns the vector z = a + b + c (element-wise, modulo
// the group order). Panics if the vectors differ in length.
func AddVectors3(a []*big.Int, b []*big.Int, c []*big.Int) []*big.Int {
	if len(a) != len(b) || len(a) != len(c) {
		panic("vectors must be equal dimension")
	}
	z := make([]*big.Int, len(a))
	for i := range a {
		z[i] = Sum(a[i], b[i], c[i])
	}
	return z
}
// Dot computes the inner product of two vectors of length n: a · b =
// a_1 * b_1 + a_2 * b_2 + ··· + a_n * b_n, reduced modulo the group
// order. Panics if the vectors differ in length.
func Dot(a, b []*big.Int) *big.Int {
	if len(a) != len(b) {
		panic("vectors must have same length")
	}
	result := big.NewInt(0)
	for i := 0; i < len(a); i++ {
		result.Add(result, Mul(a[i], b[i]))
	}
	result.Mod(result, curve.N)
	return result
}
// SubScalars returns the scalar a - b modulo the group order.
func SubScalars(a, b *big.Int) *big.Int {
	aMinusB := new(big.Int).Sub(a, b)
	aMinusB.Mod(aMinusB, curve.N)
	return aMinusB
}
// SubVectors returns the vector a - b (element-wise, modulo the group
// order). This function will panic if the vectors are of different
// lengths. The result slice is preallocated at full length instead of
// being grown with repeated appends.
func SubVectors(a, b []*big.Int) []*big.Int {
	if len(a) != len(b) {
		panic("vectors must have same length")
	}
	result := make([]*big.Int, len(a))
	for i := range a {
		result[i] = SubScalars(a[i], b[i])
	}
	return result
}
// Ones returns a vector of length n where all elements are 1.
func Ones(n int) []*big.Int {
ones := make([]*big.Int, n)
for i := 0; i < n; i++ {
ones[i] = big.NewInt(1)
}
return ones
}
// Hadamard computes the vector given by element-wise multiplication of the two
// given vectors. a ○ b = (a_0*b_0 a_1*b_1 ... a_n*b_n), each product
// reduced modulo the group order. This function will panic if the
// vectors have different lengths.
func Hadamard(a, b []*big.Int) []*big.Int {
	if len(a) != len(b) {
		panic("vectors must be the same length")
	}
	result := make([]*big.Int, len(a))
	for i := 0; i < len(a); i++ {
		result[i] = Mul(a[i], b[i])
	}
	return result
}
// HadamardP computes the element-wise point addition of the two vectors. This
// function will panic if the vectors have different lengths.
func HadamardP(a []*Point, b []*Point) []*Point {
	if len(a) != len(b) {
		panic("vectors must be the same length")
	}
	result := make([]*Point, len(a))
	for i := range a {
		result[i] = new(Point)
		result[i].X, result[i].Y = curve.Add(a[i].X, a[i].Y, b[i].X, b[i].Y)
	}
	return result
}
// Sum adds the given numbers and returns the total sum, reduced modulo the
// group order. Calling Sum with no arguments returns 0, the additive
// identity (the previous implementation indexed nums[0] and panicked).
func Sum(nums ...*big.Int) *big.Int {
	sum := new(big.Int) // additive identity
	for _, n := range nums {
		sum.Add(sum, n)
	}
	sum.Mod(sum, curve.N)
	return sum
}
// SumPoints adds the given curve points and returns the total sum.
//
// The accumulator starts as a copy of the first point's coordinates, so the
// inputs are never mutated.
// NOTE(review): calling SumPoints with zero arguments panics (index out of
// range on points[0]); callers must pass at least one point.
func SumPoints(points ...*Point) *Point {
	sumx := new(big.Int).Set(points[0].X)
	sumy := new(big.Int).Set(points[0].Y)
	for i := 1; i < len(points); i++ {
		sumx, sumy = curve.Add(sumx, sumy, points[i].X, points[i].Y)
	}
	return &Point{
		X: sumx,
		Y: sumy,
	}
}
// Mul returns the product of the given integers, reduced modulo the group
// order. At least one argument must be supplied.
func Mul(nums ...*big.Int) *big.Int {
	product := new(big.Int).Set(nums[0])
	for _, n := range nums[1:] {
		product.Mul(product, n)
	}
	return product.Mod(product, curve.N)
}
// VectorOf returns a length n vector of vs.
func VectorOf(n int, v *big.Int) []*big.Int {
vec := make([]*big.Int, n)
for i := 0; i < n; i++ {
vec[i] = new(big.Int).Set(v)
}
return vec
}
// Neg returns the additive inverse of z modulo the group order, i.e. -z such
// that z + (-z) = 0 mod N. The result is always in [0, N) because big.Int's
// Mod performs Euclidean reduction.
func Neg(z *big.Int) *big.Int {
	neg := new(big.Int).Neg(z)
	return neg.Mod(neg, curve.N)
}
// Inv returns the multiplicative inverse of z modulo the group order, i.e. z^-1
// such that z * z^-1 = 1 mod N.
//
// NOTE(review): big.Int.ModInverse returns nil when z and N are not
// relatively prime (e.g. z == 0 mod N), so callers passing such values
// receive a nil *big.Int — confirm callers guard against this.
func Inv(z *big.Int) *big.Int {
	return new(big.Int).ModInverse(z, curve.N)
}
// GetB32 returns a fixed size 32-byte slice containing the big-endian
// representation of num. This function will panic if the given number does not
// fit into 32 bytes.
func GetB32(num *big.Int) [32]byte {
	numBytes := num.Bytes()
	if len(numBytes) > 32 {
		panic("num doesn't fit in 32 bytes")
	}
	var b [32]byte
	// Right-align the big-endian bytes (zero-padded on the left); copy
	// replaces the original manual index-shifting loop.
	copy(b[32-len(numBytes):], numBytes)
	return b
} | math.go | 0.818845 | 0.613844 | math.go | starcoder |
package xgboost
import (
"context"
"errors"
"runtime"
"github.com/foxever/go-xgboost/core"
)
// Matrix interface for 2D matrix
type Matrix interface {
	// Data returns the matrix contents as a flat row-major float32 slice
	// together with its row and column counts.
	Data() (data []float32, rowCount, columnCount int)
}

// FloatSliceVector float32 slice backed Matrix implementation
type FloatSliceVector []float32

// Data returns float32 slice as (1, len(data)) matrix
func (fsm FloatSliceVector) Data() (data []float32, rowCount, columnCount int) {
	return fsm, 1, len(fsm)
}
// Predictor interface for xgboost predictors
type Predictor interface {
	// Predict runs the model on the given input matrix and returns one
	// prediction per input row.
	Predict(input Matrix) ([]float32, error)
	// Close shuts the predictor down, releasing its worker goroutines.
	Close(ctx context.Context) error
}
// NewPredictor returns a new predictor based on given model path, worker count,
// option mask, ntree_limit and missing value indicator. It starts workerCount
// goroutines, each locked to an OS thread and owning its own booster handle
// loaded from xboostSavedModelPath; workers run until Close is called.
func NewPredictor(xboostSavedModelPath string, workerCount int, optionMask int, nTreeLimit uint, missingValue float32) (Predictor, error) {
	if workerCount <= 0 {
		return nil, errors.New("worker count needs to be larger than zero")
	}

	requestChan := make(chan multiBoosterRequest)
	initErrors := make(chan error)
	defer close(initErrors)

	for i := 0; i < workerCount; i++ {
		go func() {
			runtime.LockOSThread()
			defer runtime.UnlockOSThread()

			booster, err := core.XGBoosterCreate(nil)
			if err != nil {
				initErrors <- err
				return
			}

			err = booster.LoadModel(xboostSavedModelPath)
			if err != nil {
				initErrors <- err
				return
			}

			// No errors occurred during init
			initErrors <- nil

			// Serve prediction requests until requestChan is closed.
			for req := range requestChan {
				data, rowCount, columnCount := req.matrix.Data()
				matrix, err := core.XGDMatrixCreateFromMat(data, rowCount, columnCount, missingValue)
				if err != nil {
					req.resultChan <- multiBoosterResponse{
						err: err,
					}
					continue
				}

				res, err := booster.Predict(matrix, optionMask, nTreeLimit)
				req.resultChan <- multiBoosterResponse{
					err:    err,
					result: res,
				}
			}
		}()

		if err := <-initErrors; err != nil {
			// Unblock workers that initialized successfully in earlier
			// iterations; without this close they would leak, blocked on
			// requestChan forever.
			close(requestChan)
			return nil, err
		}
	}

	return &multiBooster{reqChan: requestChan}, nil
}
// NewPredictorFromBuffer behaves like NewPredictor but loads the model from
// an in-memory serialized buffer instead of a file path. It starts
// workerCount OS-thread-locked goroutines, each owning its own booster
// handle; workers run until Close is called.
func NewPredictorFromBuffer(xboostSavedModelBuffer []byte, workerCount int, optionMask int, nTreeLimit uint, missingValue float32) (Predictor, error) {
	if workerCount <= 0 {
		return nil, errors.New("worker count needs to be larger than zero")
	}

	requestChan := make(chan multiBoosterRequest)
	initErrors := make(chan error)
	defer close(initErrors)

	for i := 0; i < workerCount; i++ {
		go func() {
			runtime.LockOSThread()
			defer runtime.UnlockOSThread()

			booster, err := core.XGBoosterCreate(nil)
			if err != nil {
				initErrors <- err
				return
			}

			err = booster.LoadModelFromBuffer(xboostSavedModelBuffer)
			if err != nil {
				initErrors <- err
				return
			}

			// No errors occurred during init
			initErrors <- nil

			// Serve prediction requests until requestChan is closed.
			for req := range requestChan {
				data, rowCount, columnCount := req.matrix.Data()
				matrix, err := core.XGDMatrixCreateFromMat(data, rowCount, columnCount, missingValue)
				if err != nil {
					req.resultChan <- multiBoosterResponse{
						err: err,
					}
					continue
				}

				res, err := booster.Predict(matrix, optionMask, nTreeLimit)
				req.resultChan <- multiBoosterResponse{
					err:    err,
					result: res,
				}
			}
		}()

		if err := <-initErrors; err != nil {
			// Unblock workers that initialized successfully in earlier
			// iterations; without this close they would leak, blocked on
			// requestChan forever.
			close(requestChan)
			return nil, err
		}
	}

	return &multiBooster{reqChan: requestChan}, nil
}
// multiBoosterRequest is a unit of work sent to a worker goroutine: the
// input matrix plus the channel on which the caller awaits the response.
type multiBoosterRequest struct {
	matrix     Matrix
	resultChan chan multiBoosterResponse
}

// multiBoosterResponse carries one prediction result (or the error that
// prevented it) back to the caller.
type multiBoosterResponse struct {
	err    error
	result []float32
}

// multiBooster fans Predict calls out to a pool of worker goroutines via
// reqChan; it implements the Predictor interface.
type multiBooster struct {
	reqChan chan multiBoosterRequest
}
// Predict hands the input to whichever idle worker receives it first and
// blocks until that worker replies with the model output or an error.
// NOTE(review): calling Predict after Close panics (send on closed channel).
func (mb *multiBooster) Predict(input Matrix) ([]float32, error) {
	resChan := make(chan multiBoosterResponse)
	mb.reqChan <- multiBoosterRequest{
		matrix:     input,
		resultChan: resChan,
	}
	result := <-resChan
	return result.result, result.err
}
// Close stops all worker goroutines by closing the request channel and
// always returns nil. The ctx parameter is unused; it exists to satisfy the
// Predictor interface. NOTE(review): Close must be called at most once — a
// second call panics on the already-closed channel.
func (mb *multiBooster) Close(ctx context.Context) error {
	close(mb.reqChan)
	return nil
} | booster.go | 0.707607 | 0.401131 | booster.go | starcoder |
package encoding
import (
"sort"
)
// ByteArrayList is a container similar to [][]byte with a smaller memory
// overhead. Where using a byte slices introduces ~24 bytes of overhead per
// element, ByteArrayList requires only 8 bytes per element. Extra efficiency
// also comes from reducing GC pressure by using contiguous areas of memory
// instead of allocating individual slices for each element. For lists with
// many small-size elements, the memory footprint can be reduced by 40-80%.
type ByteArrayList struct {
	slices []slice // per-element (start, end) offsets into values
	values []byte  // contiguous backing storage for all elements
}

// slice is a half-open [i, j) byte window into ByteArrayList.values.
type slice struct{ i, j uint32 }

// len reports the element's length in bytes.
func (s slice) len() int { return int(s.j - s.i) }
// MakeByteArrayList returns an empty list with room for capacity elements,
// reserving eight bytes of value storage per expected element.
func MakeByteArrayList(capacity int) ByteArrayList {
	return ByteArrayList{
		slices: make([]slice, 0, capacity),
		values: make([]byte, 0, 8*capacity),
	}
}
// Clone returns a deep copy of the list: both the offset table and the
// backing bytes are freshly allocated, so the copy is fully independent of
// the receiver.
func (list *ByteArrayList) Clone() ByteArrayList {
	total := 0
	for _, s := range list.slices {
		total += s.len()
	}
	out := ByteArrayList{
		slices: make([]slice, 0, len(list.slices)),
		values: make([]byte, 0, total),
	}
	for _, s := range list.slices {
		out.Push(list.slice(s))
	}
	return out
}
// Split materializes the list as a conventional [][]byte. The returned byte
// slices alias one fresh copy of the backing storage — not the list itself —
// so the list can be mutated afterwards without affecting the result.
//
// NOTE(review): the temporary clone shares list.slices (only values is
// copied); that is safe here because the clone is read-only and does not
// escape.
func (list *ByteArrayList) Split() [][]byte {
	clone := ByteArrayList{
		slices: list.slices,
		values: make([]byte, len(list.values)),
	}
	copy(clone.values, list.values)
	split := make([][]byte, clone.Len())
	for i := range split {
		split[i] = clone.Index(i)
	}
	return split
}
// Slice returns the sub-list of elements [i, j). The result is a view that
// shares both the offset table and the backing bytes with the receiver; the
// three-index slice caps the view so appends to it cannot clobber the
// receiver's offset table.
func (list *ByteArrayList) Slice(i, j int) ByteArrayList {
	return ByteArrayList{
		slices: list.slices[i:j:j],
		values: list.values,
	}
}
// Grow ensures there is capacity to append n more elements to the offset
// table without reallocating.
func (list *ByteArrayList) Grow(n int) {
	if n <= cap(list.slices)-len(list.slices) {
		return
	}
	newLen := len(list.slices) + n
	// Double the capacity until it fits. Seed with the required length when
	// the current capacity is zero: the original `newCap := 2 * cap(...)`
	// spun forever in that case because 0*2 stays 0 (reachable via a
	// zero-value ByteArrayList or MakeByteArrayList(0)).
	newCap := 2 * cap(list.slices)
	if newCap == 0 {
		newCap = newLen
	}
	for newCap < newLen {
		newCap *= 2
	}
	newSlices := make([]slice, len(list.slices), newCap)
	copy(newSlices, list.slices)
	list.slices = newSlices
}
// Reset empties the list while keeping the allocated capacity of both the
// offset table and the backing storage for reuse.
func (list *ByteArrayList) Reset() {
	list.slices = list.slices[:0]
	list.values = list.values[:0]
}
// Push appends a copy of v to the list.
func (list *ByteArrayList) Push(v []byte) {
	// Record the [start, end) window first, then append the bytes.
	list.slices = append(list.slices, slice{
		i: uint32(len(list.values)),
		j: uint32(len(list.values) + len(v)),
	})
	list.values = append(list.values, v...)
}
// PushSize appends a new n-byte element and returns the caller-writable
// window backing it. The returned bytes are not zeroed when existing
// capacity is reused.
func (list *ByteArrayList) PushSize(n int) []byte {
	i := len(list.values)
	j := i + n
	list.slices = append(list.slices, slice{
		i: uint32(i),
		j: uint32(j),
	})
	if j <= cap(list.values) {
		list.values = list.values[:j]
	} else {
		// Double the capacity until it fits. Seed with the required
		// length when the current capacity is zero: the original
		// `newCap := 2 * cap(...)` spun forever in that case because
		// 0*2 stays 0 (reachable via a zero-value ByteArrayList or
		// MakeByteArrayList(0)).
		newCap := 2 * cap(list.values)
		if newCap == 0 {
			newCap = j
		}
		for newCap < j {
			newCap *= 2
		}
		newValues := make([]byte, j, newCap)
		copy(newValues, list.values)
		list.values = newValues
	}
	return list.values[i:j:j]
}
// Index returns the i'th element. The result aliases the list's backing
// storage and is valid until the list is next mutated.
func (list *ByteArrayList) Index(i int) []byte {
	return list.slice(list.slices[i])
}
// Range calls f on each element in order, stopping early the first time f
// returns false.
func (list *ByteArrayList) Range(f func([]byte) bool) {
	for i := range list.slices {
		if !f(list.slice(list.slices[i])) {
			return
		}
	}
}
// Size returns the in-memory footprint in bytes: eight bytes of offset
// overhead per element plus the element bytes themselves.
func (list *ByteArrayList) Size() int64 {
	var total int64
	for i := range list.slices {
		total += 8 + int64(list.slices[i].len())
	}
	return total
}
// Cap returns the element capacity of the offset table.
func (list *ByteArrayList) Cap() int {
	return cap(list.slices)
}

// Len returns the number of elements; part of sort.Interface.
func (list *ByteArrayList) Len() int {
	return len(list.slices)
}

// Less orders elements lexicographically; part of sort.Interface. The
// string conversions in this comparison form are recognized by the compiler
// and do not allocate.
func (list *ByteArrayList) Less(i, j int) bool {
	return string(list.Index(i)) < string(list.Index(j))
}

// Swap exchanges two offset entries; the backing bytes do not move.
// Part of sort.Interface.
func (list *ByteArrayList) Swap(i, j int) {
	list.slices[i], list.slices[j] = list.slices[j], list.slices[i]
}

// slice resolves an offset window to its backing bytes, capped so callers
// cannot append past the element's end.
func (list *ByteArrayList) slice(s slice) []byte {
	return list.values[s.i:s.j:s.j]
}

// Compile-time check that ByteArrayList satisfies sort.Interface.
var (
	_ sort.Interface = (*ByteArrayList)(nil)
) | encoding/bytearray.go | 0.727395 | 0.488222 | bytearray.go | starcoder |
package tipb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
io "io"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type ExecType int32
const (
ExecType_TypeTableScan ExecType = 0
ExecType_TypeIndexScan ExecType = 1
ExecType_TypeSelection ExecType = 2
ExecType_TypeAggregation ExecType = 3
ExecType_TypeTopN ExecType = 4
ExecType_TypeLimit ExecType = 5
ExecType_TypeStreamAgg ExecType = 6
ExecType_TypeJoin ExecType = 7
ExecType_TypeKill ExecType = 8
ExecType_TypeExchangeSender ExecType = 9
ExecType_TypeExchangeReceiver ExecType = 10
ExecType_TypeProjection ExecType = 11
ExecType_TypeGraphScan ExecType = 12
)
var ExecType_name = map[int32]string{
0: "TypeTableScan",
1: "TypeIndexScan",
2: "TypeSelection",
3: "TypeAggregation",
4: "TypeTopN",
5: "TypeLimit",
6: "TypeStreamAgg",
7: "TypeJoin",
8: "TypeKill",
9: "TypeExchangeSender",
10: "TypeExchangeReceiver",
11: "TypeProjection",
12: "TypeGraphScan",
}
var ExecType_value = map[string]int32{
"TypeTableScan": 0,
"TypeIndexScan": 1,
"TypeSelection": 2,
"TypeAggregation": 3,
"TypeTopN": 4,
"TypeLimit": 5,
"TypeStreamAgg": 6,
"TypeJoin": 7,
"TypeKill": 8,
"TypeExchangeSender": 9,
"TypeExchangeReceiver": 10,
"TypeProjection": 11,
"TypeGraphScan": 12,
}
func (x ExecType) Enum() *ExecType {
p := new(ExecType)
*p = x
return p
}
func (x ExecType) String() string {
return proto.EnumName(ExecType_name, int32(x))
}
func (x *ExecType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(ExecType_value, data, "ExecType")
if err != nil {
return err
}
*x = ExecType(value)
return nil
}
func (ExecType) EnumDescriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{0} }
type ExchangeType int32
const (
ExchangeType_PassThrough ExchangeType = 0
ExchangeType_Broadcast ExchangeType = 1
ExchangeType_Hash ExchangeType = 2
)
var ExchangeType_name = map[int32]string{
0: "PassThrough",
1: "Broadcast",
2: "Hash",
}
var ExchangeType_value = map[string]int32{
"PassThrough": 0,
"Broadcast": 1,
"Hash": 2,
}
func (x ExchangeType) Enum() *ExchangeType {
p := new(ExchangeType)
*p = x
return p
}
func (x ExchangeType) String() string {
return proto.EnumName(ExchangeType_name, int32(x))
}
func (x *ExchangeType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(ExchangeType_value, data, "ExchangeType")
if err != nil {
return err
}
*x = ExchangeType(value)
return nil
}
func (ExchangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{1} }
type EngineType int32
const (
EngineType_Local EngineType = 0
EngineType_TiKV EngineType = 1
EngineType_TiFlash EngineType = 2
)
var EngineType_name = map[int32]string{
0: "Local",
1: "TiKV",
2: "TiFlash",
}
var EngineType_value = map[string]int32{
"Local": 0,
"TiKV": 1,
"TiFlash": 2,
}
func (x EngineType) Enum() *EngineType {
p := new(EngineType)
*p = x
return p
}
func (x EngineType) String() string {
return proto.EnumName(EngineType_name, int32(x))
}
func (x *EngineType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(EngineType_value, data, "EngineType")
if err != nil {
return err
}
*x = EngineType(value)
return nil
}
func (EngineType) EnumDescriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{2} }
type JoinType int32
const (
JoinType_TypeInnerJoin JoinType = 0
JoinType_TypeLeftOuterJoin JoinType = 1
JoinType_TypeRightOuterJoin JoinType = 2
JoinType_TypeSemiJoin JoinType = 3
JoinType_TypeAntiSemiJoin JoinType = 4
JoinType_TypeLeftOuterSemiJoin JoinType = 5
JoinType_TypeAntiLeftOuterSemiJoin JoinType = 6
)
var JoinType_name = map[int32]string{
0: "TypeInnerJoin",
1: "TypeLeftOuterJoin",
2: "TypeRightOuterJoin",
3: "TypeSemiJoin",
4: "TypeAntiSemiJoin",
5: "TypeLeftOuterSemiJoin",
6: "TypeAntiLeftOuterSemiJoin",
}
var JoinType_value = map[string]int32{
"TypeInnerJoin": 0,
"TypeLeftOuterJoin": 1,
"TypeRightOuterJoin": 2,
"TypeSemiJoin": 3,
"TypeAntiSemiJoin": 4,
"TypeLeftOuterSemiJoin": 5,
"TypeAntiLeftOuterSemiJoin": 6,
}
func (x JoinType) Enum() *JoinType {
p := new(JoinType)
*p = x
return p
}
func (x JoinType) String() string {
return proto.EnumName(JoinType_name, int32(x))
}
func (x *JoinType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(JoinType_value, data, "JoinType")
if err != nil {
return err
}
*x = JoinType(value)
return nil
}
func (JoinType) EnumDescriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{3} }
type JoinExecType int32
const (
JoinExecType_TypeHashJoin JoinExecType = 0
)
var JoinExecType_name = map[int32]string{
0: "TypeHashJoin",
}
var JoinExecType_value = map[string]int32{
"TypeHashJoin": 0,
}
func (x JoinExecType) Enum() *JoinExecType {
p := new(JoinExecType)
*p = x
return p
}
func (x JoinExecType) String() string {
return proto.EnumName(JoinExecType_name, int32(x))
}
func (x *JoinExecType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(JoinExecType_value, data, "JoinExecType")
if err != nil {
return err
}
*x = JoinExecType(value)
return nil
}
func (JoinExecType) EnumDescriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{4} }
// It represents a Executor.
type Executor struct {
Tp ExecType `protobuf:"varint,1,opt,name=tp,enum=tipb.ExecType" json:"tp"`
TblScan *TableScan `protobuf:"bytes,2,opt,name=tbl_scan,json=tblScan" json:"tbl_scan,omitempty"`
IdxScan *IndexScan `protobuf:"bytes,3,opt,name=idx_scan,json=idxScan" json:"idx_scan,omitempty"`
Selection *Selection `protobuf:"bytes,4,opt,name=selection" json:"selection,omitempty"`
Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation" json:"aggregation,omitempty"`
TopN *TopN `protobuf:"bytes,6,opt,name=topN" json:"topN,omitempty"`
Limit *Limit `protobuf:"bytes,7,opt,name=limit" json:"limit,omitempty"`
ExchangeReceiver *ExchangeReceiver `protobuf:"bytes,8,opt,name=exchange_receiver,json=exchangeReceiver" json:"exchange_receiver,omitempty"`
Join *Join `protobuf:"bytes,9,opt,name=join" json:"join,omitempty"`
ExecutorId *string `protobuf:"bytes,10,opt,name=executor_id,json=executorId" json:"executor_id,omitempty"`
Kill *Kill `protobuf:"bytes,11,opt,name=kill" json:"kill,omitempty"`
ExchangeSender *ExchangeSender `protobuf:"bytes,12,opt,name=exchange_sender,json=exchangeSender" json:"exchange_sender,omitempty"`
Projection *Projection `protobuf:"bytes,13,opt,name=Projection" json:"Projection,omitempty"`
GrpScan *GraphScan `protobuf:"bytes,14,opt,name=grp_scan,json=grpScan" json:"grp_scan,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Executor) Reset() { *m = Executor{} }
func (m *Executor) String() string { return proto.CompactTextString(m) }
func (*Executor) ProtoMessage() {}
func (*Executor) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{0} }
func (m *Executor) GetTp() ExecType {
if m != nil {
return m.Tp
}
return ExecType_TypeTableScan
}
func (m *Executor) GetTblScan() *TableScan {
if m != nil {
return m.TblScan
}
return nil
}
func (m *Executor) GetIdxScan() *IndexScan {
if m != nil {
return m.IdxScan
}
return nil
}
func (m *Executor) GetSelection() *Selection {
if m != nil {
return m.Selection
}
return nil
}
func (m *Executor) GetAggregation() *Aggregation {
if m != nil {
return m.Aggregation
}
return nil
}
func (m *Executor) GetTopN() *TopN {
if m != nil {
return m.TopN
}
return nil
}
func (m *Executor) GetLimit() *Limit {
if m != nil {
return m.Limit
}
return nil
}
func (m *Executor) GetExchangeReceiver() *ExchangeReceiver {
if m != nil {
return m.ExchangeReceiver
}
return nil
}
func (m *Executor) GetJoin() *Join {
if m != nil {
return m.Join
}
return nil
}
func (m *Executor) GetExecutorId() string {
if m != nil && m.ExecutorId != nil {
return *m.ExecutorId
}
return ""
}
func (m *Executor) GetKill() *Kill {
if m != nil {
return m.Kill
}
return nil
}
func (m *Executor) GetExchangeSender() *ExchangeSender {
if m != nil {
return m.ExchangeSender
}
return nil
}
func (m *Executor) GetProjection() *Projection {
if m != nil {
return m.Projection
}
return nil
}
func (m *Executor) GetGrpScan() *GraphScan {
if m != nil {
return m.GrpScan
}
return nil
}
// ExchangeSender will build connection with ExchangeReceiver.
type ExchangeSender struct {
Tp ExchangeType `protobuf:"varint,1,opt,name=tp,enum=tipb.ExchangeType" json:"tp"`
EncodedTaskMeta [][]byte `protobuf:"bytes,2,rep,name=encoded_task_meta,json=encodedTaskMeta" json:"encoded_task_meta,omitempty"`
PartitionKeys []*Expr `protobuf:"bytes,3,rep,name=partition_keys,json=partitionKeys" json:"partition_keys,omitempty"`
Child *Executor `protobuf:"bytes,4,opt,name=child" json:"child,omitempty"`
Types []*FieldType `protobuf:"bytes,5,rep,name=types" json:"types,omitempty"`
AllFieldTypes []*FieldType `protobuf:"bytes,6,rep,name=all_field_types,json=allFieldTypes" json:"all_field_types,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *ExchangeSender) Reset() { *m = ExchangeSender{} }
func (m *ExchangeSender) String() string { return proto.CompactTextString(m) }
func (*ExchangeSender) ProtoMessage() {}
func (*ExchangeSender) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{1} }
func (m *ExchangeSender) GetTp() ExchangeType {
if m != nil {
return m.Tp
}
return ExchangeType_PassThrough
}
func (m *ExchangeSender) GetEncodedTaskMeta() [][]byte {
if m != nil {
return m.EncodedTaskMeta
}
return nil
}
func (m *ExchangeSender) GetPartitionKeys() []*Expr {
if m != nil {
return m.PartitionKeys
}
return nil
}
func (m *ExchangeSender) GetChild() *Executor {
if m != nil {
return m.Child
}
return nil
}
func (m *ExchangeSender) GetTypes() []*FieldType {
if m != nil {
return m.Types
}
return nil
}
func (m *ExchangeSender) GetAllFieldTypes() []*FieldType {
if m != nil {
return m.AllFieldTypes
}
return nil
}
// ExchangeReceiver accept connection and receiver data from ExchangeSender.
type ExchangeReceiver struct {
EncodedTaskMeta [][]byte `protobuf:"bytes,1,rep,name=encoded_task_meta,json=encodedTaskMeta" json:"encoded_task_meta,omitempty"`
FieldTypes []*FieldType `protobuf:"bytes,2,rep,name=field_types,json=fieldTypes" json:"field_types,omitempty"`
Tp *ExchangeType `protobuf:"varint,3,opt,name=tp,enum=tipb.ExchangeType" json:"tp,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *ExchangeReceiver) Reset() { *m = ExchangeReceiver{} }
func (m *ExchangeReceiver) String() string { return proto.CompactTextString(m) }
func (*ExchangeReceiver) ProtoMessage() {}
func (*ExchangeReceiver) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{2} }
func (m *ExchangeReceiver) GetEncodedTaskMeta() [][]byte {
if m != nil {
return m.EncodedTaskMeta
}
return nil
}
func (m *ExchangeReceiver) GetFieldTypes() []*FieldType {
if m != nil {
return m.FieldTypes
}
return nil
}
func (m *ExchangeReceiver) GetTp() ExchangeType {
if m != nil && m.Tp != nil {
return *m.Tp
}
return ExchangeType_PassThrough
}
type TableScan struct {
TableId int64 `protobuf:"varint,1,opt,name=table_id,json=tableId" json:"table_id"`
Columns []*ColumnInfo `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"`
Desc bool `protobuf:"varint,3,opt,name=desc" json:"desc"`
PrimaryColumnIds []int64 `protobuf:"varint,4,rep,name=primary_column_ids,json=primaryColumnIds" json:"primary_column_ids,omitempty"`
NextReadEngine EngineType `protobuf:"varint,5,opt,name=next_read_engine,json=nextReadEngine,enum=tipb.EngineType" json:"next_read_engine"`
Ranges []KeyRange `protobuf:"bytes,6,rep,name=ranges" json:"ranges"`
PrimaryPrefixColumnIds []int64 `protobuf:"varint,7,rep,name=primary_prefix_column_ids,json=primaryPrefixColumnIds" json:"primary_prefix_column_ids,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *TableScan) Reset() { *m = TableScan{} }
func (m *TableScan) String() string { return proto.CompactTextString(m) }
func (*TableScan) ProtoMessage() {}
func (*TableScan) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{3} }
func (m *TableScan) GetTableId() int64 {
if m != nil {
return m.TableId
}
return 0
}
func (m *TableScan) GetColumns() []*ColumnInfo {
if m != nil {
return m.Columns
}
return nil
}
func (m *TableScan) GetDesc() bool {
if m != nil {
return m.Desc
}
return false
}
func (m *TableScan) GetPrimaryColumnIds() []int64 {
if m != nil {
return m.PrimaryColumnIds
}
return nil
}
func (m *TableScan) GetNextReadEngine() EngineType {
if m != nil {
return m.NextReadEngine
}
return EngineType_Local
}
func (m *TableScan) GetRanges() []KeyRange {
if m != nil {
return m.Ranges
}
return nil
}
func (m *TableScan) GetPrimaryPrefixColumnIds() []int64 {
if m != nil {
return m.PrimaryPrefixColumnIds
}
return nil
}
type Join struct {
JoinType JoinType `protobuf:"varint,1,opt,name=join_type,json=joinType,enum=tipb.JoinType" json:"join_type"`
JoinExecType JoinExecType `protobuf:"varint,2,opt,name=join_exec_type,json=joinExecType,enum=tipb.JoinExecType" json:"join_exec_type"`
Children []*Executor `protobuf:"bytes,3,rep,name=children" json:"children,omitempty"`
InnerIdx int64 `protobuf:"varint,4,opt,name=inner_idx,json=innerIdx" json:"inner_idx"`
LeftJoinKeys []*Expr `protobuf:"bytes,5,rep,name=left_join_keys,json=leftJoinKeys" json:"left_join_keys,omitempty"`
RightJoinKeys []*Expr `protobuf:"bytes,6,rep,name=right_join_keys,json=rightJoinKeys" json:"right_join_keys,omitempty"`
// used by TiFlash join when new collation is enabled.
ProbeTypes []*FieldType `protobuf:"bytes,7,rep,name=probe_types,json=probeTypes" json:"probe_types,omitempty"`
BuildTypes []*FieldType `protobuf:"bytes,8,rep,name=build_types,json=buildTypes" json:"build_types,omitempty"`
LeftConditions []*Expr `protobuf:"bytes,9,rep,name=left_conditions,json=leftConditions" json:"left_conditions,omitempty"`
RightConditions []*Expr `protobuf:"bytes,10,rep,name=right_conditions,json=rightConditions" json:"right_conditions,omitempty"`
OtherConditions []*Expr `protobuf:"bytes,11,rep,name=other_conditions,json=otherConditions" json:"other_conditions,omitempty"`
OtherEqConditionsFromIn []*Expr `protobuf:"bytes,12,rep,name=other_eq_conditions_from_in,json=otherEqConditionsFromIn" json:"other_eq_conditions_from_in,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Join) Reset() { *m = Join{} }
func (m *Join) String() string { return proto.CompactTextString(m) }
func (*Join) ProtoMessage() {}
func (*Join) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{4} }
func (m *Join) GetJoinType() JoinType {
if m != nil {
return m.JoinType
}
return JoinType_TypeInnerJoin
}
func (m *Join) GetJoinExecType() JoinExecType {
if m != nil {
return m.JoinExecType
}
return JoinExecType_TypeHashJoin
}
func (m *Join) GetChildren() []*Executor {
if m != nil {
return m.Children
}
return nil
}
func (m *Join) GetInnerIdx() int64 {
if m != nil {
return m.InnerIdx
}
return 0
}
func (m *Join) GetLeftJoinKeys() []*Expr {
if m != nil {
return m.LeftJoinKeys
}
return nil
}
func (m *Join) GetRightJoinKeys() []*Expr {
if m != nil {
return m.RightJoinKeys
}
return nil
}
func (m *Join) GetProbeTypes() []*FieldType {
if m != nil {
return m.ProbeTypes
}
return nil
}
func (m *Join) GetBuildTypes() []*FieldType {
if m != nil {
return m.BuildTypes
}
return nil
}
func (m *Join) GetLeftConditions() []*Expr {
if m != nil {
return m.LeftConditions
}
return nil
}
func (m *Join) GetRightConditions() []*Expr {
if m != nil {
return m.RightConditions
}
return nil
}
func (m *Join) GetOtherConditions() []*Expr {
if m != nil {
return m.OtherConditions
}
return nil
}
func (m *Join) GetOtherEqConditionsFromIn() []*Expr {
if m != nil {
return m.OtherEqConditionsFromIn
}
return nil
}
type IndexScan struct {
TableId int64 `protobuf:"varint,1,opt,name=table_id,json=tableId" json:"table_id"`
IndexId int64 `protobuf:"varint,2,opt,name=index_id,json=indexId" json:"index_id"`
Columns []*ColumnInfo `protobuf:"bytes,3,rep,name=columns" json:"columns,omitempty"`
Desc bool `protobuf:"varint,4,opt,name=desc" json:"desc"`
Unique *bool `protobuf:"varint,5,opt,name=unique" json:"unique,omitempty"`
PrimaryColumnIds []int64 `protobuf:"varint,6,rep,name=primary_column_ids,json=primaryColumnIds" json:"primary_column_ids,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *IndexScan) Reset() { *m = IndexScan{} }
func (m *IndexScan) String() string { return proto.CompactTextString(m) }
func (*IndexScan) ProtoMessage() {}
func (*IndexScan) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{5} }
func (m *IndexScan) GetTableId() int64 {
if m != nil {
return m.TableId
}
return 0
}
func (m *IndexScan) GetIndexId() int64 {
if m != nil {
return m.IndexId
}
return 0
}
func (m *IndexScan) GetColumns() []*ColumnInfo {
if m != nil {
return m.Columns
}
return nil
}
func (m *IndexScan) GetDesc() bool {
if m != nil {
return m.Desc
}
return false
}
func (m *IndexScan) GetUnique() bool {
if m != nil && m.Unique != nil {
return *m.Unique
}
return false
}
func (m *IndexScan) GetPrimaryColumnIds() []int64 {
if m != nil {
return m.PrimaryColumnIds
}
return nil
}
type Selection struct {
// Where conditions.
Conditions []*Expr `protobuf:"bytes,1,rep,name=conditions" json:"conditions,omitempty"`
RpnConditions []*RpnExpr `protobuf:"bytes,2,rep,name=rpn_conditions,json=rpnConditions" json:"rpn_conditions,omitempty"`
Child *Executor `protobuf:"bytes,3,opt,name=child" json:"child,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Selection) Reset() { *m = Selection{} }
func (m *Selection) String() string { return proto.CompactTextString(m) }
func (*Selection) ProtoMessage() {}
func (*Selection) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{6} }
func (m *Selection) GetConditions() []*Expr {
if m != nil {
return m.Conditions
}
return nil
}
func (m *Selection) GetRpnConditions() []*RpnExpr {
if m != nil {
return m.RpnConditions
}
return nil
}
func (m *Selection) GetChild() *Executor {
if m != nil {
return m.Child
}
return nil
}
type Projection struct {
// Projection expressions.
Exprs []*Expr `protobuf:"bytes,1,rep,name=exprs" json:"exprs,omitempty"`
RpnExprs []*RpnExpr `protobuf:"bytes,2,rep,name=rpn_exprs,json=rpnExprs" json:"rpn_exprs,omitempty"`
Child *Executor `protobuf:"bytes,3,opt,name=child" json:"child,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Projection) Reset() { *m = Projection{} }
func (m *Projection) String() string { return proto.CompactTextString(m) }
func (*Projection) ProtoMessage() {}
func (*Projection) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{7} }
func (m *Projection) GetExprs() []*Expr {
if m != nil {
return m.Exprs
}
return nil
}
func (m *Projection) GetRpnExprs() []*RpnExpr {
if m != nil {
return m.RpnExprs
}
return nil
}
func (m *Projection) GetChild() *Executor {
if m != nil {
return m.Child
}
return nil
}
type Aggregation struct {
// Group by clause.
GroupBy []*Expr `protobuf:"bytes,1,rep,name=group_by,json=groupBy" json:"group_by,omitempty"`
RpnGroupBy []*RpnExpr `protobuf:"bytes,4,rep,name=rpn_group_by,json=rpnGroupBy" json:"rpn_group_by,omitempty"`
// Aggregate functions.
AggFunc []*Expr `protobuf:"bytes,2,rep,name=agg_func,json=aggFunc" json:"agg_func,omitempty"`
RpnAggFunc []*RpnExpr `protobuf:"bytes,5,rep,name=rpn_agg_func,json=rpnAggFunc" json:"rpn_agg_func,omitempty"`
// If it is a stream aggregation.
Streamed bool `protobuf:"varint,3,opt,name=streamed" json:"streamed"`
Child *Executor `protobuf:"bytes,6,opt,name=child" json:"child,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Aggregation) Reset() { *m = Aggregation{} }
func (m *Aggregation) String() string { return proto.CompactTextString(m) }
func (*Aggregation) ProtoMessage() {}
func (*Aggregation) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{8} }
func (m *Aggregation) GetGroupBy() []*Expr {
if m != nil {
return m.GroupBy
}
return nil
}
func (m *Aggregation) GetRpnGroupBy() []*RpnExpr {
if m != nil {
return m.RpnGroupBy
}
return nil
}
func (m *Aggregation) GetAggFunc() []*Expr {
if m != nil {
return m.AggFunc
}
return nil
}
func (m *Aggregation) GetRpnAggFunc() []*RpnExpr {
if m != nil {
return m.RpnAggFunc
}
return nil
}
func (m *Aggregation) GetStreamed() bool {
if m != nil {
return m.Streamed
}
return false
}
func (m *Aggregation) GetChild() *Executor {
if m != nil {
return m.Child
}
return nil
}
type TopN struct {
// Order by clause.
OrderBy []*ByItem `protobuf:"bytes,1,rep,name=order_by,json=orderBy" json:"order_by,omitempty"`
Limit uint64 `protobuf:"varint,2,opt,name=limit" json:"limit"`
Child *Executor `protobuf:"bytes,3,opt,name=child" json:"child,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *TopN) Reset() { *m = TopN{} }
func (m *TopN) String() string { return proto.CompactTextString(m) }
func (*TopN) ProtoMessage() {}
func (*TopN) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{9} }
func (m *TopN) GetOrderBy() []*ByItem {
if m != nil {
return m.OrderBy
}
return nil
}
func (m *TopN) GetLimit() uint64 {
if m != nil {
return m.Limit
}
return 0
}
func (m *TopN) GetChild() *Executor {
if m != nil {
return m.Child
}
return nil
}
type Limit struct {
// Limit the result to be returned.
Limit uint64 `protobuf:"varint,1,opt,name=limit" json:"limit"`
Child *Executor `protobuf:"bytes,2,opt,name=child" json:"child,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Limit) Reset() { *m = Limit{} }
func (m *Limit) String() string { return proto.CompactTextString(m) }
func (*Limit) ProtoMessage() {}
func (*Limit) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{10} }
func (m *Limit) GetLimit() uint64 {
if m != nil {
return m.Limit
}
return 0
}
func (m *Limit) GetChild() *Executor {
if m != nil {
return m.Child
}
return nil
}
type Kill struct {
ConnID uint64 `protobuf:"varint,1,opt,name=connID" json:"connID"`
// Query indicates whether terminate a single query on this connection or the whole connection.
// If Query is true, terminates the statement the connection is currently executing, but leaves the connection itself intact.
// If Query is false, terminates the connection associated with the given ConnectionID, after terminating any statement the connection is executing.
// See https://dev.mysql.com/doc/refman/8.0/en/kill.html.
Query bool `protobuf:"varint,2,opt,name=query" json:"query"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Kill) Reset() { *m = Kill{} }
func (m *Kill) String() string { return proto.CompactTextString(m) }
func (*Kill) ProtoMessage() {}
func (*Kill) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{11} }
func (m *Kill) GetConnID() uint64 {
if m != nil {
return m.ConnID
}
return 0
}
func (m *Kill) GetQuery() bool {
if m != nil {
return m.Query
}
return false
}
// ExecutorExecutionSummary is the generated struct for the tipb
// ExecutorExecutionSummary message (protoc-gen-gogo output; do not edit
// by hand). All fields are optional, hence the pointer types.
type ExecutorExecutionSummary struct {
	// Total time cost in this executor. Includes self time cost and children time cost.
	TimeProcessedNs *uint64 `protobuf:"varint,1,opt,name=time_processed_ns,json=timeProcessedNs" json:"time_processed_ns,omitempty"`
	// How many rows this executor produced totally.
	NumProducedRows *uint64 `protobuf:"varint,2,opt,name=num_produced_rows,json=numProducedRows" json:"num_produced_rows,omitempty"`
	// How many times executor's `next()` is called.
	NumIterations *uint64 `protobuf:"varint,3,opt,name=num_iterations,json=numIterations" json:"num_iterations,omitempty"`
	// Coresponding executor id
	ExecutorId *string `protobuf:"bytes,4,opt,name=executor_id,json=executorId" json:"executor_id,omitempty"`
	// The execution concurrency for this executor
	Concurrency *uint64 `protobuf:"varint,5,opt,name=concurrency" json:"concurrency,omitempty"`
	// XXX_unrecognized holds unknown fields preserved across a round-trip.
	XXX_unrecognized []byte `json:"-"`
}

// Reset clears the message to its zero value.
func (m *ExecutorExecutionSummary) Reset() { *m = ExecutorExecutionSummary{} }

// String renders the message in the proto compact text format.
func (m *ExecutorExecutionSummary) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks ExecutorExecutionSummary as a protobuf message type.
func (*ExecutorExecutionSummary) ProtoMessage() {}

// Descriptor returns the file descriptor bytes and this message's index path.
func (*ExecutorExecutionSummary) Descriptor() ([]byte, []int) {
	return fileDescriptorExecutor, []int{12}
}

// GetTimeProcessedNs dereferences TimeProcessedNs; 0 when unset or nil receiver.
func (m *ExecutorExecutionSummary) GetTimeProcessedNs() uint64 {
	if m != nil && m.TimeProcessedNs != nil {
		return *m.TimeProcessedNs
	}
	return 0
}

// GetNumProducedRows dereferences NumProducedRows; 0 when unset or nil receiver.
func (m *ExecutorExecutionSummary) GetNumProducedRows() uint64 {
	if m != nil && m.NumProducedRows != nil {
		return *m.NumProducedRows
	}
	return 0
}

// GetNumIterations dereferences NumIterations; 0 when unset or nil receiver.
func (m *ExecutorExecutionSummary) GetNumIterations() uint64 {
	if m != nil && m.NumIterations != nil {
		return *m.NumIterations
	}
	return 0
}

// GetExecutorId dereferences ExecutorId; "" when unset or nil receiver.
func (m *ExecutorExecutionSummary) GetExecutorId() string {
	if m != nil && m.ExecutorId != nil {
		return *m.ExecutorId
	}
	return ""
}

// GetConcurrency dereferences Concurrency; 0 when unset or nil receiver.
func (m *ExecutorExecutionSummary) GetConcurrency() uint64 {
	if m != nil && m.Concurrency != nil {
		return *m.Concurrency
	}
	return 0
}
// GraphScan is the generated struct for the tipb GraphScan message
// (protoc-gen-gogo output; do not edit by hand).
type GraphScan struct {
	TableId *int64 `protobuf:"varint,1,opt,name=table_id,json=tableId" json:"table_id,omitempty"`
	Columns []*ColumnInfo `protobuf:"bytes,2,rep,name=columns" json:"columns,omitempty"`
	Desc bool `protobuf:"varint,3,opt,name=desc" json:"desc"`
	PrimaryColumnIds []int64 `protobuf:"varint,4,rep,name=primary_column_ids,json=primaryColumnIds" json:"primary_column_ids,omitempty"`
	NextReadEngine EngineType `protobuf:"varint,5,opt,name=next_read_engine,json=nextReadEngine,enum=tipb.EngineType" json:"next_read_engine"`
	Ranges []KeyRange `protobuf:"bytes,6,rep,name=ranges" json:"ranges"`
	PrimaryPrefixColumnIds []int64 `protobuf:"varint,7,rep,name=primary_prefix_column_ids,json=primaryPrefixColumnIds" json:"primary_prefix_column_ids,omitempty"`
	// XXX_unrecognized holds unknown fields preserved across a round-trip.
	XXX_unrecognized []byte `json:"-"`
}

// Reset clears the message to its zero value.
func (m *GraphScan) Reset() { *m = GraphScan{} }

// String renders the message in the proto compact text format.
func (m *GraphScan) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks GraphScan as a protobuf message type.
func (*GraphScan) ProtoMessage() {}

// Descriptor returns the file descriptor bytes and this message's index path.
func (*GraphScan) Descriptor() ([]byte, []int) { return fileDescriptorExecutor, []int{13} }

// GetTableId dereferences TableId; 0 when unset or nil receiver.
func (m *GraphScan) GetTableId() int64 {
	if m != nil && m.TableId != nil {
		return *m.TableId
	}
	return 0
}

// GetColumns returns the Columns slice; nil on a nil receiver.
func (m *GraphScan) GetColumns() []*ColumnInfo {
	if m != nil {
		return m.Columns
	}
	return nil
}

// GetDesc returns the Desc field; false on a nil receiver.
func (m *GraphScan) GetDesc() bool {
	if m != nil {
		return m.Desc
	}
	return false
}

// GetPrimaryColumnIds returns the PrimaryColumnIds slice; nil on a nil receiver.
func (m *GraphScan) GetPrimaryColumnIds() []int64 {
	if m != nil {
		return m.PrimaryColumnIds
	}
	return nil
}

// GetNextReadEngine returns the NextReadEngine field; EngineType_Local on a nil receiver.
func (m *GraphScan) GetNextReadEngine() EngineType {
	if m != nil {
		return m.NextReadEngine
	}
	return EngineType_Local
}

// GetRanges returns the Ranges slice; nil on a nil receiver.
func (m *GraphScan) GetRanges() []KeyRange {
	if m != nil {
		return m.Ranges
	}
	return nil
}

// GetPrimaryPrefixColumnIds returns the PrimaryPrefixColumnIds slice; nil on a nil receiver.
func (m *GraphScan) GetPrimaryPrefixColumnIds() []int64 {
	if m != nil {
		return m.PrimaryPrefixColumnIds
	}
	return nil
}
// init registers every message and enum type of this generated file
// with the proto package so they can be looked up by name at runtime.
func init() {
	proto.RegisterType((*Executor)(nil), "tipb.Executor")
	proto.RegisterType((*ExchangeSender)(nil), "tipb.ExchangeSender")
	proto.RegisterType((*ExchangeReceiver)(nil), "tipb.ExchangeReceiver")
	proto.RegisterType((*TableScan)(nil), "tipb.TableScan")
	proto.RegisterType((*Join)(nil), "tipb.Join")
	proto.RegisterType((*IndexScan)(nil), "tipb.IndexScan")
	proto.RegisterType((*Selection)(nil), "tipb.Selection")
	proto.RegisterType((*Projection)(nil), "tipb.Projection")
	proto.RegisterType((*Aggregation)(nil), "tipb.Aggregation")
	proto.RegisterType((*TopN)(nil), "tipb.TopN")
	proto.RegisterType((*Limit)(nil), "tipb.Limit")
	proto.RegisterType((*Kill)(nil), "tipb.Kill")
	proto.RegisterType((*ExecutorExecutionSummary)(nil), "tipb.ExecutorExecutionSummary")
	proto.RegisterType((*GraphScan)(nil), "tipb.GraphScan")
	proto.RegisterEnum("tipb.ExecType", ExecType_name, ExecType_value)
	proto.RegisterEnum("tipb.ExchangeType", ExchangeType_name, ExchangeType_value)
	proto.RegisterEnum("tipb.EngineType", EngineType_name, EngineType_value)
	proto.RegisterEnum("tipb.JoinType", JoinType_name, JoinType_value)
	proto.RegisterEnum("tipb.JoinExecType", JoinExecType_name, JoinExecType_value)
}
// Marshal allocates a buffer sized by Size and encodes m into it,
// returning the encoded bytes.
func (m *Executor) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalTo(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code: the tag bytes below fix
// the field numbers and wire types; do not edit by hand.
func (m *Executor) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i // blank uses keep the generated declarations referenced
	var l int
	_ = l
	// Field 1 (Tp, varint) is always emitted.
	dAtA[i] = 0x8
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.Tp))
	if m.TblScan != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.TblScan.Size()))
		n1, err := m.TblScan.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n1
	}
	if m.IdxScan != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.IdxScan.Size()))
		n2, err := m.IdxScan.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n2
	}
	if m.Selection != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Selection.Size()))
		n3, err := m.Selection.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n3
	}
	if m.Aggregation != nil {
		dAtA[i] = 0x2a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Aggregation.Size()))
		n4, err := m.Aggregation.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n4
	}
	if m.TopN != nil {
		dAtA[i] = 0x32
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.TopN.Size()))
		n5, err := m.TopN.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n5
	}
	if m.Limit != nil {
		dAtA[i] = 0x3a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Limit.Size()))
		n6, err := m.Limit.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n6
	}
	if m.ExchangeReceiver != nil {
		dAtA[i] = 0x42
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.ExchangeReceiver.Size()))
		n7, err := m.ExchangeReceiver.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n7
	}
	if m.Join != nil {
		dAtA[i] = 0x4a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Join.Size()))
		n8, err := m.Join.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n8
	}
	if m.ExecutorId != nil {
		// Field 10 (string): length-delimited, raw bytes copied in.
		dAtA[i] = 0x52
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(len(*m.ExecutorId)))
		i += copy(dAtA[i:], *m.ExecutorId)
	}
	if m.Kill != nil {
		dAtA[i] = 0x5a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Kill.Size()))
		n9, err := m.Kill.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n9
	}
	if m.ExchangeSender != nil {
		dAtA[i] = 0x62
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.ExchangeSender.Size()))
		n10, err := m.ExchangeSender.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n10
	}
	if m.Projection != nil {
		dAtA[i] = 0x6a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Projection.Size()))
		n11, err := m.Projection.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n11
	}
	if m.GrpScan != nil {
		dAtA[i] = 0x72
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.GrpScan.Size()))
		n12, err := m.GrpScan.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n12
	}
	if m.XXX_unrecognized != nil {
		// Unknown fields are appended verbatim at the end.
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *ExchangeSender) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *ExchangeSender) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Tp, varint) is always emitted.
	dAtA[i] = 0x8
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.Tp))
	if len(m.EncodedTaskMeta) > 0 {
		// Field 2: repeated bytes, one tag/length/payload per element.
		for _, b := range m.EncodedTaskMeta {
			dAtA[i] = 0x12
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if len(m.PartitionKeys) > 0 {
		for _, msg := range m.PartitionKeys {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Child != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Child.Size()))
		n13, err := m.Child.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n13
	}
	if len(m.Types) > 0 {
		for _, msg := range m.Types {
			dAtA[i] = 0x2a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.AllFieldTypes) > 0 {
		for _, msg := range m.AllFieldTypes {
			dAtA[i] = 0x32
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *ExchangeReceiver) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *ExchangeReceiver) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.EncodedTaskMeta) > 0 {
		// Field 1: repeated bytes.
		for _, b := range m.EncodedTaskMeta {
			dAtA[i] = 0xa
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(len(b)))
			i += copy(dAtA[i:], b)
		}
	}
	if len(m.FieldTypes) > 0 {
		for _, msg := range m.FieldTypes {
			dAtA[i] = 0x12
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Tp != nil {
		// Field 3 (optional varint), emitted only when set.
		dAtA[i] = 0x18
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(*m.Tp))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *TableScan) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *TableScan) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (TableId, varint) is always emitted.
	dAtA[i] = 0x8
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.TableId))
	if len(m.Columns) > 0 {
		for _, msg := range m.Columns {
			dAtA[i] = 0x12
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	// Field 3 (Desc, bool) is always emitted as a single byte.
	dAtA[i] = 0x18
	i++
	if m.Desc {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i++
	if len(m.PrimaryColumnIds) > 0 {
		for _, num := range m.PrimaryColumnIds {
			dAtA[i] = 0x20
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(num))
		}
	}
	// Field 5 (NextReadEngine, enum varint) is always emitted.
	dAtA[i] = 0x28
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.NextReadEngine))
	if len(m.Ranges) > 0 {
		for _, msg := range m.Ranges {
			dAtA[i] = 0x32
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.PrimaryPrefixColumnIds) > 0 {
		for _, num := range m.PrimaryPrefixColumnIds {
			dAtA[i] = 0x38
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(num))
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *Join) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *Join) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Fields 1 and 2 (enums) are always emitted.
	dAtA[i] = 0x8
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.JoinType))
	dAtA[i] = 0x10
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.JoinExecType))
	if len(m.Children) > 0 {
		for _, msg := range m.Children {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	// Field 4 (InnerIdx, varint) is always emitted.
	dAtA[i] = 0x20
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.InnerIdx))
	if len(m.LeftJoinKeys) > 0 {
		for _, msg := range m.LeftJoinKeys {
			dAtA[i] = 0x2a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.RightJoinKeys) > 0 {
		for _, msg := range m.RightJoinKeys {
			dAtA[i] = 0x32
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.ProbeTypes) > 0 {
		for _, msg := range m.ProbeTypes {
			dAtA[i] = 0x3a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.BuildTypes) > 0 {
		for _, msg := range m.BuildTypes {
			dAtA[i] = 0x42
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.LeftConditions) > 0 {
		for _, msg := range m.LeftConditions {
			dAtA[i] = 0x4a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.RightConditions) > 0 {
		for _, msg := range m.RightConditions {
			dAtA[i] = 0x52
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.OtherConditions) > 0 {
		for _, msg := range m.OtherConditions {
			dAtA[i] = 0x5a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.OtherEqConditionsFromIn) > 0 {
		for _, msg := range m.OtherEqConditionsFromIn {
			dAtA[i] = 0x62
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *IndexScan) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *IndexScan) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Fields 1 and 2 (varints) are always emitted.
	dAtA[i] = 0x8
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.TableId))
	dAtA[i] = 0x10
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.IndexId))
	if len(m.Columns) > 0 {
		for _, msg := range m.Columns {
			dAtA[i] = 0x1a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	// Field 4 (Desc, bool) is always emitted.
	dAtA[i] = 0x20
	i++
	if m.Desc {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i++
	if m.Unique != nil {
		// Field 5 (optional bool), emitted only when set.
		dAtA[i] = 0x28
		i++
		if *m.Unique {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if len(m.PrimaryColumnIds) > 0 {
		for _, num := range m.PrimaryColumnIds {
			dAtA[i] = 0x30
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(num))
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *Selection) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *Selection) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Conditions) > 0 {
		for _, msg := range m.Conditions {
			dAtA[i] = 0xa
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.RpnConditions) > 0 {
		for _, msg := range m.RpnConditions {
			dAtA[i] = 0x12
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Child != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Child.Size()))
		n14, err := m.Child.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n14
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *Projection) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *Projection) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Exprs) > 0 {
		for _, msg := range m.Exprs {
			dAtA[i] = 0xa
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.RpnExprs) > 0 {
		for _, msg := range m.RpnExprs {
			dAtA[i] = 0x12
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Child != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Child.Size()))
		n15, err := m.Child.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n15
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *Aggregation) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *Aggregation) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.GroupBy) > 0 {
		for _, msg := range m.GroupBy {
			dAtA[i] = 0xa
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.AggFunc) > 0 {
		for _, msg := range m.AggFunc {
			dAtA[i] = 0x12
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	// Field 3 (Streamed, bool) is always emitted.
	dAtA[i] = 0x18
	i++
	if m.Streamed {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i++
	if len(m.RpnGroupBy) > 0 {
		for _, msg := range m.RpnGroupBy {
			dAtA[i] = 0x22
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.RpnAggFunc) > 0 {
		for _, msg := range m.RpnAggFunc {
			dAtA[i] = 0x2a
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Child != nil {
		dAtA[i] = 0x32
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Child.Size()))
		n16, err := m.Child.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n16
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *TopN) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *TopN) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.OrderBy) > 0 {
		for _, msg := range m.OrderBy {
			dAtA[i] = 0xa
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	// Field 2 (Limit, varint) is always emitted.
	dAtA[i] = 0x10
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.Limit))
	if m.Child != nil {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Child.Size()))
		n17, err := m.Child.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n17
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *Limit) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *Limit) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (Limit, varint) is always emitted.
	dAtA[i] = 0x8
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.Limit))
	if m.Child != nil {
		dAtA[i] = 0x12
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(m.Child.Size()))
		n18, err := m.Child.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n18
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *Kill) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *Kill) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	// Field 1 (ConnID, varint) and field 2 (Query, bool) are always emitted.
	dAtA[i] = 0x8
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.ConnID))
	dAtA[i] = 0x10
	i++
	if m.Query {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i++
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *ExecutorExecutionSummary) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. All fields are optional and emitted only
// when non-nil. Generated code; do not edit by hand.
func (m *ExecutorExecutionSummary) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.TimeProcessedNs != nil {
		dAtA[i] = 0x8
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(*m.TimeProcessedNs))
	}
	if m.NumProducedRows != nil {
		dAtA[i] = 0x10
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(*m.NumProducedRows))
	}
	if m.NumIterations != nil {
		dAtA[i] = 0x18
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(*m.NumIterations))
	}
	if m.ExecutorId != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(len(*m.ExecutorId)))
		i += copy(dAtA[i:], *m.ExecutorId)
	}
	if m.Concurrency != nil {
		dAtA[i] = 0x28
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(*m.Concurrency))
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// Marshal allocates a buffer sized by Size and encodes m into it.
func (m *GraphScan) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

// MarshalTo encodes m into dAtA in the protobuf wire format and returns
// the number of bytes written. Generated code; do not edit by hand.
func (m *GraphScan) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.TableId != nil {
		// Field 1 (optional varint), emitted only when set.
		dAtA[i] = 0x8
		i++
		i = encodeVarintExecutor(dAtA, i, uint64(*m.TableId))
	}
	if len(m.Columns) > 0 {
		for _, msg := range m.Columns {
			dAtA[i] = 0x12
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	// Field 3 (Desc, bool) is always emitted.
	dAtA[i] = 0x18
	i++
	if m.Desc {
		dAtA[i] = 1
	} else {
		dAtA[i] = 0
	}
	i++
	if len(m.PrimaryColumnIds) > 0 {
		for _, num := range m.PrimaryColumnIds {
			dAtA[i] = 0x20
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(num))
		}
	}
	// Field 5 (NextReadEngine, enum varint) is always emitted.
	dAtA[i] = 0x28
	i++
	i = encodeVarintExecutor(dAtA, i, uint64(m.NextReadEngine))
	if len(m.Ranges) > 0 {
		for _, msg := range m.Ranges {
			dAtA[i] = 0x32
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if len(m.PrimaryPrefixColumnIds) > 0 {
		for _, num := range m.PrimaryPrefixColumnIds {
			dAtA[i] = 0x38
			i++
			i = encodeVarintExecutor(dAtA, i, uint64(num))
		}
	}
	if m.XXX_unrecognized != nil {
		i += copy(dAtA[i:], m.XXX_unrecognized)
	}
	return i, nil
}
// encodeVarintExecutor writes v at dAtA[offset:] using protobuf
// base-128 varint encoding and returns the index just past the last
// byte written.
func encodeVarintExecutor(dAtA []byte, offset int, v uint64) int {
	pos := offset
	for ; v >= 0x80; v >>= 7 {
		// Low 7 bits of v with the continuation bit set.
		dAtA[pos] = byte(v) | 0x80
		pos++
	}
	dAtA[pos] = byte(v)
	return pos + 1
}
// Size returns the number of bytes MarshalTo will write for m. Each
// present message field contributes 1 tag byte + length varint + body.
// Generated code; do not edit by hand.
func (m *Executor) Size() (n int) {
	var l int
	_ = l
	n += 1 + sovExecutor(uint64(m.Tp))
	if m.TblScan != nil {
		l = m.TblScan.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.IdxScan != nil {
		l = m.IdxScan.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.Selection != nil {
		l = m.Selection.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.Aggregation != nil {
		l = m.Aggregation.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.TopN != nil {
		l = m.TopN.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.Limit != nil {
		l = m.Limit.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.ExchangeReceiver != nil {
		l = m.ExchangeReceiver.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.Join != nil {
		l = m.Join.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.ExecutorId != nil {
		l = len(*m.ExecutorId)
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.Kill != nil {
		l = m.Kill.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.ExchangeSender != nil {
		l = m.ExchangeSender.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.Projection != nil {
		l = m.Projection.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.GrpScan != nil {
		l = m.GrpScan.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes MarshalTo will write for m.
// Generated code; do not edit by hand.
func (m *ExchangeSender) Size() (n int) {
	var l int
	_ = l
	n += 1 + sovExecutor(uint64(m.Tp))
	if len(m.EncodedTaskMeta) > 0 {
		for _, b := range m.EncodedTaskMeta {
			l = len(b)
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.PartitionKeys) > 0 {
		for _, e := range m.PartitionKeys {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if m.Child != nil {
		l = m.Child.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if len(m.Types) > 0 {
		for _, e := range m.Types {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.AllFieldTypes) > 0 {
		for _, e := range m.AllFieldTypes {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m.
// Generated code; do not edit by hand.
func (m *ExchangeReceiver) Size() (n int) {
	var l int
	_ = l
	if len(m.EncodedTaskMeta) > 0 {
		for _, b := range m.EncodedTaskMeta {
			l = len(b)
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.FieldTypes) > 0 {
		for _, e := range m.FieldTypes {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if m.Tp != nil {
		n += 1 + sovExecutor(uint64(*m.Tp))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m. The
// bare "n += 2" accounts for the always-emitted Desc bool (tag + byte).
// Generated code; do not edit by hand.
func (m *TableScan) Size() (n int) {
	var l int
	_ = l
	n += 1 + sovExecutor(uint64(m.TableId))
	if len(m.Columns) > 0 {
		for _, e := range m.Columns {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	n += 2
	if len(m.PrimaryColumnIds) > 0 {
		for _, e := range m.PrimaryColumnIds {
			n += 1 + sovExecutor(uint64(e))
		}
	}
	n += 1 + sovExecutor(uint64(m.NextReadEngine))
	if len(m.Ranges) > 0 {
		for _, e := range m.Ranges {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.PrimaryPrefixColumnIds) > 0 {
		for _, e := range m.PrimaryPrefixColumnIds {
			n += 1 + sovExecutor(uint64(e))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes MarshalTo will write for m.
// Generated code; do not edit by hand.
func (m *Join) Size() (n int) {
	var l int
	_ = l
	n += 1 + sovExecutor(uint64(m.JoinType))
	n += 1 + sovExecutor(uint64(m.JoinExecType))
	if len(m.Children) > 0 {
		for _, e := range m.Children {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	n += 1 + sovExecutor(uint64(m.InnerIdx))
	if len(m.LeftJoinKeys) > 0 {
		for _, e := range m.LeftJoinKeys {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.RightJoinKeys) > 0 {
		for _, e := range m.RightJoinKeys {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.ProbeTypes) > 0 {
		for _, e := range m.ProbeTypes {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.BuildTypes) > 0 {
		for _, e := range m.BuildTypes {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.LeftConditions) > 0 {
		for _, e := range m.LeftConditions {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.RightConditions) > 0 {
		for _, e := range m.RightConditions {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.OtherConditions) > 0 {
		for _, e := range m.OtherConditions {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.OtherEqConditionsFromIn) > 0 {
		for _, e := range m.OtherEqConditionsFromIn {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m. The two
// "n += 2" lines cover the always-emitted Desc bool and the optional
// Unique bool (tag + value byte each). Generated code; do not edit by hand.
func (m *IndexScan) Size() (n int) {
	var l int
	_ = l
	n += 1 + sovExecutor(uint64(m.TableId))
	n += 1 + sovExecutor(uint64(m.IndexId))
	if len(m.Columns) > 0 {
		for _, e := range m.Columns {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	n += 2
	if m.Unique != nil {
		n += 2
	}
	if len(m.PrimaryColumnIds) > 0 {
		for _, e := range m.PrimaryColumnIds {
			n += 1 + sovExecutor(uint64(e))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes MarshalTo will write for m.
// Generated code; do not edit by hand.
func (m *Selection) Size() (n int) {
	var l int
	_ = l
	if len(m.Conditions) > 0 {
		for _, e := range m.Conditions {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.RpnConditions) > 0 {
		for _, e := range m.RpnConditions {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if m.Child != nil {
		l = m.Child.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m.
// Generated code; do not edit by hand.
func (m *Projection) Size() (n int) {
	var l int
	_ = l
	if len(m.Exprs) > 0 {
		for _, e := range m.Exprs {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.RpnExprs) > 0 {
		for _, e := range m.RpnExprs {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if m.Child != nil {
		l = m.Child.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m. The
// "n += 2" covers the always-emitted Streamed bool (tag + value byte).
// Generated code; do not edit by hand.
func (m *Aggregation) Size() (n int) {
	var l int
	_ = l
	if len(m.GroupBy) > 0 {
		for _, e := range m.GroupBy {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.AggFunc) > 0 {
		for _, e := range m.AggFunc {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	n += 2
	if len(m.RpnGroupBy) > 0 {
		for _, e := range m.RpnGroupBy {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.RpnAggFunc) > 0 {
		for _, e := range m.RpnAggFunc {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if m.Child != nil {
		l = m.Child.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m.
// Generated code; do not edit by hand.
func (m *TopN) Size() (n int) {
	var l int
	_ = l
	if len(m.OrderBy) > 0 {
		for _, e := range m.OrderBy {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	n += 1 + sovExecutor(uint64(m.Limit))
	if m.Child != nil {
		l = m.Child.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m.
// Generated code; do not edit by hand.
func (m *Limit) Size() (n int) {
	var l int
	_ = l
	n += 1 + sovExecutor(uint64(m.Limit))
	if m.Child != nil {
		l = m.Child.Size()
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m. The
// "n += 2" covers the always-emitted Query bool (tag + value byte).
// Generated code; do not edit by hand.
func (m *Kill) Size() (n int) {
	var l int
	_ = l
	n += 1 + sovExecutor(uint64(m.ConnID))
	n += 2
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// Size returns the number of bytes MarshalTo will write for m. All
// fields are optional and counted only when non-nil. Generated code;
// do not edit by hand.
func (m *ExecutorExecutionSummary) Size() (n int) {
	var l int
	_ = l
	if m.TimeProcessedNs != nil {
		n += 1 + sovExecutor(uint64(*m.TimeProcessedNs))
	}
	if m.NumProducedRows != nil {
		n += 1 + sovExecutor(uint64(*m.NumProducedRows))
	}
	if m.NumIterations != nil {
		n += 1 + sovExecutor(uint64(*m.NumIterations))
	}
	if m.ExecutorId != nil {
		l = len(*m.ExecutorId)
		n += 1 + l + sovExecutor(uint64(l))
	}
	if m.Concurrency != nil {
		n += 1 + sovExecutor(uint64(*m.Concurrency))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// Size returns the number of bytes MarshalTo will write for m. The
// "n += 2" covers the always-emitted Desc bool (tag + value byte).
// Generated code; do not edit by hand.
func (m *GraphScan) Size() (n int) {
	var l int
	_ = l
	if m.TableId != nil {
		n += 1 + sovExecutor(uint64(*m.TableId))
	}
	if len(m.Columns) > 0 {
		for _, e := range m.Columns {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	n += 2
	if len(m.PrimaryColumnIds) > 0 {
		for _, e := range m.PrimaryColumnIds {
			n += 1 + sovExecutor(uint64(e))
		}
	}
	n += 1 + sovExecutor(uint64(m.NextReadEngine))
	if len(m.Ranges) > 0 {
		for _, e := range m.Ranges {
			l = e.Size()
			n += 1 + l + sovExecutor(uint64(l))
		}
	}
	if len(m.PrimaryPrefixColumnIds) > 0 {
		for _, e := range m.PrimaryPrefixColumnIds {
			n += 1 + sovExecutor(uint64(e))
		}
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}
// sovExecutor returns the number of bytes needed to encode x as a
// protobuf base-128 varint (at least 1, at most 10).
func sovExecutor(x uint64) (n int) {
	n = 1
	for x >>= 7; x != 0; x >>= 7 {
		n++
	}
	return n
}
// sozExecutor returns the varint-encoded size of x after zig-zag
// encoding (sint32/sint64 fields): the value is left-shifted by one
// with the sign bit folded into bit 0 before sizing.
func sozExecutor(x uint64) (n int) {
	return sovExecutor(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any fields already set. Unknown fields are preserved in XXX_unrecognized.
// NOTE(review): gogo/protobuf-generated style decoder — avoid hand edits.
func (m *Executor) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Executor: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Executor: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Tp (ExecType enum), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Tp", wireType)
			}
			m.Tp = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Tp |= (ExecType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: TblScan (*TableScan), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TblScan", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TblScan == nil {
				m.TblScan = &TableScan{}
			}
			if err := m.TblScan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: IdxScan (*IndexScan), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field IdxScan", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.IdxScan == nil {
				m.IdxScan = &IndexScan{}
			}
			if err := m.IdxScan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Selection (*Selection), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Selection", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Selection == nil {
				m.Selection = &Selection{}
			}
			if err := m.Selection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Aggregation (*Aggregation), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Aggregation", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Aggregation == nil {
				m.Aggregation = &Aggregation{}
			}
			if err := m.Aggregation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// Field 6: TopN (*TopN), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field TopN", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.TopN == nil {
				m.TopN = &TopN{}
			}
			if err := m.TopN.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			// Field 7: Limit (*Limit), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Limit == nil {
				m.Limit = &Limit{}
			}
			if err := m.Limit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 8:
			// Field 8: ExchangeReceiver (*ExchangeReceiver), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExchangeReceiver", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ExchangeReceiver == nil {
				m.ExchangeReceiver = &ExchangeReceiver{}
			}
			if err := m.ExchangeReceiver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 9:
			// Field 9: Join (*Join), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Join", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Join == nil {
				m.Join = &Join{}
			}
			if err := m.Join.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10:
			// Field 10: ExecutorId (*string), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExecutorId", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.ExecutorId = &s
			iNdEx = postIndex
		case 11:
			// Field 11: Kill (*Kill), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Kill", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Kill == nil {
				m.Kill = &Kill{}
			}
			if err := m.Kill.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 12:
			// Field 12: ExchangeSender (*ExchangeSender), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExchangeSender", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.ExchangeSender == nil {
				m.ExchangeSender = &ExchangeSender{}
			}
			if err := m.ExchangeSender.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 13:
			// Field 13: Projection (*Projection), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Projection", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Projection == nil {
				m.Projection = &Projection{}
			}
			if err := m.Projection.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 14:
			// Field 14: GrpScan (*GraphScan), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field GrpScan", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.GrpScan == nil {
				m.GrpScan = &GraphScan{}
			}
			if err := m.GrpScan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes (tag included,
			// hence restarting from preIndex) in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any fields already set. Unknown fields are preserved in XXX_unrecognized.
// NOTE(review): gogo/protobuf-generated style decoder — avoid hand edits.
func (m *ExchangeSender) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ExchangeSender: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExchangeSender: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Tp (ExchangeType enum), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Tp", wireType)
			}
			m.Tp = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Tp |= (ExchangeType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: EncodedTaskMeta (repeated bytes); each occurrence is
			// copied into a fresh []byte so it does not alias dAtA.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EncodedTaskMeta", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EncodedTaskMeta = append(m.EncodedTaskMeta, make([]byte, postIndex-iNdEx))
			copy(m.EncodedTaskMeta[len(m.EncodedTaskMeta)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Field 3: PartitionKeys (repeated *Expr), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field PartitionKeys", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.PartitionKeys = append(m.PartitionKeys, &Expr{})
			if err := m.PartitionKeys[len(m.PartitionKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Child (*Executor), length-delimited embedded message.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Child", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Child == nil {
				m.Child = &Executor{}
			}
			if err := m.Child.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Types (repeated *FieldType), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Types", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Types = append(m.Types, &FieldType{})
			if err := m.Types[len(m.Types)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// Field 6: AllFieldTypes (repeated *FieldType), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AllFieldTypes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.AllFieldTypes = append(m.AllFieldTypes, &FieldType{})
			if err := m.AllFieldTypes[len(m.AllFieldTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any fields already set. Unknown fields are preserved in XXX_unrecognized.
// NOTE(review): gogo/protobuf-generated style decoder — avoid hand edits.
func (m *ExchangeReceiver) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ExchangeReceiver: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExchangeReceiver: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: EncodedTaskMeta (repeated bytes); each occurrence is
			// copied into a fresh []byte so it does not alias dAtA.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field EncodedTaskMeta", wireType)
			}
			var byteLen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				byteLen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if byteLen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + byteLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.EncodedTaskMeta = append(m.EncodedTaskMeta, make([]byte, postIndex-iNdEx))
			copy(m.EncodedTaskMeta[len(m.EncodedTaskMeta)-1], dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: FieldTypes (repeated *FieldType), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field FieldTypes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.FieldTypes = append(m.FieldTypes, &FieldType{})
			if err := m.FieldTypes[len(m.FieldTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Tp (*ExchangeType), optional enum stored via pointer.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Tp", wireType)
			}
			var v ExchangeType
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (ExchangeType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Tp = &v
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any fields already set. Unknown fields are preserved in XXX_unrecognized.
// NOTE(review): gogo/protobuf-generated style decoder — avoid hand edits.
func (m *TableScan) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TableScan: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TableScan: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: TableId (int64), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TableId", wireType)
			}
			m.TableId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TableId |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: Columns (repeated *ColumnInfo), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Columns = append(m.Columns, &ColumnInfo{})
			if err := m.Columns[len(m.Columns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Desc (bool), varint; any non-zero value means true.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Desc = bool(v != 0)
		case 4:
			// Field 4: PrimaryColumnIds (repeated int64); accepts both the
			// unpacked (one varint per element) and packed encodings.
			if wireType == 0 {
				var v int64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (int64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.PrimaryColumnIds = append(m.PrimaryColumnIds, v)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthExecutor
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v int64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowExecutor
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (int64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.PrimaryColumnIds = append(m.PrimaryColumnIds, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryColumnIds", wireType)
			}
		case 5:
			// Field 5: NextReadEngine (EngineType enum), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NextReadEngine", wireType)
			}
			m.NextReadEngine = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.NextReadEngine |= (EngineType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// Field 6: Ranges (repeated KeyRange, non-nullable), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ranges = append(m.Ranges, KeyRange{})
			if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			// Field 7: PrimaryPrefixColumnIds (repeated int64); accepts both
			// the unpacked and packed encodings.
			if wireType == 0 {
				var v int64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (int64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.PrimaryPrefixColumnIds = append(m.PrimaryPrefixColumnIds, v)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthExecutor
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v int64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowExecutor
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (int64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.PrimaryPrefixColumnIds = append(m.PrimaryPrefixColumnIds, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryPrefixColumnIds", wireType)
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any fields already set. Unknown fields are preserved in XXX_unrecognized.
// NOTE(review): gogo/protobuf-generated style decoder — avoid hand edits.
func (m *Join) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Join: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Join: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: JoinType (enum), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field JoinType", wireType)
			}
			m.JoinType = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.JoinType |= (JoinType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: JoinExecType (enum), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field JoinExecType", wireType)
			}
			m.JoinExecType = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.JoinExecType |= (JoinExecType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: Children (repeated *Executor), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Children = append(m.Children, &Executor{})
			if err := m.Children[len(m.Children)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: InnerIdx (int64), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field InnerIdx", wireType)
			}
			m.InnerIdx = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.InnerIdx |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5:
			// Field 5: LeftJoinKeys (repeated *Expr), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LeftJoinKeys", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.LeftJoinKeys = append(m.LeftJoinKeys, &Expr{})
			if err := m.LeftJoinKeys[len(m.LeftJoinKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// Field 6: RightJoinKeys (repeated *Expr), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RightJoinKeys", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RightJoinKeys = append(m.RightJoinKeys, &Expr{})
			if err := m.RightJoinKeys[len(m.RightJoinKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			// Field 7: ProbeTypes (repeated *FieldType), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ProbeTypes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ProbeTypes = append(m.ProbeTypes, &FieldType{})
			if err := m.ProbeTypes[len(m.ProbeTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 8:
			// Field 8: BuildTypes (repeated *FieldType), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field BuildTypes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.BuildTypes = append(m.BuildTypes, &FieldType{})
			if err := m.BuildTypes[len(m.BuildTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 9:
			// Field 9: LeftConditions (repeated *Expr), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field LeftConditions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.LeftConditions = append(m.LeftConditions, &Expr{})
			if err := m.LeftConditions[len(m.LeftConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10:
			// Field 10: RightConditions (repeated *Expr), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RightConditions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RightConditions = append(m.RightConditions, &Expr{})
			if err := m.RightConditions[len(m.RightConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 11:
			// Field 11: OtherConditions (repeated *Expr), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OtherConditions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.OtherConditions = append(m.OtherConditions, &Expr{})
			if err := m.OtherConditions[len(m.OtherConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 12:
			// Field 12: OtherEqConditionsFromIn (repeated *Expr), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OtherEqConditionsFromIn", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.OtherEqConditionsFromIn = append(m.OtherEqConditionsFromIn, &Expr{})
			if err := m.OtherEqConditionsFromIn[len(m.OtherEqConditionsFromIn)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes m from the protobuf wire format in dAtA, merging into
// any fields already set. Unknown fields are preserved in XXX_unrecognized.
// NOTE(review): gogo/protobuf-generated style decoder — avoid hand edits.
func (m *IndexScan) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (field number << 3 | wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: IndexScan: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: IndexScan: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: TableId (int64), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TableId", wireType)
			}
			m.TableId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.TableId |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// Field 2: IndexId (int64), varint.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field IndexId", wireType)
			}
			m.IndexId = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.IndexId |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// Field 3: Columns (repeated *ColumnInfo), length-delimited.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Columns = append(m.Columns, &ColumnInfo{})
			if err := m.Columns[len(m.Columns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Field 4: Desc (bool), varint; any non-zero value means true.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Desc = bool(v != 0)
		case 5:
			// Field 5: Unique (*bool), optional bool stored via pointer.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Unique", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			b := bool(v != 0)
			m.Unique = &b
		case 6:
			// Field 6: PrimaryColumnIds (repeated int64); accepts both the
			// unpacked (one varint per element) and packed encodings.
			if wireType == 0 {
				var v int64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (int64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.PrimaryColumnIds = append(m.PrimaryColumnIds, v)
			} else if wireType == 2 {
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthExecutor
				}
				postIndex := iNdEx + packedLen
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v int64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowExecutor
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (int64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.PrimaryColumnIds = append(m.PrimaryColumnIds, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryColumnIds", wireType)
			}
		default:
			// Unknown field: skip it and keep the raw bytes in XXX_unrecognized.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m, appending to
// repeated fields and preserving unknown fields in m.XXX_unrecognized.
// It returns io.ErrUnexpectedEOF on truncated input and the shared
// ErrIntOverflowExecutor/ErrInvalidLengthExecutor sentinels on malformed
// varints or lengths.
func (m *Selection) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Selection: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Selection: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// repeated Expr conditions = 1
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard: a huge msglen can wrap postIndex negative,
				// which would slip past the bounds check below and panic on
				// the slice expression.
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Conditions = append(m.Conditions, &Expr{})
			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// repeated RpnExpr rpn_conditions = 2
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RpnConditions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RpnConditions = append(m.RpnConditions, &RpnExpr{})
			if err := m.RpnConditions[len(m.RpnConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// optional Executor child = 3
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Child", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Child == nil {
				m.Child = &Executor{}
			}
			if err := m.Child.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard for the skip length.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m, appending to
// repeated fields and preserving unknown fields in m.XXX_unrecognized.
func (m *Projection) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Projection: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Projection: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// repeated Expr exprs = 1
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Exprs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard: a huge msglen can wrap postIndex negative,
				// bypassing the bounds check and panicking on the slice below.
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Exprs = append(m.Exprs, &Expr{})
			if err := m.Exprs[len(m.Exprs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// repeated RpnExpr rpn_exprs = 2
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RpnExprs", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RpnExprs = append(m.RpnExprs, &RpnExpr{})
			if err := m.RpnExprs[len(m.RpnExprs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// optional Executor child = 3
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Child", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Child == nil {
				m.Child = &Executor{}
			}
			if err := m.Child.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard for the skip length.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m, appending to
// repeated fields and preserving unknown fields in m.XXX_unrecognized.
func (m *Aggregation) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Aggregation: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Aggregation: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// repeated Expr group_by = 1
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field GroupBy", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard: a huge msglen can wrap postIndex negative,
				// bypassing the bounds check and panicking on the slice below.
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.GroupBy = append(m.GroupBy, &Expr{})
			if err := m.GroupBy[len(m.GroupBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// repeated Expr agg_func = 2
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AggFunc", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.AggFunc = append(m.AggFunc, &Expr{})
			if err := m.AggFunc[len(m.AggFunc)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// optional bool streamed = 3 (varint; nonzero means true)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Streamed", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Streamed = bool(v != 0)
		case 4:
			// repeated RpnExpr rpn_group_by = 4
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RpnGroupBy", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RpnGroupBy = append(m.RpnGroupBy, &RpnExpr{})
			if err := m.RpnGroupBy[len(m.RpnGroupBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// repeated RpnExpr rpn_agg_func = 5
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field RpnAggFunc", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.RpnAggFunc = append(m.RpnAggFunc, &RpnExpr{})
			if err := m.RpnAggFunc[len(m.RpnAggFunc)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// optional Executor child = 6
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Child", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Child == nil {
				m.Child = &Executor{}
			}
			if err := m.Child.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard for the skip length.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m, appending to
// repeated fields and preserving unknown fields in m.XXX_unrecognized.
func (m *TopN) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TopN: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TopN: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// repeated ByItem order_by = 1
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field OrderBy", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard: a huge msglen can wrap postIndex negative,
				// bypassing the bounds check and panicking on the slice below.
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.OrderBy = append(m.OrderBy, &ByItem{})
			if err := m.OrderBy[len(m.OrderBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// optional uint64 limit = 2 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			// optional Executor child = 3
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Child", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 1).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Child == nil {
				m.Child = &Executor{}
			}
			if err := m.Child.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard for the skip length.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m, preserving
// unknown fields in m.XXX_unrecognized.
func (m *Limit) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Limit: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Limit: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// optional uint64 limit = 1 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
			}
			m.Limit = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Limit |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// optional Executor child = 2
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Child", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard: a huge msglen can wrap postIndex negative,
				// bypassing the bounds check and panicking on the slice below.
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Child == nil {
				m.Child = &Executor{}
			}
			if err := m.Child.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard for the skip length.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m, preserving
// unknown fields in m.XXX_unrecognized.
func (m *Kill) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Kill: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Kill: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// uint64 conn_id = 1 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field ConnID", wireType)
			}
			m.ConnID = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.ConnID |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 2:
			// bool query = 2 (varint; nonzero means true)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Query = bool(v != 0)
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard: a huge skippy could wrap iNdEx+skippy
				// negative and slip past the bounds check below.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. All scalar
// fields are optional and stored through pointers; unknown fields are
// preserved in m.XXX_unrecognized.
func (m *ExecutorExecutionSummary) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ExecutorExecutionSummary: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExecutorExecutionSummary: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// optional uint64 time_processed_ns = 1 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeProcessedNs", wireType)
			}
			var v uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.TimeProcessedNs = &v
		case 2:
			// optional uint64 num_produced_rows = 2 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NumProducedRows", wireType)
			}
			var v uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NumProducedRows = &v
		case 3:
			// optional uint64 num_iterations = 3 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NumIterations", wireType)
			}
			var v uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.NumIterations = &v
		case 4:
			// optional string executor_id = 4 (length-delimited)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ExecutorId", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				// Overflow guard: a huge length can wrap postIndex negative,
				// bypassing the bounds check and panicking on the slice below.
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := string(dAtA[iNdEx:postIndex])
			m.ExecutorId = &s
			iNdEx = postIndex
		case 5:
			// optional uint64 concurrency = 5 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType)
			}
			var v uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Concurrency = &v
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard for the skip length.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire format in dAtA into m. Repeated
// int64 fields accept both packed (wire type 2) and unpacked (wire type 0)
// encodings; unknown fields are preserved in m.XXX_unrecognized.
func (m *GraphScan) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field key (tag<<3 | wiretype) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GraphScan: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GraphScan: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// optional int64 table_id = 1 (varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TableId", wireType)
			}
			var v int64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.TableId = &v
		case 2:
			// repeated ColumnInfo columns = 2
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard: a huge msglen can wrap postIndex negative,
				// bypassing the bounds check and panicking on the slice below.
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Columns = append(m.Columns, &ColumnInfo{})
			if err := m.Columns[len(m.Columns)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 3:
			// optional bool desc = 3 (varint; nonzero means true)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Desc", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Desc = bool(v != 0)
		case 4:
			// repeated int64 primary_column_ids = 4 (packed or unpacked)
			if wireType == 0 {
				// Single unpacked element.
				var v int64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (int64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.PrimaryColumnIds = append(m.PrimaryColumnIds, v)
			} else if wireType == 2 {
				// Packed: a length prefix followed by consecutive varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthExecutor
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					// Overflow guard (see field 2).
					return ErrInvalidLengthExecutor
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v int64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowExecutor
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (int64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.PrimaryColumnIds = append(m.PrimaryColumnIds, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryColumnIds", wireType)
			}
		case 5:
			// optional EngineType next_read_engine = 5 (varint enum)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field NextReadEngine", wireType)
			}
			m.NextReadEngine = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.NextReadEngine |= (EngineType(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6:
			// repeated KeyRange ranges = 6 (non-nullable, stored by value)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthExecutor
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				// Overflow guard (see field 2).
				return ErrInvalidLengthExecutor
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Ranges = append(m.Ranges, KeyRange{})
			if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 7:
			// repeated int64 primary_prefix_column_ids = 7 (packed or unpacked)
			if wireType == 0 {
				// Single unpacked element.
				var v int64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					v |= (int64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				m.PrimaryPrefixColumnIds = append(m.PrimaryPrefixColumnIds, v)
			} else if wireType == 2 {
				// Packed: a length prefix followed by consecutive varints.
				var packedLen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					packedLen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if packedLen < 0 {
					return ErrInvalidLengthExecutor
				}
				postIndex := iNdEx + packedLen
				if postIndex < 0 {
					// Overflow guard (see field 2).
					return ErrInvalidLengthExecutor
				}
				if postIndex > l {
					return io.ErrUnexpectedEOF
				}
				for iNdEx < postIndex {
					var v int64
					for shift := uint(0); ; shift += 7 {
						if shift >= 64 {
							return ErrIntOverflowExecutor
						}
						if iNdEx >= l {
							return io.ErrUnexpectedEOF
						}
						b := dAtA[iNdEx]
						iNdEx++
						v |= (int64(b) & 0x7F) << shift
						if b < 0x80 {
							break
						}
					}
					m.PrimaryPrefixColumnIds = append(m.PrimaryPrefixColumnIds, v)
				}
			} else {
				return fmt.Errorf("proto: wrong wireType = %d for field PrimaryPrefixColumnIds", wireType)
			}
		default:
			// Unknown field: skip it and keep the raw bytes.
			iNdEx = preIndex
			skippy, err := skipExecutor(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) < 0 {
				// Overflow guard for the skip length.
				return ErrInvalidLengthExecutor
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipExecutor returns the number of bytes occupied by the single field
// whose key begins at dAtA[0], so callers can skip over fields they do not
// recognize. Groups (wire type 3) are skipped recursively up to their
// matching end-group tag.
//
// Fix over the originally generated code: in the length-delimited case the
// original executed `iNdEx += length` BEFORE validating, and never checked
// the sum for overflow, so a huge length could wrap iNdEx negative and be
// returned as a "valid" offset to the caller. The length is now validated
// first and the advanced offset is checked for overflow (this matches the
// hardened code emitted by later gogo/protobuf versions).
func skipExecutor(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		// Decode the field key to learn the wire type.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowExecutor
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint: scan to the terminating byte (high bit clear).
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			// Fixed 64-bit value.
			iNdEx += 8
			return iNdEx, nil
		case 2:
			// Length-delimited: read the length varint, then skip the payload.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowExecutor
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthExecutor
			}
			iNdEx += length
			if iNdEx < 0 {
				// Overflow guard: iNdEx + length wrapped negative.
				return 0, ErrInvalidLengthExecutor
			}
			return iNdEx, nil
		case 3:
			// Group: skip nested fields until the matching end-group tag.
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowExecutor
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipExecutor(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			// End-group with no enclosing group; consume just the tag.
			return iNdEx, nil
		case 5:
			// Fixed 32-bit value.
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}
// Sentinel errors shared by every generated Unmarshal/skip helper in this
// file.
var (
	// ErrInvalidLengthExecutor reports a negative length prefix found
	// while unmarshaling.
	ErrInvalidLengthExecutor = fmt.Errorf("proto: negative length found during unmarshaling")

	// ErrIntOverflowExecutor reports a varint that does not terminate
	// within 64 bits.
	ErrIntOverflowExecutor = fmt.Errorf("proto: integer overflow")
)
// init registers the gzipped file descriptor for executor.proto with the
// proto registry so it can be looked up by file name at runtime.
func init() { proto.RegisterFile("executor.proto", fileDescriptorExecutor) }
// fileDescriptorExecutor is the gzip-compressed FileDescriptorProto for
// executor.proto. Generated data — do not edit by hand.
var fileDescriptorExecutor = []byte{
	// 1659 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x73, 0xe3, 0x48,
	0x15, 0x8f, 0x6c, 0xd9, 0x96, 0x9f, 0x1d, 0x5b, 0x69, 0x66, 0x06, 0xcd, 0x00, 0x19, 0xaf, 0x6b,
	0xa7, 0x08, 0xae, 0x25, 0x9b, 0x9d, 0x85, 0x02, 0x0e, 0x50, 0x3b, 0x19, 0x66, 0x76, 0xcd, 0x2c,
	0x43, 0x4a, 0x49, 0x71, 0x55, 0xc9, 0x52, 0x5b, 0xee, 0x44, 0xea, 0x56, 0x5a, 0x12, 0x6b, 0xdf,
	0xb8, 0xf0, 0x09, 0xe0, 0xc0, 0x8d, 0x6f, 0xc0, 0xe7, 0x98, 0x2a, 0x2e, 0x5c, 0xa9, 0xa2, 0x28,
	0x6a, 0xf8, 0x06, 0xdc, 0x29, 0xa8, 0xee, 0x96, 0xe4, 0x76, 0xec, 0x84, 0x7c, 0x80, 0x3d, 0x59,
	0xfa, 0xbd, 0xdf, 0x7b, 0xfd, 0x5e, 0xbf, 0x3f, 0x7a, 0x86, 0x01, 0x5e, 0xe2, 0xa0, 0xc8, 0x19,
	0x3f, 0x4e, 0x39, 0xcb, 0x19, 0x32, 0x73, 0x92, 0xce, 0x9e, 0xd8, 0x78, 0x99, 0x72, 0x9c, 0x65,
	0x84, 0x51, 0x85, 0x3f, 0xe9, 0x67, 0xc1, 0x02, 0x27, 0x7e, 0xf9, 0xf6, 0x20, 0x62, 0x11, 0x93,
	0x8f, 0x1f, 0x8b, 0x27, 0x85, 0x8e, 0xff, 0x6d, 0x82, 0xf5, 0xaa, 0x34, 0x87, 0x3e, 0x84, 0x46,
	0x9e, 0x3a, 0xc6, 0xc8, 0x38, 0x1a, 0x3c, 0x1f, 0x1c, 0x0b, 0xab, 0xc7, 0x42, 0x76, 0xb1, 0x4a,
	0xf1, 0xa9, 0xf9, 0xee, 0x1f, 0x4f, 0xf7, 0xdc, 0x46, 0x9e, 0xa2, 0x09, 0x58, 0xf9, 0x2c, 0xf6,
	0xb2, 0xc0, 0xa7, 0x4e, 0x63, 0x64, 0x1c, 0xf5, 0x9e, 0x0f, 0x15, 0xf7, 0xc2, 0x9f, 0xc5, 0xf8,
	0x3c, 0xf0, 0xa9, 0xdb, 0xc9, 0x67, 0xb1, 0x78, 0x10, 0x5c, 0x12, 0x2e, 0x15, 0xb7, 0xa9, 0x73,
	0xa7, 0x34, 0xc4, 0x4b, 0xc5, 0x25, 0xa1, 0x7c, 0x40, 0xdf, 0x87, 0x6e, 0x86, 0x63, 0x1c, 0xe4,
	0x84, 0x51, 0xc7, 0xd4, 0xc9, 0xe7, 0x15, 0xec, 0xae, 0x19, 0xe8, 0x53, 0xe8, 0xf9, 0x51, 0xc4,
	0x71, 0xe4, 0x4b, 0x85, 0x96, 0x54, 0x38, 0x50, 0x0a, 0x2f, 0xd6, 0x02, 0x57, 0x67, 0xa1, 0x43,
	0x30, 0x73, 0x96, 0xbe, 0x75, 0xda, 0x92, 0x0d, 0xa5, 0xdf, 0x2c, 0x7d, 0xeb, 0x4a, 0x1c, 0x7d,
	0x00, 0xad, 0x98, 0x24, 0x24, 0x77, 0x3a, 0x92, 0xd0, 0x53, 0x84, 0x2f, 0x05, 0xe4, 0x2a, 0x09,
	0x7a, 0x09, 0x07, 0x78, 0x19, 0x2c, 0x7c, 0x1a, 0x61, 0x8f, 0xe3, 0x00, 0x93, 0xdf, 0x60, 0xee,
	0x58, 0x92, 0xfe, 0xa8, 0xba, 0x33, 0x25, 0x76, 0x4b, 0xa9, 0x6b, 0xe3, 0x1b, 0x88, 0xf0, 0xe3,
	0x92, 0x11, 0xea, 0x74, 0x75, 0x3f, 0x7e, 0xc1, 0x08, 0x75, 0x25, 0x8e, 0x9e, 0x42, 0xaf, 0x4a,
	0xb2, 0x47, 0x42, 0x07, 0x46, 0xc6, 0x51, 0xd7, 0x85, 0x0a, 0x9a, 0x86, 0xc2, 0xc0, 0x15, 0x89,
	0x63, 0xa7, 0xa7, 0x1b, 0x78, 0x43, 0xe2, 0xd8, 0x95, 0x38, 0xfa, 0x29, 0x0c, 0x6b, 0x2f, 0x33,
	0x4c, 0x43, 0xcc, 0x9d, 0xbe, 0xa4, 0x3e, 0xd8, 0xf4, 0xf1, 0x5c, 0xca, 0xdc, 0x01, 0xde, 0x78,
	0x47, 0x27, 0x00, 0x67, 0x9c, 0x5d, 0x96, 0xc9, 0xd8, 0x97, 0x9a, 0xb6, 0xd2, 0x5c, 0xe3, 0xae,
	0xc6, 0x11, 0x99, 0x8e, 0x78, 0xaa, 0x32, 0x3d, 0xd0, 0x93, 0xf7, 0x39, 0xf7, 0xd3, 0x85, 0xca,
	0x74, 0xc4, 0x53, 0xf1, 0x30, 0xfe, 0x53, 0x03, 0x06, 0x9b, 0x0e, 0xa0, 0x23, 0xad, 0xf4, 0xd0,
	0xa6, 0x8b, 0x5b, 0xe5, 0x77, 0x80, 0x69, 0xc0, 0x42, 0x1c, 0x7a, 0xb9, 0x9f, 0x5d, 0x79, 0x09,
	0xce, 0x7d, 0xa7, 0x31, 0x6a, 0x1e, 0xf5, 0xdd, 0x61, 0x29, 0xb8, 0xf0, 0xb3, 0xab, 0x5f, 0xe2,
	0xdc, 0x47, 0x9f, 0xc0, 0x20, 0xf5, 0x79, 0x4e, 0x84, 0x87, 0xde, 0x15, 0x5e, 0x65, 0x4e, 0x73,
	0xd4, 0x5c, 0xdf, 0xd7, 0xab, 0x65, 0xca, 0xdd, 0xfd, 0x9a, 0xf1, 0x06, 0xaf, 0x32, 0xf4, 0x21,
	0xb4, 0x82, 0x05, 0x89, 0xc3, 0xb2, 0x02, 0xb5, 0x36, 0x10, 0x37, 0xef, 0x2a, 0x21, 0x7a, 0x06,
	0xad, 0x7c, 0x95, 0xe2, 0xcc, 0x69, 0x49, 0x7b, 0x65, 0xa8, 0xaf, 0x09, 0x8e, 0x43, 0xe1, 0xae,
	0xab, 0xa4, 0xe8, 0x47, 0x30, 0xf4, 0xe3, 0xd8, 0x9b, 0x0b, 0xdc, 0x53, 0x0a, 0xed, 0xdd, 0x0a,
	0xfb, 0x7e, 0x1c, 0xd7, 0x6f, 0xd9, 0xf8, 0x0f, 0x06, 0xd8, 0x37, 0xcb, 0x68, 0x77, 0xe4, 0xc6,
	0xee, 0xc8, 0x4f, 0xa0, 0xa7, 0x9f, 0xda, 0xd8, 0x7d, 0x2a, 0xcc, 0xeb, 0x23, 0xd1, 0x58, 0x66,
	0xa0, 0x79, 0x5b, 0x06, 0xc4, 0xdd, 0x8f, 0xff, 0xd2, 0x80, 0x6e, 0xdd, 0xe5, 0xe8, 0x29, 0x58,
	0xb9, 0x78, 0x11, 0x15, 0x2a, 0x32, 0xd7, 0x2c, 0xb3, 0xd4, 0x91, 0xe8, 0x34, 0x44, 0x13, 0xe8,
	0x04, 0x2c, 0x2e, 0x12, 0x5a, 0x39, 0x50, 0x96, 0xd0, 0x4b, 0x09, 0x4e, 0xe9, 0x9c, 0xb9, 0x15,
	0x01, 0x39, 0x60, 0x86, 0x38, 0x0b, 0xa4, 0x03, 0x56, 0x69, 0x48, 0x22, 0xe8, 0x23, 0x40, 0x29,
	0x27, 0x89, 0xcf, 0x57, 0x9e, 0x22, 0x7b, 0x24, 0xcc, 0x1c, 0x73, 0xd4, 0x3c, 0x6a, 0xba, 0x76,
	0x29, 0x29, 0x2d, 0x86, 0x19, 0xfa, 0x0c, 0x6c, 0x8a, 0x97, 0xb9, 0xc7, 0xb1, 0x1f, 0x7a, 0x98,
	0x46, 0x84, 0x62, 0x39, 0x1b, 0x06, 0xd5, 0xe1, 0xaf, 0x24, 0xa6, 0x15, 0xd5, 0x40, 0xf0, 0x5d,
	0xec, 0x87, 0x4a, 0x82, 0x3e, 0x82, 0x36, 0x17, 0x51, 0x57, 0xb9, 0x2a, 0x4b, 0xe0, 0x0d, 0x5e,
	0xb9, 0x02, 0x2e, 0xb5, 0x4a, 0x0e, 0xfa, 0x09, 0x3c, 0xae, 0xbc, 0x4b, 0x39, 0x9e, 0x93, 0xa5,
	0xee, 0x64, 0x47, 0x3a, 0xf9, 0xa8, 0x24, 0x9c, 0x49, 0x79, 0xed, 0xea, 0xf8, 0xbf, 0x26, 0x98,
	0xa2, 0xe7, 0xd1, 0x27, 0xd0, 0x15, 0x5d, 0x2f, 0x73, 0xb5, 0x39, 0x7e, 0x85, 0x58, 0x73, 0xd5,
	0xba, 0x2c, 0xdf, 0xd1, 0xcf, 0x60, 0x20, 0x55, 0xc4, 0x48, 0x50, 0x7a, 0x0d, 0x3d, 0x73, 0x42,
	0xef, 0xc6, 0xe8, 0xee, 0x5f, 0x6a, 0x98, 0x68, 0x57, 0x59, 0xc9, 0x1c, 0xd3, 0xb2, 0x27, 0x6e,
	0x56, 0x7a, 0x2d, 0x47, 0x1f, 0x40, 0x97, 0x50, 0x8a, 0xc5, 0x24, 0x5a, 0xca, 0xb6, 0xa8, 0x12,
	0x6d, 0x49, 0x78, 0x1a, 0x2e, 0xd1, 0x09, 0x0c, 0x62, 0x3c, 0xcf, 0x3d, 0xe9, 0x93, 0x6c, 0xb4,
	0xd6, 0x56, 0xa3, 0xf5, 0x05, 0x43, 0x38, 0x26, 0xfb, 0xec, 0x39, 0x0c, 0x39, 0x89, 0x16, 0xba,
	0x4a, 0x7b, 0xbb, 0x37, 0x25, 0xa5, 0xd6, 0x39, 0x81, 0x5e, 0xca, 0xd9, 0x0c, 0x97, 0x45, 0xdd,
	0xb9, 0xa5, 0xa8, 0x25, 0x47, 0x15, 0xf5, 0x09, 0xf4, 0x66, 0x05, 0xa9, 0xdb, 0xc0, 0xba, 0x45,
	0x43, 0x72, 0x94, 0xc6, 0xa7, 0x30, 0x94, 0x91, 0x04, 0x8c, 0x86, 0x72, 0x2a, 0x64, 0x4e, 0x77,
	0xcb, 0x2f, 0x19, 0xec, 0xcb, 0x9a, 0x81, 0x7e, 0x08, 0xb6, 0x0a, 0x46, 0xd3, 0x82, 0x2d, 0x2d,
	0x15, 0xf0, 0xa6, 0x1a, 0xcb, 0x17, 0x98, 0xeb, 0x6a, 0xbd, 0x6d, 0x35, 0xc9, 0xd1, 0xd4, 0xbe,
	0x80, 0x6f, 0x29, 0x35, 0x7c, 0xad, 0x69, 0x7a, 0x73, 0xce, 0x12, 0x8f, 0x50, 0xa7, 0xbf, 0x65,
	0xe1, 0x9b, 0x92, 0xfe, 0xea, 0x7a, 0x6d, 0xe3, 0x35, 0x67, 0xc9, 0x94, 0x8e, 0xff, 0x66, 0x40,
	0xb7, 0xfe, 0x12, 0xff, 0xff, 0x7e, 0x7e, 0x0a, 0x16, 0x11, 0x6c, 0x41, 0x68, 0xe8, 0x04, 0x89,
	0x6e, 0x36, 0x7c, 0xf3, 0xbe, 0x0d, 0x6f, 0x6e, 0x35, 0xfc, 0x23, 0x68, 0x17, 0x94, 0x5c, 0x17,
	0xaa, 0x71, 0x2d, 0xb7, 0x7c, 0xbb, 0x65, 0x10, 0xb4, 0x77, 0x0f, 0x82, 0xf1, 0xef, 0x0d, 0xe8,
	0xd6, 0x8b, 0x03, 0x9a, 0x00, 0x68, 0x97, 0x6c, 0x6c, 0x5d, 0x91, 0x26, 0x45, 0x3f, 0x80, 0x01,
	0x4f, 0xa9, 0x9e, 0x14, 0x35, 0xbd, 0xf6, 0x15, 0xdf, 0x4d, 0x69, 0x59, 0x9c, 0x29, 0xd5, 0xb2,
	0x52, 0x7f, 0x38, 0x9a, 0x77, 0x7c, 0x38, 0xc6, 0xbf, 0x35, 0xf4, 0x2f, 0x2b, 0x1a, 0x41, 0x4b,
	0xac, 0x6d, 0xbb, 0x3c, 0x52, 0x02, 0x34, 0x81, 0xae, 0x70, 0x46, 0xb1, 0x76, 0xfa, 0x61, 0x71,
	0xf5, 0x70, 0x5f, 0x17, 0x7e, 0xd7, 0x80, 0x9e, 0xb6, 0x20, 0xa1, 0x67, 0xe2, 0xcb, 0xcd, 0x8a,
	0xd4, 0x9b, 0xad, 0x76, 0xb8, 0xd1, 0x91, 0xb2, 0xd3, 0x95, 0xa0, 0xf9, 0x51, 0xe4, 0xcd, 0x0b,
	0x1a, 0x94, 0x7e, 0x6c, 0xd0, 0xfc, 0x28, 0x7a, 0x5d, 0xd0, 0x00, 0x8d, 0xc0, 0xca, 0x72, 0x8e,
	0xfd, 0x04, 0x87, 0x1b, 0xb3, 0xbc, 0x46, 0xd1, 0xc7, 0xd0, 0x17, 0x11, 0xd5, 0x67, 0x9a, 0xbb,
	0x82, 0x02, 0x9e, 0xd2, 0xcf, 0xcb, 0x93, 0x4b, 0x85, 0xfa, 0xf4, 0xd6, 0x6d, 0x0a, 0x2f, 0x4a,
	0x1f, 0xea, 0x7b, 0x68, 0xdf, 0x75, 0x0f, 0xd7, 0x60, 0x8a, 0xcd, 0x0f, 0x7d, 0x17, 0x2c, 0xc6,
	0x43, 0xcc, 0xd7, 0xf1, 0xf7, 0x95, 0xc2, 0xe9, 0x6a, 0x9a, 0xe3, 0xc4, 0xed, 0x48, 0xe9, 0xe9,
	0x0a, 0x3d, 0xa9, 0x96, 0x43, 0x51, 0xfb, 0x66, 0x19, 0x57, 0xb9, 0x15, 0xde, 0xef, 0xea, 0xa7,
	0xd0, 0x92, 0xbb, 0xe4, 0xda, 0x94, 0x71, 0x87, 0xa9, 0xc6, 0x5d, 0xa6, 0x3e, 0x03, 0x53, 0xac,
	0x7b, 0xe8, 0xdb, 0xd0, 0x0e, 0x18, 0xa5, 0xd3, 0x9f, 0x6f, 0x98, 0x2a, 0x31, 0x71, 0xce, 0x75,
	0x81, 0xf9, 0x4a, 0xda, 0xaa, 0x52, 0xa1, 0xa0, 0xf1, 0xdf, 0x0d, 0x70, 0x2a, 0xab, 0xea, 0x97,
	0x30, 0x7a, 0x5e, 0x24, 0xa2, 0x8d, 0xc4, 0xae, 0x91, 0x93, 0x04, 0x7b, 0x29, 0x67, 0x01, 0xce,
	0x32, 0x1c, 0x7a, 0xb2, 0x6d, 0x8c, 0x23, 0xd3, 0x1d, 0x0a, 0xc1, 0x59, 0x85, 0xbf, 0x15, 0x25,
	0x7a, 0x40, 0x8b, 0x44, 0x50, 0xc3, 0x22, 0xc0, 0xa1, 0xc7, 0xd9, 0x57, 0x99, 0xba, 0x23, 0x77,
	0x48, 0x8b, 0xe4, 0xac, 0xc4, 0x5d, 0xf6, 0x55, 0x86, 0x9e, 0xc1, 0x40, 0x70, 0x49, 0x8e, 0xb9,
	0xaf, 0x7a, 0xab, 0x29, 0x89, 0xfb, 0xb4, 0x48, 0xa6, 0x35, 0x78, 0x73, 0xff, 0x35, 0xb7, 0xf6,
	0xdf, 0x11, 0xf4, 0x02, 0x46, 0x83, 0x82, 0x73, 0x4c, 0x83, 0x95, 0x1c, 0x14, 0xa6, 0xab, 0x43,
	0x72, 0x57, 0xa9, 0x77, 0xcf, 0x5b, 0x66, 0x9b, 0xf1, 0xf5, 0xae, 0x72, 0xbf, 0x5d, 0x65, 0xf2,
	0x1f, 0x43, 0xfd, 0x4f, 0x94, 0xcb, 0xc3, 0x01, 0xec, 0x8b, 0xdf, 0x7a, 0x13, 0xb4, 0xf7, 0x2a,
	0xa8, 0xfe, 0x98, 0xd8, 0x46, 0x05, 0xd5, 0x33, 0xd8, 0x6e, 0xa0, 0x6f, 0xc0, 0x50, 0x40, 0xda,
	0xf4, 0xb1, 0x9b, 0xa8, 0x0f, 0x96, 0xb4, 0xc6, 0xd2, 0xb7, 0xb6, 0x89, 0xf6, 0xa1, 0x2b, 0xde,
	0x64, 0x9b, 0xd8, 0xad, 0xda, 0x88, 0x1c, 0x1e, 0x2f, 0xa2, 0xc8, 0x6e, 0x57, 0x7c, 0xb1, 0x15,
	0xd8, 0x9d, 0xea, 0x4d, 0xf4, 0x82, 0x6d, 0xa1, 0x47, 0x80, 0xc4, 0xdb, 0xe6, 0x9f, 0x0b, 0xbb,
	0x8b, 0x1c, 0x78, 0xa0, 0xe3, 0xd5, 0x4a, 0x6d, 0x03, 0x42, 0x30, 0x10, 0x92, 0xf5, 0x4c, 0xb6,
	0x7b, 0xd5, 0xa1, 0x75, 0xf5, 0xd8, 0xfd, 0xc9, 0x8f, 0xa1, 0xaf, 0x6f, 0xc3, 0x68, 0x08, 0xbd,
	0x33, 0x3f, 0xcb, 0x2e, 0x16, 0x9c, 0x15, 0xd1, 0xc2, 0xde, 0x13, 0x7e, 0x9f, 0x72, 0xe6, 0x87,
	0x81, 0x9f, 0xe5, 0xb6, 0x81, 0x2c, 0x30, 0xbf, 0xf0, 0xb3, 0x85, 0xdd, 0x98, 0x1c, 0x03, 0xac,
	0xd3, 0x88, 0xba, 0xd0, 0xfa, 0x92, 0x05, 0x7e, 0x6c, 0xef, 0x09, 0xca, 0x05, 0x79, 0xf3, 0x6b,
	0xdb, 0x40, 0x3d, 0xe8, 0x5c, 0x90, 0xd7, 0xb1, 0xe2, 0xff, 0xd9, 0x00, 0xab, 0x5a, 0xfb, 0xd6,
	0xd7, 0x4a, 0x31, 0x97, 0x01, 0xef, 0xa1, 0x87, 0x70, 0x20, 0x2f, 0x08, 0xcf, 0xf3, 0x5f, 0x15,
	0x79, 0x09, 0x1b, 0x55, 0xe4, 0xae, 0x58, 0x31, 0xd6, 0x78, 0x03, 0xd9, 0xd0, 0x57, 0x59, 0x48,
	0x88, 0x44, 0x9a, 0xe8, 0x01, 0xd8, 0x32, 0x09, 0x34, 0x27, 0x35, 0x6a, 0xa2, 0xc7, 0xf0, 0x70,
	0xc3, 0x6c, 0x2d, 0x6a, 0xa1, 0xef, 0xc0, 0xe3, 0x4a, 0x61, 0x5b, 0xdc, 0x9e, 0x8c, 0xa0, 0xaf,
	0xaf, 0x9b, 0xd5, 0x89, 0x22, 0x7c, 0xe5, 0xf2, 0xe9, 0xf7, 0xde, 0xbd, 0x3f, 0x34, 0xfe, 0xfa,
	0xfe, 0xd0, 0xf8, 0xe7, 0xfb, 0x43, 0xe3, 0x8f, 0xff, 0x3a, 0xdc, 0x83, 0x87, 0x01, 0x4b, 0x8e,
	0x53, 0x42, 0xa3, 0xc0, 0x4f, 0x8f, 0x73, 0x12, 0xce, 0x64, 0xdd, 0x9e, 0x19, 0xff, 0x0b, 0x00,
	0x00, 0xff, 0xff, 0x84, 0x71, 0xe8, 0xb5, 0xdc, 0x10, 0x00, 0x00,
}
package callrecords
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// DeviceInfo captures device and media quality telemetry reported by a media
// endpoint for a call record. All fields are optional pointers; nil means the
// property was absent from the payload.
type DeviceInfo struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Name of the capture device driver used by the media endpoint.
    captureDeviceDriver *string
    // Name of the capture device used by the media endpoint.
    captureDeviceName *string
    // Fraction of the call that the media endpoint detected the capture device was not working properly.
    captureNotFunctioningEventRatio *float32
    // Fraction of the call that the media endpoint detected the CPU resources available were insufficient and caused poor quality of the audio sent and received.
    // NOTE: the "Insufficent" spelling matches the wire-format property name and must not be corrected.
    cpuInsufficentEventRatio *float32
    // Fraction of the call that the media endpoint detected clipping in the captured audio that caused poor quality of the audio being sent.
    deviceClippingEventRatio *float32
    // Fraction of the call that the media endpoint detected glitches or gaps in the audio played or captured that caused poor quality of the audio being sent or received.
    deviceGlitchEventRatio *float32
    // Number of times during the call that the media endpoint detected howling or screeching audio.
    howlingEventCount *int32
    // The root mean square (RMS) of the incoming signal of up to the first 30 seconds of the call.
    initialSignalLevelRootMeanSquare *float32
    // Fraction of the call that the media endpoint detected low speech level that caused poor quality of the audio being sent.
    lowSpeechLevelEventRatio *float32
    // Fraction of the call that the media endpoint detected low speech to noise level that caused poor quality of the audio being sent.
    lowSpeechToNoiseEventRatio *float32
    // Glitches per 5 minute interval for the media endpoint's microphone.
    micGlitchRate *float32
    // Average energy level of received audio for audio classified as mono noise or left channel of stereo noise by the media endpoint.
    receivedNoiseLevel *int32
    // Average energy level of received audio for audio classified as mono speech, or left channel of stereo speech by the media endpoint.
    receivedSignalLevel *int32
    // Name of the render device driver used by the media endpoint.
    renderDeviceDriver *string
    // Name of the render device used by the media endpoint.
    renderDeviceName *string
    // Fraction of the call that media endpoint detected device render is muted.
    renderMuteEventRatio *float32
    // Fraction of the call that the media endpoint detected the render device was not working properly.
    renderNotFunctioningEventRatio *float32
    // Fraction of the call that media endpoint detected device render volume is set to 0.
    renderZeroVolumeEventRatio *float32
    // Average energy level of sent audio for audio classified as mono noise or left channel of stereo noise by the media endpoint.
    sentNoiseLevel *int32
    // Average energy level of sent audio for audio classified as mono speech, or left channel of stereo speech by the media endpoint.
    sentSignalLevel *int32
    // Glitches per 5 minute internal for the media endpoint's loudspeaker.
    speakerGlitchRate *float32
}
// NewDeviceInfo instantiates a new deviceInfo and sets the default values.
func NewDeviceInfo()(*DeviceInfo) {
    m := &DeviceInfo{}
    // Start with an empty additional-data map so callers can add entries
    // without a nil check.
    m.SetAdditionalData(make(map[string]interface{}))
    return m
}
// CreateDeviceInfoFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value
func CreateDeviceInfoFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    // DeviceInfo has no derived types, so the parse node is never inspected.
    instance := NewDeviceInfo()
    return instance, nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *DeviceInfo) GetAdditionalData()(map[string]interface{}) {
    // Nil-receiver guard keeps the generated accessors safe to chain.
    if m == nil {
        return nil
    }
    return m.additionalData
}
// GetCaptureDeviceDriver gets the captureDeviceDriver property value. Name of the capture device driver used by the media endpoint.
func (m *DeviceInfo) GetCaptureDeviceDriver()(*string) {
    if m == nil {
        return nil
    }
    return m.captureDeviceDriver
}
// GetCaptureDeviceName gets the captureDeviceName property value. Name of the capture device used by the media endpoint.
func (m *DeviceInfo) GetCaptureDeviceName()(*string) {
    if m == nil {
        return nil
    }
    return m.captureDeviceName
}
// GetCaptureNotFunctioningEventRatio gets the captureNotFunctioningEventRatio property value. Fraction of the call that the media endpoint detected the capture device was not working properly.
func (m *DeviceInfo) GetCaptureNotFunctioningEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.captureNotFunctioningEventRatio
}
// GetCpuInsufficentEventRatio gets the cpuInsufficentEventRatio property value. Fraction of the call that the media endpoint detected the CPU resources available were insufficient and caused poor quality of the audio sent and received.
func (m *DeviceInfo) GetCpuInsufficentEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.cpuInsufficentEventRatio
}
// GetDeviceClippingEventRatio gets the deviceClippingEventRatio property value. Fraction of the call that the media endpoint detected clipping in the captured audio that caused poor quality of the audio being sent.
func (m *DeviceInfo) GetDeviceClippingEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.deviceClippingEventRatio
}
// GetDeviceGlitchEventRatio gets the deviceGlitchEventRatio property value. Fraction of the call that the media endpoint detected glitches or gaps in the audio played or captured that caused poor quality of the audio being sent or received.
func (m *DeviceInfo) GetDeviceGlitchEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.deviceGlitchEventRatio
}
// GetFieldDeserializers the deserialization information for the current model
func (m *DeviceInfo) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    // Every field deserializer follows the same shape: read a typed value
    // from the parse node, propagate any error, and invoke the setter only
    // when the value is present. The three adapters below capture that
    // pattern once per value type instead of repeating it inline for each of
    // the 21 properties.
    stringField := func(set func(*string)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetStringValue()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    float32Field := func(set func(*float32)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetFloat32Value()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    int32Field := func(set func(*int32)) func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error) {
        return func(n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
            val, err := n.GetInt32Value()
            if err != nil {
                return err
            }
            if val != nil {
                set(val)
            }
            return nil
        }
    }
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["captureDeviceDriver"] = stringField(m.SetCaptureDeviceDriver)
    res["captureDeviceName"] = stringField(m.SetCaptureDeviceName)
    res["captureNotFunctioningEventRatio"] = float32Field(m.SetCaptureNotFunctioningEventRatio)
    res["cpuInsufficentEventRatio"] = float32Field(m.SetCpuInsufficentEventRatio)
    res["deviceClippingEventRatio"] = float32Field(m.SetDeviceClippingEventRatio)
    res["deviceGlitchEventRatio"] = float32Field(m.SetDeviceGlitchEventRatio)
    res["howlingEventCount"] = int32Field(m.SetHowlingEventCount)
    res["initialSignalLevelRootMeanSquare"] = float32Field(m.SetInitialSignalLevelRootMeanSquare)
    res["lowSpeechLevelEventRatio"] = float32Field(m.SetLowSpeechLevelEventRatio)
    res["lowSpeechToNoiseEventRatio"] = float32Field(m.SetLowSpeechToNoiseEventRatio)
    res["micGlitchRate"] = float32Field(m.SetMicGlitchRate)
    res["receivedNoiseLevel"] = int32Field(m.SetReceivedNoiseLevel)
    res["receivedSignalLevel"] = int32Field(m.SetReceivedSignalLevel)
    res["renderDeviceDriver"] = stringField(m.SetRenderDeviceDriver)
    res["renderDeviceName"] = stringField(m.SetRenderDeviceName)
    res["renderMuteEventRatio"] = float32Field(m.SetRenderMuteEventRatio)
    res["renderNotFunctioningEventRatio"] = float32Field(m.SetRenderNotFunctioningEventRatio)
    res["renderZeroVolumeEventRatio"] = float32Field(m.SetRenderZeroVolumeEventRatio)
    res["sentNoiseLevel"] = int32Field(m.SetSentNoiseLevel)
    res["sentSignalLevel"] = int32Field(m.SetSentSignalLevel)
    res["speakerGlitchRate"] = float32Field(m.SetSpeakerGlitchRate)
    return res
}
// GetHowlingEventCount gets the howlingEventCount property value. Number of times during the call that the media endpoint detected howling or screeching audio.
func (m *DeviceInfo) GetHowlingEventCount()(*int32) {
    // Nil-receiver guard keeps the generated accessors safe to chain.
    if m == nil {
        return nil
    }
    return m.howlingEventCount
}
// GetInitialSignalLevelRootMeanSquare gets the initialSignalLevelRootMeanSquare property value. The root mean square (RMS) of the incoming signal of up to the first 30 seconds of the call.
func (m *DeviceInfo) GetInitialSignalLevelRootMeanSquare()(*float32) {
    if m == nil {
        return nil
    }
    return m.initialSignalLevelRootMeanSquare
}
// GetLowSpeechLevelEventRatio gets the lowSpeechLevelEventRatio property value. Fraction of the call that the media endpoint detected low speech level that caused poor quality of the audio being sent.
func (m *DeviceInfo) GetLowSpeechLevelEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.lowSpeechLevelEventRatio
}
// GetLowSpeechToNoiseEventRatio gets the lowSpeechToNoiseEventRatio property value. Fraction of the call that the media endpoint detected low speech to noise level that caused poor quality of the audio being sent.
func (m *DeviceInfo) GetLowSpeechToNoiseEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.lowSpeechToNoiseEventRatio
}
// GetMicGlitchRate gets the micGlitchRate property value. Glitches per 5 minute interval for the media endpoint's microphone.
func (m *DeviceInfo) GetMicGlitchRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.micGlitchRate
}
// GetReceivedNoiseLevel gets the receivedNoiseLevel property value. Average energy level of received audio for audio classified as mono noise or left channel of stereo noise by the media endpoint.
func (m *DeviceInfo) GetReceivedNoiseLevel()(*int32) {
    if m == nil {
        return nil
    }
    return m.receivedNoiseLevel
}
// GetReceivedSignalLevel gets the receivedSignalLevel property value. Average energy level of received audio for audio classified as mono speech, or left channel of stereo speech by the media endpoint.
func (m *DeviceInfo) GetReceivedSignalLevel()(*int32) {
    if m == nil {
        return nil
    }
    return m.receivedSignalLevel
}
// GetRenderDeviceDriver gets the renderDeviceDriver property value. Name of the render device driver used by the media endpoint.
func (m *DeviceInfo) GetRenderDeviceDriver()(*string) {
    if m == nil {
        return nil
    }
    return m.renderDeviceDriver
}
// GetRenderDeviceName gets the renderDeviceName property value. Name of the render device used by the media endpoint.
func (m *DeviceInfo) GetRenderDeviceName()(*string) {
    if m == nil {
        return nil
    }
    return m.renderDeviceName
}
// GetRenderMuteEventRatio gets the renderMuteEventRatio property value. Fraction of the call that media endpoint detected device render is muted.
func (m *DeviceInfo) GetRenderMuteEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.renderMuteEventRatio
}
// GetRenderNotFunctioningEventRatio gets the renderNotFunctioningEventRatio property value. Fraction of the call that the media endpoint detected the render device was not working properly.
func (m *DeviceInfo) GetRenderNotFunctioningEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.renderNotFunctioningEventRatio
}
// GetRenderZeroVolumeEventRatio gets the renderZeroVolumeEventRatio property value. Fraction of the call that media endpoint detected device render volume is set to 0.
func (m *DeviceInfo) GetRenderZeroVolumeEventRatio()(*float32) {
    if m == nil {
        return nil
    }
    return m.renderZeroVolumeEventRatio
}
// GetSentNoiseLevel gets the sentNoiseLevel property value. Average energy level of sent audio for audio classified as mono noise or left channel of stereo noise by the media endpoint.
func (m *DeviceInfo) GetSentNoiseLevel()(*int32) {
    if m == nil {
        return nil
    }
    return m.sentNoiseLevel
}
// GetSentSignalLevel gets the sentSignalLevel property value. Average energy level of sent audio for audio classified as mono speech, or left channel of stereo speech by the media endpoint.
func (m *DeviceInfo) GetSentSignalLevel()(*int32) {
    if m == nil {
        return nil
    }
    return m.sentSignalLevel
}
// GetSpeakerGlitchRate gets the speakerGlitchRate property value. Glitches per 5 minute internal for the media endpoint's loudspeaker.
func (m *DeviceInfo) GetSpeakerGlitchRate()(*float32) {
    if m == nil {
        return nil
    }
    return m.speakerGlitchRate
}
// Serialize serializes information the current object
func (m *DeviceInfo) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    // Each property is written with an if-initializer so the error variable
    // stays scoped to its own check; the write order matches the generated
    // original. Nil property values are passed through to the writer, which
    // decides whether to emit them.
    if err := writer.WriteStringValue("captureDeviceDriver", m.GetCaptureDeviceDriver()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("captureDeviceName", m.GetCaptureDeviceName()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("captureNotFunctioningEventRatio", m.GetCaptureNotFunctioningEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("cpuInsufficentEventRatio", m.GetCpuInsufficentEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("deviceClippingEventRatio", m.GetDeviceClippingEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("deviceGlitchEventRatio", m.GetDeviceGlitchEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteInt32Value("howlingEventCount", m.GetHowlingEventCount()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("initialSignalLevelRootMeanSquare", m.GetInitialSignalLevelRootMeanSquare()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("lowSpeechLevelEventRatio", m.GetLowSpeechLevelEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("lowSpeechToNoiseEventRatio", m.GetLowSpeechToNoiseEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("micGlitchRate", m.GetMicGlitchRate()); err != nil {
        return err
    }
    if err := writer.WriteInt32Value("receivedNoiseLevel", m.GetReceivedNoiseLevel()); err != nil {
        return err
    }
    if err := writer.WriteInt32Value("receivedSignalLevel", m.GetReceivedSignalLevel()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("renderDeviceDriver", m.GetRenderDeviceDriver()); err != nil {
        return err
    }
    if err := writer.WriteStringValue("renderDeviceName", m.GetRenderDeviceName()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("renderMuteEventRatio", m.GetRenderMuteEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("renderNotFunctioningEventRatio", m.GetRenderNotFunctioningEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("renderZeroVolumeEventRatio", m.GetRenderZeroVolumeEventRatio()); err != nil {
        return err
    }
    if err := writer.WriteInt32Value("sentNoiseLevel", m.GetSentNoiseLevel()); err != nil {
        return err
    }
    if err := writer.WriteInt32Value("sentSignalLevel", m.GetSentSignalLevel()); err != nil {
        return err
    }
    if err := writer.WriteFloat32Value("speakerGlitchRate", m.GetSpeakerGlitchRate()); err != nil {
        return err
    }
    if err := writer.WriteAdditionalData(m.GetAdditionalData()); err != nil {
        return err
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *DeviceInfo) SetAdditionalData(value map[string]interface{})() {
    // Guard clause: setters are no-ops on a nil receiver.
    if m == nil {
        return
    }
    m.additionalData = value
}
// SetCaptureDeviceDriver sets the captureDeviceDriver property value. Name of the capture device driver used by the media endpoint.
func (m *DeviceInfo) SetCaptureDeviceDriver(value *string)() {
    if m == nil {
        return
    }
    m.captureDeviceDriver = value
}
// SetCaptureDeviceName sets the captureDeviceName property value. Name of the capture device used by the media endpoint.
func (m *DeviceInfo) SetCaptureDeviceName(value *string)() {
    if m == nil {
        return
    }
    m.captureDeviceName = value
}
// SetCaptureNotFunctioningEventRatio sets the captureNotFunctioningEventRatio property value. Fraction of the call that the media endpoint detected the capture device was not working properly.
func (m *DeviceInfo) SetCaptureNotFunctioningEventRatio(value *float32)() {
    if m == nil {
        return
    }
    m.captureNotFunctioningEventRatio = value
}
// SetCpuInsufficentEventRatio sets the cpuInsufficentEventRatio property value. Fraction of the call that the media endpoint detected the CPU resources available were insufficient and caused poor quality of the audio sent and received.
func (m *DeviceInfo) SetCpuInsufficentEventRatio(value *float32)() {
    if m == nil {
        return
    }
    m.cpuInsufficentEventRatio = value
}
// SetDeviceClippingEventRatio sets the deviceClippingEventRatio property value. Fraction of the call that the media endpoint detected clipping in the captured audio that caused poor quality of the audio being sent.
func (m *DeviceInfo) SetDeviceClippingEventRatio(value *float32)() {
    if m == nil {
        return
    }
    m.deviceClippingEventRatio = value
}
// SetDeviceGlitchEventRatio sets the deviceGlitchEventRatio property value. Fraction of the call that the media endpoint detected glitches or gaps in the audio played or captured that caused poor quality of the audio being sent or received.
func (m *DeviceInfo) SetDeviceGlitchEventRatio(value *float32)() {
    if m == nil {
        return
    }
    m.deviceGlitchEventRatio = value
}
// SetHowlingEventCount sets the howlingEventCount property value. Number of times during the call that the media endpoint detected howling or screeching audio.
func (m *DeviceInfo) SetHowlingEventCount(value *int32)() {
    if m == nil {
        return
    }
    m.howlingEventCount = value
}
// SetInitialSignalLevelRootMeanSquare sets the initialSignalLevelRootMeanSquare property value. The root mean square (RMS) of the incoming signal of up to the first 30 seconds of the call.
func (m *DeviceInfo) SetInitialSignalLevelRootMeanSquare(value *float32)() {
    if m == nil {
        return
    }
    m.initialSignalLevelRootMeanSquare = value
}
// SetLowSpeechLevelEventRatio sets the lowSpeechLevelEventRatio property value. Fraction of the call that the media endpoint detected low speech level that caused poor quality of the audio being sent.
func (m *DeviceInfo) SetLowSpeechLevelEventRatio(value *float32)() {
    if m == nil {
        return
    }
    m.lowSpeechLevelEventRatio = value
}
// SetLowSpeechToNoiseEventRatio sets the lowSpeechToNoiseEventRatio property value. Fraction of the call that the media endpoint detected low speech to noise level that caused poor quality of the audio being sent.
func (m *DeviceInfo) SetLowSpeechToNoiseEventRatio(value *float32)() {
    if m == nil {
        return
    }
    m.lowSpeechToNoiseEventRatio = value
}
// SetMicGlitchRate sets the micGlitchRate property value. Glitches per 5 minute interval for the media endpoint's microphone.
func (m *DeviceInfo) SetMicGlitchRate(value *float32)() {
    if m == nil {
        return
    }
    m.micGlitchRate = value
}
// SetReceivedNoiseLevel sets the receivedNoiseLevel property value. Average energy level of received audio for audio classified as mono noise or left channel of stereo noise by the media endpoint.
func (m *DeviceInfo) SetReceivedNoiseLevel(value *int32)() {
    if m == nil {
        return
    }
    m.receivedNoiseLevel = value
}
// SetReceivedSignalLevel sets the receivedSignalLevel property value. Average energy level of received audio for audio classified as mono speech, or left channel of stereo speech by the media endpoint.
func (m *DeviceInfo) SetReceivedSignalLevel(value *int32)() {
    if m == nil {
        return
    }
    m.receivedSignalLevel = value
}
// SetRenderDeviceDriver sets the renderDeviceDriver property value. Name of the render device driver used by the media endpoint.
func (m *DeviceInfo) SetRenderDeviceDriver(value *string)() {
    if m == nil {
        return
    }
    m.renderDeviceDriver = value
}
// SetRenderDeviceName sets the renderDeviceName property value. Name of the render device used by the media endpoint.
func (m *DeviceInfo) SetRenderDeviceName(value *string)() {
    if m == nil {
        return
    }
    m.renderDeviceName = value
}
// SetRenderMuteEventRatio sets the renderMuteEventRatio property value. Fraction of the call that media endpoint detected device render is muted.
func (m *DeviceInfo) SetRenderMuteEventRatio(value *float32)() {
    if m == nil {
        return
    }
    m.renderMuteEventRatio = value
}
// SetRenderNotFunctioningEventRatio sets the renderNotFunctioningEventRatio property value. Fraction of the call that the media endpoint detected the render device was not working properly.
func (m *DeviceInfo) SetRenderNotFunctioningEventRatio(value *float32)() {
if m != nil {
m.renderNotFunctioningEventRatio = value
}
}
// SetRenderZeroVolumeEventRatio sets the renderZeroVolumeEventRatio property value. Fraction of the call that media endpoint detected device render volume is set to 0.
func (m *DeviceInfo) SetRenderZeroVolumeEventRatio(value *float32)() {
if m != nil {
m.renderZeroVolumeEventRatio = value
}
}
// SetSentNoiseLevel sets the sentNoiseLevel property value. Average energy level of sent audio for audio classified as mono noise or left channel of stereo noise by the media endpoint.
func (m *DeviceInfo) SetSentNoiseLevel(value *int32)() {
if m != nil {
m.sentNoiseLevel = value
}
}
// SetSentSignalLevel sets the sentSignalLevel property value. Average energy level of sent audio for audio classified as mono speech, or left channel of stereo speech by the media endpoint.
func (m *DeviceInfo) SetSentSignalLevel(value *int32)() {
if m != nil {
m.sentSignalLevel = value
}
}
// SetSpeakerGlitchRate sets the speakerGlitchRate property value. Glitches per 5 minute internal for the media endpoint's loudspeaker.
func (m *DeviceInfo) SetSpeakerGlitchRate(value *float32)() {
if m != nil {
m.speakerGlitchRate = value
}
} | models/callrecords/device_info.go | 0.732305 | 0.5 | device_info.go | starcoder |
package tu
import (
"time"
)
var (
// Yesterday get yesterday datetime of given datetime
Yesterday TimeFunc = func(t time.Time) time.Time {
return t.AddDate(0, 0, -1)
}
// Tomorrow get tomorrow datetime of given datetime
Tomorrow TimeFunc = func(t time.Time) time.Time {
return t.AddDate(0, 0, 1)
}
// BeginningOfDay get beginning of day of given datetime
BeginningOfDay TimeFunc = func(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
}
// EndOfDay get end of day of given datetime
EndOfDay TimeFunc = func(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), 23, 59, 59, 999999999, t.Location())
}
// BeginningOfMonth get beginning of month of given datetime
BeginningOfMonth TimeFunc = func(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
}
// EndOfMonth get end of month of given datetime
EndOfMonth TimeFunc = func(t time.Time) time.Time {
tt := BeginningOfMonth(t.AddDate(0, 1, 0)).AddDate(0, 0, -1)
return time.Date(t.Year(), t.Month(), tt.Day(), 23, 59, 59, 999999999, t.Location())
}
// BeginningOfYear get beginning of year of given datetime
BeginningOfYear TimeFunc = func(t time.Time) time.Time {
return time.Date(t.Year(), 1, 1, 0, 0, 0, 0, t.Location())
}
// EndOfYear get end of year of given datetime
EndOfYear TimeFunc = func(t time.Time) time.Time {
return time.Date(t.Year(), 12, 31, 23, 59, 59, 999999999, t.Location())
}
)
// TimeFunc types of time utility function provides methods to compose utility functions
type TimeFunc func(time.Time) time.Time
// Yesterday composes the package-level Yesterday function with f, so the
// resulting TimeFunc applies f first and then shifts one day back.
func (f TimeFunc) Yesterday() TimeFunc {
	return func(t time.Time) time.Time {
		return Yesterday(f(t))
	}
}
// Tomorrow composes the package-level Tomorrow function with f.
func (f TimeFunc) Tomorrow() TimeFunc {
	return func(t time.Time) time.Time {
		return Tomorrow(f(t))
	}
}
// BeginningOfDay composes the package-level BeginningOfDay function with f.
func (f TimeFunc) BeginningOfDay() TimeFunc {
	return func(t time.Time) time.Time {
		return BeginningOfDay(f(t))
	}
}
// EndOfDay composes the package-level EndOfDay function with f.
func (f TimeFunc) EndOfDay() TimeFunc {
	return func(t time.Time) time.Time {
		return EndOfDay(f(t))
	}
}
// BeginningOfMonth composes the package-level BeginningOfMonth function with f.
func (f TimeFunc) BeginningOfMonth() TimeFunc {
	return func(t time.Time) time.Time {
		return BeginningOfMonth(f(t))
	}
}
// EndOfMonth composes the package-level EndOfMonth function with f.
func (f TimeFunc) EndOfMonth() TimeFunc {
	return func(t time.Time) time.Time {
		return EndOfMonth(f(t))
	}
}
// BeginningOfYear composes the package-level BeginningOfYear function with f.
func (f TimeFunc) BeginningOfYear() TimeFunc {
	return func(t time.Time) time.Time {
		return BeginningOfYear(f(t))
	}
}
// EndOfYear composes the package-level EndOfYear function with f.
func (f TimeFunc) EndOfYear() TimeFunc {
	return func(t time.Time) time.Time {
		return EndOfYear(f(t))
	}
}
// Of applies the composed TimeFunc f to the given datetime.
func (f TimeFunc) Of(t time.Time) time.Time {
	return f(t)
}
// Num provides methods to easily get the time before or after a specific
// datetime by N units of a duration, e.g. tu.Num(3).DaysAgo().
type Num int
// SecondsAgo provides a TimeFunc function to get the time n seconds ago.
func (n Num) SecondsAgo() TimeFunc {
	return func(t time.Time) time.Time {
		return t.Add(time.Duration(-int(n)) * time.Second)
	}
}
// SecondsLater provides a TimeFunc function to get the time n seconds later.
func (n Num) SecondsLater() TimeFunc {
	return func(t time.Time) time.Time {
		return t.Add(time.Duration(int(n)) * time.Second)
	}
}
// MinutesAgo provides a TimeFunc function to get the time n minutes ago.
func (n Num) MinutesAgo() TimeFunc {
	return func(t time.Time) time.Time {
		return t.Add(time.Duration(-int(n)) * time.Minute)
	}
}
// MinutesLater provides a TimeFunc function to get the time n minutes later.
func (n Num) MinutesLater() TimeFunc {
	return func(t time.Time) time.Time {
		return t.Add(time.Duration(int(n)) * time.Minute)
	}
}
// HoursAgo provides a TimeFunc function to get the time n hours ago.
func (n Num) HoursAgo() TimeFunc {
	return func(t time.Time) time.Time {
		return t.Add(time.Duration(-int(n)) * time.Hour)
	}
}
// HoursLater provides a TimeFunc function to get the time n hours later.
func (n Num) HoursLater() TimeFunc {
	return func(t time.Time) time.Time {
		return t.Add(time.Duration(int(n)) * time.Hour)
	}
}
// DaysAgo provides a TimeFunc function to get the time n days ago
// (calendar arithmetic via AddDate, which normalizes overflow).
func (n Num) DaysAgo() TimeFunc {
	return func(t time.Time) time.Time {
		return t.AddDate(0, 0, -int(n))
	}
}
// DaysLater provides a TimeFunc function to get the time n days later.
func (n Num) DaysLater() TimeFunc {
	return func(t time.Time) time.Time {
		return t.AddDate(0, 0, int(n))
	}
}
// MonthsAgo provides a TimeFunc function to get the time n months ago.
func (n Num) MonthsAgo() TimeFunc {
	return func(t time.Time) time.Time {
		return t.AddDate(0, -int(n), 0)
	}
}
// MonthsLater provides a TimeFunc function to get the time n months later.
func (n Num) MonthsLater() TimeFunc {
	return func(t time.Time) time.Time {
		return t.AddDate(0, int(n), 0)
	}
}
// YearsAgo provides a TimeFunc function to get the time n years ago.
func (n Num) YearsAgo() TimeFunc {
	return func(t time.Time) time.Time {
		return t.AddDate(-int(n), 0, 0)
	}
}
// YearsLater provides a TimeFunc function to get the time n years later.
func (n Num) YearsLater() TimeFunc {
	return func(t time.Time) time.Time {
		return t.AddDate(int(n), 0, 0)
	}
}
// N alias of Num
type N = Num | timeutil.go | 0.571288 | 0.619457 | timeutil.go | starcoder |
package cryptoapis
import (
"encoding/json"
)
// ListOmniTransactionsByBlockHashRI is a single result item of the
// "list Omni transactions by block hash" endpoint.
type ListOmniTransactionsByBlockHashRI struct {
	// Defines the amount of the sent tokens.
	Amount string `json:"amount"`
	// Defines whether the attribute can be divisible or not, as boolean. E.g., if it is "true", the attribute is divisible.
	Divisible bool `json:"divisible"`
	// Represents the hash of the block where this transaction was mined/confirmed for first time. The hash is defined as a cryptographic digital fingerprint made by hashing the block header twice through the SHA256 algorithm.
	MinedInBlockHash string `json:"minedInBlockHash"`
	// Represents the height of the block where this transaction was mined/confirmed for first time. The height is defined as the number of blocks in the blockchain preceding this specific block.
	MinedInBlockHeight int32 `json:"minedInBlockHeight"`
	// Represents the index position of the transaction in the specific block.
	PositionInBlock int32 `json:"positionInBlock"`
	// Represents the identifier of the tokens to send.
	PropertyId int32 `json:"propertyId"`
	// Represents an object of addresses that receive the transactions.
	Recipients []ListOmniTransactionsByAddressRIRecipients `json:"recipients"`
	// Represents an object of addresses that provide the funds.
	Senders []ListOmniTransactionsByAddressRISenders `json:"senders"`
	// Defines the exact date/time in Unix Timestamp when this transaction was mined, confirmed or first seen in Mempool, if it is unconfirmed.
	Timestamp int32 `json:"timestamp"`
	// Represents the unique identifier of a transaction, i.e. it could be `transactionId` in UTXO-based protocols like Bitcoin, and transaction `hash` in Ethereum blockchain.
	TransactionId string `json:"transactionId"`
	// Defines the type of the transaction as a string.
	Type string `json:"type"`
	// Defines the type of the transaction as a number.
	TypeInt int32 `json:"typeInt"`
	// Defines whether the transaction is valid or not, as boolean. E.g., if it is "true", the transaction is valid.
	Valid bool `json:"valid"`
	// Defines the specific version.
	Version int32 `json:"version"`
	Fee ListOmniTransactionsByBlockHashRIFee `json:"fee"`
}
// NewListOmniTransactionsByBlockHashRI instantiates a new ListOmniTransactionsByBlockHashRI object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by the API are set, but the set of arguments
// will change when the set of required properties is changed.
func NewListOmniTransactionsByBlockHashRI(amount string, divisible bool, minedInBlockHash string, minedInBlockHeight int32, positionInBlock int32, propertyId int32, recipients []ListOmniTransactionsByAddressRIRecipients, senders []ListOmniTransactionsByAddressRISenders, timestamp int32, transactionId string, type_ string, typeInt int32, valid bool, version int32, fee ListOmniTransactionsByBlockHashRIFee) *ListOmniTransactionsByBlockHashRI {
	this := ListOmniTransactionsByBlockHashRI{}
	this.Amount = amount
	this.Divisible = divisible
	this.MinedInBlockHash = minedInBlockHash
	this.MinedInBlockHeight = minedInBlockHeight
	this.PositionInBlock = positionInBlock
	this.PropertyId = propertyId
	this.Recipients = recipients
	this.Senders = senders
	this.Timestamp = timestamp
	this.TransactionId = transactionId
	this.Type = type_
	this.TypeInt = typeInt
	this.Valid = valid
	this.Version = version
	this.Fee = fee
	return &this
}
// NewListOmniTransactionsByBlockHashRIWithDefaults instantiates a new ListOmniTransactionsByBlockHashRI object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by the API are set.
func NewListOmniTransactionsByBlockHashRIWithDefaults() *ListOmniTransactionsByBlockHashRI {
	this := ListOmniTransactionsByBlockHashRI{}
	return &this
}
// Accessor methods, one Get/GetOk/Set triple per field: on a nil receiver,
// GetX returns the field type's zero value and GetXOk returns (nil, false).
// GetAmount returns the Amount field value.
func (o *ListOmniTransactionsByBlockHashRI) GetAmount() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Amount
}
// GetAmountOk returns a tuple with the Amount field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetAmountOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Amount, true
}
// SetAmount sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetAmount(v string) {
	o.Amount = v
}
// GetDivisible returns the Divisible field value.
func (o *ListOmniTransactionsByBlockHashRI) GetDivisible() bool {
	if o == nil {
		var ret bool
		return ret
	}
	return o.Divisible
}
// GetDivisibleOk returns a tuple with the Divisible field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetDivisibleOk() (*bool, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Divisible, true
}
// SetDivisible sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetDivisible(v bool) {
	o.Divisible = v
}
// GetMinedInBlockHash returns the MinedInBlockHash field value.
func (o *ListOmniTransactionsByBlockHashRI) GetMinedInBlockHash() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.MinedInBlockHash
}
// GetMinedInBlockHashOk returns a tuple with the MinedInBlockHash field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetMinedInBlockHashOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.MinedInBlockHash, true
}
// SetMinedInBlockHash sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetMinedInBlockHash(v string) {
	o.MinedInBlockHash = v
}
// GetMinedInBlockHeight returns the MinedInBlockHeight field value.
func (o *ListOmniTransactionsByBlockHashRI) GetMinedInBlockHeight() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.MinedInBlockHeight
}
// GetMinedInBlockHeightOk returns a tuple with the MinedInBlockHeight field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetMinedInBlockHeightOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.MinedInBlockHeight, true
}
// SetMinedInBlockHeight sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetMinedInBlockHeight(v int32) {
	o.MinedInBlockHeight = v
}
// GetPositionInBlock returns the PositionInBlock field value.
func (o *ListOmniTransactionsByBlockHashRI) GetPositionInBlock() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.PositionInBlock
}
// GetPositionInBlockOk returns a tuple with the PositionInBlock field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetPositionInBlockOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.PositionInBlock, true
}
// SetPositionInBlock sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetPositionInBlock(v int32) {
	o.PositionInBlock = v
}
// GetPropertyId returns the PropertyId field value.
func (o *ListOmniTransactionsByBlockHashRI) GetPropertyId() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.PropertyId
}
// GetPropertyIdOk returns a tuple with the PropertyId field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetPropertyIdOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.PropertyId, true
}
// SetPropertyId sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetPropertyId(v int32) {
	o.PropertyId = v
}
// GetRecipients returns the Recipients field value.
func (o *ListOmniTransactionsByBlockHashRI) GetRecipients() []ListOmniTransactionsByAddressRIRecipients {
	if o == nil {
		var ret []ListOmniTransactionsByAddressRIRecipients
		return ret
	}
	return o.Recipients
}
// GetRecipientsOk returns a tuple with the Recipients field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetRecipientsOk() (*[]ListOmniTransactionsByAddressRIRecipients, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Recipients, true
}
// SetRecipients sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetRecipients(v []ListOmniTransactionsByAddressRIRecipients) {
	o.Recipients = v
}
// GetSenders returns the Senders field value.
func (o *ListOmniTransactionsByBlockHashRI) GetSenders() []ListOmniTransactionsByAddressRISenders {
	if o == nil {
		var ret []ListOmniTransactionsByAddressRISenders
		return ret
	}
	return o.Senders
}
// GetSendersOk returns a tuple with the Senders field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetSendersOk() (*[]ListOmniTransactionsByAddressRISenders, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Senders, true
}
// SetSenders sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetSenders(v []ListOmniTransactionsByAddressRISenders) {
	o.Senders = v
}
// GetTimestamp returns the Timestamp field value.
func (o *ListOmniTransactionsByBlockHashRI) GetTimestamp() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.Timestamp
}
// GetTimestampOk returns a tuple with the Timestamp field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetTimestampOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Timestamp, true
}
// SetTimestamp sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetTimestamp(v int32) {
	o.Timestamp = v
}
// GetTransactionId returns the TransactionId field value.
func (o *ListOmniTransactionsByBlockHashRI) GetTransactionId() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.TransactionId
}
// GetTransactionIdOk returns a tuple with the TransactionId field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetTransactionIdOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.TransactionId, true
}
// SetTransactionId sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetTransactionId(v string) {
	o.TransactionId = v
}
// GetType returns the Type field value.
func (o *ListOmniTransactionsByBlockHashRI) GetType() string {
	if o == nil {
		var ret string
		return ret
	}
	return o.Type
}
// GetTypeOk returns a tuple with the Type field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetTypeOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Type, true
}
// SetType sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetType(v string) {
	o.Type = v
}
// GetTypeInt returns the TypeInt field value.
func (o *ListOmniTransactionsByBlockHashRI) GetTypeInt() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.TypeInt
}
// GetTypeIntOk returns a tuple with the TypeInt field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetTypeIntOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.TypeInt, true
}
// SetTypeInt sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetTypeInt(v int32) {
	o.TypeInt = v
}
// GetValid returns the Valid field value.
func (o *ListOmniTransactionsByBlockHashRI) GetValid() bool {
	if o == nil {
		var ret bool
		return ret
	}
	return o.Valid
}
// GetValidOk returns a tuple with the Valid field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetValidOk() (*bool, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Valid, true
}
// SetValid sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetValid(v bool) {
	o.Valid = v
}
// GetVersion returns the Version field value.
func (o *ListOmniTransactionsByBlockHashRI) GetVersion() int32 {
	if o == nil {
		var ret int32
		return ret
	}
	return o.Version
}
// GetVersionOk returns a tuple with the Version field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetVersionOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Version, true
}
// SetVersion sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetVersion(v int32) {
	o.Version = v
}
// GetFee returns the Fee field value.
func (o *ListOmniTransactionsByBlockHashRI) GetFee() ListOmniTransactionsByBlockHashRIFee {
	if o == nil {
		var ret ListOmniTransactionsByBlockHashRIFee
		return ret
	}
	return o.Fee
}
// GetFeeOk returns a tuple with the Fee field value
// and a boolean to check if the value has been set.
func (o *ListOmniTransactionsByBlockHashRI) GetFeeOk() (*ListOmniTransactionsByBlockHashRIFee, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Fee, true
}
// SetFee sets field value.
func (o *ListOmniTransactionsByBlockHashRI) SetFee(v ListOmniTransactionsByBlockHashRIFee) {
	o.Fee = v
}
// MarshalJSON implements json.Marshaler. Every field of this model is
// required by the API, so all of them are serialized unconditionally.
// (The generated `if true { ... }` guards around each field were dead
// code and have been removed; the resulting JSON is identical.)
func (o ListOmniTransactionsByBlockHashRI) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"amount":             o.Amount,
		"divisible":          o.Divisible,
		"minedInBlockHash":   o.MinedInBlockHash,
		"minedInBlockHeight": o.MinedInBlockHeight,
		"positionInBlock":    o.PositionInBlock,
		"propertyId":         o.PropertyId,
		"recipients":         o.Recipients,
		"senders":            o.Senders,
		"timestamp":          o.Timestamp,
		"transactionId":      o.TransactionId,
		"type":               o.Type,
		"typeInt":            o.TypeInt,
		"valid":              o.Valid,
		"version":            o.Version,
		"fee":                o.Fee,
	}
	return json.Marshal(toSerialize)
}
type NullableListOmniTransactionsByBlockHashRI struct {
value *ListOmniTransactionsByBlockHashRI
isSet bool
}
func (v NullableListOmniTransactionsByBlockHashRI) Get() *ListOmniTransactionsByBlockHashRI {
return v.value
}
func (v *NullableListOmniTransactionsByBlockHashRI) Set(val *ListOmniTransactionsByBlockHashRI) {
v.value = val
v.isSet = true
}
func (v NullableListOmniTransactionsByBlockHashRI) IsSet() bool {
return v.isSet
}
func (v *NullableListOmniTransactionsByBlockHashRI) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableListOmniTransactionsByBlockHashRI(val *ListOmniTransactionsByBlockHashRI) *NullableListOmniTransactionsByBlockHashRI {
return &NullableListOmniTransactionsByBlockHashRI{value: val, isSet: true}
}
func (v NullableListOmniTransactionsByBlockHashRI) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableListOmniTransactionsByBlockHashRI) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | model_list_omni_transactions_by_block_hash_ri.go | 0.858289 | 0.444565 | model_list_omni_transactions_by_block_hash_ri.go | starcoder |
package structs
/* S t r u c t d e f i n i t i o n */
// StructRows represents a single row of a struct from a StructValue containing
// a slice of structs. If the StructValue does not contain a slice of structs,
// a StructRows cannot be initialized by the constructor Rows. StructRows
// encapsulates high level functions around the elements of a slice of structs.
type StructRows struct {
	rownum int // index of the current row within the slice of structs
	StructValue // embedded copy; StructRows inherits all its fields and methods
}
/* C o n s t r u c t o r */
// Rows returns an iterator over the slice of structs wrapped by s.
// Rows returns a nil iterator together with ErrNoRows when the slice is
// empty, or ErrNoStructs when s does not wrap a slice of structs at all.
func (s *StructValue) Rows() (*StructRows, error) {
	if s.Multiple() {
		if s.rows.Len() > 0 {
			// Start the cursor at OutOfRange (-1) so that the first call
			// to Next advances to row 0.
			return &StructRows{OutOfRange, *s}, nil
		}
		return nil, ErrNoRows
	}
	return nil, ErrNoStructs
}
/* I m p l e m e n t a t i o n */
// Index returns the position of the current struct within the slice of
// structs, or OutOfRange (i.e. -1) once the rows have been closed.
func (r *StructRows) Index() int {
	if r.isClosed() {
		return OutOfRange
	}
	return r.rownum
}
// Len returns the number of elements in the slice of structs, or
// OutOfRange (i.e. -1) once the rows have been closed.
func (r *StructRows) Len() int {
	if r.isClosed() {
		return OutOfRange
	}
	return r.rows.Len()
}
// MaxRow returns the index of the last element in the slice of structs, or
// OutOfRange (i.e. -1) once the rows have been closed.
func (r *StructRows) MaxRow() int {
	if r.isClosed() {
		return OutOfRange
	}
	return r.Len() - 1
}
// Columns returns the field names of the current struct.
// It returns ErrRowsClosed once the rows have been closed.
func (r *StructRows) Columns() ([]string, error) {
	if r.isClosed() {
		return nil, ErrRowsClosed
	}
	return r.Fields().Names(), nil
}
// Next prepares the next result row for reading an element from the slice of
// structs. It returns true on success, or false if there is no next result
// row or an error happened while preparing it. Err should be consulted to
// distinguish between the two cases.
func (r *StructRows) Next() bool {
	if !r.isClosed() {
		if i := r.rownum + 1; i < r.Len() {
			err := r.getRow(i)
			if err == nil {
				r.rownum = i // confirm the new row number only after getRow succeeded
				return true
			}
		}
	}
	return false
}
// Err returns the error, if any, that was encountered during iteration.
// Err may be called after an explicit or implicit Close.
// Reading the error also clears it: the swap below hands the stored error to
// the caller and resets r.Error to nil, so a second call reports no error.
func (r *StructRows) Err() (err error) {
	if r.Error != nil {
		err, r.Error = r.Error, err // swap values (err starts out nil)
	}
	return err
}
// Close closes the Rows, preventing further enumeration. If Next is called
// and returns false and there are no further result rows,
// the Rows are closed automatically and it will suffice to check the
// result of Err. Close is idempotent and does not affect the result of Err.
// It simply delegates to the embedded StructValue's destroy.
func (r *StructRows) Close() error {
	return r.destroy()
}
/* U n e x p o r t e d */
// isClosed returns true if r is not closed and false if it is.
// Closure prevents further enumeration of StructRows.
func (r *StructRows) isClosed() bool {
return !r.IsValid()
} | rows.go | 0.742141 | 0.438485 | rows.go | starcoder |
package bench
import (
"fmt"
"os"
"time"
"github.com/codahale/hdrhistogram"
)
// Summary contains the results of a Benchmark run.
type Summary struct {
	// Connections and RequestRate echo the benchmark configuration
	// (presumably set by the Benchmark runner — confirm at the call site).
	Connections uint64
	RequestRate uint64
	// SuccessTotal and ErrorTotal count completed requests by outcome.
	SuccessTotal uint64
	ErrorTotal uint64
	// TimeElapsed is the duration of the run.
	TimeElapsed time.Duration
	// Latency histograms; the "Uncorrected" variants do not compensate for
	// coordinated omission (see GenerateLatencyDistribution).
	SuccessHistogram *hdrhistogram.Histogram
	UncorrectedSuccessHistogram *hdrhistogram.Histogram
	ErrorHistogram *hdrhistogram.Histogram
	UncorrectedErrorHistogram *hdrhistogram.Histogram
	// Throughput is the achieved request rate, in requests per second
	// (rendered with the "/s" suffix by String).
	Throughput float64
}
// String returns a stringified version of the Summary.
func (s *Summary) String() string {
	requestTotal := s.SuccessTotal + s.ErrorTotal
	return fmt.Sprintf(
		"\n{Connections: %d, RequestRate: %d, RequestTotal: %d, SuccessTotal: %d, ErrorTotal: %d, TimeElapsed: %s, Throughput: %.2f/s}",
		s.Connections, s.RequestRate, requestTotal, s.SuccessTotal, s.ErrorTotal, s.TimeElapsed, s.Throughput)
}
// GenerateLatencyDistribution generates a text file containing the specified
// latency distribution in a format plottable by
// http://hdrhistogram.github.io/HdrHistogram/plotFiles.html. Percentiles is a
// list of percentiles to include, e.g. 10.0, 50.0, 99.0, 99.99, etc. If
// percentiles is nil, it defaults to a logarithmic percentile scale. If a
// request rate was specified for the benchmark, this will also generate an
// uncorrected distribution file which does not account for coordinated
// omission. It is a thin wrapper around generateLatencyDistribution using
// the success histograms.
func (s *Summary) GenerateLatencyDistribution(percentiles Percentiles, file string) error {
	return generateLatencyDistribution(s.SuccessHistogram, s.UncorrectedSuccessHistogram, s.RequestRate, percentiles, file)
}
// GenerateErrorLatencyDistribution generates a text file containing the specified
// latency distribution (of requests that resulted in errors) in a format plottable by
// http://hdrhistogram.github.io/HdrHistogram/plotFiles.html. Percentiles is a
// list of percentiles to include, e.g. 10.0, 50.0, 99.0, 99.99, etc. If
// percentiles is nil, it defaults to a logarithmic percentile scale. If a
// request rate was specified for the benchmark, this will also generate an
// uncorrected distribution file which does not account for coordinated
// omission. It is a thin wrapper around generateLatencyDistribution using
// the error histograms.
func (s *Summary) GenerateErrorLatencyDistribution(percentiles Percentiles, file string) error {
	return generateLatencyDistribution(s.ErrorHistogram, s.UncorrectedErrorHistogram, s.RequestRate, percentiles, file)
}
// getOneByPercentile maps a percentile p (in the range 0-100) to 1/(1-p/100),
// the x-axis value used by HdrHistogram percentile plots. For p >= 100 the
// ratio would be infinite, so a large sentinel (10,000,000) is returned.
func getOneByPercentile(percentile float64) float64 {
	if percentile >= 100 {
		return 10000000
	}
	return 1 / (1 - percentile/100)
}
// generateLatencyDistribution writes the latency distribution recorded in
// histogram to file in the HdrHistogram plotFiles format. If requestRate > 0,
// it additionally writes unHistogram (which does not account for coordinated
// omission) to "uncorrected_" + file. A nil percentiles defaults to the
// Logarithmic scale.
func generateLatencyDistribution(histogram, unHistogram *hdrhistogram.Histogram, requestRate uint64, percentiles Percentiles, file string) error {
	if percentiles == nil {
		percentiles = Logarithmic
	}
	if err := writeDistribution(histogram, percentiles, file); err != nil {
		return err
	}
	// Generate the uncorrected distribution only when a target request rate
	// was specified for the benchmark.
	if requestRate > 0 {
		return writeDistribution(unHistogram, percentiles, "uncorrected_"+file)
	}
	return nil
}

// writeDistribution writes one histogram's percentile distribution to file in
// a format plottable by http://hdrhistogram.github.io/HdrHistogram/plotFiles.html.
// Latency values are reported in milliseconds (raw values are nanoseconds).
func writeDistribution(histogram *hdrhistogram.Histogram, percentiles Percentiles, file string) error {
	f, err := os.Create(file)
	if err != nil {
		return err
	}
	defer f.Close()
	if _, err := f.WriteString("Value Percentile TotalCount 1/(1-Percentile)\n\n"); err != nil {
		return err
	}
	totalCount := histogram.TotalCount()
	for _, percentile := range percentiles {
		value := float64(histogram.ValueAtQuantile(percentile)) / 1000000
		oneByPercentile := getOneByPercentile(percentile)
		// Round half up to the nearest whole count at this percentile.
		countAtPercentile := int64(((percentile / 100) * float64(totalCount)) + 0.5)
		if _, err := f.WriteString(fmt.Sprintf("%f %f %d %f\n",
			value, percentile/100, countAtPercentile, oneByPercentile)); err != nil {
			return err
		}
	}
	return nil
}
// merge the other Summary into this one.
func (s *Summary) merge(o *Summary) {
if o.TimeElapsed > s.TimeElapsed {
s.TimeElapsed = o.TimeElapsed
}
s.SuccessHistogram.Merge(o.SuccessHistogram)
s.UncorrectedSuccessHistogram.Merge(o.UncorrectedSuccessHistogram)
s.ErrorHistogram.Merge(o.ErrorHistogram)
s.UncorrectedErrorHistogram.Merge(o.UncorrectedErrorHistogram)
s.SuccessTotal += o.SuccessTotal
s.ErrorTotal += o.ErrorTotal
s.Throughput += o.Throughput
s.RequestRate += o.RequestRate
} | latency/bench/summary.go | 0.764804 | 0.478468 | summary.go | starcoder |
package transforms
import (
"fmt"
"image"
"unsafe"
torch "github.com/wangkuiyi/gotorch"
)
// ToTensorTransformer transforms an image or an integer into a Tensor. If the
// image is of type image.Gray or image.Gray16, the tensor has one channel;
// otherwise, the tensor has three channels (RGB).
type ToTensorTransformer struct{}
// ToTensor returns a new ToTensorTransformer.
func ToTensor() *ToTensorTransformer {
	return &ToTensorTransformer{}
}
// Run executes the ToTensorTransformer and returns a Tensor.
// Supported inputs are image.Image values and plain ints (e.g. class labels);
// any other type panics. The type switch already binds the typed value to v,
// so the previous redundant obj.(...) re-assertions have been removed.
func (t ToTensorTransformer) Run(obj interface{}) torch.Tensor {
	switch v := obj.(type) {
	case image.Image:
		return imageToTensor(v)
	case int:
		return intToTensor(v)
	default:
		panic(fmt.Sprintf("ToTensorTransformer can not transform the input type: %T", v))
	}
}
// ToTensor transform c.f. https://github.com/pytorch/vision/blob/ba1b22125723f3719a3c38a2fe7cd6fb77657c57/torchvision/transforms/functional.py#L45
// imageToTensor dispatches on the concrete image type: grayscale images
// (*image.Gray, *image.Gray16) become a single-channel tensor; every other
// image type becomes a 3-channel (RGB) tensor.
func imageToTensor(img image.Image) torch.Tensor {
	switch img.(type) {
	case *image.Gray, *image.Gray16:
		return grayImageToTensor(img)
	}
	return colorImageToTensor(img)
}
// colorImageToTensor converts a color image into a 3xHxW float32 tensor
// with RGB values normalized to [0, 1].
// NOTE(review): assumes the image bounds start at (0, 0) — confirm for
// cropped (non-zero Min) images.
func colorImageToTensor(img image.Image) torch.Tensor {
	maxX, maxY := img.Bounds().Max.X, img.Bounds().Max.Y
	array := make([]float32, maxY*maxX*3) // 3 channels
	// Convert pixels into the HWC format
	// RGBA() yields 16-bit color components, hence the 0xffff divisor.
	const denom = float32(0xffff)
	i := 0
	for y := 0; y < maxY; y++ {
		for x := 0; x < maxX; x++ {
			r, g, b, _ := img.At(x, y).RGBA()
			array[i] = float32(r) / denom
			i++
			array[i] = float32(g) / denom
			i++
			array[i] = float32(b) / denom
			i++
		}
	}
	// Build an HxWx3 tensor, then permute to channel-first (3xHxW).
	hwc := torch.FromBlob(unsafe.Pointer(&array[0]), torch.Float,
		[]int64{int64(maxY), int64(maxX), 3})
	return hwc.Permute([]int64{2, 0, 1})
}
// grayImageToTensor converts a grayscale image into an HxW float32
// tensor with values normalized to [0, 1].  Only the R component is
// sampled; for *image.Gray/*image.Gray16 all components carry the same
// gray value.
func grayImageToTensor(img image.Image) torch.Tensor {
	maxX, maxY := img.Bounds().Max.X, img.Bounds().Max.Y
	array := make([]float32, maxY*maxX) // 1 channel
	// Convert pixels into the HWC format
	// RGBA() yields 16-bit components, hence the 0xffff divisor.
	const denom = float32(0xffff)
	i := 0
	for y := 0; y < maxY; y++ {
		for x := 0; x < maxX; x++ {
			r, _, _, _ := img.At(x, y).RGBA()
			array[i] = float32(r) / denom
			i++
		}
	}
	return torch.FromBlob(unsafe.Pointer(&array[0]), torch.Float,
		[]int64{int64(maxY), int64(maxX)})
}
func intToTensor(x int) torch.Tensor {
array := make([]int32, 1)
array[0] = int32(x)
return torch.FromBlob(unsafe.Pointer(&array[0]), torch.Int, []int64{1})
} | vision/transforms/to_tensor.go | 0.897904 | 0.62939 | to_tensor.go | starcoder |
// pair identifies a pair of words; kept for the commented-out distance
// cache below.
type pair struct {
	w1, w2 string
}

// WordDistance answers shortest-distance queries between two words of a
// dictionary by pre-indexing every word's positions.
type WordDistance struct {
	wordsLocMap map[string][]int // word -> ascending indexes of its occurrences
	//cacheMinDist map[pair]int
}

// Constructor builds the word -> positions index.
// time: O(n)
// space: O(n) where n = size of constructor input
func Constructor(wordsDict []string) WordDistance {
	wordsLocMap := make(map[string][]int, len(wordsDict))
	for idx, word := range wordsDict {
		// append on a missing key starts from a nil slice, so no
		// existence check is required.
		wordsLocMap[word] = append(wordsLocMap[word], idx)
	}
	return WordDistance{wordsLocMap: wordsLocMap}
}
/*
Shortest returns the minimum index distance between any occurrence of
word1 and any occurrence of word2 (both are guaranteed to be in the
dictionary).  It merges the two sorted position lists with two pointers.
note: At most 5000 calls will be made to shortest.
- it's good to cache the distances and not repeat the same calculation again
time: O(lenW1locs + lenW2locs) where m = lenW1 idx locs; n = lenW2locs
space: O(1)
*/
func (this *WordDistance) Shortest(word1 string, word2 string) int {
	//Q: should we cache the min distance? question does not make clear whether
	//we'll have repeated queries or not
	/*
		if _, ok := this.cacheMinDist[pair{word1, word2}]; ok {
			return this.cacheMinDist[pair{word1, word2}]
		}
		if _, ok := this.cacheMinDist[pair{word2, word1}]; ok {
			return this.cacheMinDist[pair{word2, word1}]
		}
	*/
	// Start from the maximum int32 value; any real distance is smaller.
	shortestDist := (1 << 31)-1
	//word1 and word2 are in wordsDict.
	word1Locs, word2Locs := this.wordsLocMap[word1], this.wordsLocMap[word2]
	w1Idx, w2Idx := 0, 0
	for w1Idx != len(word1Locs) && w2Idx != len(word2Locs) {
		distW1 := word1Locs[w1Idx]
		distW2 := word2Locs[w2Idx]
		shortestDist = min(shortestDist, abs(distW1 - distW2))
		// 1 is the smallest possible distance between distinct words;
		// no need to scan further.
		if shortestDist == 1 { break }
		// Advance the pointer with the smaller index: moving it closer
		// to the other word is the only way to shrink the gap.
		if distW1 < distW2 {
			w1Idx++
		} else {
			w2Idx++
		}
	}
	// this.cacheMinDist[pair{word1, word2}] = shortestDist
	return shortestDist
}
// abs returns the absolute value of a.
func abs(a int) int {
	if a >= 0 {
		return a
	}
	return -a
}
func min(a, b int) int { if a <= b { return a}; return b }
/**
 * Your WordDistance object will be instantiated and called as such:
 * obj := Constructor(wordsDict);
 * param_1 := obj.Shortest(word1,word2);
 */
package un
import "reflect"
// init wires the reflection-based partition implementation into the
// exported typed function variables declared below.
func init() {
	MakePartition(&Partition)
	MakePartition(&PartitionInt)
	// MakePartition(&PartitionString)
	// MakePartition(&PartitionStringInt)
	// MakePartitionP(&PartitionP)
}
// Partition applies the given predicate to every element of a collection
// (slice or map) and returns two slices: the elements for which the
// predicate returned true, followed by those for which it returned false.
// If the collection is a Slice, the predicate arguments are *value, index*
// If the collection is a Map, the predicate arguments are *value, key*
// Predicates accept a value, and the index or key is an optional argument.
// var Partition func(func(value, i interface{}), interface{})
var Partition func(fn interface{}, slice_or_map interface{}) ([]interface{}, []interface{})

// var Partition func(interface{}, func(interface{}) bool) ([]interface{}, []interface{})

// // PartitionP Parallel Partition
// // *Concurrently* applies the given iterator function to partition element of a collection (slice or map).
// var PartitionP func(fn interface{}, slice_or_map interface{})

// PartitionInt applies the given function to every element of []int and
// returns the accepted and rejected elements as two []int slices.
// Function arguments are *value, index*.
// NOTE(review): unlike Partition, this signature's function has no bool
// return — confirm this type is what callPredicate expects.
var PartitionInt func(func(value, i int), []int) ([]int, []int)
// MakePartition implements a typed Partition function in the form Partition func(func(A, B), []A)
// by pointing the given function-variable pointer at the shared
// reflection-based partition implementation (via Maker).
func MakePartition(fn interface{}) {
	Maker(fn, partition)
}

// partitioner holds the state of a single partition operation.
type partitioner struct {
	fn  reflect.Value // predicate applied to each element
	col reflect.Value // the collection (slice or map) being partitioned
	t   reflect.Value // bucket of elements the predicate accepted
	f   reflect.Value // bucket of elements the predicate rejected
}
// partition is the reflection-based implementation shared by every typed
// Partition variable.  values carries the call arguments; values[1] is
// the collection.
func partition(values []reflect.Value) []reflect.Value {
	fn, col := extractArgs(values)
	kind := values[1].Kind()
	p := newPartitioner(fn, col, kind)
	return p.partition()
}

// newPartitioner builds a partitioner whose (initially empty) result
// buckets have an element type matching the input collection.
func newPartitioner(fn, col reflect.Value, kind reflect.Kind) *partitioner {
	t, f := makePartitions(col, kind)
	return &partitioner{fn: fn, col: col, t: t, f: f}
}
// partition walks the collection (slice or map), filling the two result
// buckets, and returns them as [true-bucket, false-bucket].
func (p *partitioner) partition() []reflect.Value {
	if p.isSlice() {
		p.partitionSlice()
	} else if p.isMap() {
		p.partitionMap()
	}
	return []reflect.Value{p.t, p.f}
}

// isSlice reports whether the collection is a slice.
func (p *partitioner) isSlice() bool {
	return p.col.Kind() == reflect.Slice
}

// isMap reports whether the collection is a map.
func (p *partitioner) isMap() bool {
	return p.col.Kind() == reflect.Map
}
// partitionSlice applies the predicate to every slice element, passing
// (value, index).
func (p *partitioner) partitionSlice() {
	for i := 0; i < p.col.Len(); i++ {
		val := p.col.Index(i)
		idx := reflect.ValueOf(i)
		p.partitionate(val, idx)
	}
}

// partitionMap applies the predicate to every map entry, passing
// (value, key).
func (p *partitioner) partitionMap() {
	for _, key := range p.col.MapKeys() {
		val := p.col.MapIndex(key)
		p.partitionate(val, key)
	}
}

// partitionate appends val to the "true" bucket when the predicate
// accepts it, and to the "false" bucket otherwise.
func (p *partitioner) partitionate(val, idx_or_key reflect.Value) {
	if ok := callPredicate(p.fn, val, idx_or_key); ok {
		p.t = reflect.Append(p.t, val)
	} else {
		p.f = reflect.Append(p.f, val)
	}
}
func makePartitions(col reflect.Value, kind reflect.Kind) (reflect.Value, reflect.Value) {
var t, f reflect.Value
if kind == reflect.Interface {
t = reflect.ValueOf(make([]interface{}, 0))
f = reflect.ValueOf(make([]interface{}, 0))
} else {
t = reflect.MakeSlice(col.Type(), 0, 0)
f = reflect.MakeSlice(col.Type(), 0, 0)
}
return t, f
} | partition.go | 0.690976 | 0.431524 | partition.go | starcoder |
package to
import (
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"strconv"
)
// String converts val to its string representation: booleans, integers
// and floats via strconv, strings and byte slices as-is, nil as "nil",
// and everything else as JSON (falling back to fmt's %v on a marshal
// error).
func String(val interface{}) string {
	switch v := val.(type) {
	case nil:
		return "nil"
	case string:
		return v
	case []byte:
		return string(v)
	case bool:
		return strconv.FormatBool(v)
	case int:
		return strconv.FormatInt(int64(v), 10)
	case int8:
		return strconv.FormatInt(int64(v), 10)
	case int16:
		return strconv.FormatInt(int64(v), 10)
	case int32:
		return strconv.FormatInt(int64(v), 10)
	case int64:
		return strconv.FormatInt(v, 10)
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint8:
		return strconv.FormatUint(uint64(v), 10)
	case uint16:
		return strconv.FormatUint(uint64(v), 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(v, 10)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	default:
		data, err := json.Marshal(v)
		if err != nil {
			return fmt.Sprintf("%v", v)
		}
		return string(data)
	}
}
// Atoi coerces val to an int64: booleans map to 1/0, numeric types are
// truncated toward zero, byte slices are parsed as plain integers,
// strings are first tried as booleans and then parsed as floats, and
// anything else is formatted with %v and parsed as a float.  All parse
// failures yield 0.
func Atoi(val interface{}) int64 {
	switch v := val.(type) {
	case nil:
		return 0
	case bool:
		if v {
			return 1
		}
		return 0
	case int:
		return int64(v)
	case int8:
		return int64(v)
	case int16:
		return int64(v)
	case int32:
		return int64(v)
	case int64:
		return v
	case uint:
		return int64(v)
	case uint8:
		return int64(v)
	case uint16:
		return int64(v)
	case uint32:
		return int64(v)
	case uint64:
		return int64(v)
	case float32:
		return int64(v)
	case float64:
		return int64(v)
	case []byte:
		n, _ := strconv.Atoi(string(v))
		return int64(n)
	case string:
		// Boolean spellings ("true", "1", "f", ...) map to 1/0.
		if b, err := strconv.ParseBool(v); err == nil {
			if b {
				return 1
			}
			return 0
		}
		f, _ := strconv.ParseFloat(v, 64)
		return int64(f)
	default:
		f, _ := strconv.ParseFloat(fmt.Sprintf("%v", v), 64)
		return int64(f)
	}
}
const hexTable = "0123456789ABCDEF"

// encode writes the space-separated "0xAB 0xCD ..." representation of
// src into dst, which must be at least encodedLen(len(src)) bytes long.
// It returns the number of bytes written, which is always
// encodedLen(len(src)).
func encode(dst, src []byte) int {
	for i, v := range src {
		dst[i*5+0] = '0'
		dst[i*5+1] = 'x'
		dst[i*5+2] = hexTable[v>>4]
		dst[i*5+3] = hexTable[v&0x0f]
		// A separating space follows every byte except the last.
		if i != len(src)-1 {
			dst[i*5+4] = ' '
		}
	}
	return encodedLen(len(src))
}

// encodedLen returns the length of an encoding of n source bytes:
// five bytes ("0xAB ") per input byte minus the trailing separator.
// It returns 0 for n == 0 (the naive n*5-1 would be -1, which would
// make callers panic in make()).
func encodedLen(n int) int {
	if n == 0 {
		return 0
	}
	return n*5 - 1
}
func getBytes(data interface{}) ([]byte, error) {
var buf bytes.Buffer
enc := gob.NewEncoder(&buf)
err := enc.Encode(data)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// HexString returns the hexadecimal encoding of src.
func HexString(v interface{}) string {
var hex string
src, err := getBytes(v)
if err != nil {
return hex
}
dst := make([]byte, encodedLen(len(src)))
encode(dst, src)
return string(dst)
} | to.go | 0.645455 | 0.459986 | to.go | starcoder |
package graphics
import (
"encoding/binary"
"github.com/go-gl/gl/v4.6-core/gl"
"github.com/mokiat/gomath/sprec"
"github.com/mokiat/lacking/data/buffer"
"github.com/mokiat/lacking/framework/opengl"
)
// skyboxMeshVertex is a skybox vertex: only a position is needed.
type skyboxMeshVertex struct {
	Position sprec.Vec3
}

// Serialize appends the vertex's X, Y and Z components as float32
// values to the plotter.
func (v skyboxMeshVertex) Serialize(plotter *buffer.Plotter) {
	plotter.PlotFloat32(v.Position.X)
	plotter.PlotFloat32(v.Position.Y)
	plotter.PlotFloat32(v.Position.Z)
}

// newSkyboxMesh returns an unallocated skybox mesh preconfigured to
// draw 36 indexed triangle vertices; call Allocate before rendering.
func newSkyboxMesh() *SkyboxMesh {
	return &SkyboxMesh{
		VertexBuffer:     opengl.NewBuffer(),
		IndexBuffer:      opengl.NewBuffer(),
		VertexArray:      opengl.NewVertexArray(),
		Primitive:        gl.TRIANGLES,
		IndexCount:       36,
		IndexOffsetBytes: 0,
	}
}

// SkyboxMesh holds the GPU resources for a unit-cube skybox.
type SkyboxMesh struct {
	VertexBuffer     *opengl.Buffer
	IndexBuffer      *opengl.Buffer
	VertexArray      *opengl.VertexArray
	Primitive        uint32
	IndexCount       int32
	IndexOffsetBytes int
}
// Allocate uploads the skybox cube geometry to the GPU: eight corner
// vertices and 36 uint16 indices (12 triangles, six faces), then builds
// a vertex array binding the positions to attribute 0.
func (m *SkyboxMesh) Allocate() {
	const vertexSize = 3 * 4 // three float32 components per vertex
	vertexPlotter := buffer.NewPlotter(
		make([]byte, vertexSize*8),
		binary.LittleEndian,
	)
	// Corners 0-3 lie on the z=+1 plane, 4-7 on the z=-1 plane.
	skyboxMeshVertex{
		Position: sprec.NewVec3(-1.0, 1.0, 1.0),
	}.Serialize(vertexPlotter)
	skyboxMeshVertex{
		Position: sprec.NewVec3(-1.0, -1.0, 1.0),
	}.Serialize(vertexPlotter)
	skyboxMeshVertex{
		Position: sprec.NewVec3(1.0, -1.0, 1.0),
	}.Serialize(vertexPlotter)
	skyboxMeshVertex{
		Position: sprec.NewVec3(1.0, 1.0, 1.0),
	}.Serialize(vertexPlotter)
	skyboxMeshVertex{
		Position: sprec.NewVec3(-1.0, 1.0, -1.0),
	}.Serialize(vertexPlotter)
	skyboxMeshVertex{
		Position: sprec.NewVec3(-1.0, -1.0, -1.0),
	}.Serialize(vertexPlotter)
	skyboxMeshVertex{
		Position: sprec.NewVec3(1.0, -1.0, -1.0),
	}.Serialize(vertexPlotter)
	skyboxMeshVertex{
		Position: sprec.NewVec3(1.0, 1.0, -1.0),
	}.Serialize(vertexPlotter)
	const indexSize = 1 * 2 // one uint16 per index
	indexPlotter := buffer.NewPlotter(
		make([]byte, indexSize*36),
		binary.LittleEndian,
	)
	// +Z face (corners 0,1,2,3)
	indexPlotter.PlotUint16(3)
	indexPlotter.PlotUint16(2)
	indexPlotter.PlotUint16(1)
	indexPlotter.PlotUint16(3)
	indexPlotter.PlotUint16(1)
	indexPlotter.PlotUint16(0)
	// -X face (corners 0,1,4,5)
	indexPlotter.PlotUint16(0)
	indexPlotter.PlotUint16(1)
	indexPlotter.PlotUint16(5)
	indexPlotter.PlotUint16(0)
	indexPlotter.PlotUint16(5)
	indexPlotter.PlotUint16(4)
	// +X face (corners 2,3,6,7)
	indexPlotter.PlotUint16(7)
	indexPlotter.PlotUint16(6)
	indexPlotter.PlotUint16(2)
	indexPlotter.PlotUint16(7)
	indexPlotter.PlotUint16(2)
	indexPlotter.PlotUint16(3)
	// -Z face (corners 4,5,6,7)
	indexPlotter.PlotUint16(4)
	indexPlotter.PlotUint16(5)
	indexPlotter.PlotUint16(6)
	indexPlotter.PlotUint16(4)
	indexPlotter.PlotUint16(6)
	indexPlotter.PlotUint16(7)
	// -Y face (corners 1,2,5,6)
	indexPlotter.PlotUint16(5)
	indexPlotter.PlotUint16(1)
	indexPlotter.PlotUint16(2)
	indexPlotter.PlotUint16(5)
	indexPlotter.PlotUint16(2)
	indexPlotter.PlotUint16(6)
	// +Y face (corners 0,3,4,7)
	indexPlotter.PlotUint16(0)
	indexPlotter.PlotUint16(4)
	indexPlotter.PlotUint16(7)
	indexPlotter.PlotUint16(0)
	indexPlotter.PlotUint16(7)
	indexPlotter.PlotUint16(3)
	// Upload both buffers as static (non-dynamic) data.
	vertexBufferInfo := opengl.BufferAllocateInfo{
		Dynamic: false,
		Data:    vertexPlotter.Data(),
	}
	m.VertexBuffer.Allocate(vertexBufferInfo)
	indexBufferInfo := opengl.BufferAllocateInfo{
		Dynamic: false,
		Data:    indexPlotter.Data(),
	}
	m.IndexBuffer.Allocate(indexBufferInfo)
	// Bind the position data (3 floats, tightly packed) to attribute 0.
	vertexArrayInfo := opengl.VertexArrayAllocateInfo{
		BufferBindings: []opengl.VertexArrayBufferBinding{
			{
				VertexBuffer: m.VertexBuffer,
				OffsetBytes:  0,
				StrideBytes:  3 * 4,
			},
		},
		Attributes: []opengl.VertexArrayAttribute{
			{
				Index:          0,
				ComponentCount: 3,
				ComponentType:  gl.FLOAT,
				Normalized:     false,
				OffsetBytes:    0,
				BufferBinding:  0,
			},
		},
		IndexBuffer: m.IndexBuffer,
	}
	m.VertexArray.Allocate(vertexArrayInfo)
}
func (m *SkyboxMesh) Release() {
m.VertexArray.Release()
m.IndexBuffer.Release()
m.VertexBuffer.Release()
} | framework/opengl/game/graphics/preset_skybox_mesh.go | 0.659186 | 0.451931 | preset_skybox_mesh.go | starcoder |
package data
import (
"github.com/wardlem/graphlite/util"
)
// error messages
const (
	nilVertex = "attempt to operate on a nil vertex"
)

// vertexDataSize is the serialized size of a vertex in bytes:
// 1 (class) + 4 (first outbound edge) + 4 (first inbound edge) +
// 4 (first attribute).
const vertexDataSize = 13

// Vertex is a node of the graph.  Its inbound and outbound edges form
// singly-linked lists headed by the in and out ids, lazily materialized
// into the inMap/outMap caches.
type Vertex struct {
	Id uint32 // The Id of the vertex
	class uint8 // Id of the class the vertex belongs to
	out uint32 // Id of the first outbound edge of the vertex
	in uint32 // Id of the first inbound edge of the vertex
	outMap edgeMap // map stores outbound edges by label
	inMap edgeMap // map stores inbound edges by label
	// attributable provides attribute storage (firstAtt et al.) —
	// defined elsewhere in this package.
	attributable
}
// constructVertex rebuilds a Vertex from its id and its serialized
// bytes (see vertexDataSize): byte 0 is the class id, bytes 1-4 the
// first outbound edge id, bytes 5-8 the first inbound edge id, and
// bytes 9-12 the first attribute id.
// Errors from BytesToUint32 are ignored — the slices are fixed 4-byte
// views, so conversion is expected to succeed.
func constructVertex (id uint32, bytes []byte) *Vertex {
	vertex := new(Vertex)
	class := bytes[0]
	out := bytes[1:5]
	in := bytes[5:9]
	attributes := bytes[9:13]
	vertex.Id = id;
	vertex.class = class;
	vertex.out, _ = util.BytesToUint32(out)
	vertex.in, _ = util.BytesToUint32(in)
	vertex.firstAtt, _ = util.BytesToUint32(attributes)
	return vertex;
}
// newVertex creates an unsaved vertex belonging to the given class.
func newVertex(class *Class) *Vertex {
	vertex := new(Vertex)
	vertex.class = class.Id
	return vertex
}

// Class returns the class the vertex belongs to, asserting that the
// vertex, graph and class store are all non-nil.
func (v *Vertex) Class(g *Graph) *Class {
	Assert(nilVertex, v != nil)
	Assert(nilGraph, g != nil)
	Assert(nilClassStore, g.classStore != nil)
	return g.classStore.Find(v.class)
}

// Returns the name of the class the vertex belongs to.
// NOTE(review): the error from Name is ignored, so a lookup failure
// yields an empty string — confirm that is acceptable to callers.
func (v *Vertex) ClassName(g *Graph) string {
	name, _ := v.Class(g).Name(g)
	return name
}
// FirstOut returns the head of the vertex's outbound edge list.
func (v *Vertex) FirstOut(g *Graph) *Edge {
	Assert(nilVertex, v != nil)
	Assert(nilGraph, g != nil)
	Assert(nilEdgeStore, g.edgeStore != nil)
	return g.edgeStore.Find(v.out)
}

// Out returns the vertex's outbound edges grouped by label, walking the
// outbound linked list and caching the result on first use.
func (v *Vertex) Out(g *Graph) edgeMap {
	if v.outMap == nil {
		e := v.FirstOut(g)
		m := make(edgeMap)
		for e != nil {
			m.add(e, g)
			e = e.OutNext(g)
		}
		v.outMap = m
	}
	return v.outMap
}

// FirstIn returns the head of the vertex's inbound edge list.
func (v *Vertex) FirstIn(g *Graph) *Edge {
	Assert(nilVertex, v != nil)
	Assert(nilGraph, g != nil)
	Assert(nilEdgeStore, g.edgeStore != nil)
	return g.edgeStore.Find(v.in)
}

// In returns the vertex's inbound edges grouped by label, walking the
// inbound linked list and caching the result on first use.
func (v *Vertex) In(g *Graph) edgeMap {
	if v.inMap == nil {
		e := v.FirstIn(g)
		m := make(edgeMap)
		for e != nil {
			m.add(e, g)
			e = e.InNext(g)
		}
		v.inMap = m
	}
	return v.inMap
}
// RemoveOutboundEdge unlinks e from the vertex's outbound edge list:
// either the list head (v.out) is advanced past e, or the predecessor
// edge's outNext pointer is patched.  Mutated records are tracked so
// their stores persist them, and e is dropped from the cached map.
func (v *Vertex) RemoveOutboundEdge(e *Edge, g *Graph) {
	m := v.Out(g)
	if v.out == e.Id {
		v.out = e.outNext
		g.vertexStore.Track(v)
	} else {
	OutMapLoop:
		// Scan the cached edges for the predecessor that links to e.
		for _, list := range m {
			for _, edge := range list {
				if edge.outNext == e.Id {
					edge.outNext = e.outNext
					g.edgeStore.Track(edge)
					break OutMapLoop
				}
			}
		}
	}
	m.remove(e, g)
}
// RemoveInboundEdge unlinks e from the vertex's inbound edge list:
// either the list head (v.in) is advanced past e, or the predecessor
// edge's inNext pointer is patched.  Mutated records are tracked so
// their stores persist them, and e is dropped from the cached map.
func (v *Vertex) RemoveInboundEdge(e *Edge, g *Graph) {
	m := v.In(g)
	if v.in == e.Id {
		v.in = e.inNext
		g.vertexStore.Track(v)
	} else {
	InMapLoop:
		for _, list := range m {
			for _, edge := range list {
				if edge.inNext == e.Id {
					edge.inNext = e.inNext
					// Track the rewritten predecessor so the change is
					// persisted — previously missing here, although the
					// outbound counterpart (RemoveOutboundEdge) does it.
					g.edgeStore.Track(edge)
					break InMapLoop
				}
			}
		}
	}
	m.remove(e, g)
}
func (v *Vertex) RemoveEdge(e *Edge, g *Graph) {
if (e.from == v.Id) {
v.RemoveOutboundEdge(e, g)
}
if (e.to == v.Id) {
v.RemoveInboundEdge(e, g)
}
}
func (v *Vertex) track(g *Graph) {
g.vertexStore.Track(v)
} | data/vertex.go | 0.774754 | 0.567637 | vertex.go | starcoder |
package design
import (
. "github.com/goadesign/goa/design"
. "github.com/goadesign/goa/design/apidsl"
)
// Input Type.This defines the shape of the request.
// DepositPayload defines the data structure used in the create deposit request body
// It is also the base type for the deposit media type used to render deposits.
var DepositPayload = Type("DepositPayload", func() {
	// Fixed: the description previously said "DepositsPayload", which
	// did not match the type's actual name.
	Description("DepositPayload is the type used to create deposits.")
	Attribute("acquirerCountryCode", Integer, "Country of the originator BIN.", func() {
		Example(643)
	})
	Attribute("acquiringBin", Integer, "BIN number identifies the originator of cash in transaction.", func() {
		Example(400171)
	})
	Attribute("amount", Integer, "Transaction amount in agent currency", func() {
		Example(12400)
	})
	Attribute("businessApplicationId", String, "This field is populated with business application identifier for the transaction.", func() {
		MinLength(2)
		MaxLength(2)
		Example("CI")
	})
	Attribute("localTransactionDateTime", DateTime, "The date and time the transaction takes place, ", func() {
	})
	Attribute("merchantCategoryCode", Integer, "Originators should populate 6012 for mVisa transaction.", func() {
		Example(4829)
	})
	Attribute("recipientPrimaryAccountNumber", String, "Consumer PAN.16-digit PAN as provided by the consumer to agent.", func() {
		MinLength(13)
		MaxLength(19)
		Example("4123640062698797")
	})
	Attribute("retrievalReferenceNumber", String, "Matches message to others within a given transaction", func() {
		MinLength(12)
		MaxLength(12)
		Example("430000367618")
	})
	Attribute("senderAccountNumber", String, "mVisa cash-in transactions", func() {
		Example("4541237895236")
	})
	Attribute("senderName", String, "Name of agents business name", func() {
		MaxLength(30)
		Example("<NAME>")
	})
	Attribute("senderReference", String, "A reference number unique to the agent", func() {
		MaxLength(16)
		Example("1234")
	})
	Attribute("systemsTraceAuditNumber", Integer, "Key data element", func() {
		Example(313042)
	})
	Attribute("transactionCurrencyCode", String, "Currency code", func() {
		MinLength(3)
		MaxLength(3)
		Example("USD")
	})
	Attribute("transactionIdentifier", Integer, "VisaNet reference Number for the transaction", func() {
		// transactionIdentifier is a Visa generated field that client receives in the response message.
	})
})
// WithdrawalPayload defines the data structure used in the create withdrawal request body
// It is also the base type for the withdrawal media type used to render withdrawals.
// Its attributes mirror DepositPayload's.
var WithdrawalPayload = Type("WithdrawalPayload", func() {
	Description("WithdrawalPayload is the type used to create a withdrawal.")
	Attribute("acquirerCountryCode", Integer, "Country of the originator BIN.", func() {
		Example(643)
	})
	Attribute("acquiringBin", Integer, "BIN number identifies the originator of cash in transaction.", func() {
		Example(400171)
	})
	Attribute("amount", Integer, "Transaction amount in agent currency", func() {
		Example(12400)
	})
	Attribute("businessApplicationId", String, "This field is populated with business application identifier for the transaction.", func() {
		MinLength(2)
		MaxLength(2)
		Example("CI")
	})
	Attribute("localTransactionDateTime", DateTime, "The date and time the transaction takes place, ", func() {
	})
	Attribute("merchantCategoryCode", Integer, "Originators should populate 6012 for mVisa transaction.", func() {
		Example(4829)
	})
	Attribute("recipientPrimaryAccountNumber", String, "Consumer PAN.16-digit PAN as provided by the consumer to agent.", func() {
		MinLength(13)
		MaxLength(19)
		Example("4123640062698797")
	})
	Attribute("retrievalReferenceNumber", String, "Matches message to others within a given transaction", func() {
		MinLength(12)
		MaxLength(12)
		Example("430000367618")
	})
	// NOTE(review): this description mentions "cash-in" although the
	// payload models a withdrawal — confirm against the API spec.
	Attribute("senderAccountNumber", String, "mVisa cash-in transactions", func() {
		Example("4541237895236")
	})
	Attribute("senderName", String, "Name of agents business name", func() {
		MaxLength(30)
		Example("<NAME>")
	})
	Attribute("senderReference", String, "A reference number unique to the agent", func() {
		MaxLength(16)
		Example("1234")
	})
	Attribute("systemsTraceAuditNumber", Integer, "Key data element", func() {
		Example(313042)
	})
	Attribute("transactionCurrencyCode", String, "Currency code", func() {
		MinLength(3)
		MaxLength(3)
		Example("USD")
	})
	Attribute("transactionIdentifier", Integer, "VisaNet reference Number for the transaction", func() {
		// transactionIdentifier is a Visa generated field that client receives in the response message.
	})
})
// PaymentPayload defines the data structure used in the create payment request body
// It is also the base type for the payment media type used to render payments.
var PaymentPayload = Type("PaymentPayload", func() {
	Description("PaymentPayload is the type used to create a payment.")
	Attribute("acquirerCountryCode", Integer, "Country of the originator BIN.", func() {
		Example(643)
	})
	Attribute("acquiringBin", Integer, "BIN number identifies the originator of cash in transaction.", func() {
		Example(400171)
	})
	Attribute("amount", Integer, "Transaction amount in agent currency", func() {
		Example(12400)
	})
	Attribute("businessApplicationId", String, "This field is populated with business application identifier for the transaction.", func() {
		MinLength(2)
		MaxLength(2)
		Example("CI")
	})
	// cardAcceptor (address, city, country, idCode, name) is not yet
	// modeled — kept here as a placeholder.
	//Attribute(cardAcceptor)
	//address
	//city
	//country
	//idCode
	//name
	Attribute("feeProgramIndicator", String, "Optional field populated by recipient", func() {
		MinLength(3)
		MaxLength(3)
	})
	Attribute("localTransactionDateTime", DateTime, "The date and time the transaction takes place, ", func() {
	})
	// purchaseIdentifier (referenceNumber, type) is not yet modeled.
	// Attribute(purchaseIdentifier)
	// referenceNumber
	// type
	Attribute("recipientName", String, "Recipients Name", func() {
		MaxLength(30)
	})
	Attribute("recipientPrimaryAccountNumber", String, "Consumer PAN.16-digit PAN as provided by the consumer to agent.", func() {
		MinLength(13)
		MaxLength(19)
		Example("4123640062698797")
	})
	Attribute("retrievalReferenceNumber", String, "Matches message to others within a given transaction", func() {
		MinLength(12)
		MaxLength(12)
		Example("430000367618")
	})
	Attribute("secondaryId", String, "Obtains additional data along with the payment instruction", func() {
		MaxLength(28)
	})
	Attribute("senderAccountNumber", String, "mVisa cash-in transactions", func() {
		Example("4541237895236")
	})
	Attribute("senderName", String, "Name of agents business name", func() {
		MaxLength(30)
		Example("<NAME>")
	})
	Attribute("senderReference", String, "A reference number unique to the agent", func() {
		MaxLength(16)
		Example("1234")
	})
	Attribute("systemsTraceAuditNumber", Integer, "Key data element", func() {
		Example(313042)
	})
	Attribute("transactionCurrencyCode", String, "Currency code", func() {
		MinLength(3)
		MaxLength(3)
		Example("USD")
	})
	Attribute("transactionIdentifier", Integer, "VisaNet reference Number for the transaction", func() {
		// transactionIdentifier is a Visa generated field that client receives in the response message.
	})
})
package aac
import (
"fmt"
)
// ADTSPacket is an ADTS packet: the decoded header fields plus the raw
// AAC frame payload.
type ADTSPacket struct {
	SampleRate   int
	ChannelCount int
	Frame        []byte
}

// DecodeADTS decodes an ADTS stream into ADTS packets.
// Only AAC-LC frames without CRC and with a single raw data block are
// supported.  Malformed or truncated input yields an error instead of
// an index/slice panic.
func DecodeADTS(byts []byte) ([]*ADTSPacket, error) {
	// refs: https://wiki.multimedia.cx/index.php/ADTS

	// Sample rates indexed by the header's sampling-frequency index.
	sampleRates := [...]int{
		96000, 88200, 64000, 48000, 44100, 32000, 24000,
		22050, 16000, 12000, 11025, 8000, 7350,
	}

	var ret []*ADTSPacket

	for len(byts) > 0 {
		// A CRC-less ADTS header is 7 bytes; guard against truncated
		// input before indexing the header bytes.
		if len(byts) < 7 {
			return nil, fmt.Errorf("invalid header length")
		}

		syncWord := (uint16(byts[0]) << 4) | (uint16(byts[1]) >> 4)
		if syncWord != 0xfff {
			return nil, fmt.Errorf("invalid syncword")
		}

		protectionAbsent := byts[1] & 0x01
		if protectionAbsent != 1 {
			return nil, fmt.Errorf("ADTS with CRC is not supported")
		}

		pkt := &ADTSPacket{}

		profile := (byts[2] >> 6)
		if profile != 0 {
			return nil, fmt.Errorf("only AAC-LC is supported")
		}

		sampleRateIndex := (byts[2] >> 2) & 0x0F
		if int(sampleRateIndex) >= len(sampleRates) {
			return nil, fmt.Errorf("invalid sample rate index: %d", sampleRateIndex)
		}
		pkt.SampleRate = sampleRates[sampleRateIndex]

		channelConfig := ((byts[2] & 0x01) << 2) | ((byts[3] >> 6) & 0x03)
		switch {
		case channelConfig >= 1 && channelConfig <= 6:
			pkt.ChannelCount = int(channelConfig)
		case channelConfig == 7:
			// Configuration 7 denotes 7.1, i.e. eight channels.
			pkt.ChannelCount = 8
		default:
			return nil, fmt.Errorf("invalid channel configuration: %d", channelConfig)
		}

		// The 13-bit frame length includes the 7-byte header.
		frameLen := int(((uint16(byts[3])&0x03)<<11)|
			(uint16(byts[4])<<3)|
			((uint16(byts[5])>>5)&0x07)) - 7
		if frameLen < 0 {
			return nil, fmt.Errorf("invalid frame length")
		}

		fullness := ((uint16(byts[5]) & 0x1F) << 6) | ((uint16(byts[6]) >> 2) & 0x3F)
		if fullness != 1800 {
			return nil, fmt.Errorf("fullness not supported")
		}

		frameCount := byts[6] & 0x03
		if frameCount != 0 {
			return nil, fmt.Errorf("multiple frame count not supported")
		}

		if len(byts[7:]) < frameLen {
			return nil, fmt.Errorf("invalid frame length")
		}
		pkt.Frame = byts[7 : 7+frameLen]
		byts = byts[7+frameLen:]

		ret = append(ret, pkt)
	}

	return ret, nil
}
// EncodeADTS encodes ADTS packets into an ADTS stream.
func EncodeADTS(pkts []*ADTSPacket) ([]byte, error) {
var ret []byte
for _, pkt := range pkts {
frameLen := len(pkt.Frame) + 7
fullness := 1800
var channelConf uint8
switch pkt.ChannelCount {
case 1:
channelConf = 1
case 2:
channelConf = 2
case 3:
channelConf = 3
case 4:
channelConf = 4
case 5:
channelConf = 5
case 6:
channelConf = 6
case 8:
channelConf = 7
default:
return nil, fmt.Errorf("invalid channel count: %v", pkt.ChannelCount)
}
var sampleRateIndex uint8
switch pkt.SampleRate {
case 96000:
sampleRateIndex = 0
case 88200:
sampleRateIndex = 1
case 64000:
sampleRateIndex = 2
case 48000:
sampleRateIndex = 3
case 44100:
sampleRateIndex = 4
case 32000:
sampleRateIndex = 5
case 24000:
sampleRateIndex = 6
case 22050:
sampleRateIndex = 7
case 16000:
sampleRateIndex = 8
case 12000:
sampleRateIndex = 9
case 11025:
sampleRateIndex = 10
case 8000:
sampleRateIndex = 11
case 7350:
sampleRateIndex = 12
default:
return nil, fmt.Errorf("invalid sample rate: %v", pkt.SampleRate)
}
header := make([]byte, 7)
header[0] = 0xFF
header[1] = 0xF1
header[2] = (sampleRateIndex << 2) | ((channelConf >> 2) & 0x01)
header[3] = (channelConf&0x03)<<6 | uint8((frameLen>>11)&0x03)
header[4] = uint8((frameLen >> 3) & 0xFF)
header[5] = uint8((frameLen&0x07)<<5 | ((fullness >> 6) & 0x1F))
header[6] = uint8((fullness & 0x3F) << 2)
ret = append(ret, header...)
ret = append(ret, pkt.Frame...)
}
return ret, nil
} | internal/aac/adts.go | 0.531453 | 0.47524 | adts.go | starcoder |
/*
Package gerror provides an improved error type which captures an error tag and the stack trace
at construction time.
*/
package gerror
import (
"fmt"
"reflect"
"runtime"
)
const stackSize = 4096
// A Tag represents an error identifier of any type.
type Tag interface{}
// A Gerror is a tagged error with a stack trace embedded in the Error() string.
type Gerror interface {
// Returns the tag used to create this error.
Tag() Tag
// Returns the concrete type of the tag used to create this error.
TagType() reflect.Type
// Returns the string form of this error, which includes the tag value, the tag type, the error message, and a stack trace.
Error() string
// Test the tag used to create this error for equality with a given tag. Returns `true` if and only if the two are equal.
EqualTag(Tag) bool
}
// Returns an error containing the given tag and message and the current stack trace.
func New(tag Tag, message string) Gerror {
var stack [stackSize]byte
n := runtime.Stack(stack[:], false)
return &err{tag, reflect.TypeOf(tag), message, stack[:n]}
}
// Returns an error containing the given tag and format string and the current stack trace. The given inserts are applied to the format string to produce an error message.
func Newf(tag Tag, format string, insert ...interface{}) Gerror {
return New(tag, fmt.Sprintf(format, insert...))
}
// Return an error containing the given tag, the cause of the error, and the current stack trace.
func NewFromError(tag Tag, cause error) Gerror {
if cause != nil {
var stack [stackSize]byte
n := runtime.Stack(stack[:], false)
return &err{tag, reflect.TypeOf(tag), "Error caused by: " + cause.Error(), stack[:n]}
} else {
return nil
}
}
type err struct {
tag Tag
typ reflect.Type
message string
stackTrace []byte
}
func (e *err) Error() string {
return fmt.Sprintf("%v %v", e.tag, e.typ) + ": " + e.message + "\n" + string(e.stackTrace)
}
func (e *err) Tag() Tag {
return e.tag
}
func (e *err) TagType() reflect.Type {
return e.typ
}
func (e *err) EqualTag(tag Tag) bool {
return e.typ == reflect.TypeOf(tag) && e.tag == tag
} | gerror/gerror.go | 0.880733 | 0.477859 | gerror.go | starcoder |
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
// Type Position keeps track of most frequently seen letters in a given position.
type Position struct {
	Frequency map[rune]int // letter -> number of times seen at this position
}

// Type Code keeps track of various letters in various positions to determine
// the repetition code's message.
type Code struct {
	Positions []*Position // one Position per message column
}
// main reads the input file named on the command line, tallies the
// letter frequency of each column across all lines, and prints the
// most- and least-likely message (parts 1 and 2 of the puzzle).
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Usage: main.go <input file>")
		os.Exit(1)
	}
	// The eventual code we're trying to crack.
	code := &Code{}
	// Get the input strings.
	inputs := ReadInputFile(os.Args[1])
	for _, input := range inputs {
		// Initialize the length of the code.
		for len(code.Positions) < len(input) {
			code.Positions = append(code.Positions, NewPosition())
		}
		// Check each position of the input.
		// NOTE(review): i is a byte offset, so this assumes ASCII input;
		// multi-byte runes would skip positions.
		for i, char := range input {
			code.Positions[i].Put(char)
		}
	}
	// Get the most/least frequent symbols (Part 1 & 2 of the puzzle)
	fmt.Printf("The most likely code is: %s\n", code.MostLikely())
	fmt.Printf("The least likely is: %s\n", code.LeastLikely())
}
// NewPosition initializes a new position object with an empty
// frequency table.
func NewPosition() *Position {
	// Previously the result was bound to a local named "new", shadowing
	// the predeclared identifier.
	return &Position{Frequency: make(map[rune]int)}
}
// Put records one occurrence of letter at this position.
func (p *Position) Put(letter rune) {
	// Incrementing a missing key starts from the zero value, so the
	// previous existence check was redundant.
	p.Frequency[letter]++
}
// Most returns the most frequently used letter in a position.  Ties are
// broken arbitrarily (map iteration order); the zero rune is returned
// when nothing has been recorded.
func (p *Position) Most() rune {
	var best rune
	var bestCount int
	for letter, count := range p.Frequency {
		if count > bestCount {
			best, bestCount = letter, count
		}
	}
	return best
}

// Least returns the least frequently used letter in a position, with
// the same tie-breaking and zero-value caveats as Most.
func (p *Position) Least() rune {
	var best rune
	var bestCount int
	for letter, count := range p.Frequency {
		if bestCount == 0 || count < bestCount {
			best, bestCount = letter, count
		}
	}
	return best
}
// MostLikely returns the message formed by each column's most frequent
// letter.
func (c *Code) MostLikely() string {
	var b strings.Builder
	for _, p := range c.Positions {
		b.WriteRune(p.Most())
	}
	return b.String()
}

// LeastLikely returns the message formed by each column's least
// frequent letter.
func (c *Code) LeastLikely() string {
	var b strings.Builder
	for _, p := range c.Positions {
		b.WriteRune(p.Least())
	}
	return b.String()
}
// ReadInputFile returns the non-empty, whitespace-trimmed lines of the
// named file.  Any I/O failure terminates the program via panic.
func ReadInputFile(filename string) []string {
	file, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	lines := []string{}
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		if line := strings.TrimSpace(scanner.Text()); len(line) > 0 {
			lines = append(lines, line)
		}
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	return lines
}
package stdlib
import (
"fmt"
"github.com/onflow/cadence/runtime/common"
"github.com/onflow/cadence/runtime/interpreter"
"github.com/onflow/cadence/runtime/sema"
)
// This file defines functions built-in to Cadence.
// AssertFunction

// assertFunctionDocString is the user-facing documentation for the
// built-in `assert` function.
const assertFunctionDocString = `
Terminates the program if the given condition is false, and reports a message which explains how the condition is false. Use this function for internal sanity checks.
The message argument is optional.
`

// AssertFunction is the built-in `assert` function:
// assert(_ condition: Bool, message: String): Void — the message
// argument is optional (only one argument is required).
var AssertFunction = NewStandardLibraryFunction(
	"assert",
	&sema.FunctionType{
		Parameters: []*sema.Parameter{
			{
				Label:          sema.ArgumentLabelNotRequired,
				Identifier:     "condition",
				TypeAnnotation: sema.NewTypeAnnotation(sema.BoolType),
			},
			{
				Identifier:     "message",
				TypeAnnotation: sema.NewTypeAnnotation(sema.StringType),
			},
		},
		ReturnTypeAnnotation: sema.NewTypeAnnotation(
			sema.VoidType,
		),
		RequiredArgumentCount: sema.RequiredArgumentCount(1),
	},
	assertFunctionDocString,
	func(invocation interpreter.Invocation) interpreter.Value {
		result := invocation.Arguments[0].(interpreter.BoolValue)
		if !result {
			var message string
			// The optional message is only present when supplied.
			if len(invocation.Arguments) > 1 {
				message = invocation.Arguments[1].(*interpreter.StringValue).Str
			}
			panic(AssertionError{
				Message:       message,
				LocationRange: invocation.GetLocationRange(),
			})
		}
		return interpreter.VoidValue{}
	},
)
// PanicError

// PanicError is the error raised by the `panic` built-in. It carries the
// user-supplied message and the source location of the offending call.
type PanicError struct {
	Message string
	interpreter.LocationRange
}

// Error implements the error interface.
func (e PanicError) Error() string {
	return "panic: " + e.Message
}
// PanicFunction

const panicFunctionDocString = `
Terminates the program unconditionally and reports a message which explains why the unrecoverable error occurred.
`

// PanicFunction implements the `panic` built-in. Its return type is Never,
// because it always aborts interpretation with a PanicError.
var PanicFunction = NewStandardLibraryFunction(
	"panic",
	&sema.FunctionType{
		Parameters: []*sema.Parameter{
			{
				// No argument label: called as panic("...").
				Label:          sema.ArgumentLabelNotRequired,
				Identifier:     "message",
				TypeAnnotation: sema.NewTypeAnnotation(sema.StringType),
			},
		},
		ReturnTypeAnnotation: sema.NewTypeAnnotation(
			sema.NeverType,
		),
	},
	panicFunctionDocString,
	func(invocation interpreter.Invocation) interpreter.Value {
		message := invocation.Arguments[0].(*interpreter.StringValue)
		panic(PanicError{
			Message:       message.Str,
			LocationRange: invocation.GetLocationRange(),
		})
	},
)
// BuiltinFunctions

// BuiltinFunctions lists the functions implicitly declared in every Cadence
// program: assert, panic, the PublicKey constructor, and the BLS
// aggregation functions.
var BuiltinFunctions = StandardLibraryFunctions{
	AssertFunction,
	PanicFunction,
	CreatePublicKeyFunction,
	AggregateBLSSignaturesFunction,
	AggregateBLSPublicKeysFunction,
}
// LogFunction

const logFunctionDocString = `
Logs a string representation of the given value
`

// LogFunction implements the `log` debugging helper: it prints the
// argument's string representation and returns Void.
//
// NOTE(review): the predeclared `println` builtin writes to standard error
// and the Go spec does not guarantee it stays in the language; consider
// fmt.Fprintln(os.Stderr, ...) instead.
var LogFunction = NewStandardLibraryFunction(
	"log",
	LogFunctionType,
	logFunctionDocString,
	func(invocation interpreter.Invocation) interpreter.Value {
		println(invocation.Arguments[0].String())
		return interpreter.VoidValue{}
	},
)

// HelperFunctions

// HelperFunctions lists debugging helpers (currently only `log`) that are
// declared separately from the core built-ins.
var HelperFunctions = StandardLibraryFunctions{
	LogFunction,
}
const createPublicKeyFunctionDocString = `
Constructs a new public key
`

// CreatePublicKeyFunction implements the `PublicKey` constructor. It takes
// the raw key bytes ([UInt8]) and a SignatureAlgorithm case, and delegates
// key validation to the interpreter's PublicKeyValidationHandler.
var CreatePublicKeyFunction = NewStandardLibraryFunction(
	sema.PublicKeyTypeName,
	&sema.FunctionType{
		Parameters: []*sema.Parameter{
			{
				Identifier:     sema.PublicKeyPublicKeyField,
				TypeAnnotation: sema.NewTypeAnnotation(&sema.VariableSizedType{Type: sema.UInt8Type}),
			},
			{
				Identifier:     sema.PublicKeySignAlgoField,
				TypeAnnotation: sema.NewTypeAnnotation(sema.SignatureAlgorithmType),
			},
		},
		ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.PublicKeyType),
	},
	createPublicKeyFunctionDocString,
	func(invocation interpreter.Invocation) interpreter.Value {
		publicKey := invocation.Arguments[0].(*interpreter.ArrayValue)
		signAlgo := invocation.Arguments[1].(*interpreter.CompositeValue)
		inter := invocation.Interpreter
		return interpreter.NewPublicKeyValue(
			inter,
			invocation.GetLocationRange,
			publicKey,
			signAlgo,
			inter.PublicKeyValidationHandler,
		)
	},
)
const aggregateBLSSignaturesFunctionDocString = `
This is a specific function for the BLS signature scheme.
It aggregates multiple BLS signatures into one,
considering the proof of possession as a defense against rogue attacks.
Signatures could be generated from the same or distinct messages,
they could also be the aggregation of other signatures.
The order of the signatures in the slice does not matter since the aggregation is commutative.
No subgroup membership check is performed on the input signatures.
The function errors if the array is empty or if decoding one of the signature fails.
`

// AggregateBLSSignaturesFunction exposes BLS signature aggregation as the
// global `AggregateBLSSignatures` function; the actual work is done by
// AggregateBLSSignatures below.
var AggregateBLSSignaturesFunction = NewStandardLibraryFunction(
	"AggregateBLSSignatures",
	&sema.FunctionType{
		Parameters: []*sema.Parameter{
			{
				// No argument label: called as AggregateBLSSignatures(sigs).
				Label:          sema.ArgumentLabelNotRequired,
				Identifier:     "signatures",
				TypeAnnotation: sema.NewTypeAnnotation(&sema.VariableSizedType{Type: sema.ByteArrayType}),
			},
		},
		ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.ByteArrayType),
	},
	aggregateBLSSignaturesFunctionDocString,
	func(invocation interpreter.Invocation) interpreter.Value {
		signatures := invocation.Arguments[0].(*interpreter.ArrayValue)
		return AggregateBLSSignatures(invocation.Interpreter, signatures)
	},
)
const aggregateBLSPublicKeysFunctionDocString = `
This is a specific function for the BLS signature scheme.
It aggregates multiple BLS public keys into one.
The order of the public keys in the slice does not matter since the aggregation is commutative.
No subgroup membership check is performed on the input keys.
The function errors if the array is empty or any of the input keys is not a BLS key.
`

// AggregateBLSPublicKeysFunction exposes BLS public-key aggregation as the
// global `AggregateBLSPublicKeys` function; the actual work is done by
// AggregateBLSPublicKeys below.
var AggregateBLSPublicKeysFunction = NewStandardLibraryFunction(
	"AggregateBLSPublicKeys",
	&sema.FunctionType{
		Parameters: []*sema.Parameter{
			{
				// No argument label: called as AggregateBLSPublicKeys(keys).
				Label:          sema.ArgumentLabelNotRequired,
				Identifier:     "keys",
				TypeAnnotation: sema.NewTypeAnnotation(&sema.VariableSizedType{Type: sema.PublicKeyType}),
			},
		},
		ReturnTypeAnnotation: sema.NewTypeAnnotation(sema.PublicKeyType),
	},
	aggregateBLSPublicKeysFunctionDocString,
	func(invocation interpreter.Invocation) interpreter.Value {
		publicKeys := invocation.Arguments[0].(*interpreter.ArrayValue)
		return AggregateBLSPublicKeys(
			invocation.Interpreter,
			invocation.GetLocationRange,
			publicKeys,
		)
	},
)
// AggregateBLSPublicKeys aggregates the given BLS public keys into a single
// key by delegating to the interpreter's AggregateBLSPublicKeysHandler.
// It panics if the handler reports an error.
func AggregateBLSPublicKeys(
	inter *interpreter.Interpreter,
	getLocationRange func() interpreter.LocationRange,
	publicKeys *interpreter.ArrayValue,
) interpreter.Value {
	// Collect the array elements into a plain slice for the handler.
	keys := make([]interpreter.MemberAccessibleValue, 0, publicKeys.Count())
	publicKeys.Iterate(func(element interpreter.Value) (resume bool) {
		keys = append(keys, element.(interpreter.MemberAccessibleValue))
		return true
	})

	aggregated, err := inter.AggregateBLSPublicKeysHandler(
		inter,
		getLocationRange,
		keys,
	)
	if err != nil {
		panic(err)
	}
	return aggregated
}
// AggregateBLSSignatures aggregates the given BLS signatures (arrays of
// UInt8) into one signature via the interpreter's handler, returning the
// result as a new byte-array value. It panics if the handler reports an
// error.
func AggregateBLSSignatures(
	inter *interpreter.Interpreter,
	signatures *interpreter.ArrayValue,
) interpreter.Value {
	// Flatten each signature array value into a plain []byte.
	raw := make([][]byte, 0, signatures.Count())
	signatures.Iterate(func(element interpreter.Value) (resume bool) {
		sig := element.(*interpreter.ArrayValue)
		flat := make([]byte, 0, sig.Count())
		sig.Iterate(func(element interpreter.Value) (resume bool) {
			flat = append(flat, byte(element.(interpreter.UInt8Value)))
			return true
		})
		raw = append(raw, flat)
		return true
	})

	aggregated, err := inter.AggregateBLSSignaturesHandler(
		raw,
	)
	if err != nil {
		panic(err)
	}

	// Convert the aggregated bytes back into interpreter values.
	elements := make([]interpreter.Value, 0, len(aggregated))
	for _, b := range aggregated {
		elements = append(elements, interpreter.UInt8Value(b))
	}
	return interpreter.NewArrayValue(
		inter,
		interpreter.ByteArrayStaticType,
		signatures.GetOwner(),
		elements...,
	)
}
// BuiltinValues

// BuiltinValues returns the values implicitly declared in every Cadence
// program: the SignatureAlgorithm and HashAlgorithm enum constructors.
func BuiltinValues() StandardLibraryValues {
	signatureAlgorithmValue := newCryptoAlgorithmEnumDeclaration(
		sema.SignatureAlgorithmTypeName,
		sema.SignatureAlgorithmType,
		sema.SignatureAlgorithms,
		NewSignatureAlgorithmCase,
	)

	hashAlgorithmValue := newCryptoAlgorithmEnumDeclaration(
		sema.HashAlgorithmTypeName,
		sema.HashAlgorithmType,
		sema.HashAlgorithms,
		NewHashAlgorithmCase,
	)

	return StandardLibraryValues{
		signatureAlgorithmValue,
		hashAlgorithmValue,
	}
}

// newCryptoAlgorithmEnumDeclaration builds the standard-library declaration
// for one crypto-algorithm enum. It factors out the construction that was
// previously duplicated for the signature- and hash-algorithm enums.
func newCryptoAlgorithmEnumDeclaration(
	name string,
	enumType *sema.CompositeType,
	enumCases []sema.CryptoAlgorithm,
	caseConstructor func(inter *interpreter.Interpreter, rawValue uint8) *interpreter.CompositeValue,
) StandardLibraryValue {
	return StandardLibraryValue{
		Name: name,
		Type: cryptoAlgorithmEnumConstructorType(enumType, enumCases),
		ValueFactory: func(inter *interpreter.Interpreter) interpreter.Value {
			return cryptoAlgorithmEnumValue(
				inter,
				interpreter.ReturnEmptyLocationRange,
				enumType,
				enumCases,
				caseConstructor,
			)
		},
		Kind: common.DeclarationKindEnum,
	}
}
// NewSignatureAlgorithmCase returns the enum case value of
// SignatureAlgorithm with the given raw value. Signature-algorithm cases
// carry no member functions.
func NewSignatureAlgorithmCase(inter *interpreter.Interpreter, rawValue uint8) *interpreter.CompositeValue {
	return interpreter.NewEnumCaseValue(
		inter,
		sema.SignatureAlgorithmType,
		interpreter.UInt8Value(rawValue),
		nil,
	)
}

// hashAlgorithmFunctions are the member functions attached to every
// HashAlgorithm enum case: hash and hashWithTag.
var hashAlgorithmFunctions = map[string]interpreter.FunctionValue{
	sema.HashAlgorithmTypeHashFunctionName:        hashAlgorithmHashFunction,
	sema.HashAlgorithmTypeHashWithTagFunctionName: hashAlgorithmHashWithTagFunction,
}

// NewHashAlgorithmCase returns the enum case value of HashAlgorithm with
// the given raw value, with the hash/hashWithTag member functions attached.
func NewHashAlgorithmCase(inter *interpreter.Interpreter, rawValue uint8) *interpreter.CompositeValue {
	return interpreter.NewEnumCaseValue(
		inter,
		sema.HashAlgorithmType,
		interpreter.UInt8Value(rawValue),
		hashAlgorithmFunctions,
	)
}
// hashAlgorithmHashFunction implements HashAlgorithm.hash(data): it checks
// the receiver is a HashAlgorithm case and delegates to the interpreter's
// HashHandler with no tag.
var hashAlgorithmHashFunction = interpreter.NewHostFunctionValue(
	func(invocation interpreter.Invocation) interpreter.Value {
		dataValue := invocation.Arguments[0].(*interpreter.ArrayValue)
		hashAlgoValue := invocation.Self
		inter := invocation.Interpreter
		getLocationRange := invocation.GetLocationRange
		// Defensive check that Self really is a HashAlgorithm case.
		inter.ExpectType(
			hashAlgoValue,
			sema.HashAlgorithmType,
			getLocationRange,
		)
		return inter.HashHandler(
			inter,
			getLocationRange,
			dataValue,
			nil, // no tag
			hashAlgoValue,
		)
	},
	sema.HashAlgorithmTypeHashFunctionType,
)

// hashAlgorithmHashWithTagFunction implements
// HashAlgorithm.hashWithTag(data, tag): like hash, but forwards the
// user-supplied domain-separation tag to the HashHandler.
var hashAlgorithmHashWithTagFunction = interpreter.NewHostFunctionValue(
	func(invocation interpreter.Invocation) interpreter.Value {
		dataValue := invocation.Arguments[0].(*interpreter.ArrayValue)
		tagValue := invocation.Arguments[1].(*interpreter.StringValue)
		hashAlgoValue := invocation.Self
		inter := invocation.Interpreter
		getLocationRange := invocation.GetLocationRange
		// Defensive check that Self really is a HashAlgorithm case.
		inter.ExpectType(
			hashAlgoValue,
			sema.HashAlgorithmType,
			getLocationRange,
		)
		return inter.HashHandler(
			inter,
			getLocationRange,
			dataValue,
			tagValue,
			hashAlgoValue,
		)
	},
	sema.HashAlgorithmTypeHashWithTagFunctionType,
)
// cryptoAlgorithmEnumConstructorType builds the type of a crypto-algorithm
// enum constructor: a function taking the raw value and returning an
// optional enum case, with one public constant field per algorithm.
func cryptoAlgorithmEnumConstructorType(
	enumType *sema.CompositeType,
	enumCases []sema.CryptoAlgorithm,
) *sema.FunctionType {
	// One public constant field per algorithm case, e.g. `ECDSA_P256`.
	members := make([]*sema.Member, 0, len(enumCases))
	for _, algo := range enumCases {
		members = append(members, sema.NewPublicConstantFieldMember(
			enumType,
			algo.Name(),
			enumType,
			algo.DocString(),
		))
	}

	return &sema.FunctionType{
		IsConstructor: true,
		Parameters: []*sema.Parameter{
			{
				Identifier:     sema.EnumRawValueFieldName,
				TypeAnnotation: sema.NewTypeAnnotation(enumType.EnumRawType),
			},
		},
		// The constructor returns nil for an unknown raw value.
		ReturnTypeAnnotation: sema.NewTypeAnnotation(
			&sema.OptionalType{Type: enumType},
		),
		Members: sema.GetMembersAsMap(members),
	}
}
func cryptoAlgorithmEnumValue(
inter *interpreter.Interpreter,
getLocationRange func() interpreter.LocationRange,
enumType *sema.CompositeType,
enumCases []sema.CryptoAlgorithm,
caseConstructor func(inter *interpreter.Interpreter, rawValue uint8) *interpreter.CompositeValue,
) interpreter.Value {
caseCount := len(enumCases)
caseValues := make([]*interpreter.CompositeValue, caseCount)
constructorNestedVariables := map[string]*interpreter.Variable{}
for i, enumCase := range enumCases {
rawValue := enumCase.RawValue()
caseValue := caseConstructor(inter, rawValue)
caseValues[i] = caseValue
constructorNestedVariables[enumCase.Name()] =
interpreter.NewVariableWithValue(caseValue)
}
return interpreter.EnumConstructorFunction(
inter,
getLocationRange,
enumType,
caseValues,
constructorNestedVariables,
)
} | runtime/stdlib/builtin.go | 0.765155 | 0.482063 | builtin.go | starcoder |
package payouts
// AdditionalDataOpenInvoice holds the open-invoice additional data fields
// of a payout request. Field names mirror the flattened
// `openinvoicedata.*` keys of the Adyen API, including the per-line
// `line[itemNr]` placeholders.
type AdditionalDataOpenInvoice struct {
	// The number of invoice lines included in `openinvoicedata`. There needs to be at least one line, so `numberOfLines` needs to be at least 1.
	OpeninvoicedataNumberOfLines int32 `json:"openinvoicedata.numberOfLines,omitempty"`
	// Holds different merchant data points like product, purchase, customer, and so on. It takes data in a Base64 encoded string. The `merchantData` parameter needs to be added to the `openinvoicedata` signature at the end. Since the field is optional, if it's not included it does not impact computing the merchant signature. Applies only to Klarna. You can contact Klarna for the format and structure of the string.
	OpeninvoicedataMerchantData string `json:"openinvoicedata.merchantData,omitempty"`
	// The three-character ISO currency code.
	OpeninvoicedataLineItemNrCurrencyCode string `json:"openinvoicedata.line[itemNr].currencyCode,omitempty"`
	// A text description of the product the invoice line refers to.
	OpeninvoicedataLineItemNrDescription string `json:"openinvoicedata.line[itemNr].description,omitempty"`
	// The price for one item in the invoice line, represented in minor units. The due amount for the item, VAT excluded.
	OpeninvoicedataLineItemNrItemAmount int32 `json:"openinvoicedata.line[itemNr].itemAmount,omitempty"`
	// The VAT due for one item in the invoice line, represented in minor units.
	OpeninvoicedataLineItemNrItemVatAmount int32 `json:"openinvoicedata.line[itemNr].itemVatAmount,omitempty"`
	// The VAT percentage for one item in the invoice line, represented in minor units. For example, 19% VAT is specified as 1900.
	OpeninvoicedataLineItemNrItemVatPercentage int32 `json:"openinvoicedata.line[itemNr].itemVatPercentage,omitempty"`
	// A unique id for this item. Required for RatePay if the description of each item is not unique.
	OpeninvoicedataLineItemNrItemId string `json:"openinvoicedata.line[itemNr].itemId,omitempty"`
	// The number of units purchased of a specific product.
	OpeninvoicedataLineItemNrNumberOfItems int32 `json:"openinvoicedata.line[itemNr].numberOfItems,omitempty"`
	// Required for AfterPay. The country-specific VAT category a product falls under. Allowed values: * High * Low * None.
	OpeninvoicedataLineItemNrVatCategory string `json:"openinvoicedata.line[itemNr].vatCategory,omitempty"`
}
package regions
import (
"fmt"
"sort"
)
// Index is a searchable collection of intervals.
//
// It is built once by NewIndex and read-only afterwards; idx holds the
// sweep-line pieces sorted by start position.
type Index struct {
	idx []interval
}
// NewIndex returns an index on the given interval starts and ends. Starts and ends
// should be of the same length. End positions are exclusive, meaning that an end
// value of n implies that the interval's last position is n-1.
//
// NewIndex panics if the slices differ in length, or if any interval is
// empty or inverted (start >= end), resolving the TODO left in the original.
func NewIndex(starts, ends []int) *Index {
	if len(starts) != len(ends) {
		panic(fmt.Sprintf("lengths of starts and ends don't match: %v!=%v",
			len(starts), len(ends)))
	}
	// Sweep-line events: one "open" and one "close" event per interval.
	events := make([]event, 0, len(starts)+len(ends))
	for i := range starts {
		if starts[i] >= ends[i] {
			panic(fmt.Sprintf("interval #%v is empty or inverted: start=%v end=%v",
				i, starts[i], ends[i]))
		}
		events = append(events, event{i, starts[i], true})
		events = append(events, event{i, ends[i], false})
	}
	sort.Slice(events, func(i, j int) bool {
		return eventLess(events[i], events[j])
	})

	// Walk the events in order; whenever the position advances, snapshot
	// the set of currently-open intervals as one piece.
	var intervals []interval
	idxs := map[int]struct{}{}
	var pos int
	for i, e := range events {
		if i == 0 {
			pos = e.pos
		}
		if e.pos != pos {
			intervals = append(intervals, interval{pos, keys(idxs)})
			pos = e.pos
		}
		if e.start {
			idxs[e.idx] = struct{}{}
		} else {
			delete(idxs, e.idx)
		}
	}
	// Final piece: after the last event the open set is empty.
	intervals = append(intervals, interval{pos, keys(idxs)})
	return &Index{intervals}
}
// At returns the intervals that overlap with position i. Returned values are the
// serial numbers of the start-end pairs for which starts[x] <= i < ends[x].
func (idx *Index) At(i int) []int {
	// Find the first piece that starts strictly after i; the piece before
	// it (if any) is the one covering i.
	pos := sort.Search(len(idx.idx), func(j int) bool {
		return idx.idx[j].start > i
	})
	if pos == 0 {
		// i precedes every piece.
		return nil
	}
	return cp(idx.idx[pos-1].idxs) // Copy keeps the index read-only.
}
// A start or an end of an interval, used as a sweep-line event.
type event struct {
	idx   int  // serial number of the interval this event belongs to
	pos   int  // position on the line
	start bool // true for an interval start, false for an end
}

// Compares 2 events for sorting: by position, then ends before starts,
// then by interval serial number.
func eventLess(a, b event) bool {
	switch {
	case a.pos != b.pos:
		return a.pos < b.pos
	case a.start != b.start:
		return !a.start // End comes before start
	default:
		return a.idx < b.idx
	}
}
// The start of a piece with the intervals that intersect with it.
type interval struct {
	start int   // first position covered by this piece
	idxs  []int // serial numbers of the intervals open at start, sorted
}
// Returns the keys of a map, sorted. An empty (or nil) map yields nil.
func keys(m map[int]struct{}) []int {
	if len(m) == 0 {
		return nil
	}
	out := make([]int, 0, len(m))
	for k := range m {
		out = append(out, k)
	}
	sort.Ints(out)
	return out
}
// Copies an int slice. A nil input yields nil; otherwise the result has a
// fresh backing array of the same length.
func cp(a []int) []int {
	if a == nil {
		return nil
	}
	out := make([]int, len(a))
	copy(out, a)
	return out
}
package dataframe
import (
"fmt"
"math"
"reflect"
"strings"
gotime "time"
"go.starlark.net/lib/time"
"go.starlark.net/starlark"
)
// convert starlark value to a string fit for printing
func toStr(val starlark.Value) string {
	switch s := val.(type) {
	case starlark.String:
		return string(s)
	case nil:
		return "<nil>"
	default:
		return val.String()
	}
	_ = s
}
// convert starlark value to a list of strings, fit for printing, or nil if not possible
//
// Only lists and tuples are converted; any other value yields nil.
func toStrSliceOrNil(v starlark.Value) []string {
	// Both *starlark.List and starlark.Tuple expose Len/Index; fromSeq
	// factors out the conversion that was previously duplicated per type.
	fromSeq := func(seq interface {
		Len() int
		Index(i int) starlark.Value
	}) []string {
		result := make([]string, 0, seq.Len())
		for i := 0; i < seq.Len(); i++ {
			result = append(result, toStr(seq.Index(i)))
		}
		return result
	}
	switch x := v.(type) {
	case *starlark.List:
		return fromSeq(x)
	case starlark.Tuple:
		return fromSeq(x)
	default:
		return nil
	}
}
// convert starlark value to a list of ints, or nil if not possible
//
// Only lists whose every element converts via starlark.AsInt32 succeed.
func toIntSliceOrNil(v starlark.Value) []int {
	list, ok := v.(*starlark.List)
	if !ok {
		return nil
	}
	result := make([]int, 0, list.Len())
	for i := 0; i < list.Len(); i++ {
		n, err := starlark.AsInt32(list.Index(i))
		if err != nil {
			return nil
		}
		result = append(result, n)
	}
	return result
}
// convert starlark value to a list of any values, or nil if not a list
//
// Lists and tuples are converted element-by-element (nil if any element is
// not a supported scalar); a bare int or string becomes a one-element list.
func toInterfaceSliceOrNil(v starlark.Value) []interface{} {
	// Both *starlark.List and starlark.Tuple expose Len/Index; fromSeq
	// factors out the conversion that was previously duplicated per type.
	fromSeq := func(seq interface {
		Len() int
		Index(i int) starlark.Value
	}) []interface{} {
		result := make([]interface{}, 0, seq.Len())
		for i := 0; i < seq.Len(); i++ {
			elem, ok := toScalarMaybe(seq.Index(i))
			if !ok {
				return nil
			}
			result = append(result, elem)
		}
		return result
	}
	switch x := v.(type) {
	case *starlark.List:
		return fromSeq(x)
	case starlark.Tuple:
		return fromSeq(x)
	case starlark.Int:
		// NOTE(review): the AsInt32 error is ignored here, as in the
		// original; an out-of-range Int yields a zero element.
		num, _ := starlark.AsInt32(x)
		return []interface{}{num}
	case starlark.String:
		return []interface{}{string(x)}
	default:
		return nil
	}
}
// convert starlark dictionary to a row of named values
//
// Returns nil if v is not a dict or any value is not a supported scalar.
func toNamedRowOrNil(v starlark.Value) *namedRow {
	dict, ok := v.(*starlark.Dict)
	if !ok {
		return nil
	}
	// Split the dictionary into parallel name/value slices.
	names := make([]string, 0, dict.Len())
	values := make([]interface{}, 0, dict.Len())
	for _, pair := range dict.Items() {
		val, ok := toScalarMaybe(pair.Index(1))
		if !ok {
			return nil
		}
		names = append(names, toStr(pair.Index(0)))
		values = append(values, val)
	}
	return newNamedRow(names, values)
}
// convert starlark value to a go native int if it has the right type
func toIntMaybe(v starlark.Value) (int, bool) {
	if v == nil || v == starlark.None {
		return 0, false
	}
	n, err := starlark.AsInt32(v)
	if err != nil {
		return 0, false
	}
	return n, true
}

// convert starlark value to a go native float if it has the right type
func toFloatMaybe(v starlark.Value) (float64, bool) {
	f, ok := starlark.AsFloat(v)
	return f, ok
}

// convert starlark value to a go native string if it has the right type
func toStrMaybe(v starlark.Value) (string, bool) {
	str, ok := v.(starlark.String)
	return string(str), ok
}

// convert starlark value to a go native bool if it has the right type
func toBoolMaybe(v starlark.Value) (bool, bool) {
	b, ok := v.(starlark.Bool)
	return bool(b), ok
}

// convert starlark value to a go native time object
func toTimeMaybe(v starlark.Value) (time.Time, bool) {
	tim, ok := v.(time.Time)
	return tim, ok
}

// convert starlark value to go native int, bool, float, or string
//
// The extractors are tried in order: int, float, string, bool, time.
func toScalarMaybe(v starlark.Value) (interface{}, bool) {
	if num, ok := toIntMaybe(v); ok {
		return num, true
	}
	if f, ok := toFloatMaybe(v); ok {
		return f, true
	}
	if text, ok := toStrMaybe(v); ok {
		return text, true
	}
	if b, ok := toBoolMaybe(v); ok {
		return b, true
	}
	if tim, ok := toTimeMaybe(v); ok {
		return tim, true
	}
	return nil, false
}
// convert starlark value to a go native datum
//
// Scalars map directly; lists and dicts are converted recursively.
// Unsupported values yield nil.
func toNativeValue(v starlark.Value) interface{} {
	if scalar, ok := toScalarMaybe(v); ok {
		return scalar
	}
	switch x := v.(type) {
	case *starlark.List:
		out := make([]interface{}, x.Len())
		for i := range out {
			out[i] = toNativeValue(x.Index(i))
		}
		return out
	case *starlark.Dict:
		out := make(map[string]interface{})
		for _, key := range x.Keys() {
			val, _, _ := x.Get(key)
			out[toStr(key)] = toNativeValue(val)
		}
		return out
	}
	return nil
}
// toIndexMaybe converts v to an *Index: either a list of scalars (wrapped
// into a new object index) or an existing *Index value.
func toIndexMaybe(v starlark.Value) (*Index, bool) {
	if objs := toInterfaceSliceOrNil(v); objs != nil {
		return NewObjIndex(objs, ""), true
	}
	index, ok := v.(*Index)
	return index, ok
}
// numToInt coerces a native int or float64 to int (floats truncate);
// any other type yields 0.
func numToInt(elem interface{}) int {
	switch n := elem.(type) {
	case int:
		return n
	case float64:
		return int(n)
	default:
		return 0
	}
}
// timeToInt converts a starlark time to integer nanoseconds since the
// Unix epoch. Whole seconds only: sub-second precision is dropped, and the
// multiplication can overflow for times far from the epoch.
func timeToInt(t time.Time) int {
	seconds := gotime.Time(t).Unix()
	return int(seconds * 1000000000)
}
// intTimestampToString renders an epoch-nanosecond timestamp as a UTC
// "YYYY-MM-DD HH:MM:SS" string, dropping the clock portion at midnight.
func intTimestampToString(n int) string {
	secs := int64(n / 1000000000)
	formatted := gotime.Unix(secs, 0).UTC().Format("2006-01-02 15:04:05")
	return strings.TrimSuffix(formatted, " 00:00:00")
}

// intTimedeltaToString renders a nanosecond duration as
// "<days> days HH:MM:SS".
func intTimedeltaToString(n int) string {
	days := int(math.Floor(gotime.Duration(n).Hours() / 24))
	// The clock-of-day part is the duration modulo 24h, formatted in UTC.
	clock := gotime.Unix(int64(n/1000000000), 0).UTC().Format("15:04:05")
	return fmt.Sprintf("%d days %s", days, clock)
}

// stringifyFloat renders a float with one digit after the decimal point.
func stringifyFloat(f float64) string {
	return fmt.Sprintf("%.1f", f)
}
// convert a list of ints to a list of floats
func convertIntsToFloats(vals []int) []float64 {
	out := make([]float64, len(vals))
	for i, n := range vals {
		out[i] = float64(n)
	}
	return out
}

// convert a list of ints to a list of objects
func convertIntsToObjects(vals []int) []interface{} {
	out := make([]interface{}, len(vals))
	for i, n := range vals {
		out[i] = n
	}
	return out
}

// convert a list of bools, represented as ints, to a list of objects
func convertBoolsToObjects(vals []int) []interface{} {
	out := make([]interface{}, len(vals))
	for i, n := range vals {
		out[i] = n != 0
	}
	return out
}

// convert a list of floats to a list of objects
func convertFloatsToObjects(vals []float64) []interface{} {
	out := make([]interface{}, len(vals))
	for i, f := range vals {
		out[i] = f
	}
	return out
}

// convert a list of strings to a list of objects
func convertStringsToObjects(vals []string) []interface{} {
	out := make([]interface{}, len(vals))
	for i, s := range vals {
		out[i] = s
	}
	return out
}
// convert one of the supported go native data types into a starlark value
//
// Supported types are int, bool, float64, and string; anything else yields
// None plus an error naming the offending type.
func convertToStarlark(it interface{}) (starlark.Value, error) {
	switch x := it.(type) {
	case int:
		return starlark.MakeInt(x), nil
	case bool:
		if x {
			return starlark.True, nil
		}
		return starlark.False, nil
	case float64:
		return starlark.Float(x), nil
	case string:
		return starlark.String(x), nil
	}
	return starlark.None, fmt.Errorf("unknown type of %v", reflect.TypeOf(it))
}
package main
import (
"log"
"strings"
"github.com/charmbracelet/bubbles/viewport"
tea "github.com/charmbracelet/bubbletea"
"github.com/evertras/bubble-data-tree/datatree"
)
// Trainer is the sample record rendered by the demo: one Pokémon trainer
// and their team.
type Trainer struct {
	Name        string
	Age         int
	Hometown    string
	Description string
	Pokemon     []Pokemon
}

// Pokemon is one team member of a Trainer.
type Pokemon struct {
	Age         int
	Name        string
	Description string
	Types       []string
}

// Model is the Bubble Tea model for this example: the rendered data tree
// plus a viewport that scrolls it.
type Model struct {
	simpleTree datatree.Model
	viewport   viewport.Model
	ready      bool // set once the first WindowSizeMsg has sized the viewport
}
// NewModel builds the demo model: a hard-coded trainer with four Pokémon,
// wrapped in a data tree. The viewport is created later, on the first
// WindowSizeMsg (see Update).
func NewModel() Model {
	pikachu := Pokemon{
		Name: "Pikachu",
		Description: `Pikachu is a fictional species in the Pokémon media franchise. Designed by <NAME> and <NAME>, Pikachu first appeared in the 1996 Japanese video games Pokémon Red and Green created by Game Freak and Nintendo, which were released outside of Japan in 1998 as Pokémon Red and Blue. Pikachu is a yellow, mouse-like creature with electrical abilities. It is a major character in the Pokémon franchise, serving as its mascot and as a major mascot for Nintendo.
Pikachu is widely considered to be the most popular and well-known Pokémon species, largely due to its appearance in the Pokémon anime television series as the companion of protagonist Ash Ketchum. In most vocalized appearances Pikachu is voiced by Ikue Ōtani, though it has been portrayed by other actors, notably Ryan Reynolds in the live-action animated film Pokémon Detective Pikachu. Pikachu has been well received by critics, with particular praise given for its cuteness, and has come to be regarded as an icon of Japanese pop culture.`,
		Types: []string{"Electric"},
	}
	pidgey := Pokemon{
		Name:        "Pidgey",
		Description: "Very docile.\nIf attacked, it will often kick up sand to protect itself rather than fight back.",
		Types:       []string{"Normal", "Flying"},
	}
	torterra := Pokemon{
		Name:        "Torterra",
		Description: "ちいさな ポケモンたちが あつまり うごかない ドダイトスの せなかで すづくりを はじめることがある。 (『ポケットモンスター ブリリアントダイヤモンド』より)",
		Types:       []string{"Grass", "Ground"},
	}
	dragonite := Pokemon{
		Age:         3,
		Name:        "Dragonite",
		Description: "Dragonite is a draconic, bipedal Pokémon with light orange skin.",
		Types:       []string{"Dragon", "Flying"},
	}
	ash := Trainer{
		Name:     "サトシ",
		Age:      14,
		Hometown: "Pallet Town",
		Description: `Ash Ketchum (Japanese: サトシ Satoshi) is the main character of the Pokémon anime. He is also the main character of various manga based on the anime, including The Electric Tale of Pikachu, Ash & Pikachu, and Pocket Monsters Diamond & Pearl.
He is a Pokémon Trainer from Pallet Town whose goal is to become a Pokémon Master. His starter Pokémon was a Pikachu that he received from Professor Oak after arriving late at his laboratory. In Pokémon the Series: Sun & Moon, he becomes the first Champion of the Alola region's Pokémon League.`,
		Pokemon: []Pokemon{pikachu, pidgey, torterra, dragonite},
	}

	return Model{
		simpleTree: datatree.New(ash),
	}
}
// Init implements tea.Model; no initial command is needed.
func (m Model) Init() tea.Cmd {
	return nil
}

// Update implements tea.Model. It forwards every message to the embedded
// tree, quits on q/esc/ctrl+c, and creates or resizes the viewport when a
// window-size message arrives.
func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var (
		cmd  tea.Cmd
		cmds []tea.Cmd
	)

	m.simpleTree, cmd = m.simpleTree.Update(msg)
	cmds = append(cmds, cmd)

	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "ctrl+c", "esc", "q":
			cmds = append(cmds, tea.Quit)
		}

	case tea.WindowSizeMsg:
		m.simpleTree = m.simpleTree.WithWidth(msg.Width)

		// The help message at top
		const headerHeight = 3

		if !m.ready {
			// First size message: create the viewport and fill it with the
			// rendered tree. High-performance rendering requires keeping
			// it in sync via viewport.Sync below.
			m.viewport = viewport.New(msg.Width, msg.Height-headerHeight)
			m.viewport.HighPerformanceRendering = true
			m.viewport.SetContent(m.simpleTree.View())
			m.viewport.YPosition = headerHeight
			m.ready = true
		} else {
			// Subsequent resizes just adjust the existing viewport.
			m.viewport.Width = msg.Width
			m.viewport.Height = msg.Height - headerHeight
		}

		cmds = append(cmds, viewport.Sync(m.viewport))
	}

	m.viewport, cmd = m.viewport.Update(msg)
	cmds = append(cmds, cmd)

	return m, tea.Batch(cmds...)
}
func (m Model) View() string {
body := strings.Builder{}
body.WriteString("Data tree using a viewport for lots of data\nPress up/down to scroll, q or ctrl+c to quit\n\n")
body.WriteString(m.viewport.View())
return body.String()
}
// main wires the model into a Bubble Tea program using the alternate
// screen and cell-motion mouse tracking, exiting fatally on any error.
func main() {
	program := tea.NewProgram(
		NewModel(),
		tea.WithAltScreen(),
		tea.WithMouseCellMotion(),
	)
	if err := program.Start(); err != nil {
		log.Fatal(err)
	}
}
package contextionary
import (
"fmt"
annoy "github.com/creativesoftwarefdn/weaviate/contextionary/annoyindex"
)
// mmappedIndex is a Contextionary backed by a memory-mapped Annoy k-NN
// index and its accompanying wordlist.
type mmappedIndex struct {
	word_index *Wordlist
	knn        annoy.AnnoyIndex
}

// GetNumberOfItems returns the number of words in the wordlist.
func (m *mmappedIndex) GetNumberOfItems() int {
	return int(m.word_index.numberOfWords)
}

// GetVectorLength returns the length of the used vectors.
func (m *mmappedIndex) GetVectorLength() int {
	return int(m.word_index.vectorWidth)
}

// WordToItemIndex looks up the item index of the given word in the
// wordlist.
func (m *mmappedIndex) WordToItemIndex(word string) ItemIndex {
	return m.word_index.FindIndexByWord(word)
}
// ItemIndexToWord resolves an item index back to its word, or returns an
// error if the index is out of bounds.
//
// NOTE(review): the upper-bound checks in these methods use
// `<= GetNumberOfWords()`. If that method returns the word *count* (as its
// name suggests), valid indices are 0..count-1 and this is off by one —
// confirm against Wordlist before tightening.
func (m *mmappedIndex) ItemIndexToWord(item ItemIndex) (string, error) {
	if item < 0 || item > m.word_index.GetNumberOfWords() {
		return "", fmt.Errorf("Index out of bounds")
	}
	return m.word_index.getWord(item), nil
}

// GetVectorForItemIndex returns the vector stored for the given item, or an
// error if the index is out of bounds.
func (m *mmappedIndex) GetVectorForItemIndex(item ItemIndex) (*Vector, error) {
	if item < 0 || item > m.word_index.GetNumberOfWords() {
		return nil, fmt.Errorf("Index out of bounds")
	}
	var floats []float32
	m.knn.GetItem(int(item), &floats)
	return &Vector{floats}, nil
}

// GetDistance computes the distance between two items, or returns an error
// if either index is out of bounds.
func (m *mmappedIndex) GetDistance(a ItemIndex, b ItemIndex) (float32, error) {
	n := m.word_index.GetNumberOfWords()
	if a < 0 || b < 0 || a > n || b > n {
		return 0, fmt.Errorf("Index out of bounds")
	}
	return m.knn.GetDistance(int(a), int(b)), nil
}
// GetNnsByItem returns the n nearest neighbors of the given item, searching
// k nodes, as item indices plus distances. It errors if the index is out of
// bounds. (Rewritten with early returns; the bounds check matches the
// original — see the NOTE on ItemIndexToWord.)
func (m *mmappedIndex) GetNnsByItem(item ItemIndex, n int, k int) ([]ItemIndex, []float32, error) {
	if item < 0 || item > m.word_index.GetNumberOfWords() {
		return nil, nil, fmt.Errorf("Index out of bounds")
	}
	var items []int
	var distances []float32
	m.knn.GetNnsByItem(int(item), n, k, &items, &distances)
	// Convert the raw ints into typed item indices.
	indices := make([]ItemIndex, len(items))
	for i, x := range items {
		indices[i] = ItemIndex(x)
	}
	return indices, distances, nil
}

// GetNnsByVector returns the n nearest neighbors of the given query vector,
// searching k nodes. It errors if the vector length does not match the
// index's vector width.
func (m *mmappedIndex) GetNnsByVector(vector Vector, n int, k int) ([]ItemIndex, []float32, error) {
	if len(vector.vector) != m.GetVectorLength() {
		return nil, nil, fmt.Errorf("Wrong vector length provided")
	}
	var items []int
	var distances []float32
	m.knn.GetNnsByVector(vector.vector, n, k, &items, &distances)
	// Convert the raw ints into typed item indices.
	indices := make([]ItemIndex, len(items))
	for i, x := range items {
		indices[i] = ItemIndex(x)
	}
	return indices, distances, nil
}
// LoadVectorFromDisk memory-maps a prebuilt Annoy index together with its
// wordlist and returns them as a Contextionary.
//
// NOTE(review): the result of knn.Load is ignored here, as in the original;
// a failed index load is not detected at this point.
func LoadVectorFromDisk(annoy_index string, word_index_file_name string) (Contextionary, error) {
	word_index, err := LoadWordlist(word_index_file_name)
	if err != nil {
		// %w keeps the underlying error available to errors.Is/As.
		return nil, fmt.Errorf("could not load vector: %w", err)
	}

	knn := annoy.NewAnnoyIndexEuclidean(int(word_index.vectorWidth))
	knn.Load(annoy_index)

	return &mmappedIndex{
		word_index: word_index,
		knn:        knn,
	}, nil
}
package tlv
import (
"encoding/binary"
"errors"
"math"
"github.com/named-data/YaNFD/ndn/util"
)
// EncodeVarNum encodes a non-negative integer value for encoding.
func EncodeVarNum(in uint64) []byte {
if in <= 0xFC {
// This is just here to avoid having to write this condition in every other conditional.
return []byte{byte(in)}
} else if in <= 0xFFFF {
bytes := make([]byte, 3)
bytes[0] = 0xFD
binary.BigEndian.PutUint16(bytes[1:], uint16(in))
return bytes
} else if in <= 0xFFFFFFFF {
bytes := make([]byte, 5)
bytes[0] = 0xFE
binary.BigEndian.PutUint32(bytes[1:], uint32(in))
return bytes
} else {
bytes := make([]byte, 9)
bytes[0] = 0xFF
binary.BigEndian.PutUint64(bytes[1:], in)
return bytes
}
}
// DecodeVarNum decodes a non-negative integer value from a wire value.
// It returns the value, the number of bytes consumed, and an error if the
// buffer is too short for the width indicated by the first byte.
func DecodeVarNum(in []byte) (uint64, int, error) {
	if len(in) == 0 {
		return 0, 0, util.ErrTooShort
	}
	switch {
	case in[0] <= 0xFC:
		return uint64(in[0]), 1, nil
	case in[0] == 0xFD:
		if len(in) < 3 {
			return 0, 0, util.ErrTooShort
		}
		return uint64(binary.BigEndian.Uint16(in[1:3])), 3, nil
	case in[0] == 0xFE:
		if len(in) < 5 {
			return 0, 0, util.ErrTooShort
		}
		return uint64(binary.BigEndian.Uint32(in[1:5])), 5, nil
	default: // Must be 0xFF
		if len(in) < 9 {
			return 0, 0, util.ErrTooShort
		}
		return binary.BigEndian.Uint64(in[1:9]), 9, nil
	}
}
// EncodeNNI encodes a non-negative integer as an NDN TLV value: the
// smallest of 1, 2, 4 or 8 big-endian bytes that can hold v.
func EncodeNNI(v uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, v)
	// Slice off the leading zero bytes down to the next allowed width.
	switch {
	case v <= math.MaxUint8:
		return buf[7:]
	case v <= math.MaxUint16:
		return buf[6:]
	case v <= math.MaxUint32:
		return buf[4:]
	default:
		return buf
	}
}
// EncodeNNIBlock wraps a non-negative integer in a Block of the given TLV
// type, using the smallest of 1, 2, 4 or 8 big-endian bytes for the value.
func EncodeNNIBlock(t uint32, v uint64) *Block {
	b := new(Block)
	b.SetType(t)
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, v)
	switch {
	case v <= math.MaxUint8:
		b.SetValue(buf[7:])
	case v <= math.MaxUint16:
		b.SetValue(buf[6:])
	case v <= math.MaxUint32:
		b.SetValue(buf[4:])
	default:
		b.SetValue(buf)
	}
	return b
}
// GetNNIBlockSize returns the encoded size of a non-negative-integer block:
// the variable-size TLV type plus the 1-, 2-, 4- or 8-byte value.
// NOTE(review): the TLV length octet does not appear to be counted here —
// confirm callers add it themselves.
func GetNNIBlockSize(t uint32, v uint64) int {
	typeLen := len(EncodeVarNum(uint64(t)))
	switch {
	case v <= math.MaxUint8:
		return typeLen + 1
	case v <= math.MaxUint16:
		return typeLen + 2
	case v <= math.MaxUint32:
		return typeLen + 4
	default:
		return typeLen + 8
	}
}
// DecodeNNIBlock decodes a non-negative integer from the value of wire.
// It returns util.ErrNonExistent for a nil block, ErrBufferTooShort for an
// empty value, and util.ErrTooLong for a value wider than 8 bytes.
func DecodeNNIBlock(wire *Block) (uint64, error) {
	if wire == nil {
		return 0, util.ErrNonExistent
	}
	v := wire.Value()
	if len(v) < 1 {
		return 0, ErrBufferTooShort
	}
	if len(v) > 8 {
		return 0, util.ErrTooLong
	}
	// Left-pad into a full 8-byte buffer so it can be read as a uint64.
	buf := make([]byte, 8)
	copy(buf[8-len(v):], v)
	return binary.BigEndian.Uint64(buf), nil
}
// DecodeNNI decodes a non-negative integer from a TLV value slice of 1 to 8
// bytes, interpreted big-endian.
func DecodeNNI(value []byte) (uint64, error) {
	switch {
	case len(value) == 0:
		return 0, util.ErrTooShort
	case len(value) > 8:
		return 0, util.ErrTooLong
	}
	// Left-pad to 8 bytes before reading as a uint64.
	padded := make([]byte, 8)
	copy(padded[8-len(value):], value)
	return binary.BigEndian.Uint64(padded), nil
}
// DecodeTypeLength decodes the TLV type, TLV length, and total size of the block from a byte slice.
func DecodeTypeLength(bytes []byte) (uint32, int, int, error) {
var tlvType uint64
var tlvLength uint64
tlvType, tlvTypeSize, err := DecodeVarNum(bytes)
if err != nil {
return 0, 0, 0, err
} else if tlvType > math.MaxUint32 {
return 0, 0, 0, errors.New("TLV type out of range")
}
tlvLength, tlvLengthSize, err := DecodeVarNum(bytes[tlvTypeSize:])
if err != nil {
return 0, 0, 0, err
}
return uint32(tlvType), int(tlvLength), tlvTypeSize + tlvLengthSize + int(tlvLength), nil
} | ndn/tlv/helpers.go | 0.679072 | 0.41567 | helpers.go | starcoder |
package types
import (
"math"
"reflect"
"github.com/lyraproj/puppet-evaluator/eval"
"github.com/lyraproj/puppet-evaluator/utils"
)
// commonType returns the most specific type that both a and b are assignable
// to. Direct assignability is checked first; failing that, mergeable types
// are combined; as a last resort the shared abstract type (Numeric,
// ScalarData, Scalar, Data, RichData) or Any is returned.
func commonType(a eval.Type, b eval.Type) eval.Type {
	if isAssignable(a, b) {
		// b is assignable to a, so a already covers both.
		return a
	}
	if isAssignable(b, a) {
		// a is assignable to b, so b covers both. (Fixed: this branch
		// previously returned a, contradicting the documented contract.)
		return b
	}
	// Merge compatible types of different kinds: an Enum absorbs a
	// non-empty String value.
	switch at := a.(type) {
	case *EnumType:
		if bs, ok := b.(*StringType); ok && bs.value != `` {
			return NewEnumType(utils.Unique(append(at.values, bs.value)), at.caseInsensitive)
		}
	case *StringType:
		if _, ok := b.(*EnumType); ok {
			// Normalize to the Enum-on-the-left case above.
			return commonType(b, a)
		}
	}
	// Merge types of the same concrete kind.
	if reflect.TypeOf(a) == reflect.TypeOf(b) {
		switch at := a.(type) {
		case *ArrayType:
			bt := b.(*ArrayType)
			return NewArrayType(commonType(at.typ, bt.typ), commonType(at.size, bt.size).(*IntegerType))
		case *EnumType:
			bt := b.(*EnumType)
			return NewEnumType(utils.Unique(append(at.values, bt.values...)), at.caseInsensitive || bt.caseInsensitive)
		case *FloatType:
			bt := b.(*FloatType)
			return NewFloatType(math.Min(at.min, bt.min), math.Max(at.max, bt.max))
		case *IntegerType:
			bt := b.(*IntegerType)
			min := at.min
			if bt.min < min {
				min = bt.min
			}
			max := at.max
			if bt.max > max {
				max = bt.max
			}
			return NewIntegerType(min, max)
		case *IterableType:
			bt := b.(*IterableType)
			return NewIterableType(commonType(at.ElementType(), bt.ElementType()))
		case *IteratorType:
			bt := b.(*IteratorType)
			return NewIteratorType(commonType(at.ElementType(), bt.ElementType()))
		case *NotUndefType:
			bt := b.(*NotUndefType)
			return NewNotUndefType(commonType(at.ContainedType(), bt.ContainedType()))
		case *PatternType:
			bt := b.(*PatternType)
			return NewPatternType(UniqueRegexps(append(at.regexps, bt.regexps...)))
		case *RuntimeType:
			bt := b.(*RuntimeType)
			if at.runtime == bt.runtime {
				return NewRuntimeType(at.runtime, ``, nil)
			}
			return DefaultRuntimeType()
		case *StringType:
			bt := b.(*StringType)
			if at.value == `` || bt.value == `` {
				return NewStringType(commonType(at.size, bt.size).(*IntegerType), ``)
			}
			return NewEnumType([]string{at.value, bt.value}, false)
		case *TupleType:
			bt := b.(*TupleType)
			return NewArrayType(commonType(at.CommonElementType(), bt.CommonElementType()), commonType(at.Size(), bt.Size()).(*IntegerType))
		case *TypeType:
			bt := b.(*TypeType)
			return NewTypeType(commonType(at.ContainedType(), bt.ContainedType()))
		case *VariantType:
			bt := b.(*VariantType)
			return NewVariantType(UniqueTypes(append(at.Types(), bt.Types()...))...)
		}
	}
	// Fall back through the abstract type hierarchy, most specific first.
	if isCommonNumeric(a, b) {
		return numericType_DEFAULT
	}
	if isCommonScalarData(a, b) {
		return scalarDataType_DEFAULT
	}
	if isCommonScalar(a, b) {
		return scalarType_DEFAULT
	}
	if isCommonData(a, b) {
		return dataType_DEFAULT
	}
	if isCommonRichData(a, b) {
		return richDataType_DEFAULT
	}
	return anyType_DEFAULT
}
// bothAssignable reports whether both a and b are assignable to t.
func bothAssignable(t eval.Type, a eval.Type, b eval.Type) bool {
	return isAssignable(t, a) && isAssignable(t, b)
}
// isCommonNumeric reports whether Numeric covers both a and b.
func isCommonNumeric(a eval.Type, b eval.Type) bool {
	return bothAssignable(numericType_DEFAULT, a, b)
}
// isCommonScalarData reports whether ScalarData covers both a and b.
func isCommonScalarData(a eval.Type, b eval.Type) bool {
	return bothAssignable(scalarDataType_DEFAULT, a, b)
}
// isCommonScalar reports whether Scalar covers both a and b.
func isCommonScalar(a eval.Type, b eval.Type) bool {
	return bothAssignable(scalarType_DEFAULT, a, b)
}
// isCommonData reports whether Data covers both a and b.
func isCommonData(a eval.Type, b eval.Type) bool {
	return bothAssignable(dataType_DEFAULT, a, b)
}
// isCommonRichData reports whether RichData covers both a and b.
func isCommonRichData(a eval.Type, b eval.Type) bool {
	return bothAssignable(richDataType_DEFAULT, a, b)
}
// init registers this package's commonType implementation as the eval
// package's CommonType hook, making it reachable through eval.CommonType.
func init() {
	eval.CommonType = commonType
}
package validate
import (
"fmt"
"reflect"
)
// InputAndOutputSig compares the argument and return signatures of actualFunc
// against expectedFunc. It returns a descriptive error on the first mismatch,
// or nil when everything matches.
func InputAndOutputSig(actualFunc, expectedFunc reflect.Type) error {
	// Inputs are validated first so callers see argument mismatches before
	// return-value mismatches.
	if err := InputSig(actualFunc, expectedFunc); err != nil {
		return err
	}
	return outputSig(actualFunc, expectedFunc)
}
// InputSig compares the argument signatures of actualFunc
// against expectedFunc. It returns an error unless everything matches:
// same arity, and each argument compatible per arg/variadicArg.
func InputSig(actualFunc, expectedFunc reflect.Type) error {
	// check number of arguments and type of each argument
	if actualFunc.NumIn() != expectedFunc.NumIn() {
		return fmt.Errorf(
			"expected function to have %d arguments not %d",
			expectedFunc.NumIn(), actualFunc.NumIn())
	}
	lastIdx := expectedFunc.NumIn()
	// If the function has a variadic argument validate that one first so that
	// we aren't checking for it while we iterate over the other args
	if expectedFunc.IsVariadic() {
		if ok := variadicArg(lastIdx, actualFunc, expectedFunc); !ok {
			i := lastIdx - 1
			return fmt.Errorf(
				"expected function to have"+
					" arg of type %v at position %d"+
					" not type %v",
				expectedFunc.In(i), i, actualFunc.In(i),
			)
		}
		// Exclude the already-validated variadic slot from the loop below.
		lastIdx--
	}
	// Validate the remaining (non-variadic) arguments positionally.
	for i := 0; i < lastIdx; i++ {
		expectedArg := expectedFunc.In(i)
		actualArg := actualFunc.In(i)
		if err := arg(actualArg, expectedArg); err != nil {
			return fmt.Errorf("input argument at %d: %s", i, err)
		}
	}
	return nil
}
// outputSig compares the return-value signatures of actualFunc against
// expectedFunc: same count, and each value compatible per arg.
func outputSig(actualFunc, expectedFunc reflect.Type) error {
	if actualFunc.NumOut() != expectedFunc.NumOut() {
		return fmt.Errorf(
			"expected function to have %d return vals not %d",
			expectedFunc.NumOut(), actualFunc.NumOut())
	}
	for i := 0; i < expectedFunc.NumOut(); i++ {
		if err := arg(actualFunc.Out(i), expectedFunc.Out(i)); err != nil {
			return fmt.Errorf("return argument at %d: %s", i, err)
		}
	}
	return nil
}
// variadicArg reports whether the variadic slot (at index lastIdx-1) of
// actualFunc is compatible with expectedFunc's. Identical types match
// trivially; otherwise the actual slot must be a slice whose element type is
// convertible to the expected slice's interface element type.
func variadicArg(lastIdx int, actualFunc, expectedFunc reflect.Type) bool {
	if actualFunc.In(lastIdx-1) != expectedFunc.In(lastIdx-1) {
		if actualFunc.In(lastIdx-1).Kind() != reflect.Slice {
			return false
		}
		expectedArgT := expectedFunc.In(lastIdx - 1)
		expectedElem := expectedArgT.Elem()
		// Only slice-of-interface expectations admit a differing actual type.
		if expectedElem.Kind() != reflect.Interface {
			return false
		}
		actualArgT := actualFunc.In(lastIdx - 1)
		actualElem := actualArgT.Elem()
		if ok := actualElem.ConvertibleTo(expectedElem); !ok {
			return false
		}
	}
	return true
}
func interfaceArg(actualArg, expectedArg reflect.Type) error {
if !actualArg.ConvertibleTo(expectedArg) {
return fmt.Errorf(
"expected arg convertible to type %v not type %v",
expectedArg, actualArg,
)
}
return nil
}
func mapArg(actualArg, expectedArg reflect.Type) error {
expectedKey := expectedArg.Key()
actualKey := actualArg.Key()
switch expectedKey.Kind() {
case reflect.Interface:
if err := interfaceArg(actualKey, expectedKey); err != nil {
return fmt.Errorf("map key: %s", err)
}
default:
if actualKey != expectedKey {
return fmt.Errorf("expected map key of type %v not type %v",
expectedKey, actualKey)
}
}
expectedElem := expectedArg.Elem()
actualElem := actualArg.Elem()
switch expectedElem.Kind() {
case reflect.Interface:
if err := interfaceArg(actualElem, expectedElem); err != nil {
return fmt.Errorf("map element: %s", err)
}
default:
if actualElem != expectedElem {
return fmt.Errorf("expected map element of type %v not type %v",
expectedElem, actualElem)
}
}
return nil
}
// arg checks a single (non-map-component) argument or return value:
// interface expectations require convertibility, maps recurse into mapArg,
// and all other kinds must match exactly.
func arg(actualArg, expectedArg reflect.Type) error {
	switch expectedArg.Kind() {
	// If the expected arg is an interface we only care if the actual arg is convertible
	// to that interface
	case reflect.Interface:
		if err := interfaceArg(actualArg, expectedArg); err != nil {
			return err
		}
	default:
		// If the expected arg is not an interface then first check to see if
		// the actual arg is even the same reflect.Kind
		if expectedArg.Kind() != actualArg.Kind() {
			return fmt.Errorf("expected arg of kind %v not %v",
				expectedArg.Kind(), actualArg.Kind())
		}
		switch expectedArg.Kind() {
		// If the expected arg is a map then we need to handle the case where
		// the map key or element type is an interface
		case reflect.Map:
			if err := mapArg(actualArg, expectedArg); err != nil {
				return err
			}
		default:
			// NOTE(review): this message starts with a capital letter, unlike
			// every other error in this file — confirm whether anything
			// matches on the exact text before normalizing it.
			if actualArg != expectedArg {
				return fmt.Errorf(
					"Expected arg of type %v not type %v",
					expectedArg, actualArg,
				)
			}
		}
	}
	return nil
}
package model
import "encoding/json"
// Series represents time series: a run of float64 values sampled at a fixed
// interval starting from a given timestamp.
type Series struct {
	name string
	values []float64
	start int64 // timestamp of start.
	step int // the interval seconds of values.
	alias string // display name override; empty means use name.
}
// NewSeries returns a Series with the given name, values, start timestamp
// and step (interval in seconds between consecutive values).
func NewSeries(name string, values []float64, start int64, step int) *Series {
	return &Series{
		name: name,
		values: values,
		start: start,
		step: step,
	}
}
// Name returns the name.
func (s *Series) Name() string {
	return s.name
}
// Values returns the values.
func (s *Series) Values() []float64 {
	return s.values
}
// Start returns the start timestamp.
func (s *Series) Start() int64 {
	return s.start
}
// End returns the timestamp of the last value, i.e.
// Start() + Step()*(Len()-1). It returns -1 as a sentinel when the series
// is empty.
func (s *Series) End() int64 {
	if s.Len() == 0 {
		return -1
	}
	return s.Start() + int64(s.Step()*(s.Len()-1))
}
// Step returns the step (seconds between consecutive values).
func (s *Series) Step() int {
	return s.step
}
// Len returns the number of values in the series.
func (s *Series) Len() int {
	return len(s.Values())
}
// SetName sets the name.
func (s *Series) SetName(name string) {
	s.name = name
}
// SetAlias sets the alias to a.
func (s *Series) SetAlias(a string) {
	s.alias = a
}
// SetAliasWith sets the alias to a and returns the series pointer,
// allowing chained calls.
func (s *Series) SetAliasWith(a string) *Series {
	s.alias = a
	return s
}
// Alias returns the alias, falling back to the name when no alias is set.
func (s *Series) Alias() string {
	if s.alias == "" {
		return s.Name()
	}
	return s.alias
}
// Points returns DataPoints converted from the series values: the i-th
// point carries timestamp Start() + i*Step().
//
// Iterating by index guarantees exactly Len() points and termination even
// when Step() is zero or negative, where a timestamp-driven loop bounded by
// End() would never advance.
func (s *Series) Points() DataPoints {
	n := s.Len()
	if n == 0 {
		return DataPoints{}
	}
	points := make(DataPoints, 0, n)
	vals := s.Values()
	ts := s.Start()
	step := int64(s.Step())
	for i := 0; i < n; i++ {
		points = append(points, NewDataPoint(ts, vals[i]))
		ts += step
	}
	return points
}
/*
An example of the JSON response produced by MarshalJSON, where each
datapoint is a [value, timestamp] pair:
{
    "target": "server1.cpu.softirq.percentage",
    "datapoints": [
        [
            0.244669050464,
            1474725188
        ],
        [
            0.236104685209,
            1474725248
        ]
    ]
}
*/
// jsonMarshallableSeries represents the JSON response structure for Series.
type jsonMarshallableSeries struct {
	Target string `json:"target"`
	DataPoints DataPoints `json:"datapoints"`
}
// MarshalJSON marshals Series as JSON, exposing the alias (or name) as
// "target" and the expanded data points as "datapoints".
func (s *Series) MarshalJSON() ([]byte, error) {
	return json.Marshal(&jsonMarshallableSeries{
		Target: s.Alias(),
		DataPoints: s.Points(),
	})
}
package effect
import (
"github.com/df-mc/dragonfly/dragonfly/entity"
"image/color"
"time"
)
// instantEffect forms the base of an instant effect: one that takes effect
// immediately, has no duration, and displays no particles.
type instantEffect struct {
	// Lvl holds the level of the effect. A higher level results in a more powerful effect, whereas a negative
	// level will generally invert the effect.
	Lvl int
}
// Instant always returns true for instant effects.
func (instantEffect) Instant() bool {
	return true
}
// Level returns the level of the instant effect.
func (i instantEffect) Level() int {
	return i.Lvl
}
// Duration always returns 0 for instant effects.
func (instantEffect) Duration() time.Duration {
	return 0
}
// ShowParticles always returns false for instant effects.
func (instantEffect) ShowParticles() bool {
	return false
}
// AmbientSource always returns false for instant effects.
func (instantEffect) AmbientSource() bool {
	return false
}
// RGBA always returns an empty color.RGBA: instant effects contribute no
// colour.
func (instantEffect) RGBA() color.RGBA {
	return color.RGBA{}
}
// End is a no-op for instant effects.
func (instantEffect) End(entity.Living) {}
// Start is a no-op for instant effects.
func (instantEffect) Start(entity.Living) {}
// lastingEffect forms the base of an effect that lasts for a specific duration.
type lastingEffect struct {
	// Lvl holds the level of the effect. A higher level results in a more powerful effect, whereas a negative
	// level will generally invert the effect.
	Lvl int
	// Dur holds the duration of the effect. One will be subtracted every time the entity that the effect is
	// added to is ticked.
	Dur time.Duration
	// HideParticles hides the coloured particles of the effect when added to an entity.
	HideParticles bool
	// Ambient specifies if the effect comes from an ambient source, such as from a beacon or conduit. The
	// particles displayed when Ambient is true are less visible.
	Ambient bool
}
// Instant always returns false for lasting effects.
func (lastingEffect) Instant() bool {
	return false
}
// Level returns the level of the lasting effect.
func (l lastingEffect) Level() int {
	return l.Lvl
}
// Duration returns the leftover duration of the lasting effect.
func (l lastingEffect) Duration() time.Duration {
	return l.Dur
}
// ShowParticles returns true unless HideParticles was set.
func (l lastingEffect) ShowParticles() bool {
	return !l.HideParticles
}
// AmbientSource specifies if the effect comes from a beacon or conduit.
func (l lastingEffect) AmbientSource() bool {
	return l.Ambient
}
// withSettings returns a copy of the effect with the given duration, level
// and ambient flag applied (the value receiver means l itself is a copy).
func (l lastingEffect) withSettings(d time.Duration, level int, ambient bool) lastingEffect {
	l.Dur = d
	l.Lvl = level
	l.Ambient = ambient
	return l
}
// End is a no-op in the base; concrete effects override it as needed.
func (lastingEffect) End(entity.Living) {}
// Start is a no-op in the base; concrete effects override it as needed.
func (lastingEffect) Start(entity.Living) {}
// Apply is a no-op in the base; concrete effects override it as needed.
func (lastingEffect) Apply(living entity.Living) {}
// tickDuration returns the duration as in-game ticks.
func tickDuration(d time.Duration) int {
return int(d / (time.Second / 20))
}
// ResultingColour calculates the resulting colour of the effects passed and returns a bool specifying if the
// effects were ambient effects, which will cause their particles to display less frequently.
func ResultingColour(effects []entity.Effect) (color.RGBA, bool) {
r, g, b, a := 0, 0, 0, 0
l := len(effects)
if l == 0 {
return color.RGBA{}, false
}
ambient := true
for _, e := range effects {
c := e.RGBA()
r += int(c.R)
g += int(c.G)
b += int(c.B)
a += int(c.A)
if !e.AmbientSource() {
ambient = false
}
}
return color.RGBA{R: uint8(r / l), G: uint8(g / l), B: uint8(b / l), A: uint8(a / l)}, ambient
} | dragonfly/entity/effect/effect.go | 0.855021 | 0.594286 | effect.go | starcoder |
package ql
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
)
// DataType represents the primitive data types available in samql.
type DataType int
const (
	// Float means the data type is a float.
	Float DataType = 0
	// Integer means the data type is an integer.
	Integer DataType = 1
	// Unsigned means the data type is an unsigned integer.
	Unsigned DataType = 2
	// String means the data type is a string of text.
	String DataType = 3
	// Boolean means the data type is a boolean.
	Boolean DataType = 4
	// AnyField means the data type is any field.
	AnyField DataType = 5
	// Unknown primitive data type.
	Unknown DataType = 6
)
// Boxed zero values for the primitive data types.
var (
	zeroFloat64 interface{} = float64(0)
	zeroInt64 interface{} = int64(0)
	zeroUint64 interface{} = uint64(0)
	// NOTE(review): zeroString is left nil rather than ""; a nil interface is
	// not the same as a boxed empty string — confirm this is intentional.
	zeroString interface{}
	zeroBoolean interface{} = false
)
// String returns the human-readable name of the DataType. Any value without
// a dedicated name (including Unknown) is reported as "unknown".
func (d DataType) String() string {
	name := "unknown"
	switch d {
	case Float:
		name = "float"
	case Integer:
		name = "integer"
	case Unsigned:
		name = "unsigned"
	case String:
		name = "string"
	case Boolean:
		name = "boolean"
	case AnyField:
		name = "field"
	}
	return name
}
// Named represents anything that has a name.
type Named interface {
	Name() string
}
// Node represents a node in the samql abstract syntax tree.
type Node interface {
	// node is unexported to ensure implementations of Node
	// can only originate in this package.
	node()
	String() string
}
// Marker-method implementations: the types that implement Node.
func (*SelectStatement) node() {}
func (*BinaryExpr) node() {}
func (*BooleanLiteral) node() {}
func (*Call) node() {}
func (*IntegerLiteral) node() {}
func (*UnsignedLiteral) node() {}
func (*Field) node() {}
func (Fields) node() {}
func (*Table) node() {}
func (*NilLiteral) node() {}
func (*NumberLiteral) node() {}
func (*ParenExpr) node() {}
func (*RegexLiteral) node() {}
func (*StringLiteral) node() {}
func (*VarRef) node() {}
func (*Wildcard) node() {}
// Statement represents a single command in samql.
type Statement interface {
	Node
	// stmt is unexported to ensure implementations of Statement
	// can only originate in this package.
	stmt()
}
// Marker-method implementations: the types that implement Statement.
func (*SelectStatement) stmt() {}
// Expr represents an expression that can be evaluated to a value.
type Expr interface {
	Node
	// expr is unexported to ensure implementations of Expr
	// can only originate in this package.
	expr()
}
// Marker-method implementations: the types that implement Expr.
func (*BinaryExpr) expr() {}
func (*BooleanLiteral) expr() {}
func (*Call) expr() {}
func (*IntegerLiteral) expr() {}
func (*UnsignedLiteral) expr() {}
func (*NilLiteral) expr() {}
func (*NumberLiteral) expr() {}
func (*ParenExpr) expr() {}
func (*RegexLiteral) expr() {}
func (*StringLiteral) expr() {}
func (*VarRef) expr() {}
func (*Wildcard) expr() {}
// Literal represents a static literal.
type Literal interface {
	Expr
	// literal is unexported to ensure implementations of Literal
	// can only originate in this package.
	literal()
}
// Marker-method implementations: the types that implement Literal.
func (*BooleanLiteral) literal() {}
func (*IntegerLiteral) literal() {}
func (*UnsignedLiteral) literal() {}
func (*NilLiteral) literal() {}
func (*NumberLiteral) literal() {}
func (*RegexLiteral) literal() {}
func (*StringLiteral) literal() {}
// Source represents a source of data for a statement.
type Source interface {
	Node
	// source is unexported to ensure implementations of Source
	// can only originate in this package.
	source()
}
// Marker-method implementations: the types that implement Source.
func (*Table) source() {}
// SelectStatement represents a command for extracting data from the database.
type SelectStatement struct {
	// Fields holds the expressions returned from the selection.
	Fields Fields
	// Source is the data source (table) that fields are extracted from.
	Source Source
	// Condition is the WHERE expression, evaluated on each data point.
	Condition Expr
}
// ColumnNames returns the output column name for each field of the select
// statement, in field order. Aliases win; remaining fields use their derived
// name, and duplicates are disambiguated with a "_<n>" suffix.
//
// The previous implementation first copied s.Fields element-by-element into
// a throwaway Fields slice; the fields are only read here, so they are now
// iterated directly.
func (s *SelectStatement) ColumnNames() []string {
	columnNames := make([]string, len(s.Fields))
	// Track every column name already claimed, mapping it to the next
	// suffix counter to try when resolving a conflict.
	names := make(map[string]int)
	// Resolve aliases first: an alias always keeps its exact spelling.
	for i, col := range s.Fields {
		if col.Alias != "" {
			columnNames[i] = col.Alias
			names[col.Alias] = 1
		}
	}
	// Resolve generated names, suffixing "_<n>" until the name is unique.
	for i, col := range s.Fields {
		if columnNames[i] != "" {
			continue
		}
		name := col.Name()
		count, conflict := names[name]
		if conflict {
			for {
				resolvedName := fmt.Sprintf("%s_%d", name, count)
				if _, conflict = names[resolvedName]; !conflict {
					names[name] = count + 1
					name = resolvedName
					break
				}
				count++
			}
		}
		names[name]++
		columnNames[i] = name
	}
	return columnNames
}
// String reassembles the statement as
// "SELECT <fields> [FROM <source>] [WHERE <condition>]".
func (s *SelectStatement) String() string {
	var buf bytes.Buffer
	// bytes.Buffer writes never return an error, so results are not checked.
	buf.WriteString("SELECT ")
	buf.WriteString(s.Fields.String())
	if s.Source != nil {
		buf.WriteString(" FROM ")
		buf.WriteString(s.Source.String())
	}
	if s.Condition != nil {
		buf.WriteString(" WHERE ")
		buf.WriteString(s.Condition.String())
	}
	return buf.String()
}
// Fields represents a list of fields.
type Fields []*Field
// Names returns the name of every field, in order.
func (fs Fields) Names() []string {
	names := make([]string, 0, len(fs))
	for _, f := range fs {
		names = append(names, f.Name())
	}
	return names
}
// String returns the fields rendered as a comma-separated list.
func (fs Fields) String() string {
	parts := make([]string, 0, len(fs))
	for _, f := range fs {
		parts = append(parts, f.String())
	}
	return strings.Join(parts, ", ")
}
// Field represents an expression retrieved from a select statement,
// optionally renamed via an alias.
type Field struct {
	Expr Expr
	Alias string
}
// Name returns the field's display name: the alias if set, otherwise the
// name of the underlying expression when it has one, otherwise "".
func (f *Field) Name() string {
	if f.Alias != "" {
		return f.Alias
	}
	if named, ok := f.Expr.(Named); ok {
		return named.Name()
	}
	return ""
}
// String renders the expression, appending " AS <alias>" when an alias is
// set.
func (f *Field) String() string {
	if f.Alias == "" {
		return f.Expr.String()
	}
	return fmt.Sprintf("%s AS %s", f.Expr.String(), quoteIdent(f.Alias))
}
// Table represents a data source.
type Table struct {
	Name string
}
// String returns the table name as a quoted identifier.
func (m *Table) String() string {
	return quoteIdent(m.Name)
}
// VarRef represents a reference to a variable.
type VarRef struct {
	Val string
	Type DataType
}
// String returns the variable name as a quoted identifier.
func (r *VarRef) String() string {
	return quoteIdent(r.Val)
}
// Name returns the raw (unquoted) variable name.
func (r *VarRef) Name() string {
	return r.Val
}
// Call represents a function call.
type Call struct {
	Cmd string
	Args []Expr
}
// String renders the call as "name(arg1, arg2, ...)".
func (c *Call) String() string {
	args := make([]string, len(c.Args))
	for i, a := range c.Args {
		args[i] = a.String()
	}
	return fmt.Sprintf("%s(%s)", c.Cmd, strings.Join(args, ", "))
}
// Name returns the name of the called function.
func (c *Call) Name() string {
	return c.Cmd
}
// NumberLiteral represents a numeric float64 literal.
type NumberLiteral struct {
Val float64
}
// String returns a string representation of the literal.
func (l *NumberLiteral) String() string {
return strconv.FormatFloat(l.Val, 'f', 3, 64)
}
// IntegerLiteral represents an integer literal.
type IntegerLiteral struct {
Val int64
}
// String returns a string representation of the literal.
func (l *IntegerLiteral) String() string {
return fmt.Sprintf("%d", l.Val)
}
// UnsignedLiteral represents an unsigned integer literal. The parser will
// only use an unsigned literal if the parsed integer is greater than
// math.MaxInt64.
type UnsignedLiteral struct {
Val uint64
}
// String returns a string representation of the literal.
func (l *UnsignedLiteral) String() string {
return strconv.FormatUint(l.Val, 10)
}
// BooleanLiteral represents a boolean literal.
type BooleanLiteral struct {
Val bool
}
// String returns a string representation of the literal.
func (l *BooleanLiteral) String() string {
if l.Val {
return "true"
}
return "false"
}
// StringLiteral represents a string literal.
type StringLiteral struct {
Val string
}
// String returns a string representation of the literal.
func (l *StringLiteral) String() string {
return quoteString(l.Val)
}
// RegexLiteral represents a regular expression.
type RegexLiteral struct {
Val *regexp.Regexp
}
// String returns a string representation of the literal.
func (r *RegexLiteral) String() string {
if r.Val != nil {
return fmt.Sprintf("/%s/",
strings.Replace(r.Val.String(), `/`, `\/`, -1))
}
return ""
}
// NilLiteral represents a nil literal. This is not available to the query
// language itself. It's only used internally.
type NilLiteral struct{}
// String returns a string representation of the literal.
func (l *NilLiteral) String() string {
return `nil`
}
// BinaryExpr represents an operation between two expressions.
type BinaryExpr struct {
	Op Token
	LHS Expr
	RHS Expr
}
// String renders the expression as "<lhs> <op> <rhs>".
func (e *BinaryExpr) String() string {
	return e.LHS.String() + " " + e.Op.String() + " " + e.RHS.String()
}
// Name derives a name for the expression by joining, with underscores, the
// variable references and function names encountered in a depth-first walk.
// Descent stops below a Call so its arguments are not included.
func (e *BinaryExpr) Name() string {
	var parts []string
	WalkFunc(e, func(node Node) bool {
		switch v := node.(type) {
		case *VarRef:
			parts = append(parts, v.Val)
		case *Call:
			parts = append(parts, v.Cmd)
			return false
		}
		return true
	})
	return strings.Join(parts, "_")
}
// ParenExpr represents a parenthesized expression.
type ParenExpr struct {
	Expr Expr
}
// String returns the inner expression wrapped in parentheses.
func (e *ParenExpr) String() string {
	return "(" + e.Expr.String() + ")"
}
// Name returns the name of the inner expression when it has one, or "".
func (e *ParenExpr) Name() string {
	named, ok := e.Expr.(Named)
	if !ok {
		return ""
	}
	return named.Name()
}
// Wildcard represents a wild card expression.
type Wildcard struct {
	Type Token
}
// String always renders a wildcard as "*".
func (e *Wildcard) String() string {
	return "*"
}
// A Visitor is called by Walk to traverse an AST hierarchy. The visitor's
// Visit() function is called once per node; returning nil prunes the
// subtree below that node.
type Visitor interface {
	Visit(Node) Visitor
}
// Walk traverses a node hierarchy in depth-first order and calls the
// visitor's Visit function once per node. Traversing terminates when
// v.Visit() returns nil.
//
// Children are visited in a fixed order: a BinaryExpr's LHS before its RHS,
// and a SelectStatement's Fields, then Source, then Condition.
func Walk(v Visitor, node Node) {
	if node == nil {
		return
	}
	if v = v.Visit(node); v == nil {
		return
	}
	switch n := node.(type) {
	case *BinaryExpr:
		Walk(v, n.LHS)
		Walk(v, n.RHS)
	case *Call:
		for _, expr := range n.Args {
			Walk(v, expr)
		}
	case *Field:
		Walk(v, n.Expr)
	case Fields:
		for _, c := range n {
			Walk(v, c)
		}
	case *ParenExpr:
		Walk(v, n.Expr)
	case *SelectStatement:
		Walk(v, n.Fields)
		Walk(v, n.Source)
		Walk(v, n.Condition)
	}
}
// WalkFunc traverses a node hierarchy in depth-first order and calls fn at
// each node. Traversing terminates if fn returns false.
func WalkFunc(node Node, fn func(Node) bool) {
	Walk(walkFuncVisitor(fn), node)
}
// walkFuncVisitor wraps a Visit function.
type walkFuncVisitor func(Node) bool
// Visit applies fn to n and returns nil if traversing should stop or fn
// otherwise.
func (fn walkFuncVisitor) Visit(n Node) Visitor {
	if ok := fn(n); ok {
		return fn
	}
	return nil
}
package units
var (
Pressure = UnitOptionQuantity("pressure")
// SI unit metric
Pascal = NewUnit("pascal", "Pa", Pressure, SI)
ExaPascal = Exa(Pascal, FactorLinear)
PetaPascal = Peta(Pascal, FactorLinear)
TeraPascal = Tera(Pascal, FactorLinear)
GigaPascal = Giga(Pascal, FactorLinear)
MegaPascal = Mega(Pascal, FactorLinear)
KiloPascal = Kilo(Pascal, FactorLinear)
HectoPascal = Hecto(Pascal, FactorLinear)
DecaPascal = Deca(Pascal, FactorLinear)
DeciPascal = Deci(Pascal, FactorLinear)
CentiPascal = Centi(Pascal, FactorLinear)
MilliPascal = Milli(Pascal, FactorLinear)
MicroPascal = Micro(Pascal, FactorLinear)
NanoPascal = Nano(Pascal, FactorLinear)
PicoPascal = Pico(Pascal, FactorLinear)
FemtoPascal = Femto(Pascal, FactorLinear)
AttoPascal = Atto(Pascal, FactorLinear)
// Other
At = NewUnit("technical atmosphere", "at", Pressure, BI, UnitOptionPlural("technical atmospheres"))
Atm = NewUnit("standard atmosphere", "atm", Pressure, BI, UnitOptionPlural("standard atmospheres"))
Bar = NewUnit("bar", "bar", Pressure, BI, UnitOptionPlural("bars"))
CentiBar = Centi(Bar, FactorLinear)
MilliBar = Milli(Bar, FactorLinear)
MicroBar = Micro(Bar, FactorLinear)
Barye = NewUnit("barye", "Ba", Pressure, BI, UnitOptionPlural("baryes"))
InH2O = NewUnit("inch of Water Column", "inH2O", Pressure, BI)
InHg = NewUnit("inch of Mercury", "inHg", Pressure, BI)
MH2O = NewUnit("meter of Water Column", "mmH2O", Pressure, BI, UnitOptionPlural("meters of Water Column"))
MmH2O = Milli(MH2O, FactorLinear)
CmH2O = Centi(MH2O, FactorLinear)
MHg = NewUnit("meter of Mercury", "mmHg", Pressure, BI, UnitOptionPlural("meters of Mercury"))
MmHg = Milli(MHg, FactorLinear)
CmHg = Centi(MHg, FactorLinear)
Newton = NewUnit("newton per square meter", "N/m²", Pressure, BI)
Psi = NewUnit("pound-force per square inch", "psi", Pressure, BI)
Torr = NewUnit("torr", "Torr", Pressure, BI)
)
func init() {
NewRatioConversion(At, Pascal, 98066.5)
NewRatioConversion(Atm, Pascal, 101325.2738)
NewRatioConversion(Bar, Pascal, 98000)
NewRatioConversion(Barye, Pascal, 0.1)
NewRatioConversion(InH2O, Pascal, 248.84)
NewRatioConversion(InHg, Pascal, 3386.38815789)
NewRatioConversion(MH2O, Pascal, 980.665)
NewRatioConversion(MHg, Pascal, 13332.2368421)
NewRatioConversion(Newton, Pascal, 1)
NewRatioConversion(Psi, Pascal, 6894.757)
NewRatioConversion(Torr, Pascal, 133.322368421)
} | pressure_units.go | 0.627951 | 0.581957 | pressure_units.go | starcoder |
package jettison
import (
"encoding"
"encoding/json"
"reflect"
"sync"
"time"
"unsafe"
)
// Reflected types the encoder special-cases, resolved once at init.
var (
	timeTimeType = reflect.TypeOf(time.Time{})
	timeDurationType = reflect.TypeOf(time.Duration(0))
	syncMapType = reflect.TypeOf((*sync.Map)(nil)).Elem()
	jsonNumberType = reflect.TypeOf(json.Number(""))
	jsonRawMessageType = reflect.TypeOf(json.RawMessage(nil))
	jsonMarshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
	textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	appendMarshalerType = reflect.TypeOf((*AppendMarshaler)(nil)).Elem()
	appendMarshalerCtxType = reflect.TypeOf((*AppendMarshalerCtx)(nil)).Elem()
)
var emptyFnCache sync.Map // map[reflect.Type]emptyFunc
// emptyFunc is a function that returns whether a
// value pointed by an unsafe.Pointer represents the
// zero value of its type.
type emptyFunc func(unsafe.Pointer) bool
// marshalerEncodeFunc is a function that appends
// the result of a marshaler method call to dst.
type marshalerEncodeFunc func(interface{}, []byte, encOpts, reflect.Type) ([]byte, error)
// isBasicType reports whether t has a JSON-primitive Go kind:
// boolean, string, floating point, or integer.
func isBasicType(t reflect.Type) bool {
	return isBoolean(t) || isString(t) || isFloatingPoint(t) || isInteger(t)
}

// isBoolean reports whether t has kind bool.
func isBoolean(t reflect.Type) bool { return t.Kind() == reflect.Bool }

// isString reports whether t has kind string.
func isString(t reflect.Type) bool { return t.Kind() == reflect.String }

// isFloatingPoint reports whether t is a float32 or float64.
func isFloatingPoint(t reflect.Type) bool {
	k := t.Kind()
	return k == reflect.Float32 || k == reflect.Float64
}

// isInteger reports whether t is a signed or unsigned integer kind,
// including uintptr.
func isInteger(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Uintptr:
		return true
	default:
		return false
	}
}
// isInlined reports whether a value of type t is represented by a
// single pointer-sized word: pointers, maps, and one-field structs
// whose only field is itself inlined.
func isInlined(t reflect.Type) bool {
	k := t.Kind()
	if k == reflect.Ptr || k == reflect.Map {
		return true
	}
	if k == reflect.Struct && t.NumField() == 1 {
		// Recurse into the struct's single field.
		return isInlined(t.Field(0).Type)
	}
	return false
}
// isNilable reports whether the zero value of type t is nil.
// It deliberately covers only the kinds this encoder stores
// (ptr, interface, slice, map) — chan/func are never encoded.
func isNilable(t reflect.Type) bool {
	k := t.Kind()
	return k == reflect.Ptr || k == reflect.Interface || k == reflect.Slice || k == reflect.Map
}
// cachedEmptyFuncOf is similar to emptyFuncOf, but memoizes the result
// per type in emptyFnCache so each checker is built at most a handful
// of times. A concurrent racer may build a duplicate checker, but
// LoadOrStore guarantees every caller observes the same stored one.
func cachedEmptyFuncOf(t reflect.Type) emptyFunc {
	if cached, ok := emptyFnCache.Load(t); ok {
		return cached.(emptyFunc)
	}
	stored, _ := emptyFnCache.LoadOrStore(t, emptyFuncOf(t))
	return stored.(emptyFunc)
}
// emptyFuncOf returns a function that can be used to
// determine if a value pointed by an unsafe.Pointer
// represents the zero-value of type t.
//
// Strings and slices are inspected through their headers to avoid
// materializing the value; maps and pointers/interfaces are inspected
// through the single word they occupy.
func emptyFuncOf(t reflect.Type) emptyFunc {
	switch t.Kind() {
	case reflect.Bool:
		return func(p unsafe.Pointer) bool {
			return !*(*bool)(p)
		}
	case reflect.String:
		return func(p unsafe.Pointer) bool {
			return (*stringHeader)(p).Len == 0
		}
	case reflect.Int:
		return func(p unsafe.Pointer) bool {
			return *(*int)(p) == 0
		}
	case reflect.Int8:
		return func(p unsafe.Pointer) bool {
			return *(*int8)(p) == 0
		}
	case reflect.Int16:
		return func(p unsafe.Pointer) bool {
			return *(*int16)(p) == 0
		}
	case reflect.Int32:
		return func(p unsafe.Pointer) bool {
			return *(*int32)(p) == 0
		}
	case reflect.Int64:
		return func(p unsafe.Pointer) bool {
			return *(*int64)(p) == 0
		}
	case reflect.Uint:
		return func(p unsafe.Pointer) bool {
			return *(*uint)(p) == 0
		}
	case reflect.Uint8:
		return func(p unsafe.Pointer) bool {
			return *(*uint8)(p) == 0
		}
	case reflect.Uint16:
		return func(p unsafe.Pointer) bool {
			return *(*uint16)(p) == 0
		}
	case reflect.Uint32:
		return func(p unsafe.Pointer) bool {
			return *(*uint32)(p) == 0
		}
	case reflect.Uint64:
		return func(p unsafe.Pointer) bool {
			return *(*uint64)(p) == 0
		}
	case reflect.Uintptr:
		return func(p unsafe.Pointer) bool {
			return *(*uintptr)(p) == 0
		}
	case reflect.Float32:
		return func(p unsafe.Pointer) bool {
			return *(*float32)(p) == 0
		}
	case reflect.Float64:
		return func(p unsafe.Pointer) bool {
			return *(*float64)(p) == 0
		}
	case reflect.Map:
		// p points at the map word; dereference it before asking the
		// runtime for the length.
		return func(p unsafe.Pointer) bool {
			return maplen(*(*unsafe.Pointer)(p)) == 0
		}
	case reflect.Ptr:
		return func(p unsafe.Pointer) bool {
			return *(*unsafe.Pointer)(p) == nil
		}
	case reflect.Interface:
		return func(p unsafe.Pointer) bool {
			return *(*unsafe.Pointer)(p) == nil
		}
	case reflect.Slice:
		return func(p unsafe.Pointer) bool {
			return (*sliceHeader)(p).Len == 0
		}
	case reflect.Array:
		// Only a zero-length array is ever empty; arrays with elements
		// (and all remaining kinds, e.g. structs) fall through below
		// and are reported as never empty.
		if t.Len() == 0 {
			return func(unsafe.Pointer) bool { return true }
		}
	}
	return func(unsafe.Pointer) bool { return false }
}
package quotas
import (
"encoding/json"
)
// Quota represents the quota information for a single billing resource.
type Quota struct {
	// Name is a resource human-readable name.
	Name string `json:"-"`
	// ResourceQuotasEntities contains information about quotas of a single billing resource in different locations.
	ResourceQuotasEntities []ResourceQuotaEntity `json:"-"`
}

// ResourceQuotaEntity represents a single entity of the resource quota data in the specific region and zone.
type ResourceQuotaEntity struct {
	// Region contains the quota region data.
	Region string `json:"region"`
	// Zone contains the quota zone data.
	Zone string `json:"zone"`
	// Value contains the value of the resource quota in the specific region and zone.
	// It represents a free quota value if used with the GetFree request.
	Value int `json:"value"`
	// Used contains the quantity of a used quota in the specific region and zone.
	Used int `json:"used"`
}

// ResourcesQuotas represents quotas for different resources.
type ResourcesQuotas struct {
	// Quotas represents slice of Quotas.
	Quotas []*Quota `json:"-"`
}

/*
UnmarshalJSON implements custom unmarshalling method for the ResourcesQuotas type.

We need it because the Resell v2 API responds with resource quotas keyed
by resource name:

	"quotas": {
	    "compute_cores": [
	        {
	            "region": "ru-2",
	            "value": 200,
	            "zone": "ru-2a"
	        },
	        ...
	    ],
	    ...
	}
*/
func (result *ResourcesQuotas) UnmarshalJSON(b []byte) error {
	// Decode into a temporary structure keyed by resource name.
	var raw struct {
		ResourcesQuotas map[string][]ResourceQuotaEntity `json:"quotas"`
	}
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}

	// Always leave callers with a non-nil (possibly empty) slice.
	*result = ResourcesQuotas{Quotas: []*Quota{}}
	if len(raw.ResourcesQuotas) == 0 {
		return nil
	}

	// Flatten the map into a slice of Quota values, pre-sizing the
	// slice since the final length is already known.
	quotas := make([]*Quota, 0, len(raw.ResourcesQuotas))
	for resourceName, entities := range raw.ResourcesQuotas {
		quotas = append(quotas, &Quota{
			Name:                   resourceName,
			ResourceQuotasEntities: entities,
		})
	}
	result.Quotas = quotas
	return nil
}
// ProjectQuota represents quota information of a single project.
type ProjectQuota struct {
// ID is a project unique id.
ID string `json:"-"`
// ProjectQuotas contains project's quota information.
ProjectQuotas []Quota `json:"-"`
}
// ProjectsQuotas represents quotas for different projects.
type ProjectsQuotas struct {
// ProjectQuotas represents slice of ProjectQuotas.
ProjectQuotas []*ProjectQuota `json:"-"`
}
/*
UnmarshalJSON implements custom unmarshalling method for the ProjectsQuotas type.
We need it to work with a JSON structure that the Resell v2 API responses with:
"quotas": {
"6d23928357bb4e0eb302794bc57fb8fd": {
"compute_cores": [
{
"region": "ru-1",
"used": 2,
"value": 10,
"zone": "ru-1b"
},
...
]
},
...
}
*/
func (result *ProjectsQuotas) UnmarshalJSON(b []byte) error {
// Populate temporary structure with projects quotas represented as maps.
var s struct {
ProjectsQuotas map[string]map[string][]ResourceQuotaEntity `json:"quotas"`
}
err := json.Unmarshal(b, &s)
if err != nil {
return err
}
// Populate the result with an empty slice in case of empty quota list.
*result = ProjectsQuotas{
ProjectQuotas: []*ProjectQuota{},
}
if len(s.ProjectsQuotas) != 0 {
// Convert projects quota maps to the slice of ProjectQuota types.
// Here we're allocating memory in advance for both of project and resource quotas
// because we already know the length of each slices from the JSON bytearray.
projectQuotasSlice := make([]*ProjectQuota, len(s.ProjectsQuotas))
i := 0
for projectName, projectQuotas := range s.ProjectsQuotas {
resourceQuotasSlice := make([]Quota, len(projectQuotas))
j := 0
for resourceName, resourceQuotas := range projectQuotas {
resourceQuotasSlice[j] = Quota{
Name: resourceName,
ResourceQuotasEntities: resourceQuotas,
}
j++
}
projectQuotasSlice[i] = &ProjectQuota{
ID: projectName,
ProjectQuotas: resourceQuotasSlice,
}
i++
}
// Add the unmarshalled project quotas slice to the result.
result.ProjectQuotas = projectQuotasSlice
}
return nil
} | selvpcclient/resell/v2/quotas/schemas.go | 0.716615 | 0.448668 | schemas.go | starcoder |
package sweetiebot
import (
"math/rand"
"strconv"
"strings"
"github.com/bwmarrin/discordgo"
)
// QuoteCommand returns stored quotes: a random one, a random one from a
// given user, or a specific one by 1-based index.
type QuoteCommand struct {
}

// Name returns the command's invocation name.
func (c *QuoteCommand) Name() string {
	return "Quote"
}

// Process implements the command logic.
// NOTE(review): the bool return appears to flag "output may be long"
// (it is set when more than ~5 lines would be printed) — confirm its
// meaning against the command dispatcher.
func (c *QuoteCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		// No user given: pick uniformly across every user's quote list.
		l := 0
		for _, v := range info.config.Quotes {
			l += len(v)
		}
		if l <= 0 {
			return "```There are no quotes.```", false
		}
		// Walk the per-user lists, consuming i until it falls inside one.
		i := rand.Intn(l)
		for k, v := range info.config.Quotes {
			if i < len(v) {
				return "**" + getUserName(k, info) + "**: " + v[i], false
			}
			i -= len(v)
		}
		// Unreachable if the length bookkeeping above is consistent.
		return "```Error: invalid random quote chosen???```", false
	}
	arg := strings.ToLower(args[0])
	IDs := FindUsername(arg, info)
	if len(IDs) == 0 { // no matches!
		return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false
	}
	if len(IDs) > 1 {
		// Ambiguous username: list every candidate instead of guessing.
		return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5
	}
	q, ok := info.config.Quotes[IDs[0]]
	l := len(q)
	if !ok || l <= 0 {
		return "```That user has no quotes.```", false
	}
	// Default to a random quote; an explicit 1-based index overrides it.
	i := rand.Intn(l)
	if len(args) >= 2 {
		var err error
		i, err = strconv.Atoi(args[1])
		if err != nil {
			return "```Could not parse quote index. Make sure your username is in quotes. Use !searchquote [user] to list a user's quotes and their indexes.```", false
		}
		i-- // convert the user-facing 1-based index to 0-based
		if i >= l || i < 0 {
			return "```Invalid quote index. Use !searchquote [user] to list a user's quotes and their indexes.```", false
		}
	}
	return "**" + IDsToUsernames(IDs, info)[0] + "**: " + q[i], false
}

// Usage returns the long-form usage string for help output.
func (c *QuoteCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[user] [quote index]", "If no arguments are specified, returns a random quote. If a user is specified, returns a random quote from that user. If a quote index is specified, returns that specific quote.")
}

// UsageShort returns the one-line command summary.
func (c *QuoteCommand) UsageShort() string { return "Quotes a user." }
// AddQuoteCommand appends a new quote to a user's quote list.
type AddQuoteCommand struct {
}

// Name returns the command's invocation name.
func (c *AddQuoteCommand) Name() string {
	return "AddQuote"
}

// Process validates the arguments, resolves the username, stores the
// quote, and persists the guild configuration.
func (c *AddQuoteCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```Must specify username.```", false
	}
	if len(args) < 2 {
		return "```Can't add a blank quote!```", false
	}
	arg := strings.ToLower(args[0])
	IDs := FindUsername(arg, info)
	if len(IDs) == 0 { // no matches!
		return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false
	}
	if len(IDs) > 1 {
		// Ambiguous username: list every candidate instead of guessing.
		return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5
	}
	// Lazily initialize the quotes map (also covers a nil map, since
	// len(nil map) == 0).
	if len(info.config.Quotes) == 0 {
		info.config.Quotes = make(map[uint64][]string)
	}
	// The quote is everything after the username, rejoined with spaces.
	info.config.Quotes[IDs[0]] = append(info.config.Quotes[IDs[0]], strings.Join(args[1:], " "))
	info.SaveConfig()
	return "```Quote added to " + IDsToUsernames(IDs, info)[0] + ".```", false
}

// Usage returns the long-form usage string for help output.
func (c *AddQuoteCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[user] [quote]", "Adds a quote to the quote database for the given user. If the username has spaces, it must be in quotes. If the user is ambiguous, sweetiebot will return all possible matches. Use !searchquote to find a quote index.")
}

// UsageShort returns the one-line command summary.
func (c *AddQuoteCommand) UsageShort() string { return "Adds a quote." }
// RemoveQuoteCommand deletes a quote from a user's list by its
// 1-based index.
type RemoveQuoteCommand struct {
}

// Name returns the command's invocation name.
func (c *RemoveQuoteCommand) Name() string {
	return "RemoveQuote"
}

// Process validates the arguments, resolves the username, removes the
// indexed quote, and persists the guild configuration.
func (c *RemoveQuoteCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		return "```Must specify username.```", false
	}
	if len(args) < 2 {
		return "```Must specify quote index. Use !searchquote to list them.```", false
	}
	arg := strings.ToLower(args[0])
	index, err := strconv.Atoi(args[1])
	if err != nil {
		return "```Error: could not parse quote index. Did you surround your username with quotes? Use !searchquote to find a quote index.```", false
	}
	IDs := FindUsername(arg, info)
	if len(IDs) == 0 { // no matches!
		return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false
	}
	if len(IDs) > 1 {
		// Ambiguous username: list every candidate instead of guessing.
		return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5
	}
	index-- // convert the user-facing 1-based index to 0-based
	if index >= len(info.config.Quotes[IDs[0]]) || index < 0 {
		return "```Invalid quote index. Use !searchquote [user] to list a user's quotes and their indexes.```", false
	}
	// Splice the quote out of the user's slice.
	info.config.Quotes[IDs[0]] = append(info.config.Quotes[IDs[0]][:index], info.config.Quotes[IDs[0]][index+1:]...)
	info.SaveConfig()
	return "```Deleted quote #" + strconv.Itoa(index+1) + " from " + IDsToUsernames(IDs, info)[0] + ".```", false
}

// Usage returns the long-form usage string for help output.
func (c *RemoveQuoteCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[user] [quote index]", "Removes the quote with the given quote index from the user's set of quotes. If the username has spaces, it must be in quotes. If the user is ambiguous, sweetiebot will return all possible matches. Use !searchquote to find a quote index.")
}

// UsageShort returns the one-line command summary.
func (c *RemoveQuoteCommand) UsageShort() string { return "Removes a quote." }
// SearchQuoteCommand lists quotable users, or all quotes (with their
// 1-based indexes) for one user.
type SearchQuoteCommand struct {
}

// Name returns the command's invocation name.
func (c *SearchQuoteCommand) Name() string {
	return "SearchQuote"
}

// Process lists users that have quotes when no argument is given,
// otherwise lists every quote of the resolved user.
func (c *SearchQuoteCommand) Process(args []string, msg *discordgo.Message, info *GuildInfo) (string, bool) {
	if len(args) < 1 {
		// No user given: list every user that has at least one quote.
		s := make([]uint64, 0, len(info.config.Quotes))
		for k, v := range info.config.Quotes {
			if len(v) > 0 { // Map entries can have 0 quotes associated with them
				s = append(s, k)
			}
		}
		return "```The following users have at least one quote:\n" + strings.Join(IDsToUsernames(s, info), "\n") + "```", len(s) > 6
	}
	arg := strings.ToLower(args[0])
	IDs := FindUsername(arg, info)
	if len(IDs) == 0 { // no matches!
		return "```Error: Could not find any usernames or aliases matching " + arg + "!```", false
	}
	if len(IDs) > 1 {
		// Ambiguous username: list every candidate instead of guessing.
		return "```Could be any of the following users or their aliases:\n" + strings.Join(IDsToUsernames(IDs, info), "\n") + "```", len(IDs) > 5
	}
	l := len(info.config.Quotes[IDs[0]])
	if l == 0 {
		return "```That user has no quotes.```", false
	}
	// Number each quote with its user-facing 1-based index.
	quotes := make([]string, l, l)
	for i := 0; i < l; i++ {
		quotes[i] = strconv.Itoa(i+1) + ". " + info.config.Quotes[IDs[0]][i]
	}
	return "All quotes for " + IDsToUsernames(IDs, info)[0] + ":\n" + strings.Join(quotes, "\n"), l > 6
}

// Usage returns the long-form usage string for help output.
func (c *SearchQuoteCommand) Usage(info *GuildInfo) string {
	return info.FormatUsage(c, "[user]", "Lists all quotes for the given user.")
}

// UsageShort returns the one-line command summary.
func (c *SearchQuoteCommand) UsageShort() string { return "Finds a quote." }
package list
// Iter applies action to every element of values, in order.
func Iter[T any](action func(T), values []T) {
	for _, v := range values {
		action(v)
	}
}
// IterRev iterates over the values in reverse, applying action to each value.
// Delegates to DoRangeToRev so reverse ordering matches the other Rev helpers.
func IterRev[T any](action func(T), values []T) {
	DoRangeToRev(func(i int) { action(values[i]) }, LastIndexOf(values))
}

// Iter2 iterates over two slices of values, applying action to each pair of values.
// It only iterates until the end of the shortest of the value slices;
// only the smaller last-index from Min is used so every access stays in bounds.
func Iter2[T, T2 any](action func(T, T2), values1 []T, values2 []T2) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2))
	DoRangeTo(func(i int) { action(values1[i], values2[i]) }, min)
}

// Iter2Rev iterates over two slices of values in reverse, applying action to each pair of values.
// It only iterates until the end of the shortest of the value slices.
func Iter2Rev[T, T2 any](action func(T, T2), values1 []T, values2 []T2) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2))
	DoRangeToRev(func(i int) { action(values1[i], values2[i]) }, min)
}

// Iter3 iterates over three slices of values, applying action to each series of values.
// It only iterates until the end of the shortest of the value slices.
func Iter3[T, T2, T3 any](action func(T, T2, T3), values1 []T, values2 []T2, values3 []T3) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2), LastIndexOf(values3))
	DoRangeTo(func(i int) { action(values1[i], values2[i], values3[i]) }, min)
}

// Iter3Rev iterates over three slices of values in reverse, applying action to each series of values.
// It only iterates until the end of the shortest of the value slices.
func Iter3Rev[T, T2, T3 any](action func(T, T2, T3), values1 []T, values2 []T2, values3 []T3) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2), LastIndexOf(values3))
	DoRangeToRev(func(i int) { action(values1[i], values2[i], values3[i]) }, min)
}
// Iteri applies action to every element of values in order, passing the
// element's index along with the element.
func Iteri[T any](action func(int, T), values []T) {
	for idx, v := range values {
		action(idx, v)
	}
}
// IteriRev iterates over the values in reverse, applying action to each value with the index of the value.
// Delegates to DoRangeToRev so reverse ordering matches the other Rev helpers.
func IteriRev[T any](action func(int, T), values []T) {
	DoRangeToRev(func(i int) { action(i, values[i]) }, LastIndexOf(values))
}

// Iteri2 iterates over the two slices of values, applying action to each pair of values with the index of the values.
// It only iterates until the end of the shortest of the value slices;
// only the smaller last-index from Min is used so every access stays in bounds.
func Iteri2[T, T2 any](action func(int, T, T2), values1 []T, values2 []T2) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2))
	DoRangeTo(func(i int) { action(i, values1[i], values2[i]) }, min)
}

// Iteri2Rev iterates over the two slices of values in reverse, applying action to each pair of values with the index of the values.
// It only iterates until the end of the shortest of the value slices.
func Iteri2Rev[T, T2 any](action func(int, T, T2), values1 []T, values2 []T2) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2))
	DoRangeToRev(func(i int) { action(i, values1[i], values2[i]) }, min)
}

// Iteri3 iterates over the three slices of values, applying action to each series of values with the index of the values.
// It only iterates until the end of the shortest of the value slices.
func Iteri3[T, T2, T3 any](action func(int, T, T2, T3), values1 []T, values2 []T2, values3 []T3) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2), LastIndexOf(values3))
	DoRangeTo(func(i int) { action(i, values1[i], values2[i], values3[i]) }, min)
}

// Iteri3Rev iterates over the three slices of values in reverse, applying action to each series of values with the index of the values.
// It only iterates until the end of the shortest of the value slices.
func Iteri3Rev[T, T2, T3 any](action func(int, T, T2, T3), values1 []T, values2 []T2, values3 []T3) {
	min, _ := Min(LastIndexOf(values1), LastIndexOf(values2), LastIndexOf(values3))
	DoRangeToRev(func(i int) { action(i, values1[i], values2[i], values3[i]) }, min)
}
// The multi-dimensional helpers below each delegate one dimension to the
// corresponding lower-dimensional helper (Rev variants to the Rev forms),
// so ordering semantics stay consistent across all depths.

// Iter2D iterates over a two dimensional slice of values, applying action to each value.
func Iter2D[T any](action func(T), values [][]T) {
	for i := range values {
		Iter(action, values[i])
	}
}

// Iter2DRev iterates over a two dimensional slice of values in reverse, applying action to each value.
func Iter2DRev[T any](action func(T), values [][]T) {
	IterRev(func(v []T) { IterRev(action, v) }, values)
}

// Iteri2D iterates over a two dimensional slice of values, applying action to each value with the indexes of the value.
func Iteri2D[T any](action func(int, int, T), values [][]T) {
	for i := range values {
		Iteri(func(j int, elem T) { action(i, j, elem) }, values[i])
	}
}

// Iteri2DRev iterates over a two dimensional slice of values in reverse, applying action to each value with the indexes of the value.
func Iteri2DRev[T any](action func(int, int, T), values [][]T) {
	IteriRev(func(i int, v []T) {
		IteriRev(func(j int, elem T) { action(i, j, elem) }, v)
	}, values)
}

// Iter3D iterates over a three dimensional slice of values, applying action to each value.
func Iter3D[T any](action func(T), values [][][]T) {
	for i := range values {
		Iter2D(action, values[i])
	}
}

// Iter3DRev iterates over a three dimensional slice of values in reverse, applying action to each value.
func Iter3DRev[T any](action func(T), values [][][]T) {
	IterRev(func(v [][]T) {
		Iter2DRev(action, v)
	}, values)
}

// Iteri3D iterates over a three dimensional slice of values, applying action to each value with the indexes of each value.
func Iteri3D[T any](action func(int, int, int, T), values [][][]T) {
	for i := range values {
		Iteri2D(func(j, k int, elem T) { action(i, j, k, elem) }, values[i])
	}
}

// Iteri3DRev iterates over a three dimensional slice of values in reverse, applying action to each value with the indexes of each value.
func Iteri3DRev[T any](action func(int, int, int, T), values [][][]T) {
	IteriRev(func(i int, v [][]T) {
		Iteri2DRev(func(j, k int, elem T) { action(i, j, k, elem) }, v)
	}, values)
}

// Iter4D iterates over a four dimensional slice of values, applying action to each value.
func Iter4D[T any](action func(T), values [][][][]T) {
	for i := range values {
		Iter3D(action, values[i])
	}
}

// Iter4DRev iterates over a four dimensional slice of values in reverse, applying action to each value.
func Iter4DRev[T any](action func(T), values [][][][]T) {
	IterRev(func(v [][][]T) {
		Iter3DRev(action, v)
	}, values)
}

// Iteri4D iterates over a four dimensional slice of values, applying action to each value with the indexes of each value.
func Iteri4D[T any](action func(int, int, int, int, T), values [][][][]T) {
	for i := range values {
		Iteri3D(func(j, k, l int, elem T) { action(i, j, k, l, elem) }, values[i])
	}
}

// Iteri4DRev iterates over a four dimensional slice of values in reverse, applying action to each value with the indexes of each value.
func Iteri4DRev[T any](action func(int, int, int, int, T), values [][][][]T) {
	IteriRev(func(i int, v [][][]T) {
		Iteri3DRev(func(j, k, l int, elem T) { action(i, j, k, l, elem) }, v)
	}, values)
}
// IterUntil applies action to each element in order, stopping early the
// first time action returns true.
func IterUntil[T any](action func(T) bool, values []T) {
	for _, v := range values {
		if action(v) {
			break
		}
	}
}
// IterRevUntil iterates over a slice of values in reverse, applying action to each value until action returns true.
// Starts at LastIndexOf(values) and walks down to index 0; an always-false
// action visits every element.
func IterRevUntil[T any](action func(T) bool, values []T) {
	for i := LastIndexOf(values); i >= 0; i-- {
		if action(values[i]) {
			return
		}
	}
}
package alphafoxtrot
import (
"math"
"strconv"
"strings"
)
// Airport type bit flags. Individual types are single bits so they can
// be OR-ed together into filter masks; the composite masks below cover
// common groupings.
const (
	AirportTypeUnknown      uint64 = 0x00
	AirportTypeClosed       uint64 = 0x01
	AirportTypeHeliport     uint64 = 0x02
	AirportTypeSeaplaneBase uint64 = 0x04
	AirportTypeSmall        uint64 = 0x08
	AirportTypeMedium       uint64 = 0x10
	AirportTypeLarge        uint64 = 0x20
	// AirportTypeAll matches every known airport type, including closed ones.
	AirportTypeAll uint64 = AirportTypeClosed | AirportTypeHeliport | AirportTypeSeaplaneBase | AirportTypeSmall | AirportTypeMedium | AirportTypeLarge
	// AirportTypeActive matches every type except closed airports.
	AirportTypeActive uint64 = AirportTypeHeliport | AirportTypeSeaplaneBase | AirportTypeSmall | AirportTypeMedium | AirportTypeLarge
	// AirportTypeRunways matches the fixed-wing airport sizes.
	AirportTypeRunways uint64 = AirportTypeSmall | AirportTypeMedium | AirportTypeLarge
)

// Keys into OurAirportsFiles and the base URL the CSV data sets are
// downloaded from.
const (
	AirportsFileKey    = "airports"
	FrequenciesFileKey = "frequencies"
	RunwaysFileKey     = "runways"
	RegionsFileKey     = "regions"
	CountriesFileKey   = "countries"
	NavaidsFileKey     = "navaids"
	OurAirportsBaseURL = "https://ourairports.com/data/"
)

// Airport type names as they appear in the OurAirports CSV data.
const (
	AirportTypeClosedName       = "closed"
	AirportTypeSmallName        = "small_airport"
	AirportTypeMediumName       = "medium_airport"
	AirportTypeLargeName        = "large_airport"
	AirportTypeHeliportName     = "heliport"
	AirportTypeSeaplaneBaseName = "seaplane_base"
	AirportTypeUnknownName      = "unknown"
)

// Geometry constants: degrees-to-radians factor and the mean Earth
// radius in meters, used by Distance.
const (
	DegToRad    float64 = math.Pi / 180.0
	EarthRadius float64 = 6371.0 * 1000.0
)

// OurAirportsFiles maps each data-set key to its CSV file name on the
// OurAirports server.
var OurAirportsFiles = map[string]string{
	AirportsFileKey:    "airports.csv",
	FrequenciesFileKey: "airport-frequencies.csv",
	RunwaysFileKey:     "runways.csv",
	RegionsFileKey:     "regions.csv",
	CountriesFileKey:   "countries.csv",
	NavaidsFileKey:     "navaids.csv",
}
// Length conversion helpers between meters and other units.
// Factors use the exact international definitions (1 mile = 1609.344 m,
// 1 nautical mile = 1852 m), and each inverse divides by the same
// factor so forward/backward conversions round-trip consistently
// (previously 1609.34 vs 0.000621373 did not).

// KilometersToMeters converts kilometers to meters.
func KilometersToMeters(km float64) float64 {
	return km * 1000.0
}

// MilesToMeters converts statute miles to meters.
func MilesToMeters(mi float64) float64 {
	return mi * 1609.344
}

// NauticalMilesToMeters converts nautical miles to meters.
func NauticalMilesToMeters(nm float64) float64 {
	return nm * 1852.0
}

// MetersToKilometers converts meters to kilometers.
func MetersToKilometers(m float64) float64 {
	return m / 1000.0
}

// MetersToMiles converts meters to statute miles.
func MetersToMiles(m float64) float64 {
	return m / 1609.344
}

// MetersToNauticalMiles converts meters to nautical miles.
func MetersToNauticalMiles(m float64) float64 {
	return m / 1852.0
}
// airportTypesByName maps OurAirports CSV type names to their bitmask
// values.
var airportTypesByName = map[string]uint64{
	AirportTypeClosedName:       AirportTypeClosed,
	AirportTypeSmallName:        AirportTypeSmall,
	AirportTypeMediumName:       AirportTypeMedium,
	AirportTypeLargeName:        AirportTypeLarge,
	AirportTypeHeliportName:     AirportTypeHeliport,
	AirportTypeSeaplaneBaseName: AirportTypeSeaplaneBase,
}

// AirportTypeFromString returns the bitmask value for a CSV airport
// type name, or AirportTypeUnknown for anything unrecognized.
func AirportTypeFromString(typ string) uint64 {
	if v, ok := airportTypesByName[typ]; ok {
		return v
	}
	return AirportTypeUnknown
}
// see https://stackoverflow.com/questions/43167417/calculate-distance-between-two-points-in-leaflet
// Distance returns the haversine (great-circle) distance in meters
// between two coordinates given in degrees, using a mean Earth radius
// of 6371 km.
func Distance(fromLatitudeDeg, fromLongitudeDeg, toLatitudeDeg, toLongitudeDeg float64) float64 {
	const degToRad = math.Pi / 180.0
	const earthRadiusMeters = 6371.0 * 1000.0
	latFrom := fromLatitudeDeg * degToRad
	lonFrom := fromLongitudeDeg * degToRad
	latTo := toLatitudeDeg * degToRad
	lonTo := toLongitudeDeg * degToRad
	deltaLat := latTo - latFrom
	deltaLon := lonTo - lonFrom
	// Haversine term: sin²(Δφ/2) + cosφ₁·cosφ₂·sin²(Δλ/2).
	a := math.Pow(math.Sin(deltaLat*0.5), 2) + math.Cos(latFrom)*math.Cos(latTo)*math.Pow(math.Sin(deltaLon*0.5), 2)
	c := 2 * math.Asin(math.Sqrt(a))
	return c * earthRadiusMeters
}
// ParseFloat parses str as a 64-bit float. On failure it returns 0 and
// the strconv error (masking strconv's partial result on range errors).
func ParseFloat(str string) (float64, error) {
	value, err := strconv.ParseFloat(str, 64)
	if err != nil {
		return 0, err
	}
	// Return an explicit nil error for consistency with the sibling
	// parsers (the old code returned the already-nil err).
	return value, nil
}

// ParseInt parses str as a base-10 signed 64-bit integer, returning 0
// and the strconv error on failure.
func ParseInt(str string) (int64, error) {
	value, err := strconv.ParseInt(str, 10, 64)
	if err != nil {
		return 0, err
	}
	return value, nil
}

// ParseUint parses str as a base-10 unsigned 64-bit integer, returning
// 0 and the strconv error on failure.
func ParseUint(str string) (uint64, error) {
	value, err := strconv.ParseUint(str, 10, 64)
	if err != nil {
		return 0, err
	}
	return value, nil
}
// ParseBool reports whether str is an affirmative value.
// Accepted (case-insensitive): "true", "yes", "1". Anything else,
// including the empty string, is false.
func ParseBool(str string) bool {
	// A multi-value case replaces the old fallthrough chain.
	switch strings.ToLower(str) {
	case "true", "yes", "1":
		return true
	}
	return false
}
// MinInt returns the smaller of a and b.
func MinInt(a, b int) int {
	if b < a {
		return b
	}
	return a
}
package clustering
import (
"time"
)
// ClusterResults represents the results of clustering a list of
// test failures.
type ClusterResults struct {
	// AlgorithmsVersion is the version of clustering algorithms used to
	// cluster test results in this chunk. (This is a version over the
	// set of algorithms, distinct from the version of a single algorithm,
	// e.g.: v1 -> {failurereason-v1}, v2 -> {failurereason-v1, testname-v1},
	// v3 -> {failurereason-v2, testname-v1}.)
	AlgorithmsVersion int64
	// ConfigVersion is the version of Weetbix project configuration
	// used to cluster the test results. Clustering algorithms can rely
	// on the configuration to alter their behaviour, so changes to
	// the configuration should trigger re-clustering of test results.
	ConfigVersion time.Time
	// RulesVersion is the version of failure association rules used
	// to cluster test results. This is most recent PredicateLastUpdated
	// time in the snapshot of failure association rules used to cluster
	// the test results.
	RulesVersion time.Time
	// Algorithms is the set of algorithms that were used to cluster
	// the test results. Each entry is an algorithm name.
	// When stored alongside the clustered test results, this allows only
	// the new algorithms to be run when re-clustering (for efficiency).
	Algorithms map[string]struct{}
	// Clusters records the clusters each test result is in;
	// one slice of ClusterIDs for each test result. For each test result,
	// clusters must be in sorted order, with no duplicates. (The sorted,
	// duplicate-free invariant is what lets ClustersEqual compare
	// element-wise.)
	Clusters [][]ClusterID
}
// AlgorithmsAndClustersEqual returns whether the algorithm sets and the
// per-result clusters of two cluster results are equivalent. Versions
// are deliberately not compared.
func AlgorithmsAndClustersEqual(a *ClusterResults, b *ClusterResults) bool {
	if !setsEqual(a.Algorithms, b.Algorithms) || len(a.Clusters) != len(b.Clusters) {
		return false
	}
	for i := range a.Clusters {
		if !ClustersEqual(a.Clusters[i], b.Clusters[i]) {
			return false
		}
	}
	return true
}
// ClustersEqual returns whether the clusters in `as` are element-wise
// equal to those in `bs`.
// To test set-wise cluster equality, this method is called with
// clusters in sorted order, and no duplicates.
func ClustersEqual(as []ClusterID, bs []ClusterID) bool {
	if len(as) != len(bs) {
		return false
	}
	for i := range as {
		if as[i].Algorithm != bs[i].Algorithm || as[i].ID != bs[i].ID {
			return false
		}
	}
	return true
}
// setsEqual returns whether two string sets contain exactly the same
// members. A nil set and an empty set compare equal.
func setsEqual(a map[string]struct{}, b map[string]struct{}) bool {
	if len(a) != len(b) {
		return false
	}
	// Sizes match, so a is a subset of b iff the sets are equal.
	for member := range a {
		_, present := b[member]
		if !present {
			return false
		}
	}
	return true
}
package rtc
import "math"
// Cube creates a cube at the origin ranging from -1 to 1 on each axis.
// It implements the Object interface.
func Cube() *CubeT {
	return &CubeT{Shape{Transform: M4Identity(), Material: GetMaterial()}}
}

// CubeT represents a Cube. It embeds Shape for the transform, material,
// and parent bookkeeping shared by all objects.
type CubeT struct {
	Shape
}

// Compile-time check that *CubeT satisfies Object.
var _ Object = &CubeT{}

// SetTransform sets the object's transform 4x4 matrix.
// It returns the object to allow call chaining.
func (c *CubeT) SetTransform(m M4) Object {
	c.Transform = m
	return c
}

// SetMaterial sets the object's material.
// It returns the object to allow call chaining.
func (c *CubeT) SetMaterial(material MaterialT) Object {
	c.Material = material
	return c
}

// SetParent sets the object's parent object.
// It returns the object to allow call chaining.
func (c *CubeT) SetParent(parent Object) Object {
	c.Parent = parent
	return c
}

// Bounds returns the minimum bounding box of the object in object
// (untransformed) space: the unit cube from (-1,-1,-1) to (1,1,1).
func (c *CubeT) Bounds() *BoundsT {
	return &BoundsT{
		Min: Point(-1, -1, -1),
		Max: Point(1, 1, 1),
	}
}
// checkAxis intersects one ray component (origin, direction) with the
// axis-aligned slab [min, max] and returns the entry/exit ray
// parameters, ordered so that tmin <= tmax.
func checkAxis(origin, direction, min, max float64) (tmin float64, tmax float64) {
	tminNumerator := min - origin
	tmaxNumerator := max - origin
	if math.Abs(direction) >= epsilon {
		tmin = tminNumerator / direction
		tmax = tmaxNumerator / direction
	} else {
		// Ray is (nearly) parallel to this axis: push the slab planes
		// to +/-infinity instead of dividing by a tiny direction.
		// NOTE(review): if the origin lies exactly on a slab face, the
		// numerator is 0 and 0*Inf yields NaN here — confirm callers
		// tolerate that edge case.
		tmin = tminNumerator * math.Inf(1)
		tmax = tmaxNumerator * math.Inf(1)
	}
	if tmin > tmax {
		tmin, tmax = tmax, tmin
	}
	return tmin, tmax
}
// LocalIntersect returns a slice of IntersectionT values where the
// transformed (object space) ray intersects the object. It uses the
// slab method: the ray enters the cube at the largest per-axis entry
// parameter and exits at the smallest per-axis exit parameter; if the
// entry is beyond the exit, the ray misses.
func (c *CubeT) LocalIntersect(ray RayT) []IntersectionT {
	xtmin, xtmax := checkAxis(ray.Origin.X(), ray.Direction.X(), -1, 1)
	ytmin, ytmax := checkAxis(ray.Origin.Y(), ray.Direction.Y(), -1, 1)
	ztmin, ztmax := checkAxis(ray.Origin.Z(), ray.Direction.Z(), -1, 1)

	tmin := math.Max(xtmin, math.Max(ytmin, ztmin))
	tmax := math.Min(xtmax, math.Min(ytmax, ztmax))

	if tmin > tmax {
		return nil
	}

	return []IntersectionT{Intersection(tmin, c), Intersection(tmax, c)}
}

// LocalNormalAt returns the normal vector at the given point of intersection
// (transformed to object space) with the object. The face is chosen by
// the dominant coordinate: whichever axis has the largest absolute
// value determines which cube face the point lies on. The hit argument
// is unused; it exists to satisfy the Object interface.
func (c *CubeT) LocalNormalAt(objectPoint Tuple, hit *IntersectionT) Tuple {
	absX := math.Abs(objectPoint.X())
	absY := math.Abs(objectPoint.Y())
	maxc := math.Max(absX, math.Max(absY, math.Abs(objectPoint.Z())))
	if maxc == absX {
		return Vector(objectPoint.X(), 0, 0)
	}
	if maxc == absY {
		return Vector(0, objectPoint.Y(), 0)
	}
	return Vector(0, 0, objectPoint.Z())
}

// Includes returns whether this object includes (or actually is) the
// other object. A cube has no children, so this is identity.
func (c *CubeT) Includes(other Object) bool {
	return c == other
}
package glmatrix
import (
"fmt"
"math"
)
// Quat2Create returns a new identity dual quaternion, laid out as
// 8 floats: [real x, y, z, w | dual x, y, z, w].
func Quat2Create() []float64 {
	dq := make([]float64, 8)
	dq[3] = 1 // real w component
	return dq
}

// Quat2Clone returns an independent copy of the dual quaternion a.
func Quat2Clone(a []float64) []float64 {
	out := make([]float64, 8)
	copy(out, a[:8])
	return out
}

// Quat2FromValues builds a dual quaternion from its eight components:
// real part (x1,y1,z1,w1) followed by dual part (x2,y2,z2,w2).
func Quat2FromValues(x1, y1, z1, w1, x2, y2, z2, w2 float64) []float64 {
	return []float64{x1, y1, z1, w1, x2, y2, z2, w2}
}
// Quat2FromRotationTranslationValues builds a dual quaternion from a
// rotation quaternion (x1,y1,z1,w1) and a translation (x2,y2,z2).
// The dual part encodes 0.5 * t * q (quaternion product of the pure
// translation quaternion with the rotation).
func Quat2FromRotationTranslationValues(x1, y1, z1, w1, x2, y2, z2 float64) []float64 {
	hx, hy, hz := x2*0.5, y2*0.5, z2*0.5
	return []float64{
		x1, y1, z1, w1,
		hx*w1 + hy*z1 - hz*y1,
		hy*w1 + hz*x1 - hx*z1,
		hz*w1 + hx*y1 - hy*x1,
		-hx*x1 - hy*y1 - hz*z1,
	}
}
// Quat2FromRotationTranslation writes into out the dual quaternion for
// rotation quaternion q and translation vector t, and returns out.
// The real part is q; the dual part encodes 0.5 * t * q.
func Quat2FromRotationTranslation(out, q, t []float64) []float64 {
	halfX, halfY, halfZ := t[0]*0.5, t[1]*0.5, t[2]*0.5
	qx, qy, qz, qw := q[0], q[1], q[2], q[3]

	out[0], out[1], out[2], out[3] = qx, qy, qz, qw
	out[4] = halfX*qw + halfY*qz - halfZ*qy
	out[5] = halfY*qw + halfZ*qx - halfX*qz
	out[6] = halfZ*qw + halfX*qy - halfY*qx
	out[7] = -halfX*qx - halfY*qy - halfZ*qz
	return out
}
// Quat2FromTranslation creates a pure-translation dual quat in out:
// identity rotation, dual part t/2.
func Quat2FromTranslation(out, t []float64) []float64 {
	out[0], out[1], out[2], out[3] = 0, 0, 0, 1
	out[4], out[5], out[6] = t[0]*0.5, t[1]*0.5, t[2]*0.5
	out[7] = 0
	return out
}
// Quat2FromRotation creates a pure-rotation dual quat in out: real part
// copied from q, dual part zeroed.
func Quat2FromRotation(out, q []float64) []float64 {
	copy(out[:4], q[:4])
	for i := 4; i < 8; i++ {
		out[i] = 0
	}
	return out
}
// Quat2FromMat4 creates a new dual quat in out from a 4x4 matrix a by
// extracting its rotation and translation parts and recombining them.
func Quat2FromMat4(out, a []float64) []float64 {
	//TODO Optimize this
	rotation := QuatCreate()
	Mat4GetRotation(rotation, a)
	translation := []float64{0, 0, 0}
	Mat4GetTranslation(translation, a)
	return Quat2FromRotationTranslation(out, rotation, translation)
}
// Quat2Copy copies the eight components of dual quat a into out and
// returns out.
func Quat2Copy(out, a []float64) []float64 {
	for i := 0; i < 8; i++ {
		out[i] = a[i]
	}
	return out
}
// Quat2Identity sets out to the identity dual quaternion and returns it.
func Quat2Identity(out []float64) []float64 {
	for i := 0; i < 8; i++ {
		out[i] = 0
	}
	out[3] = 1
	return out
}
// Quat2Set sets the components of dual quat out to the given values and
// returns out.
func Quat2Set(out []float64, x1, y1, z1, w1, x2, y2, z2, w2 float64) []float64 {
	out[0], out[1], out[2], out[3] = x1, y1, z1, w1
	out[4], out[5], out[6], out[7] = x2, y2, z2, w2
	return out
}
// Quat2GetReal gets the real part of a dual quat.
// Alias of QuatCopy — presumably copies components 0..3 into out; confirm
// against QuatCopy's definition.
var Quat2GetReal = QuatCopy
// Quat2GetDual copies the dual part (components 4..7) of a into out and
// returns out.
func Quat2GetDual(out, a []float64) []float64 {
	copy(out[:4], a[4:8])
	return out
}
// Quat2SetReal sets the real component of a dual quat to the given
// quaternion. Alias of QuatCopy — presumably writes components 0..3 of out;
// confirm against QuatCopy's definition.
var Quat2SetReal = QuatCopy
// Quat2SetDual copies quaternion q into the dual part (components 4..7)
// of out and returns out.
func Quat2SetDual(out, q []float64) []float64 {
	copy(out[4:8], q[:4])
	return out
}
// Quat2GetTranslation gets the translation of a normalized dual quat:
// t = 2 * dual * conjugate(real), written into out (first 3 components).
func Quat2GetTranslation(out, a []float64) []float64 {
	dx, dy, dz, dw := a[4], a[5], a[6], a[7]
	cx, cy, cz, cw := -a[0], -a[1], -a[2], a[3]
	out[0] = (dx*cw + dw*cx + dy*cz - dz*cy) * 2
	out[1] = (dy*cw + dw*cy + dz*cx - dx*cz) * 2
	out[2] = (dz*cw + dw*cz + dx*cy - dy*cx) * 2
	return out
}
// Quat2Translate translates a dual quat by the given vector.
// The real part is copied unchanged; the dual part becomes
// dual(a) + 0.5*v*real(a) (quaternion product). All operands are read into
// locals before out is written, so out may alias a.
func Quat2Translate(out, a, v []float64) []float64 {
	// real part of a
	ax1 := a[0]
	ay1 := a[1]
	az1 := a[2]
	aw1 := a[3]
	// half translation
	bx1 := v[0] * 0.5
	by1 := v[1] * 0.5
	bz1 := v[2] * 0.5
	// dual part of a
	ax2 := a[4]
	ay2 := a[5]
	az2 := a[6]
	aw2 := a[7]
	out[0] = ax1
	out[1] = ay1
	out[2] = az1
	out[3] = aw1
	out[4] = aw1*bx1 + ay1*bz1 - az1*by1 + ax2
	out[5] = aw1*by1 + az1*bx1 - ax1*bz1 + ay2
	out[6] = aw1*bz1 + ax1*by1 - ay1*bx1 + az2
	out[7] = -ax1*bx1 - ay1*by1 - az1*bz1 + aw2
	return out
}
// Quat2RotateX rotates a dual quat around the X axis by rad radians.
// It extracts the translation-carrying product dual*conj(real), rotates
// the real part via QuatRotateX, then recombines the dual part so the
// rigid transform is rotated in place. out may alias a: all values of a
// are consumed before out is written.
func Quat2RotateX(out, a []float64, rad float64) []float64 {
	// conjugate of the real part
	bx := -a[0]
	by := -a[1]
	bz := -a[2]
	bw := a[3]
	// dual part
	ax := a[4]
	ay := a[5]
	az := a[6]
	aw := a[7]
	// dual * conj(real)
	ax1 := ax*bw + aw*bx + ay*bz - az*by
	ay1 := ay*bw + aw*by + az*bx - ax*bz
	az1 := az*bw + aw*bz + ax*by - ay*bx
	aw1 := aw*bw - ax*bx - ay*by - az*bz
	// rotate the real part (writes out[0..3])
	QuatRotateX(out, a, rad)
	bx = out[0]
	by = out[1]
	bz = out[2]
	bw = out[3]
	// recombine: new dual = (dual*conj(real)) * newReal
	out[4] = ax1*bw + aw1*bx + ay1*bz - az1*by
	out[5] = ay1*bw + aw1*by + az1*bx - ax1*bz
	out[6] = az1*bw + aw1*bz + ax1*by - ay1*bx
	out[7] = aw1*bw - ax1*bx - ay1*by - az1*bz
	return out
}
// Quat2RotateY rotates a dual quat around the Y axis by rad radians.
// Same scheme as Quat2RotateX, delegating the real-part rotation to
// QuatRotateY. out may alias a.
func Quat2RotateY(out, a []float64, rad float64) []float64 {
	// conjugate of the real part
	bx := -a[0]
	by := -a[1]
	bz := -a[2]
	bw := a[3]
	// dual part
	ax := a[4]
	ay := a[5]
	az := a[6]
	aw := a[7]
	// dual * conj(real)
	ax1 := ax*bw + aw*bx + ay*bz - az*by
	ay1 := ay*bw + aw*by + az*bx - ax*bz
	az1 := az*bw + aw*bz + ax*by - ay*bx
	aw1 := aw*bw - ax*bx - ay*by - az*bz
	// rotate the real part (writes out[0..3])
	QuatRotateY(out, a, rad)
	bx = out[0]
	by = out[1]
	bz = out[2]
	bw = out[3]
	// recombine the dual part with the rotated real part
	out[4] = ax1*bw + aw1*bx + ay1*bz - az1*by
	out[5] = ay1*bw + aw1*by + az1*bx - ax1*bz
	out[6] = az1*bw + aw1*bz + ax1*by - ay1*bx
	out[7] = aw1*bw - ax1*bx - ay1*by - az1*bz
	return out
}
// Quat2RotateZ rotates a dual quat around the Z axis by rad radians.
// Same scheme as Quat2RotateX, delegating the real-part rotation to
// QuatRotateZ. out may alias a.
func Quat2RotateZ(out, a []float64, rad float64) []float64 {
	// conjugate of the real part
	bx := -a[0]
	by := -a[1]
	bz := -a[2]
	bw := a[3]
	// dual part
	ax := a[4]
	ay := a[5]
	az := a[6]
	aw := a[7]
	// dual * conj(real)
	ax1 := ax*bw + aw*bx + ay*bz - az*by
	ay1 := ay*bw + aw*by + az*bx - ax*bz
	az1 := az*bw + aw*bz + ax*by - ay*bx
	aw1 := aw*bw - ax*bx - ay*by - az*bz
	// rotate the real part (writes out[0..3])
	QuatRotateZ(out, a, rad)
	bx = out[0]
	by = out[1]
	bz = out[2]
	bw = out[3]
	// recombine the dual part with the rotated real part
	out[4] = ax1*bw + aw1*bx + ay1*bz - az1*by
	out[5] = ay1*bw + aw1*by + az1*bx - ax1*bz
	out[6] = az1*bw + aw1*bz + ax1*by - ay1*bx
	out[7] = aw1*bw - ax1*bx - ay1*by - az1*bz
	return out
}
// Quat2RotateByQuatAppend rotates a dual quat by a given quaternion,
// computing a * q: both the real and the dual part of a are multiplied by
// q on the right. out may alias a (each half is read before it is written).
func Quat2RotateByQuatAppend(out, a, q []float64) []float64 {
	qx, qy, qz, qw := q[0], q[1], q[2], q[3]
	for part := 0; part < 8; part += 4 {
		x, y, z, w := a[part], a[part+1], a[part+2], a[part+3]
		out[part] = x*qw + w*qx + y*qz - z*qy
		out[part+1] = y*qw + w*qy + z*qx - x*qz
		out[part+2] = z*qw + w*qz + x*qy - y*qx
		out[part+3] = w*qw - x*qx - y*qy - z*qz
	}
	return out
}
// Quat2RotateByQuatPrepend rotates a dual quat by a given quaternion,
// computing q * a: both the real and the dual part of a are multiplied by
// q on the left. out may alias a (each half is read before it is written).
func Quat2RotateByQuatPrepend(out, q, a []float64) []float64 {
	qx, qy, qz, qw := q[0], q[1], q[2], q[3]
	for part := 0; part < 8; part += 4 {
		x, y, z, w := a[part], a[part+1], a[part+2], a[part+3]
		out[part] = qx*w + qw*x + qy*z - qz*y
		out[part+1] = qy*w + qw*y + qz*x - qx*z
		out[part+2] = qz*w + qw*z + qx*y - qy*x
		out[part+3] = qw*w - qx*x - qy*y - qz*z
	}
	return out
}
// Quat2RotateAroundAxis rotates a dual quat around a given axis by rad
// radians. The axis is normalised automatically. A rotation of (almost)
// zero — as decided by the package's equals helper — returns a plain copy.
// Both halves of a are multiplied on the right by the axis-angle
// quaternion (bx,by,bz,bw); out may alias a.
func Quat2RotateAroundAxis(out, a, axis []float64, rad float64) []float64 {
	//Special case for rad = 0
	if equals(rad, 0) {
		return Quat2Copy(out, a)
	}
	axisLength := hypot(axis[0], axis[1], axis[2])
	rad = rad * 0.5
	s := math.Sin(rad)
	// unit rotation quaternion for the half angle
	bx := (s * axis[0]) / axisLength
	by := (s * axis[1]) / axisLength
	bz := (s * axis[2]) / axisLength
	bw := math.Cos(rad)
	// real part of a * rotation
	ax1 := a[0]
	ay1 := a[1]
	az1 := a[2]
	aw1 := a[3]
	out[0] = ax1*bw + aw1*bx + ay1*bz - az1*by
	out[1] = ay1*bw + aw1*by + az1*bx - ax1*bz
	out[2] = az1*bw + aw1*bz + ax1*by - ay1*bx
	out[3] = aw1*bw - ax1*bx - ay1*by - az1*bz
	// dual part of a * rotation
	ax := a[4]
	ay := a[5]
	az := a[6]
	aw := a[7]
	out[4] = ax*bw + aw*bx + ay*bz - az*by
	out[5] = ay*bw + aw*by + az*bx - ax*bz
	out[6] = az*bw + aw*bz + ax*by - ay*bx
	out[7] = aw*bw - ax*bx - ay*by - az*bz
	return out
}
// Quat2Add adds two dual quats component-wise into out and returns out.
func Quat2Add(out, a, b []float64) []float64 {
	for i := 0; i < 8; i++ {
		out[i] = a[i] + b[i]
	}
	return out
}
// Quat2Multiply multiplies two dual quats into out:
// real = real(a)*real(b); dual = real(a)*dual(b) + dual(a)*real(b).
// Every operand component is read into a local before any write to out,
// so out may alias a or b.
func Quat2Multiply(out, a, b []float64) []float64 {
	// real part of a
	ax0 := a[0]
	ay0 := a[1]
	az0 := a[2]
	aw0 := a[3]
	// dual part of b
	bx1 := b[4]
	by1 := b[5]
	bz1 := b[6]
	bw1 := b[7]
	// dual part of a
	ax1 := a[4]
	ay1 := a[5]
	az1 := a[6]
	aw1 := a[7]
	// real part of b
	bx0 := b[0]
	by0 := b[1]
	bz0 := b[2]
	bw0 := b[3]
	out[0] = ax0*bw0 + aw0*bx0 + ay0*bz0 - az0*by0
	out[1] = ay0*bw0 + aw0*by0 + az0*bx0 - ax0*bz0
	out[2] = az0*bw0 + aw0*bz0 + ax0*by0 - ay0*bx0
	out[3] = aw0*bw0 - ax0*bx0 - ay0*by0 - az0*bz0
	out[4] =
		ax0*bw1 +
			aw0*bx1 +
			ay0*bz1 -
			az0*by1 +
			ax1*bw0 +
			aw1*bx0 +
			ay1*bz0 -
			az1*by0
	out[5] =
		ay0*bw1 +
			aw0*by1 +
			az0*bx1 -
			ax0*bz1 +
			ay1*bw0 +
			aw1*by0 +
			az1*bx0 -
			ax1*bz0
	out[6] =
		az0*bw1 +
			aw0*bz1 +
			ax0*by1 -
			ay0*bx1 +
			az1*bw0 +
			aw1*bz0 +
			ax1*by0 -
			ay1*bx0
	out[7] =
		aw0*bw1 -
			ax0*bx1 -
			ay0*by1 -
			az0*bz1 +
			aw1*bw0 -
			ax1*bx0 -
			ay1*by0 -
			az1*bz0
	return out
}
// Quat2Mul is an alias for Quat2Multiply.
var Quat2Mul = Quat2Multiply
// Quat2Scale multiplies every component of dual quat a by the scalar b,
// storing the result in out.
func Quat2Scale(out, a []float64, b float64) []float64 {
	for i := 0; i < 8; i++ {
		out[i] = a[i] * b
	}
	return out
}
// Quat2Dot calculates the dot product of two dual quats. Alias of QuatDot,
// so presumably only the real parts (components 0..3) are compared —
// confirm against QuatDot's definition.
var Quat2Dot = QuatDot
// Quat2Lerp performs a linear interpolation between two dual quats.
// NOTE: The resulting dual quaternions won't always be normalized (The error is most noticeable when t = 0.5)
//
// When the real parts point in opposite hemispheres (negative dot), t is
// negated to take the shorter path. mt is computed from the original t
// before that flip — this matches the upstream glMatrix implementation.
func Quat2Lerp(out, a, b []float64, t float64) []float64 {
	mt := 1 - t
	if Quat2Dot(a, b) < 0 {
		t = -t
	}
	out[0] = a[0]*mt + b[0]*t
	out[1] = a[1]*mt + b[1]*t
	out[2] = a[2]*mt + b[2]*t
	out[3] = a[3]*mt + b[3]*t
	out[4] = a[4]*mt + b[4]*t
	out[5] = a[5]*mt + b[5]*t
	out[6] = a[6]*mt + b[6]*t
	out[7] = a[7]*mt + b[7]*t
	return out
}
// Quat2Invert calculates the inverse of a dual quat: the conjugate divided
// by the squared length of the real part. If the input is normalized,
// Quat2Conjugate is cheaper and gives the same result.
// A zero-length input divides by zero, producing ±Inf/NaN components.
func Quat2Invert(out, a []float64) []float64 {
	sqlen := Quat2SquaredLength(a)
	out[0] = -a[0] / sqlen
	out[1] = -a[1] / sqlen
	out[2] = -a[2] / sqlen
	out[3] = a[3] / sqlen
	out[4] = -a[4] / sqlen
	out[5] = -a[5] / sqlen
	out[6] = -a[6] / sqlen
	out[7] = a[7] / sqlen
	return out
}
// Quat2Conjugate calculates the conjugate of a dual quat: the vector
// components of both halves are negated, the scalar components kept.
// If the dual quaternion is normalized, this is faster than Quat2Invert
// and produces the same result. out may alias a.
func Quat2Conjugate(out, a []float64) []float64 {
	w0, w1 := a[3], a[7]
	for i := 0; i < 8; i++ {
		out[i] = -a[i]
	}
	out[3] = w0
	out[7] = w1
	return out
}
// Quat2Length calculates the length of a dual quat. Alias of QuatLength,
// so presumably only the real part (components 0..3) is measured — confirm
// against QuatLength's definition.
var Quat2Length = QuatLength

// Quat2Len is an alias for Quat2Length.
var Quat2Len = Quat2Length

// Quat2SquaredLength calculates the squared length of a dual quat
// (alias of QuatSquaredLength; same real-part caveat as Quat2Length).
var Quat2SquaredLength = QuatSquaredLength

// Quat2SqrLen is an alias for Quat2SquaredLength.
var Quat2SqrLen = Quat2SquaredLength
// Quat2Normalize normalizes a dual quat: the real part is scaled to unit
// length and the dual part is additionally made orthogonal to it by
// subtracting its projection (dotAB) onto the normalized real part.
// If the real part has zero length, out is left untouched — it is NOT
// filled with a copy of a.
func Quat2Normalize(out, a []float64) []float64 {
	// magnitude first holds the squared length of the real part.
	magnitude := Quat2SquaredLength(a)
	if magnitude > 0 {
		magnitude = math.Sqrt(magnitude)
		a0 := a[0] / magnitude
		a1 := a[1] / magnitude
		a2 := a[2] / magnitude
		a3 := a[3] / magnitude
		b0 := a[4]
		b1 := a[5]
		b2 := a[6]
		b3 := a[7]
		// projection of the dual part onto the unit real part
		dotAB := a0*b0 + a1*b1 + a2*b2 + a3*b3
		out[0] = a0
		out[1] = a1
		out[2] = a2
		out[3] = a3
		out[4] = (b0 - a0*dotAB) / magnitude
		out[5] = (b1 - a1*dotAB) / magnitude
		out[6] = (b2 - a2*dotAB) / magnitude
		out[7] = (b3 - a3*dotAB) / magnitude
	}
	return out
}
// Quat2Str returns a string representation of a dual quaternion in the
// form "quat2(x1, y1, z1, w1, x2, y2, z2, w2)".
func Quat2Str(a []float64) string {
	return fmt.Sprintf("quat2(%v, %v, %v, %v, %v, %v, %v, %v)", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7])
}
// Quat2ExactEquals returns whether the two dual quaternions have exactly
// the same elements in the same positions (compared with ==).
func Quat2ExactEquals(a, b []float64) bool {
	for i := 0; i < 8; i++ {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// Quat2Equals returns whether the two dual quaternions have approximately
// the same elements in the same positions, using the package's equals
// helper for each component-wise comparison.
func Quat2Equals(a, b []float64) bool {
	return equals(a[0], b[0]) &&
		equals(a[1], b[1]) &&
		equals(a[2], b[2]) &&
		equals(a[3], b[3]) &&
		equals(a[4], b[4]) &&
		equals(a[5], b[5]) &&
		equals(a[6], b[6]) &&
		equals(a[7], b[7])
}
package jaxfilter
// square returns x multiplied by itself.
func square(x float64) float64 {
	result := x * x
	return result
}
// A TranslationHighPassFilter is a high pass filter for translation.
// It is a discrete second-order section evaluated sample by sample;
// the coefficient forms suggest a bilinear-transform design — confirm
// against the original filter derivation.
type TranslationHighPassFilter struct {
	SamplingTime    uint    // sampling period; divided by 1000 in Filter, so apparently milliseconds — confirm units
	CutoffFrequency float64 // cutoff angular frequency ωn — presumably rad/s; confirm units
	inputs          [3]float64 // input history, inputs[0] is the newest sample
	outputs         [3]float64 // output history, outputs[0] is the newest sample
}

// Filter passes the high frequency component of a signal.
// It feeds input into the difference equation over the stored two previous
// inputs/outputs, shifts the history, and returns the new output sample.
func (f *TranslationHighPassFilter) Filter(input float64) float64 {
	// T[s]×ωn
	tw := f.CutoffFrequency * float64(f.SamplingTime) / 1000
	f.inputs[0] = input
	// Solve the difference equation
	f.outputs[0] = square(2/(tw+2))*(f.inputs[0]-2*f.inputs[1]+f.inputs[2]) -
		2*(tw-2)/(tw+2)*f.outputs[1] - square((tw-2)/(tw+2))*f.outputs[2]
	// Delay: shift history one step (index 1 -> 2, then 0 -> 1)
	for i := 0; i < 2; i++ {
		f.inputs[2-i] = f.inputs[1-i]
		f.outputs[2-i] = f.outputs[1-i]
	}
	return f.outputs[0]
}
// A TranslationLowPassFilter is a low pass filter for rotation.
// (NOTE(review): the name says "translation" but the original comment says
// "rotation" — confirm which is intended.) It is a discrete second-order
// section with a damping ratio ζ.
type TranslationLowPassFilter struct {
	SamplingTime    uint    // sampling period; divided by 1000 in Filter, so apparently milliseconds — confirm units
	CutoffFrequency float64 // cutoff angular frequency ωLP — presumably rad/s; confirm units
	DampingRatio    float64 // damping ratio ζ of the second-order section
	inputs          [3]float64 // input history, inputs[0] is the newest sample
	outputs         [3]float64 // output history, outputs[0] is the newest sample
}

// Filter passes the low frequency component of a signal.
// It evaluates the second-order difference equation using the stored
// history, shifts the history, and returns the new output sample.
func (f *TranslationLowPassFilter) Filter(input float64) float64 {
	f.inputs[0] = input
	// (TωLP)^2、4ζωLP*T
	t2w2 := square(f.CutoffFrequency * float64(f.SamplingTime) / 1000)
	dw4T := 4 * f.DampingRatio * f.CutoffFrequency * float64(f.SamplingTime) / 1000
	// Solve the difference equation
	f.outputs[0] = (t2w2)/(t2w2+dw4T+4)*(f.inputs[0]+2*f.inputs[1]+f.inputs[2]) -
		(1/(t2w2+dw4T+4))*(2*(t2w2-4)*f.outputs[1]+(t2w2-dw4T+4)*f.outputs[2])
	// Delay: shift history one step (index 1 -> 2, then 0 -> 1)
	for i := 0; i < 2; i++ {
		f.inputs[2-i] = f.inputs[1-i]
		f.outputs[2-i] = f.outputs[1-i]
	}
	return f.outputs[0]
}
// A RotationHighPassFilter is a high pass filter for rotation.
// Unlike the translation filters it is first order, so it keeps only one
// previous input/output sample.
type RotationHighPassFilter struct {
	SamplingTime    uint    // sampling period; divided by 1000 in Filter, so apparently milliseconds — confirm units
	CutoffFrequency float64 // cutoff angular frequency ωn — presumably rad/s; confirm units
	inputs          [2]float64 // input history, inputs[0] is the newest sample
	outputs         [2]float64 // output history, outputs[0] is the newest sample
}

// Filter passes the high frequency component of a signal.
// It evaluates the first-order difference equation, stores the new
// input/output as the previous sample, and returns the output.
func (f *RotationHighPassFilter) Filter(input float64) float64 {
	f.inputs[0] = input
	// Tωn
	tw := f.CutoffFrequency * float64(f.SamplingTime) / 1000
	// Solve the difference equation
	f.outputs[0] =
		2/(tw+2)*(f.inputs[0]-f.inputs[1]) - (tw-2)/(tw+2)*f.outputs[1]
	// Delay: remember this sample for the next call
	f.inputs[1] = f.inputs[0]
	f.outputs[1] = f.outputs[0]
	return f.outputs[0]
}
package assert
import (
"fmt"
"reflect"
"regexp"
"runtime"
"testing"
)
// Assertor is a single assertion. Assert returns nil when the assertion
// holds and a descriptive error when it does not.
type Assertor interface {
	Assert() error
}
// Equal asserts that Expect and Actual are deeply equal.
type Equal struct {
	Expect interface{}
	Actual interface{}
}

// Assert returns nil when Expect and Actual are deeply equal, and a
// descriptive error otherwise.
func (e *Equal) Assert() error {
	if reflect.DeepEqual(e.Expect, e.Actual) {
		return nil
	}
	return fmt.Errorf("Expect:%v, Actual:%v", e.Expect, e.Actual)
}
// NotEqual asserts that Expect and Actual are NOT deeply equal.
type NotEqual struct {
	Expect interface{}
	Actual interface{}
}

// Assert returns nil when Expect and Actual differ, and a descriptive
// error when they are deeply equal.
func (n *NotEqual) Assert() error {
	if !reflect.DeepEqual(n.Expect, n.Actual) {
		return nil
	}
	return fmt.Errorf("Expect:%v, Actual:%v", n.Expect, n.Actual)
}
// True asserts that Actual is true.
type True struct {
	Actual bool
}

// Assert returns nil when Actual is true, and a descriptive error
// otherwise.
func (r *True) Assert() error {
	if r.Actual {
		return nil
	}
	return fmt.Errorf("Expect:%v, Actual:%v", true, r.Actual)
}
// False asserts that Actual is false.
type False struct {
	Actual bool
}

// Assert returns nil when Actual is false, and a descriptive error
// otherwise.
func (r *False) Assert() error {
	if !r.Actual {
		return nil
	}
	return fmt.Errorf("Expect:%v, Actual:%v", false, r.Actual)
}
// Panic asserts that calling F panics.
type Panic struct {
	F func()
}

// Assert runs F, recovers any panic, and returns nil when F panicked
// (the expected outcome) or an error when F returned normally.
//
// Bug fix: the original seeded err with fmt.Errorf("") and never cleared
// it on the panic path, so Assert returned a non-nil (empty-message)
// error even when F panicked as expected, making every Panic assertion
// fail. The recover path now leaves err nil on success.
func (r *Panic) Assert() (err error) {
	defer func() {
		// recover() is non-nil exactly when F panicked.
		if recover() == nil {
			err = fmt.Errorf("Expect panic, but no panic catched")
		}
	}()
	r.F()
	// F returned normally; the deferred func will overwrite err.
	return nil
}
// NoPanic asserts that calling F does not panic.
type NoPanic struct {
	F func()
}

// Assert runs F and returns nil when F completes normally; if F panics,
// the panic is recovered and reported as an error carrying the panic
// value.
func (r *NoPanic) Assert() (err error) {
	defer func() {
		if ret := recover(); ret != nil {
			err = fmt.Errorf("Expect no panic, but panic catched:%v", ret)
		}
	}()
	r.F()
	return nil
}
// Match asserts that Actual matches the regular expression Regexp.
type Match struct {
	Regexp string
	Actual string
}

// Assert returns nil when Actual matches Regexp.
//
// Bug fix: the original discarded regexp.Compile's error, so an invalid
// pattern left regex nil and the subsequent MatchString call panicked
// with a nil-pointer dereference. A bad pattern is now reported as an
// assertion error instead.
func (r *Match) Assert() error {
	regex, err := regexp.Compile(r.Regexp)
	if err != nil {
		return fmt.Errorf("Invalid regexp `%s`: %v", r.Regexp, err)
	}
	if !regex.MatchString(r.Actual) {
		return fmt.Errorf("Expect match:`%s`, but actual `%s`", r.Regexp, r.Actual)
	}
	return nil
}
// NotMatch asserts that Actual does NOT match the regular expression
// Regexp.
type NotMatch struct {
	Regexp string
	Actual string
}

// Assert returns nil when Actual does not match Regexp.
//
// Bug fix: the original discarded regexp.Compile's error, so an invalid
// pattern left regex nil and the subsequent MatchString call panicked
// with a nil-pointer dereference. A bad pattern is now reported as an
// assertion error instead.
func (r *NotMatch) Assert() error {
	regex, err := regexp.Compile(r.Regexp)
	if err != nil {
		return fmt.Errorf("Invalid regexp `%s`: %v", r.Regexp, err)
	}
	if regex.MatchString(r.Actual) {
		return fmt.Errorf("Expect not match:`%s`, but actual `%s`", r.Regexp, r.Actual)
	}
	return nil
}
// Nil asserts that Actual is nil.
//
// NOTE(review): because Actual is an interface, a typed nil pointer
// stored in it compares non-nil here (the classic nil-interface trap);
// such values will fail this assertion — confirm that is acceptable.
type Nil struct {
	Actual interface{}
}

// Assert returns nil when Actual is nil, and a descriptive error
// otherwise.
func (r *Nil) Assert() error {
	if nil != r.Actual {
		return fmt.Errorf("Expect nil, but actual not nil:%v", r.Actual)
	}
	return nil
}
// NotNil asserts that Actual is not nil.
type NotNil struct {
	Actual interface{}
}

// Assert returns nil when Actual is non-nil, and an error when it is nil.
//
// Bug fix: the original condition was inverted (`nil != r.Actual`), so
// the assertion failed precisely when the value WAS non-nil and passed
// when it was nil — the opposite of its name and error message.
func (r *NotNil) Assert() error {
	if nil == r.Actual {
		return fmt.Errorf("Expect not nil, but actual nil")
	}
	return nil
}
// Assert is the wrapper of testing.T used to report assertion failures.
type Assert struct {
	T *testing.T
	F bool // true: Fail(); false: FailNow()
}

// New creates a pair of Assert helpers bound to t. The first has F=false
// and the second F=true, matching the two failure modes described on the
// F field.
func New(t *testing.T) (*Assert, *Assert) {
	stopping := &Assert{T: t, F: false}
	continuing := &Assert{T: t, F: true}
	return stopping, continuing
}
// Assert evaluates assertor and reports a failure on the wrapped
// testing.T, attributing the failure location to Assert's direct caller.
func (a *Assert) Assert(message string, assertor Assertor) {
	a.AssertInner(message, assertor, 2)
}

// AssertInner evaluates assertor and, on failure, logs message together
// with the file/line of the frame callerSkip levels above this function.
//
// Do not restructure the call chain: runtime.Caller(callerSkip) depends
// on the exact call depth from the user's code.
//
// NOTE(review): t.Errorf already marks the test as failed, so the a.F
// branch effectively chooses between a redundant Fail() and an aborting
// FailNow() — confirm the intended polarity of F.
func (a *Assert) AssertInner(message string, assertor Assertor, callerSkip int) {
	if err := assertor.Assert(); nil != err {
		_, file, line, _ := runtime.Caller(callerSkip)
		a.T.Errorf("\n%s:%d\n%s\n%s\n", file, line, message, err.Error())
		if a.F {
			a.T.Fail()
		} else {
			a.T.FailNow()
		}
	}
}
// Equal is used to check if exp equals to got (deep equality).
// (The original comment said "PassValue", which does not match the name.)
func (a *Assert) Equal(message string, exp, got interface{}) {
	a.AssertInner(message, &Equal{Expect: exp, Actual: got}, 2)
}

// NotEqual is used to check if exp is not equal to got.
func (a *Assert) NotEqual(message string, exp, got interface{}) {
	a.AssertInner(message, &NotEqual{Expect: exp, Actual: got}, 2)
}

// True is used to check the got be true.
func (a *Assert) True(message string, got bool) {
	a.AssertInner(message, &True{Actual: got}, 2)
}

// False is used to check the got be false.
func (a *Assert) False(message string, got bool) {
	a.AssertInner(message, &False{Actual: got}, 2)
}

// Panic is used to check the fn should give a panic.
func (a *Assert) Panic(message string, fn func()) {
	a.AssertInner(message, &Panic{fn}, 2)
}

// NoPanic is used to check the fn should not give a panic.
func (a *Assert) NoPanic(message string, fn func()) {
	a.AssertInner(message, &NoPanic{fn}, 2)
}

// Match is used to check the got is match to the regular expression of regex.
func (a *Assert) Match(message string, regex string, got string) {
	a.AssertInner(message, &Match{Regexp: regex, Actual: got}, 2)
}

// NotMatch is used to check the got does not match the regular expression regex.
func (a *Assert) NotMatch(message string, regex string, got string) {
	a.AssertInner(message, &NotMatch{Regexp: regex, Actual: got}, 2)
}

// Nil is used to check the got is nil.
func (a *Assert) Nil(message string, got interface{}) {
	a.AssertInner(message, &Nil{Actual: got}, 2)
}

// NotNil is used to check the got is not nil.
func (a *Assert) NotNil(message string, got interface{}) {
	a.AssertInner(message, &NotNil{Actual: got}, 2)
}

// Empty is intended to check the got is empty.
// TODO(review): not implemented — currently a no-op that never fails.
func (a *Assert) Empty(message string, got interface{}) {
}

// NotEmpty is intended to check a value is not empty.
// TODO(review): not implemented and takes no arguments yet.
func (a *Assert) NotEmpty() {
}
package simple
import (
"k8s.io/kubernetes/third_party/forked/gonum/graph"
)
// DirectedAcyclicGraph implements graph.Directed using UndirectedGraph,
// which only stores one edge for any node pair. Direction is recovered
// from the stored edge's From()/To() endpoints rather than from separate
// forward/backward edge maps.
type DirectedAcyclicGraph struct {
	*UndirectedGraph
}

// NewDirectedAcyclicGraph returns a graph whose self-loop and absent-edge
// weights are forwarded to the underlying UndirectedGraph.
func NewDirectedAcyclicGraph(self, absent float64) *DirectedAcyclicGraph {
	return &DirectedAcyclicGraph{
		UndirectedGraph: NewUndirectedGraph(self, absent),
	}
}
// HasEdgeFromTo reports whether the graph stores an edge directed from u
// to v: the single stored edge between the pair must originate at u.
func (g *DirectedAcyclicGraph) HasEdgeFromTo(u, v graph.Node) bool {
	if edge := g.UndirectedGraph.EdgeBetween(u, v); edge != nil {
		return edge.From().ID() == u.ID()
	}
	return false
}
// From returns all nodes reachable from n over edges that originate at n.
// Returns nil when n is not in the graph.
func (g *DirectedAcyclicGraph) From(n graph.Node) []graph.Node {
	if !g.Has(n) {
		return nil
	}
	fid := n.ID()
	// Capacity is the total neighbor count; only outgoing edges are kept.
	nodes := make([]graph.Node, 0, g.UndirectedGraph.edges[n.ID()].Len())
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.From().ID() == fid {
			nodes = append(nodes, g.UndirectedGraph.nodes[edge.To().ID()])
		}
	})
	return nodes
}
// VisitFrom calls visitor for each node reachable from n over edges that
// originate at n.
//
// NOTE(review): returning false from visitor does NOT stop the iteration —
// the inner `return` only exits the per-edge closure, and Visit proceeds
// to the remaining neighbors. Confirm whether early termination is
// actually required by callers before changing this.
func (g *DirectedAcyclicGraph) VisitFrom(n graph.Node, visitor func(neighbor graph.Node) (shouldContinue bool)) {
	if !g.Has(n) {
		return
	}
	fid := n.ID()
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.From().ID() == fid {
			if !visitor(g.UndirectedGraph.nodes[edge.To().ID()]) {
				return
			}
		}
	})
}
// To returns all nodes that can reach n over edges that terminate at n.
// Returns nil when n is not in the graph.
func (g *DirectedAcyclicGraph) To(n graph.Node) []graph.Node {
	if !g.Has(n) {
		return nil
	}
	tid := n.ID()
	// Capacity is the total neighbor count; only incoming edges are kept.
	nodes := make([]graph.Node, 0, g.UndirectedGraph.edges[n.ID()].Len())
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.To().ID() == tid {
			nodes = append(nodes, g.UndirectedGraph.nodes[edge.From().ID()])
		}
	})
	return nodes
}
// VisitTo calls visitor for each node that can reach n over edges that
// terminate at n.
//
// NOTE(review): as in VisitFrom, returning false from visitor does not
// stop the iteration — the inner `return` only exits the per-edge closure.
func (g *DirectedAcyclicGraph) VisitTo(n graph.Node, visitor func(neighbor graph.Node) (shouldContinue bool)) {
	if !g.Has(n) {
		return
	}
	tid := n.ID()
	g.UndirectedGraph.edges[n.ID()].Visit(func(neighbor int, edge graph.Edge) {
		if edge.To().ID() == tid {
			if !visitor(g.UndirectedGraph.nodes[edge.From().ID()]) {
				return
			}
		}
	})
}
package engine
import (
"bytes"
"fmt"
)
// Package-level singleton exceptions. These are shared values: callers
// must treat them as immutable.
var (
	// ErrInstantiation is an instantiation error exception.
	ErrInstantiation = &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				Atom("instantiation_error"),
				Atom("Arguments are not sufficiently instantiated."),
			},
		},
	}

	// ErrZeroDivisor is an exception that will be raised when an operation divided by zero.
	ErrZeroDivisor = evaluationError(Atom("zero_divisor"), Atom("Divided by zero."))

	// ErrIntOverflow is an exception that will be raised when an integer overflowed, either positively or negatively.
	ErrIntOverflow = evaluationError(Atom("int_overflow"), Atom("Integer overflow."))

	// ErrFloatOverflow is an exception that will be raised when a float overflowed, either positively or negatively.
	ErrFloatOverflow = evaluationError(Atom("float_overflow"), Atom("Float overflow."))

	// ErrUnderflow is an exception that will be raised when a float is too small to be represented by engine.Float.
	ErrUnderflow = evaluationError(Atom("underflow"), Atom("Underflow."))

	// ErrUndefined is an exception that will be raised when a function value for the arguments is undefined.
	ErrUndefined = evaluationError(Atom("undefined"), Atom("Undefined."))
)
// Exception is an error represented by a prolog term.
type Exception struct {
	Term Term // the thrown term, conventionally error(Formal, Info)
}

// Error renders the exception term as its quoted textual form.
func (e *Exception) Error() string {
	var buf bytes.Buffer
	// Error ignored: writing to a bytes.Buffer should not fail — confirm
	// Write reports no other error conditions for in-memory writers.
	_ = Write(&buf, e.Term, nil, WithQuoted(true))
	return buf.String()
}
// TypeErrorAtom returns a type_error(atom, culprit) exception.
func TypeErrorAtom(culprit Term) *Exception {
	return TypeError("atom", culprit)
}

// TypeErrorAtomic returns a type_error(atomic, culprit) exception.
func TypeErrorAtomic(culprit Term) *Exception {
	return TypeError("atomic", culprit)
}

// TypeErrorByte returns a type_error(byte, culprit) exception.
func TypeErrorByte(culprit Term) *Exception {
	return TypeError("byte", culprit)
}

// TypeErrorCallable returns a type_error(callable, culprit) exception.
func TypeErrorCallable(culprit Term) *Exception {
	return TypeError("callable", culprit)
}

// TypeErrorCharacter returns a type_error(character, culprit) exception.
func TypeErrorCharacter(culprit Term) *Exception {
	return TypeError("character", culprit)
}

// TypeErrorCompound returns a type_error(compound, culprit) exception.
func TypeErrorCompound(culprit Term) *Exception {
	return TypeError("compound", culprit)
}

// TypeErrorEvaluable returns a type_error(evaluable, culprit) exception.
func TypeErrorEvaluable(culprit Term) *Exception {
	return TypeError("evaluable", culprit)
}

// TypeErrorInByte returns a type_error(in_byte, culprit) exception.
func TypeErrorInByte(culprit Term) *Exception {
	return TypeError("in_byte", culprit)
}

// TypeErrorInCharacter returns a type_error(in_character, culprit) exception.
func TypeErrorInCharacter(culprit Term) *Exception {
	return TypeError("in_character", culprit)
}

// TypeErrorInteger returns a type_error(integer, culprit) exception.
func TypeErrorInteger(culprit Term) *Exception {
	return TypeError("integer", culprit)
}

// TypeErrorList returns a type_error(list, culprit) exception.
func TypeErrorList(culprit Term) *Exception {
	return TypeError("list", culprit)
}

// TypeErrorNumber returns a type_error(number, culprit) exception.
func TypeErrorNumber(culprit Term) *Exception {
	return TypeError("number", culprit)
}

// TypeErrorPredicateIndicator returns a type_error(predicate_indicator, culprit) exception.
func TypeErrorPredicateIndicator(culprit Term) *Exception {
	return TypeError("predicate_indicator", culprit)
}

// TypeErrorPair returns a type_error(pair, culprit) exception.
func TypeErrorPair(culprit Term) *Exception {
	return TypeError("pair", culprit)
}

// TypeErrorFloat returns a type_error(float, culprit) exception.
func TypeErrorFloat(culprit Term) *Exception {
	return TypeError("float", culprit)
}
// TypeError creates a new type error exception of the form
// error(type_error(ValidType, Culprit), Info), where Info is a
// human-readable atom mentioning the expected type and the culprit's
// Go type (%T).
func TypeError(validType Atom, culprit Term) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "type_error",
					Args:    []Term{validType, culprit},
				},
				Atom(fmt.Sprintf("Expected %s, found %T.", validType, culprit)),
			},
		},
	}
}
// Shorthand constructors for domain_error exceptions; each names the
// violated domain and forwards the culprit to DomainError.

func domainErrorFlagValue(culprit Term) *Exception {
	return DomainError("flag_value", culprit)
}

func domainErrorIOMode(culprit Term) *Exception {
	return DomainError("io_mode", culprit)
}

func domainErrorNotEmptyList(culprit Term) *Exception {
	return DomainError("not_empty_list", culprit)
}

func domainErrorNotLessThanZero(culprit Term) *Exception {
	return DomainError("not_less_than_zero", culprit)
}

func domainErrorOperatorPriority(culprit Term) *Exception {
	return DomainError("operator_priority", culprit)
}

func domainErrorOperatorSpecifier(culprit Term) *Exception {
	return DomainError("operator_specifier", culprit)
}

func domainErrorPrologFlag(culprit Term) *Exception {
	return DomainError("prolog_flag", culprit)
}

func domainErrorReadOption(culprit Term) *Exception {
	return DomainError("read_option", culprit)
}

func domainErrorSourceSink(culprit Term) *Exception {
	return DomainError("source_sink", culprit)
}

func domainErrorStream(culprit Term) *Exception {
	return DomainError("stream", culprit)
}

func domainErrorStreamOption(culprit Term) *Exception {
	return DomainError("stream_option", culprit)
}

func domainErrorStreamOrAlias(culprit Term) *Exception {
	return DomainError("stream_or_alias", culprit)
}

func domainErrorStreamProperty(culprit Term) *Exception {
	return DomainError("stream_property", culprit)
}

func domainErrorWriteOption(culprit Term) *Exception {
	return DomainError("write_option", culprit)
}

func domainErrorOrder(culprit Term) *Exception {
	return DomainError("order", culprit)
}
// DomainError creates a new domain error exception of the form
// error(domain_error(ValidDomain, Culprit), Info), where Info names the
// violated domain.
func DomainError(validDomain Atom, culprit Term) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "domain_error",
					Args:    []Term{validDomain, culprit},
				},
				Atom(fmt.Sprintf("Invalid value for %s.", validDomain)),
			},
		},
	}
}
// Shorthand constructors for existence_error exceptions; each names the
// missing object type and forwards the culprit to ExistenceError.

func existenceErrorProcedure(culprit Term) *Exception {
	return ExistenceError("procedure", culprit)
}

func existenceErrorSourceSink(culprit Term) *Exception {
	return ExistenceError("source_sink", culprit)
}

func existenceErrorStream(culprit Term) *Exception {
	return ExistenceError("stream", culprit)
}
// ExistenceError creates a new existence error exception of the form
// error(existence_error(ObjectType, Culprit), Info).
func ExistenceError(objectType Atom, culprit Term) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "existence_error",
					Args:    []Term{objectType, culprit},
				},
				Atom(fmt.Sprintf("Unknown %s.", objectType)),
			},
		},
	}
}
// Shorthand constructors for permission_error exceptions; each fixes the
// operation and permission type and forwards the culprit to
// PermissionError.

func permissionErrorModifyStaticProcedure(culprit Term) *Exception {
	return PermissionError("modify", "static_procedure", culprit)
}

func permissionErrorAccessPrivateProcedure(culprit Term) *Exception {
	return PermissionError("access", "private_procedure", culprit)
}

func permissionErrorOutputStream(culprit Term) *Exception {
	return PermissionError("output", "stream", culprit)
}

func permissionErrorOutputBinaryStream(culprit Term) *Exception {
	return PermissionError("output", "binary_stream", culprit)
}

func permissionErrorOutputTextStream(culprit Term) *Exception {
	return PermissionError("output", "text_stream", culprit)
}

func permissionErrorInputStream(culprit Term) *Exception {
	return PermissionError("input", "stream", culprit)
}

func permissionErrorInputBinaryStream(culprit Term) *Exception {
	return PermissionError("input", "binary_stream", culprit)
}

func permissionErrorInputTextStream(culprit Term) *Exception {
	return PermissionError("input", "text_stream", culprit)
}

func permissionErrorInputPastEndOfStream(culprit Term) *Exception {
	return PermissionError("input", "past_end_of_stream", culprit)
}
// PermissionError creates a new permission error exception of the form
// error(permission_error(Operation, PermissionType, Culprit), Info).
func PermissionError(operation, permissionType Atom, culprit Term) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "permission_error",
					Args:    []Term{operation, permissionType, culprit},
				},
				Atom(fmt.Sprintf("Operation %s not allowed for %s.", operation, permissionType)),
			},
		},
	}
}
// representationError creates an exception of the form
// error(representation_error(Limit), Info) for a breached representation
// limit.
func representationError(limit Atom) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "representation_error",
					Args:    []Term{limit},
				},
				Atom(fmt.Sprintf("Invalid %s.", limit)),
			},
		},
	}
}
// resourceError creates an exception of the form
// error(resource_error(Resource), Info) for an exhausted resource.
func resourceError(resource, info Term) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "resource_error",
					Args:    []Term{resource},
				},
				info,
			},
		},
	}
}
// syntaxErrorNotANumber reports a token that should have been a number.
func syntaxErrorNotANumber() *Exception {
	return syntaxError(Atom("not_a_number"), Atom("Not a number."))
}

// syntaxErrorUnexpectedToken reports an unexpected token, with info
// carrying the details.
func syntaxErrorUnexpectedToken(info Term) *Exception {
	return syntaxError(Atom("unexpected_token"), info)
}

// syntaxErrorInsufficient reports that the input ended prematurely.
func syntaxErrorInsufficient() *Exception {
	return syntaxError(Atom("insufficient"), Atom("Not enough input."))
}
// syntaxError creates an exception of the form
// error(syntax_error(Detail), Info).
func syntaxError(detail, info Term) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "syntax_error",
					Args:    []Term{detail},
				},
				info,
			},
		},
	}
}
// SystemError creates a new system error exception wrapping an arbitrary
// Go error as error(system_error, Message).
func SystemError(err error) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				Atom("system_error"),
				Atom(err.Error()),
			},
		},
	}
}
// evaluationError creates an exception of the form
// error(evaluation_error(Error), Info), used for arithmetic failures.
func evaluationError(error, info Term) *Exception {
	return &Exception{
		Term: &Compound{
			Functor: "error",
			Args: []Term{
				&Compound{
					Functor: "evaluation_error",
					Args:    []Term{error},
				},
				info,
			},
		},
	}
}
package vec
import (
"bytes"
"fmt"
sof "github.com/ninedraft/partridge/stringOrFloat"
)
// Frame is a heterogeneous row of strings and floats. Strings live in the
// strings table, numbers in the vector; indexes maps each column to its
// storage slot (negative values address strings as -1-index, non-negative
// values address the vector).
type Frame struct {
	strings []string // string values, addressed by index -1-i
	vector  Vector   // numeric values, addressed by their position
	indexes []int    // per-column storage index (see Value)
}
// FrameFromValues builds a Frame from a mixed list of values. Strings are
// stored in the strings table and recorded with a negative index
// (-1-position); float64 and int values are appended to the vector (ints
// are converted to float64) and recorded with their vector position.
// Any other value type panics.
func FrameFromValues(values ...interface{}) Frame {
	var frame = Frame{}
	for _, value := range values {
		var index int
		switch value := value.(type) {
		case string:
			// Encode string slot i as index -1-i.
			index = -1 - len(frame.strings)
			frame.strings = append(frame.strings, value)
		case float64:
			index = frame.vector.Len()
			frame.vector = append(frame.vector, value)
		case int:
			index = frame.vector.Len()
			frame.vector = append(frame.vector, float64(value))
		default:
			panic(fmt.Sprintf("[vec.FrameFromValues] expected string or float64, got %T", value))
		}
		frame.indexes = append(frame.indexes, index)
	}
	return frame
}
// Value returns the value stored in the given column, decoding the index:
// negative indexes address the strings table (slot -1-index), non-negative
// indexes address the vector.
func (frame Frame) Value(column int) sof.StringOrFloat64 {
	var index = frame.indexes[column]
	if index < 0 {
		return sof.String(frame.strings[-1-index])
	}
	return sof.Float64(frame.vector.Value(index))
}
// String renders the frame as "[v1, v2, ...]", quoting string items with
// %q and formatting everything else with %v.
func (frame Frame) String() string {
	var buf = bytes.NewBufferString("[")
	for i, item := range frame.Values() {
		switch {
		case item.IsString():
			fmt.Fprintf(buf, "%q", item.AsString())
		default:
			fmt.Fprintf(buf, "%v", item.Interface())
		}
		// Separator after every item except the last.
		if (i + 1) != frame.Len() {
			fmt.Fprintf(buf, ", ")
		}
	}
	return buf.String() + "]"
}
// Copy returns a deep copy of the frame: the vector, the strings table,
// and the indexes slice are all duplicated so mutations of the copy never
// leak into the original.
//
// Bug fix: the original duplicated only vector and strings; indexes still
// shared its backing array with the source frame, so the two frames were
// not fully independent.
func (frame Frame) Copy() Frame {
	frame.vector = frame.vector.Copy()
	frame.strings = append([]string(nil), frame.strings...)
	frame.indexes = append([]int(nil), frame.indexes...)
	return frame
}
// Len returns the number of columns in the frame.
func (frame Frame) Len() int {
	return len(frame.indexes)
}
// Values returns every column value, in column order, as
// sof.StringOrFloat64 wrappers.
func (frame Frame) Values() []sof.StringOrFloat64 {
	var values = make([]sof.StringOrFloat64, 0, frame.Len())
	for index := 0; index < frame.Len(); index++ {
		values = append(values, frame.Value(index))
	}
	return values
}
// Slice returns every column value, in column order, unwrapped to plain
// interface{} values (string or float64).
func (frame Frame) Slice() []interface{} {
	var values = make([]interface{}, 0, frame.Len())
	for index := 0; index < frame.Len(); index++ {
		values = append(values, frame.Value(index).Interface())
	}
	return values
}
package function
import (
"encoding/binary"
"fmt"
"math"
"strings"
"github.com/dolthub/go-mysql-server/sql"
"github.com/dolthub/go-mysql-server/sql/expression"
)
// AsWKB is a function that converts a spatial type into WKB format (alias for AsBinary)
type AsWKB struct {
	expression.UnaryExpression
}

// Compile-time check that AsWKB implements sql.FunctionExpression.
var _ sql.FunctionExpression = (*AsWKB)(nil)
// NewAsWKB creates a new AsWKB expression wrapping e.
// (The original comment said "point expression", which does not match.)
func NewAsWKB(e sql.Expression) sql.Expression {
	return &AsWKB{expression.UnaryExpression{Child: e}}
}

// FunctionName implements sql.FunctionExpression.
func (a *AsWKB) FunctionName() string {
	return "st_aswkb"
}

// Description implements sql.FunctionExpression.
func (a *AsWKB) Description() string {
	return "returns binary representation of given spatial type."
}

// IsNullable implements the sql.Expression interface; the result is
// nullable exactly when the child expression is.
func (a *AsWKB) IsNullable() bool {
	return a.Child.IsNullable()
}

// Type implements the sql.Expression interface; the WKB payload is
// returned as a LONGBLOB.
func (a *AsWKB) Type() sql.Type {
	return sql.LongBlob
}

// String renders the expression as it would appear in a query plan.
func (a *AsWKB) String() string {
	return fmt.Sprintf("ST_ASWKB(%s)", a.Child.String())
}

// WithChildren implements the Expression interface, requiring exactly one
// child.
func (a *AsWKB) WithChildren(children ...sql.Expression) (sql.Expression, error) {
	if len(children) != 1 {
		return nil, sql.ErrInvalidChildrenNumber.New(a, len(children), 1)
	}
	return NewAsWKB(children[0]), nil
}
// serializePoint writes p's X then Y coordinate as little-endian float64
// bits into buf, which the caller guarantees is at least 16 bytes.
func serializePoint(p sql.Point, buf []byte) {
	x, y := math.Float64bits(p.X), math.Float64bits(p.Y)
	binary.LittleEndian.PutUint64(buf[:8], x)
	binary.LittleEndian.PutUint64(buf[8:16], y)
}
// PointToBytes converts a sql.Point to its 16-byte WKB payload
// (two little-endian float64s, no header).
func PointToBytes(p sql.Point) []byte {
	out := make([]byte, 16)
	serializePoint(p, out)
	return out
}
// serializeLine writes l into buf: a little-endian uint32 point count
// followed by each point's 16-byte payload. buf must be sized by the caller
// (4 + 16*len(l.Points) bytes).
func serializeLine(l sql.LineString, buf []byte) {
	binary.LittleEndian.PutUint32(buf[:4], uint32(len(l.Points)))
	off := 4
	for _, p := range l.Points {
		serializePoint(p, buf[off:off+16])
		off += 16
	}
}
// LineToBytes converts a sql.LineString to its WKB payload
// (point count + points, no header).
func LineToBytes(l sql.LineString) []byte {
	out := make([]byte, 4+16*len(l.Points))
	serializeLine(l, out)
	return out
}
// serializePoly writes p into buf: a little-endian uint32 ring count
// followed by each ring's serialized form. buf must be pre-sized by the
// caller (see PolyToBytes).
func serializePoly(p sql.Polygon, buf []byte) {
	binary.LittleEndian.PutUint32(buf[:4], uint32(len(p.Lines)))
	off := 4
	for _, l := range p.Lines {
		n := 4 + 16*len(l.Points)
		serializeLine(l, buf[off:off+n])
		off += n
	}
}
// PolyToBytes converts a sql.Polygon to its WKB payload
// (ring count + rings, no header).
func PolyToBytes(p sql.Polygon) []byte {
	total := 4
	for _, l := range p.Lines {
		total += 4 + 16*len(l.Points)
	}
	out := make([]byte, total)
	serializePoly(p, out)
	return out
}
// Eval implements the sql.Expression interface. It serializes the child's
// spatial value to WKB: a 5-byte header (endianness marker + geometry type)
// followed by the geometry payload. NULL input yields NULL; non-spatial
// input yields ErrInvalidGISData.
func (a *AsWKB) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	val, err := a.Child.Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if val == nil {
		return nil, nil
	}
	var (
		geomType uint32
		data     []byte
	)
	switch v := val.(type) {
	case sql.Point:
		geomType, data = 1, PointToBytes(v)
	case sql.LineString:
		geomType, data = 2, LineToBytes(v)
	case sql.Polygon:
		geomType, data = 3, PolyToBytes(v)
	default:
		return nil, sql.ErrInvalidGISData.New("ST_AsWKB")
	}
	// MySQL seems to always use Little Endian; byte-order marker 1 = little.
	buf := make([]byte, 5, 5+len(data))
	buf[0] = 1
	binary.LittleEndian.PutUint32(buf[1:5], geomType)
	return append(buf, data...), nil
}
// WKBHeaderLength is the size of the WKB header these functions read and
// write: 1 byte for endianness plus 4 bytes for the geometry type.
const WKBHeaderLength = 5
// Geometry type IDs stored in the WKB header (1 = Point, 2 = LineString,
// 3 = Polygon; 0 is unknown/unused).
const (
	WKBUnknown = iota
	WKBPointID
	WKBLineID
	WKBPolyID
)
// GeomFromWKB is a function that returns a geometry type from a WKB byte array.
// Arguments: WKB bytes, optional SRID, optional axis-order option string.
type GeomFromWKB struct {
	expression.NaryExpression
}
// Compile-time check that GeomFromWKB satisfies sql.FunctionExpression.
var _ sql.FunctionExpression = (*GeomFromWKB)(nil)
// NewGeomFromWKB creates a new geometry expression; 1 to 3 arguments are accepted.
func NewGeomFromWKB(args ...sql.Expression) (sql.Expression, error) {
	if len(args) < 1 || len(args) > 3 {
		return nil, sql.ErrInvalidArgumentNumber.New("ST_GEOMFROMWKB", "1, 2, or 3", len(args))
	}
	return &GeomFromWKB{expression.NaryExpression{ChildExpressions: args}}, nil
}
// FunctionName implements sql.FunctionExpression.
func (g *GeomFromWKB) FunctionName() string {
	return "st_geomfromwkb"
}
// Description implements sql.FunctionExpression.
func (g *GeomFromWKB) Description() string {
	return "returns a new geometry from a WKB string."
}
// Type implements the sql.Expression interface.
func (g *GeomFromWKB) Type() sql.Type {
	return sql.PointType{} // TODO: replace with generic geometry type
}
// String implements fmt.Stringer.
func (g *GeomFromWKB) String() string {
	var args = make([]string, len(g.ChildExpressions))
	for i, arg := range g.ChildExpressions {
		args[i] = arg.String()
	}
	return fmt.Sprintf("ST_GEOMFROMWKB(%s)", strings.Join(args, ","))
}
// WithChildren implements the Expression interface; arity is re-validated
// by the constructor.
func (g *GeomFromWKB) WithChildren(children ...sql.Expression) (sql.Expression, error) {
	return NewGeomFromWKB(children...)
}
// ParseWKBHeader reads the 5-byte WKB header from buf: byte 0 is the
// byte-order marker (0 = big-endian, anything else little-endian) and
// bytes 1-4 are the geometry type ID in that byte order. It returns the
// endianness flag, the type ID, and an error if buf is too short.
func ParseWKBHeader(buf []byte) (bool, uint32, error) {
	if len(buf) < WKBHeaderLength {
		return false, 0, sql.ErrInvalidGISData.New("ST_GeomFromWKB")
	}
	isBig := buf[0] == 0
	var bo binary.ByteOrder = binary.LittleEndian
	if isBig {
		bo = binary.BigEndian
	}
	return isBig, bo.Uint32(buf[1:5]), nil
}
// WKBToPoint parses the 16-byte data portion of a WKB point (header already
// consumed) into a sql.Point. isBig selects byte order, srid is stamped on
// the result, and order requests swapping x/y (axis-order handling).
func WKBToPoint(buf []byte, isBig bool, srid uint32, order bool) (sql.Point, error) {
	if len(buf) != 16 {
		return sql.Point{}, sql.ErrInvalidGISData.New("ST_PointFromWKB")
	}
	var bo binary.ByteOrder = binary.LittleEndian
	if isBig {
		bo = binary.BigEndian
	}
	x := math.Float64frombits(bo.Uint64(buf[:8]))
	y := math.Float64frombits(bo.Uint64(buf[8:]))
	if order {
		x, y = y, x
	}
	return sql.Point{SRID: srid, X: x, Y: y}, nil
}
// WKBToLine parses the data portion of a byte array in WKB format (header
// already consumed) into a sql.LineString: a 4-byte point count followed by
// that many 16-byte points. isBig selects byte order, srid is stamped on
// every point, and order requests swapping each point's x/y.
func WKBToLine(buf []byte, isBig bool, srid uint32, order bool) (sql.LineString, error) {
	// Must be at least 4 bytes (length of linestring)
	if len(buf) < 4 {
		return sql.LineString{}, sql.ErrInvalidGISData.New("ST_LineFromWKB")
	}
	// Read length of line string
	var numPoints uint32
	if isBig {
		numPoints = binary.BigEndian.Uint32(buf[:4])
	} else {
		numPoints = binary.LittleEndian.Uint32(buf[:4])
	}
	// Extract line data
	lineData := buf[4:]
	// Validate length in 64-bit arithmetic: 16*numPoints previously wrapped
	// around uint32 for attacker-controlled counts, bypassing this check and
	// triggering a huge allocation below.
	if uint64(len(lineData)) < 16*uint64(numPoints) {
		return sql.LineString{}, sql.ErrInvalidGISData.New("ST_LineFromWKB")
	}
	// Parse points (int index avoids uint32 overflow in the slice bounds)
	points := make([]sql.Point, numPoints)
	for i := range points {
		point, err := WKBToPoint(lineData[16*i:16*(i+1)], isBig, srid, order)
		if err != nil {
			return sql.LineString{}, sql.ErrInvalidGISData.New("ST_LineFromWKB")
		}
		points[i] = point
	}
	return sql.LineString{SRID: srid, Points: points}, nil
}
// WKBToPoly parses the data portion of a byte array in WKB format (header
// already consumed) into a sql.Polygon: a 4-byte ring count followed by that
// many serialized linestrings. Every ring must satisfy isLinearRing.
func WKBToPoly(buf []byte, isBig bool, srid uint32, order bool) (sql.Polygon, error) {
	// Must be at least 4 bytes (length of polygon)
	if len(buf) < 4 {
		return sql.Polygon{}, sql.ErrInvalidGISData.New("ST_PolyFromWKB")
	}
	// Get number of lines in polygon
	var numLines uint32
	if isBig {
		numLines = binary.BigEndian.Uint32(buf[:4])
	} else {
		numLines = binary.LittleEndian.Uint32(buf[:4])
	}
	// Extract poly data
	polyData := buf[4:]
	// Each ring needs at least its own 4-byte point count, so a ring count
	// that cannot fit in the remaining data is invalid. Rejecting it up front
	// avoids pre-allocating an attacker-controlled slice for a bogus count
	// (previously make() ran before any validation of numLines).
	if uint64(numLines) > uint64(len(polyData)/4) {
		return sql.Polygon{}, sql.ErrInvalidGISData.New("ST_PolyFromWKB")
	}
	// Parse lines, advancing s past each ring as it is consumed
	s := 0
	lines := make([]sql.LineString, numLines)
	for i := uint32(0); i < numLines; i++ {
		line, err := WKBToLine(polyData[s:], isBig, srid, order)
		if err != nil || !isLinearRing(line) {
			return sql.Polygon{}, sql.ErrInvalidGISData.New("ST_PolyFromWKB")
		}
		lines[i] = line
		s += 4 + 16*len(line.Points) // shift parsing location over
	}
	return sql.Polygon{SRID: srid, Lines: lines}, nil
}
// ParseAxisOrder parses an axis-order option string ("axis-order=long-lat",
// "axis-order=lat-long", or "axis-order=srid-defined"), case- and
// surrounding-whitespace-insensitive, and reports whether x/y should be
// swapped (true only for long-lat).
func ParseAxisOrder(s string) (bool, error) {
	// TODO: support full option parsing (e.g. json-like / multi-option strings)
	switch strings.TrimSpace(strings.ToLower(s)) {
	case "axis-order=long-lat":
		return true, nil
	case "axis-order=lat-long", "axis-order=srid-defined":
		return false, nil
	default:
		// Callers discard this error and substitute their own, but name the
		// offending option instead of the old "placeholder" literal.
		return false, sql.ErrInvalidArgument.New("axis-order")
	}
}
// Eval implements the sql.Expression interface. It decodes the first
// argument (WKB bytes), an optional SRID (second argument) and an optional
// axis-order option (third argument), then dispatches on the header's
// geometry type to the matching WKBTo* parser.
func (g *GeomFromWKB) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	// Evaluate child
	val, err := g.ChildExpressions[0].Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if val == nil {
		return nil, nil
	}
	// Must be of type byte array
	v, ok := val.([]byte)
	if !ok {
		return nil, sql.ErrInvalidGISData.New("ST_GeomFromWKB")
	}
	// Parse Header
	isBig, geomType, err := ParseWKBHeader(v)
	if err != nil {
		return nil, err
	}
	// TODO: convert this block to a helper function (duplicated in the
	// other FromWKB Evals)
	// Determine SRID
	srid := uint32(0)
	if len(g.ChildExpressions) >= 2 {
		s, err := g.ChildExpressions[1].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if s == nil {
			return nil, nil
		}
		s, err = sql.Uint32.Convert(s)
		if err != nil {
			return nil, err
		}
		srid = s.(uint32)
	}
	if err = ValidateSRID(srid); err != nil {
		return nil, err
	}
	// TODO: convert this block to a helper function as well
	// Determine xy order (true means swap x/y in each decoded point)
	order := false
	if len(g.ChildExpressions) == 3 {
		o, err := g.ChildExpressions[2].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if o == nil {
			return nil, nil
		}
		order, err = ParseAxisOrder(o.(string))
		if err != nil {
			return nil, sql.ErrInvalidArgument.New(g.FunctionName())
		}
	}
	// Parse accordingly
	switch geomType {
	case WKBPointID:
		return WKBToPoint(v[WKBHeaderLength:], isBig, srid, order)
	case WKBLineID:
		return WKBToLine(v[WKBHeaderLength:], isBig, srid, order)
	case WKBPolyID:
		return WKBToPoly(v[WKBHeaderLength:], isBig, srid, order)
	default:
		return nil, sql.ErrInvalidGISData.New("ST_GeomFromWKB")
	}
}
// PointFromWKB is a function that returns a point type from a WKB byte array.
// Arguments: WKB bytes, optional SRID, optional axis-order option string.
type PointFromWKB struct {
	expression.NaryExpression
}
// Compile-time check that PointFromWKB satisfies sql.FunctionExpression.
var _ sql.FunctionExpression = (*PointFromWKB)(nil)
// NewPointFromWKB creates a new ST_PointFromWKB expression; 1 to 3
// arguments are accepted (WKB bytes, optional SRID, optional axis-order).
func NewPointFromWKB(args ...sql.Expression) (sql.Expression, error) {
	// Bug fix: this previously used &&, which is never true, so invalid
	// argument counts were silently accepted (siblings all use ||).
	if len(args) < 1 || len(args) > 3 {
		return nil, sql.ErrInvalidArgumentNumber.New("ST_POINTFROMWKB", "1, 2, or 3", len(args))
	}
	return &PointFromWKB{expression.NaryExpression{ChildExpressions: args}}, nil
}
// FunctionName implements sql.FunctionExpression.
func (p *PointFromWKB) FunctionName() string {
	return "st_pointfromwkb"
}
// Description implements sql.FunctionExpression.
func (p *PointFromWKB) Description() string {
	return "returns a new point from WKB format."
}
// Type implements the sql.Expression interface.
func (p *PointFromWKB) Type() sql.Type {
	return sql.PointType{}
}
// String implements fmt.Stringer.
func (p *PointFromWKB) String() string {
	var args = make([]string, len(p.ChildExpressions))
	for i, arg := range p.ChildExpressions {
		args[i] = arg.String()
	}
	return fmt.Sprintf("ST_POINTFROMWKB(%s)", strings.Join(args, ","))
}
// WithChildren implements the Expression interface; arity is re-validated
// by the constructor.
func (p *PointFromWKB) WithChildren(children ...sql.Expression) (sql.Expression, error) {
	return NewPointFromWKB(children...)
}
// Eval implements the sql.Expression interface. It decodes the first
// argument (WKB bytes whose header must mark a point), an optional SRID and
// an optional axis-order option, and returns the decoded sql.Point.
func (p *PointFromWKB) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	// Evaluate child
	val, err := p.ChildExpressions[0].Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if val == nil {
		return nil, nil
	}
	// Must be of type byte array
	v, ok := val.([]byte)
	if !ok {
		return nil, sql.ErrInvalidGISData.New("ST_PointFromWKB")
	}
	// Parse Header
	isBig, geomType, err := ParseWKBHeader(v)
	if err != nil {
		return nil, sql.ErrInvalidGISData.New("ST_PointFromWKB")
	}
	// Not a point, throw error
	if geomType != WKBPointID {
		return nil, sql.ErrInvalidGISData.New("ST_PointFromWKB")
	}
	// TODO: convert this block to a helper function (duplicated in the
	// other FromWKB Evals)
	// Determine SRID
	// NOTE(review): this defaults to sql.CartesianSRID while the sibling
	// Evals default to uint32(0) — presumably the same value; confirm.
	srid := sql.CartesianSRID
	if len(p.ChildExpressions) >= 2 {
		s, err := p.ChildExpressions[1].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if s == nil {
			return nil, nil
		}
		s, err = sql.Uint32.Convert(s)
		if err != nil {
			return nil, err
		}
		srid = s.(uint32)
	}
	if err = ValidateSRID(srid); err != nil {
		return nil, err
	}
	// Determine xy order (true means swap x/y in the decoded point)
	order := false
	if len(p.ChildExpressions) == 3 {
		o, err := p.ChildExpressions[2].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if o == nil {
			return nil, nil
		}
		order, err = ParseAxisOrder(o.(string))
		if err != nil {
			return nil, sql.ErrInvalidArgument.New(p.FunctionName())
		}
	}
	// Read data
	return WKBToPoint(v[WKBHeaderLength:], isBig, srid, order)
}
// LineFromWKB is a function that returns a linestring type from a WKB byte array.
// Arguments: WKB bytes, optional SRID, optional axis-order option string.
type LineFromWKB struct {
	expression.NaryExpression
}
// Compile-time check that LineFromWKB satisfies sql.FunctionExpression.
var _ sql.FunctionExpression = (*LineFromWKB)(nil)
// NewLineFromWKB creates a new ST_LineFromWKB expression; 1 to 3 arguments
// are accepted (WKB bytes, optional SRID, optional axis-order).
func NewLineFromWKB(args ...sql.Expression) (sql.Expression, error) {
	if len(args) < 1 || len(args) > 3 {
		// Fixed: the old message said "1 or 2" although 1-3 args are accepted.
		return nil, sql.ErrInvalidArgumentNumber.New("ST_LINEFROMWKB", "1, 2, or 3", len(args))
	}
	return &LineFromWKB{expression.NaryExpression{ChildExpressions: args}}, nil
}
// FunctionName implements sql.FunctionExpression.
func (l *LineFromWKB) FunctionName() string {
	return "st_linefromwkb"
}
// Description implements sql.FunctionExpression.
func (l *LineFromWKB) Description() string {
	return "returns a new linestring from WKB format."
}
// Type implements the sql.Expression interface.
func (l *LineFromWKB) Type() sql.Type {
	return sql.LineStringType{}
}
// String implements fmt.Stringer.
func (l *LineFromWKB) String() string {
	var args = make([]string, len(l.ChildExpressions))
	for i, arg := range l.ChildExpressions {
		args[i] = arg.String()
	}
	return fmt.Sprintf("ST_LINEFROMWKB(%s)", strings.Join(args, ","))
}
// WithChildren implements the Expression interface; arity is re-validated
// by the constructor.
func (l *LineFromWKB) WithChildren(children ...sql.Expression) (sql.Expression, error) {
	return NewLineFromWKB(children...)
}
// Eval implements the sql.Expression interface. It decodes the first
// argument (WKB bytes whose header must mark a linestring), an optional
// SRID and an optional axis-order option, and returns the decoded
// sql.LineString.
func (l *LineFromWKB) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	// Evaluate child
	val, err := l.ChildExpressions[0].Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if val == nil {
		return nil, nil
	}
	// Must be of type byte array
	v, ok := val.([]byte)
	if !ok {
		return nil, sql.ErrInvalidGISData.New("ST_LineFromWKB")
	}
	// Parse Header
	isBig, geomType, err := ParseWKBHeader(v)
	if err != nil {
		return nil, sql.ErrInvalidGISData.New("ST_LineFromWKB")
	}
	// Not a line, throw error
	if geomType != WKBLineID {
		return nil, sql.ErrInvalidGISData.New("ST_LineFromWKB")
	}
	// TODO: convert this block to a helper function (duplicated in the
	// other FromWKB Evals)
	// Determine SRID
	srid := uint32(0)
	if len(l.ChildExpressions) >= 2 {
		s, err := l.ChildExpressions[1].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if s == nil {
			return nil, nil
		}
		s, err = sql.Uint32.Convert(s)
		if err != nil {
			return nil, err
		}
		srid = s.(uint32)
	}
	if err = ValidateSRID(srid); err != nil {
		return nil, err
	}
	// Determine xy order (true means swap x/y in each decoded point)
	order := false
	if len(l.ChildExpressions) == 3 {
		o, err := l.ChildExpressions[2].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if o == nil {
			return nil, nil
		}
		order, err = ParseAxisOrder(o.(string))
		if err != nil {
			return nil, sql.ErrInvalidArgument.New(l.FunctionName())
		}
	}
	// Read data
	return WKBToLine(v[WKBHeaderLength:], isBig, srid, order)
}
// PolyFromWKB is a function that returns a polygon type from a WKB byte array.
// Arguments: WKB bytes, optional SRID, optional axis-order option string.
type PolyFromWKB struct {
	expression.NaryExpression
}
// Compile-time check that PolyFromWKB satisfies sql.FunctionExpression.
var _ sql.FunctionExpression = (*PolyFromWKB)(nil)
// NewPolyFromWKB creates a new ST_PolyFromWKB expression; 1 to 3 arguments
// are accepted (WKB bytes, optional SRID, optional axis-order).
func NewPolyFromWKB(args ...sql.Expression) (sql.Expression, error) {
	if len(args) < 1 || len(args) > 3 {
		return nil, sql.ErrInvalidArgumentNumber.New("ST_POLYFROMWKB", "1, 2, or 3", len(args))
	}
	return &PolyFromWKB{expression.NaryExpression{ChildExpressions: args}}, nil
}
// FunctionName implements sql.FunctionExpression.
func (p *PolyFromWKB) FunctionName() string {
	return "st_polyfromwkb"
}
// Description implements sql.FunctionExpression.
func (p *PolyFromWKB) Description() string {
	return "returns a new polygon from WKB format."
}
// Type implements the sql.Expression interface.
func (p *PolyFromWKB) Type() sql.Type {
	return sql.PolygonType{}
}
// String implements fmt.Stringer.
func (p *PolyFromWKB) String() string {
	var args = make([]string, len(p.ChildExpressions))
	for i, arg := range p.ChildExpressions {
		args[i] = arg.String()
	}
	return fmt.Sprintf("ST_POLYFROMWKB(%s)", strings.Join(args, ","))
}
// WithChildren implements the Expression interface; arity is re-validated
// by the constructor.
func (p *PolyFromWKB) WithChildren(children ...sql.Expression) (sql.Expression, error) {
	return NewPolyFromWKB(children...)
}
// Eval implements the sql.Expression interface. It decodes the first
// argument (WKB bytes whose header must mark a polygon), an optional SRID
// and an optional axis-order option, and returns the decoded sql.Polygon.
func (p *PolyFromWKB) Eval(ctx *sql.Context, row sql.Row) (interface{}, error) {
	// Evaluate child
	val, err := p.ChildExpressions[0].Eval(ctx, row)
	if err != nil {
		return nil, err
	}
	if val == nil {
		return nil, nil
	}
	// Must be of type byte array
	v, ok := val.([]byte)
	if !ok {
		return nil, sql.ErrInvalidGISData.New("ST_PolyFromWKB")
	}
	// Parse Header
	isBig, geomType, err := ParseWKBHeader(v)
	if err != nil {
		return nil, sql.ErrInvalidGISData.New("ST_PolyFromWKB")
	}
	// Not a polygon, throw error
	if geomType != WKBPolyID {
		return nil, sql.ErrInvalidGISData.New("ST_PolyFromWKB")
	}
	// TODO: convert this block to a helper function (duplicated in the
	// other FromWKB Evals)
	// Determine SRID
	srid := uint32(0)
	if len(p.ChildExpressions) >= 2 {
		s, err := p.ChildExpressions[1].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if s == nil {
			return nil, nil
		}
		s, err = sql.Uint32.Convert(s)
		if err != nil {
			return nil, err
		}
		srid = s.(uint32)
	}
	if err = ValidateSRID(srid); err != nil {
		return nil, err
	}
	// Determine xy order (true means swap x/y in each decoded point)
	order := false
	if len(p.ChildExpressions) == 3 {
		o, err := p.ChildExpressions[2].Eval(ctx, row)
		if err != nil {
			return nil, err
		}
		if o == nil {
			return nil, nil
		}
		order, err = ParseAxisOrder(o.(string))
		if err != nil {
			return nil, sql.ErrInvalidArgument.New(p.FunctionName())
		}
	}
	// Read data
	return WKBToPoly(v[WKBHeaderLength:], isBig, srid, order)
}
func ValidateSRID(srid uint32) error {
if srid != sql.CartesianSRID && srid != sql.GeoSpatialSRID {
return ErrInvalidSRID.New(srid)
}
return nil
} | sql/expression/function/wkb.go | 0.715821 | 0.468851 | wkb.go | starcoder |
package structmatcher
/*
* This file contains test structs and functions used in unit tests via dependency injection.
*/
import (
"fmt"
"reflect"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
)
/*
 * StructMatcher compares expected and actual structs field by field and
 * returns a list of human-readable mismatch messages (empty when they match).
 *
 * If fields are to be filtered in or out, set shouldFilter to true; filterInclude is true to
 * include fields or false to exclude fields, and filterFields contains the field names to filter on.
 * To filter on a field "fieldname" in struct "structname", pass in "fieldname".
 * To filter on a field "fieldname" in a nested struct under field "structfield", pass in "structfield.fieldname".
 * This function assumes structs will only ever be nested one level deep.
 */
func StructMatcher(expected, actual interface{}, shouldFilter bool, filterInclude bool, filterFields ...string) []string {
	return structMatcher(reflect.ValueOf(expected), reflect.ValueOf(actual), "", shouldFilter, filterInclude, filterFields...)
}
// structMatcher is the recursive worker behind StructMatcher. It walks the
// fields of the (possibly pointer-wrapped) structs, recursing into nested
// structs and same-length struct slices, and collects Gomega failure
// messages via InterceptGomegaFailures rather than failing the test
// directly. fieldPath is the dotted prefix used in mismatch messages.
func structMatcher(expected, actual reflect.Value, fieldPath string, shouldFilter bool, filterInclude bool, filterFields ...string) []string {
	// Add field names for the top-level struct to a filter map, and split off nested field names to pass down to nested structs
	filterMap := make(map[string]bool)
	nestedFilterFields := make([]string, 0)
	for i := 0; i < len(filterFields); i++ {
		fieldNames := strings.Split(filterFields[i], ".")
		if len(fieldNames) == 2 {
			nestedFilterFields = append(nestedFilterFields, fieldNames[1])
			// If we include a nested struct field, we also need to include the nested struct
			if filterInclude {
				filterMap[fieldNames[0]] = true
			}
		} else {
			filterMap[filterFields[i]] = true
		}
	}
	expectedStruct := reflect.Indirect(expected)
	actualStruct := reflect.Indirect(actual)
	mismatches := []string{}
	mismatches = append(mismatches, InterceptGomegaFailures(func() {
		structCanInterface := true
		for i := 0; i < expectedStruct.NumField(); i++ {
			expectedField := reflect.Indirect(expectedStruct.Field(i))
			actualField := reflect.Indirect(actualStruct.Field(i))
			fieldName := actualStruct.Type().Field(i).Name
			// If we're including, skip this field if the name doesn't match; if we're excluding, skip if it does match
			if shouldFilter && ((filterInclude && !filterMap[fieldName]) || (!filterInclude && filterMap[fieldName])) {
				continue
			}
			actualFieldIsNonemptySlice := actualField.Kind() == reflect.Slice && !actualField.IsNil() && actualField.Len() > 0
			expectedFieldIsNonemptySlice := expectedField.Kind() == reflect.Slice && !expectedField.IsNil() && expectedField.Len() > 0
			fieldIsStructSlice := actualFieldIsNonemptySlice && expectedFieldIsNonemptySlice && actualField.Len() == expectedField.Len() && actualField.Index(0).Kind() == reflect.Struct
			if fieldIsStructSlice {
				// Compare struct slices element-by-element, extending the field path with an index
				for j := 0; j < actualField.Len(); j++ {
					expectedStructField := expectedStruct.Field(i).Index(j)
					actualStructField := actualStruct.Field(i).Index(j)
					subFieldPath := fmt.Sprintf("%s%s[%d].", fieldPath, fieldName, j)
					mismatches = append(mismatches, structMatcher(expectedStructField, actualStructField, subFieldPath, shouldFilter, filterInclude, nestedFilterFields...)...)
				}
			} else if actualField.Kind() == reflect.Struct {
				// Recurse into nested structs with a dotted field path
				expectedStructField := expectedStruct.Field(i)
				actualStructField := actualStruct.Field(i)
				subFieldPath := fmt.Sprintf("%s%s.", fieldPath, fieldName)
				mismatches = append(mismatches, structMatcher(expectedStructField, actualStructField, subFieldPath, shouldFilter, filterInclude, nestedFilterFields...)...)
			} else {
				if expectedStruct.Field(i).CanInterface() {
					expectedValue := expectedStruct.Field(i).Interface()
					actualValue := actualStruct.Field(i).Interface()
					Expect(actualValue).To(Equal(expectedValue), "Mismatch on field %s%s", fieldPath, fieldName)
				} else {
					// Unexported field: fall back to whole-struct comparison below
					structCanInterface = false
				}
			}
		}
		if !structCanInterface {
			extra := []interface{}{
				"Mismatch on unexported field within top level struct",
			}
			if fieldPath != "" {
				structName := fieldPath[0 : len(fieldPath)-1] // remove trailing dot.
				extra = []interface{}{
					"Mismatch on unexported field within %s", structName,
				}
			}
			Expect(actualStruct.Interface()).To(Equal(expectedStruct.Interface()), extra...)
		}
	})...)
	return mismatches
}
// ExpectStructsToMatch fails the current Ginkgo test if expected and actual
// do not match field-for-field.
//
// Deprecated: Use structmatcher.MatchStruct() GomegaMatcher
func ExpectStructsToMatch(expected interface{}, actual interface{}) {
	if mismatches := StructMatcher(expected, actual, false, false); len(mismatches) > 0 {
		Fail(strings.Join(mismatches, "\n"))
	}
}
// ExpectStructsToMatchExcluding fails the current Ginkgo test if the
// structs differ on any field not listed in excludeFields.
//
// Deprecated: Use structmatcher.MatchStruct().ExcludingFields() GomegaMatcher
func ExpectStructsToMatchExcluding(expected interface{}, actual interface{}, excludeFields ...string) {
	if mismatches := StructMatcher(expected, actual, true, false, excludeFields...); len(mismatches) > 0 {
		Fail(strings.Join(mismatches, "\n"))
	}
}
// ExpectStructsToMatchIncluding fails the current Ginkgo test if the
// structs differ on any of the listed includeFields.
//
// Deprecated: Use structmatcher.MatchStruct().IncludingFields() GomegaMatcher
func ExpectStructsToMatchIncluding(expected interface{}, actual interface{}, includeFields ...string) {
	if mismatches := StructMatcher(expected, actual, true, true, includeFields...); len(mismatches) > 0 {
		Fail(strings.Join(mismatches, "\n"))
	}
}
// Matcher is a GomegaMatcher that compares structs field by field,
// optionally restricted to (or excluding) a set of field names.
type Matcher struct {
	expected        interface{} // expected struct value
	includingFields []string    // if non-nil, only these fields are compared
	excludingFields []string    // if non-nil, these fields are skipped
	mismatches      []string    // messages collected by the last Match call
}
// Compile-time check that Matcher satisfies types.GomegaMatcher.
var _ types.GomegaMatcher = &Matcher{}
// MatchStruct returns a Matcher comparing against expected; chain
// IncludingFields/ExcludingFields to restrict the comparison.
func MatchStruct(expected interface{}) *Matcher {
	return &Matcher{
		expected: expected,
	}
}
// Match implements types.GomegaMatcher. It records any field mismatches on
// the receiver and succeeds only when none were found. Include filtering
// takes precedence over exclude filtering when both are set.
func (m *Matcher) Match(actual interface{}) (success bool, err error) {
	switch {
	case m.includingFields != nil:
		m.mismatches = StructMatcher(m.expected, actual, true, true, m.includingFields...)
	case m.excludingFields != nil:
		m.mismatches = StructMatcher(m.expected, actual, true, false, m.excludingFields...)
	default:
		m.mismatches = StructMatcher(m.expected, actual, false, false)
	}
	return len(m.mismatches) == 0, nil
}
// FailureMessage implements types.GomegaMatcher, listing the mismatches
// recorded by the last Match call.
func (m *Matcher) FailureMessage(actual interface{}) (message string) {
	return "Expected structs to match but:\n" + strings.Join(m.mismatches, "\n")
}
// NegatedFailureMessage implements types.GomegaMatcher.
func (m *Matcher) NegatedFailureMessage(actual interface{}) (message string) {
	return "Expected structs not to match, but they did"
}
// IncludingFields restricts the comparison to the named fields and returns
// the matcher for chaining.
func (m *Matcher) IncludingFields(fields ...string) *Matcher {
	m.includingFields = fields
	return m
}
// ExcludingFields skips the named fields during comparison and returns the
// matcher for chaining.
func (m *Matcher) ExcludingFields(fields ...string) *Matcher {
	m.excludingFields = fields
	return m
}
package runtime
import (
"encoding/base64"
"log"
"math"
"runtime"
"runtime/debug"
"strings"
"github.com/apmckinlay/gsuneido/util/dnum"
"github.com/apmckinlay/gsuneido/util/hacks"
"github.com/apmckinlay/gsuneido/util/regex"
)
// Commonly used Value constants, shared to avoid repeated allocation.
var (
	Zero     Value = SuInt(0)
	One      Value = SuInt(1)
	MinusOne Value = SuInt(-1)
	MaxInt   Value = SuDnum{Dnum: dnum.FromInt(math.MaxInt32)} // largest 32-bit int as a Dnum
	Inf      Value = SuDnum{Dnum: dnum.PosInf}
	NegInf   Value = SuDnum{Dnum: dnum.NegInf}
	True     Value = SuBool(true)
	False    Value = SuBool(false)
	// EmptyStr defined in sustr.go
)
// OpIs implements the "is" operator: equality via Value.Equal.
func OpIs(x Value, y Value) Value {
	return SuBool(x.Equal(y))
}
// OpIsnt implements the "isnt" operator: negated Value.Equal.
func OpIsnt(x Value, y Value) Value {
	return SuBool(!x.Equal(y))
}
// OpLt reports x < y using Value.Compare ordering.
func OpLt(x Value, y Value) Value {
	return SuBool(x.Compare(y) < 0)
}
// OpLte reports x <= y using Value.Compare ordering.
func OpLte(x Value, y Value) Value {
	return SuBool(x.Compare(y) <= 0)
}
// OpGt reports x > y using Value.Compare ordering.
func OpGt(x Value, y Value) Value {
	return SuBool(x.Compare(y) > 0)
}
// OpGte reports x >= y using Value.Compare ordering.
func OpGte(x Value, y Value) Value {
	return SuBool(x.Compare(y) >= 0)
}
// OpAdd adds two values: an integer fast path when both fit in SuInt,
// otherwise decimal (Dnum) arithmetic.
func OpAdd(x Value, y Value) Value {
	if xi, xok := SuIntToInt(x); xok {
		if yi, yok := SuIntToInt(y); yok {
			return IntVal(xi + yi)
		}
	}
	return SuDnum{Dnum: dnum.Add(ToDnum(x), ToDnum(y))}
}
// OpSub subtracts y from x (integer fast path, Dnum fallback).
func OpSub(x Value, y Value) Value {
	if xi, xok := SuIntToInt(x); xok {
		if yi, yok := SuIntToInt(y); yok {
			return IntVal(xi - yi)
		}
	}
	return SuDnum{Dnum: dnum.Sub(ToDnum(x), ToDnum(y))}
}
// OpMul multiplies two values (integer fast path, Dnum fallback).
func OpMul(x Value, y Value) Value {
	if xi, xok := SuIntToInt(x); xok {
		if yi, yok := SuIntToInt(y); yok {
			return IntVal(xi * yi)
		}
	}
	return SuDnum{Dnum: dnum.Mul(ToDnum(x), ToDnum(y))}
}
// OpDiv divides x by y. The integer fast path is used only when the
// division is exact (no remainder) and y is nonzero; everything else goes
// through Dnum division.
func OpDiv(x Value, y Value) Value {
	if yi, yok := SuIntToInt(y); yok && yi != 0 {
		if xi, xok := SuIntToInt(x); xok {
			if xi%yi == 0 {
				return IntVal(xi / yi)
			}
		}
	}
	return SuDnum{Dnum: dnum.Div(ToDnum(x), ToDnum(y))}
}
// OpMod computes the integer remainder x % y. A zero divisor panics
// (Go integer division by zero).
func OpMod(x Value, y Value) Value {
	return IntVal(ToInt(x) % ToInt(y))
}
// OpLeftShift shifts x left by y bits with 32-bit signed semantics
// (the shift is performed in int32 before widening back).
func OpLeftShift(x Value, y Value) Value {
	result := int32(ToInt(x)) << ToInt(y)
	return IntVal(int(result))
}
// OpRightShift shifts x right by y bits logically (unsigned 32-bit),
// so zeros are shifted in regardless of sign.
func OpRightShift(x Value, y Value) Value {
	result := uint32(ToInt(x)) >> ToInt(y)
	return IntVal(int(result))
}
// OpBitOr is bitwise OR on the integer values.
func OpBitOr(x Value, y Value) Value {
	return IntVal(ToInt(x) | ToInt(y))
}
// OpBitAnd is bitwise AND on the integer values.
func OpBitAnd(x Value, y Value) Value {
	return IntVal(ToInt(x) & ToInt(y))
}
// OpBitXor is bitwise XOR on the integer values.
func OpBitXor(x Value, y Value) Value {
	return IntVal(ToInt(x) ^ ToInt(y))
}
// OpBitNot is bitwise complement of the integer value.
func OpBitNot(x Value) Value {
	return IntVal(^ToInt(x))
}
// OpNot negates a boolean value; any non-boolean operand panics.
func OpNot(x Value) Value {
	if x == True {
		return False
	} else if x == False {
		return True
	}
	panic("not requires boolean")
}
// OpBool converts a Value used in a conditional to a Go bool; any value
// other than True/False panics.
func OpBool(x Value) bool {
	switch x {
	case True:
		return true
	case False:
		return false
	default:
		panic("conditionals require true or false")
	}
}
// OpUnaryPlus coerces a value to numeric: small ints pass through
// unchanged, everything else is converted to a Dnum.
func OpUnaryPlus(x Value) Value {
	if _, ok := x.(*smi); ok {
		return x
	}
	return SuDnum{Dnum: ToDnum(x)}
}
// OpUnaryMinus negates a numeric value (integer fast path, Dnum fallback).
func OpUnaryMinus(x Value) Value {
	if xi, ok := SuIntToInt(x); ok {
		return IntVal(-xi)
	}
	return SuDnum{Dnum: ToDnum(x).Neg()}
}
// OpCat concatenates two values as strings, with a fast path when both
// operands are already SuStr; otherwise cat3 handles SuConcat accumulation
// and exception callstack preservation.
func OpCat(t *Thread, x, y Value) Value {
	if ssx, ok := x.(SuStr); ok {
		if ssy, ok := y.(SuStr); ok {
			return cat2(string(ssx), string(ssy))
		}
	}
	return cat3(t, x, y)
}
// cat2 concatenates two Go strings into a Value. Small results become a
// plain SuStr; large results use SuConcat to avoid repeated copying. An
// empty operand returns the other side directly.
func cat2(xs, ys string) Value {
	const large = 256
	switch {
	case len(xs) == 0:
		return SuStr(ys)
	case len(ys) == 0:
		return SuStr(xs)
	case len(xs)+len(ys) < large:
		return SuStr(xs + ys)
	}
	return NewSuConcat().Add(xs).Add(ys)
}
// cat3 concatenates two values, reusing an existing SuConcat accumulator
// when x already is one. If either operand is an SuExcept, the result is
// re-wrapped as an SuExcept carrying that operand's callstack (x wins).
func cat3(t *Thread, x, y Value) Value {
	var result Value
	if xc, ok := x.(SuConcat); ok {
		result = xc.Add(catToStr(t, y))
	} else {
		result = cat2(catToStr(t, x), catToStr(t, y))
	}
	if xe, ok := x.(*SuExcept); ok {
		return &SuExcept{SuStr: SuStr(AsStr(result)), Callstack: xe.Callstack}
	}
	if ye, ok := y.(*SuExcept); ok {
		return &SuExcept{SuStr: SuStr(AsStr(result)), Callstack: ye.Callstack}
	}
	return result
}
// catToStr converts a value to a string for concatenation, preferring the
// thread-aware ToString when the value supports it.
func catToStr(t *Thread, v Value) string {
	if d, ok := v.(ToStringable); ok {
		return d.ToString(t)
	}
	return AsStr(v)
}
// OpMatch reports whether string x matches regex pattern y. A precompiled
// SuRegex is used as-is; otherwise the pattern is compiled, using the
// thread's regex cache when a thread is available.
func OpMatch(t *Thread, x Value, y Value) SuBool {
	var pat regex.Pattern
	if r, ok := y.(SuRegex); ok {
		pat = r.Pat
	} else if t != nil {
		pat = t.RxCache.Get(ToStr(y))
	} else {
		pat = regex.Compile(ToStr(y))
	}
	return SuBool(pat.Matches(ToStr(x)))
}
// ToIndex is used by ranges and string[i]; non-integer indexes panic.
func ToIndex(key Value) int {
	if n, ok := key.IfInt(); ok {
		return n
	}
	panic("indexes must be integers")
}
// prepFrom normalizes a range start index against size: negative values
// count back from the end (floored at 0) and the result is clamped to size.
func prepFrom(from, size int) int {
	if from < 0 {
		if from += size; from < 0 {
			from = 0
		}
	}
	if from > size {
		return size
	}
	return from
}
// prepTo normalizes a range end index: negative values count back from the
// end, then the result is raised to at least from and clamped to size.
func prepTo(from, to, size int) int {
	if to < 0 {
		to += size
	}
	if to < from {
		to = from
	}
	if size < to {
		to = size
	}
	return to
}
// prepLen clamps a requested range length to [0, size].
func prepLen(n, size int) int {
	if n < 0 {
		n = 0
	}
	if n > size {
		n = size
	}
	return n
}
// OpIter wraps a value's iterator for for-in loops; values without an
// Iter() method panic.
func OpIter(x Value) SuIter {
	iterable, ok := x.(interface{ Iter() Iter })
	if !ok {
		panic("can't iterate " + x.Type().String())
	}
	return SuIter{Iter: iterable.Iter()}
}
// OpCatch converts a panic value to an SuExcept and returns it when the
// exception string matches catchPat (see catchMatch); otherwise the
// (now-wrapped) exception is re-panicked so outer handlers can see it.
func OpCatch(t *Thread, e interface{}, catchPat string) *SuExcept {
	se := ToSuExcept(t, e)
	if catchMatch(string(se.SuStr), catchPat) {
		return se
	}
	panic(se) // propagate panic if not caught
}
// ToSuExcept converts an arbitrary panic value to an *SuExcept. A value
// that is already an SuExcept passes through; otherwise a new SuExcept is
// created here, capturing the callstack at the first catch. Go runtime
// errors are additionally logged with a stack trace.
func ToSuExcept(t *Thread, e interface{}) *SuExcept {
	se, ok := e.(*SuExcept)
	if !ok {
		// first catch creates SuExcept with callstack
		var ss SuStr
		if err, ok := e.(error); ok {
			if _, ok := e.(runtime.Error); ok {
				log.Println(e)
				debug.PrintStack()
			}
			ss = SuStr(err.Error())
		} else if s, ok := e.(string); ok {
			ss = SuStr(s)
		} else {
			// anything else must be a Value; panics if not
			ss = SuStr(ToStr(e.(Value)))
		}
		se = NewSuExcept(t, ss)
	}
	return se
}
// catchMatch matches an exception string against a catch pattern: a list of
// alternatives separated by '|', each of which is a prefix match unless it
// starts with '*', in which case it is a substring match.
func catchMatch(e, pat string) bool {
	rest := pat
	for {
		p := rest
		i := strings.IndexByte(rest, '|')
		last := i < 0
		if !last {
			p, rest = rest[:i], rest[i+1:]
		}
		if len(p) > 0 && p[0] == '*' {
			if strings.Contains(e, p[1:]) {
				return true
			}
		} else if strings.HasPrefix(e, p) {
			return true
		}
		if last {
			return false
		}
	}
}
func Unpack64(s string) Value {
data, err := base64.StdEncoding.DecodeString(s)
if err != nil {
panic("Unpack64 bad data")
}
return Unpack(hacks.BStoS(data))
} | runtime/ops.go | 0.515376 | 0.485051 | ops.go | starcoder |
package pddl
import (
"io"
"log"
)
// Parse returns either a Domain, a Problem or a parse error.
func Parse(file string, r io.Reader) (ast interface{}, err error) {
defer func() {
r := recover()
if r == nil {
return
}
if e, ok := r.(Error); ok {
err = e
} else {
panic(r)
}
}()
p, err := newParser(file, r)
if err != nil {
return
}
p.expect("(", "define")
defer p.expect(")")
if p.peekn(2).text == "domain" {
return parseDomain(p), nil
}
return parseProblem(p), nil
}
// parseDomain parses the body of a (define (domain ...) ...) form. Section
// order matters: each parse* consumes its optional section if present.
func parseDomain(p *parser) *Domain {
	return &Domain{
		Name:         parseDomainName(p),
		Requirements: parseReqsDef(p),
		Types:        parseTypesDef(p),
		Constants:    parseConstsDef(p),
		Predicates:   parsePredsDef(p),
		Functions:    parseFuncsDef(p),
		Actions:      parseActionsDef(p),
	}
}
// parseDomainName parses "(domain <name>)" and returns the name.
func parseDomainName(p *parser) Name {
	p.expect("(", "domain")
	defer p.expect(")")
	return parseName(p, tokName)
}
// parseReqsDef parses an optional (:requirements :req ...) section and
// returns the requirement names (nil when the section is absent).
func parseReqsDef(p *parser) []Name {
	var reqs []Name
	if !p.accept("(", ":requirements") {
		return reqs
	}
	defer p.expect(")")
	for p.peek().typ == tokCname {
		reqs = append(reqs, parseName(p, tokCname))
	}
	return reqs
}
// parseTypesDef parses an optional (:types ...) section, wrapping each
// typed-list entry in a Type (nil when the section is absent).
func parseTypesDef(p *parser) []Type {
	var types []Type
	if !p.accept("(", ":types") {
		return types
	}
	defer p.expect(")")
	for _, entry := range parseTypedListString(p, tokName) {
		types = append(types, Type{TypedEntry: entry})
	}
	return types
}
func parseConstsDef(p *parser) []TypedEntry {
if p.accept("(", ":constants") {
defer p.expect(")")
return parseTypedListString(p, tokName)
}
return nil
}
func parsePredsDef(p *parser) []Predicate {
if p.accept("(", ":predicates") {
defer p.expect(")")
preds := []Predicate{parseAtomicFormSkele(p)}
for p.peek().typ == tokOpen {
preds = append(preds, parseAtomicFormSkele(p))
}
return preds
}
return nil
}
func parseAtomicFormSkele(p *parser) Predicate {
p.expect("(")
defer p.expect(")")
return Predicate{
Name: parseName(p, tokName),
Parameters: parseTypedListString(p, tokQname),
}
}
func parseAtomicFuncSkele(p *parser) Function {
p.expect("(")
defer p.expect(")")
return Function{
Name: parseName(p, tokName),
Parameters: parseTypedListString(p, tokQname),
}
}
// parseFuncsDef parses an optional "(:functions ...)" section and
// returns the declared functions, or nil if the section is absent.
func parseFuncsDef(p *parser) []Function {
	if p.accept("(", ":functions") {
		defer p.expect(")")
		return parseFunctionTypedList(p)
	}
	return nil
}

// parseActionsDef parses zero or more action definitions, one per
// following parenthesized form.
func parseActionsDef(p *parser) (acts []Action) {
	for p.peek().typ == tokOpen {
		acts = append(acts, parseActionDef(p))
	}
	return
}
// parseTypedListString parses a typed list: groups of names of the
// given token type, each group optionally followed by "- <type>".
// Every name in a group is paired with that group's type.
func parseTypedListString(p *parser, typ tokenType) (lst []TypedEntry) {
	for {
		ids := parseNames(p, typ)
		// An empty name list directly followed by "-" is tolerated (see
		// the log messages) to accept a malformed IPC 2008 benchmark.
		if len(ids) == 0 && p.peek().typ == tokMinus {
			log.Println("Parser hack: allowing an empty name list in front of a type in a typed list")
			log.Println("This seems to be required for IPC 2008 woodworking-strips/p11-domain.pddl")
		} else if len(ids) == 0 {
			break
		}
		t := parseType(p)
		for _, id := range ids {
			lst = append(lst, TypedEntry{Name: id, Types: t})
		}
	}
	return
}
// parseType parses an optional type annotation: nothing, "- <name>",
// or "- (either <name>...)".  It returns nil when no "-" follows.
func parseType(p *parser) (typ []TypeName) {
	if !p.accept("-") {
		return
	}
	if !p.accept("(") {
		// Simple form: a single type name.
		return []TypeName{{Name: parseName(p, tokName)}}
	}
	p.expect("either")
	defer p.expect(")")
	for _, id := range parseNamesPlus(p, tokName) {
		typ = append(typ, TypeName{Name: id})
	}
	return
}
// parseFunctionTypedList parses a list of function skeletons where
// each run of skeletons may be followed by a function type
// ("- number"); every function in the run is given that type.
func parseFunctionTypedList(p *parser) (funs []Function) {
	for {
		var fs []Function
		for p.peek().typ == tokOpen {
			fs = append(fs, parseAtomicFuncSkele(p))
		}
		if len(fs) == 0 {
			break
		}
		typ := parseFunctionType(p)
		// Idiom fix: "for i, _ := range" simplified to "for i := range"
		// (gofmt -s); the value was unused.
		for i := range fs {
			fs[i].Types = typ
		}
		funs = append(funs, fs...)
	}
	return
}
// parseFunctionType parses an optional function type annotation.  Only
// "- number" is allowed (per the :action-costs requirement); it returns
// nil when no "-" follows.
func parseFunctionType(p *parser) (typ []TypeName) {
	if !p.accept("-") {
		return
	}
	// gofmt -s: the inner TypeName composite-literal type is implied by
	// the slice element type.
	return []TypeName{{
		Name: Name{
			Location: p.Loc(),
			Str: p.expectText("number").text,
		},
	}}
}
// parseActionDef parses a "(:action <name> :parameters ...)" form.
// The :precondition and :effect sections are optional, and an empty
// "()" body for either leaves the corresponding field nil.
func parseActionDef(p *parser) (act Action) {
	p.expect("(", ":action")
	defer p.expect(")")
	act.Name = parseName(p, tokName)
	act.Parameters = parseActParms(p)
	if p.accept(":precondition") {
		if !p.accept("(", ")") {
			act.Precondition = parsePreGd(p)
		}
	}
	if p.accept(":effect") {
		if !p.accept("(", ")") {
			act.Effect = parseEffect(p)
		}
	}
	return
}

// parseActParms parses the mandatory ":parameters (<typed vars>)"
// section of an action.
func parseActParms(p *parser) (parms []TypedEntry) {
	p.expect(":parameters", "(")
	defer p.expect(")")
	return parseTypedListString(p, tokQname)
}
// parsePreGd parses a precondition goal description, which allows
// top-level "and" and "forall" before falling back to a preference GD.
func parsePreGd(p *parser) Formula {
	switch {
	case p.accept("(", "and"):
		return parseAndGd(p, parsePreGd)
	case p.accept("(", "forall"):
		return parseForallGd(p, parsePreGd)
	}
	return parsePrefGd(p)
}

// parsePrefGd parses a preference goal description.  Preferences are
// not supported, so this is currently just a plain GD.
func parsePrefGd(p *parser) Formula {
	return parseGd(p)
}
// parseGd parses a goal description: and/or/not/imply/exists/forall
// connectives or, failing those, a literal.
func parseGd(p *parser) Formula {
	switch {
	case p.accept("(", "and"):
		return parseAndGd(p, parseGd)
	case p.accept("(", "or"):
		return parseOrGd(p, parseGd)
	case p.accept("(", "not"):
		form := parseNotGd(p)
		// A negated literal is folded into the literal's Negative flag
		// instead of being wrapped in a NotNode.
		if lit, ok := form.(*LiteralNode); ok {
			lit.Negative = !lit.Negative
			return lit
		}
		return form
	case p.accept("(", "imply"):
		return parseImplyGd(p)
	case p.accept("(", "exists"):
		return parseExistsGd(p, parseGd)
	case p.accept("(", "forall"):
		return parseForallGd(p, parseGd)
	}
	return parseLiteral(p, false)
}
// parseLiteral parses a possibly negated atomic formula
// "[(not] (<pred> <term>...) [)]".  eff marks the literal as appearing
// in an effect.  "=" is accepted as a built-in equality predicate.
func parseLiteral(p *parser, eff bool) *LiteralNode {
	lit := new(LiteralNode)
	if p.accept("(", "not") {
		lit.Negative = true
		// Deferred expects run LIFO: the predicate's ")" below is
		// consumed first, then this one closing the "(not".
		defer p.expect(")")
	}
	p.expect("(")
	defer p.expect(")")
	lit.IsEffect = eff
	lit.Node = Node{p.Loc()}
	if p.accept("=") {
		lit.Predicate = Name{"=", lit.Node.Loc()}
	} else {
		lit.Predicate = parseName(p, tokName)
	}
	lit.Arguments = parseTerms(p)
	return lit
}
// parseTerms parses a possibly empty sequence of terms: constants
// (tokName) and variables (tokQname, flagged Variable).  The location
// is captured before each token is accepted so it points at the term.
func parseTerms(p *parser) (lst []Term) {
	for {
		l := p.Loc()
		if t, ok := p.acceptToken(tokName); ok {
			lst = append(lst, Term{Name: Name{t.text, l}})
			continue
		}
		if t, ok := p.acceptToken(tokQname); ok {
			lst = append(lst, Term{Name: Name{t.text, l}, Variable: true})
			continue
		}
		break
	}
	return
}
// parseAndGd parses the body of an already-opened "(and ...)" form.
// p.Loc() is a call in the composite literal and therefore evaluated
// before parseFormulaStar, so the node location is the "and" position.
func parseAndGd(p *parser, nested func(*parser) Formula) Formula {
	defer p.expect(")")
	return &AndNode{MultiNode{
		Node: Node{p.Loc()},
		Formula: parseFormulaStar(p, nested),
	}}
}

// parseFormulaStar parses zero or more nested formulas, one per
// following parenthesized form.
func parseFormulaStar(p *parser, nested func(*parser) Formula) (fs []Formula) {
	for p.peek().typ == tokOpen {
		fs = append(fs, nested(p))
	}
	return
}
// parseOrGd parses the body of an already-opened "(or ...)" form.
func parseOrGd(p *parser, nested func(*parser) Formula) Formula {
	defer p.expect(")")
	return &OrNode{MultiNode{
		Node: Node{p.Loc()},
		Formula: parseFormulaStar(p, nested),
	}}
}

// parseNotGd parses the body of an already-opened "(not ...)" form.
func parseNotGd(p *parser) Formula {
	defer p.expect(")")
	return &NotNode{UnaryNode{
		Node: Node{p.Loc()},
		Formula: parseGd(p),
	}}
}
// parseImplyGd parses the body of an already-opened "(imply ...)" form.
// Left and Right are calls in the composite literal, so they are
// parsed in lexical (antecedent, consequent) order.
func parseImplyGd(p *parser) Formula {
	defer p.expect(")")
	return &ImplyNode{BinaryNode{
		Node: Node{p.Loc()},
		Left: parseGd(p),
		Right: parseGd(p),
	}}
}
// parseForallGd parses the body of an already-opened "(forall (vars)
// body)" form used in a goal description (IsEffect is false).
func parseForallGd(p *parser, nested func(*parser) Formula) Formula {
	defer p.expect(")")
	loc := p.Loc()
	return &ForallNode{
		QuantNode: QuantNode{
			Variables: parseQuantVariables(p),
			UnaryNode: UnaryNode{Node{loc}, nested(p)},
		},
		IsEffect: false,
	}
}

// parseQuantVariables parses the "(<typed vars>)" variable list of a
// quantifier.
func parseQuantVariables(p *parser) []TypedEntry {
	p.expect("(")
	defer p.expect(")")
	return parseTypedListString(p, tokQname)
}
// parseExistsGd parses the body of an already-opened "(exists (vars)
// body)" form.
func parseExistsGd(p *parser, nested func(*parser) Formula) Formula {
	defer p.expect(")")
	loc := p.Loc()
	return &ExistsNode{QuantNode{
		Variables: parseQuantVariables(p),
		UnaryNode: UnaryNode{Node{loc}, nested(p)},
	}}
}
// parseEffect parses an action effect: either "(and <c-effect>...)"
// or a single c-effect.
func parseEffect(p *parser) Formula {
	if p.accept("(", "and") {
		return parseAndEffect(p, parseCeffect)
	}
	return parseCeffect(p)
}

// parseAndEffect parses the body of an already-opened "(and ...)" in
// effect position.
func parseAndEffect(p *parser, nested func(*parser) Formula) Formula {
	defer p.expect(")")
	return &AndNode{MultiNode{
		Node: Node{p.Loc()},
		Formula: parseFormulaStar(p, nested),
	}}
}
// parseCeffect parses a conditional effect: "forall", "when", or a
// primitive effect.
func parseCeffect(p *parser) Formula {
	switch {
	case p.accept("(", "forall"):
		return parseForallEffect(p, parseEffect)
	case p.accept("(", "when"):
		return parseWhen(p, parseCondEffect)
	}
	return parsePeffect(p)
}
// parseForallEffect parses the body of an already-opened
// "(forall (vars) effect)" in effect position (IsEffect is true).
func parseForallEffect(p *parser, nested func(*parser) Formula) Formula {
	defer p.expect(")")
	loc := p.Loc()
	return &ForallNode{
		QuantNode: QuantNode{
			Variables: parseQuantVariables(p),
			UnaryNode: UnaryNode{Node{loc}, nested(p)},
		},
		IsEffect: true,
	}
}

// parseWhen parses the body of an already-opened "(when <gd> <effect>)"
// form; the condition is parsed before the effect.
func parseWhen(p *parser, nested func(*parser) Formula) Formula {
	defer p.expect(")")
	loc := p.Loc()
	return &WhenNode{
		Condition: parseGd(p),
		UnaryNode: UnaryNode{Node{loc}, nested(p)},
	}
}
// parsePeffect parses a primitive effect: an assignment when the token
// after the "(" (peekn(2)) is a known assignment operator, otherwise a
// literal marked as an effect.
func parsePeffect(p *parser) Formula {
	if _, ok := AssignOps[p.peekn(2).text]; ok && p.peek().typ == tokOpen {
		return parseAssign(p)
	}
	return parseLiteral(p, true)
}
// parseAssign parses an assignment effect "(<op> <f-head> <f-exp>)".
func parseAssign(p *parser) *AssignNode {
	p.expect("(")
	defer p.expect(")")
	a := new(AssignNode)
	a.Op = parseName(p, tokName)
	a.Lval = parseFhead(p)
	// f-exp:
	// We support :action-costs, which means that
	// an Fexp can be either a non-negative number
	// (non-negativity is checked during semantic
	// analysis) or it can be of the form:
	// (<function-symbol> <term>*)
	// i.e., an f-head
	if n, ok := p.acceptToken(tokNum); ok {
		a.IsNumber = true
		a.Number = n.text
	} else {
		a.Fhead = parseFhead(p)
	}
	return a
}
// parseCondEffect parses the effect part of a "when": an "and" of
// primitive effects or a single primitive effect.
func parseCondEffect(p *parser) Formula {
	if p.accept("(", "and") {
		return parseAndEffect(p, parsePeffect)
	}
	return parsePeffect(p)
}

// parseFhead parses a function head, either bare "<name>" or
// parenthesized "(<name> <term>...)".
func parseFhead(p *parser) (head Fhead) {
	open := p.accept("(")
	head.Name = parseName(p, tokName)
	if open {
		head.Arguments = parseTerms(p)
		p.expect(")")
	}
	return
}
// parseProblem parses the body of a "(define (problem ...) ...)" form.
// As in parseDomain, composite-literal call order guarantees the
// sections are consumed in this sequence.
func parseProblem(p *parser) *Problem {
	return &Problem{
		Name: parseProbName(p),
		Domain: parseProbDomain(p),
		Requirements: parseReqsDef(p),
		Objects: parseObjsDecl(p),
		Init: parseInit(p),
		Goal: parseGoal(p),
		Metric: parseMetric(p),
	}
}
// parseProbName parses "(problem <name>)" and returns the name.
func parseProbName(p *parser) Name {
	p.expect("(", "problem")
	defer p.expect(")")
	return parseName(p, tokName)
}

// parseProbDomain parses "(:domain <name>)" and returns the name of
// the domain the problem belongs to.
func parseProbDomain(p *parser) Name {
	p.expect("(", ":domain")
	defer p.expect(")")
	return parseName(p, tokName)
}

// parseObjsDecl parses an optional "(:objects ...)" section and
// returns the typed object entries, or nil if the section is absent.
func parseObjsDecl(p *parser) []TypedEntry {
	if p.accept("(", ":objects") {
		defer p.expect(")")
		return parseTypedListString(p, tokName)
	}
	return nil
}
// parseInit parses the mandatory "(:init <el>...)" section of a
// problem, one element per following parenthesized form.
func parseInit(p *parser) (els []Formula) {
	p.expect("(", ":init")
	defer p.expect(")")
	for p.peek().typ == tokOpen {
		els = append(els, parseInitEl(p))
	}
	return
}

// parseInitEl parses one init element: either a numeric fluent
// assignment "(= <f-head> <number>)" (flagged IsInit) or a literal.
func parseInitEl(p *parser) Formula {
	loc := p.Loc()
	if p.accept("(", "=") {
		defer p.expect(")")
		return &AssignNode{
			Node: Node{loc},
			Op: Name{"=", p.Loc()},
			Lval: parseFhead(p),
			IsNumber: true,
			Number: p.expectType(tokNum).text,
			IsInit: true,
		}
	}
	return parseLiteral(p, false)
}
// parseGoal parses the mandatory "(:goal <pre-gd>)" section.
func parseGoal(p *parser) Formula {
	p.expect("(", ":goal")
	defer p.expect(")")
	return parsePreGd(p)
}

// parseMetric parses an optional "(:metric ...)" section.  Only
// "minimize (total-cost)" is supported; without the section the metric
// defaults to makespan.
func parseMetric(p *parser) Metric {
	if p.accept("(", ":metric") {
		p.expect("minimize", "(", "total-cost", ")", ")")
		return MetricMinCost
	}
	return MetricMakespan
}
// parseNamesPlus parses one or more names of the given token type.
func parseNamesPlus(p *parser, typ tokenType) []Name {
	return append([]Name{parseName(p, typ)}, parseNames(p, typ)...)
}

// parseNames parses zero or more names of the given token type.
// NOTE(review): p.Loc() is taken *after* the token is accepted, unlike
// parseName and parseTerms which capture the location first — confirm
// whether Name.Location is meant to point at the token or past it.
func parseNames(p *parser, typ tokenType) (ids []Name) {
	for t, ok := p.acceptToken(typ); ok; t, ok = p.acceptToken(typ) {
		l := p.Loc()
		ids = append(ids, Name{t.text, l})
	}
	return
}

// parseName parses a single mandatory name of the given token type,
// recording its source location.
func parseName(p *parser, typ tokenType) Name {
	return Name{
		Location: p.Loc(),
		Str: p.expectType(typ).text,
	}
}
package gcs
import "math"
type (
	// Sphere is a geometry represented as the set of points
	// that are all at the same distance R from a given point,
	// called center or origin.
	Sphere struct {
		R float64 // radius in meters
	}
	// SPoint is a point in the surface of the Sphere, ie, its
	// distance from the origin is the radius of the sphere.
	// Both angles are stored in radians (see NewSPoint).
	SPoint struct {
		φ Angle // latitude in radians
		λ Angle // longitude in radians
	}
	// Point in a spherical coordinate system
	Point struct {
		R float64 // radial distance from origin
		φ Angle // latitude in radians
		λ Angle // longitude in radians
	}
)

// Planetary radii used by the predefined spheres below.
const (
	EarthRadius = 6378100 // Earth radius at equator in meters
	MarsRadius = 3390000 // Mars radius at equator in meters
)

var (
	// SphericalUnit is the unit sphere
	SphericalUnit = Sphere{
		R: 1,
	}
	// SphericalEarth is the spherical approximation of earth
	SphericalEarth = Sphere{
		R: EarthRadius,
	}
	// SphericalMars is the spherical approximation of mars
	SphericalMars = Sphere{
		R: MarsRadius,
	}
)
// NewSPoint creates a point in the surface of a sphere.
// Lon and lat are given in degrees and converted to radians for
// storage, matching the SPoint field documentation.
func NewSPoint(lon, lat float64) SPoint {
	return SPoint{
		λ: toRadians(lon),
		φ: toRadians(lat),
	}
}
// Area gives the surface area (4πR²) in squared radius units.
func (s *Sphere) Area() float64 {
	return 4 * math.Pi * s.R * s.R
}

// Contains reports whether p lies on or inside the sphere, i.e. its
// radial distance does not exceed the sphere radius.
func (s *Sphere) Contains(p Point) bool {
	return p.R <= s.R
}
// Distance of p1 and p2 through the surface of the sphere (orthodromic
// distance or great circle), returned in the unit of the sphere
// radius (meters for the Earth/Mars spheres).
// The algorithm uses the haversine formula and because of that it has
// lower precision for computing distance of antipodal points.
func (s *Sphere) Distance(p1, p2 SPoint) float64 {
	return haversin(s.R, p1, p2)
}
// Ortho projects the longitude λ and latitude φ onto a secant plane
// (Orthographic projection). It returns the plane's cartesian points X
// and Y in the same unit as the sphere radius.
//
// The standard formulas are
//	x = R·cosφ·sin(λ−λ0)
//	y = R·(cosφ0·sinφ − sinφ0·cosφ·cos(λ−λ0))
// with angles in radians.  BUG FIX: the previous implementation
// converted the stored radian angles to degrees before passing them to
// math.Sin/math.Cos, which expect radians; the angles are now used
// directly, consistent with haversin in this file.
func (s *Sphere) Ortho(origin, p SPoint) (float64, float64) {
	R := s.R
	φ, λ := float64(p.φ), float64(p.λ)
	φ0, λ0 := float64(origin.φ), float64(origin.λ)
	x := R * math.Cos(φ) * math.Sin(λ-λ0)
	y := R * (math.Cos(φ0)*math.Sin(φ) - math.Sin(φ0)*math.Cos(φ)*math.Cos(λ-λ0))
	return x, y
}
// haversin function of angle θ
// hsin(θ) = sin²(θ/2)
func hsin(θ float64) float64 {
return math.Pow(math.Sin(θ/2), 2)
}
// haversin of α angle between points p1 and p2.
// The formula is:
// haversin(α) = hsin(φ2-φ1)+cosφ1.cosφ2.hsin(λ1-λ2)
// haversin(α) = (d/2R)²
// then:
// d = 2Rsin⁻¹√(haversin(α))
// Based on:
// https://www.math.ksu.edu/~dbski/writings/haversine.pdf
func haversin(R float64, p1, p2 SPoint) float64 {
φ1, λ1 := float64(p1.φ), float64(p1.λ)
φ2, λ2 := float64(p2.φ), float64(p2.λ)
h := hsin(φ2-φ1) + math.Cos(φ1)*math.Cos(φ2)*hsin(λ2-λ1)
return 2 * R * math.Asin(math.Sqrt(h))
} | gcs/sphere.go | 0.864496 | 0.784773 | sphere.go | starcoder |
package model
// SingleAxis is the option set for single axis.
// https://echarts.apache.org/en/option.html#singleAxis
type SingleAxis struct {
	// The minimum value of axis.
	// It can be set to a special value 'dataMin' so that the minimum value on this axis is set to be the minimum label.
	// It will be automatically computed to make sure axis tick is equally distributed when not set.
	Min interface{} `json:"min,omitempty"`
	// The maximum value of axis.
	// It can be set to a special value 'dataMax' so that the maximum value on this axis is set to be the maximum label.
	// It will be automatically computed to make sure axis tick is equally distributed when not set.
	Max interface{} `json:"max,omitempty"`
	// Type of axis.
	// Option:
	// * 'value': Numerical axis, suitable for continuous data.
	// * 'category': Category axis, suitable for discrete category data.
	// Category data can be auto retrieved from series.data or dataset.source,
	// or can be specified via xAxis.data.
	// * 'time' Time axis, suitable for continuous time series data. As compared to value axis,
	// it has a better formatting for time and a different tick calculation method. For example,
	// it decides to use month, week, day or hour for tick based on the range of span.
	// * 'log' Log axis, suitable for log data.
	Type string `json:"type,omitempty"`
	// Distance between grid component and the left side of the container.
	// left value can be instant pixel value like 20; it can also be a percentage
	// value relative to container width like '20%'; and it can also be 'left', 'center', or 'right'.
	// If the left value is set to be 'left', 'center', or 'right',
	// then the component will be aligned automatically based on position.
	Left string `json:"left,omitempty"`
	// Distance between grid component and the right side of the container.
	// right value can be instant pixel value like 20; it can also be a percentage
	// value relative to container width like '20%'.
	Right string `json:"right,omitempty"`
	// Distance between grid component and the top side of the container.
	// top value can be instant pixel value like 20; it can also be a percentage
	// value relative to container height like '20%'; and it can also be 'top', 'middle', or 'bottom'.
	// If the top value is set to be 'top', 'middle', or 'bottom',
	// then the component will be aligned automatically based on position.
	Top string `json:"top,omitempty"`
	// Distance between grid component and the bottom side of the container.
	// bottom value can be instant pixel value like 20; it can also be a percentage
	// value relative to container height like '20%'.
	Bottom string `json:"bottom,omitempty"`
}
package paunch
import (
gl "github.com/chsc/gogl/gl21"
)
// Shape is an object that represents a vector shape, such as a triangle or
// other polygon, that can be drawn on screen.
type Shape struct {
	mode gl.Enum // OpenGL primitive mode the verticies are drawn with
	size int // number of float components (2 per vertex)
	vertexBuffer gl.Uint // OpenGL vertex buffer object handle
	verticies []float32 // unscaled vertex data kept on the CPU side
	scaleX float64 // horizontal scale factor applied on upload
	scaleY float64 // vertical scale factor applied on upload
}
// NewShape creates a new Shape object based on the verticies and shape type.
// The verticies are copied into float32 form and uploaded into a new GL
// buffer; DYNAMIC_DRAW is used because Move and SetScaling rewrite the
// buffer contents.
func NewShape(shapeType ShapeType, verticies []float64) (*Shape, error) {
	verticies32 := make([]float32, len(verticies))
	for i, val := range verticies {
		verticies32[i] = float32(val)
	}
	shape := &Shape{mode: gl.Enum(shapeType), size: len(verticies), vertexBuffer: 0, verticies: verticies32,
		scaleX: 1, scaleY: 1}
	gl.GenBuffers(1, &shape.vertexBuffer)
	gl.BindBuffer(gl.ARRAY_BUFFER, gl.Uint(shape.vertexBuffer))
	gl.BufferData(gl.ARRAY_BUFFER, gl.Sizeiptr(shape.size*4), gl.Pointer(&shape.verticies[0]), gl.DYNAMIC_DRAW)
	gl.BindBuffer(gl.ARRAY_BUFFER, 0)
	return shape, checkForErrors()
}
// NewShapeFromShape creates a copy of an existing Shape object.
// The vertex data is deep-copied and uploaded into a fresh GL buffer so
// the two shapes can be moved and scaled independently.
func NewShapeFromShape(copyShape *Shape) (*Shape, error) {
	shape := &Shape{mode: copyShape.mode, size: copyShape.size, verticies: make([]float32, len(copyShape.verticies)),
		scaleX: copyShape.scaleX, scaleY: copyShape.scaleY}
	copy(shape.verticies, copyShape.verticies)
	gl.GenBuffers(1, &shape.vertexBuffer)
	gl.BindBuffer(gl.ARRAY_BUFFER, shape.vertexBuffer)
	gl.BufferData(gl.ARRAY_BUFFER, gl.Sizeiptr(len(shape.verticies)*4), gl.Pointer(&shape.verticies[0]), gl.DYNAMIC_DRAW)
	gl.BindBuffer(gl.ARRAY_BUFFER, 0)
	return shape, checkForErrors()
}
// SetScaling sets the scaling factor of the Shape object. For instance, an x
// and y scale value of two will make the Shape object twice as large.
// The scaling is anchored at the first vertex: the transform terms keep
// verticies[0]/verticies[1] fixed while all other verticies scale
// around them.  Only the GL buffer is rewritten; the CPU-side verticies
// stay unscaled.
func (shape *Shape) SetScaling(scaleX, scaleY float64) {
	shape.scaleX = scaleX
	shape.scaleY = scaleY
	verticies := make([]float32, len(shape.verticies))
	xTransform := shape.verticies[0] - (shape.verticies[0] * float32(scaleX))
	yTransform := shape.verticies[1] - (shape.verticies[1] * float32(scaleY))
	for i := range verticies {
		// Even indexes are x components, odd indexes are y components.
		if i%2 == 0 {
			verticies[i] = shape.verticies[i] * float32(scaleX)
			verticies[i] += xTransform
		} else {
			verticies[i] = shape.verticies[i] * float32(scaleY)
			verticies[i] += yTransform
		}
	}
	gl.BindBuffer(gl.ARRAY_BUFFER, shape.vertexBuffer)
	gl.BufferSubData(gl.ARRAY_BUFFER, 0, gl.Sizeiptr(len(verticies)*4), gl.Pointer(&verticies[0]))
	gl.BindBuffer(gl.ARRAY_BUFFER, 0)
}
// Draw draws the Shape object using the currently active effect
// program (paunchEffect), binding the shape's vertex buffer to the
// program's "position" attribute.
func (shape *Shape) Draw() error {
	gl.BindBuffer(gl.ARRAY_BUFFER, shape.vertexBuffer)
	vertexAttribLoc := gl.GetAttribLocation(paunchEffect.program, gl.GLString("position"))
	gl.VertexAttribPointer(gl.Uint(vertexAttribLoc), 2, gl.FLOAT, gl.FALSE, 0, gl.Offset(nil, 0))
	gl.BindBuffer(gl.ARRAY_BUFFER, 0)
	gl.EnableVertexAttribArray(gl.Uint(vertexAttribLoc))
	// size is in float components, two per vertex.
	gl.DrawArrays(shape.mode, 0, gl.Sizei(shape.size/2))
	gl.DisableVertexAttribArray(gl.Uint(vertexAttribLoc))
	//gl.DisableVertexAttribArray(gl.Uint(1))
	//gl.BindTexture(gl.TEXTURE_2D, 0)
	return checkForErrors()
}
// Move moves the Shape object a specified distance.
// The unscaled CPU-side verticies are translated, then — when a scale
// factor is active — a scaled copy anchored at the first vertex (same
// transform as SetScaling) is built before re-uploading to the GL
// buffer.
func (shape *Shape) Move(x, y float64) {
	var verticies []float32
	for i := 0; i < len(shape.verticies); i += 2 {
		shape.verticies[i] += float32(x)
		shape.verticies[i+1] += float32(y)
	}
	if shape.scaleX != 1 || shape.scaleY != 1 {
		verticies = make([]float32, len(shape.verticies))
		xTransform := shape.verticies[0] - (shape.verticies[0] * float32(shape.scaleX))
		yTransform := shape.verticies[1] - (shape.verticies[1] * float32(shape.scaleY))
		for i := range verticies {
			// Even indexes are x components, odd indexes are y components.
			if i%2 == 0 {
				verticies[i] = shape.verticies[i] * float32(shape.scaleX)
				verticies[i] += xTransform
			} else {
				verticies[i] = shape.verticies[i] * float32(shape.scaleY)
				verticies[i] += yTransform
			}
		}
	} else {
		// No scaling active: upload the translated verticies directly.
		verticies = shape.verticies
	}
	gl.BindBuffer(gl.ARRAY_BUFFER, shape.vertexBuffer)
	gl.BufferSubData(gl.ARRAY_BUFFER, 0, gl.Sizeiptr(len(verticies)*4), gl.Pointer(&verticies[0]))
	gl.BindBuffer(gl.ARRAY_BUFFER, 0)
}
// SetPosition sets the position of the Shape object relative to first
// specified vertex, implemented as a Move by the required displacement.
func (shape *Shape) SetPosition(x, y float64) {
	xDisp := x - float64(shape.verticies[0])
	yDisp := y - float64(shape.verticies[1])
	shape.Move(xDisp, yDisp)
}
// Position returns the X and Y position relative to the first specified
// vertex.
func (shape *Shape) Position() (x, y float64) {
return float64(shape.verticies[0]), float64(shape.verticies[1])
} | shape.go | 0.776962 | 0.6753 | shape.go | starcoder |
package tablehelpers
import (
"log"
"strconv"
"strings"
"time"
)
// Gives the suffix for the weekly tables that will be used during the successive week
func weeklyTableSuffix() string {
	nextWeek := time.Now().UTC().AddDate(0, 0, 7)
	// Shift to the Monday of that week: Weekday() is 0 for Sunday, so
	// (1 - weekday) moves Sunday forward one day and later days back.
	return nextWeek.AddDate(0, 0, (1 - int(nextWeek.Weekday()))).Format("2006_01_02")
}
// Gives the suffix for the daily tables that will be used during the next day
func dailyTableSuffix() string {
	nextDay := time.Now().UTC().AddDate(0, 0, 1)
	return nextDay.Format("2006_01_02")
}

// Gives the suffix for the hourly tables that will be used during the next hour
// NOTE(review): the hour is appended unpadded (e.g. "2006_01_02_5" at
// 05:00) because strconv.Itoa is used instead of a zero-padded format —
// confirm downstream table naming expects unpadded hours.
func hourlyTableSuffix() string {
	nextHour := time.Now().UTC().Add(1 * time.Hour)
	h := strconv.Itoa(nextHour.Hour())
	return nextHour.Format("2006_01_02") + "_" + h
}
// fetchTableTimeSuffix returns the "_<date...>" suffix for the next
// table of the given type.  "hourly" and "daily" are recognized; any
// other value falls back to the weekly suffix, matching the original
// if/else chain (now an idiomatic switch).
func fetchTableTimeSuffix(tableType string) string {
	switch tableType {
	case "hourly":
		return "_" + hourlyTableSuffix()
	case "daily":
		return "_" + dailyTableSuffix()
	default:
		return "_" + weeklyTableSuffix()
	}
}
// isHourly reports whether the hourly check is due: the CRON minute
// field is enabled (not -1) and the current minute m has reached it.
func isHourly(m int, cronExp []int) bool {
	if cronExp[0] == -1 {
		return false
	}
	return m >= cronExp[0]
}
// isDaily reports whether the daily check is due.  The CRON hour field
// must be enabled; when the minute field is disabled only the hour is
// compared, otherwise both minute and hour must have been reached.
func isDaily(m int, h int, cronExp []int) bool {
	switch {
	case cronExp[1] == -1:
		return false
	case cronExp[0] == -1:
		return h >= cronExp[1]
	default:
		return m >= cronExp[0] && h >= cronExp[1]
	}
}
// isWeekly reports whether the weekly check is due: the CRON weekday
// field must be enabled and match today; additionally, when the hour
// field is enabled, the daily condition must also hold.
func isWeekly(m int, h int, wd int, cronExp []int) bool {
	if cronExp[2] == -1 {
		return false
	}
	if wd != cronExp[2] {
		return false
	}
	if cronExp[1] == -1 {
		return true
	}
	return isDaily(m, h, cronExp)
}
// Parses the CRON expression given in format:
// minute.hour.weekDay
// and returns array of int values
// If any of the value is not an int,
// -1 is will be the placeholder
func parsedCronExp(cron string) []int {
var result []int
cronExp := strings.Split(cron, ".")
for _, i := range cronExp {
j, err := strconv.Atoi(i)
if err != nil {
log.Print(err)
result = append(result, -1)
} else {
result = append(result, j)
}
}
return result
}
// Method to check missed tables
// When given two slices returns the missing items
// from slice2 that is present in slice1
func SliceDifference(slice1 []string, slice2 []string) []string {
var diff []string
for _, s1 := range slice1 {
found := false
for _, s2 := range slice2 {
if s1 == s2 {
found = true
break
}
}
if !found {
diff = append(diff, s1)
}
}
return diff
}
// Checks CRON expression given
// and returns the table types to be checked now.
// NOTE(review): the current time here is local (time.Now() without
// UTC()) while the table suffixes are computed in UTC — confirm this
// mix is intended.
func fetchCheckableTableTypes(cron string) []string {
	var tableTypes []string
	cronExp := parsedCronExp(cron)
	log.Print("Preparing tables based on CRON expression", cronExp)
	t := time.Now()
	m, h, wd := t.Minute(), t.Hour(), int(t.Weekday())
	// based on CRON time appends types of tables to be checked
	if isHourly(m, cronExp) {
		tableTypes = append(tableTypes, "hourly")
	}
	if isDaily(m, h, cronExp) {
		tableTypes = append(tableTypes, "daily")
	}
	if isWeekly(m, h, wd, cronExp) {
		tableTypes = append(tableTypes, "weekly")
	}
	return tableTypes
}
// Returns a slice of table names that will be checked
// by lambda for current time
// based on current time, CRON expression
// and models present as an ENV variable
func FetchScheduledTables(models []string, cron string) []string {
var tables []string
tableTypes := fetchCheckableTableTypes(cron)
// models := strings.Split(modelNames, ",")
for _, model := range models {
for _, tableType := range tableTypes {
tableName := model + "_" + tableType + fetchTableTimeSuffix(tableType)
tables = append(tables, tableName)
}
}
return tables
} | tablehelpers/table-helper-methods.go | 0.657758 | 0.430147 | table-helper-methods.go | starcoder |
package hashmap
import (
"bytes"
"fmt"
"reflect"
"sync"
"sync/atomic"
"unsafe"
)
// MaxFillRate is the maximum fill rate for the slice before a resize will happen.
const MaxFillRate = 50

type (
	hashMapData struct {
		keyRightShifts uint64 // 64 - log2 of array size, to be used as index in the data array
		data unsafe.Pointer // pointer to slice data array
		slice []*ListElement // storage for the slice for the garbage collector to not clean it up
		count uint64 // count of filled elements in the slice
	}
	// HashMap implements a read optimized hash map.
	HashMap struct {
		mapDataPtr unsafe.Pointer // pointer to a map instance that gets replaced if the map resizes
		linkedList *List // key sorted linked list of elements
		sync.Mutex // mutex that is only used for resize operations
	}
	// KeyValue represents a key/value that is returned by the iterator.
	KeyValue struct {
		Key interface{} // the key as passed to Set/Insert (the hash itself for SetHashedKey)
		Value unsafe.Pointer // pointer to the stored value
	}
)
// New returns a new HashMap.
func New() *HashMap {
	return NewSize(8) // default initial index size
}

// NewSize returns a new HashMap instance with a specific initialization size.
// Grow rounds the size up to the next power of two.
func NewSize(size uint64) *HashMap {
	hashmap := &HashMap{
		linkedList: NewList(),
	}
	hashmap.Grow(size)
	return hashmap
}
// Len returns the number of elements within the map.
func (m *HashMap) Len() uint64 {
	return m.linkedList.Len()
}

// mapData atomically loads the current index data; the pointer is
// swapped wholesale on resize, so callers get a consistent snapshot.
func (m *HashMap) mapData() *hashMapData {
	return (*hashMapData)(atomic.LoadPointer(&m.mapDataPtr))
}

// Fillrate returns the fill rate of the map as an percentage integer.
func (m *HashMap) Fillrate() uint64 {
	mapData := m.mapData()
	count := atomic.LoadUint64(&mapData.count)
	sliceLen := uint64(len(mapData.slice))
	return (count * 100) / sliceLen
}
// getSliceItemForKey returns the current map data together with the
// list element stored at the index slot for hashedKey.  The top bits
// of the hash select the slot (keyRightShifts = 64 - log2(slice len)).
func (m *HashMap) getSliceItemForKey(hashedKey uint64) (mapData *hashMapData, item *ListElement) {
	mapData = m.mapData()
	index := hashedKey >> mapData.keyRightShifts
	// Manual pointer arithmetic into the slice backing array; each slot
	// is one pointer (intSizeBytes) wide and is loaded atomically since
	// concurrent inserts/resizes may update it.
	sliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))
	item = (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer))
	return
}
// Del deletes the hashed key from the map.
// The linked list is sorted by keyHash, so the scan can stop as soon
// as a larger hash is seen.
func (m *HashMap) Del(key interface{}) {
	hashedKey := getKeyHash(key)
	for _, entry := m.getSliceItemForKey(hashedKey); entry != nil; entry = entry.Next() {
		if entry.keyHash == hashedKey && entry.key == key {
			m.linkedList.Delete(entry)
			return
		}
		if entry.keyHash > hashedKey {
			return
		}
	}
}

// DelHashedKey deletes the hashed key from the map.
// Unlike Del, only the hash is compared, so it matches entries stored
// via SetHashedKey.
func (m *HashMap) DelHashedKey(hashedKey uint64) {
	for _, entry := m.getSliceItemForKey(hashedKey); entry != nil; entry = entry.Next() {
		if entry.keyHash == hashedKey {
			m.linkedList.Delete(entry)
			return
		}
		if entry.keyHash > hashedKey {
			return
		}
	}
}
// Insert sets the value under the specified key to the map if it does not exist yet.
// If a resizing operation is happening concurrently while calling Set, the item might show up in the map only after the resize operation is finished.
// Returns true if the item was inserted or false if it existed.
func (m *HashMap) Insert(key interface{}, value unsafe.Pointer) bool {
	hashedKey := getKeyHash(key)
	newEntry := &ListElement{
		key: key,
		keyHash: hashedKey,
		value: value,
	}
	return m.insertListElement(newEntry, false)
}

// Set sets the value under the specified key to the map. An existing item for this key will be overwritten.
// If a resizing operation is happening concurrently while calling Set, the item might show up in the map only after the resize operation is finished.
func (m *HashMap) Set(key interface{}, value unsafe.Pointer) {
	hashedKey := getKeyHash(key)
	newEntry := &ListElement{
		key: key,
		keyHash: hashedKey,
		value: value,
	}
	m.insertListElement(newEntry, true)
}

// SetHashedKey sets the value under the specified hash key to the map. An existing item for this key will be overwritten.
// You can use this function if your keys are already hashes and you want to avoid another hashing of the key.
// Do not use non hashes as keys for this function, the performance would decrease!
// If a resizing operation is happening concurrently while calling Set, the item might show up in the map only after the resize operation is finished.
func (m *HashMap) SetHashedKey(hashedKey uint64, value unsafe.Pointer) {
	// The hash itself doubles as the key for hash-keyed entries.
	newEntry := &ListElement{
		key: hashedKey,
		keyHash: hashedKey,
		value: value,
	}
	m.insertListElement(newEntry, true)
}
// insertListElement adds newEntry to the sorted linked list (updating
// an existing entry when update is true), publishes it in the index
// slice, and triggers a resize when the fill rate exceeds MaxFillRate.
// Returns false only in insert mode when the key already existed.
func (m *HashMap) insertListElement(newEntry *ListElement, update bool) bool {
	for {
		mapData, sliceItem := m.getSliceItemForKey(newEntry.keyHash)
		if update {
			if !m.linkedList.AddOrUpdate(newEntry, sliceItem) {
				continue // a concurrent add did interfere, try again
			}
		} else {
			existed, inserted := m.linkedList.Add(newEntry, sliceItem)
			if existed {
				return false
			}
			if !inserted {
				continue // a concurrent add did interfere, try again
			}
		}
		newSliceCount := mapData.addItemToIndex(newEntry)
		// addItemToIndex returns a non-zero count only when this entry
		// became a new index slot occupant.
		if newSliceCount != 0 {
			sliceLen := uint64(len(mapData.slice))
			fillRate := (newSliceCount * 100) / sliceLen
			if fillRate > MaxFillRate { // check if the slice needs to be resized
				m.Lock()
				currentMapData := m.mapData()
				if mapData == currentMapData { // double check that no other resize happened
					m.grow(0)
				}
				m.Unlock()
			}
		}
		return true
	}
}
// CasHashedKey performs a compare and swap operation sets the value under the specified hash key to the map. An existing item for this key will be overwritten.
// NOTE(review): the enclosing for loop never actually repeats — both
// branches return on the first iteration — confirm whether a retry on
// Cas interference was intended (compare insertListElement).
func (m *HashMap) CasHashedKey(hashedKey uint64, from, to unsafe.Pointer) bool {
	newEntry := &ListElement{
		key: hashedKey,
		keyHash: hashedKey,
		value: to,
	}
	for {
		mapData, sliceItem := m.getSliceItemForKey(hashedKey)
		if !m.linkedList.Cas(newEntry, from, sliceItem) {
			return false
		}
		newSliceCount := mapData.addItemToIndex(newEntry)
		if newSliceCount != 0 {
			sliceLen := uint64(len(mapData.slice))
			fillRate := (newSliceCount * 100) / sliceLen
			if fillRate > MaxFillRate { // check if the slice needs to be resized
				m.Lock()
				currentMapData := m.mapData()
				if mapData == currentMapData { // double check that no other resize happened
					m.grow(0)
				}
				m.Unlock()
			}
		}
		return true
	}
}
// adds an item to the index if needed and returns the new item counter if it changed, otherwise 0.
// Each index slot must always point at the element with the smallest
// key hash that maps to it, so the slot is CAS-updated until that
// invariant holds.
func (mapData *hashMapData) addItemToIndex(item *ListElement) uint64 {
	index := item.keyHash >> mapData.keyRightShifts
	sliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))
	for { // loop until the smallest key hash is in the index
		sliceItem := (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer)) // get the current item in the index
		if sliceItem == nil { // no item yet at this index
			if atomic.CompareAndSwapPointer(sliceDataIndexPointer, nil, unsafe.Pointer(item)) {
				return atomic.AddUint64(&mapData.count, 1)
			}
			continue // a new item was inserted concurrently, retry
		}
		if item.keyHash < sliceItem.keyHash {
			// the new item is the smallest for this index?
			if !atomic.CompareAndSwapPointer(sliceDataIndexPointer, unsafe.Pointer(sliceItem), unsafe.Pointer(item)) {
				continue // a new item was inserted concurrently, retry
			}
		}
		return 0
	}
}
// Grow resizes the hashmap to a new size, gets rounded up to next power of 2.
// To double the size of the hashmap use newSize 0.
func (m *HashMap) Grow(newSize uint64) {
	m.Lock()
	m.grow(newSize)
	m.Unlock()
}

// grow performs the resize; the caller must hold the mutex.
func (m *HashMap) grow(newSize uint64) {
	mapData := m.mapData()
	if newSize == 0 {
		newSize = uint64(len(mapData.slice)) << 1
	} else {
		newSize = roundUpPower2(newSize)
	}
	newSlice := make([]*ListElement, newSize)
	header := (*reflect.SliceHeader)(unsafe.Pointer(&newSlice))
	newMapData := &hashMapData{
		keyRightShifts: 64 - log2(newSize),
		data: unsafe.Pointer(header.Data), // use address of slice data storage
		slice: newSlice,
	}
	// Fill, publish, then fill again: the second pass catches entries
	// that concurrent writers added to the old index between the first
	// fill and the atomic pointer swap.
	m.fillIndexItems(newMapData) // initialize new index slice with longer keys
	atomic.StorePointer(&m.mapDataPtr, unsafe.Pointer(newMapData))
	m.fillIndexItems(newMapData) // make sure that the new index is up to date with the current state of the linked list
}
// fillIndexItems walks the sorted linked list and records, for every
// index slot, the first (smallest-hash) live element that maps to it.
func (m *HashMap) fillIndexItems(mapData *hashMapData) {
	first := m.linkedList.First()
	item := first
	lastIndex := uint64(0)
	for item != nil {
		index := item.keyHash >> mapData.keyRightShifts
		if item == first || index != lastIndex { // store item with smallest hash key for every index
			if !item.Deleted() {
				mapData.addItemToIndex(item)
				lastIndex = index
			}
		}
		item = item.Next()
	}
}
// String returns the map as a string, only hashed keys are printed.
// The output is a comma-separated list of key hashes in hash order,
// skipping logically deleted entries.
func (m *HashMap) String() string {
	buffer := bytes.NewBufferString("")
	buffer.WriteRune('[')
	first := m.linkedList.First()
	item := first
	for item != nil {
		if !item.Deleted() {
			if item != first {
				buffer.WriteRune(',')
			}
			fmt.Fprint(buffer, item.keyHash)
		}
		item = item.Next()
	}
	buffer.WriteRune(']')
	return buffer.String()
}
// Iter returns an iterator which could be used in a for range loop.
// The order of the items is sorted by hash keys.
// NOTE(review): the producing goroutine sends on an unbuffered channel
// with no cancellation path, so abandoning the channel before it is
// drained leaks the goroutine — consumers must read to completion.
func (m *HashMap) Iter() <-chan KeyValue {
	ch := make(chan KeyValue) // do not use a size here since items can get added during iteration
	go func() {
		item := m.linkedList.First()
		for item != nil {
			if !item.Deleted() {
				ch <- KeyValue{item.key, item.Value()}
			}
			item = item.Next()
		}
		close(ch)
	}()
	return ch
}
package car_generic
import (
m "github.com/niclabs/intersection-simulator/vehicle"
"math"
)
// GetTurnAngle returns the angle swept by a turning vehicle in a
// four-way intersection (the package constant FourwayTurnAngle).
func GetTurnAngle() float64 {
	return FourwayTurnAngle
}
// GetTurnRadius returns the radius of the turning arc for the given
// intention within a danger zone of the given side length: 3/4 of the
// zone for a left turn, 1/4 for a right turn, and 0 for any other
// intention (going straight — no arc).
func GetTurnRadius(intention int, dangerZoneLength float64) float64 {
	// switch replaces the original if/else chain; same mapping.
	switch intention {
	case LeftIntention:
		return dangerZoneLength * (3.0 / 4.0)
	case RightIntention:
		return dangerZoneLength / 4.0
	default:
		return 0.0
	}
}
// GetStartDirection returns the initial heading in degrees for a
// vehicle entering from the given lane: bottom→90 (up), right→180
// (left), top→270 (down), left→0 (right).
// NOTE(review): an unknown lane yields 360 — confirm callers treat
// this as an invalid-lane sentinel rather than 0°.
func GetStartDirection(lane int) (dir float64) {
	switch lane {
	case BottomLane:
		dir = 90
	case RightLane:
		dir = 180
	case TopLane:
		dir = 270
	case LeftLane:
		dir = 0
	default:
		dir = 360
	}
	return dir
}
// GetStartPosition returns the coordinates where a vehicle enters the
// cooperation zone for the given lane.  Lanes are offset from the zone
// center by half a lane width (laneWidth = dangerZoneLength/2) so that
// incoming and outgoing traffic use different halves of each road.
// An unknown lane yields the origin (0, 0).
func GetStartPosition(lane int, coopZoneLength, dangerZoneLength float64) m.Pos {
	var x, y float64
	laneWidth := dangerZoneLength / 2.0
	switch lane {
	case BottomLane:
		x = (coopZoneLength + laneWidth) / 2.0
		y = 0
	case RightLane:
		x = coopZoneLength
		y = (coopZoneLength + laneWidth) / 2.0
	case TopLane:
		x = (coopZoneLength - laneWidth) / 2.0
		y = coopZoneLength
	case LeftLane:
		x = 0
		y = (coopZoneLength - laneWidth) / 2.0
	default:
		x = 0
		y = 0
	}
	res := m.Pos{X: x, Y: y}
	return res
}
/*
GetEnterPosition returns the coordinates of where the car enters the inner
intersection (the danger zone) for the given entry lane. Unknown lanes
fall back to the origin.
*/
func GetEnterPosition(lane int, coopZoneLength, dangerZoneLength float64) m.Pos {
	laneWidth := dangerZoneLength / 2.0
	near := (coopZoneLength - dangerZoneLength) / 2.0 // danger-zone edge closer to the origin
	far := (coopZoneLength + dangerZoneLength) / 2.0  // danger-zone edge farther from the origin
	switch lane {
	case BottomLane:
		return m.Pos{X: (coopZoneLength + laneWidth) / 2.0, Y: near}
	case TopLane:
		return m.Pos{X: (coopZoneLength - laneWidth) / 2.0, Y: far}
	case LeftLane:
		return m.Pos{X: near, Y: (coopZoneLength + laneWidth) / 2.0}
	case RightLane:
		return m.Pos{X: far, Y: (coopZoneLength - laneWidth) / 2.0}
	}
	return m.Pos{X: 0.0, Y: 0.0}
}
/*
GetEndPosition returns the coordinates of where the car exits the inner
intersection (danger zone), given the entry lane and the turn intention.
Each lane has three exits: the left-turn exit, the right-turn exit, and —
for any other intention — the straight-through exit on the opposite side.
*/
func GetEndPosition(lane, intention int, coopZoneLength, dangerZoneLength float64) m.Pos {
	var x, y float64
	laneWidth := dangerZoneLength / 2.0
	switch lane {
	case BottomLane:
		if intention == LeftIntention {
			x = (coopZoneLength - dangerZoneLength) / 2.0
			y = (coopZoneLength + laneWidth) / 2.0
		} else if intention == RightIntention {
			x = (coopZoneLength + dangerZoneLength) / 2.0
			y = (coopZoneLength - laneWidth) / 2.0
		} else {
			// straight through: exit at the top edge of the danger zone
			x = (coopZoneLength + laneWidth) / 2.0
			y = (coopZoneLength + dangerZoneLength) / 2.0
		}
	case TopLane:
		if intention == LeftIntention {
			x = (coopZoneLength + dangerZoneLength) / 2.0
			y = (coopZoneLength - laneWidth) / 2.0
		} else if intention == RightIntention {
			x = (coopZoneLength - dangerZoneLength) / 2.0
			y = (coopZoneLength + laneWidth) / 2.0
		} else {
			// straight through: exit at the bottom edge of the danger zone
			x = (coopZoneLength - laneWidth) / 2.0
			y = (coopZoneLength - dangerZoneLength) / 2.0
		}
	case LeftLane:
		if intention == LeftIntention {
			x = (coopZoneLength + laneWidth) / 2.0
			y = (coopZoneLength + dangerZoneLength) / 2.0
		} else if intention == RightIntention {
			x = (coopZoneLength - laneWidth) / 2.0
			y = (coopZoneLength - dangerZoneLength) / 2.0
		} else {
			// straight through: exit at the right edge of the danger zone
			x = (coopZoneLength + dangerZoneLength) / 2.0
			y = (coopZoneLength - laneWidth) / 2.0
		}
	case RightLane:
		if intention == LeftIntention {
			x = (coopZoneLength - laneWidth) / 2.0
			y = (coopZoneLength - dangerZoneLength) / 2.0
		} else if intention == RightIntention {
			x = (coopZoneLength + laneWidth) / 2.0
			y = (coopZoneLength + dangerZoneLength) / 2.0
		} else {
			// straight through: exit at the left edge of the danger zone
			x = (coopZoneLength - dangerZoneLength) / 2.0
			y = (coopZoneLength + laneWidth) / 2.0
		}
	default:
		// NOTE(review): the fallback duplicates the BottomLane/LeftIntention
		// exit — confirm this is intentional for unrecognized lane values.
		x = (coopZoneLength - dangerZoneLength) / 2.0
		y = (coopZoneLength + laneWidth) / 2.0
	}
	res := m.Pos{X: x, Y: y}
	return res
}
/*
	A B
	 M
	D C
	Condition
	(0<AM⋅AB<AB⋅AB)∧(0<AM⋅AD<AD⋅AD)
*/
// IsInsideDangerZone reports whether M lies strictly inside the rectangle
// spanned by corner A and its adjacent corners B and D, by comparing the
// projections of AM onto AB and AD against the side lengths. Points on
// the border count as outside (strict inequalities). Parameter C is not
// referenced; it is kept for call-site symmetry with the four corners.
func IsInsideDangerZone(A, B, C, D, M m.Pos) bool {
	ab := A.GetVector(B)
	ad := A.GetVector(D)
	am := A.GetVector(M)
	alongAB := am.ScalarProduct(ab)
	if alongAB <= 0 || alongAB >= ab.ScalarProduct(ab) {
		return false
	}
	alongAD := am.ScalarProduct(ad)
	return 0 < alongAD && alongAD < ad.ScalarProduct(ad)
}
/*
	A B
	D C
*/
// GetDangerZoneCoords returns the four corners of the square danger zone
// centered in the cooperation zone, in the order A (top-left),
// B (top-right), C (bottom-right), D (bottom-left).
func GetDangerZoneCoords(dangerZoneLength, coopZoneLength float64) (m.Pos, m.Pos, m.Pos, m.Pos) {
	halfLane := dangerZoneLength / 2
	center := coopZoneLength / 2
	lo := center - halfLane
	hi := center + halfLane
	a := m.Pos{X: lo, Y: hi}
	b := m.Pos{X: hi, Y: hi}
	c := m.Pos{X: hi, Y: lo}
	d := m.Pos{X: lo, Y: lo}
	return a, b, c, d
}
/*
Returns the center of the circumference that represents the turn trajectory.
*/
func GetCenterOfTurn(startPos, endPos m.Pos, car *Car) m.Pos {
startDir := GetStartDirection(car.Lane) * math.Pi / 180.0
a := math.Abs(math.Cos(startDir))
b := math.Abs(math.Sin(startDir))
x := startPos.X*a + endPos.X*b
y := startPos.Y*b + endPos.Y*a
return m.Pos{X: x, Y: y}
} | vehicle/car_generic/intersection_utils.go | 0.757256 | 0.588475 | intersection_utils.go | starcoder |
package template
/**
This package uses an adapted pattern from the Template design pattern.
The idea is that we have a set of steps we need to perform but the implementation of those steps may be handled by
specific templates how they see fit. E.g. The receipt pdf template may have the logo on the right side whereas the GP
pdf may have a centered logo and some text explaining patient details in a table. - This is typical template pattern.
The adapted part comes from the need for common methods to be reflected in a "Base" class.
E.g. building the footer maybe the same for EVERY template barring one so it makes sense to NOT have to duplicate the same
code for each derived class and instead only the different template can define this method.
Golang doesn't have the normal inheritance support from more mature OOP languages so we attempt to accomplish this
by using composition.
*/
import (
"fmt"
"os"
"github.com/johnfercher/maroto/pkg/pdf"
)
// iTemplate lists the build steps every PDF template must implement.
// The package-level Generate runs them in this fixed order.
type iTemplate interface {
	SetPageMargins()   // page setup before any content is placed
	BuildHeading()     // top section (e.g. logo / title area)
	BuildMainSection() // document body
	BuildFooter()      // bottom section
}
// Template is the base type that concrete templates compose (embed), as
// described in the package comment. It carries the shared Maroto PDF
// handle and provides default implementations of the build steps.
type Template struct {
	Pdf pdf.Maroto // underlying Maroto document being built
}
// Save writes the document to the hard-coded path "pdfs/test.pdf",
// printing a status line; on failure it prints the error and exits the
// process with status 1.
func (t *Template) Save() {
	if err := t.Pdf.OutputFileAndClose("pdfs/test.pdf"); err != nil {
		fmt.Println("⚠️ Could not save PDF:", err)
		os.Exit(1)
	}
	fmt.Println("PDF saved successfully")
}
// SetPageMargins is the base (fallback) implementation; it only logs that
// the base version ran.
func (t *Template) SetPageMargins() {
	fmt.Println("Base class calls setPageMargins()")
}
// BuildHeading is the base (fallback) implementation; it only logs that
// the base version ran.
func (t *Template) BuildHeading() {
	fmt.Println("Base class calls buildHeading()")
}
// BuildMainSection is the base (fallback) implementation; it only logs
// that the base version ran.
func (t *Template) BuildMainSection() {
	fmt.Println("Base class calls buildMainSection()")
}
// BuildFooter is the base (fallback) implementation; it only logs that
// the base version ran.
func (t *Template) BuildFooter() {
	fmt.Println("Base class calls buildFooter()")
}
/*
Generate runs the page setup and the three build phases in order.
NOTE(review): the receiver is the concrete *Template, so these calls are
statically dispatched to the base stub methods — a derived (embedding)
type's overrides are NOT invoked from here. Use the package-level
Generate(iTemplate) to get dynamic dispatch through the interface.
*/
func (t *Template) Generate() {
	t.SetPageMargins()
	t.BuildHeading()
	t.BuildMainSection()
	t.BuildFooter()
}
// Generate drives a full template build through the iTemplate interface,
// so each step dispatches to the concrete template's implementation
// (falling back to the embedded Template defaults where not overridden).
func Generate(t iTemplate) {
	t.SetPageMargins()
	t.BuildHeading()
	t.BuildMainSection()
	t.BuildFooter()
} | template/template.go | 0.643329 | 0.499878 | template.go | starcoder |
package commands
import (
"bytes"
"fmt"
"math"
"os"
"github.com/go-gl/mathgl/mgl32"
"github.com/gookit/color"
"github.com/roboticeyes/gorexfile/encoding/rexfile"
"github.com/urfave/cli/v2"
)
// DensityCommand reduces density of pointLists to a specified grid size,
// an absolute amount or by percentage. One of the three mode flags
// (resolution / percent / absolute) selects the algorithm, and --value
// supplies the numeric amount for it.
var DensityCommand = &cli.Command{
	Name:    "density",
	Usage:   "Reduces density of pointLists to a specified grid size, an absolute amount or by percentage",
	Action:  DensityActions,
	Flags: []cli.Flag{
		// mode flag: voxel-grid thinning with a minimum point distance
		&cli.BoolFlag{
			Name:  "resolution",
			Value: false,
			Usage: "set the minimum distance between points in meters, this option yields the best results " +
				"because it evens out the density across the entire point cloud",
			Aliases: []string{"res"},
		},
		// mode flag: keep a percentage of each pointList
		&cli.BoolFlag{
			Name:    "percent",
			Value:   false,
			Usage:   "reduction in percent every pointList will be reduced by",
			Aliases: []string{"pct"},
		},
		// mode flag: reduce each pointList to a fixed point count
		&cli.BoolFlag{
			Name:    "absolute",
			Value:   false,
			Usage:   "amount to which every pointList will be reduced",
			Aliases: []string{"abs"},
		},
		// numeric amount interpreted according to the selected mode
		&cli.Float64Flag{
			Name:    "value",
			Usage:   "reduction amount",
			Aliases: []string{"val"},
		},
	},
}
// DensityActions reduces the density of all pointLists in the input REX
// file and writes the result to the output file given as the second CLI
// argument. It validates the flag combination, dispatches to the voxel
// (resolution) or naive (percent/absolute) reducer, and returns an error
// instead of panicking on I/O or encoding failures.
func DensityActions(ctx *cli.Context) error {
	output := ctx.Args().Get(1)
	if !ctx.Bool("resolution") && !ctx.Bool("percent") && !ctx.Bool("absolute") {
		color.Red.Println("Please specify: resolution, percent or absolute")
		return fmt.Errorf("No density arguments given")
	}
	if output == "" {
		color.Red.Println("Please specify an output file as second parameter")
		return fmt.Errorf("No output file specified")
	}
	_, rexContent, err := OpenRexFileFromContext(ctx)
	if err != nil {
		return err
	}
	if len(rexContent.PointLists) == 0 {
		color.Red.Println("REX file must have at least one PointList. Density function only affects PointLists")
		return fmt.Errorf("File contains no PointLists")
	}
	if ctx.Bool("resolution") {
		ReducePointListDensityVoxelBased(ctx, rexContent)
	} else {
		ReducePointListDensityNaive(ctx, rexContent)
	}
	// Encode into memory first so an encoding failure never leaves a
	// half-written output file behind.
	var buf bytes.Buffer
	if err := rexfile.NewEncoder(&buf).Encode(*rexContent); err != nil {
		return fmt.Errorf("encoding REX content: %w", err)
	}
	f, err := os.Create(output)
	if err != nil {
		return fmt.Errorf("creating output file %s: %w", output, err)
	}
	defer f.Close()
	n, err := f.Write(buf.Bytes())
	if err != nil {
		return fmt.Errorf("writing output file %s: %w", output, err)
	}
	color.Green.Printf("Successfully written %d bytes to file %s\n", n, output)
	return nil
}
// GridLocation identifies a voxel cell by its integer grid coordinates
// (point coordinate divided by the voxel cell size).
type GridLocation struct {
	x int
	y int
	z int
}
// GridEntry is one original point — position plus color — bucketed into
// a voxel cell during density reduction.
type GridEntry struct {
	location mgl32.Vec3
	color mgl32.Vec3
}
// ReducePointListDensityVoxelBased evens out and thins out pointLists by
// bucketing points into a voxel grid whose cell size is the --value flag,
// then emitting one representative point per occupied cell (the cell
// corner in world coordinates) with the cell's average color.
func ReducePointListDensityVoxelBased(ctx *cli.Context, rexContent *rexfile.File) {
	for i := 0; i < len(rexContent.PointLists); i++ {
		originalPointListLength := len(rexContent.PointLists[i].Points)
		voxelCellSize := float32(ctx.Float64("val"))
		hasColor := len(rexContent.PointLists[i].Colors) > 0
		if !hasColor {
			// Temporarily give every point a zero color so points and
			// colors can be processed in lockstep below.
			rexContent.PointLists[i].Colors = make([]mgl32.Vec3, originalPointListLength)
		}
		// Bucket every point into its voxel cell. append on a missing key
		// yields a fresh slice, so no explicit nil check is needed.
		voxelGrid := make(map[GridLocation][]GridEntry)
		for j := 0; j < originalPointListLength; j++ {
			gridLocation := GetGridLocationOfVec3(rexContent.PointLists[i].Points[j], voxelCellSize)
			gridEntry := GridEntry{rexContent.PointLists[i].Points[j], rexContent.PointLists[i].Colors[j]}
			voxelGrid[gridLocation] = append(voxelGrid[gridLocation], gridEntry)
		}
		// Collapse each occupied cell to a single representative point.
		averagedPoints := make([]mgl32.Vec3, len(voxelGrid))
		averagedColors := make([]mgl32.Vec3, len(voxelGrid))
		iter := 0
		for gridLocation, pointsInGridCell := range voxelGrid {
			summedColor := mgl32.Vec3{}
			for j := 0; j < len(pointsInGridCell); j++ {
				summedColor = mgl32.Vec3.Add(pointsInGridCell[j].color, summedColor)
			}
			// The representative location is the voxel corner translated back
			// to real-world coordinates. (Averaging the members' locations
			// would also work, but the regular grid looks nicer.)
			averagedPoints[iter] = mgl32.Vec3{float32(gridLocation.x) * voxelCellSize, float32(gridLocation.y) * voxelCellSize, float32(gridLocation.z) * voxelCellSize}
			averagedColors[iter] = mgl32.Vec3.Mul(summedColor, float32(1)/float32(len(pointsInGridCell)))
			iter++
		}
		rexContent.PointLists[i].Points = averagedPoints
		if hasColor {
			rexContent.PointLists[i].Colors = averagedColors
		} else {
			// Drop the temporary colors again so colorless input stays colorless.
			rexContent.PointLists[i].Colors = nil
		}
	}
}
// GetGridLocationOfVec3 maps a point to its voxel grid cell by dividing
// each coordinate by the cell size and converting to int.
// NOTE(review): int() truncates toward zero, so cells on either side of
// the origin share index 0 on each axis (a double-width cell there) —
// confirm whether math.Floor was intended.
func GetGridLocationOfVec3(vec3 mgl32.Vec3, cellSize float32) GridLocation {
	toCell := func(coord float32) int { return int(coord / cellSize) }
	return GridLocation{toCell(vec3[0]), toCell(vec3[1]), toCell(vec3[2])}
}
// ReducePointListDensityNaive reduces each pointList to a target size by
// keeping evenly-spaced samples (by index) of the original list. As the
// original comment notes, this heavily depends on the pointList's spatial
// distribution being even; it works fine for laser scans when a precise
// count or percentage is required.
func ReducePointListDensityNaive(ctx *cli.Context, rexContent *rexfile.File) {
	for i := 0; i < len(rexContent.PointLists); i++ {
		originalPointListLength := len(rexContent.PointLists[i].Points)
		reducedPointListLength := GetNewPointArraySize(originalPointListLength, ctx.Float64("val"), ctx.Bool("percent"))
		hasColor := len(rexContent.PointLists[i].Colors) > 0
		// NOTE(review): this advisory is printed once per pointList, not once
		// per run — confirm that repetition is intended.
		color.Red.Println("resolution argument should be preferred, especially for non-evenly distributed pointLists")
		if originalPointListLength <= reducedPointListLength {
			color.Red.Println("Skipped pointList already smaller or equal to the desired size. PointListID:", rexContent.PointLists[i].ID)
			continue
		}
		if !hasColor {
			// Allocate zero colors so points and colors can be sampled in lockstep.
			rexContent.PointLists[i].Colors = make([]mgl32.Vec3, originalPointListLength)
		}
		tempListPoints := make([]mgl32.Vec3, reducedPointListLength)
		tempListColors := make([]mgl32.Vec3, reducedPointListLength)
		for j := 0; j < reducedPointListLength; j++ {
			// Stretch the reduced index range back over the original list.
			adjustedIndex := int((float32(j) / float32(reducedPointListLength)) * float32(originalPointListLength))
			tempListPoints[j] = rexContent.PointLists[i].Points[adjustedIndex]
			if len(rexContent.PointLists[i].Colors) > 0 {
				tempListColors[j] = rexContent.PointLists[i].Colors[adjustedIndex]
			} else {
				// Magenta fallback. NOTE(review): with Colors pre-allocated
				// above, this branch appears unreachable for non-empty lists.
				tempListColors[j] = mgl32.Vec3{1.0, 0.0, 1.0}
			}
		}
		rexContent.PointLists[i].Points = tempListPoints
		if hasColor {
			rexContent.PointLists[i].Colors = tempListColors
		} else {
			// Drop the temporary colors so colorless input stays colorless.
			rexContent.PointLists[i].Colors = nil
		}
	}
}
// GetNewPointArraySize returns the target point count for a reduction.
// When isPercentage is true the result is ceil(pointListSize*reduction/100);
// otherwise reduction itself is interpreted as the absolute target count.
func GetNewPointArraySize(pointListSize int, reduction float64, isPercentage bool) int {
	if !isPercentage {
		return int(reduction)
	}
	return int(math.Ceil(float64(pointListSize) * reduction / 100))
}
package virustotal
import (
"context"
virustotal "github.com/VirusTotal/vt-go"
"github.com/turbot/steampipe-plugin-sdk/grpc/proto"
"github.com/turbot/steampipe-plugin-sdk/plugin"
"github.com/turbot/steampipe-plugin-sdk/plugin/transform"
)
// tableVirusTotalIP defines the virustotal_ip table: analysis and metadata
// for a single IP address, keyed by the required "id" qual. Fixes the
// typo "harmlaess" in the category column's user-facing description.
func tableVirusTotalIP(ctx context.Context) *plugin.Table {
	return &plugin.Table{
		Name:        "virustotal_ip",
		Description: "Information and analysis for an IP address.",
		List: &plugin.ListConfig{
			Hydrate:    listIP,
			KeyColumns: plugin.SingleColumn("id"),
		},
		Columns: []*plugin.Column{
			// Top columns
			{Name: "id", Type: proto.ColumnType_IPADDR, Hydrate: ipQual, Transform: transform.FromValue(), Description: "The IP to retrieve."},
			// Other columns
			{Name: "as_owner", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "as_owner"), Description: "Owner of the Autonomous System to which the IP belongs."},
			{Name: "asn", Type: proto.ColumnType_INT, Transform: transform.FromValue().TransformP(getAttribute, "asn"), Description: "Autonomous System Number to which the IP belongs."},
			{Name: "category", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "category"), Description: "Normalized result: harmless, undetected, suspicious, malicious."},
			{Name: "continent", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "continent"), Description: "Continent where the IP is placed (ISO-3166 continent code)."},
			{Name: "country", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "country"), Description: "Country where the IP is placed (ISO-3166 country code)."},
			{Name: "engine_name", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "engine_name"), Description: "Complete name of the URL scanning service."},
			{Name: "last_analysis_results", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "last_analysis_results"), Description: "Result from URL scanners. Dict with scanner name as key and a dict with notes/result from that scanner as value."},
			{Name: "last_analysis_stats", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "last_analysis_stats"), Description: "Number of different results from this scans."},
			{Name: "last_https_certificate", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "last_https_certificate"), Description: "SSL Certificate object retrieved last time the IP was analysed."},
			{Name: "last_https_certificate_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "last_https_certificate_date").Transform(transform.UnixToTimestamp), Description: "Date when the certificate was retrieved by VirusTotal."},
			{Name: "last_modification_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "last_modification_date").Transform(transform.UnixToTimestamp), Description: "Date when any of IP's information was last updated."},
			{Name: "method", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "method"), Description: "Type of service given by that URL scanning service, e.g. blacklist."},
			{Name: "network", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "network"), Description: "IPv4 network range to which the IP belongs."},
			{Name: "regional_internet_registry", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "regional_internet_registry"), Description: "One of the current regional internet registries: AFRINIC, ARIN, APNIC, LACNIC or RIPE NCC."},
			{Name: "reputation", Type: proto.ColumnType_INT, Transform: transform.FromValue().TransformP(getAttribute, "reputation"), Description: "IP's score calculated from the votes of the VirusTotal's community."},
			{Name: "result", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "result"), Description: "Raw value returned by the URL scanner: e.g. clean, malicious, suspicious, phishing. It may vary from scanner to scanner, hence the need for the category field for normalisation."},
			{Name: "tags", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "tags"), Description: "List of representative attributes."},
			{Name: "total_votes", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "total_votes"), Description: "Unweighted number of total votes from the community, divided into harmless and malicious."},
			{Name: "whois", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "whois"), Description: "WHOIS information as returned from the pertinent whois server."},
			{Name: "whois_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "whois_date").Transform(transform.UnixToTimestamp), Description: "Date of the last update of the whois record in VirusTotal."},
		},
	}
}
// ipQual echoes the "id" key-column qual (the requested IP address) back
// so the id column can render the value the caller queried for.
func ipQual(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
	return d.KeyColumnQuals["id"].GetInetValue().GetAddr(), nil
}
func listIP(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
conn, err := connect(ctx, d)
if err != nil {
plugin.Logger(ctx).Error("virustotal_ip.listIP", "connection_error", err)
return nil, err
}
quals := d.KeyColumnQuals
id := quals["id"].GetInetValue().GetAddr()
u := virustotal.URL("ip_addresses/" + id)
it, err := conn.Iterator(u)
if err != nil {
plugin.Logger(ctx).Error("virustotal_ip.listIP", "query_error", err, "it", it)
return nil, err
}
defer it.Close()
for it.Next() {
i := it.Get()
d.StreamListItem(ctx, i)
}
if err := it.Error(); err != nil {
if !isNotFoundError(err) {
plugin.Logger(ctx).Error("virustotal_ip.listIP", "query_error", err, "it", it)
return nil, err
}
}
return nil, nil
} | virustotal/table_virustotal_ip.go | 0.57344 | 0.437944 | table_virustotal_ip.go | starcoder |
package binarytree
import (
"fmt"
"reflect"
"github.com/Thrimbda/dune/utils"
)
// BSTimpl is an unbalanced binary search tree addressed through its root
// node.
// NOTE(review): the mutating methods (Insert, Delete, transplant) use
// VALUE receivers and assign to b.root on a copy, so changes to the root
// itself never persist to the caller's tree — confirm whether pointer
// receivers were intended.
type BSTimpl struct {
	root BinNode
}
// Insert adds value as a new leaf, descending left on LessComparator and
// right otherwise (duplicates go right).
// NOTE(review): the receiver is a value, so `b.root = node` for an empty
// tree mutates a copy — the very first insert is lost to the caller.
// A pointer receiver is probably intended.
func (b BSTimpl) Insert(value interface{}) {
	var father BinNode
	brother := b.root
	node := &BinNodePtr{value, nil, nil, nil}
	// Walk down to the leaf position, remembering the parent.
	for brother != nil {
		father = brother
		if utils.LessComparator(node.Element(), brother.Element()) {
			brother = brother.Left()
		} else {
			brother = brother.Right()
		}
	}
	node.SetParent(father)
	if father == nil {
		// Empty tree: the new node becomes the root (see NOTE above).
		b.root = node
	} else if utils.LessComparator(node.Element(), father.Element()) {
		father.SetLeft(node)
	} else {
		father.SetRight(node)
	}
}
// Delete removes the node whose element equals Key using the
// transplant-based deletion scheme (splice out a node with at most one
// child; otherwise replace with a successor node).
// NOTE(review): SearchHelp returns nil for an absent key, so node.Left()
// below would panic — confirm callers only delete existing keys.
func (b BSTimpl) Delete(Key int) {
	node := SearchHelp(b.root, Key)
	if node.Left() == nil {
		b.transplant(node, node.Right())
	} else if node.Right() == nil {
		b.transplant(node, node.Left())
	} else {
		// NOTE(review): the textbook two-child case takes the minimum of the
		// RIGHT subtree (the in-order successor); MinimumHelp(node) starts
		// from node itself — verify MinimumHelp(node.Right()) was intended.
		replacement := MinimumHelp(node)
		if replacement.Parent() != node {
			b.transplant(replacement, replacement.Right())
			replacement.SetRight(node.Right())
			replacement.Right().SetParent(replacement)
		}
		b.transplant(node, replacement)
		replacement.SetLeft(node.Left())
		replacement.Left().SetParent(replacement)
	}
}
// Search looks key up in the tree and returns a BST rooted at the
// matching node; the wrapped root is nil when the key is absent.
func (b BSTimpl) Search(key int) BST {
	match := SearchHelp(b.root, key)
	return &BSTimpl{root: match}
}
// SearchHelp walks the subtree rooted at root, branching by
// LessComparator, and returns the node whose element deep-equals key,
// or nil when no such node exists.
func SearchHelp(root BinNode, key int) BinNode {
	cur := root
	for cur != nil {
		if reflect.DeepEqual(key, cur.Element()) {
			return cur
		}
		if utils.LessComparator(key, cur.Element()) {
			cur = cur.Left()
		} else {
			cur = cur.Right()
		}
	}
	return cur
}
// transplant replaces the subtree rooted at u with the subtree rooted at
// v, rewiring the parent links; v may be nil.
// NOTE(review): the receiver is a value, so `b.root = v` when u is the
// root mutates a copy of the BSTimpl — the root replacement does not
// persist to the caller's tree. A pointer receiver is probably intended.
func (b BSTimpl) transplant(u, v BinNode) {
	if u.Parent() == nil {
		b.root = v
	} else if u == u.Parent().Left() {
		u.Parent().SetLeft(v)
	} else {
		u.Parent().SetRight(v)
	}
	if v != nil {
		v.SetParent(u.Parent())
	}
}
// Successor returns a BST rooted at the in-order successor of this
// tree's root (the wrapped root is nil when none exists).
func (b BSTimpl) Successor() BST {
	return &BSTimpl{root: SuccessorHelp(b.root)}
}
// SuccessorHelp returns the in-order successor of root: the minimum of
// its right subtree when one exists, otherwise the lowest ancestor whose
// left subtree contains root (nil when root holds the maximum).
func SuccessorHelp(root BinNode) BinNode {
	if right := root.Right(); right != nil {
		return MinimumHelp(right)
	}
	child, ancestor := root, root.Parent()
	for ancestor != nil && child == ancestor.Right() {
		child, ancestor = ancestor, ancestor.Parent()
	}
	return ancestor
}
// Predecessor returns a BST rooted at the in-order predecessor of this
// tree's root (the wrapped root is nil when none exists).
func (b BSTimpl) Predecessor() BST {
	return &BSTimpl{root: PredecessorHelp(b.root)}
}
// PredecessorHelp returns the in-order predecessor of root: the maximum
// of its left subtree when one exists, otherwise the lowest ancestor
// whose right subtree contains root (nil when root holds the minimum).
func PredecessorHelp(root BinNode) BinNode {
	if left := root.Left(); left != nil {
		return MaximumHelp(left)
	}
	child, ancestor := root, root.Parent()
	for ancestor != nil && child == ancestor.Left() {
		child, ancestor = ancestor, ancestor.Parent()
	}
	return ancestor
}
// isEmpty reports whether the tree contains no nodes.
func (b BSTimpl) isEmpty() bool {
	return b.root == nil
}
// DeleteHelp is an unimplemented stub; it always returns nil.
// NOTE(review): deletion is handled by the Delete method via transplant —
// confirm whether this placeholder is still needed.
func DeleteHelp(root BinNode, Key int) BinNode {
	return nil
}
// Minimum returns a BST rooted at the smallest-key node of this tree.
func (b BSTimpl) Minimum() BST {
	smallest := MinimumHelp(b.root)
	return &BSTimpl{root: smallest}
}
// MinimumHelp returns the leftmost (smallest-key) node of the subtree
// rooted at root, or nil when root is nil.
//
// Fixes the previous version, which tested root.Left() in the loop
// condition but only ever reassigned a separate variable — the loop
// never advanced, spinning forever whenever root had a left child.
func MinimumHelp(root BinNode) BinNode {
	node := root
	for node != nil && node.Left() != nil {
		node = node.Left()
	}
	return node
}
// Maximum returns a BST rooted at the largest-key node of this tree.
func (b BSTimpl) Maximum() BST {
	largest := MaximumHelp(b.root)
	return &BSTimpl{root: largest}
}
// MaximumHelp returns the rightmost (largest-key) node of the subtree
// rooted at root, or nil when root is nil.
//
// Fixes the previous version, which tested root.Right() in the loop
// condition but only ever reassigned a separate variable — the loop
// never advanced, spinning forever whenever root had a right child.
func MaximumHelp(root BinNode) BinNode {
	node := root
	for node != nil && node.Right() != nil {
		node = node.Right()
	}
	return node
}
// InorderWalk prints the tree's elements to stdout in in-order
// (ascending) sequence.
func (b BSTimpl) InorderWalk() {
	inorderWalkHelp(b.root)
}
func inorderWalkHelp(root BinNode) {
if root != nil {
inorderWalkHelp(root.Left())
fmt.Printf("%s ", root.Element())
inorderWalkHelp(root.Right())
}
} | binarytree/binary_search_tree.go | 0.568176 | 0.429669 | binary_search_tree.go | starcoder |
package speaker
// 5. Path Attributes
// This section discusses the path attributes of the UPDATE message.
// Path attributes fall into four separate categories:
// 1. Well-known mandatory.
// 2. Well-known discretionary.
// 3. Optional transitive.
// 4. Optional non-transitive.
// BGP implementations MUST recognize all well-known attributes. Some
// of these attributes are mandatory and MUST be included in every
// UPDATE message that contains NLRI. Others are discretionary and MAY
// or MAY NOT be sent in a particular UPDATE message.
// Once a BGP peer has updated any well-known attributes, it MUST pass
// these attributes to its peers in any updates it transmits.
// In addition to well-known attributes, each path MAY contain one or
// more optional attributes. It is not required or expected that all
// BGP implementations support all optional attributes. The handling of
// an unrecognized optional attribute is determined by the setting of
// the Transitive bit in the attribute flags octet. Paths with
// unrecognized transitive optional attributes SHOULD be accepted. If a
// path with an unrecognized transitive optional attribute is accepted
// and passed to other BGP peers, then the unrecognized transitive
// optional attribute of that path MUST be passed, along with the path,
// to other BGP peers with the Partial bit in the Attribute Flags octet
// set to 1. If a path with a recognized, transitive optional attribute
// is accepted and passed along to other BGP peers and the Partial bit
// in the Attribute Flags octet is set to 1 by some previous AS, it MUST
// NOT be set back to 0 by the current AS. Unrecognized non-transitive
// optional attributes MUST be quietly ignored and not passed along to
// other BGP peers.
// New, transitive optional attributes MAY be attached to the path by
// the originator or by any other BGP speaker in the path. If they are
// not attached by the originator, the Partial bit in the Attribute
// Flags octet is set to 1. The rules for attaching new non-transitive
// optional attributes will depend on the nature of the specific
// attribute. The documentation of each new non-transitive optional
// attribute will be expected to include such rules (the description of
// the MULTI_EXIT_DISC attribute gives an example). All optional
// attributes (both transitive and non-transitive), MAY be updated (if
// appropriate) by BGP speakers in the path.
// The sender of an UPDATE message SHOULD order path attributes within
// the UPDATE message in ascending order of attribute type. The
// receiver of an UPDATE message MUST be prepared to handle path
// attributes within UPDATE messages that are out of order.
// The same attribute (attribute with the same type) cannot appear more
// than once within the Path Attributes field of a particular UPDATE
// message.
// The mandatory category refers to an attribute that MUST be present in
// both IBGP and EBGP exchanges if NLRI are contained in the UPDATE
// message. Attributes classified as optional for the purpose of the
// protocol extension mechanism may be purely discretionary,
// discretionary, required, or disallowed in certain contexts.
// attribute EBGP IBGP
// ORIGIN mandatory mandatory
// AS_PATH mandatory mandatory
// NEXT_HOP mandatory mandatory
// MULTI_EXIT_DISC discretionary discretionary
// LOCAL_PREF see Section 5.1.5 required
// ATOMIC_AGGREGATE see Section 5.1.6 and 9.1.4
// AGGREGATOR discretionary discretionary
// 5.1. Path Attribute Usage
// The usage of each BGP path attribute is described in the following
// clauses.
// 5.1.1. ORIGIN
// ORIGIN is a well-known mandatory attribute. The ORIGIN attribute is
// generated by the speaker that originates the associated routing
// information. Its value SHOULD NOT be changed by any other speaker.
// 5.1.2. AS_PATH
// AS_PATH is a well-known mandatory attribute. This attribute
// identifies the autonomous systems through which routing information
// carried in this UPDATE message has passed. The components of this
// list can be AS_SETs or AS_SEQUENCEs.
// When a BGP speaker propagates a route it learned from another BGP
// speaker's UPDATE message, it modifies the route's AS_PATH attribute
// based on the location of the BGP speaker to which the route will be
// sent:
// a) When a given BGP speaker advertises the route to an internal
// peer, the advertising speaker SHALL NOT modify the AS_PATH
// attribute associated with the route.
// b) When a given BGP speaker advertises the route to an external
// peer, the advertising speaker updates the AS_PATH attribute as
// follows:
// 1) if the first path segment of the AS_PATH is of type
// AS_SEQUENCE, the local system prepends its own AS number as
// the last element of the sequence (put it in the leftmost
// position with respect to the position of octets in the
// protocol message). If the act of prepending will cause an
// overflow in the AS_PATH segment (i.e., more than 255 ASes),
// it SHOULD prepend a new segment of type AS_SEQUENCE and
// prepend its own AS number to this new segment.
// 2) if the first path segment of the AS_PATH is of type AS_SET,
// the local system prepends a new path segment of type
// AS_SEQUENCE to the AS_PATH, including its own AS number in
// that segment.
// 3) if the AS_PATH is empty, the local system creates a path
// segment of type AS_SEQUENCE, places its own AS into that
// segment, and places that segment into the AS_PATH.
// When a BGP speaker originates a route then:
// a) the originating speaker includes its own AS number in a path
// segment, of type AS_SEQUENCE, in the AS_PATH attribute of all
// UPDATE messages sent to an external peer. In this case, the AS
// number of the originating speaker's autonomous system will be
// the only entry the path segment, and this path segment will be
// the only segment in the AS_PATH attribute.
// b) the originating speaker includes an empty AS_PATH attribute in
// all UPDATE messages sent to internal peers. (An empty AS_PATH
// attribute is one whose length field contains the value zero).
// Whenever the modification of the AS_PATH attribute calls for
// including or prepending the AS number of the local system, the local
// system MAY include/prepend more than one instance of its own AS
// number in the AS_PATH attribute. This is controlled via local
// configuration.
// 5.1.3. NEXT_HOP
// The NEXT_HOP is a well-known mandatory attribute that defines the IP
// address of the router that SHOULD be used as the next hop to the
// destinations listed in the UPDATE message. The NEXT_HOP attribute is
// calculated as follows:
// 1) When sending a message to an internal peer, if the route is not
// locally originated, the BGP speaker SHOULD NOT modify the
// NEXT_HOP attribute unless it has been explicitly configured to
// announce its own IP address as the NEXT_HOP. When announcing a
// locally-originated route to an internal peer, the BGP speaker
// SHOULD use the interface address of the router through which
// the announced network is reachable for the speaker as the
// NEXT_HOP. If the route is directly connected to the speaker,
// or if the interface address of the router through which the
// announced network is reachable for the speaker is the internal
// peer's address, then the BGP speaker SHOULD use its own IP
// address for the NEXT_HOP attribute (the address of the
// interface that is used to reach the peer).
// 2) When sending a message to an external peer, X, and the peer is
// one IP hop away from the speaker:
// - If the route being announced was learned from an internal
// peer or is locally originated, the BGP speaker can use an
// interface address of the internal peer router (or the
// internal router) through which the announced network is
// reachable for the speaker for the NEXT_HOP attribute,
// provided that peer X shares a common subnet with this
// address. This is a form of "third party" NEXT_HOP attribute.
// - Otherwise, if the route being announced was learned from an
// external peer, the speaker can use an IP address of any
// adjacent router (known from the received NEXT_HOP attribute)
// that the speaker itself uses for local route calculation in
// the NEXT_HOP attribute, provided that peer X shares a common
// subnet with this address. This is a second form of "third
// party" NEXT_HOP attribute.
// - Otherwise, if the external peer to which the route is being
// advertised shares a common subnet with one of the interfaces
// of the announcing BGP speaker, the speaker MAY use the IP
// address associated with such an interface in the NEXT_HOP
// attribute. This is known as a "first party" NEXT_HOP
// attribute.
// - By default (if none of the above conditions apply), the BGP
// speaker SHOULD use the IP address of the interface that the
// speaker uses to establish the BGP connection to peer X in the
// NEXT_HOP attribute.
// 3) When sending a message to an external peer X, and the peer is
// multiple IP hops away from the speaker (aka "multihop EBGP"):
// - The speaker MAY be configured to propagate the NEXT_HOP
// attribute. In this case, when advertising a route that the
// speaker learned from one of its peers, the NEXT_HOP attribute
// of the advertised route is exactly the same as the NEXT_HOP
// attribute of the learned route (the speaker does not modify
// the NEXT_HOP attribute).
// - By default, the BGP speaker SHOULD use the IP address of the
// interface that the speaker uses in the NEXT_HOP attribute to
// establish the BGP connection to peer X.
// Normally, the NEXT_HOP attribute is chosen such that the shortest
// available path will be taken. A BGP speaker MUST be able to support
// the disabling advertisement of third party NEXT_HOP attributes in
// order to handle imperfectly bridged media.
// A route originated by a BGP speaker SHALL NOT be advertised to a peer
// using an address of that peer as NEXT_HOP. A BGP speaker SHALL NOT
// install a route with itself as the next hop.
// The NEXT_HOP attribute is used by the BGP speaker to determine the
// actual outbound interface and immediate next-hop address that SHOULD
// be used to forward transit packets to the associated destinations.
// The immediate next-hop address is determined by performing a
// recursive route lookup operation for the IP address in the NEXT_HOP
// attribute, using the contents of the Routing Table, selecting one
// entry if multiple entries of equal cost exist. The Routing Table
// entry that resolves the IP address in the NEXT_HOP attribute will
// always specify the outbound interface. If the entry specifies an
// attached subnet, but does not specify a next-hop address, then the
// address in the NEXT_HOP attribute SHOULD be used as the immediate
// next-hop address. If the entry also specifies the next-hop address,
// this address SHOULD be used as the immediate next-hop address for
// packet forwarding.
// 5.1.4. MULTI_EXIT_DISC
// The MULTI_EXIT_DISC is an optional non-transitive attribute that is
// intended to be used on external (inter-AS) links to discriminate
// among multiple exit or entry points to the same neighboring AS. The
// value of the MULTI_EXIT_DISC attribute is a four-octet unsigned
// number, called a metric. All other factors being equal, the exit
// point with the lower metric SHOULD be preferred. If received over
// EBGP, the MULTI_EXIT_DISC attribute MAY be propagated over IBGP to
// other BGP speakers within the same AS (see also 9.1.2.2). The
// MULTI_EXIT_DISC attribute received from a neighboring AS MUST NOT be
// propagated to other neighboring ASes.
// A BGP speaker MUST implement a mechanism (based on local
// configuration) that allows the MULTI_EXIT_DISC attribute to be
// removed from a route. If a BGP speaker is configured to remove the
// MULTI_EXIT_DISC attribute from a route, then this removal MUST be
// done prior to determining the degree of preference of the route and
// prior to performing route selection (Decision Process phases 1 and
// 2).
// An implementation MAY also (based on local configuration) alter the
// value of the MULTI_EXIT_DISC attribute received over EBGP. If a BGP
// speaker is configured to alter the value of the MULTI_EXIT_DISC
// attribute received over EBGP, then altering the value MUST be done
// prior to determining the degree of preference of the route and prior
// to performing route selection (Decision Process phases 1 and 2). See
// Section 9.1.2.2 for necessary restrictions on this.
// 5.1.5. LOCAL_PREF
// LOCAL_PREF is a well-known attribute that SHALL be included in all
// UPDATE messages that a given BGP speaker sends to other internal
// peers. A BGP speaker SHALL calculate the degree of preference for
// each external route based on the locally-configured policy, and
// include the degree of preference when advertising a route to its
// internal peers. The higher degree of preference MUST be preferred.
// A BGP speaker uses the degree of preference learned via LOCAL_PREF in
// its Decision Process (see Section 9.1.1).
// A BGP speaker MUST NOT include this attribute in UPDATE messages it
// sends to external peers, except in the case of BGP Confederations
// [RFC3065]. If it is contained in an UPDATE message that is received
// from an external peer, then this attribute MUST be ignored by the
// receiving speaker, except in the case of BGP Confederations
// [RFC3065].
// 5.1.6. ATOMIC_AGGREGATE
// ATOMIC_AGGREGATE is a well-known discretionary attribute.
// When a BGP speaker aggregates several routes for the purpose of
// advertisement to a particular peer, the AS_PATH of the aggregated
// route normally includes an AS_SET formed from the set of ASes from
// which the aggregate was formed. In many cases, the network
// administrator can determine if the aggregate can safely be advertised
// without the AS_SET, and without forming route loops.
// If an aggregate excludes at least some of the AS numbers present in
// the AS_PATH of the routes that are aggregated as a result of dropping
// the AS_SET, the aggregated route, when advertised to the peer, SHOULD
// include the ATOMIC_AGGREGATE attribute.
// A BGP speaker that receives a route with the ATOMIC_AGGREGATE
// attribute SHOULD NOT remove the attribute when propagating the route
// to other speakers.
// A BGP speaker that receives a route with the ATOMIC_AGGREGATE
// attribute MUST NOT make any NLRI of that route more specific (as
// defined in 9.1.4) when advertising this route to other BGP speakers.
// A BGP speaker that receives a route with the ATOMIC_AGGREGATE
// attribute needs to be aware of the fact that the actual path to
// destinations, as specified in the NLRI of the route, while having the
// loop-free property, may not be the path specified in the AS_PATH
// attribute of the route.
// 5.1.7. AGGREGATOR
// AGGREGATOR is an optional transitive attribute, which MAY be included
// in updates that are formed by aggregation (see Section 9.2.2.2). A
// BGP speaker that performs route aggregation MAY add the AGGREGATOR
// attribute, which SHALL contain its own AS number and IP address. The
// IP address SHOULD be the same as the BGP Identifier of the speaker.
// 6. BGP Error Handling.
// This section describes actions to be taken when errors are detected
// while processing BGP messages.
// When any of the conditions described here are detected, a
// NOTIFICATION message, with the indicated Error Code, Error Subcode,
// and Data fields, is sent, and the BGP connection is closed (unless it
// is explicitly stated that no NOTIFICATION message is to be sent and
// the BGP connection is not to be closed). If no Error Subcode is
// specified, then a zero MUST be used.
// The phrase "the BGP connection is closed" means the TCP connection
// has been closed, the associated Adj-RIB-In has been cleared, and all
// resources for that BGP connection have been deallocated. Entries in
// the Loc-RIB associated with the remote peer are marked as invalid.
// The local system recalculates its best routes for the destinations of
// the routes marked as invalid. Before the invalid routes are deleted
// from the system, it advertises, to its peers, either withdraws for
// the routes marked as invalid, or the new best routes before the
// invalid routes are deleted from the system.
// Unless specified explicitly, the Data field of the NOTIFICATION
// message that is sent to indicate an error is empty.
// 6.5. Hold Timer Expired Error Handling
// If a system does not receive successive KEEPALIVE, UPDATE, and/or
// NOTIFICATION messages within the period specified in the Hold Time
// field of the OPEN message, then the NOTIFICATION message with the
// Hold Timer Expired Error Code is sent and the BGP connection is
// closed.
// 6.6. Finite State Machine Error Handling
// Any error detected by the BGP Finite State Machine (e.g., receipt of
// an unexpected event) is indicated by sending the NOTIFICATION message
// with the Error Code Finite State Machine Error.
// 6.7. Cease
// In the absence of any fatal errors (that are indicated in this
// section), a BGP peer MAY choose, at any given time, to close its BGP
// connection by sending the NOTIFICATION message with the Error Code
// Cease. However, the Cease NOTIFICATION message MUST NOT be used when
// a fatal error indicated by this section does exist.
// A BGP speaker MAY support the ability to impose a locally-configured,
// upper bound on the number of address prefixes the speaker is willing
// to accept from a neighbor. When the upper bound is reached, the
// speaker, under control of local configuration, either (a) discards
// new address prefixes from the neighbor (while maintaining the BGP
// connection with the neighbor), or (b) terminates the BGP connection
// with the neighbor. If the BGP speaker decides to terminate its BGP
// connection with a neighbor because the number of address prefixes
// received from the neighbor exceeds the locally-configured, upper
// bound, then the speaker MUST send the neighbor a NOTIFICATION message
// with the Error Code Cease. The speaker MAY also log this locally.
// 6.8. BGP Connection Collision Detection
// If a pair of BGP speakers try to establish a BGP connection with each
// other simultaneously, then two parallel connections will be formed.
// If the source IP address used by one of these connections is the same
// as the destination IP address used by the other, and the destination
// IP address used by the first connection is the same as the source IP
// address used by the other, connection collision has occurred. In the
// event of connection collision, one of the connections MUST be closed.
// Based on the value of the BGP Identifier, a convention is established
// for detecting which BGP connection is to be preserved when a
// collision occurs. The convention is to compare the BGP Identifiers
// of the peers involved in the collision and to retain only the
// connection initiated by the BGP speaker with the higher-valued BGP
// Identifier.
// Upon receipt of an OPEN message, the local system MUST examine all of
// its connections that are in the OpenConfirm state. A BGP speaker MAY
// also examine connections in an OpenSent state if it knows the BGP
// Identifier of the peer by means outside of the protocol. If, among
// these connections, there is a connection to a remote BGP speaker
// whose BGP Identifier equals the one in the OPEN message, and this
// connection collides with the connection over which the OPEN message
// is received, then the local system performs the following collision
// resolution procedure:
// 1) The BGP Identifier of the local system is compared to the BGP
// Identifier of the remote system (as specified in the OPEN
// message). Comparing BGP Identifiers is done by converting them
// to host byte order and treating them as 4-octet unsigned
// integers.
// 2) If the value of the local BGP Identifier is less than the
// remote one, the local system closes the BGP connection that
// already exists (the one that is already in the OpenConfirm
// state), and accepts the BGP connection initiated by the remote
// system.
// 3) Otherwise, the local system closes the newly created BGP
// connection (the one associated with the newly received OPEN
// message), and continues to use the existing one (the one that
// is already in the OpenConfirm state).
// Unless allowed via configuration, a connection collision with an
// existing BGP connection that is in the Established state causes
// closing of the newly created connection.
// Note that a connection collision cannot be detected with connections
// that are in Idle, Connect, or Active states.
// Closing the BGP connection (that results from the collision
// resolution procedure) is accomplished by sending the NOTIFICATION
// message with the Error Code Cease.
// 7. BGP Version Negotiation
// BGP speakers MAY negotiate the version of the protocol by making
// multiple attempts at opening a BGP connection, starting with the
// highest version number each BGP speaker supports. If an open attempt
// fails with an Error Code, OPEN Message Error, and an Error Subcode,
// Unsupported Version Number, then the BGP speaker has available the
// version number it tried, the version number its peer tried, the
// version number passed by its peer in the NOTIFICATION message, and
// the version numbers it supports. If the two peers do support one or
// more common versions, then this will allow them to rapidly determine
// the highest common version. In order to support BGP version
// negotiation, future versions of BGP MUST retain the format of the
// OPEN and NOTIFICATION messages.
// 9. UPDATE Message Handling
// An UPDATE message may be received only in the Established state.
// Receiving an UPDATE message in any other state is an error. When an
// UPDATE message is received, each field is checked for validity, as
// specified in Section 6.3.
// If an optional non-transitive attribute is unrecognized, it is
// quietly ignored. If an optional transitive attribute is
// unrecognized, the Partial bit (the third high-order bit) in the
// attribute flags octet is set to 1, and the attribute is retained for
// propagation to other BGP speakers.
// If an optional attribute is recognized and has a valid value, then,
// depending on the type of the optional attribute, it is processed
// locally, retained, and updated, if necessary, for possible
// propagation to other BGP speakers.
// If the UPDATE message contains a non-empty WITHDRAWN ROUTES field,
// the previously advertised routes, whose destinations (expressed as IP
// prefixes) are contained in this field, SHALL be removed from the
// Adj-RIB-In. This BGP speaker SHALL run its Decision Process because
// the previously advertised route is no longer available for use.
// If the UPDATE message contains a feasible route, the Adj-RIB-In will
// be updated with this route as follows: if the NLRI of the new route
// is identical to the one the route currently has stored in the Adj-
// RIB-In, then the new route SHALL replace the older route in the Adj-
// RIB-In, thus implicitly withdrawing the older route from service.
// Otherwise, if the Adj-RIB-In has no route with NLRI identical to the
// new route, the new route SHALL be placed in the Adj-RIB-In.
// Once the BGP speaker updates the Adj-RIB-In, the speaker SHALL run
// its Decision Process.
// 9.1. Decision Process
// The Decision Process selects routes for subsequent advertisement by
// applying the policies in the local Policy Information Base (PIB) to
// the routes stored in its Adj-RIBs-In. The output of the Decision
// Process is the set of routes that will be advertised to peers; the
// selected routes will be stored in the local speaker's Adj-RIBs-Out,
// according to policy.
// The BGP Decision Process described here is conceptual, and does not
// have to be implemented precisely as described, as long as the
// implementations support the described functionality and they exhibit
// the same externally visible behavior.
// The selection process is formalized by defining a function that takes
// the attribute of a given route as an argument and returns either (a)
// a non-negative integer denoting the degree of preference for the
// route, or (b) a value denoting that this route is ineligible to be
// installed in Loc-RIB and will be excluded from the next phase of
// route selection.
// The function that calculates the degree of preference for a given
// route SHALL NOT use any of the following as its inputs: the existence
// of other routes, the non-existence of other routes, or the path
// attributes of other routes. Route selection then consists of the
// individual application of the degree of preference function to each
// feasible route, followed by the choice of the one with the highest
// degree of preference.
// The Decision Process operates on routes contained in the Adj-RIBs-In,
// and is responsible for:
// - selection of routes to be used locally by the speaker
// - selection of routes to be advertised to other BGP peers
// - route aggregation and route information reduction
// The Decision Process takes place in three distinct phases, each
// triggered by a different event:
// a) Phase 1 is responsible for calculating the degree of preference
// for each route received from a peer.
// b) Phase 2 is invoked on completion of phase 1. It is responsible
// for choosing the best route out of all those available for each
// distinct destination, and for installing each chosen route into
// the Loc-RIB.
// c) Phase 3 is invoked after the Loc-RIB has been modified. It is
// responsible for disseminating routes in the Loc-RIB to each
// peer, according to the policies contained in the PIB. Route
// aggregation and information reduction can optionally be
// performed within this phase.
// 9.1.1. Phase 1: Calculation of Degree of Preference
// The Phase 1 decision function is invoked whenever the local BGP
// speaker receives, from a peer, an UPDATE message that advertises a
// new route, a replacement route, or withdrawn routes.
// The Phase 1 decision function is a separate process, which completes
// when it has no further work to do.
// The Phase 1 decision function locks an Adj-RIB-In prior to operating
// on any route contained within it, and unlocks it after operating on
// all new or unfeasible routes contained within it.
// For each newly received or replacement feasible route, the local BGP
// speaker determines a degree of preference as follows:
// If the route is learned from an internal peer, either the value of
// the LOCAL_PREF attribute is taken as the degree of preference, or
// the local system computes the degree of preference of the route
// based on preconfigured policy information. Note that the latter
// may result in formation of persistent routing loops.
// If the route is learned from an external peer, then the local BGP
// speaker computes the degree of preference based on preconfigured
// policy information. If the return value indicates the route is
// ineligible, the route MAY NOT serve as an input to the next phase
// of route selection; otherwise, the return value MUST be used as
// the LOCAL_PREF value in any IBGP readvertisement.
// The exact nature of this policy information, and the computation
// involved, is a local matter.
// 9.1.2. Phase 2: Route Selection
// The Phase 2 decision function is invoked on completion of Phase 1.
// The Phase 2 function is a separate process, which completes when it
// has no further work to do. The Phase 2 process considers all routes
// that are eligible in the Adj-RIBs-In.
// The Phase 2 decision function is blocked from running while the Phase
// 3 decision function is in process. The Phase 2 function locks all
// Adj-RIBs-In prior to commencing its function, and unlocks them on
// completion.
// If the NEXT_HOP attribute of a BGP route depicts an address that is
// not resolvable, or if it would become unresolvable if the route was
// installed in the routing table, the BGP route MUST be excluded from
// the Phase 2 decision function.
// If the AS_PATH attribute of a BGP route contains an AS loop, the BGP
// route should be excluded from the Phase 2 decision function. AS loop
// detection is done by scanning the full AS path (as specified in the
// AS_PATH attribute), and checking that the autonomous system number of
// the local system does not appear in the AS path. Operations of a BGP
// speaker that is configured to accept routes with its own autonomous
// system number in the AS path are outside the scope of this document.
// It is critical that BGP speakers within an AS do not make conflicting
// decisions regarding route selection that would cause forwarding loops
// to occur.
// For each set of destinations for which a feasible route exists in the
// Adj-RIBs-In, the local BGP speaker identifies the route that has:
// a) the highest degree of preference of any route to the same set
// of destinations, or
// b) is the only route to that destination, or
// c) is selected as a result of the Phase 2 tie breaking rules
// specified in Section 9.1.2.2.
// The local speaker SHALL then install that route in the Loc-RIB,
// replacing any route to the same destination that is currently being
// held in the Loc-RIB. When the new BGP route is installed in the
// Routing Table, care must be taken to ensure that existing routes to
// the same destination that are now considered invalid are removed from
// the Routing Table. Whether the new BGP route replaces an existing
// non-BGP route in the Routing Table depends on the policy configured
// on the BGP speaker.
// The local speaker MUST determine the immediate next-hop address from
// the NEXT_HOP attribute of the selected route (see Section 5.1.3). If
// either the immediate next-hop or the IGP cost to the NEXT_HOP (where
// the NEXT_HOP is resolved through an IGP route) changes, Phase 2 Route
// Selection MUST be performed again.
// Notice that even though BGP routes do not have to be installed in the
// Routing Table with the immediate next-hop(s), implementations MUST
// take care that, before any packets are forwarded along a BGP route,
// its associated NEXT_HOP address is resolved to the immediate
// (directly connected) next-hop address, and that this address (or
// multiple addresses) is finally used for actual packet forwarding.
// Unresolvable routes SHALL be removed from the Loc-RIB and the routing
// table. However, corresponding unresolvable routes SHOULD be kept in
// the Adj-RIBs-In (in case they become resolvable).
// 9.1.2.1. Route Resolvability Condition
// As indicated in Section 9.1.2, BGP speakers SHOULD exclude
// unresolvable routes from the Phase 2 decision. This ensures that
// only valid routes are installed in Loc-RIB and the Routing Table.
// The route resolvability condition is defined as follows:
// 1) A route Rte1, referencing only the intermediate network
// address, is considered resolvable if the Routing Table contains
// at least one resolvable route Rte2 that matches Rte1's
// intermediate network address and is not recursively resolved
// (directly or indirectly) through Rte1. If multiple matching
// routes are available, only the longest matching route SHOULD be
// considered.
// 2) Routes referencing interfaces (with or without intermediate
// addresses) are considered resolvable if the state of the
// referenced interface is up and if IP processing is enabled on
// this interface.
// BGP routes do not refer to interfaces, but can be resolved through
// the routes in the Routing Table that can be of both types (those that
// specify interfaces or those that do not). IGP routes and routes to
// directly connected networks are expected to specify the outbound
// interface. Static routes can specify the outbound interface, the
// intermediate address, or both.
// Note that a BGP route is considered unresolvable in a situation where
// the BGP speaker's Routing Table contains no route matching the BGP
// route's NEXT_HOP. Mutually recursive routes (routes resolving each
// other or themselves) also fail the resolvability check.
// It is also important that implementations do not consider feasible
// routes that would become unresolvable if they were installed in the
// Routing Table, even if their NEXT_HOPs are resolvable using the
// current contents of the Routing Table (an example of such routes
// would be mutually recursive routes). This check ensures that a BGP
// speaker does not install routes in the Routing Table that will be
// removed and not used by the speaker. Therefore, in addition to local
// Routing Table stability, this check also improves behavior of the
// protocol in the network.
// Whenever a BGP speaker identifies a route that fails the
// resolvability check because of mutual recursion, an error message
// SHOULD be logged.
// 9.1.2.2. Breaking Ties (Phase 2)
// In its Adj-RIBs-In, a BGP speaker may have several routes to the same
// destination that have the same degree of preference. The local
// speaker can select only one of these routes for inclusion in the
// associated Loc-RIB. The local speaker considers all routes with the
// same degrees of preference, both those received from internal peers,
// and those received from external peers.
// The following tie-breaking procedure assumes that, for each candidate
// route, all the BGP speakers within an autonomous system can ascertain
// the cost of a path (interior distance) to the address depicted by the
// NEXT_HOP attribute of the route, and follow the same route selection
// algorithm.
// The tie-breaking algorithm begins by considering all equally
// preferable routes to the same destination, and then selects routes to
// be removed from consideration. The algorithm terminates as soon as
// only one route remains in consideration. The criteria MUST be
// applied in the order specified.
// Several of the criteria are described using pseudo-code. Note that
// the pseudo-code shown was chosen for clarity, not efficiency. It is
// not intended to specify any particular implementation. BGP
// implementations MAY use any algorithm that produces the same results
// as those described here.
// a) Remove from consideration all routes that are not tied for
// having the smallest number of AS numbers present in their
// AS_PATH attributes. Note that when counting this number, an
// AS_SET counts as 1, no matter how many ASes are in the set.
// b) Remove from consideration all routes that are not tied for
// having the lowest Origin number in their Origin attribute.
// c) Remove from consideration routes with less-preferred
// MULTI_EXIT_DISC attributes. MULTI_EXIT_DISC is only comparable
// between routes learned from the same neighboring AS (the
// neighboring AS is determined from the AS_PATH attribute).
// Routes that do not have the MULTI_EXIT_DISC attribute are
// considered to have the lowest possible MULTI_EXIT_DISC value.
// This is also described in the following procedure:
// for m = all routes still under consideration
// for n = all routes still under consideration
// if (neighborAS(m) == neighborAS(n)) and (MED(n) < MED(m))
// remove route m from consideration
// In the pseudo-code above, MED(n) is a function that returns the
// value of route n's MULTI_EXIT_DISC attribute. If route n has
// no MULTI_EXIT_DISC attribute, the function returns the lowest
// possible MULTI_EXIT_DISC value (i.e., 0).
// Similarly, neighborAS(n) is a function that returns the
// neighbor AS from which the route was received. If the route is
// learned via IBGP, and the other IBGP speaker didn't originate
// the route, it is the neighbor AS from which the other IBGP
// speaker learned the route. If the route is learned via IBGP,
// and the other IBGP speaker either (a) originated the route, or
// (b) created the route by aggregation and the AS_PATH attribute
// of the aggregate route is either empty or begins with an
// AS_SET, it is the local AS.
// If a MULTI_EXIT_DISC attribute is removed before re-advertising
// a route into IBGP, then comparison based on the received EBGP
// MULTI_EXIT_DISC attribute MAY still be performed. If an
// implementation chooses to remove MULTI_EXIT_DISC, then the
// optional comparison on MULTI_EXIT_DISC, if performed, MUST be
// performed only among EBGP-learned routes. The best EBGP-
// learned route may then be compared with IBGP-learned routes
// after the removal of the MULTI_EXIT_DISC attribute. If
// MULTI_EXIT_DISC is removed from a subset of EBGP-learned
// routes, and the selected "best" EBGP-learned route will not
// have MULTI_EXIT_DISC removed, then the MULTI_EXIT_DISC must be
// used in the comparison with IBGP-learned routes. For IBGP-
// learned routes, the MULTI_EXIT_DISC MUST be used in route
// comparisons that reach this step in the Decision Process.
// Including the MULTI_EXIT_DISC of an EBGP-learned route in the
// comparison with an IBGP-learned route, then removing the
// MULTI_EXIT_DISC attribute, and advertising the route has been
// proven to cause route loops.
// d) If at least one of the candidate routes was received via EBGP,
// remove from consideration all routes that were received via
// IBGP.
// e) Remove from consideration any routes with less-preferred
// interior cost. The interior cost of a route is determined by
// calculating the metric to the NEXT_HOP for the route using the
// Routing Table. If the NEXT_HOP for a route is reachable,
// but no cost can be determined, then this step should be skipped
// (equivalently, consider all routes to have equal costs).
// This is also described in the following procedure.
// for m = all routes still under consideration
// for n = all routes still under consideration
// if (cost(n) is lower than cost(m))
// remove m from consideration
// In the pseudo-code above, cost(n) is a function that returns
// the cost of the path (interior distance) to the address given
// in the NEXT_HOP attribute of the route.
// f) Remove from consideration all routes other than the route that
// was advertised by the BGP speaker with the lowest BGP
// Identifier value.
// g) Prefer the route received from the lowest peer address.
// 9.1.3. Phase 3: Route Dissemination
// The Phase 3 decision function is invoked on completion of Phase 2, or
// when any of the following events occur:
// a) when routes in the Loc-RIB to local destinations have changed
// b) when locally generated routes learned by means outside of BGP
// have changed
// c) when a new BGP speaker connection has been established
// The Phase 3 function is a separate process that completes when it has
// no further work to do. The Phase 3 Routing Decision function is
// blocked from running while the Phase 2 decision function is in
// process.
// All routes in the Loc-RIB are processed into Adj-RIBs-Out according
// to configured policy. This policy MAY exclude a route in the Loc-RIB
// from being installed in a particular Adj-RIB-Out. A route SHALL NOT
// be installed in the Adj-Rib-Out unless the destination, and NEXT_HOP
// described by this route, may be forwarded appropriately by the
// Routing Table. If a route in Loc-RIB is excluded from a particular
// Adj-RIB-Out, the previously advertised route in that Adj-RIB-Out MUST
// be withdrawn from service by means of an UPDATE message (see 9.2).
// Route aggregation and information reduction techniques (see Section
// 9.2.2.1) may optionally be applied.
// Any local policy that results in routes being added to an Adj-RIB-Out
// without also being added to the local BGP speaker's forwarding table
// is outside the scope of this document.
// When the updating of the Adj-RIBs-Out and the Routing Table is
// complete, the local BGP speaker runs the Update-Send process of 9.2.
// 9.1.4. Overlapping Routes
// A BGP speaker may transmit routes with overlapping Network Layer
// Reachability Information (NLRI) to another BGP speaker. NLRI overlap
// occurs when a set of destinations are identified in non-matching
// multiple routes. Because BGP encodes NLRI using IP prefixes, overlap
// will always exhibit subset relationships. A route describing a
// smaller set of destinations (a longer prefix) is said to be more
// specific than a route describing a larger set of destinations (a
// shorter prefix); similarly, a route describing a larger set of
// destinations is said to be less specific than a route describing a
// smaller set of destinations.
// The precedence relationship effectively decomposes less specific
// routes into two parts:
// - a set of destinations described only by the less specific route,
// and
// - a set of destinations described by the overlap of the less
// specific and the more specific routes
// The set of destinations described by the overlap represents a portion
// of the less specific route that is feasible, but is not currently in
// use. If a more specific route is later withdrawn, the set of
// destinations described by the overlap will still be reachable using
// the less specific route.
// If a BGP speaker receives overlapping routes, the Decision Process
// MUST consider both routes based on the configured acceptance policy.
// If both a less and a more specific route are accepted, then the
// Decision Process MUST install, in Loc-RIB, either both the less and
// the more specific routes or aggregate the two routes and install, in
// Loc-RIB, the aggregated route, provided that both routes have the
// same value of the NEXT_HOP attribute.
// If a BGP speaker chooses to aggregate, then it SHOULD either include
// all ASes used to form the aggregate in an AS_SET, or add the
// ATOMIC_AGGREGATE attribute to the route. This attribute is now
// primarily informational. With the elimination of IP routing
// protocols that do not support classless routing, and the elimination
// of router and host implementations that do not support classless
// routing, there is no longer a need to de-aggregate. Routes SHOULD
// NOT be de-aggregated. In particular, a route that carries the
// ATOMIC_AGGREGATE attribute MUST NOT be de-aggregated. That is, the
// NLRI of this route cannot be more specific. Forwarding along such a
// route does not guarantee that IP packets will actually traverse only
// ASes listed in the AS_PATH attribute of the route.
// 9.2. Update-Send Process
// The Update-Send process is responsible for advertising UPDATE
// messages to all peers. For example, it distributes the routes chosen
// by the Decision Process to other BGP speakers, which may be located
// in either the same autonomous system or a neighboring autonomous
// system.
// When a BGP speaker receives an UPDATE message from an internal peer,
// the receiving BGP speaker SHALL NOT re-distribute the routing
// information contained in that UPDATE message to other internal peers
// (unless the speaker acts as a BGP Route Reflector [RFC2796]).
// As part of Phase 3 of the route selection process, the BGP speaker
// has updated its Adj-RIBs-Out. All newly installed routes and all
// newly unfeasible routes for which there is no replacement route SHALL
// be advertised to its peers by means of an UPDATE message.
// A BGP speaker SHOULD NOT advertise a given feasible BGP route from
// its Adj-RIB-Out if it would produce an UPDATE message containing the
// same BGP route as was previously advertised.
// Any routes in the Loc-RIB marked as unfeasible SHALL be removed.
// Changes to the reachable destinations within its own autonomous
// system SHALL also be advertised in an UPDATE message.
// If, due to the limits on the maximum size of an UPDATE message (see
// Section 4), a single route doesn't fit into the message, the BGP
// speaker MUST not advertise the route to its peers and MAY choose to
// log an error locally.
// 9.2.1. Controlling Routing Traffic Overhead
// The BGP protocol constrains the amount of routing traffic (that is,
// UPDATE messages), in order to limit both the link bandwidth needed to
// advertise UPDATE messages and the processing power needed by the
// Decision Process to digest the information contained in the UPDATE
// messages.
// 9.2.1.1. Frequency of Route Advertisement
// The parameter MinRouteAdvertisementIntervalTimer determines the
// minimum amount of time that must elapse between an advertisement
// and/or withdrawal of routes to a particular destination by a BGP
// speaker to a peer. This rate limiting procedure applies on a per-
// destination basis, although the value of
// MinRouteAdvertisementIntervalTimer is set on a per BGP peer basis.
// Two UPDATE messages sent by a BGP speaker to a peer that advertise
// feasible routes and/or withdrawal of unfeasible routes to some common
// set of destinations MUST be separated by at least
// MinRouteAdvertisementIntervalTimer. This can only be achieved by
// keeping a separate timer for each common set of destinations. This
// would be unwarranted overhead. Any technique that ensures that the
// interval between two UPDATE messages sent from a BGP speaker to a
// peer that advertise feasible routes and/or withdrawal of unfeasible
// routes to some common set of destinations will be at least
// MinRouteAdvertisementIntervalTimer, and will also ensure that a
// constant upper bound on the interval is acceptable.
// Since fast convergence is needed within an autonomous system, either
// (a) the MinRouteAdvertisementIntervalTimer used for internal peers
// SHOULD be shorter than the MinRouteAdvertisementIntervalTimer used
// for external peers, or (b) the procedure described in this section
// SHOULD NOT apply to routes sent to internal peers.
// This procedure does not limit the rate of route selection, but only
// the rate of route advertisement. If new routes are selected multiple
// times while awaiting the expiration of
// MinRouteAdvertisementIntervalTimer, the last route selected SHALL be
// advertised at the end of MinRouteAdvertisementIntervalTimer.
// 9.2.1.2. Frequency of Route Origination
// The parameter MinASOriginationIntervalTimer determines the minimum
// amount of time that must elapse between successive advertisements of
// UPDATE messages that report changes within the advertising BGP
// speaker's own autonomous systems.
// 9.2.2. Efficient Organization of Routing Information
// Having selected the routing information it will advertise, a BGP
// speaker may avail itself of several methods to organize this
// information in an efficient manner.
// 9.2.2.1. Information Reduction
// Information reduction may imply a reduction in granularity of policy
// control - after information is collapsed, the same policies will
// apply to all destinations and paths in the equivalence class.
// The Decision Process may optionally reduce the amount of information
// that it will place in the Adj-RIBs-Out by any of the following
// methods:
// a) Network Layer Reachability Information (NLRI):
// Destination IP addresses can be represented as IP address
// prefixes. In cases where there is a correspondence between the
// address structure and the systems under control of an
// autonomous system administrator, it will be possible to reduce
// the size of the NLRI carried in the UPDATE messages.
// b) AS_PATHs:
// AS path information can be represented as ordered AS_SEQUENCEs
// or unordered AS_SETs. AS_SETs are used in the route
// aggregation algorithm described in Section 9.2.2.2. They
// reduce the size of the AS_PATH information by listing each AS
// number only once, regardless of how many times it may have
// appeared in multiple AS_PATHs that were aggregated.
// An AS_SET implies that the destinations listed in the NLRI can
// be reached through paths that traverse at least some of the
// constituent autonomous systems. AS_SETs provide sufficient
// information to avoid routing information looping; however,
// their use may prune potentially feasible paths because such
// paths are no longer listed individually in the form of
// AS_SEQUENCEs. In practice, this is not likely to be a problem
// because once an IP packet arrives at the edge of a group of
// autonomous systems, the BGP speaker is likely to have more
// detailed path information and can distinguish individual paths
// from destinations.
// 9.2.2.2. Aggregating Routing Information
// Aggregation is the process of combining the characteristics of
// several different routes in such a way that a single route can be
// advertised. Aggregation can occur as part of the Decision Process to
// reduce the amount of routing information that will be placed in the
// Adj-RIBs-Out.
// Aggregation reduces the amount of information that a BGP speaker must
// store and exchange with other BGP speakers. Routes can be aggregated
// by applying the following procedure, separately, to path attributes
// of the same type and to the Network Layer Reachability Information.
// Routes that have different MULTI_EXIT_DISC attributes SHALL NOT be
// aggregated.
// If the aggregated route has an AS_SET as the first element in its
// AS_PATH attribute, then the router that originates the route SHOULD
// NOT advertise the MULTI_EXIT_DISC attribute with this route.
// Path attributes that have different type codes cannot be aggregated
// together. Path attributes of the same type code may be aggregated,
// according to the following rules:
// NEXT_HOP:
// When aggregating routes that have different NEXT_HOP
// attributes, the NEXT_HOP attribute of the aggregated route
// SHALL identify an interface on the BGP speaker that performs
// the aggregation.
// ORIGIN attribute:
// If at least one route among routes that are aggregated has
// ORIGIN with the value INCOMPLETE, then the aggregated route
// MUST have the ORIGIN attribute with the value INCOMPLETE.
// Otherwise, if at least one route among routes that are
// aggregated has ORIGIN with the value EGP, then the aggregated
// route MUST have the ORIGIN attribute with the value EGP. In
// all other cases, the value of the ORIGIN attribute of the
// aggregated route is IGP.
// AS_PATH attribute:
// If routes to be aggregated have identical AS_PATH attributes,
// then the aggregated route has the same AS_PATH attribute as
// each individual route.
// For the purpose of aggregating AS_PATH attributes, we model
// each AS within the AS_PATH attribute as a tuple <type, value>,
// where "type" identifies a type of the path segment the AS
// belongs to (e.g., AS_SEQUENCE, AS_SET), and "value" identifies
// the AS number. If the routes to be aggregated have different
// AS_PATH attributes, then the aggregated AS_PATH attribute SHALL
// satisfy all of the following conditions:
// - all tuples of type AS_SEQUENCE in the aggregated AS_PATH
// SHALL appear in all of the AS_PATHs in the initial set of
// routes to be aggregated.
// - all tuples of type AS_SET in the aggregated AS_PATH SHALL
// appear in at least one of the AS_PATHs in the initial set
// (they may appear as either AS_SET or AS_SEQUENCE types).
// - for any tuple X of type AS_SEQUENCE in the aggregated
// AS_PATH, which precedes tuple Y in the aggregated AS_PATH,
// X precedes Y in each AS_PATH in the initial set, which
// contains Y, regardless of the type of Y.
// - No tuple of type AS_SET with the same value SHALL appear
// more than once in the aggregated AS_PATH.
// - Multiple tuples of type AS_SEQUENCE with the same value may
// appear in the aggregated AS_PATH only when adjacent to
// another tuple of the same type and value.
// An implementation may choose any algorithm that conforms to
// these rules. At a minimum, a conformant implementation SHALL
// be able to perform the following algorithm that meets all of
// the above conditions:
// - determine the longest leading sequence of tuples (as
// defined above) common to all the AS_PATH attributes of the
// routes to be aggregated. Make this sequence the leading
// sequence of the aggregated AS_PATH attribute.
// - set the type of the rest of the tuples from the AS_PATH
// attributes of the routes to be aggregated to AS_SET, and
// append them to the aggregated AS_PATH attribute.
// - if the aggregated AS_PATH has more than one tuple with the
// same value (regardless of tuple's type), eliminate all but
// one such tuple by deleting tuples of the type AS_SET from
// the aggregated AS_PATH attribute.
// - for each pair of adjacent tuples in the aggregated AS_PATH,
// if both tuples have the same type, merge them together, as
// long as doing so will not cause a segment with a length
// greater than 255 to be generated.
// Appendix F, Section F.6 presents another algorithm that
// satisfies the conditions and allows for more complex policy
// configurations.
// ATOMIC_AGGREGATE:
// If at least one of the routes to be aggregated has
// ATOMIC_AGGREGATE path attribute, then the aggregated route
// SHALL have this attribute as well.
// AGGREGATOR:
// Any AGGREGATOR attributes from the routes to be aggregated MUST
// NOT be included in the aggregated route. The BGP speaker
// performing the route aggregation MAY attach a new AGGREGATOR
// attribute (see Section 5.1.7).
// 9.3. Route Selection Criteria
// Generally, additional rules for comparing routes among several
// alternatives are outside the scope of this document. There are two
// exceptions:
// - If the local AS appears in the AS path of the new route being
// considered, then that new route cannot be viewed as better than
// any other route (provided that the speaker is configured to
// accept such routes). If such a route were ever used, a routing
// loop could result.
// - In order to achieve a successful distributed operation, only
// routes with a likelihood of stability can be chosen. Thus, an
// AS SHOULD avoid using unstable routes, and it SHOULD NOT make
// rapid, spontaneous changes to its choice of route. Quantifying
// the terms "unstable" and "rapid" (from the previous sentence)
// will require experience, but the principle is clear. Routes
// that are unstable can be "penalized" (e.g., by using the
// procedures described in [RFC2439]).
// 9.4. Originating BGP routes
// A BGP speaker may originate BGP routes by injecting routing
// information acquired by some other means (e.g., via an IGP) into BGP.
// A BGP speaker that originates BGP routes assigns the degree of
// preference (e.g., according to local configuration) to these routes
// by passing them through the Decision Process (see Section 9.1).
// These routes MAY also be distributed to other BGP speakers within the
// local AS as part of the update process (see Section 9.2). The
// decision of whether to distribute non-BGP acquired routes within an
// AS via BGP depends on the environment within the AS (e.g., type of
// IGP) and SHOULD be controlled via configuration.
// Appendix F.1. Multiple Networks Per Message
// The BGP protocol allows for multiple address prefixes with the same
// path attributes to be specified in one message. Using this
// capability is highly recommended. With one address prefix per
// message there is a substantial increase in overhead in the receiver.
// Not only does the system overhead increase due to the reception of
// multiple messages, but the overhead of scanning the routing table for
// updates to BGP peers and other routing protocols (and sending the
// associated messages) is incurred multiple times as well.
// One method of building messages that contain many address prefixes
// per path attribute set from a routing table that is not organized on
// a per path attribute set basis is to build many messages as the
// routing table is scanned. As each address prefix is processed, a
// message for the associated set of path attributes is allocated, if it
// does not exist, and the new address prefix is added to it. If such a
// message exists, the new address prefix is appended to it. If the
// message lacks the space to hold the new address prefix, it is
// transmitted, a new message is allocated, and the new address prefix
// is inserted into the new message. When the entire routing table has
// been scanned, all allocated messages are sent and their resources are
// released. Maximum compression is achieved when all destinations
// covered by the address prefixes share a common set of path
// attributes, making it possible to send many address prefixes in one
// 4096-byte message.
// When peering with a BGP implementation that does not compress
// multiple address prefixes into one message, it may be necessary to
// take steps to reduce the overhead from the flood of data received
// when a peer is acquired or when a significant network topology change
// occurs. One method of doing this is to limit the rate of updates.
// This will eliminate the redundant scanning of the routing table to
// provide flash updates for BGP peers and other routing protocols. A
// disadvantage of this approach is that it increases the propagation
// latency of routing information. By choosing a minimum flash update
// interval that is not much greater than the time it takes to process
// the multiple messages, this latency should be minimized. A better
// method would be to read all received messages before sending updates.
// Appendix F.2. Reducing Route Flapping
// To avoid excessive route flapping, a BGP speaker that needs to
// withdraw a destination and send an update about a more specific or
// less specific route should combine them into the same UPDATE message.
// Appendix F.3. Path Attribute Ordering
// Implementations that combine update messages (as described above in
// Section 6.1) may prefer to see all path attributes presented in a
// known order. This permits them to quickly identify sets of
// attributes from different update messages that are semantically
// identical. To facilitate this, it is a useful optimization to order
// the path attributes according to type code. This optimization is
// entirely optional.
// Appendix F.4. AS_SET Sorting
// Another useful optimization that can be done to simplify this
// situation is to sort the AS numbers found in an AS_SET. This
// optimization is entirely optional.
// Appendix F.5. Control Over Version Negotiation
// Because BGP-4 is capable of carrying aggregated routes that cannot be
// properly represented in BGP-3, an implementation that supports BGP-4
// and another BGP version should provide the capability to only speak
// BGP-4 on a per-peer basis.
// Appendix F.6. Complex AS_PATH Aggregation
// An implementation that chooses to provide a path aggregation
// algorithm retaining significant amounts of path information may wish
// to use the following procedure:
// For the purpose of aggregating AS_PATH attributes of two routes,
// we model each AS as a tuple <type, value>, where "type" identifies
// a type of the path segment the AS belongs to (e.g., AS_SEQUENCE,
// AS_SET), and "value" is the AS number. Two ASes are said to be
// the same if their corresponding <type, value> tuples are the same.
// The algorithm to aggregate two AS_PATH attributes works as
// follows:
// a) Identify the same ASes (as defined above) within each
// AS_PATH attribute that are in the same relative order within
// both AS_PATH attributes. Two ASes, X and Y, are said to be
// in the same order if either:
// - X precedes Y in both AS_PATH attributes, or
// - Y precedes X in both AS_PATH attributes.
// b) The aggregated AS_PATH attribute consists of ASes identified
// in (a), in exactly the same order as they appear in the
// AS_PATH attributes to be aggregated. If two consecutive
// ASes identified in (a) do not immediately follow each other
// in both of the AS_PATH attributes to be aggregated, then the
// intervening ASes (ASes that are between the two consecutive
// ASes that are the same) in both attributes are combined into
// an AS_SET path segment that consists of the intervening ASes
// from both AS_PATH attributes. This segment is then placed
// between the two consecutive ASes identified in (a) of the
// aggregated attribute. If two consecutive ASes identified in
// (a) immediately follow each other in one attribute, but do
// not follow in another, then the intervening ASes of the
// latter are combined into an AS_SET path segment. This
// segment is then placed between the two consecutive ASes
// identified in (a) of the aggregated attribute.
// c) For each pair of adjacent tuples in the aggregated AS_PATH,
// if both tuples have the same type, merge them together if
// doing so will not cause a segment of a length greater than
// 255 to be generated.
// If, as a result of the above procedure, a given AS number appears
// more than once within the aggregated AS_PATH attribute, all but
// the last instance (rightmost occurrence) of that AS number should
// be removed from the aggregated AS_PATH attribute. | old/speaker/speaker.go | 0.620392 | 0.533944 | speaker.go | starcoder |
package components
import (
"math"
"github.com/factorion/graytracer/pkg/primitives"
"github.com/factorion/graytracer/pkg/shapes"
)
// Computations Set of pre-computed values used for point detection
type Computations struct {
	shapes.Intersection
	// Point is the hit location on the object; OverPoint/UnderPoint are the
	// same location nudged along the normal (outward/inward) to avoid
	// floating-point acne when spawning shadow, reflection, and refraction rays.
	Point, OverPoint, UnderPoint, EyeVector, NormalVector, ReflectVector primitives.PV
	// Index1 and Index2 are the refractive indices of the materials on the
	// incoming and outgoing sides of the intersection, respectively.
	Index1, Index2 float64
	// Inside is true when the ray originated inside the object
	// (the surface normal pointed away from the eye).
	Inside bool
}
// Schlick Calculate an approximation of the Fresnel effect
func (c Computations) Schlick() float64 {
	// Cosine of the angle between the eye and the surface normal.
	cosine := c.EyeVector.DotProduct(c.NormalVector)
	// When leaving a denser medium, check for total internal reflection.
	if c.Index1 > c.Index2 {
		ratio := c.Index1 / c.Index2
		sin2Theta := math.Pow(ratio, 2) * (1.0 - math.Pow(cosine, 2))
		// Beyond the critical angle everything is reflected.
		if sin2Theta > 1.0 {
			return 1.0
		}
		// Use the cosine of the refracted angle instead.
		cosine = math.Sqrt(1.0 - sin2Theta)
	}
	// Reflectance at normal incidence.
	reflectance := math.Pow((c.Index1-c.Index2)/(c.Index1+c.Index2), 2)
	return reflectance + ((1 - reflectance) * math.Pow(1-cosine, 5))
}
// PrepareComputations Calculates the vectors at the point on the object
func PrepareComputations(i shapes.Intersection, ray primitives.Ray, xs shapes.Intersections) Computations {
	result := Computations{Intersection: i}
	result.Point = ray.Position(result.Distance)
	result.EyeVector = ray.Direction.Negate()
	result.NormalVector = result.Obj.Normal(result.Point)
	// If the normal points away from the eye, the hit is on the inside
	// of the object; flip the normal so shading works correctly.
	result.Inside = result.NormalVector.DotProduct(result.EyeVector) < 0
	if result.Inside {
		result.NormalVector = result.NormalVector.Negate()
	}
	offset := result.NormalVector.Scalar(primitives.EPSILON)
	result.OverPoint = result.Point.Add(offset)
	result.UnderPoint = result.Point.Subtract(offset)
	result.ReflectVector = ray.Direction.Reflect(result.NormalVector)
	// Walk the (sorted) intersection list, tracking which objects the ray
	// is currently inside of, to find the refractive indices on either
	// side of the hit: Index1 before entering, Index2 after.
	var containers []shapes.Shape
	for _, inter := range xs {
		if len(containers) == 0 {
			// Not inside anything: assume a vacuum.
			result.Index1 = 1.0
		} else {
			result.Index1 = containers[len(containers)-1].Material().RefractiveIndex
		}
		// Entering an object pushes it; hitting it again means we are exiting.
		if pos := contains(containers, inter.Obj); pos >= 0 {
			containers = append(containers[:pos], containers[pos+1:]...)
		} else {
			containers = append(containers, inter.Obj)
		}
		if inter == i {
			if len(containers) == 0 {
				result.Index2 = 1.0
			} else {
				result.Index2 = containers[len(containers)-1].Material().RefractiveIndex
			}
			// Only the hit intersection matters; stop scanning.
			break
		}
	}
	return result
}
func contains(s []shapes.Shape, e shapes.Shape) int {
for i, a := range s {
if a == e {
return i
}
}
return -1
} | pkg/components/computations.go | 0.746139 | 0.593609 | computations.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.